| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
fsonntag/ud858 | Lesson_4/00_Conference_Central/conference.py | 35 | 3749 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - - - -
# TODO
# registers API
api = endpoints.api_server([ConferenceApi])
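# Note: with name='conference', version='v1' and path='profile' as declared
# above, Cloud Endpoints typically serves these methods at
#   GET  https://<app-id>.appspot.com/_ah/api/conference/v1/profile   (getProfile)
#   POST https://<app-id>.appspot.com/_ah/api/conference/v1/profile   (saveProfile)
# where <app-id> is the App Engine application ID. Both calls need an OAuth
# token for one of the allowed_client_ids, since _getProfileFromUser() raises
# UnauthorizedException for anonymous callers; the methods can also be tried
# interactively via the APIs Explorer at /_ah/api/explorer on the deployed app.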
| gpl-3.0 |
JackKelly/neuralnilm_prototype | scripts/e445.py | 2 | 38141 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
444: just testing new DimshuffleLayer.
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 5000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 1024
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['washer dryer', 'washing machine'],
'hair straighteners'
# 'television',
# 'dish washer',
# ['fridge freezer', 'fridge', 'freezer']
],
max_appliance_powers=[2400, 500, 200, 2500, 200],
# max_input_power=200,
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[1800, 60, 60, 1800, 60],
min_off_durations=[600, 12, 12, 1800, 12],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0.2,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
2000: 1e-3,
10000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
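# Note: with SEQ_LENGTH = 1024, filter_length = 4 and unit stride, the 'valid'
# convolution in exp_a above shortens the time axis to 1024 - 4 + 1 = 1021
# steps and the 'full' deconvolution grows it back to 1021 + 4 - 1 = 1024, so
# the final DenseLayer maps one value per input time step. A small sanity
# check of that arithmetic (illustrative helper only; the experiments below do
# not use it):
def _conv1d_output_length(input_length, filter_length, border_mode):
    """Output length of a stride-1 1D (de)convolution for the border modes used here."""
    if border_mode == 'valid':
        return input_length - filter_length + 1
    if border_mode == 'full':
        return input_length + filter_length - 1
    raise ValueError("unexpected border_mode: {}".format(border_mode))

assert _conv1d_output_length(SEQ_LENGTH, 4, 'valid') == 1021
assert _conv1d_output_length(1021, 4, 'full') == SEQ_LENGTH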
def exp_b(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': Conv1DLayer,
'num_filters': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
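# Note: the 'W': 'ref:conv0.W.dimshuffle(1, 0, 2)' strings above appear to tie
# the deconvolution kernels to conv0's. A Lasagne Conv1DLayer weight has shape
# (num_filters, num_input_channels, filter_length), i.e. (16, 1, 2) here;
# dimshuffle(1, 0, 2) swaps the first two axes to (1, 16, 2) so the encoder's
# filters are reused by the decoder. The variants with a trailing [:, :, ::-1]
# (exp_f, exp_h and later experiments) also reverse each kernel along the time
# axis, the usual kernel flip for a transposed convolution.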
def exp_e(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_f(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_g(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_h(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_i(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'label': 'dense0',
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
},
{
'type': SharedWeightsDenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None,
'W': 'ref:dense0.W.T'
}
]
net = Net(**net_dict_copy)
return net
def exp_j(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_k(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_l(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_m(name):
# avg valid cost = 0.0016604423
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_n(name):
# i but with no biases
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'label': 'dense0',
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None,
'b': None
},
{
'type': SharedWeightsDenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None,
'W': 'ref:dense0.W.T',
'b': None
}
]
net = Net(**net_dict_copy)
return net
def exp_o(name):
# tied biases (forwards)
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': 'ref:conv0.b.T',
'shared_weights': True,
'shared_biases': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_p(name):
# tied biases (backwards)
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': 'ref:conv0.b.T[::-1]',
'shared_weights': True,
'shared_biases': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_q(name):
# separate biases
# avg valid cost = 0.0012744671
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_r(name):
# no biases
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid',
'b': None
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'b': None,
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_s(name):
# separate biases
# q but don't reverse W
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_t(name):
# separate biases
# based on s
# but with dense layers in between
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
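# Note: a rough shape trace for exp_t above, assuming unit stride and a single
# input channel (include_power only):
#   input                (64, 1024, 1)   # (batch, time, features)
#   dimshuffle           (64, 1, 1024)
#   conv0, 'valid'       (64, 4, 1021)   # 1021 = 1024 - 4 + 1
#   dimshuffle           (64, 1021, 4)
#   dense + reshape      (64, 1021, 4)   # 1021 * NUM_FILTERS units, unflattened
#   dimshuffle           (64, 4, 1021)
#   deconv, 'full'       (64, 1, 1024)   # 1024 = 1021 + 4 - 1
#   dimshuffle           (64, 1024, 1)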
def exp_u(name):
# separate biases
# based on s
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
# {
# 'type': DimshuffleLayer,
# 'pattern': (0, 2, 1) # back to (batch, time, features)
# },
# {
# 'type': DenseLayer,
# 'num_units': 1021 * NUM_FILTERS,
# 'nonlinearity': rectify
# },
# {
# 'type': ReshapeLayer,
# 'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
# },
# {
# 'type': DimshuffleLayer,
# 'pattern': (0, 2, 1) # (batch, features, time)
# },
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_v(name):
# separate biases
# based on s
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-1,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_w(name):
# separate biases
# based on s
# like v but with a Dense linear layer
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-2,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': None
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def exp_x(name):
# separate biases
# based on s
    # like v but with a tied-weight dense bottleneck (1021 -> 256 -> 32 and back) between conv0 and the deconv
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-2,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': 1021,
'nonlinearity': rectify
},
{
'label': 'dense1',
'type': DenseLayer,
'num_units': 256,
'nonlinearity': rectify
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': SharedWeightsDenseLayer,
'num_units': 256,
'nonlinearity': rectify,
'W': 'ref:dense2.W.T'
},
{
'type': SharedWeightsDenseLayer,
'num_units': 1021,
'nonlinearity': rectify,
'W': 'ref:dense1.W.T'
},
{
'type': SharedWeightsDenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': None,
'W': 'ref:dense0.W.T'
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full',
'W': 'ref:conv0.W.dimshuffle(1, 0, 2)[:, :, ::-1]',
'shared_weights': True,
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('x')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=10000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
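# Note: init_experiment() appears to return the source text of a call such as
# "exp_x(full_exp_name)", which eval() above then dispatches, so
# EXPERIMENTS = list('x') runs only exp_x; e.g. list('aqt') would run exp_a,
# exp_q and exp_t in turn, each saving results under NAME + letter.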
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e445.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
apporc/neutron | neutron/tests/unit/agent/linux/test_pd.py | 27 | 1163 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import pd
from neutron.tests import base as tests_base
class FakeRouter(object):
def __init__(self, router_id):
self.router_id = router_id
class TestPrefixDelegation(tests_base.DietTestCase):
def test_remove_router(self):
l3_agent = mock.Mock()
router_id = 1
l3_agent.pd.routers = {router_id: pd.get_router_entry(None)}
pd.remove_router(None, None, l3_agent, router=FakeRouter(router_id))
self.assertTrue(l3_agent.pd.delete_router_pd.called)
self.assertEqual({}, l3_agent.pd.routers)
| apache-2.0 |
geodynamics/gale | config/scons/scons-local-1.2.0/SCons/compat/_scons_sets.py | 132 | 19624 | """Classes to represent arbitrary sets (including sets of sets).
This module implements sets using dictionaries whose values are
ignored. The usual operations (union, intersection, deletion, etc.)
are provided as both methods and operators.
Important: sets are not sequences! While they support 'x in s',
'len(s)', and 'for x in s', none of those operations are unique for
sequences; for example, mappings support all three as well. The
characteristic operation for sequences is subscripting with small
integers: s[i], for i in range(len(s)). Sets don't support
subscripting at all. Also, sequences allow multiple occurrences and
their elements have a definite order; sets on the other hand don't
record multiple occurrences and don't remember the order of element
insertion (which is why they don't support s[i]).
The following classes are provided:
BaseSet -- All the operations common to both mutable and immutable
sets. This is an abstract class, not meant to be directly
instantiated.
Set -- Mutable sets, subclass of BaseSet; not hashable.
ImmutableSet -- Immutable sets, subclass of BaseSet; hashable.
An iterable argument is mandatory to create an ImmutableSet.
_TemporarilyImmutableSet -- A wrapper around a Set, hashable,
giving the same hash value as the immutable set equivalent
would have. Do not use this class directly.
Only hashable objects can be added to a Set. In particular, you cannot
really add a Set as an element to another Set; if you try, what is
actually added is an ImmutableSet built from it (it compares equal to
the one you tried adding).
When you ask if `x in y' where x is a Set and y is a Set or
ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and
what's tested is actually `z in y'.
"""
# Code history:
#
# - Greg V. Wilson wrote the first version, using a different approach
# to the mutable/immutable problem, and inheriting from dict.
#
# - Alex Martelli modified Greg's version to implement the current
# Set/ImmutableSet approach, and make the data an attribute.
#
# - Guido van Rossum rewrote much of the code, made some API changes,
# and cleaned up the docstrings.
#
# - Raymond Hettinger added a number of speedups and other
# improvements.
from __future__ import generators
try:
from itertools import ifilter, ifilterfalse
except ImportError:
# Code to make the module run under Py2.2
def ifilter(predicate, iterable):
if predicate is None:
def predicate(x):
return x
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
def predicate(x):
return x
for x in iterable:
if not predicate(x):
yield x
try:
True, False
except NameError:
True, False = (0==0, 0!=0)
__all__ = ['BaseSet', 'Set', 'ImmutableSet']
class BaseSet(object):
"""Common base class for mutable and immutable sets."""
__slots__ = ['_data']
# Constructor
def __init__(self):
"""This is an abstract class."""
# Don't call this from a concrete subclass!
if self.__class__ is BaseSet:
raise TypeError, ("BaseSet is an abstract class. "
"Use Set or ImmutableSet.")
# Standard protocols: __len__, __repr__, __str__, __iter__
def __len__(self):
"""Return the number of elements of a set."""
return len(self._data)
def __repr__(self):
"""Return string representation of a set.
This looks like 'Set([<list of elements>])'.
"""
return self._repr()
# __str__ is the same as __repr__
__str__ = __repr__
def _repr(self, sorted=False):
elements = self._data.keys()
if sorted:
elements.sort()
return '%s(%r)' % (self.__class__.__name__, elements)
def __iter__(self):
"""Return an iterator over the elements or a set.
This is the keys iterator for the underlying dict.
"""
return self._data.iterkeys()
# Three-way comparison is not supported. However, because __eq__ is
# tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and
# then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this
# case).
def __cmp__(self, other):
raise TypeError, "can't compare sets using cmp()"
# Equality comparisons using the underlying dicts. Mixed-type comparisons
# are allowed here, where Set == z for non-Set z always returns False,
# and Set != z always True. This allows expressions like "x in y" to
# give the expected result when y is a sequence of mixed types, not
# raising a pointless TypeError just because y contains a Set, or x is
    # a Set and y contains a non-set ("in" invokes only __eq__).
# Subtle: it would be nicer if __eq__ and __ne__ could return
# NotImplemented instead of True or False. Then the other comparand
# would get a chance to determine the result, and if the other comparand
# also returned NotImplemented then it would fall back to object address
# comparison (which would always return False for __eq__ and always
# True for __ne__). However, that doesn't work, because this type
# *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented,
# Python tries __cmp__ next, and the __cmp__ here then raises TypeError.
def __eq__(self, other):
if isinstance(other, BaseSet):
return self._data == other._data
else:
return False
def __ne__(self, other):
if isinstance(other, BaseSet):
return self._data != other._data
else:
return True
# Copying operations
def copy(self):
"""Return a shallow copy of a set."""
result = self.__class__()
result._data.update(self._data)
return result
__copy__ = copy # For the copy module
def __deepcopy__(self, memo):
"""Return a deep copy of a set; used by copy module."""
# This pre-creates the result and inserts it in the memo
# early, in case the deep copy recurses into another reference
# to this same set. A set can't be an element of itself, but
# it can certainly contain an object that has a reference to
# itself.
from copy import deepcopy
result = self.__class__()
memo[id(self)] = result
data = result._data
value = True
for elt in self:
data[deepcopy(elt, memo)] = value
return result
# Standard set operations: union, intersection, both differences.
# Each has an operator version (e.g. __or__, invoked with |) and a
# method version (e.g. union).
# Subtle: Each pair requires distinct code so that the outcome is
# correct when the type of other isn't suitable. For example, if
# we did "union = __or__" instead, then Set().union(3) would return
# NotImplemented instead of raising TypeError (albeit that *why* it
# raises TypeError as-is is also a bit subtle).
def __or__(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.union(other)
def union(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
result = self.__class__(self)
result._update(other)
return result
def __and__(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.intersection(other)
def intersection(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
other = Set(other)
if len(self) <= len(other):
little, big = self, other
else:
little, big = other, self
common = ifilter(big._data.has_key, little)
return self.__class__(common)
def __xor__(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
result = self.__class__()
data = result._data
value = True
selfdata = self._data
try:
otherdata = other._data
except AttributeError:
otherdata = Set(other)._data
for elt in ifilterfalse(otherdata.has_key, selfdata):
data[elt] = value
for elt in ifilterfalse(selfdata.has_key, otherdata):
data[elt] = value
return result
def __sub__(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.difference(other)
def difference(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
result = self.__class__()
data = result._data
try:
otherdata = other._data
except AttributeError:
otherdata = Set(other)._data
value = True
for elt in ifilterfalse(otherdata.has_key, self):
data[elt] = value
return result
# Membership test
def __contains__(self, element):
"""Report whether an element is a member of a set.
(Called in response to the expression `element in self'.)
"""
try:
return element in self._data
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
return transform() in self._data
# Subset and superset test
def issubset(self, other):
"""Report whether another set contains this set."""
self._binary_sanity_check(other)
if len(self) > len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(other._data.has_key, self):
return False
return True
def issuperset(self, other):
"""Report whether this set contains another set."""
self._binary_sanity_check(other)
if len(self) < len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(self._data.has_key, other):
return False
return True
# Inequality comparisons using the is-subset relation.
__le__ = issubset
__ge__ = issuperset
def __lt__(self, other):
self._binary_sanity_check(other)
return len(self) < len(other) and self.issubset(other)
def __gt__(self, other):
self._binary_sanity_check(other)
return len(self) > len(other) and self.issuperset(other)
# Assorted helpers
def _binary_sanity_check(self, other):
# Check that the other argument to a binary operation is also
# a set, raising a TypeError otherwise.
if not isinstance(other, BaseSet):
raise TypeError, "Binary operation only permitted between sets"
def _compute_hash(self):
# Calculate hash code for a set by xor'ing the hash codes of
# the elements. This ensures that the hash code does not depend
# on the order in which elements are added to the set. This is
# not called __hash__ because a BaseSet should not be hashable;
# only an ImmutableSet is hashable.
result = 0
for elt in self:
result ^= hash(elt)
return result
def _update(self, iterable):
# The main loop for update() and the subclass __init__() methods.
data = self._data
# Use the fast update() method when a dictionary is available.
if isinstance(iterable, BaseSet):
data.update(iterable._data)
return
value = True
if type(iterable) in (list, tuple, xrange):
# Optimized: we know that __iter__() and next() can't
# raise TypeError, so we can move 'try:' out of the loop.
it = iter(iterable)
while True:
try:
for element in it:
data[element] = value
return
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
else:
# Safe: only catch TypeError where intended
for element in iterable:
try:
data[element] = value
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
class ImmutableSet(BaseSet):
"""Immutable set class."""
__slots__ = ['_hashcode']
# BaseSet + hashing
def __init__(self, iterable=None):
"""Construct an immutable set from an optional iterable."""
self._hashcode = None
self._data = {}
if iterable is not None:
self._update(iterable)
def __hash__(self):
if self._hashcode is None:
self._hashcode = self._compute_hash()
return self._hashcode
def __getstate__(self):
return self._data, self._hashcode
def __setstate__(self, state):
self._data, self._hashcode = state
class Set(BaseSet):
""" Mutable set class."""
__slots__ = []
# BaseSet + operations requiring mutability; no hashing
def __init__(self, iterable=None):
"""Construct a set from an optional iterable."""
self._data = {}
if iterable is not None:
self._update(iterable)
def __getstate__(self):
# getstate's results are ignored if it is not
return self._data,
def __setstate__(self, data):
self._data, = data
def __hash__(self):
"""A Set cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError, "Can't hash a Set, only an ImmutableSet."
# In-place union, intersection, differences.
# Subtle: The xyz_update() functions deliberately return None,
# as do all mutating operations on built-in container types.
# The __xyz__ spellings have to return self, though.
def __ior__(self, other):
"""Update a set with the union of itself and another."""
self._binary_sanity_check(other)
self._data.update(other._data)
return self
def union_update(self, other):
"""Update a set with the union of itself and another."""
self._update(other)
def __iand__(self, other):
"""Update a set with the intersection of itself and another."""
self._binary_sanity_check(other)
self._data = (self & other)._data
return self
def intersection_update(self, other):
"""Update a set with the intersection of itself and another."""
if isinstance(other, BaseSet):
self &= other
else:
self._data = (self.intersection(other))._data
def __ixor__(self, other):
"""Update a set with the symmetric difference of itself and another."""
self._binary_sanity_check(other)
self.symmetric_difference_update(other)
return self
def symmetric_difference_update(self, other):
"""Update a set with the symmetric difference of itself and another."""
data = self._data
value = True
if not isinstance(other, BaseSet):
other = Set(other)
if self is other:
self.clear()
for elt in other:
if elt in data:
del data[elt]
else:
data[elt] = value
def __isub__(self, other):
"""Remove all elements of another set from this set."""
self._binary_sanity_check(other)
self.difference_update(other)
return self
def difference_update(self, other):
"""Remove all elements of another set from this set."""
data = self._data
if not isinstance(other, BaseSet):
other = Set(other)
if self is other:
self.clear()
for elt in ifilter(data.has_key, other):
del data[elt]
# Python dict-like mass mutations: update, clear
def update(self, iterable):
"""Add all values from an iterable (such as a list or file)."""
self._update(iterable)
def clear(self):
"""Remove all elements from this set."""
self._data.clear()
# Single-element mutations: add, remove, discard
def add(self, element):
"""Add an element to a set.
This has no effect if the element is already present.
"""
try:
self._data[element] = True
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
self._data[transform()] = True
def remove(self, element):
"""Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
try:
del self._data[element]
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
del self._data[transform()]
def discard(self, element):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
try:
self.remove(element)
except KeyError:
pass
def pop(self):
"""Remove and return an arbitrary set element."""
return self._data.popitem()[0]
def __as_immutable__(self):
# Return a copy of self as an immutable set
return ImmutableSet(self)
def __as_temporarily_immutable__(self):
# Return self wrapped in a temporarily immutable set
return _TemporarilyImmutableSet(self)
class _TemporarilyImmutableSet(BaseSet):
# Wrap a mutable set as if it was temporarily immutable.
# This only supplies hashing and equality comparisons.
def __init__(self, set):
self._set = set
self._data = set._data # Needed by ImmutableSet.__eq__()
def __hash__(self):
return self._set._compute_hash()
| gpl-2.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/paginator.py | 129 | 5006 | """
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from google.appengine.ext import db
from cache import Cache
class Paginator(object):
"""
This class is used for maintaining pagination objects.
"""
@classmethod
def get(cls, count=10, q_filters={}, search=None, start=None, model=None, \
order='ASC', order_by='__key__'):
"""
get queries the database on model, starting with key, ordered by
order. It receives count + 1 items, returning count and setting a
next field to the count + 1 item key. It then reverses the sort, and
grabs count objects, returning the last as a the previous.
Arguments:
count: The amount of entries to pull on query
        q_filters: A dict mapping filter properties to values (optional)
search: Search is used for SearchableModel searches
start: The key to start the page from
model: The Model object to query against. This is not a
string, it must be a Model derived object.
order: The order in which to pull the values.
order_by: The attribute to order results by. This defaults to
__key__
Returns a dict:
{
'next': next_key,
'prev': prev_key,
'items': entities_pulled
}
"""
# argument validation
if model == None:
raise ValueError('You must pass a model to query')
# a valid model object will have a gql method.
if callable(model.gql) == False:
raise TypeError('model must be a valid model object.')
# cache check
cache_string = "gae_paginator_"
for q_filter in q_filters:
cache_string = cache_string + q_filter + "_" + q_filters[q_filter] + "_"
cache_string = cache_string + "index"
c = Cache()
if c.has_key(cache_string):
return c[cache_string]
# build query
query = model.all()
if len(q_filters) > 0:
for q_filter in q_filters:
query.filter(q_filter + " = ", q_filters[q_filter])
if start:
if order.lower() == "DESC".lower():
query.filter(order_by + " <", start)
else:
query.filter(order_by + " >", start)
if search:
query.search(search)
if order.lower() == "DESC".lower():
query.order("-" + order_by)
else:
query.order(order_by)
results = query.fetch(count + 1)
if len(results) == count + 1:
next = getattr(results[count - 1], order_by)
# reverse the query to get the value for previous
if start is not None:
rquery = model.all()
for q_filter in q_filters:
rquery.filter(q_filter + " = ", q_filters[q_filter])
if search:
query.search(search)
if order.lower() == "DESC".lower():
rquery.order(order_by)
else:
rquery.order("-" + order_by)
rresults = rquery.fetch(count)
previous = getattr(results[0], order_by)
else:
previous = None
        else:
            next = None
            previous = None
return {
"results": results,
"next": next,
"previous": previous
}
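# Example (illustrative only): a request handler might page through a
# hypothetical Article model (any db.Model subclass) like this:
#
#     page = Paginator.get(count=10, model=Article, order_by='created',
#                          order='DESC', start=bookmark_or_None)
#     for article in page['results']:
#         ...
#     # pass page['next'] back as `start` on the next request; page['previous']
#     # holds the order_by value of the first item on the current page.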
| apache-2.0 |
dreibh/planetlab-lxc-nodemanager | config.py | 1 | 1404 | #!/usr/bin/python3
#
# Parses the PLC configuration file /etc/planetlab/plc_config, which
# is bootstrapped by Boot Manager, but managed by us.
#
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
import os
class Config:
"""
Parses Python configuration files; all variables in the file are
assigned to class attributes.
"""
def __init__(self, file = "/etc/planetlab/plc_config"):
try:
exec(compile(open(file).read(), file, 'exec'), self.__dict__)
except:
raise Exception("Could not parse " + file)
if int(self.PLC_API_PORT) == 443:
uri = "https://"
if hasattr(self, 'PLC_API_CA_SSL_CRT'):
self.cacert = self.PLC_API_CA_SSL_CRT
elif os.path.exists('/usr/boot/cacert.pem'):
self.cacert = '/usr/boot/cacert.pem'
else:
raise Exception("No boot server certificate bundle available")
else:
uri = "http://"
self.cacert = None
uri += self.PLC_API_HOST + \
":" + str(self.PLC_API_PORT) + \
"/" + self.PLC_API_PATH + "/"
self.plc_api_uri = uri
if __name__ == '__main__':
from pprint import pprint
for (k, v) in Config().__dict__.items():
if k not in ['__builtins__']:
            pprint((k, v))
| bsd-3-clause |
double-y/django | tests/model_inheritance_regress/tests.py | 137 | 18655 | """
Regression tests for Model inheritance behavior.
"""
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
        # Regression tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
self.assertRaises(
Place.DoesNotExist,
Place.objects.get,
pk=ident)
self.assertRaises(
ItalianRestaurant.DoesNotExist,
ItalianRestaurant.objects.get,
pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
self.assertRaises(
ArticleWithAuthor.DoesNotExist,
c3.get_next_by_pub_date)
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
self.assertRaises(
ArticleWithAuthor.DoesNotExist,
c1.get_previous_by_pub_date)
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
Regression tests for #10406
If there's a one-to-one link between a child model and the parent and
no explicit pk declared, we can use the one-to-one link as the pk on
the child.
"""
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
# However, the connector from child to parent need not be the pk on
# the child at all.
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# the child->parent link
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
        ParkingLot4B.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
# Check that many-to-many relations defined on an abstract base class
# are correctly inherited (and created) on the child class.
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees = [p1, p3]
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees = [p2, p4]
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# Check that a subclass of a subclass of an abstract model doesn't get
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees = [p4]
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
"""
        Test that a model which has a different primary key for the parent model
passes unique field checking correctly. Refs #17615.
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and top contains direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
Senator.objects.get(pk=senator.pk)
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
| bsd-3-clause |
frankenstein91/MetaGet | MetaGetServer/settings.py | 1 | 3141 | """
Django settings for MetaGetServer project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')d$(h48o+38--%v&a%epyprjgwm0%mg^ny73)xvozlue(8xe@y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'MetaGet'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MetaGetServer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MetaGetServer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 |
f4exb/gr-e4406a | docs/doxygen/doxyxml/generated/index.py | 344 | 1871 | #!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
import os
import sys
import compound
import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compound=None):
supermod.DoxygenType.__init__(self, version, compound)
def find_compounds_and_members(self, details):
"""
Returns a list of all compounds and their members which match details
"""
results = []
for compound in self.compound:
members = compound.find_members(details)
if members:
results.append([compound, members])
else:
if details.match(compound):
results.append([compound, []])
return results
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class CompoundTypeSub(supermod.CompoundType):
def __init__(self, kind=None, refid=None, name='', member=None):
supermod.CompoundType.__init__(self, kind, refid, name, member)
def find_members(self, details):
"""
Returns a list of all members which match details
"""
results = []
for member in self.member:
if details.match(member):
results.append(member)
return results
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
class MemberTypeSub(supermod.MemberType):
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
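# A rough usage sketch for the generated classes above.  It assumes, as
# generateDS factories normally do, that DoxygenType.factory() instantiates the
# registered DoxygenTypeSub; the 'details' matcher is hypothetical (any object
# exposing a match(item) method would fit):
#
#   index = parse('xml/index.xml')
#   for compound, members in index.find_compounds_and_members(details):
#       print(compound.name, [m.name for m in members])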
| gpl-3.0 |
danielkitta/libsigrokdecode | decoders/spiflash/__init__.py | 7 | 1193 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012-2015 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
This decoder stacks on top of the 'spi' PD and decodes the xx25 series
SPI (NOR) flash chip protocol.
It currently supports the MX25L1605D/MX25L3205D/MX25L6405D.
Details:
http://www.macronix.com/QuickPlace/hq/PageLibrary4825740B00298A3B.nsf/h_Index/3F21BAC2E121E17848257639003A3146/$File/MX25L1605D-3205D-6405D-1.5.pdf
'''
from .pd import Decoder
| gpl-3.0 |
mohitsethi/packstack | packstack/puppet/modules/packstack/lib/facter/netns.py | 12 | 7681 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import contextlib
import inspect
import os
import random
import subprocess
import sys
import tempfile
import time
import uuid
import unittest
def execute(cmd_string, check_error=True, return_code=0, input=None,
block=True, error_msg='Error executing cmd'):
print cmd_string
cmd = cmd_string.split(' ')
proc = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if input:
proc.communicate(input=input)
elif block:
proc.wait()
if (check_error and
proc.returncode is not None and
proc.returncode != return_code):
msg = """
%(error_msg)s
Command: %(cmd)s
Exit Code: %(code)s
""".strip() % dict(cmd=' '.join(cmd),
code=proc.returncode,
error_msg=error_msg)
if input:
msg += "\n Stdin: %s" % input
if not proc.stdout.closed:
msg += "\n Stdout: %s" % proc.stdout.read()
if not proc.stderr.closed:
msg += "\n Stderr: %s" % proc.stderr.read()
raise Exception(msg)
return proc
def e(cmd, prefix='ip netns exec ', sudo=False, **kwargs):
frame_locals = inspect.getargvalues(sys._getframe(1))[3]
if sudo:
prefix = 'sudo ' + prefix
return execute(prefix + cmd % frame_locals, **kwargs)
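# e() interpolates the %(name)s placeholders from the *caller's* local
# variables and, by default, prefixes the command with 'ip netns exec '.
# A minimal sketch of how the helpers below use it (the values shown are
# illustrative only):
#
#   ns1, veth1_1 = 'testns-1234', 'veth1_1'
#   e('%(ns1)s ip link show %(veth1_1)s')
#   # runs: ip netns exec testns-1234 ip link show veth1_1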
def rand_name(name='test'):
return '%s-%s' % (name, str(random.randint(1, 0x7fffffff)))
@contextlib.contextmanager
def add_namespace():
name = rand_name('testns')
try:
e('ip netns add %(name)s', prefix='')
e('%(name)s ip link set lo up')
yield name
finally:
e('ip netns delete %(name)s', prefix='')
@contextlib.contextmanager
def add_namespaces():
with add_namespace() as ns1:
with add_namespace() as ns2:
yield ns1, ns2
def add_veth_pair(ns1, ns2, veth1, veth2, address1, address2):
e('ip link add %(veth1)s netns %(ns1)s type veth '
'peer name %(veth2)s netns %(ns2)s', prefix='')
e('%(ns1)s ip link show %(veth1)s')
e('%(ns2)s ip link show %(veth2)s')
e('%(ns1)s ip -4 addr add %(address1)s/24 brd 255.255.255.0 '
'scope global dev %(veth1)s')
e('%(ns2)s ip -4 addr add %(address2)s/24 brd 255.255.255.0 '
'scope global dev %(veth2)s')
e('%(ns1)s ip link set %(veth1)s up')
e('%(ns2)s ip link set %(veth2)s up')
class TestNetns(unittest.TestCase):
def test_neutron_netns_cmds(self):
"""Exercise the netns functionality required by neutron.
- Check that a veth pair can be configured to transit traffic
between 2 namespaces
- Check that iptables filtering can be configured
- Check that iptables routing can be configured
"""
# Naming scheme [resource][id]_[namespace id]
veth1_1 = 'veth1_1'
veth1_2 = 'veth1_2'
address1_1 = '192.168.0.1'
address1_2 = '192.168.0.2'
with add_namespaces() as (ns1, ns2):
# Check that inter-namespace connectivity can be established
add_veth_pair(ns1, ns2, veth1_1, veth1_2, address1_1, address1_2)
e('%(ns1)s ip link list')
e('%(ns1)s ip link show %(veth1_1)s')
e('%(ns1)s arping -A -U -I %(veth1_1)s '
'-c 1 %(address1_1)s')
e('%(ns2)s route add default gw %(address1_1)s')
e('%(ns2)s ping -c 1 -w 1 %(address1_1)s')
e('ping -c 1 -w 1 %(address1_1)s', prefix='', return_code=1,
error_msg='Namespace isolation not supported!')
# Check that iptables filtering and save/restore can be performed
try:
iptables_filename = os.path.join(
tempfile.gettempdir(),
'iptables-%s' % str(uuid.uuid4()))
e('%%(ns1)s iptables-save > %s' % iptables_filename)
e('%(ns1)s iptables -A INPUT -p icmp --icmp-type 8 -j DROP')
e('%(ns2)s ping -c 1 -w 1 %(address1_1)s', return_code=1)
e('%%(ns1)s iptables-restore < %s' % iptables_filename)
e('%(ns2)s ping -c 1 -w 1 %(address1_1)s')
finally:
if os.path.exists(iptables_filename):
os.unlink(iptables_filename)
# Create another namespace (ns3) that is connected to ns1
# via a different subnet, so that traffic between ns3 and
# ns2 will have to be routed by ns1:
#
# ns2 <- 192.168.0.0/24 -> ns1 <- 192.168.1.0/24 -> ns3
#
with add_namespace() as ns3:
veth2_1 = 'veth2_1'
veth2_3 = 'veth2_3'
address2_1 = '192.168.1.1'
address2_3 = '192.168.1.2'
add_veth_pair(ns1, ns3, veth2_1, veth2_3,
address2_1, address2_3)
e('%(ns1)s sysctl -w net.ipv4.ip_forward=1')
e('%(ns1)s iptables -t nat -A POSTROUTING -o %(veth2_1)s -j '
'MASQUERADE')
e('%(ns1)s iptables -A FORWARD -i %(veth2_1)s -o %(veth1_1)s '
'-m state --state RELATED,ESTABLISHED -j ACCEPT')
e('%(ns1)s iptables -A FORWARD -i %(veth1_1)s -o %(veth2_1)s '
'-j ACCEPT')
e('%(ns2)s ping -c 1 -w 1 %(address2_3)s')
# Check that links can be torn down
e('%(ns1)s ip -4 addr del %(address1_1)s/24 '
'dev %(veth1_1)s')
e('%(ns1)s ip link delete %(veth1_1)s')
def test_domain_socket_access(self):
"""Check that a domain socket can be accessed regardless of namespace.
Neutron extends nova' metadata service - which identifies VM's
by their ip addresses - to configurations with overlapping
ips. Support is provided by:
- a proxy in each namespace (neutron-ns-metadata-proxy)
- the proxy can uniquely identify a given VM by its ip
address in the context of the router or network of the
namespace.
- a metadata agent (neutron-metadata-agent) that forwards
requests from the namespace proxies to nova's metadata
service.
Communication between the proxies and the agent is over a unix
domain socket. It is necessary that access to a domain socket
not be restricted by namespace, or such communication will not
be possible.
"""
try:
execute('which nc')
except Exception:
self.fail("The 'nc' command is not available - please install it.")
sock_filename = os.path.join(tempfile.gettempdir(),
'testsock-%s' % str(uuid.uuid4()))
server = None
try:
# Create a server in the root namespace attached to a domain socket
server = e('nc -lU %(sock_filename)s', sudo=False, prefix='',
block=False)
# Attempt to connect to the domain socket from within a namespace
with add_namespace() as ns:
e('%(ns)s nc -U %(sock_filename)s', input='magic',
error_msg='Unable to communicate between namespaces via '
'domain sockets.')
finally:
if server:
server.kill()
if os.path.exists(sock_filename):
os.unlink(sock_filename)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
baylee/django | django/contrib/contenttypes/models.py | 49 | 6717 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class ContentTypeManager(models.Manager):
use_in_migrations = True
def __init__(self, *args, **kwargs):
super(ContentTypeManager, self).__init__(*args, **kwargs)
# Cache shared by all the get_for_* methods to speed up
# ContentType retrieval.
self._cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Returns the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
# The ContentType entry was not found in the cache, therefore we
# proceed to load or create it.
try:
# Start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, **kwargs):
"""
Given *models, returns a dictionary mapping {model: content_type}.
"""
for_concrete_models = kwargs.pop('for_concrete_models', True)
# Final results
results = {}
# models that aren't already in the cache
needed_app_labels = set()
needed_models = set()
needed_opts = set()
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_app_labels.add(opts.app_label)
needed_models.add(opts.model_name)
needed_opts.add(opts)
else:
results[model] = ct
if needed_opts:
cts = self.filter(
app_label__in=needed_app_labels,
model__in=needed_models
)
for ct in cts:
model = ct.model_class()
if model._meta in needed_opts:
results[model] = ct
needed_opts.remove(model._meta)
self._add_to_cache(self.db, ct)
for opts in needed_opts:
# These weren't in the cache, or the DB, create them.
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
results[ct.model_class()] = ct
return results
def get_for_id(self, id):
"""
Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_for_id).
"""
try:
ct = self._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache. This needs to happen during database
flushes to prevent caching of "stale" content type IDs (see
django.contrib.contenttypes.management.update_contenttypes for where
this gets called).
"""
self._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class() will return None.
# Hence, there is no reliance on model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self._cache.setdefault(using, {})[key] = ct
self._cache.setdefault(using, {})[ct.id] = ct
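# A short usage sketch for the manager above.  'SomeModel' stands in for any
# installed model class; it is not defined in this module:
#
#   ct = ContentType.objects.get_for_model(SomeModel)       # cached per database alias
#   cts = ContentType.objects.get_for_models(SomeModel, ContentType)
#   same_ct = ContentType.objects.get_for_id(ct.id)         # served from the same cache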
@python_2_unicode_compatible
class ContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(_('python model class name'), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
def __str__(self):
return self.name
@property
def name(self):
model = self.model_class()
if not model:
return self.model
return force_text(model._meta.verbose_name)
def model_class(self):
"Returns the Python model class for this type of content."
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Returns all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
| bsd-3-clause |
40223234/40223234 | static/Brython3.1.1-20150328-091302/Lib/test/regrtest.py | 718 | 65317 | #! /usr/bin/python3.3
"""
Usage:
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
--timeout TIMEOUT
-- dump the traceback and exit if a test takes more
than TIMEOUT seconds; disabled if TIMEOUT is negative
or equals to zero
--wait -- wait for user input, e.g., allow a debugger to be attached
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- display test output on failure
-d/--debug -- print traceback for failed tests
-q/--quiet -- no output unless one or more tests fail
-o/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
--testdir DIR
-- execute test files in the specified directory (instead
of the Python stdlib test suite)
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-n/--nowindows -- suppress error message boxes on Windows
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide a
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, a process that tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order a standard run (ie, this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import faulthandler
import getopt
import io
import json
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(msg):
print(msg, file=sys.stderr)
print("Use --help for usage", file=sys.stderr)
sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=0, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsoS:rf:lu:t:TD:NLR:FdwWM:nj:Gm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'coverage', 'slaveargs=', 'forever', 'debug',
'start=', 'nowindows', 'header', 'testdir=', 'timeout=', 'wait',
'failfast', 'match=', 'next='])
except getopt.error as msg:
usage(msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
debug = False
start = None
timeout = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
            quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-S', '--start'):
start = a
elif o in ('-s', '--single'):
single = 1
elif o == '--next':
single = int(a)
elif o in ('-o', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
match_tests = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
# CWD is replaced with a temporary dir before calling main(), so we
# need join it with the saved CWD so it goes where the user expects.
coverdir = os.path.join(support.SAVEDCWD, a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
if use_mp <= 0:
try:
import multiprocessing
# Use all cores + extras for tests that like to sleep
use_mp = 2 + multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
use_mp = 3
if use_mp == 1:
use_mp = None
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
elif o == '--testdir':
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
testdir = os.path.join(support.SAVEDCWD, a)
elif o == '--timeout':
if hasattr(faulthandler, 'dump_tracebacks_later'):
timeout = float(a)
if timeout <= 0:
timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_tracebacks_later")
timeout = None
elif o == '--wait':
input("Press any key to continue...")
else:
print(("No handler for option {}. Please report this as a bug "
"at http://bugs.python.org.").format(o), file=sys.stderr)
sys.exit(1)
if single and fromfile:
usage("-s and -f don't go together!")
if use_mp and trace:
usage("-T and -j don't go together!")
if use_mp and findleaks:
usage("-l and -j don't go together!")
if use_mp and support.max_memuse:
usage("-M and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(support.SAVEDCWD, fromfile))
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single != 1 or tests or args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if testdir:
alltests = findtests(testdir, list(), set())
else:
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
first_selected = selected[0]
index_selected = alltests.index(first_selected)
if index_selected + single > len(alltests):
single = len(alltests) - index_selected
selected = alltests[index_selected:index_selected+single]
try:
next_single_test = alltests[index_selected+single]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if start:
try:
del selected[:selected.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.seed(random_seed)
print("Using random seed", random_seed)
random.shuffle(selected)
if trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
pending = MultiprocessTests(tests)
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
debug=debug, output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
)
# -E is needed by some tests, e.g. test_import
# Running the child from the same working directory ensures
# that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, verbose, quiet, timeout=timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, debug,
output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
verbose = True
ok = runtest(test, True, quiet, huntrleaks, debug, timeout=timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
import atexit
stdout = sys.stdout
sys.stdout = open(stdout.fileno(), 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
def runtest(test, verbose, quiet,
huntrleaks=False, debug=False, use_resources=None,
output_on_failure=False, failfast=False, match_tests=None,
timeout=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
test_times -- a list of (time, test_name) pairs
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
output_on_failure -- if true, display test output on failure
timeout -- dump the traceback and exit if a test takes more than
timeout seconds
Returns one of the test result constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
if use_resources is not None:
support.use_resources = use_resources
use_timeout = (timeout is not None)
if use_timeout:
faulthandler.dump_tracebacks_later(timeout, exit=True)
try:
support.match_tests = match_tests
if failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance to all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(test, verbose, quiet, huntrleaks,
debug, display_failure=False)
if result[0] == FAILED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = verbose # Tell tests to be moderately quiet
result = runtest_inner(test, verbose, quiet, huntrleaks, debug,
display_failure=not verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_tracebacks_later()
cleanup_test_droppings(test, verbose)
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
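# Hypothetical illustration (not one of the resources actually tracked
# below): a resource named 'locale.localeconv' would be handled by adding
# that name to the resources tuple and defining get_locale_localeconv(),
# returning a snapshot dict, plus restore_locale_localeconv(saved), which
# puts the saved state back; the '.' becomes '_' in the method names.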
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions', 'threading._dangling',
'multiprocessing.process._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'support.TESTFN',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
# we could call get_archive_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_support_TESTFN(self):
if os.path.isfile(support.TESTFN):
result = 'f'
elif os.path.isdir(support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(support.TESTFN):
os.unlink(support.TESTFN)
elif os.path.isdir(support.TESTFN):
shutil.rmtree(support.TESTFN)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print("Warning -- {} was modified by {}".format(
name, self.testname),
file=sys.stderr)
if self.verbose > 1:
print(" Before: {}\n After: {} ".format(
original, current),
file=sys.stderr)
return False
def runtest_inner(test, verbose, quiet,
huntrleaks=False, debug=False, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
tests = unittest.TestLoader().loadTestsFromModule(the_module)
test_runner = lambda: support.run_unittest(tests)
test_runner()
if huntrleaks:
refleak = dash_R(the_module, test, test_runner,
huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr)
else:
print("test", test, "failed", file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
except:
msg = traceback.format_exc()
print("test", test, "crashed --", msg, file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, arranging that when a test fails can be
# tedious. The consequences can be especially nasty on Windows, since if a
# test leaves a file open, it cannot be deleted by name (there's nothing we
# can do about that here either, but we can at least display the name of
# the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if hasattr(os, 'chmod'):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
del sys.modules[the_module.__name__]
exec('import ' + the_module.__name__)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print(file=sys.stderr)
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print(msg, file=sys.stderr)
sys.stderr.flush()
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
return True
return False
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, collections.abc
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
x = [chr(i) for i in range(256)]
# int cache
x = list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = (
('win32',
"""
test__locale
test_crypt
test_curses
test_dbm
test_devpoll
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
"""),
('linux',
"""
test_curses
test_devpoll
test_largefile
test_kqueue
test_ossaudiodev
"""),
('unixware',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('openunix',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('sco_sv',
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
"""),
('darwin',
"""
test__locale
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_gdb
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
"""),
('sunos',
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
"""),
('hp-ux',
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
"""),
('cygwin',
"""
test_curses
test_dbm
test_devpoll
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
"""),
('os2emx',
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
"""),
('freebsd',
"""
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
"""),
('aix',
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
"""),
('openbsd',
"""
test_ctypes
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
('netbsd',
"""
test_ctypes
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
)
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
expected = None
for item in _expectations:
if sys.platform.startswith(item[0]):
expected = item[1]
break
if expected is not None:
self.expected = set(expected.split())
# These are broken tests, for now skipped on every platform.
# XXX Fix these!
self.expected.add('test_nis')
# expected to be skipped on every platform, even Linux
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
# doctest, profile and cProfile tests fail when the codec for the
# fs encoding isn't built in because PyUnicode_Decode() adds two
# calls into Python.
encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
if sys.getfilesystemencoding().lower() not in encs:
self.expected.add('test_profile')
self.expected.add('test_cProfile')
self.expected.add('test_doctest')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = {"test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"}
self.expected |= WIN_ONLY
if sys.platform != 'sunos5':
self.expected.add('test_nis')
if support.python_is_optimized():
self.expected.add("test_gdb")
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
def _make_temp_dir_for_build(TEMPDIR):
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
return TEMPDIR, TESTCWD
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
TEMPDIR, TESTCWD = _make_temp_dir_for_build(TEMPDIR)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(TESTCWD, quiet=True):
main()
| gpl-3.0 |
vbannai/neutron | neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py | 8 | 3144 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""floatingip_status
Revision ID: 2eeaf963a447
Revises: f44ab9871cd6
Create Date: 2014-01-14 11:58:13.754747
"""
# revision identifiers, used by Alembic.
revision = '2eeaf963a447'
down_revision = 'f44ab9871cd6'
# This migration is applied to all L3 capable plugins
migration_for_plugins = [
'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2',
'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2',
'neutron.plugins.cisco.network_plugin.PluginV2',
'neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2',
'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin',
'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin',
'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin',
'neutron.plugins.midonet.plugin.MidonetPluginV2',
'neutron.plugins.ml2.plugin.Ml2Plugin',
'neutron.plugins.nec.nec_plugin.NECPluginV2',
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.nuage.plugin.NuagePlugin',
'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.'
'NeutronPluginPLUMgridV2',
'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.add_column('floatingips',
sa.Column('last_known_router_id',
sa.String(length=36),
nullable=True))
op.add_column('floatingips',
sa.Column('status',
sa.String(length=16),
nullable=True))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_column('floatingips', 'last_known_router_id')
op.drop_column('floatingips', 'status')
| apache-2.0 |
Andre9642/BrailleExtender | buildVars.py | 1 | 4573 | import os.path
import subprocess
import time
updateChannel = "unknown"
hashCommit = "unknown"
outBranchName = subprocess.check_output(
["git", "branch", "--show-current"]).strip().decode()
out = subprocess.check_output(
["git", "status", "--porcelain"]).strip().decode()
if not out.strip():
label = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"]).strip().decode()
if len(label) == 7:
hashCommit = label
if outBranchName.strip():
updateChannel = "stable" if outBranchName in [
"stable", "master"] else "dev"
# Build customizations
# Change this file instead of sconstruct or manifest files, whenever possible.
# Full getext (please don't change)
def _(x): return x
# Add-on information variables
addon_info = {
# for previously unpublished addons, please follow the community guidelines at:
# https://bitbucket.org/nvdaaddonteam/todo/raw/master/guidelines.txt
# add-on Name, internal for nvda
"addon_name": "BrailleExtender",
# Add-on summary, usually the user visible name of the addon.
# Translators: Summary for this add-on to be shown on installation and add-on information.
"addon_summary": _("Braille Extender"),
# Add-on description
# Translators: Long description to be shown for this add-on on add-on information from add-ons manager
"addon_description": [
_("BrailleExtender is a NVDA add-on that provides various features at braille level. Currently, the following features are implemented"), ":",
"\n* ", _("reload two favorite braille display with shortcuts"), ".",
"\n* ", _("automatic review cursor tethering in terminal role like in PuTTY, Powershell, bash, cmd"), ".",
"\n* ", _("auto scroll"), ".",
"\n* ", _("switch between several input/output braille tables"), ".",
"\n* ", _("mark the text with special attributes through dot 7, dot 8 or both"), ".",
"\n* ", _("use two output braille tables simultaneously"), ".",
"\n* ", _("display tab signs as spaces"), ".",
"\n* ", _("reverse forward scroll and back scroll buttons"), ".",
"\n* ", _("say the current line during text scrolling either in review mode, or in focus mode or both"), ".",
"\n* ", _("translate text easily in Unicode braille and vice versa. E.g.: z <--> ⠵"), ".",
"\n* ", _("convert cell description to Unicode braille and vice versa. E.g.: 123 <--> ⠇"), ".",
"\n* ", _("lock braille keyboard"), ".",
"\n* ", _("launch an application/URL with gesture"), ".",
"\n* ", _("braille dictionaries"), ".",
"\n* ", _("type with one-hand from braille keyboard"), ".",
"\n* ", _("display undefined characters from braille tables (including emojis) using altenative representations"), ".",
"\n* ", _("enter any character from braille keyboard (including emojis)"), ".",
"\n* ", _("skip blank lines during text scrolling"), ".",
"\n* ", _("and much more!"),
"\n\n", _(
"For some braille displays, it extends the braille display commands to provide"), ":",
"\n* ", _("offer complete gesture maps including function keys, multimedia keys, quick navigation, etc."), ";",
"\n* ", _("emulate modifier keys, and thus any keyboard shortcut"), ";",
"\n* ", _("offer several keyboard configurations concerning the possibility to input dots 7 and 8, enter and backspace"), ";",
"\n* ", _("add actions and quick navigation through a rotor"), "."
],
# version
"addon_version": time.strftime("%y.%m.%d:") + hashCommit,
# Author(s)
"addon_author": "André-Abush Clause <dev@andreabc.net> " + _("and other contributors"),
# URL for the add-on documentation support
"addon_url": "https://andreabc.net/projects/NVDA_addons/BrailleExtender",
# Documentation file name
"addon_docFileName": "readme.html",
# Minimum NVDA version supported (e.g. "2018.3.0", minor version is optional)
"addon_minimumNVDAVersion": "2019.3",
# Last NVDA version supported/tested (e.g. "2018.4.0", ideally more recent than minimum version)
"addon_lastTestedNVDAVersion": "2021.1",
# Add-on update channel (default is stable or None)
"addon_updateChannel": updateChannel,
}
# Define the python files that are the sources of your add-on.
# You can use glob expressions here, they will be expanded.
pythonSources = [os.path.join("addon", "*.py"),
os.path.join("addon", "globalPlugins", "brailleExtender", "*.py")]
# Files that contain strings for translation. Usually your python sources
i18nSources = pythonSources + ["buildVars.py"]
# Files that will be ignored when building the nvda-addon file
# Paths are relative to the addon directory, not to the root directory of your addon sources.
excludedFiles = []
| gpl-2.0 |
elssar/calibre | src/odf/elementtypes.py | 13 | 10219 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import *
# Inline element don't cause a box
# They are analogous to the HTML elements SPAN, B, I etc.
inline_elements = (
(TEXTNS,u'a'),
(TEXTNS,u'author-initials'),
(TEXTNS,u'author-name'),
(TEXTNS,u'bibliography-mark'),
(TEXTNS,u'bookmark-ref'),
(TEXTNS,u'chapter'),
(TEXTNS,u'character-count'),
(TEXTNS,u'conditional-text'),
(TEXTNS,u'creation-date'),
(TEXTNS,u'creation-time'),
(TEXTNS,u'creator'),
(TEXTNS,u'database-display'),
(TEXTNS,u'database-name'),
(TEXTNS,u'database-next'),
(TEXTNS,u'database-row-number'),
(TEXTNS,u'database-row-select'),
(TEXTNS,u'date'),
(TEXTNS,u'dde-connection'),
(TEXTNS,u'description'),
(TEXTNS,u'editing-cycles'),
(TEXTNS,u'editing-duration'),
(TEXTNS,u'execute-macro'),
(TEXTNS,u'expression'),
(TEXTNS,u'file-name'),
(TEXTNS,u'hidden-paragraph'),
(TEXTNS,u'hidden-text'),
(TEXTNS,u'image-count'),
(TEXTNS,u'initial-creator'),
(TEXTNS,u'keywords'),
(TEXTNS,u'measure'),
(TEXTNS,u'modification-date'),
(TEXTNS,u'modification-time'),
(TEXTNS,u'note-ref'),
(TEXTNS,u'object-count'),
(TEXTNS,u'page-continuation'),
(TEXTNS,u'page-count'),
(TEXTNS,u'page-number'),
(TEXTNS,u'page-variable-get'),
(TEXTNS,u'page-variable-set'),
(TEXTNS,u'paragraph-count'),
(TEXTNS,u'placeholder'),
(TEXTNS,u'print-date'),
(TEXTNS,u'printed-by'),
(TEXTNS,u'print-time'),
(TEXTNS,u'reference-ref'),
(TEXTNS,u'ruby'),
(TEXTNS,u'ruby-base'),
(TEXTNS,u'ruby-text'),
(TEXTNS,u'script'),
(TEXTNS,u'sender-city'),
(TEXTNS,u'sender-company'),
(TEXTNS,u'sender-country'),
(TEXTNS,u'sender-email'),
(TEXTNS,u'sender-fax'),
(TEXTNS,u'sender-firstname'),
(TEXTNS,u'sender-initials'),
(TEXTNS,u'sender-lastname'),
(TEXTNS,u'sender-phone-private'),
(TEXTNS,u'sender-phone-work'),
(TEXTNS,u'sender-position'),
(TEXTNS,u'sender-postal-code'),
(TEXTNS,u'sender-state-or-province'),
(TEXTNS,u'sender-street'),
(TEXTNS,u'sender-title'),
(TEXTNS,u'sequence'),
(TEXTNS,u'sequence-ref'),
(TEXTNS,u'sheet-name'),
(TEXTNS,u'span'),
(TEXTNS,u'subject'),
(TEXTNS,u'table-count'),
(TEXTNS,u'table-formula'),
(TEXTNS,u'template-name'),
(TEXTNS,u'text-input'),
(TEXTNS,u'time'),
(TEXTNS,u'title'),
(TEXTNS,u'user-defined'),
(TEXTNS,u'user-field-get'),
(TEXTNS,u'user-field-input'),
(TEXTNS,u'variable-get'),
(TEXTNS,u'variable-input'),
(TEXTNS,u'variable-set'),
(TEXTNS,u'word-count'),
)
# It is almost impossible to determine what elements are block elements.
# There are so many that don't fit the form
block_elements = (
(TEXTNS,u'h'),
(TEXTNS,u'p'),
(TEXTNS,u'list'),
(TEXTNS,u'list-item'),
(TEXTNS,u'section'),
)
declarative_elements = (
(OFFICENS,u'font-face-decls'),
(PRESENTATIONNS,u'date-time-decl'),
(PRESENTATIONNS,u'footer-decl'),
(PRESENTATIONNS,u'header-decl'),
(TABLENS,u'table-template'),
(TEXTNS,u'alphabetical-index-entry-template'),
(TEXTNS,u'alphabetical-index-source'),
(TEXTNS,u'bibliography-entry-template'),
(TEXTNS,u'bibliography-source'),
(TEXTNS,u'dde-connection-decls'),
(TEXTNS,u'illustration-index-entry-template'),
(TEXTNS,u'illustration-index-source'),
(TEXTNS,u'index-source-styles'),
(TEXTNS,u'index-title-template'),
(TEXTNS,u'note-continuation-notice-backward'),
(TEXTNS,u'note-continuation-notice-forward'),
(TEXTNS,u'notes-configuration'),
(TEXTNS,u'object-index-entry-template'),
(TEXTNS,u'object-index-source'),
(TEXTNS,u'sequence-decls'),
(TEXTNS,u'table-index-entry-template'),
(TEXTNS,u'table-index-source'),
(TEXTNS,u'table-of-content-entry-template'),
(TEXTNS,u'table-of-content-source'),
(TEXTNS,u'user-field-decls'),
(TEXTNS,u'user-index-entry-template'),
(TEXTNS,u'user-index-source'),
(TEXTNS,u'variable-decls'),
)
empty_elements = (
(ANIMNS,u'animate'),
(ANIMNS,u'animateColor'),
(ANIMNS,u'animateMotion'),
(ANIMNS,u'animateTransform'),
(ANIMNS,u'audio'),
(ANIMNS,u'param'),
(ANIMNS,u'set'),
(ANIMNS,u'transitionFilter'),
(CHARTNS,u'categories'),
(CHARTNS,u'data-point'),
(CHARTNS,u'domain'),
(CHARTNS,u'error-indicator'),
(CHARTNS,u'floor'),
(CHARTNS,u'grid'),
(CHARTNS,u'legend'),
(CHARTNS,u'mean-value'),
(CHARTNS,u'regression-curve'),
(CHARTNS,u'stock-gain-marker'),
(CHARTNS,u'stock-loss-marker'),
(CHARTNS,u'stock-range-line'),
(CHARTNS,u'symbol-image'),
(CHARTNS,u'wall'),
(DR3DNS,u'cube'),
(DR3DNS,u'extrude'),
(DR3DNS,u'light'),
(DR3DNS,u'rotate'),
(DR3DNS,u'sphere'),
(DRAWNS,u'contour-path'),
(DRAWNS,u'contour-polygon'),
(DRAWNS,u'equation'),
(DRAWNS,u'fill-image'),
(DRAWNS,u'floating-frame'),
(DRAWNS,u'glue-point'),
(DRAWNS,u'gradient'),
(DRAWNS,u'handle'),
(DRAWNS,u'hatch'),
(DRAWNS,u'layer'),
(DRAWNS,u'marker'),
(DRAWNS,u'opacity'),
(DRAWNS,u'page-thumbnail'),
(DRAWNS,u'param'),
(DRAWNS,u'stroke-dash'),
(FORMNS,u'connection-resource'),
(FORMNS,u'list-value'),
(FORMNS,u'property'),
(MANIFESTNS,u'algorithm'),
(MANIFESTNS,u'key-derivation'),
(METANS,u'auto-reload'),
(METANS,u'document-statistic'),
(METANS,u'hyperlink-behaviour'),
(METANS,u'template'),
(NUMBERNS,u'am-pm'),
(NUMBERNS,u'boolean'),
(NUMBERNS,u'day'),
(NUMBERNS,u'day-of-week'),
(NUMBERNS,u'era'),
(NUMBERNS,u'fraction'),
(NUMBERNS,u'hours'),
(NUMBERNS,u'minutes'),
(NUMBERNS,u'month'),
(NUMBERNS,u'quarter'),
(NUMBERNS,u'scientific-number'),
(NUMBERNS,u'seconds'),
(NUMBERNS,u'text-content'),
(NUMBERNS,u'week-of-year'),
(NUMBERNS,u'year'),
(OFFICENS,u'dde-source'),
(PRESENTATIONNS,u'date-time'),
(PRESENTATIONNS,u'footer'),
(PRESENTATIONNS,u'header'),
(PRESENTATIONNS,u'placeholder'),
(PRESENTATIONNS,u'play'),
(PRESENTATIONNS,u'show'),
(PRESENTATIONNS,u'sound'),
(SCRIPTNS,u'event-listener'),
(STYLENS,u'column'),
(STYLENS,u'column-sep'),
(STYLENS,u'drop-cap'),
(STYLENS,u'footnote-sep'),
(STYLENS,u'list-level-properties'),
(STYLENS,u'map'),
(STYLENS,u'ruby-properties'),
(STYLENS,u'table-column-properties'),
(STYLENS,u'tab-stop'),
(STYLENS,u'text-properties'),
(SVGNS,u'definition-src'),
(SVGNS,u'font-face-format'),
(SVGNS,u'font-face-name'),
(SVGNS,u'stop'),
(TABLENS,u'body'),
(TABLENS,u'cell-address'),
(TABLENS,u'cell-range-source'),
(TABLENS,u'change-deletion'),
(TABLENS,u'consolidation'),
(TABLENS,u'database-source-query'),
(TABLENS,u'database-source-sql'),
(TABLENS,u'database-source-table'),
(TABLENS,u'data-pilot-display-info'),
(TABLENS,u'data-pilot-field-reference'),
(TABLENS,u'data-pilot-group-member'),
(TABLENS,u'data-pilot-layout-info'),
(TABLENS,u'data-pilot-member'),
(TABLENS,u'data-pilot-sort-info'),
(TABLENS,u'data-pilot-subtotal'),
(TABLENS,u'dependency'),
(TABLENS,u'error-macro'),
(TABLENS,u'even-columns'),
(TABLENS,u'even-rows'),
(TABLENS,u'filter-condition'),
(TABLENS,u'first-column'),
(TABLENS,u'first-row'),
(TABLENS,u'highlighted-range'),
(TABLENS,u'insertion-cut-off'),
(TABLENS,u'iteration'),
(TABLENS,u'label-range'),
(TABLENS,u'last-column'),
(TABLENS,u'last-row'),
(TABLENS,u'movement-cut-off'),
(TABLENS,u'named-expression'),
(TABLENS,u'named-range'),
(TABLENS,u'null-date'),
(TABLENS,u'odd-columns'),
(TABLENS,u'odd-rows'),
(TABLENS,u'operation'),
(TABLENS,u'scenario'),
(TABLENS,u'sort-by'),
(TABLENS,u'sort-groups'),
(TABLENS,u'source-range-address'),
(TABLENS,u'source-service'),
(TABLENS,u'subtotal-field'),
(TABLENS,u'table-column'),
(TABLENS,u'table-source'),
(TABLENS,u'target-range-address'),
(TEXTNS,u'alphabetical-index-auto-mark-file'),
(TEXTNS,u'alphabetical-index-mark'),
(TEXTNS,u'alphabetical-index-mark-end'),
(TEXTNS,u'alphabetical-index-mark-start'),
(TEXTNS,u'bookmark'),
(TEXTNS,u'bookmark-end'),
(TEXTNS,u'bookmark-start'),
(TEXTNS,u'change'),
(TEXTNS,u'change-end'),
(TEXTNS,u'change-start'),
(TEXTNS,u'dde-connection-decl'),
(TEXTNS,u'index-entry-bibliography'),
(TEXTNS,u'index-entry-chapter'),
(TEXTNS,u'index-entry-link-end'),
(TEXTNS,u'index-entry-link-start'),
(TEXTNS,u'index-entry-page-number'),
(TEXTNS,u'index-entry-tab-stop'),
(TEXTNS,u'index-entry-text'),
(TEXTNS,u'index-source-style'),
(TEXTNS,u'line-break'),
(TEXTNS,u'page'),
(TEXTNS,u'reference-mark'),
(TEXTNS,u'reference-mark-end'),
(TEXTNS,u'reference-mark-start'),
(TEXTNS,u's'),
(TEXTNS,u'section-source'),
(TEXTNS,u'sequence-decl'),
(TEXTNS,u'soft-page-break'),
(TEXTNS,u'sort-key'),
(TEXTNS,u'tab'),
(TEXTNS,u'toc-mark'),
(TEXTNS,u'toc-mark-end'),
(TEXTNS,u'toc-mark-start'),
(TEXTNS,u'user-field-decl'),
(TEXTNS,u'user-index-mark'),
(TEXTNS,u'user-index-mark-end'),
(TEXTNS,u'user-index-mark-start'),
(TEXTNS,u'variable-decl')
)
| gpl-3.0 |
krisys/django | tests/admin_inlines/tests.py | 15 | 43604 | from __future__ import unicode_literals
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
self.client.force_login(self.superuser)
self.factory = RequestFactory()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
holder = Holder.objects.get(dummy=13)
response = self.client.get(
reverse('admin:admin_inlines_holder_change', args=(holder.id,))
)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get(
reverse('admin:admin_inlines_holder_change', args=(holder.id,))
)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-book relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_custom_form_tabular_inline_label(self):
"""
A model form with a form field specified (TitleForm.title1) should have
its label rendered in the tabular inline.
"""
response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
self.assertContains(response, '<th class="required">Title1</th>', html=True)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield">'
'<li>The two titles must be the same</li></ul></td></tr>'
)
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# An identically named callable isn't present in the parent ModelAdmin,
# so rendering of the add view shouldn't explode
response = self.client.get(reverse('admin:admin_inlines_novel_add'))
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
)
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get(reverse('admin:admin_inlines_poll_add'))
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="question_set-group"'
)
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Awesome tabular help text is awesome.)" '
'title="Awesome tabular help text is awesome." />',
1
)
# ReadOnly fields
response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text for ReadOnlyInline)" '
'title="Help text for ReadOnlyInline" />',
1
)
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name='a')
SomeChildModel.objects.create(name='b', position='0', parent=parent)
SomeChildModel.objects.create(name='c', position='1', parent=parent)
response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
self.assertNotContains(response, '<td class="field-position">')
self.assertContains(response, (
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(
response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />',
html=True
)
self.assertContains(
response,
'<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100" />',
html=True
)
self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(
response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />',
html=True
)
self.assertContains(
response,
'<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100" />',
html=True
)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = (
'<input id="id_binarytree_set-MAX_NUM_FORMS" '
'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
)
# The total number of forms will remain the same in either case
total_forms_hidden = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
)
response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_min_num(self):
"""
Ensure that min_num and extra determine number of forms.
"""
class MinNumInline(TabularInline):
model = BinaryTree
min_num = 2
extra = 3
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms)
self.assertContains(response, total_forms)
def test_custom_min_num(self):
"""
Ensure that get_min_num is called and used correctly.
"""
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
class MinNumInline(TabularInline):
model = BinaryTree
extra = 3
def get_min_num(self, request, obj=None, **kwargs):
if obj:
return 5
return 2
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms % 2)
self.assertContains(response, total_forms % 5)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
self.assertContains(response, min_forms % 5)
self.assertContains(response, total_forms % 8)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
self.assertContains(
response,
'<input id="id_nonautopkbook_set-0-rand_pk" '
'name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True
)
self.assertContains(
response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" '
'name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True
)
def test_inline_editable_pk(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
'name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1
)
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
'name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1
)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1
)
self.assertContains(
response,
'<div class="inline-related" id="inner_set-1">',
count=1
)
def test_inlines_show_change_link_registered(self):
"Inlines `show_change_link` for registered models when enabled."
holder = Holder4.objects.create(dummy=1)
item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
items = (
('inner4stacked', item1.pk),
('inner4tabular', item2.pk),
)
response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
for model, pk in items:
url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
def test_inlines_show_change_link_unregistered(self):
"Inlines `show_change_link` disabled for unregistered models."
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_tabular_inline_show_change_link_false_registered(self):
"Inlines `show_change_link` disabled by default."
poll = Poll.objects.create(name="New poll")
Question.objects.create(poll=poll)
response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineMedia(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineAdminForm(TestCase):
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,))
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
self.inner2_id = inner2.id
self.client.force_login(self.user)
def test_inline_add_m2m_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-book relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-0-id" value="%i" '
'name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id,
html=True
)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
'name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertNotContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
html=True
)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(
response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
html=True
)
# max-num 0 means we can't add new ones
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS" />',
html=True
)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(
response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
html=True
)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
html=True
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, three for new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
html=True
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps
def setUp(self):
User.objects.create_superuser(username='super', password='secret', email='super@example.com')
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
inline_id = '#inner4stacked_set-group'
def rows_length():
return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
inline_id = '#inner4stacked_set-group'
def rows_length():
return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
def test_collapsed_inlines(self):
# Collapsed inlines have SHOW/HIDE links.
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
# One field is in a stacked inline, other in a tabular one.
test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
show_links = self.selenium.find_elements_by_link_text('SHOW')
self.assertEqual(len(show_links), 2)
for show_index, field_name in enumerate(test_fields, 0):
self.wait_until_invisible(field_name)
show_links[show_index].click()
self.wait_until_visible(field_name)
hide_links = self.selenium.find_elements_by_link_text('HIDE')
self.assertEqual(len(hide_links), 2)
for hide_index, field_name in enumerate(test_fields, 0):
self.wait_until_visible(field_name)
hide_links[hide_index].click()
self.wait_until_invisible(field_name)
| bsd-3-clause |
iModels/ffci | account/migrations/0013_auto_20160324_1734.py | 1 | 6038 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-24 22:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('account', '0012_auto_20160314_0745'),
]
operations = [
migrations.CreateModel(
name='GithubHooks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('repos_hook', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='GithubRepos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('repos_name', models.CharField(max_length=255, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='github_repos', to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
migrations.RemoveField(
model_name='account',
name='github_repos1',
),
migrations.RemoveField(
model_name='account',
name='github_repos10',
),
migrations.RemoveField(
model_name='account',
name='github_repos10_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos11',
),
migrations.RemoveField(
model_name='account',
name='github_repos11_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos12',
),
migrations.RemoveField(
model_name='account',
name='github_repos12_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos13',
),
migrations.RemoveField(
model_name='account',
name='github_repos13_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos14',
),
migrations.RemoveField(
model_name='account',
name='github_repos14_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos15',
),
migrations.RemoveField(
model_name='account',
name='github_repos15_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos16',
),
migrations.RemoveField(
model_name='account',
name='github_repos16_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos17',
),
migrations.RemoveField(
model_name='account',
name='github_repos17_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos18',
),
migrations.RemoveField(
model_name='account',
name='github_repos18_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos19',
),
migrations.RemoveField(
model_name='account',
name='github_repos19_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos1_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos2',
),
migrations.RemoveField(
model_name='account',
name='github_repos20',
),
migrations.RemoveField(
model_name='account',
name='github_repos20_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos2_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos3',
),
migrations.RemoveField(
model_name='account',
name='github_repos3_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos4',
),
migrations.RemoveField(
model_name='account',
name='github_repos4_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos5',
),
migrations.RemoveField(
model_name='account',
name='github_repos5_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos6',
),
migrations.RemoveField(
model_name='account',
name='github_repos6_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos7',
),
migrations.RemoveField(
model_name='account',
name='github_repos7_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos8',
),
migrations.RemoveField(
model_name='account',
name='github_repos8_hook',
),
migrations.RemoveField(
model_name='account',
name='github_repos9',
),
migrations.RemoveField(
model_name='account',
name='github_repos9_hook',
),
migrations.AddField(
model_name='githubhooks',
name='github_repos',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='github_hooks', to='account.GithubRepos', verbose_name='github_repos'),
),
]
| mit |
svn2github/protobuf-mirror | python/google/protobuf/pyext/descriptor_cpp2_test.py | 25 | 2493 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.pyext behavior."""
__author__ = 'anuraag@google.com (Anuraag Agrawal)'
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2'
# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.descriptor_test import *
class ConfirmCppApi2Test(basetest.TestCase):
def testImplementationSetting(self):
self.assertEqual('cpp', api_implementation.Type())
self.assertEqual(2, api_implementation.Version())
if __name__ == '__main__':
basetest.main()
| bsd-3-clause |
2014c2g7/c2g7 | wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/signals.py | 1016 | 2403 | import signal
import weakref
from functools import wraps
__unittest = True
class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
self.original_handler = default_handler
if isinstance(default_handler, int):
if default_handler == signal.SIG_DFL:
# Pretend it's signal.default_int_handler instead.
default_handler = signal.default_int_handler
elif default_handler == signal.SIG_IGN:
# Not quite the same thing as SIG_IGN, but the closest we
# can make it: do nothing.
def default_handler(unused_signum, unused_frame):
pass
else:
raise TypeError("expected SIGINT signal handler to be "
"signal.SIG_IGN, signal.SIG_DFL, or a "
"callable object")
self.default_handler = default_handler
def __call__(self, signum, frame):
installed_handler = signal.getsignal(signal.SIGINT)
if installed_handler is not self:
# if we aren't the installed handler, then delegate immediately
# to the default handler
self.default_handler(signum, frame)
if self.called:
self.default_handler(signum, frame)
self.called = True
for result in _results.keys():
result.stop()
_results = weakref.WeakKeyDictionary()
def registerResult(result):
_results[result] = 1
def removeResult(result):
return bool(_results.pop(result, None))
_interrupt_handler = None
def installHandler():
global _interrupt_handler
if _interrupt_handler is None:
default_handler = signal.getsignal(signal.SIGINT)
_interrupt_handler = _InterruptHandler(default_handler)
signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
if method is not None:
@wraps(method)
def inner(*args, **kwargs):
initial = signal.getsignal(signal.SIGINT)
removeHandler()
try:
return method(*args, **kwargs)
finally:
signal.signal(signal.SIGINT, initial)
return inner
global _interrupt_handler
if _interrupt_handler is not None:
signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
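# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how a runner might combine installHandler() with the
# removeHandler() decorator so that Ctrl-C stops registered results while a
# critical section still sees the original SIGINT handler.
if __name__ == '__main__':
    import unittest

    installHandler()            # SIGINT now stops registered results
    result = unittest.TestResult()
    registerResult(result)      # weakly tracked; stop() called on Ctrl-C

    @removeHandler
    def critical_section():
        # While this runs, the original SIGINT handler is restored.
        pass

    critical_section()
    removeResult(result)        # stop tracking the result again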
| gpl-2.0 |
geekboxzone/lollipop_external_chromium_org | tools/idl_parser/idl_lexer.py | 44 | 8095 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Lexer for PPAPI IDL
The lexer uses the PLY library to build a tokenizer which understands both
WebIDL and Pepper tokens.
WebIDL, and the WebIDL token regular expressions, can be found at:
http://www.w3.org/TR/2012/CR-WebIDL-20120419/
PLY can be found at:
http://www.dabeaz.com/ply/
"""
import os.path
import sys
#
# Try to load the ply module; if that fails, assume it is in the third_party
# directory.
#
try:
# Disable lint check which fails to find the ply module.
# pylint: disable=F0401
from ply import lex
except ImportError:
module_path, module_name = os.path.split(__file__)
third_party = os.path.join(module_path, '..', '..', 'third_party')
sys.path.append(third_party)
# pylint: disable=F0401
from ply import lex
#
# IDL Lexer
#
class IDLLexer(object):
# 'literals' is a value expected by lex which specifies a list of valid
# literal tokens, meaning the token type and token value are identical.
literals = r'"*.(){}[],;:=+-/~|&^?<>'
# 't_ignore' contains ignored characters (spaces and tabs)
t_ignore = ' \t'
# 'tokens' is a value required by lex which specifies the complete list
# of valid token types.
tokens = [
# Data types
'float',
'integer',
'string',
# Symbol and keywords types
'COMMENT',
'identifier',
# MultiChar operators
'ELLIPSIS',
]
# 'keywords' is a map of string to token type. All tokens matching
# KEYWORD_OR_SYMBOL are matched against keywords dictionary, to determine
# if the token is actually a keyword.
keywords = {
'any' : 'ANY',
'attribute' : 'ATTRIBUTE',
'boolean' : 'BOOLEAN',
'byte' : 'BYTE',
'ByteString' : 'BYTESTRING',
'callback' : 'CALLBACK',
'const' : 'CONST',
'creator' : 'CREATOR',
'Date' : 'DATE',
'deleter' : 'DELETER',
'dictionary' : 'DICTIONARY',
'DOMString' : 'DOMSTRING',
'double' : 'DOUBLE',
'enum' : 'ENUM',
'false' : 'FALSE',
'float' : 'FLOAT',
'exception' : 'EXCEPTION',
'getter': 'GETTER',
'implements' : 'IMPLEMENTS',
'Infinity' : 'INFINITY',
'inherit' : 'INHERIT',
'interface' : 'INTERFACE',
'legacycaller' : 'LEGACYCALLER',
'long' : 'LONG',
'Nan' : 'NAN',
'null' : 'NULL',
'object' : 'OBJECT',
'octet' : 'OCTET',
'optional' : 'OPTIONAL',
'or' : 'OR',
'partial' : 'PARTIAL',
'readonly' : 'READONLY',
'RegExp' : 'REGEXP',
'sequence' : 'SEQUENCE',
'serializer' : 'SERIALIZER',
'setter': 'SETTER',
'short' : 'SHORT',
'static' : 'STATIC',
'stringifier' : 'STRINGIFIER',
'typedef' : 'TYPEDEF',
'true' : 'TRUE',
'unsigned' : 'UNSIGNED',
'unrestricted' : 'UNRESTRICTED',
'void' : 'VOID'
}
# Token definitions
#
# Lex assumes any value or function in the form of 't_<TYPE>' represents a
# regular expression where a match will emit a token of type <TYPE>. In the
# case of a function, the function is called when a match is made. These
# definitions come from WebIDL.
#
# These need to be methods for lexer construction, despite not using self.
# pylint: disable=R0201
def t_ELLIPSIS(self, t):
r'\.\.\.'
return t
# Regex needs to be in the docstring
# pylint: disable=C0301
def t_float(self, t):
r'-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+)'
return t
def t_integer(self, t):
r'-?([1-9][0-9]*|0[Xx][0-9A-Fa-f]+|0[0-7]*)'
return t
# A line ending '\n', we use this to increment the line number
def t_LINE_END(self, t):
r'\n+'
self.AddLines(len(t.value))
# We do not process escapes in the IDL strings. Strings are exclusively
# used for attributes and enums, and not used as typical 'C' constants.
def t_string(self, t):
r'"[^"]*"'
t.value = t.value[1:-1]
self.AddLines(t.value.count('\n'))
return t
# A C or C++ style comment: /* xxx */ or //
def t_COMMENT(self, t):
r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
self.AddLines(t.value.count('\n'))
return t
# A symbol or keyword.
def t_KEYWORD_OR_SYMBOL(self, t):
r'_?[A-Za-z][A-Za-z_0-9]*'
# All non-keywords are assumed to be symbols
t.type = self.keywords.get(t.value, 'identifier')
# We strip leading underscores so that you can specify symbols with the same
    # value as a keyword (e.g. a dictionary named 'interface').
if t.value[0] == '_':
t.value = t.value[1:]
return t
def t_ANY_error(self, t):
msg = 'Unrecognized input'
line = self.Lexer().lineno
# If that line has not been accounted for, then we must have hit
# EoF, so compute the beginning of the line that caused the problem.
if line >= len(self.index):
# Find the offset in the line of the first word causing the issue
word = t.value.split()[0]
offs = self.lines[line - 1].find(word)
# Add the computed line's starting position
self.index.append(self.Lexer().lexpos - offs)
msg = 'Unexpected EoF reached after'
pos = self.Lexer().lexpos - self.index[line]
out = self.ErrorMessage(line, pos, msg)
sys.stderr.write(out + '\n')
self._lex_errors += 1
def AddLines(self, count):
# Set the lexer position for the beginning of the next line. In the case
# of multiple lines, tokens can not exist on any of the lines except the
# last one, so the recorded value for previous lines are unused. We still
# fill the array however, to make sure the line count is correct.
self.Lexer().lineno += count
for _ in range(count):
self.index.append(self.Lexer().lexpos)
def FileLineMsg(self, line, msg):
# Generate a message containing the file and line number of a token.
filename = self.Lexer().filename
if filename:
return "%s(%d) : %s" % (filename, line + 1, msg)
return "<BuiltIn> : %s" % msg
def SourceLine(self, line, pos):
# Create a source line marker
caret = ' ' * pos + '^'
# We decrement the line number since the array is 0 based while the
# line numbers are 1 based.
return "%s\n%s" % (self.lines[line - 1], caret)
def ErrorMessage(self, line, pos, msg):
return "\n%s\n%s" % (
self.FileLineMsg(line, msg),
self.SourceLine(line, pos))
#
# Tokenizer
#
# The token function returns the next token provided by IDLLexer for matching
# against the leaf paterns.
#
def token(self):
tok = self.Lexer().token()
if tok:
self.last = tok
return tok
def GetTokens(self):
outlist = []
while True:
t = self.Lexer().token()
if not t:
break
outlist.append(t)
return outlist
def Tokenize(self, data, filename='__no_file__'):
lexer = self.Lexer()
lexer.lineno = 1
lexer.filename = filename
lexer.input(data)
self.lines = data.split('\n')
def KnownTokens(self):
return self.tokens
def Lexer(self):
if not self._lexobj:
self._lexobj = lex.lex(object=self, lextab=None, optimize=0)
return self._lexobj
def _AddToken(self, token):
if token in self.tokens:
raise RuntimeError('Same token: ' + token)
self.tokens.append(token)
def _AddTokens(self, tokens):
for token in tokens:
self._AddToken(token)
def _AddKeywords(self, keywords):
for key in keywords:
value = key.upper()
self._AddToken(value)
self.keywords[key] = value
def _DelKeywords(self, keywords):
for key in keywords:
self.tokens.remove(key.upper())
del self.keywords[key]
def __init__(self):
self.index = [0]
self._lex_errors = 0
self.linex = []
self.filename = None
self.keywords = {}
self.tokens = []
self._AddTokens(IDLLexer.tokens)
self._AddKeywords(IDLLexer.keywords)
self._lexobj = None
self.last = None
self.lines = None
# If run by itself, attempt to build the lexer
if __name__ == '__main__':
lexer_object = IDLLexer()
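  # Illustrative sketch (not in the original): tokenize a small WebIDL
  # fragment and print the token types the lexer produces. The interface
  # text below is made up purely for demonstration.
  lexer_object.Tokenize('interface Foo { attribute long bar; };')
  for token in lexer_object.GetTokens():
    print('%s: %r' % (token.type, token.value))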
| bsd-3-clause |
suda/micropython | tests/bytecode/pylib-tests/keyword.py | 761 | 2049 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
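# Quick illustration (added comment, not part of the generated content):
# iskeyword() is just a membership test against the frozen keyword list, e.g.
#
#   >>> iskeyword('for')
#   True
#   >>> iskeyword('format')
#   False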
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
lines.sort()
# load the output skeleton from the target
with open(optfile) as fp:
format = fp.readlines()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
| mit |
pinterest/pinball | tests/pinball/workflow/job_test.py | 6 | 3066 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for the job."""
import unittest
from pinball.workflow.event import Event
from pinball.workflow.job import ShellJob
from pinball.workflow.job_executor import ExecutionRecord
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class JobTestCase(unittest.TestCase):
def test_retry(self):
job = ShellJob(name='some_job')
# Empty history.
self.assertFalse(job.retry())
# History with a successful execution.
record = ExecutionRecord(instance=123, exit_code=0)
job.history.append(record)
self.assertRaises(AssertionError, job.retry)
# History with too many failures.
record = ExecutionRecord(instance=1234, exit_code=1)
job.history.append(record)
self.assertFalse(job.retry())
# History without too many failures.
job.max_attempts = 2
self.assertTrue(job.retry())
# History with too many failures in a different instance.
job.history.append(record)
record = ExecutionRecord(instance=12345, exit_code=1)
job.history.append(record)
self.assertTrue(job.retry())
class ShellJobTestCase(unittest.TestCase):
def test_customize_command(self):
job = ShellJob(name='some_job',
inputs=['some_input', 'some_other_input'])
some_event = Event(attributes={'some_attr': 'some_value'})
some_other_event = Event(attributes={
'some_attr': 'some_other_value',
'yet_another_attr': 'yet_another_value'})
execution_record = ExecutionRecord(instance=123, start_time=10)
execution_record.events = [some_event, some_other_event]
job.history = [execution_record]
# Empty command.
job.command = ''
self.assertEqual('', job.customize_command())
# Command with no attributes.
job.command = 'some_command'
self.assertEqual('some_command', job.customize_command())
# Command with attributes.
job.command = ('%(non_existent_attr)s %(some_attr)s '
'%(yet_another_attr)s')
self.assertEqual(' some_value,some_other_value yet_another_value',
job.customize_command())
# Command with percentage marks.
job.command = ('%% some_command')
self.assertEqual('% some_command', job.customize_command())
| apache-2.0 |
gooddata/openstack-nova | nova/virt/powervm/mgmt.py | 3 | 7539 | # Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities related to the PowerVM management partition.
The management partition is a special LPAR that runs the PowerVM REST API
service. It itself appears through the REST API as a LogicalPartition of type
aixlinux, but with the is_mgmt_partition property set to True.
The PowerVM Nova Compute service runs on the management partition.
"""
import glob
import os
from os import path
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm.tasks import partition as pvm_par
import retrying
from nova import exception
from nova.privsep import path as priv_path
LOG = logging.getLogger(__name__)
_MP_UUID = None
@lockutils.synchronized("mgmt_lpar_uuid")
def mgmt_uuid(adapter):
"""Returns the management partitions UUID."""
global _MP_UUID
if not _MP_UUID:
_MP_UUID = pvm_par.get_this_partition(adapter).uuid
return _MP_UUID
def discover_vscsi_disk(mapping, scan_timeout=300):
"""Bring a mapped device into the management partition and find its name.
Based on a VSCSIMapping, scan the appropriate virtual SCSI host bus,
causing the operating system to discover the mapped device. Find and
return the path of the newly-discovered device based on its UDID in the
mapping.
Note: scanning the bus will cause the operating system to discover *all*
devices on that bus. However, this method will only return the path for
the specific device from the input mapping, based on its UDID.
:param mapping: The pypowervm.wrappers.virtual_io_server.VSCSIMapping
representing the mapping of the desired disk to the
management partition.
:param scan_timeout: The maximum number of seconds after scanning to wait
for the specified device to appear.
:return: The udev-generated ("/dev/sdX") name of the discovered disk.
:raise NoDiskDiscoveryException: If the disk did not appear after the
specified timeout.
:raise UniqueDiskDiscoveryException: If more than one disk appears with the
expected UDID.
"""
# Calculate the Linux slot number from the client adapter slot number.
lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
# We'll match the device ID based on the UDID, which is actually the last
# 32 chars of the field we get from PowerVM.
udid = mapping.backing_storage.udid[-32:]
LOG.debug("Trying to discover VSCSI disk with UDID %(udid)s on slot "
"%(slot)x.", {'udid': udid, 'slot': lslot})
# Find the special file to scan the bus, and scan it.
# This glob should yield exactly one result, but use the loop just in case.
for scanpath in glob.glob(
'/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
# Writing '- - -' to this sysfs file triggers bus rescan
priv_path.writefile(scanpath, 'a', '- - -')
# Now see if our device showed up. If so, we can reliably match it based
# on its Linux ID, which ends with the disk's UDID.
dpathpat = '/dev/disk/by-id/*%s' % udid
# The bus scan is asynchronous. Need to poll, waiting for the device to
# spring into existence. Stop when glob finds at least one device, or
# after the specified timeout. Sleep 1/4 second between polls.
@retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250,
stop_max_delay=scan_timeout * 1000)
def _poll_for_dev(globpat):
return glob.glob(globpat)
try:
disks = _poll_for_dev(dpathpat)
except retrying.RetryError as re:
raise exception.NoDiskDiscoveryException(
bus=lslot, udid=udid, polls=re.last_attempt.attempt_number,
timeout=scan_timeout)
# If we get here, _poll_for_dev returned a nonempty list. If not exactly
# one entry, this is an error.
if len(disks) != 1:
raise exception.UniqueDiskDiscoveryException(path_pattern=dpathpat,
count=len(disks))
# The by-id path is a symlink. Resolve to the /dev/sdX path
dpath = path.realpath(disks[0])
LOG.debug("Discovered VSCSI disk with UDID %(udid)s on slot %(slot)x at "
"path %(devname)s.",
{'udid': udid, 'slot': lslot, 'devname': dpath})
return dpath
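# Illustrative usage sketch (not part of the original module): a typical
# caller maps a disk to the management partition, discovers it locally,
# works on the block device, then cleans up, e.g.:
#
#   dev_path = discover_vscsi_disk(mapping)   # e.g. '/dev/sdb'
#   ...                                       # stream an image onto dev_path
#   remove_block_dev(dev_path)
#
# The `mapping` object is assumed to be a pypowervm VSCSIMapping wrapper, as
# described in the docstring above.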
def remove_block_dev(devpath, scan_timeout=10):
"""Remove a block device from the management partition.
This method causes the operating system of the management partition to
delete the device special files associated with the specified block device.
:param devpath: Any path to the block special file associated with the
device to be removed.
:param scan_timeout: The maximum number of seconds after scanning to wait
for the specified device to disappear.
:raise InvalidDevicePath: If the specified device or its 'delete' special
file cannot be found.
:raise DeviceDeletionException: If the deletion was attempted, but the
device special file is still present
afterward.
"""
# Resolve symlinks, if any, to get to the /dev/sdX path
devpath = path.realpath(devpath)
try:
os.stat(devpath)
except OSError:
raise exception.InvalidDevicePath(path=devpath)
devname = devpath.rsplit('/', 1)[-1]
delpath = '/sys/block/%s/device/delete' % devname
try:
os.stat(delpath)
except OSError:
raise exception.InvalidDevicePath(path=delpath)
LOG.debug("Deleting block device %(devpath)s from the management "
"partition via special file %(delpath)s.",
{'devpath': devpath, 'delpath': delpath})
# Writing '1' to this sysfs file deletes the block device and rescans.
priv_path.writefile(delpath, 'a', '1')
# The bus scan is asynchronous. Need to poll, waiting for the device to
# disappear. Stop when stat raises OSError (dev file not found) - which is
# success - or after the specified timeout (which is failure). Sleep 1/4
# second between polls.
@retrying.retry(retry_on_result=lambda result: result, wait_fixed=250,
stop_max_delay=scan_timeout * 1000)
def _poll_for_del(statpath):
try:
os.stat(statpath)
return True
except OSError:
# Device special file is absent, as expected
return False
try:
_poll_for_del(devpath)
except retrying.RetryError as re:
# stat just kept returning (dev file continued to exist).
raise exception.DeviceDeletionException(
devpath=devpath, polls=re.last_attempt.attempt_number,
timeout=scan_timeout)
# Else stat raised - the device disappeared - all done.
| apache-2.0 |
pkuyym/Paddle | python/paddle/fluid/tests/unittests/decorators.py | 5 | 1266 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
__all__ = ['many_times', 'prog_scope']
def many_times(times):
def __impl__(fn):
def __fn__(*args, **kwargs):
for _ in range(times):
fn(*args, **kwargs)
return __fn__
return __impl__
def prog_scope():
def __impl__(fn):
def __fn__(*args, **kwargs):
prog = fluid.Program()
startup_prog = fluid.Program()
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
with fluid.program_guard(prog, startup_prog):
fn(*args, **kwargs)
return __fn__
return __impl__
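# Illustrative usage sketch (not part of the original module): the two
# decorators are typically stacked on a unit-test method so its body runs
# several times, each time inside a fresh Program/Scope, e.g.:
#
#   class TestSomething(unittest.TestCase):
#       @many_times(10)
#       @prog_scope()
#       def test_build_network(self):
#           x = fluid.layers.data(name='x', shape=[13], dtype='float32')
#           ...
#
# The layer call shown is only an example of code that needs a program guard.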
| apache-2.0 |
annarev/tensorflow | tensorflow/python/keras/layers/convolutional.py | 1 | 140802 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
class Conv(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Note: layer attributes cannot be modified after the layer has been called
once (except the `trainable` attribute).
Args:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros
evenly to the left/right or up/down of the input such that output has the
same height/width dimension as the input. `"causal"` results in causal
(dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters / groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function to use.
If you don't specify anything, no activation is applied.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel. If None, the
default initializer (glorot_uniform) will be used.
bias_initializer: An initializer for the bias vector. If None, the default
initializer (zeros) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
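  Example:

  A minimal illustration using the public `Conv2D` subclass of this base
  layer (the shapes below are chosen only for demonstration):

  >>> import tensorflow as tf
  >>> x = tf.random.normal((4, 28, 28, 3))
  >>> layer = tf.keras.layers.Conv2D(8, 3, strides=2, padding='same')
  >>> layer(x).shape
  TensorShape([4, 14, 14, 8])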
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
conv_op=None,
**kwargs):
super(Conv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
if isinstance(filters, float):
filters = int(filters)
self.filters = filters
self.groups = groups or 1
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=self.rank + 2)
self._validate_init()
self._is_causal = self.padding == 'causal'
self._channels_first = self.data_format == 'channels_first'
self._tf_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
def _validate_init(self):
if self.filters is not None and self.filters % self.groups != 0:
raise ValueError(
'The number of filters must be evenly divisible by the number of '
'groups. Received: groups={}, filters={}'.format(
self.groups, self.filters))
if not all(self.kernel_size):
raise ValueError('The argument `kernel_size` cannot contain 0(s). '
'Received: %s' % (self.kernel_size,))
if (self.padding == 'causal' and not isinstance(self,
(Conv1D, SeparableConv1D))):
raise ValueError('Causal padding is only supported for `Conv1D`'
'and `SeparableConv1D`.')
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
if input_channel % self.groups != 0:
raise ValueError(
'The number of input channels must be evenly divisible by the number '
'of groups. Received groups={}, but the input has {} channels '
'(full input shape is {}).'.format(self.groups, input_channel,
input_shape))
kernel_shape = self.kernel_size + (input_channel // self.groups,
self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(min_ndim=self.rank + 2,
axes={channel_axis: input_channel})
# Convert Keras formats to TF native formats.
if self.padding == 'causal':
tf_padding = 'VALID' # Causal padding handled in `call`.
elif isinstance(self.padding, six.string_types):
tf_padding = self.padding.upper()
else:
tf_padding = self.padding
tf_dilations = list(self.dilation_rate)
tf_strides = list(self.strides)
tf_op_name = self.__class__.__name__
if tf_op_name == 'Conv1D':
tf_op_name = 'conv1d' # Backwards compat.
self._convolution_op = functools.partial(
nn_ops.convolution_v2,
strides=tf_strides,
padding=tf_padding,
dilations=tf_dilations,
data_format=self._tf_data_format,
name=tf_op_name)
self.built = True
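# Illustrative note on grouped convolution (a minimal sketch, assuming a
# Conv2D layer with groups=3, filters=6 and a 12-channel input): the
# `kernel_shape` built above is kernel_size + (12 // 3, 6); each group of
# 4 input channels is convolved with 6 / 3 = 2 filters and the group outputs
# are concatenated along the channel axis, giving 6 output channels in total.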
def call(self, inputs):
input_shape = inputs.shape
if self._is_causal: # Apply causal padding to inputs for Conv1D.
inputs = array_ops.pad(inputs, self._compute_causal_padding(inputs))
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
output_rank = outputs.shape.rank
if self.rank == 1 and self._channels_first:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
# Handle multiple batch dimensions.
if output_rank is not None and output_rank > 2 + self.rank:
def _apply_fn(o):
return nn.bias_add(o, self.bias, data_format=self._tf_data_format)
outputs = conv_utils.squeeze_batch_dims(
outputs, _apply_fn, inner_rank=self.rank + 1)
else:
outputs = nn.bias_add(
outputs, self.bias, data_format=self._tf_data_format)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(input_shape)
outputs.set_shape(out_shape)
if self.activation is not None:
return self.activation(outputs)
return outputs
def _spatial_output_shape(self, spatial_input_shape):
return [
conv_utils.conv_output_length(
length,
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
for i, length in enumerate(spatial_input_shape)
]
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_rank = len(input_shape) - self.rank - 1
if self.data_format == 'channels_last':
return tensor_shape.TensorShape(
input_shape[:batch_rank]
+ self._spatial_output_shape(input_shape[batch_rank:-1])
+ [self.filters])
else:
return tensor_shape.TensorShape(
input_shape[:batch_rank] + [self.filters] +
self._spatial_output_shape(input_shape[batch_rank + 1:]))
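# Worked example (a minimal sketch, assuming `padding='valid'`,
# `kernel_size=(3,)`, `strides=(2,)` and `dilation_rate=(1,)`): an input of
# shape (batch, 10, channels) in `channels_last` format yields
# conv_utils.conv_output_length(10, 3, 'valid', 2) == ceil((10 - 3 + 1) / 2)
# == 4, i.e. an output shape of (batch, 4, filters).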
def _recreate_conv_op(self, inputs): # pylint: disable=unused-argument
return False
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'dilation_rate':
self.dilation_rate,
'groups':
self.groups,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(Conv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
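# Usage sketch (illustrative): the config dict returned above can rebuild an
# equivalent layer via the `from_config` classmethod inherited from `Layer`,
# e.g.
#   config = tf.keras.layers.Conv2D(4, 3).get_config()
#   clone = tf.keras.layers.Conv2D.from_config(config)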
def _compute_causal_padding(self, inputs):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if getattr(inputs.shape, 'ndims', None) is None:
batch_rank = 1
else:
batch_rank = len(inputs.shape) - 2
if self.data_format == 'channels_last':
causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]]
return causal_padding
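# Worked example (a minimal sketch, assuming kernel_size=(3,) and
# dilation_rate=(2,)): left_pad = 2 * (3 - 1) = 4, so a rank-3
# `channels_last` input is padded with [[0, 0], [4, 0], [0, 0]] -- zeros are
# prepended along the time axis only, which keeps output[t] independent of
# input[t+1:].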
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return -1 - self.rank
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide an `input_shape` argument
(tuple of integers or `None`, e.g.
`(10, 128)` for sequences of 10 vectors, each of dimension 128,
or `(None, 128)` for variable-length sequences of 128-dimensional vectors).
Examples:
>>> # The inputs are 128-length vectors with 10 timesteps, and the batch size
>>> # is 4.
>>> input_shape = (4, 10, 128)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv1D(
... 32, 3, activation='relu', input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 8, 32)
>>> # With extended batch shape [4, 7] (e.g. weather data where batch
>>> # dimensions correspond to spatial location and the third dimension
>>> # corresponds to time.)
>>> input_shape = (4, 7, 10, 128)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv1D(
... 32, 3, activation='relu', input_shape=input_shape[2:])(x)
>>> print(y.shape)
(4, 7, 8, 32)
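>>> # A minimal illustrative causal-padding sketch: the 10 timesteps are
>>> # preserved because `"causal"` pads on the left of the time axis only.
>>> x = tf.random.normal((4, 10, 128))
>>> y = tf.keras.layers.Conv1D(32, 3, padding='causal', activation='relu')(x)
>>> print(y.shape)
(4, 10, 32)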
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"` or `"causal"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
`"causal"` results in causal (dilated) convolutions, e.g. `output[t]`
does not depend on `input[t+1:]`. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section
2.1](https://arxiv.org/abs/1609.03499).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
dilation_rate: an integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters / groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
3+D tensor with shape: `batch_shape + (steps, input_dim)`
Output shape:
3+D tensor with shape: `batch_shape + (new_steps, filters)`
`steps` value might have changed due to padding or strides.
Returns:
A tensor of rank 3 representing
`activation(conv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`. You can use `None` when
a dimension has variable size.
Examples:
>>> # The inputs are 28x28 RGB images with `channels_last` and the batch
>>> # size is 4.
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 26, 26, 2)
>>> # With `dilation_rate` as 2.
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 24, 24, 2)
>>> # With `padding` as "same".
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', padding="same", input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 28, 28, 2)
>>> # With extended batch shape [4, 7]:
>>> input_shape = (4, 7, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
... 2, 3, activation='relu', input_shape=input_shape[2:])(x)
>>> print(y.shape)
(4, 7, 26, 26, 2)
Args:
filters: Integer, the dimensionality of the output space (i.e. the number of
output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to specify
the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds
to inputs with shape `(batch_size, height, width, channels)` while
`channels_first` corresponds to inputs with shape `(batch_size, channels,
height, width)`. It defaults to the `image_data_format` value found in
your Keras config file at `~/.keras/keras.json`. If you never set it, then
it will be `channels_last`.
dilation_rate: an integer or tuple/list of 2 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any stride
value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved separately
with `filters / groups` filters. The output is the concatenation of all
the `groups` results along the channel axis. Input channels and `filters`
must both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything, no
activation is applied (see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (see
`keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (see
`keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (see
`keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (see
`keras.constraints`).
Input shape:
4+D tensor with shape: `batch_shape + (channels, rows, cols)` if
`data_format='channels_first'`
or 4+D tensor with shape: `batch_shape + (rows, cols, channels)` if
`data_format='channels_last'`.
Output shape:
4+D tensor with shape: `batch_shape + (filters, new_rows, new_cols)` if
`data_format='channels_first'` or 4+D tensor with shape: `batch_shape +
(new_rows, new_cols, filters)` if `data_format='channels_last'`. `rows`
and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4+ representing
`activation(conv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is `"causal"`.
ValueError: when both `strides > 1` and `dilation_rate > 1`.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Examples:
>>> # The inputs are 28x28x28 volumes with a single channel, and the
>>> # batch size is 4
>>> input_shape = (4, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 26, 26, 26, 2)
>>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of 3D frames,
>>> # with 7 frames per video.
>>> input_shape = (4, 7, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape[2:])(x)
>>> print(y.shape)
(4, 7, 26, 26, 26, 2)
Args:
filters: Integer, the dimensionality of the output space (i.e. the number of
output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
height and width of the 3D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the convolution along each spatial dimension. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds
to inputs with shape `batch_shape + (spatial_dim1, spatial_dim2,
spatial_dim3, channels)` while `channels_first` corresponds to inputs with
shape `batch_shape + (channels, spatial_dim1, spatial_dim2,
spatial_dim3)`. It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`. If you never set it, then it
will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any stride
value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved separately
with `filters / groups` filters. The output is the concatenation of all
the `groups` results along the channel axis. Input channels and `filters`
must both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything, no
activation is applied (see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (see
`keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (see
`keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (see
`keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (see
`keras.constraints`).
Input shape:
5+D tensor with shape: `batch_shape + (channels, conv_dim1, conv_dim2,
conv_dim3)` if data_format='channels_first'
or 5+D tensor with shape: `batch_shape + (conv_dim1, conv_dim2, conv_dim3,
channels)` if data_format='channels_last'.
Output shape:
5+D tensor with shape: `batch_shape + (filters, new_conv_dim1,
new_conv_dim2, new_conv_dim3)` if data_format='channels_first'
or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2,
new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`,
`new_conv_dim2` and `new_conv_dim3` values might have changed due to
padding.
Returns:
A tensor of rank 5+ representing
`activation(conv3d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides > 1` and `dilation_rate > 1`.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv1DTranspose',
'keras.layers.Convolution1DTranspose')
class Conv1DTranspose(Conv1D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels.
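Examples:
>>> # A minimal illustrative sketch: for `padding='valid'`, the output length
>>> # follows `(steps - 1) * strides + kernel_size` = (10 - 1) * 2 + 3 = 21.
>>> x = tf.random.normal((4, 10, 128))
>>> y = tf.keras.layers.Conv1DTranspose(32, 3, strides=2)(x)
>>> print(y.shape)
(4, 21, 32)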
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer length of the 1D convolution window.
strides: An integer specifying the stride of the convolution along the
time dimension. Specifying a stride value != 1 is incompatible with
specifying a `dilation_rate` value != 1. Defaults to 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
output_padding: An integer specifying the amount of padding along
the time dimension of the output tensor.
The amount of output padding must be lower than the stride.
If set to `None` (default), the output shape is inferred.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, length)`.
dilation_rate: an integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying a `dilation_rate` value != 1 is
incompatible with specifying a stride value != 1.
Also dilation rate larger than 1 is not currently supported.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
3D tensor with shape:
`(batch_size, steps, channels)`
Output shape:
3D tensor with shape:
`(batch_size, new_steps, filters)`
If `output_padding` is specified:
```
new_timesteps = ((timesteps - 1) * strides + kernel_size -
2 * padding + output_padding)
```
Returns:
A tensor of rank 3 representing
`activation(conv1dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep learning](
https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 1, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 3:
raise ValueError('Inputs should have rank 3. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
t_axis = 2
else:
t_axis = 1
length = inputs_shape[t_axis]
if self.output_padding is None:
output_padding = None
else:
output_padding = self.output_padding[0]
# Infer the dynamic output shape:
out_length = conv_utils.deconv_output_length(
length, self.kernel_size[0], padding=self.padding,
output_padding=output_padding, stride=self.strides[0],
dilation=self.dilation_rate[0])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_length)
else:
output_shape = (batch_size, out_length, self.filters)
data_format = conv_utils.convert_data_format(self.data_format, ndim=3)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn_ops.conv1d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding.upper(),
data_format=data_format,
dilations=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, t_axis = 1, 2
else:
c_axis, t_axis = 2, 1
if self.output_padding is None:
output_padding = None
else:
output_padding = self.output_padding[0]
output_shape[c_axis] = self.filters
output_shape[t_axis] = conv_utils.deconv_output_length(
output_shape[t_axis],
self.kernel_size[0],
padding=self.padding,
output_padding=output_padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv1DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
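Examples:
>>> # A minimal illustrative sketch: for `padding='valid'`, each spatial
>>> # dimension follows `(size - 1) * stride + kernel_size`.
>>> x = tf.random.normal((4, 10, 8, 128))
>>> y = tf.keras.layers.Conv2DTranspose(32, 3, strides=2)(x)
>>> print(y.shape)
(4, 21, 17, 32)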
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
If `output_padding` is specified:
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
```
Returns:
A tensor of rank 4 representing
`activation(conv2dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input '
'shape: ' + str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
# Use the constant height and width when possible.
# TODO(scottzhu): Extract this into a utility function that can be applied
# to all convolutional layers, which currently lost the static shape
# information due to tf.shape().
height, width = None, None
if inputs.shape.rank is not None:
dims = inputs.shape.as_list()
height = dims[h_axis]
width = dims[w_axis]
height = height if height is not None else inputs_shape[h_axis]
width = width if width is not None else inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
@keras_export('keras.layers.Conv3DTranspose',
'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
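Examples:
>>> # A minimal illustrative sketch: for `padding='valid'`, each spatial
>>> # dimension follows `(size - 1) * stride + kernel_size` = (5 - 1) * 2 + 3.
>>> x = tf.random.normal((4, 5, 5, 5, 3))
>>> y = tf.keras.layers.Conv3DTranspose(2, 3, strides=2)(x)
>>> print(y.shape)
(4, 11, 11, 11, 2)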
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
output_padding: An integer or tuple/list of 3 integers,
specifying the amount of padding along the depth, height, and
width.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, depth, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (
see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
5D tensor with shape:
`(batch_size, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch_size, depth, rows, cols, channels)` if data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch_size, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch_size, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
`depth` and `rows` and `cols` values might have changed due to padding.
If `output_padding` is specified:
```
new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] +
output_padding[2])
```
Returns:
A tensor of rank 5 representing
`activation(conv3dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 5:
raise ValueError('Inputs should have rank 5. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined, found None: ' + str(input_shape))
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
'kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_depth = conv_utils.deconv_output_length(depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_depth, out_height,
out_width)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (batch_size, out_depth, out_height, out_width,
self.filters)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
padding=self.padding.upper())
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv3DTranspose, self).get_config()
config.pop('dilation_rate')
config['output_padding'] = self.output_padding
return config
class SeparableConv(Conv):
"""Abstract base layer for separable nD convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Args:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel (
see `keras.initializers`). If None, then the default initializer (
'glorot_uniform') will be used.
pointwise_initializer: An initializer for the pointwise convolution kernel (
see `keras.initializers`). If None, then the default initializer
('glorot_uniform') will be used.
bias_initializer: An initializer for the bias vector. If None, the default
initializer ('zeros') will be used (see `keras.initializers`).
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv, self).__init__(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
bias_initializer=initializers.get(bias_initializer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
depthwise_kernel_shape = self.kernel_size + (input_dim,
self.depth_multiplier)
pointwise_kernel_shape = (
1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
self.depthwise_kernel = self.add_weight(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype)
self.pointwise_kernel = self.add_weight(
name='pointwise_kernel',
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'depth_multiplier':
self.depth_multiplier,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SeparableConv1D',
'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
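Examples:
A minimal usage sketch; the output length shown assumes the default
`strides=1` and `padding='valid'`:
>>> x = tf.random.normal((4, 10, 32))
>>> y = tf.keras.layers.SeparableConv1D(64, 3, activation='relu')(x)
>>> print(y.shape)
(4, 8, 64)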
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input. `"causal"` results in causal
(dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel (
see `keras.initializers`). If None, then the default initializer (
'glorot_uniform') will be used.
pointwise_initializer: An initializer for the pointwise convolution kernel (
see `keras.initializers`). If None, then the default initializer
('glorot_uniform') will be used.
bias_initializer: An initializer for the bias vector. If None, the default
initializer ('zeros') will be used (see `keras.initializers`).
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel (see `keras.regularizers`).
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel (see `keras.regularizers`).
bias_regularizer: Optional regularizer for the bias vector (
see `keras.regularizers`).
activity_regularizer: Optional regularizer function for the output (
see `keras.regularizers`).
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training (
see `keras.constraints`).
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer` (
see `keras.constraints`).
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer` (
see `keras.constraints`).
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
Input shape:
3D tensor with shape:
`(batch_size, channels, steps)` if data_format='channels_first'
or 3D tensor with shape:
`(batch_size, steps, channels)` if data_format='channels_last'.
Output shape:
3D tensor with shape:
`(batch_size, filters, new_steps)` if data_format='channels_first'
or 3D tensor with shape:
`(batch_size, new_steps, filters)` if data_format='channels_last'.
`new_steps` value might have changed due to padding or strides.
Returns:
A tensor of rank 3 representing
`activation(separableconv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding(inputs))
if self.data_format == 'channels_last':
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
# Explicitly broadcast inputs and kernels to 4D.
# TODO(fchollet): refactor when a native separable_conv1d op is available.
inputs = array_ops.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
dilation_rate = (1,) + self.dilation_rate
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
outputs = nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=op_padding.upper(),
rate=dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.SeparableConv2D',
'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
"""Depthwise separable 2D convolution.
Separable convolutions consist of first performing
a depthwise spatial convolution
(which acts on each input channel separately)
followed by a pointwise convolution which mixes the resulting
output channels. The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Intuitively, separable convolutions can be understood as
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
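Examples:
A minimal usage sketch; the spatial output size shown assumes the default
`strides=(1, 1)` and `padding='valid'`:
>>> x = tf.random.normal((4, 28, 28, 3))
>>> y = tf.keras.layers.SeparableConv2D(8, 3, activation='relu')(x)
>>> print(y.shape)
(4, 26, 26, 8)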
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: An initializer for the depthwise convolution kernel (
see `keras.initializers`). If None, then the default initializer (
'glorot_uniform') will be used.
pointwise_initializer: An initializer for the pointwise convolution kernel (
see `keras.initializers`). If None, then the default initializer
('glorot_uniform') will be used.
bias_initializer: An initializer for the bias vector. If None, the default
initializer ('zeros') will be used (see `keras.initializers`).
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix (see `keras.regularizers`).
pointwise_regularizer: Regularizer function applied to
the pointwise kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (
see `keras.regularizers`).
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix (
see `keras.constraints`).
pointwise_constraint: Constraint function applied to
the pointwise kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(separableconv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
# Apply the actual ops.
if self.data_format == 'channels_last':
strides = (1,) + self.strides + (1,)
else:
strides = (1, 1) + self.strides
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=strides,
padding=self.padding.upper(),
rate=self.dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
Depthwise Separable convolutions consist of performing
just the first step in a depthwise spatial convolution
(which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
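Examples:
A minimal usage sketch; with the default `depth_multiplier=1` the number of
output channels equals the number of input channels:
>>> x = tf.random.normal((4, 28, 28, 3))
>>> y = tf.keras.layers.DepthwiseConv2D(3, activation='relu')(x)
>>> print(y.shape)
(4, 26, 26, 3)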
Args:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix (
see `keras.initializers`). If None, the default initializer (
'glorot_uniform') will be used.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). If None, the default initializer (
'zeros') will be used.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation') (
see `keras.regularizers`).
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`[batch_size, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch_size, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch_size, channels * depth_multiplier, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch_size, new_rows, new_cols, channels * depth_multiplier]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(depthwiseconv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
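# The bias has one entry per depthwise output channel, i.e.
# input_dim * depth_multiplier entries.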
if self.use_bias:
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0],
self.dilation_rate[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1],
self.dilation_rate[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
@keras_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = tf.keras.layers.UpSampling1D(size=2)(x)
>>> print(y)
tf.Tensor(
[[[ 0 1 2]
[ 0 1 2]
[ 3 4 5]
[ 3 4 5]]
[[ 6 7 8]
[ 6 7 8]
[ 9 10 11]
[ 9 10 11]]], shape=(2, 4, 3), dtype=int64)
Args:
size: Integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = backend.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by `size[0]` and `size[1]` respectively.
Examples:
>>> input_shape = (2, 2, 1, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[ 0 1 2]]
[[ 3 4 5]]]
[[[ 6 7 8]]
[[ 9 10 11]]]]
>>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)
>>> print(y)
tf.Tensor(
[[[[ 0 1 2]
[ 0 1 2]]
[[ 3 4 5]
[ 3 4 5]]]
[[[ 6 7 8]
[ 6 7 8]]
[[ 9 10 11]
[ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)
Args:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
interpolation: A string, one of `nearest` or `bilinear`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
config = {
'size': self.size,
'data_format': self.data_format,
'interpolation': self.interpolation
}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by `size[0]`, `size[1]` and `size[2]` respectively.
Examples:
>>> input_shape = (2, 1, 2, 1, 3)
>>> x = tf.constant(1, shape=input_shape)
>>> y = tf.keras.layers.UpSampling3D(size=2)(x)
>>> print(y.shape)
(2, 2, 4, 2, 3)
Args:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
dim1 = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
dim2 = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
dim3 = self.size[2] * input_shape[
4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
dim1 = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
dim2 = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
dim3 = self.size[2] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)
>>> print(y)
tf.Tensor(
[[[ 0 0 0]
[ 0 0 0]
[ 0 1 2]
[ 3 4 5]
[ 0 0 0]
[ 0 0 0]]
[[ 0 0 0]
[ 0 0 0]
[ 6 7 8]
[ 9 10 11]
[ 0 0 0]
[ 0 0 0]]], shape=(2, 6, 3), dtype=int64)
Args:
padding: Int, or tuple of int (length 2), or dictionary.
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch_size, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch_size, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros
at the top, bottom, left and right side of an image tensor.
Examples:
>>> input_shape = (1, 1, 2, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[0 1]
[2 3]]]]
>>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x)
>>> print(y)
tf.Tensor(
[[[[0 0]
[0 0]
[0 0]
[0 0]]
[[0 0]
[0 1]
[2 3]
[0 0]]
[[0 0]
[0 0]
[0 0]
[0 0]]]], shape=(1, 3, 4, 2), dtype=int64)
Args:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, padded_rows, padded_cols)`
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return backend.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Examples:
>>> input_shape = (1, 1, 2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x)
>>> print(y.shape)
(1, 5, 6, 6, 3)
Args:
padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric padding
is applied to depth, height, and width.
- If tuple of 3 ints:
interpreted as three different
symmetric padding values for depth, height, and width:
`(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_padded_axis, second_padded_axis, third_padded_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_padded_axis, second_padded_axis,
third_padded_axis)`
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Examples:
>>> input_shape = (2, 28, 28, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)
>>> print(y.shape)
(2, 24, 20, 3)
Args:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, cropped_rows, cropped_cols)`
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Examples:
>>> input_shape = (2, 28, 28, 10, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
>>> print(y.shape)
(2, 24, 20, 6, 3)
Args:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
- If tuple of 3 ints: interpreted as three different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| apache-2.0 |
annarev/tensorflow | tensorflow/python/keras/layers/preprocessing/reduction_test.py | 11 | 8871 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.reduction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers.preprocessing import reduction
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class ReductionTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "max",
"reduction_str": "max",
"expected_output": [[3.0, 3.0], [3.0, 2.0]]
}, {
"testcase_name": "mean",
"reduction_str": "mean",
"expected_output": [[2.0, 2.0], [2.0, 1.5]]
}, {
"testcase_name": "min",
"reduction_str": "min",
"expected_output": [[1.0, 1.0], [1.0, 1.0]]
}, {
"testcase_name": "prod",
"reduction_str": "prod",
"expected_output": [[6.0, 6.0], [3.0, 2.0]]
}, {
"testcase_name": "sum",
"reduction_str": "sum",
"expected_output": [[6.0, 6.0], [4.0, 3.0]]
})
def test_unweighted_ragged_reduction(self, reduction_str, expected_output):
data = ragged_factory_ops.constant([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
[[3.0, 1.0], [1.0, 2.0]]])
input_tensor = keras.Input(shape=(None, None), ragged=True)
output_tensor = reduction.Reduction(reduction=reduction_str)(input_tensor)
model = keras.Model(input_tensor, output_tensor)
output = model.predict(data)
self.assertAllClose(expected_output, output)
@parameterized.named_parameters(
{
"testcase_name": "max",
"reduction_str": "max",
"expected_output": [[4.0, 4.0], [1.5, 6.0]]
}, {
"testcase_name": "mean",
"reduction_str": "mean",
"expected_output": [[2.0, 2.0], [1.666667, 1.75]]
}, {
"testcase_name": "min",
"reduction_str": "min",
"expected_output": [[1.0, 1.0], [1.0, 1.0]]
}, {
"testcase_name": "prod",
"reduction_str": "prod",
"expected_output": [[12.0, 12.0], [1.5, 6.0]]
}, {
"testcase_name": "sum",
"reduction_str": "sum",
"expected_output": [[8.0, 8.0], [2.5, 7.0]]
}, {
"testcase_name": "sqrtn",
"reduction_str": "sqrtn",
"expected_output": [[3.265986, 3.265986], [2.236067, 2.213594]]
})
def test_weighted_ragged_reduction(self, reduction_str, expected_output):
data = ragged_factory_ops.constant([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
[[3.0, 1.0], [1.0, 2.0]]])
input_tensor = keras.Input(shape=(None, None), ragged=True)
weights = ragged_factory_ops.constant([[[1.0, 1.0], [2.0, 2.0], [1.0, 1.0]],
[[0.5, 1.0], [1.0, 3.0]]])
weight_input_tensor = keras.Input(shape=(None, None), ragged=True)
output_tensor = reduction.Reduction(reduction=reduction_str)(
input_tensor, weights=weight_input_tensor)
model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
output = model.predict([data, weights])
self.assertAllClose(expected_output, output)
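# A hand-derived sanity sketch for the weighted expectations above (not part
# of the test itself): the weighted "mean" divides sum(weights * data) by
# sum(weights), e.g. (1*1 + 2*2 + 1*3) / (1 + 2 + 1) = 2.0 for the first
# feature of the first batch, while "sqrtn" divides sum(weights * data) by
# sqrt(sum(weights ** 2)), e.g. 8 / sqrt(1 + 4 + 1) ~= 3.265986.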
def test_weighted_ragged_reduction_with_different_dimensionality(self):
data = ragged_factory_ops.constant([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
[[3.0, 1.0], [1.0, 2.0]]])
input_tensor = keras.Input(shape=(None, None), ragged=True)
weights = ragged_factory_ops.constant([[1.0, 2.0, 1.0], [1.0, 1.0]])
weight_input_tensor = keras.Input(shape=(None,), ragged=True)
output_tensor = reduction.Reduction(reduction="mean")(
input_tensor, weights=weight_input_tensor)
model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
output = model.predict([data, weights])
expected_output = [[2.0, 2.0], [2.0, 1.5]]
self.assertAllClose(expected_output, output)
@parameterized.named_parameters(
{
"testcase_name": "max",
"reduction_str": "max",
"expected_output": [[3.0, 3.0], [3.0, 2.0]]
}, {
"testcase_name": "mean",
"reduction_str": "mean",
"expected_output": [[2.0, 2.0], [1.333333, 1.0]]
}, {
"testcase_name": "min",
"reduction_str": "min",
"expected_output": [[1.0, 1.0], [0.0, 0.0]]
}, {
"testcase_name": "prod",
"reduction_str": "prod",
"expected_output": [[6.0, 6.0], [0.0, 0.0]]
}, {
"testcase_name": "sum",
"reduction_str": "sum",
"expected_output": [[6.0, 6.0], [4.0, 3.0]]
})
def test_unweighted_dense_reduction(self, reduction_str, expected_output):
data = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
[[3.0, 1.0], [1.0, 2.0], [0.0, 0.0]]])
input_tensor = keras.Input(shape=(None, None))
output_tensor = reduction.Reduction(reduction=reduction_str)(input_tensor)
model = keras.Model(input_tensor, output_tensor)
output = model.predict(data)
self.assertAllClose(expected_output, output)
@parameterized.named_parameters(
{
"testcase_name": "max",
"reduction_str": "max",
"expected_output": [[4.0, 4.0], [1.5, 6.0]]
}, {
"testcase_name": "mean",
"reduction_str": "mean",
"expected_output": [[2.0, 2.0], [1.666667, 1.75]]
}, {
"testcase_name": "min",
"reduction_str": "min",
"expected_output": [[1.0, 1.0], [0.0, 0.0]]
}, {
"testcase_name": "prod",
"reduction_str": "prod",
"expected_output": [[12.0, 12.0], [0.0, 0.0]]
}, {
"testcase_name": "sum",
"reduction_str": "sum",
"expected_output": [[8.0, 8.0], [2.5, 7.0]]
}, {
"testcase_name": "sqrtn",
"reduction_str": "sqrtn",
"expected_output": [[3.265986, 3.265986], [2.236067, 2.213594]]
})
def test_weighted_dense_reduction(self, reduction_str, expected_output):
data = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
[[3.0, 1.0], [1.0, 2.0], [0.0, 0.0]]])
input_tensor = keras.Input(shape=(None, None))
weights = np.array([[[1.0, 1.0], [2.0, 2.0], [1.0, 1.0]],
[[0.5, 1.0], [1.0, 3.0], [0.0, 0.0]]])
weight_input_tensor = keras.Input(shape=(None, None))
output_tensor = reduction.Reduction(reduction=reduction_str)(
input_tensor, weights=weight_input_tensor)
model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
output = model.predict([data, weights])
self.assertAllClose(expected_output, output)
def test_weighted_dense_reduction_with_different_dimensionality(self):
data = np.array([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
[[3.0, 1.0], [1.0, 2.0], [0.0, 0.0]]])
input_tensor = keras.Input(shape=(None, None))
weights = np.array([[1.0, 2.0, 1.0], [1.0, 1.0, 0.0]])
weight_input_tensor = keras.Input(shape=(None,))
output_tensor = reduction.Reduction(reduction="mean")(
input_tensor, weights=weight_input_tensor)
model = keras.Model([input_tensor, weight_input_tensor], output_tensor)
output = model.predict([data, weights])
expected_output = [[2.0, 2.0], [2.0, 1.5]]
self.assertAllClose(expected_output, output)
def test_sqrtn_fails_on_unweighted_ragged(self):
input_tensor = keras.Input(shape=(None, None), ragged=True)
with self.assertRaisesRegex(ValueError, ".*sqrtn.*"):
_ = reduction.Reduction(reduction="sqrtn")(input_tensor)
def test_sqrtn_fails_on_unweighted_dense(self):
input_tensor = keras.Input(shape=(None, None))
with self.assertRaisesRegex(ValueError, ".*sqrtn.*"):
_ = reduction.Reduction(reduction="sqrtn")(input_tensor)
if __name__ == "__main__":
test.main()
| apache-2.0 |
vtafaucet/virtacoin | qa/rpc-tests/bipdersig-p2p.py | 4 | 6504 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework import ComparisonTestFramework
from util import *
from mininode import CTransaction, NetworkThread
from blocktools import create_coinbase, create_block
from binascii import hexlify, unhexlify
import cStringIO
from comptool import TestInstance, TestManager
from script import CScript
import time
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
'''
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
'''
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + '\0' + i[-1])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
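# Illustrative sketch (not part of the original test): on a plain byte string
# the padding applied by unDERify() looks like this (signature bytes are
# hypothetical):
#
#   sig_push = der_sig + '\x01'                    # DER signature + SIGHASH_ALL
#   padded = sig_push[0:-1] + '\0' + sig_push[-1]  # NUL inserted before hashtype
#
# The padded push is no longer strictly DER encoded, so BIP66-enforcing nodes
# reject a block containing it once the soft fork is active.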
'''
This test is meant to exercise BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 98 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Check that the new DERSIG rules are enforced on the 751st version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP66Test(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = time.time()
''' 98 more version 2 blocks '''
test_blocks = []
for i in xrange(98):
block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 3 blocks '''
test_blocks = []
for i in xrange(749):
block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new DERSIG rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance([[block, True]])
'''
Check that the new DERSIG rules are enforced in the 751st version 3
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in xrange(199):
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance([[block, True]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP66Test().main()
| mit |
ulif/pulp | bindings/pulp/bindings/responses.py | 10 | 5890 | from gettext import gettext as _
RESPONSE_ACCEPTED = 'accepted'
RESPONSE_POSTPONED = 'postponed'
RESPONSE_REJECTED = 'rejected'
STATE_ACCEPTED = 'accepted'
STATE_RUNNING = 'running'
STATE_WAITING = 'waiting'
STATE_FINISHED = 'finished'
STATE_ERROR = 'error'
STATE_CANCELED = 'canceled'
STATE_SKIPPED = 'skipped'
COMPLETED_STATES = (STATE_FINISHED, STATE_ERROR, STATE_CANCELED, STATE_SKIPPED)
class Response(object):
"""
Contains the data received from the server on a successful request.
"""
def __init__(self, response_code, response_body):
self.response_code = response_code
self.response_body = response_body
def __str__(self):
return _("Response: code [%(c)s] body [%(b)s]") % {'c': self.response_code,
'b': self.response_body}
def is_async(self):
"""
Returns if the response indicated an asynchronous task has been queued
on the server. If this returns true, the response_body will be a Task.
Otherwise, the response body will be a Document.
@return: true if the request did not immediately execute and complete
but rather was queued to be run asynchronously
@rtype: bool
"""
return isinstance(self.response_body, Task)
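# Illustrative sketch (not part of the original module): typical handling of a
# Response returned by a bindings call, where `api` is a hypothetical binding.
#
#   response = api.create_repository(...)
#   if response.is_async():
#       task = response.response_body       # Task; poll until task.is_completed()
#   else:
#       document = response.response_body   # plain document with the result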
class Task(object):
"""
Contains the data received from a call to the server that describes an
asynchronous call queued or running on the server.
This class provides a number of utility methods for interpreting the state
of the task which should be used whenever possible instead of manually
interpreting the structure of the data within.
Below is a sample task dictionary that can be copied for unit tests that
need to simulate a task response:
TASK_TEMPLATE = {
"exception": None,
"task_id": 'default-id',
"tags": [],
"start_time": None,
"traceback": None,
"state": None,
"finish_time": None,
"schedule_id": None,
"result": None,
"progress_report": {},
}
"""
def __init__(self, response_body):
"""
Initialize the Task based on the data returned from Pulp's task API.
:param response_body: The de-serialized response from Pulp's task API
:type response_body: dict
"""
# Tasking identity information
if '_href' in response_body:
self.href = response_body['_href']
else:
self.href = None
self.task_id = response_body.get('task_id')
self.tags = response_body.get('tags', [])
self.start_time = response_body.get('start_time')
self.finish_time = response_body.get('finish_time')
# Related to the callable being executed
self.state = response_body.get('state')
self.progress_report = response_body.get('progress_report')
self.result = response_body.get('result')
self.exception = response_body.get('exception')
self.traceback = response_body.get('traceback')
self.error = response_body.get('error')
self.spawned_tasks = []
spawned_tasks = response_body.get('spawned_tasks')
if spawned_tasks:
for task in spawned_tasks:
self.spawned_tasks.append(Task(task))
def is_waiting(self):
"""
Indicates if the task has been accepted but has not yet been able to
run. This may be due to the task being blocked by another task or
because the server is busy with other items.
:rtype: bool
"""
return self.state == STATE_WAITING
def is_running(self):
"""
Indicates if the task is in the process of running on the server.
:rtype: bool
"""
return self.state == STATE_RUNNING
def was_accepted(self):
"""
Indicates if the task was accepted by the agent.
:rtype: bool
"""
return self.state == STATE_ACCEPTED
def is_completed(self):
"""
Indicates if the task has finished running and will not begin again,
regardless of the result (error, success, or canceled).
:rtype: bool
"""
return self.state in COMPLETED_STATES
def was_successful(self):
"""
Indicates if a task finished successfully. If the task is not finished,
this call returns False.
:rtype: bool
"""
return self.state == STATE_FINISHED
def was_failure(self):
"""
Indicates if a task finished with an error. If the task is not finished,
this call returns False.
:rtype: bool
"""
return self.state == STATE_ERROR
def was_skipped(self):
"""
Indicates if a task was skipped. If the task is not finished, this call
returns False
:rtype: bool
"""
return self.state == STATE_SKIPPED
def was_cancelled(self):
"""
Indicates if a task was cancelled.
:rtype: bool
"""
return self.state == STATE_CANCELED
def __str__(self):
"""
Return a string representation of this Task.
:return: String representation of self
:rtype: unicode
"""
return _(u'Task: %(id)s State: %(state)s') % {'id': self.task_id, 'state': self.state}
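# Illustrative sketch (not part of the original module): polling a queued task
# with the helpers above.  `tasks_api.get_task` is a hypothetical call that
# re-fetches the task state from the server.
#
#   task = Task(response.response_body)
#   while not task.is_completed():
#       time.sleep(1)
#       task = Task(tasks_api.get_task(task.task_id).response_body)
#   if task.was_successful():
#       print task.result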
class BlockingReason(object):
"""
Represents a single reason a task was postponed or blocked.
"""
def __init__(self, resource_id, resource_type, operation):
self.resource_id = resource_id
self.resource_type = resource_type
self.operation = operation
def __str__(self):
subs = {'i': self.resource_id, 't': self.resource_type, 'o': self.operation}
return _('BlockingReason: id [%(i)s] type [%(t)s] operation [%(o)s]') % subs
| gpl-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/httplib.py | 11 | 50897 | r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from array import array
import os
import re
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
# maximum amount of headers accepted
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
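# Illustrative examples (not part of the original module) of how the two
# patterns above classify header fields:
#
#   _is_legal_header_name('Content-Type')            # match: a plain token
#   _is_legal_header_name('X:Y')                      # no match: embedded colon
#   _is_illegal_header_value('foo\r\nbar: injected')  # match: bare CRLF (illegal)
#   _is_illegal_header_value('folded\r\n value')      # no match: obs-fold allowed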
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
class HTTPMessage(mimetools.Message):
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if len(hlist) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
elif headerseen is not None:
# An empty header name. These aren't allowed in HTTP, but it's
# probably a benign mistake. Don't add the header, just keep
# going.
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
# parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
# false because it prevents clients from talking to HTTP/0.9
# servers. Note that a response with a sufficiently corrupted
# status line will look like an HTTP/0.9 response.
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
if buffering:
# The caller won't be using any sock.recv() calls, so buffering
# is fine and recommended for performance.
self.fp = sock.makefile('rb')
else:
# The buffer size is specified as zero, because the headers of
# the response are read with readline(). If the reads were
# buffered the readline() calls could consume some of the
            # response, which may be read via a recv() on the underlying
# socket.
self.fp = sock.makefile('rb', 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
# Initialize with Simple-Response defaults
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# assume it's a Simple-Response from an 0.9 server
self.fp = LineAndFileWrapper(line, self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print "header:", skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print "header:", hdr,
# don't let the msg keep an fp
self.msg.fp = None
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == 'HEAD'):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if not self.will_close and \
not self.chunked and \
self.length is None:
self.will_close = 1
def _check_close(self):
conn = self.msg.getheader('connection')
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.msg.getheader('connection')
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.getheader('keep-alive'):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.msg.getheader('proxy-connection')
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def close(self):
fp = self.fp
if fp:
self.fp = None
fp.close()
def isclosed(self):
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
# XXX It would be nice to have readline and __iter__ for this, too.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if self.chunked:
return self._read_chunked(amt)
if amt is None:
# unbounded read
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self.close()
raise
self.length = 0
self.close() # we read everything
return s
if self.length is not None:
if amt > self.length:
# clip the read to the "end of response"
amt = self.length
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
if not s and amt:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self.close()
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
def _read_chunked(self, amt):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(''.join(value))
if chunk_left == 0:
break
if amt is None:
value.append(self._safe_read(chunk_left))
elif amt < chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
return ''.join(value)
elif amt == chunk_left:
value.append(self._safe_read(amt))
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return ''.join(value)
else:
value.append(self._safe_read(chunk_left))
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return ''.join(value)
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
# NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never
# return less than x bytes unless EOF is encountered. It now handles
# signal interruptions (socket.error EINTR) internally. This code
# never caught that exception anyways. It seems largely pointless.
# self.fp.read(amt) will work fine.
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return ''.join(s)
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
if strict is not None:
self.strict = strict
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unittests
# to replace with a suitable mock
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
""" Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP Connect tunneling, the host passed to the
constructor is used as proxy server that relays all communication to the
endpoint passed to set_tunnel. This is done by sending a HTTP CONNECT
request to the proxy server when the connection is established.
This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
# Verify if this is required.
if self.sock:
raise RuntimeError("Can't setup tunnel for established connection.")
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
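    # Illustrative sketch (not part of the original module): tunneling an HTTP
    # request through a proxy with set_tunnel().  Host names and the proxy port
    # are hypothetical.
    #
    #   conn = HTTPConnection("proxy.example.com", 3128)
    #   conn.set_tunnel("www.example.com", 80)
    #   conn.request("GET", "/")    # issues CONNECT to the proxy, then the GET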
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
(host, port) = self._get_hostport(self._tunnel_host, self._tunnel_port)
self.send("CONNECT %s:%d HTTP/1.0\r\n" % (host, port))
for header, value in self._tunnel_headers.iteritems():
self.send("%s: %s\r\n" % (header, value))
self.send("\r\n")
response = self.response_class(self.sock, strict = self.strict,
method = self._method)
(version, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending trailer
break
if line == '\r\n':
break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection((self.host,self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(data)
blocksize = 8192
if hasattr(data,'read') and not isinstance(data, array):
if self.debuglevel > 0: print "sendIng a read()able"
datablock = data.read(blocksize)
while datablock:
self.sock.sendall(datablock)
datablock = data.read(blocksize)
else:
self.sock.sendall(data)
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
#message_body was not a string (i.e. it is a file) and
#we must run the risk of Nagle
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not automatically add a 'Host:' header
        `skip_accept_encoding' if True does not automatically add an
           'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest()
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(hdr)
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy, use
                # the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
# Wrap the IPv6 Host Header with [] (RFC 2732)
if host_enc.find(':') >= 0:
host_enc = "[" + host_enc + "]"
if port == self.default_port:
self.putheader('Host', host_enc)
else:
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
header = '%s' % header
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = [str(v) for v in values]
for one_value in values:
if _is_illegal_header_value(one_value):
raise ValueError('Invalid header value %r' % (one_value,))
hdr = '%s: %s' % (header, '\r\n\t'.join(values))
self._output(hdr)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass a message body
associated with the request. The message body will be sent in
        the same packet as the message headers if it is a string, otherwise it is
sent as a separate packet.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body, method):
# Set the content-length based on the body. If the body is "empty", we
# set Content-Length: 0 for methods that expect a body (RFC 7230,
# Section 3.3.2). If the body is set for other methods, we set the
# header provided we can figure out what the length is.
thelen = None
if body is None and method.upper() in _METHODS_EXPECTING_BODY:
thelen = '0'
elif body is not None:
try:
thelen = str(len(body))
except TypeError:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if 'content-length' not in header_names:
self._set_content_length(body, method)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders(body)
def getresponse(self, buffering=False):
"Get the response from the server."
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
args = (self.sock,)
kwds = {"strict":self.strict, "method":self._method}
if self.debuglevel > 0:
args += (self.debuglevel,)
if buffering:
#only add this keyword if non-default, for compatibility with
#other response_classes.
kwds["buffering"] = True;
response = self.response_class(*args, **kwds)
try:
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
class HTTP:
"Compatibility class with httplib.py from 1.5."
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will raise
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.putheader = conn.putheader
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
if host is not None:
(self._conn.host, self._conn.port) = self._conn._get_hostport(host, port)
self._conn.connect()
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
try:
if not buffering:
response = self._conn.getresponse()
else:
#only add this keyword if non-default for compatibility
#with other connection classes
response = self._conn.getresponse(buffering)
except BadStatusLine, e:
### hmm. if getresponse() ever closes the socket on a bad request,
### then we are going to have problems with self.sock
### should we keep this behavior? do people use it?
# keep the socket open (as a file), and return it
self.file = self._conn.sock.makefile('rb', 0)
# close our socket -- we want to restart after any protocol error
self.close()
self.headers = None
return -1, e.line, None
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, context=None):
HTTPConnection.__init__(self, host, port, strict, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
def connect(self):
"Connect to a host on a given (SSL) port."
HTTPConnection.connect(self)
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
__all__.append("HTTPSConnection")
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None, context=None):
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict,
context=context))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
def FakeSocket (sock, sslobj):
warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
"Use the result of ssl.wrap_socket() directly instead.",
DeprecationWarning, stacklevel=2)
return sslobj
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
| mit |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/test/test_resource.py | 70 | 4429 | import unittest
from test import test_support
import resource
import time
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
def test_args(self):
self.assertRaises(TypeError, resource.getrlimit)
self.assertRaises(TypeError, resource.getrlimit, 42, 42)
self.assertRaises(TypeError, resource.setrlimit)
self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)
def test_fsize_ismax(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
# number on a platform with large file support. On these platforms,
# we need to test that the get/setrlimit functions properly convert
# the number to a C long long and that the conversion doesn't raise
# an error.
self.assertEqual(resource.RLIM_INFINITY, max)
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
def test_fsize_enforced(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# Check to see what happens when the RLIMIT_FSIZE is small. Some
# versions of Python were terminated by an uncaught SIGXFSZ, but
# pythonrun.c has been fixed to ignore that exception. If so, the
# write() should return EFBIG when the limit is exceeded.
# At least one platform has an unlimited RLIMIT_FSIZE and attempts
# to change it raise ValueError instead.
try:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
limit_set = True
except ValueError:
limit_set = False
f = open(test_support.TESTFN, "wb")
try:
f.write("X" * 1024)
try:
f.write("Y")
f.flush()
# On some systems (e.g., Ubuntu on hppa) the flush()
# doesn't always cause the exception, but the close()
# does eventually. Try flushing several times in
# an attempt to ensure the file is really synced and
# the exception raised.
for i in range(5):
time.sleep(.1)
f.flush()
except IOError:
if not limit_set:
raise
if limit_set:
# Close will attempt to flush the byte we wrote
# Restore limit first to avoid getting a spurious error
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
finally:
f.close()
finally:
if limit_set:
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
test_support.unlink(test_support.TESTFN)
def test_fsize_toobig(self):
# Be sure that setrlimit is checking for really large values
too_big = 10L**50
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
except (OverflowError, ValueError):
pass
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
except (OverflowError, ValueError):
pass
def test_getrusage(self):
self.assertRaises(TypeError, resource.getrusage)
self.assertRaises(TypeError, resource.getrusage, 42, 42)
usageself = resource.getrusage(resource.RUSAGE_SELF)
usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
# May not be available on all systems.
try:
usageboth = resource.getrusage(resource.RUSAGE_BOTH)
except (ValueError, AttributeError):
pass
def test_main(verbose=None):
test_support.run_unittest(ResourceTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
cxhernandez/pcled | setup.py | 1 | 2235 | """PCLED: Control your Arduino's LED strip through your PC"""
import sys
from glob import glob
from setuptools import setup, find_packages
NAME = "pcled"
VERSION = "0.1"
def read(filename):
import os
BASE_DIR = os.path.dirname(__file__)
filename = os.path.join(BASE_DIR, filename)
with open(filename, 'r') as fi:
return fi.read()
def readlist(filename):
rows = read(filename).split("\n")
rows = [x.strip() for x in rows if x.strip()]
return list(rows)
# If we are running on Python 3, enable 2to3 so that setuptools converts the
# Python 2 sources at build time.
extra = {}
if sys.version_info >= (3, 0):
extra.update(
use_2to3=True,
)
setup(
name=NAME,
version=VERSION,
description=("PCLED allows you to control your Arduino's "
"LED strip through your PC."),
long_description=read('README.rst'),
platforms=(
"Windows", "Linux", "Mac OS-X", "Unix",
),
classifiers=(
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Operating System :: Unix',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Topic :: Scientific/Engineering',
),
keywords='led arduino',
author="Carlos Xavier Hernandez",
author_email="cxh@stanford.edu",
url='https://github.com/cxhernandez/%s' % NAME,
download_url='https://github.com/cxhernandez/%s/tarball/master' % NAME,
license='MIT',
packages=find_packages(),
include_package_data=True,
package_data={
'': ['README.rst',
'requirements.txt'],
},
zip_safe=True,
scripts=glob('./scripts/python/*.py'),
install_requires=readlist('requirements.txt'),
**extra
)
| mit |
ltworf/siddio | homecontrol/homecontrol/devices.py | 1 | 3943 | # Siddio
# Copyright (C) 2017 Salvo "LtWorf" Tomaselli
#
# Siddio is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# author Salvo "LtWorf" Tomaselli <tiposchi@tiscali.it>
import collections
import socket
import struct
from syslog import *
from typing import List
SETSTATE = b's'
GETSTATE = b'g'
GETCOUNT = b'c'
GETNAME = b'n'
GETDESCR = b'd'
GETTAGS = b't'
class Device(collections.namedtuple('device', ('name', 'description', 'tags', 'host', 'port', 'dev_id'))):
'''
    A device on an iocontrol daemon.
    It can query the daemon for the device state and change it.
The other properties are set statically when probing the iocontrol for
devices.
The properties dictionary contains the extra properties defined
by the user in the configuration file. They are extracted from the
tags.
'''
def __new__(cls, *args, **kwargs):
self = super(Device, cls).__new__(cls, *args, **kwargs)
# Set keyword properties
self.properties = {}
for tag in self.tags:
components = tag.split(':', 1)
if len(components) == 2:
self.properties[components[0]] = components[1]
return self
def get_state(self) -> bool:
'''
        Returns the cached state of the device.
        Only communicates with the device if no state has been cached yet.
'''
if not hasattr(self, '_state'):
self.update()
return self._state
def update(self) -> None:
'''
Queries the iocontrol to obtain the state of the device
'''
s = socket.socket(socket.AF_INET)
s.connect((self.host, self.port))
s.send(GETSTATE + self.dev_id)
self._state = s.recv(1) == b'\x01'
s.close()
def switch(self, new_state: bool) -> None:
if new_state == self.get_state():
return
self._state = new_state
state = b'\1' if new_state else b'\0'
s = socket.socket(socket.AF_INET)
s.connect((self.host, self.port))
s.send(SETSTATE + self.dev_id + state)
s.close()
def _devices(host: str, port: int) -> List[Device]:
r = []
try:
s = socket.socket(socket.AF_INET)
s.connect((host, port))
s.send(GETCOUNT)
fmt = '!B'
count = struct.unpack(fmt,s.recv(1))[0]
for i in range(count):
dev_id = struct.pack(fmt, i)
s.send(GETNAME + dev_id +
GETDESCR + dev_id +
GETTAGS + dev_id)
# Read until the required data is over
data = b''
while len(data.split(b'\0')) != 4:
data += s.recv(2048)
name, descr, tags, _ = (i.decode('utf8') for i in data.split(b'\0'))
tags_set = set(tags.split(','))
r.append(Device(name, descr, tags_set, host, port, dev_id))
s.close()
except Exception as e:
        syslog(LOG_WARNING, 'Connection problem with %s:%d %s' % (host, port, e))
return r
def devices():
r = []
#TODO read hosts from configuration file
hosts = (('10.9', 4141), ('10.11', 4141))
for host, port in hosts:
syslog(LOG_INFO, 'Querying iocontrol %s:%d for devices' % (host, port))
devs = _devices(host, port)
r += devs
syslog(LOG_INFO, 'Reported %d devices' % len(devs))
return r
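# Minimal usage sketch (not part of the original module); the address, port and
# tag names below are illustrative assumptions only.
if __name__ == '__main__':
    # Probe a single iocontrol daemon and report each device it exposes.
    for dev in _devices('192.0.2.10', 4141):
        # Tags of the form "key:value" are exposed through dev.properties.
        room = dev.properties.get('room', 'unknown')
        print('%s (%s): %s' % (dev.name, room, 'on' if dev.get_state() else 'off'))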
| gpl-3.0 |
katrid/django | tests/files/tests.py | 277 | 11286 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile
from django.test import mock
from django.utils import six
from django.utils._os import upath
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
def test_unicode_uploadedfile_name(self):
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertIs(type(repr(uf)), str)
def test_unicode_file_name(self):
f = File(None, 'djángö')
self.assertIs(type(repr(f)), str)
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
def test_file_iteration_windows_newlines(self):
"""
#8149 - File objects with \r\n line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_mac_newlines(self):
"""
#8149 - File objects with \r line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\rtwo\rthree'))
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_mixed_newlines(self):
f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\ntwo\nthree'))
# Set chunk size to create a boundary after \n:
# b'one\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
# Set chunk size to create a boundary between \r and \n:
# b'one\r\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\rtwo\rthree'))
# Set chunk size to create a boundary after \r:
# b'one\r...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_with_text(self):
f = File(StringIO('one\ntwo\nthree'))
self.assertEqual(list(f), ['one\n', 'two\n', 'three'])
def test_seekable(self):
"""
File.seekable() should be available on Python 3.
"""
with tempfile.TemporaryFile() as temp:
temp.write(b"contents\n")
test_file = File(temp, name="something.txt")
if six.PY2:
self.assertFalse(hasattr(test_file, 'seekable'))
if six.PY3:
self.assertTrue(hasattr(test_file, 'seekable'))
self.assertTrue(test_file.seekable())
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertEqual(File(BytesIO(b'A file with no name')).name, None)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class ContentFileTestCase(unittest.TestCase):
def test_content_file_default_name(self):
self.assertEqual(ContentFile(b"content").name, None)
def test_content_file_custom_name(self):
"""
Test that the constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
Test that ContentFile can accept both bytes and unicode and that the
retrieved content is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
if six.PY3:
self.assertIsInstance(ContentFile("español").read(), six.text_type)
else:
self.assertIsInstance(ContentFile("español").read(), bytes)
class DimensionClosingBug(unittest.TestCase):
"""
Test that get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
        get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
        # called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
Test that get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with open(img_path, 'rb') as fh:
image = images.ImageFile(fh)
image_pil = Image.open(fh)
size_1 = images.get_image_dimensions(image)
size_2 = images.get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow not installed")
def test_bug_19457(self):
"""
Regression test for #19457
        get_image_dimensions fails on some PNGs, while Image.size works correctly on them
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
size = images.get_image_dimensions(img_path)
with open(img_path, 'rb') as fh:
self.assertEqual(size, Image.open(fh).size)
@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):
def test_invalid_image(self):
"""
get_image_dimensions() should return (None, None) for the dimensions of
invalid images (#24441).
brokenimg.png is not a valid image and it has been generated by:
$ echo "123" > brokenimg.png
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "brokenimg.png")
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
def test_valid_image(self):
"""
get_image_dimensions() should catch struct.error while feeding the PIL
Image parser (#24544).
Emulates the Parser feed error. Since the error is raised on every feed
attempt, the resulting image size should be invalid: (None, None).
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
self.assertRaises(IOError, lambda: file_move_safe(self.file_a, self.file_b, allow_overwrite=False))
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
def test_in_memory_spooled_temp(self):
with tempfile.SpooledTemporaryFile() as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
def test_written_spooled_temp(self):
with tempfile.SpooledTemporaryFile(max_size=4) as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
| bsd-3-clause |
prune998/ansible | lib/ansible/utils/module_docs_fragments/vmware.py | 149 | 1471 | # (c) 2016, Charles Paul <cpaul@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Parameters for VMware modules
DOCUMENTATION = '''
options:
hostname:
description:
- The hostname or IP address of the vSphere vCenter.
required: True
username:
description:
- The username of the vSphere vCenter.
required: True
aliases: ['user', 'admin']
password:
description:
- The password of the vSphere vCenter.
required: True
aliases: ['pass', 'pwd']
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to
false when certificates are not trusted.
required: False
default: 'True'
choices: ['True', 'False']
'''
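# Illustrative sketch (not part of the Ansible source): a module that declares
# this fragment via "extends_documentation_fragment: vmware" inherits the
# options above. A hypothetical task using them might look like:
#
#   - name: Gather guest facts          # hypothetical module and values
#     vmware_guest_facts:
#       hostname: vcenter.example.com
#       username: administrator@vsphere.local
#       password: "{{ vcenter_password }}"
#       validate_certs: no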
| gpl-3.0 |
rschnapka/server-tools | base_suspend_security/models/ir_rule.py | 33 | 1678 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
from ..base_suspend_security import BaseSuspendSecurityUid, SUSPEND_METHOD
class IrRule(models.Model):
_inherit = 'ir.rule'
@api.model
def domain_get(self, model_name, mode='read'):
if isinstance(self.env.uid, BaseSuspendSecurityUid):
return [], [], ['"%s"' % self.pool[model_name]._table]
return super(IrRule, self).domain_get(model_name, mode=mode)
def _register_hook(self, cr):
if not hasattr(models.BaseModel, SUSPEND_METHOD):
setattr(models.BaseModel, SUSPEND_METHOD,
lambda self: self.sudo(
user=BaseSuspendSecurityUid(self.env.uid)))
return super(IrRule, self)._register_hook(cr)
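# Usage sketch (assumption, not part of this module): _register_hook attaches a
# method named by SUSPEND_METHOD (conventionally 'suspend_security') to every
# model, and domain_get skips record rules for the resulting environment, e.g.:
#
#   partner = self.env['res.partner'].browse(partner_id)
#   partner.suspend_security().write({'name': 'written without rule checks'})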
| agpl-3.0 |
jrshust/spark | examples/src/main/python/mllib/isotonic_regression_example.py | 100 | 2394 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Isotonic Regression Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
import math
from pyspark.mllib.regression import LabeledPoint, IsotonicRegression, IsotonicRegressionModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonIsotonicRegressionExample")
# $example on$
# Load and parse the data
def parsePoint(labeledData):
return (labeledData.label, labeledData.features[0], 1.0)
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_isotonic_regression_libsvm_data.txt")
# Create label, feature, weight tuples from input data with weight set to default value 1.0.
parsedData = data.map(parsePoint)
# Split data into training (60%) and test (40%) sets.
training, test = parsedData.randomSplit([0.6, 0.4], 11)
# Create isotonic regression model from training data.
    # The isotonic parameter defaults to true (increasing fit), so it is omitted here.
model = IsotonicRegression.train(training)
# Create tuples of predicted and real labels.
predictionAndLabel = test.map(lambda p: (model.predict(p[1]), p[0]))
# Calculate mean squared error between predicted and real labels.
meanSquaredError = predictionAndLabel.map(lambda pl: math.pow((pl[0] - pl[1]), 2)).mean()
print("Mean Squared Error = " + str(meanSquaredError))
# Save and load model
model.save(sc, "target/tmp/myIsotonicRegressionModel")
sameModel = IsotonicRegressionModel.load(sc, "target/tmp/myIsotonicRegressionModel")
# $example off$
| apache-2.0 |
40223119/w17test | static/Brython3.1.0-20150301-090019/Lib/stat.py | 765 | 4304 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
"""Return the portion of the file's mode that can be set by
os.chmod().
"""
return mode & 0o7777
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFBLK = 0o060000 # block device
S_IFREG = 0o100000 # regular file
S_IFIFO = 0o010000 # fifo (named pipe)
S_IFLNK = 0o120000 # symbolic link
S_IFSOCK = 0o140000 # socket file
# Functions to test for each file type
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
"""Return True if mode is from a character special device file."""
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
"""Return True if mode is from a block special device file."""
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
"""Return True if mode is from a regular file."""
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
"""Return True if mode is from a FIFO (named pipe)."""
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
"""Return True if mode is from a symbolic link."""
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
"""Return True if mode is from a socket."""
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others
# Names for file flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
_filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((S_IRUSR, "r"),),
((S_IWUSR, "w"),),
((S_IXUSR|S_ISUID, "s"),
(S_ISUID, "S"),
(S_IXUSR, "x")),
((S_IRGRP, "r"),),
((S_IWGRP, "w"),),
((S_IXGRP|S_ISGID, "s"),
(S_ISGID, "S"),
(S_IXGRP, "x")),
((S_IROTH, "r"),),
((S_IWOTH, "w"),),
((S_IXOTH|S_ISVTX, "t"),
(S_ISVTX, "T"),
(S_IXOTH, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form '-rwxrwxrwx'."""
perm = []
for table in _filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
| gpl-3.0 |
guru-digital/CouchPotatoServer | libs/pyutil/test/out_of_shape/test_strutil.py | 106 | 1713 | #!/usr/bin/env python
# Copyright (c) 2004-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import unittest
from pyutil.assertutil import _assert
from pyutil import strutil
class Teststrutil(unittest.TestCase):
def test_short_input(self):
self.failUnless(strutil.pop_trailing_newlines("\r\n") == "")
self.failUnless(strutil.pop_trailing_newlines("\r") == "")
self.failUnless(strutil.pop_trailing_newlines("x\r\n") == "x")
self.failUnless(strutil.pop_trailing_newlines("x\r") == "x")
def test_split(self):
_assert(strutil.split_on_newlines("x\r\ny") == ["x", "y",], strutil.split_on_newlines("x\r\ny"))
_assert(strutil.split_on_newlines("x\r\ny\r\n") == ["x", "y", '',], strutil.split_on_newlines("x\r\ny\r\n"))
_assert(strutil.split_on_newlines("x\n\ny\n\n") == ["x", '', "y", '', '',], strutil.split_on_newlines("x\n\ny\n\n"))
def test_commonprefix(self):
_assert(strutil.commonprefix(["foo","foobarooo", "foosplat",]) == 'foo', strutil.commonprefix(["foo","foobarooo", "foosplat",]))
_assert(strutil.commonprefix(["foo","afoobarooo", "foosplat",]) == '', strutil.commonprefix(["foo","afoobarooo", "foosplat",]))
def test_commonsuffix(self):
_assert(strutil.commonsuffix(["foo","foobarooo", "foosplat",]) == '', strutil.commonsuffix(["foo","foobarooo", "foosplat",]))
_assert(strutil.commonsuffix(["foo","foobarooo", "foosplato",]) == 'o', strutil.commonsuffix(["foo","foobarooo", "foosplato",]))
_assert(strutil.commonsuffix(["foo","foobarooofoo", "foosplatofoo",]) == 'foo', strutil.commonsuffix(["foo","foobarooofoo", "foosplatofoo",]))
| gpl-3.0 |
Spiderlover/Toontown | toontown/safezone/ChineseCheckersBoard.py | 4 | 15595 |
class ChineseCheckersBoard:
def __init__(self):
self.squareList = []
for x in xrange(121):
self.squareList.append(CheckersSquare(x))
self.squareList[0].setAdjacent([None,
1,
2,
None,
None,
None])
self.squareList[1].setAdjacent([None,
3,
4,
2,
0,
None])
self.squareList[2].setAdjacent([1,
4,
5,
None,
None,
0])
self.squareList[3].setAdjacent([None,
6,
7,
4,
1,
None])
self.squareList[4].setAdjacent([3,
7,
8,
5,
2,
1])
self.squareList[5].setAdjacent([4,
8,
9,
None,
None,
2])
self.squareList[6].setAdjacent([None,
14,
15,
7,
3,
None])
self.squareList[7].setAdjacent([6,
15,
16,
8,
4,
3])
self.squareList[8].setAdjacent([7,
16,
17,
9,
5,
4])
self.squareList[9].setAdjacent([8,
17,
18,
None,
None,
5])
self.squareList[10].setAdjacent([None,
None,
23,
11,
None,
None])
self.squareList[11].setAdjacent([10,
23,
24,
12,
None,
None])
self.squareList[12].setAdjacent([11,
24,
25,
13,
None,
None])
self.squareList[13].setAdjacent([12,
25,
26,
14,
None,
None])
self.squareList[14].setAdjacent([13,
26,
27,
15,
6,
None])
self.squareList[15].setAdjacent([14,
27,
28,
16,
7,
6])
self.squareList[16].setAdjacent([15,
28,
29,
17,
8,
7])
self.squareList[17].setAdjacent([16,
29,
30,
18,
9,
8])
self.squareList[18].setAdjacent([17,
30,
31,
19,
None,
9])
        self.squareList[19].setAdjacent([18,
         31,
         32,
         20,
         None,
         None])
self.squareList[20].setAdjacent([19,
32,
33,
21,
None,
None])
self.squareList[21].setAdjacent([20,
33,
34,
22,
None,
None])
self.squareList[22].setAdjacent([21,
34,
None,
None,
None,
None])
self.squareList[23].setAdjacent([None,
None,
35,
24,
11,
10])
self.squareList[24].setAdjacent([23,
35,
36,
25,
12,
11])
self.squareList[25].setAdjacent([24,
36,
37,
26,
13,
12])
self.squareList[26].setAdjacent([25,
37,
38,
27,
14,
13])
self.squareList[27].setAdjacent([26,
38,
39,
28,
15,
14])
self.squareList[28].setAdjacent([27,
39,
40,
29,
16,
15])
self.squareList[29].setAdjacent([28,
40,
41,
30,
17,
16])
self.squareList[30].setAdjacent([29,
41,
42,
31,
18,
17])
self.squareList[31].setAdjacent([30,
42,
43,
32,
19,
18])
self.squareList[32].setAdjacent([31,
43,
44,
33,
20,
19])
self.squareList[33].setAdjacent([32,
44,
45,
34,
21,
20])
self.squareList[34].setAdjacent([33,
45,
None,
None,
22,
21])
self.squareList[35].setAdjacent([None,
None,
46,
36,
24,
23])
self.squareList[36].setAdjacent([35,
46,
47,
37,
25,
24])
self.squareList[37].setAdjacent([36,
47,
48,
38,
26,
25])
self.squareList[38].setAdjacent([37,
48,
49,
39,
27,
26])
self.squareList[39].setAdjacent([38,
49,
50,
40,
28,
27])
self.squareList[40].setAdjacent([39,
50,
51,
41,
29,
28])
self.squareList[41].setAdjacent([40,
51,
52,
42,
30,
29])
self.squareList[42].setAdjacent([41,
52,
53,
43,
31,
30])
self.squareList[43].setAdjacent([42,
53,
54,
44,
32,
31])
self.squareList[44].setAdjacent([43,
54,
55,
45,
33,
32])
self.squareList[45].setAdjacent([44,
55,
None,
None,
34,
33])
self.squareList[46].setAdjacent([None,
None,
56,
47,
36,
35])
self.squareList[47].setAdjacent([46,
56,
57,
48,
37,
36])
self.squareList[48].setAdjacent([47,
57,
58,
49,
38,
37])
self.squareList[49].setAdjacent([48,
58,
59,
50,
39,
38])
self.squareList[50].setAdjacent([49,
59,
60,
51,
40,
39])
self.squareList[51].setAdjacent([50,
60,
61,
52,
41,
40])
self.squareList[52].setAdjacent([51,
61,
62,
53,
42,
41])
self.squareList[53].setAdjacent([52,
62,
63,
54,
43,
42])
self.squareList[54].setAdjacent([53,
63,
64,
55,
44,
43])
self.squareList[55].setAdjacent([54,
64,
None,
None,
45,
44])
self.squareList[56].setAdjacent([None,
65,
66,
57,
47,
46])
self.squareList[57].setAdjacent([56,
66,
67,
58,
48,
47])
self.squareList[58].setAdjacent([57,
67,
68,
59,
49,
48])
self.squareList[59].setAdjacent([58,
68,
69,
60,
50,
49])
self.squareList[60].setAdjacent([59,
69,
70,
61,
51,
50])
self.squareList[61].setAdjacent([60,
70,
71,
62,
52,
51])
self.squareList[62].setAdjacent([61,
71,
72,
63,
53,
52])
self.squareList[63].setAdjacent([62,
72,
73,
64,
54,
53])
self.squareList[64].setAdjacent([63,
73,
74,
None,
55,
54])
self.squareList[65].setAdjacent([None,
75,
76,
66,
56,
None])
self.squareList[66].setAdjacent([65,
76,
77,
67,
57,
56])
self.squareList[67].setAdjacent([66,
77,
78,
68,
58,
57])
self.squareList[68].setAdjacent([67,
78,
79,
69,
59,
58])
self.squareList[69].setAdjacent([68,
79,
80,
70,
60,
         59])
self.squareList[70].setAdjacent([69,
80,
81,
71,
61,
60])
self.squareList[71].setAdjacent([70,
81,
82,
72,
62,
61])
self.squareList[72].setAdjacent([71,
82,
83,
73,
63,
62])
self.squareList[73].setAdjacent([72,
83,
84,
74,
64,
63])
self.squareList[74].setAdjacent([73,
84,
85,
None,
None,
64])
self.squareList[75].setAdjacent([None,
86,
87,
76,
65,
None])
self.squareList[76].setAdjacent([75,
87,
88,
77,
66,
65])
self.squareList[77].setAdjacent([76,
88,
89,
78,
67,
66])
self.squareList[78].setAdjacent([77,
89,
90,
79,
68,
67])
self.squareList[79].setAdjacent([78,
90,
91,
80,
69,
68])
self.squareList[80].setAdjacent([79,
91,
92,
81,
70,
69])
self.squareList[81].setAdjacent([80,
92,
93,
82,
71,
70])
self.squareList[82].setAdjacent([81,
93,
94,
83,
72,
71])
self.squareList[83].setAdjacent([82,
94,
95,
84,
73,
72])
self.squareList[84].setAdjacent([83,
95,
96,
85,
74,
73])
self.squareList[85].setAdjacent([84,
96,
97,
None,
None,
74])
self.squareList[86].setAdjacent([None,
98,
99,
87,
75,
None])
self.squareList[87].setAdjacent([86,
99,
100,
88,
76,
75])
self.squareList[88].setAdjacent([87,
100,
101,
89,
77,
76])
self.squareList[89].setAdjacent([88,
101,
102,
90,
78,
77])
self.squareList[90].setAdjacent([89,
102,
103,
91,
79,
78])
self.squareList[91].setAdjacent([90,
103,
104,
92,
80,
79])
self.squareList[92].setAdjacent([91,
104,
105,
93,
81,
80])
self.squareList[93].setAdjacent([92,
105,
106,
94,
82,
81])
self.squareList[94].setAdjacent([93,
106,
107,
95,
83,
82])
self.squareList[95].setAdjacent([94,
107,
108,
96,
84,
83])
self.squareList[96].setAdjacent([95,
108,
109,
97,
85,
84])
self.squareList[97].setAdjacent([96,
109,
110,
None,
None,
85])
self.squareList[98].setAdjacent([None,
None,
None,
99,
86,
None])
self.squareList[99].setAdjacent([98,
None,
None,
100,
87,
86])
self.squareList[100].setAdjacent([99,
None,
None,
101,
88,
87])
self.squareList[101].setAdjacent([100,
None,
None,
102,
89,
88])
self.squareList[102].setAdjacent([101,
None,
111,
103,
90,
89])
self.squareList[103].setAdjacent([102,
111,
112,
104,
91,
90])
self.squareList[104].setAdjacent([103,
112,
113,
105,
92,
91])
self.squareList[105].setAdjacent([104,
113,
114,
106,
93,
92])
self.squareList[106].setAdjacent([105,
114,
None,
107,
94,
93])
self.squareList[107].setAdjacent([106,
None,
None,
108,
95,
94])
self.squareList[108].setAdjacent([107,
None,
None,
109,
96,
95])
self.squareList[109].setAdjacent([108,
None,
None,
110,
97,
96])
self.squareList[110].setAdjacent([109,
None,
None,
None,
None,
97])
self.squareList[111].setAdjacent([None,
None,
115,
112,
103,
102])
self.squareList[112].setAdjacent([111,
115,
116,
113,
104,
103])
self.squareList[113].setAdjacent([112,
116,
117,
114,
105,
104])
self.squareList[114].setAdjacent([113,
117,
None,
None,
106,
105])
self.squareList[115].setAdjacent([None,
None,
118,
116,
112,
111])
self.squareList[116].setAdjacent([115,
118,
119,
117,
113,
112])
self.squareList[117].setAdjacent([116,
119,
None,
None,
114,
113])
self.squareList[118].setAdjacent([None,
None,
120,
119,
116,
115])
self.squareList[119].setAdjacent([118,
120,
None,
None,
117,
116])
self.squareList[120].setAdjacent([None,
None,
None,
None,
119,
118])
return
def delete(self):
for x in self.squareList:
x.delete()
del self.squareList
def getSquare(self, arrayLoc):
return self.squareList[arrayLoc]
def getSquareOffset(self, arrayLoc):
return self.squareList[arrayLoc - 1]
def getState(self, squareNum):
return self.squareList[squareNum].getState()
    def getStateOffset(self, squareNum):
        return self.squareList[squareNum - 1].getState()
def setState(self, squareNum, newState):
self.squareList[squareNum].setState(newState)
def setStateOffset(self, squareNum, newState):
self.squareList[squareNum - 1].setState(newState)
def getAdjacent(self, squareNum):
return self.squareList[squareNum].adjacent
def getAdjacentOffset(self, squareNum):
return self.squareList[squareNum - 1].adjacent
def getStates(self):
retList = []
for x in xrange(121):
retList.append(self.squareList[x].getState())
return retList
def setStates(self, squares):
y = 0
for x in xrange(121):
self.squareList[x].setState(squares[x])
class CheckersSquare:
    def __init__(self, tileNum):
        self.tileNum = tileNum
self.state = 0
self.adjacent = []
def delete(self):
del self.tileNum
del self.state
del self.adjacent
def setAdjacent(self, adjList):
for x in adjList:
self.adjacent.append(x)
def getAdjacent(self):
return self.adjacent
def setState(self, newState):
self.state = newState
def getState(self):
return self.state
def getNum(self):
return self.tileNum
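# Minimal usage sketch (assumption: not part of the original Toontown source);
# it only exercises the board API defined above.
if __name__ == '__main__':
    board = ChineseCheckersBoard()
    # Square 0 sits at the tip of the star; its only neighbours are 1 and 2.
    print board.getAdjacent(0)
    # Mark square 4 as occupied by player 1, then read the state back.
    board.setState(4, 1)
    print board.getState(4)
    board.delete()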
| mit |
idem2lyon/persomov | libs/pyutil/test/deprecated/test_picklesaver.py | 106 | 1340 | #!/usr/bin/env python
# Copyright (c) 2002 Luke 'Artimage' Nelson
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import os
try:
from twisted.trial import unittest
except ImportError, le:
print "Skipping %s since it requires Twisted and Twisted could not be imported: %s" % (__name__, le,)
else:
from pyutil import PickleSaver, fileutil
class Thingie(PickleSaver.PickleSaver):
def __init__(self, fname, delay=30):
PickleSaver.PickleSaver.__init__(self, fname=fname, attrs={'tmp_store':'False'}, DELAY=delay)
class PickleSaverTest(unittest.TestCase):
def _test_save_now(self, fname):
thingie = Thingie(fname, delay=0)
thingie.tmp_store = 'True'
thingie.lazy_save() # Note: it was constructed with default save delay of 0.
def test_save_now(self):
"""
This test should create a lazy save object, save it with no delay and check if the file exists.
"""
tempdir = fileutil.NamedTemporaryDirectory()
fname = os.path.join(tempdir.name, "picklesavertest")
self._test_save_now(fname)
self.failUnless(os.path.isfile(fname), "The file [%s] does not exist." %(fname,))
tempdir.shutdown()
| gpl-3.0 |
lixt/lily2-gem5 | src/arch/x86/isa/insts/general_purpose/__init__.py | 91 | 2795 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["arithmetic",
"cache_and_memory_management",
"compare_and_test",
"control_transfer",
"data_conversion",
"data_transfer",
"flags",
"input_output",
"load_effective_address",
"load_segment_registers",
"logical",
"no_operation",
"rotate_and_shift",
"semaphores",
"string",
"system_calls"]
microcode = '''
# Microcode for general purpose instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| bsd-3-clause |
janeen666/mi-instrument | mi/dataset/parser/test/test_spkir_abj_dcl.py | 5 | 10626 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_spkir_abj_dcl
@file marine-integrations/mi/dataset/parser/test/test_spkir_abj_dcl.py
@author Steve Myerson
@brief Test code for a spkir_abj_dcl data parser
In the following files, Metadata consists of 4 records.
There is 1 group of Sensor Data records for each set of metadata.
Files used for testing:
20010101.spkir1.log
Metadata - 1 set, Sensor Data - 0 records
20020113.spkir2.log
Metadata - 1 set, Sensor Data - 13 records
20030208.spkir3.log
Metadata - 2 sets, Sensor Data - 8 records
20040305.spkir4.log
Metadata - 3 sets, Sensor Data - 5 records
20050403.spkir5.log
Metadata - 4 sets, Sensor Data - 3 records
20061220.spkir6.log
Metadata - 1 set, Sensor Data - 400 records
20071225.spkir7.log
Metadata - 2 sets, Sensor Data - 250 records
20080401.spkir8.log
This file contains a boatload of invalid sensor data records.
See metadata in file for a list of the errors.
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.spkir_abj.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.spkir_abj_dcl import \
SpkirAbjDclRecoveredParser, \
SpkirAbjDclTelemeteredParser
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
FILE1 = '20010101.spkir1.log'
FILE2 = '20020113.spkir2.log'
FILE3 = '20030208.spkir3.log'
FILE4 = '20040305.spkir4.log'
FILE5 = '20050403.spkir5.log'
FILE6 = '20061220.spkir6.log'
FILE7 = '20071225.spkir7.log'
FILE8 = '20080401.spkir8.log'
EXPECTED_FILE6 = 400
EXPECTED_FILE7 = 500
MODULE_NAME = 'mi.dataset.parser.spkir_abj_dcl'
@attr('UNIT', group='mi')
class SpkirAbjDclParserUnitTestCase(ParserUnitTestCase):
def create_rec_parser(self, file_handle):
"""
This function creates a SpkirAbjDcl parser for recovered data.
"""
parser = SpkirAbjDclRecoveredParser(self.rec_config,
file_handle,
self.rec_exception_callback)
return parser
def create_tel_parser(self, file_handle):
"""
This function creates a SpkirAbjDcl parser for telemetered data.
"""
parser = SpkirAbjDclTelemeteredParser(self.tel_config,
file_handle,
self.tel_exception_callback)
return parser
def open_file(self, filename):
fid = open(os.path.join(RESOURCE_PATH, filename), mode='rb')
return fid
def create_yml(self, particles, filename):
particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))
def rec_exception_callback(self, exception):
""" Call back method to watch what comes in via the exception callback """
self.rec_exception_callback_value = exception
self.rec_exceptions_detected += 1
def tel_exception_callback(self, exception):
""" Call back method to watch what comes in via the exception callback """
self.tel_exception_callback_value = exception
self.tel_exceptions_detected += 1
def setUp(self):
ParserUnitTestCase.setUp(self)
self.rec_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self.tel_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self.rec_state_callback_value = None
self.rec_file_ingested_value = False
self.rec_publish_callback_value = None
self.rec_exception_callback_value = None
self.rec_exceptions_detected = 0
self.tel_state_callback_value = None
self.tel_file_ingested_value = False
self.tel_publish_callback_value = None
self.tel_exception_callback_value = None
self.tel_exceptions_detected = 0
self.maxDiff = None
def test_big_giant_input(self):
"""
Read a large file and verify that all expected particles can be read.
Verification is not done at this time, but will be done during
integration and qualification testing.
"""
log.debug('===== START TEST BIG GIANT INPUT RECOVERED =====')
in_file = self.open_file(FILE6)
parser = self.create_rec_parser(in_file)
# In a single read, get all particles in this file.
number_expected_results = EXPECTED_FILE6
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertEqual(self.rec_exception_callback_value, None)
log.debug('===== START TEST BIG GIANT INPUT TELEMETERED =====')
in_file = self.open_file(FILE7)
parser = self.create_tel_parser(in_file)
# In a single read, get all particles in this file.
number_expected_results = EXPECTED_FILE7
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertEqual(self.tel_exception_callback_value, None)
log.debug('===== END TEST BIG GIANT INPUT =====')
def test_get_many(self):
"""
Read a file and pull out multiple data particles at one time.
Verify that the results are those we expected.
"""
log.debug('===== START TEST GET MANY RECOVERED =====')
expected_particle = 12
in_file = self.open_file(FILE5)
parser = self.create_rec_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(expected_particle)
self.assertEqual(len(result), expected_particle)
self.assertEqual(self.rec_exception_callback_value, None)
in_file.close()
log.debug('===== START TEST GET MANY TELEMETERED =====')
in_file = self.open_file(FILE4)
parser = self.create_tel_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(expected_particle)
self.assertEqual(len(result), expected_particle)
self.assertEqual(self.tel_exception_callback_value, None)
in_file.close()
log.debug('===== END TEST GET MANY =====')
def test_invalid_sensor_data_records(self):
"""
Read data from a file containing invalid sensor data records.
Verify that no instrument particles are produced.
"""
log.debug('===== START TEST INVALID SENSOR DATA RECOVERED =====')
in_file = self.open_file(FILE8)
parser = self.create_rec_parser(in_file)
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
in_file.close()
log.debug('===== START TEST INVALID SENSOR DATA TELEMETERED =====')
in_file = self.open_file(FILE8)
parser = self.create_tel_parser(in_file)
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_no_sensor_data(self):
"""
Read a file containing no sensor data records
and verify that no particles are produced.
"""
log.debug('===== START TEST NO SENSOR DATA RECOVERED =====')
in_file = self.open_file(FILE1)
parser = self.create_rec_parser(in_file)
# Try to get a record and verify that none are produced.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.rec_exception_callback_value, None)
in_file.close()
log.debug('===== START TEST NO SENSOR DATA TELEMETERED =====')
in_file = self.open_file(FILE1)
parser = self.create_tel_parser(in_file)
# Try to get a record and verify that none are produced.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.tel_exception_callback_value, None)
in_file.close()
log.debug('===== END TEST SENSOR DATA =====')
def test_simple(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START TEST SIMPLE RECOVERED =====')
in_file = self.open_file(FILE2)
parser = self.create_rec_parser(in_file)
# Get record and verify.
result = parser.get_records(1)
self.assertEqual(len(result), 1)
self.assertEqual(self.rec_exception_callback_value, None)
in_file.close()
log.debug('===== START TEST SIMPLE TELEMETERED =====')
in_file = self.open_file(FILE3)
parser = self.create_tel_parser(in_file)
# Get record and verify.
result = parser.get_records(1)
self.assertEqual(len(result), 1)
self.assertEqual(self.tel_exception_callback_value, None)
in_file.close()
log.debug('===== END TEST SIMPLE =====')
def test_many_with_yml(self):
"""
Read a file and verify that all records can be read.
Verify that the contents of the particles are correct.
There should be no exceptions generated.
"""
log.debug('===== START TEST MANY WITH YML RECOVERED =====')
num_particles = 13
in_file = self.open_file(FILE2)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(num_particles)
log.debug("Num particles: %d", len(particles))
self.assert_particles(particles, "rec_20020113.spkir2.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
in_file.close()
log.debug('===== START TEST MANY WITH YML TELEMETERED =====')
in_file = self.open_file(FILE2)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(num_particles)
log.debug("Num particles: %d", len(particles))
self.assert_particles(particles, "tel_20020113.spkir2.yml", RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST MANY WITH YML =====')
| bsd-2-clause |
Sutto/cloud-custodian | tools/c7n_azure/tests_azure/tests_resources/test_vm.py | 2 | 10977 | # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from ..azure_common import BaseTest, arm_template
from c7n_azure.session import Session
from dateutil import tz as tzutils
from mock import patch
from c7n.testing import mock_datetime_now
from c7n.utils import local_session
class VMTest(BaseTest):
def setUp(self):
super(VMTest, self).setUp()
def test_validate_vm_schemas(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'offhour'},
{'type': 'onhour'},
{'type': 'network-interface'},
{'type': 'instance-view'}
],
'actions': [
{'type': 'poweroff'},
{'type': 'stop'},
{'type': 'start'},
{'type': 'restart'},
{'type': 'poweroff'}
]
}, validate=True)
self.assertTrue(p)
@arm_template('vm.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('vm.json')
def test_find_running(self):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
fake_running_vms = [{
'resourceGroup': 'test_vm',
'name': 'test_vm'
}]
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_stop(self, filter_mock):
with patch(self._get_vm_client_string() + '.deallocate') as stop_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'stop'}
]
})
p.run()
stop_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'])
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_poweroff(self, filter_mock):
with patch(self._get_vm_client_string() + '.power_off') as poweroff_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'poweroff'}
]
})
p.run()
poweroff_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'],
)
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_start(self, filter_mock):
with patch(self._get_vm_client_string() + '.start') as start_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'start'}
]
})
p.run()
start_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'])
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
def test_restart(self, filter_mock):
with patch(self._get_vm_client_string() + '.restart') as restart_action_mock:
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'restart'}
]
})
p.run()
restart_action_mock.assert_called_with(
self.fake_running_vms[0]['resourceGroup'],
self.fake_running_vms[0]['name'])
@arm_template('vm.json')
@patch('c7n_azure.resources.vm.InstanceViewFilter.process', return_value=fake_running_vms)
@patch('c7n_azure.actions.delete.DeleteAction.process', return_value='')
def test_delete(self, delete_action_mock, filter_mock):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'instance-view',
'key': 'statuses[].code',
'op': 'in',
'value_type': 'swap',
'value': 'PowerState/running'}],
'actions': [
{'type': 'delete'}
]
})
p.run()
delete_action_mock.assert_called_with(self.fake_running_vms)
@arm_template('vm.json')
def test_find_vm_with_public_ip(self):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'network-interface',
'key': 'properties.ipConfigurations[].properties.publicIPAddress.id',
'op': 'eq',
'value': 'not-null'}
],
})
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'network-interface',
'key': 'properties.ipConfigurations[].properties.publicIPAddress.id',
'op': 'eq',
'value': 'null'}
],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('vm.json')
def test_on_off_hours(self):
t = datetime.datetime.now(tzutils.gettz("pt"))
t = t.replace(year=2018, month=8, day=24, hour=18, minute=30)
with mock_datetime_now(t, datetime):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'offhour',
'default_tz': "pt",
'offhour': 18,
'tag': 'schedule'}
],
})
resources = p.run()
self.assertEqual(len(resources), 1)
t = t.replace(year=2018, month=8, day=24, hour=8, minute=30)
with mock_datetime_now(t, datetime):
p = self.load_policy({
'name': 'test-azure-vm',
'resource': 'azure.vm',
'filters': [
{'type': 'onhour',
'default_tz': "pt",
'onhour': 8,
'tag': 'schedule'}
],
})
resources = p.run()
self.assertEqual(len(resources), 1)
def _get_vm_client_string(self):
client = local_session(Session)\
.client('azure.mgmt.compute.ComputeManagementClient').virtual_machines
return client.__module__ + '.' + client.__class__.__name__
| apache-2.0 |
dynaryu/Wind_multipliers | conf.py | 1 | 8133 | # -*- coding: utf-8 -*-
#
# windmultipliers documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 12 12:03:35 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.append('/usr/local/python-2.7.2/lib/python2.7')
sys.path.append('/nas/gemd/climate_change/CHARS/B_Wind/Projects/Multipliers/github/windmultipliers/'
'topographic')
sys.path.append('/nas/gemd/climate_change/CHARS/B_Wind/Projects/Multipliers/github/windmultipliers/terrain')
sys.path.append('/nas/gemd/climate_change/CHARS/B_Wind/Projects/Multipliers/github/windmultipliers/'
'shielding')
sys.path.append('/nas/gemd/climate_change/CHARS/B_Wind/Projects/Multipliers/github/windmultipliers/utilities')
sys.path.append('/nas/gemd/climate_change/CHARS/B_Wind/Projects/Multipliers/github/windmultipliers/tests')
sys.path.append('/nas/gemd/climate_change/CHARS/B_Wind/Projects/Multipliers/github/windmultipliers/'
'tests_characterisation')
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WindMultipliers'
copyright = u'2014, Geoscience Australia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'windmultipliersdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'windmultipliers.tex', u'windmultipliers Documentation',
u'Geoscience Australia', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'windmultipliers', u'windmultipliers Documentation',
[u'Geoscience Australia'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-3.0 |
bdmod/extreme-subversion | BinarySourcce/subversion-1.6.17/subversion/tests/cmdline/svnadmin_tests.py | 1 | 34536 | #!/usr/bin/env python
#
# svnadmin_tests.py: testing the 'svnadmin' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2006, 2008-2009 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
# General modules
import os
import shutil
import sys
# Our testing module
import svntest
from svntest.verify import SVNExpectedStdout, SVNExpectedStderr
from svntest.verify import SVNUnexpectedStderr
# (abbreviation)
Skip = svntest.testcase.Skip
SkipUnless = svntest.testcase.SkipUnless
XFail = svntest.testcase.XFail
Item = svntest.wc.StateItem
#----------------------------------------------------------------------
# How we currently test 'svnadmin' --
#
# 'svnadmin create': Create an empty repository, test that the
# root node has a proper created-revision,
# because there was once a bug where it
# didn't.
#
# Note also that "svnadmin create" is tested
# implicitly every time we run a python test
# script. (An empty repository is always
# created and then imported into; if this
# subcommand failed catastrophically, every
# test would fail and we would know instantly.)
#
# 'svnadmin createtxn'
# 'svnadmin rmtxn': See below.
#
# 'svnadmin lstxns': We don't care about the contents of transactions;
# we only care that they exist or not.
# Therefore, we can simply parse transaction headers.
#
# 'svnadmin dump': A couple regression tests that ensure dump doesn't
# error out, and one to check that the --quiet option
# really does what it's meant to do. The actual
# contents of the dump aren't verified at all.
#
# ### TODO: someday maybe we could parse the contents of trees too.
#
######################################################################
# Helper routines
def get_txns(repo_dir):
"Get the txn names using 'svnadmin lstxns'."
exit_code, output_lines, error_lines = svntest.main.run_svnadmin('lstxns',
repo_dir)
  txns = sorted([x.strip() for x in output_lines])
return txns
def load_and_verify_dumpstream(sbox, expected_stdout, expected_stderr,
revs, dump, *varargs):
"""Load the array of lines passed in 'dump' into the
current tests' repository and verify the repository content
using the array of wc.States passed in revs. VARARGS are optional
arguments passed to the 'load' command"""
if type(dump) is type(""):
dump = [ dump ]
exit_code, output, errput = svntest.main.run_command_stdin(
svntest.main.svnadmin_binary, expected_stderr, 1, dump,
'load', '--quiet', sbox.repo_dir, *varargs)
if expected_stdout:
if expected_stdout == svntest.verify.AnyOutput:
if len(output) == 0:
raise SVNExpectedStdout
else:
svntest.verify.compare_and_display_lines(
"Standard output", "STDOUT:", expected_stdout, output)
if expected_stderr:
if expected_stderr == svntest.verify.AnyOutput:
if len(errput) == 0:
raise SVNExpectedStderr
else:
svntest.verify.compare_and_display_lines(
"Standard error output", "STDERR:", expected_stderr, errput)
# The expected error occurred, so don't try to verify the result
return
if revs:
# verify revs as wc states
for rev in range(len(revs)):
svntest.actions.run_and_verify_svn("Updating to r%s" % (rev+1),
svntest.verify.AnyOutput, [],
"update", "-r%s" % (rev+1),
sbox.wc_dir)
wc_tree = svntest.tree.build_tree_from_wc(sbox.wc_dir)
rev_tree = revs[rev].old_tree()
try:
svntest.tree.compare_trees ("rev/disk", rev_tree, wc_tree)
except svntest.tree.SVNTreeError:
svntest.verify.display_trees(None, 'WC TREE', wc_tree, rev_tree)
raise
######################################################################
# Tests
#----------------------------------------------------------------------
def test_create(sbox):
"'svnadmin create'"
repo_dir = sbox.repo_dir
wc_dir = sbox.wc_dir
svntest.main.safe_rmtree(repo_dir, 1)
svntest.main.safe_rmtree(wc_dir)
svntest.main.create_repos(repo_dir)
svntest.actions.run_and_verify_svn("Creating rev 0 checkout",
["Checked out revision 0.\n"], [],
"checkout",
sbox.repo_url, wc_dir)
svntest.actions.run_and_verify_svn(
"Running status",
[], [],
"status", wc_dir)
svntest.actions.run_and_verify_svn(
"Running verbose status",
[" 0 0 ? %s\n" % wc_dir], [],
"status", "--verbose", wc_dir)
# success
# dump stream tests need a dump file
def clean_dumpfile():
return \
[ "SVN-fs-dump-format-version: 2\n\n",
"UUID: 668cc64a-31ed-0310-8ccb-b75d75bb44e3\n\n",
"Revision-number: 0\n",
"Prop-content-length: 56\n",
"Content-length: 56\n\n",
"K 8\nsvn:date\nV 27\n2005-01-08T21:48:13.838745Z\nPROPS-END\n\n\n",
"Revision-number: 1\n",
"Prop-content-length: 98\n",
"Content-length: 98\n\n",
"K 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 4\nerik\n",
"K 8\nsvn:date\nV 27\n2005-01-08T21:51:16.313791Z\nPROPS-END\n\n\n",
"Node-path: A\n",
"Node-kind: file\n",
"Node-action: add\n",
"Prop-content-length: 35\n",
"Text-content-length: 5\n",
"Text-content-md5: e1cbb0c3879af8347246f12c559a86b5\n",
"Content-length: 40\n\n",
"K 12\nsvn:keywords\nV 2\nId\nPROPS-END\ntext\n\n\n"]
dumpfile_revisions = \
[ svntest.wc.State('', { 'A' : svntest.wc.StateItem(contents="text\n") }) ]
#----------------------------------------------------------------------
def extra_headers(sbox):
"loading of dumpstream with extra headers"
test_create(sbox)
dumpfile = clean_dumpfile()
dumpfile[3:3] = \
[ "X-Comment-Header: Ignored header normally not in dump stream\n" ]
load_and_verify_dumpstream(sbox,[],[], dumpfile_revisions, dumpfile,
'--ignore-uuid')
#----------------------------------------------------------------------
# Ensure loading continues after skipping a bit of unknown extra content.
def extra_blockcontent(sbox):
"load success on oversized Content-length"
test_create(sbox)
dumpfile = clean_dumpfile()
# Replace "Content-length" line with two lines
dumpfile[8:9] = \
[ "Extra-content-length: 10\n",
"Content-length: 108\n\n" ]
# Insert the extra content after "PROPS-END\n"
dumpfile[11] = dumpfile[11][:-2] + "extra text\n\n\n"
load_and_verify_dumpstream(sbox,[],[], dumpfile_revisions, dumpfile,
'--ignore-uuid')
#----------------------------------------------------------------------
def inconsistent_headers(sbox):
"load failure on undersized Content-length"
test_create(sbox)
dumpfile = clean_dumpfile()
dumpfile[-2] = "Content-length: 30\n\n"
load_and_verify_dumpstream(sbox, [], svntest.verify.AnyOutput,
dumpfile_revisions, dumpfile)
#----------------------------------------------------------------------
# Test for issue #2729: Datestamp-less revisions in dump streams do
# not remain so after load
def empty_date(sbox):
"preserve date-less revisions in load (issue #2729)"
test_create(sbox)
dumpfile = clean_dumpfile()
# Replace portions of the revision data to drop the svn:date revprop.
dumpfile[7:11] = \
[ "Prop-content-length: 52\n",
"Content-length: 52\n\n",
"K 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 4\nerik\nPROPS-END\n\n\n"
]
load_and_verify_dumpstream(sbox,[],[], dumpfile_revisions, dumpfile,
'--ignore-uuid')
# Verify that the revision still lacks the svn:date property.
svntest.actions.run_and_verify_svn(None, [], [], "propget",
"--revprop", "-r1", "svn:date",
sbox.wc_dir)
#----------------------------------------------------------------------
def dump_copied_dir(sbox):
"'svnadmin dump' on copied directory"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
old_C_path = os.path.join(wc_dir, 'A', 'C')
new_C_path = os.path.join(wc_dir, 'A', 'B', 'C')
svntest.main.run_svn(None, 'cp', old_C_path, new_C_path)
svntest.main.run_svn(None, 'ci', wc_dir, '--quiet',
'-m', 'log msg')
exit_code, output, errput = svntest.main.run_svnadmin("dump", repo_dir)
if svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', ["* Dumped revision 0.\n",
"* Dumped revision 1.\n",
"* Dumped revision 2.\n"], errput):
raise svntest.Failure
#----------------------------------------------------------------------
def dump_move_dir_modify_child(sbox):
"'svnadmin dump' on modified child of copied dir"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
B_path = os.path.join(wc_dir, 'A', 'B')
Q_path = os.path.join(wc_dir, 'A', 'Q')
svntest.main.run_svn(None, 'cp', B_path, Q_path)
svntest.main.file_append(os.path.join(Q_path, 'lambda'), 'hello')
svntest.main.run_svn(None, 'ci', wc_dir, '--quiet',
'-m', 'log msg')
exit_code, output, errput = svntest.main.run_svnadmin("dump", repo_dir)
svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', ["* Dumped revision 0.\n",
"* Dumped revision 1.\n",
"* Dumped revision 2.\n"], errput)
exit_code, output, errput = svntest.main.run_svnadmin("dump", "-r",
"0:HEAD", repo_dir)
svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump' is unexpected.",
'STDERR', ["* Dumped revision 0.\n",
"* Dumped revision 1.\n",
"* Dumped revision 2.\n"], errput)
#----------------------------------------------------------------------
def dump_quiet(sbox):
"'svnadmin dump --quiet'"
sbox.build(create_wc = False)
exit_code, output, errput = svntest.main.run_svnadmin("dump", sbox.repo_dir,
'--quiet')
svntest.verify.compare_and_display_lines(
"Output of 'svnadmin dump --quiet' is unexpected.",
'STDERR', [], errput)
#----------------------------------------------------------------------
def hotcopy_dot(sbox):
"'svnadmin hotcopy PATH .'"
sbox.build()
backup_dir, backup_url = sbox.add_repo_path('backup')
os.mkdir(backup_dir)
cwd = os.getcwd()
os.chdir(backup_dir)
svntest.actions.run_and_verify_svnadmin(
None, None, [],
"hotcopy", os.path.join(cwd, sbox.repo_dir), '.')
os.chdir(cwd)
exit_code, origout, origerr = svntest.main.run_svnadmin("dump",
sbox.repo_dir,
'--quiet')
exit_code, backout, backerr = svntest.main.run_svnadmin("dump",
backup_dir,
'--quiet')
if origerr or backerr or origout != backout:
raise svntest.Failure
#----------------------------------------------------------------------
def hotcopy_format(sbox):
"'svnadmin hotcopy' checking db/format file"
sbox.build()
backup_dir, backup_url = sbox.add_repo_path('backup')
exit_code, output, errput = svntest.main.run_svnadmin("hotcopy",
sbox.repo_dir,
backup_dir)
if errput:
print("Error: hotcopy failed")
raise svntest.Failure
# verify that the db/format files are the same
fp = open(os.path.join(sbox.repo_dir, "db", "format"))
contents1 = fp.read()
fp.close()
fp2 = open(os.path.join(backup_dir, "db", "format"))
contents2 = fp2.read()
fp2.close()
if contents1 != contents2:
print("Error: db/format file contents do not match after hotcopy")
raise svntest.Failure
#----------------------------------------------------------------------
def setrevprop(sbox):
"'setlog' and 'setrevprop', bypassing hooks'"
sbox.build()
# Try a simple log property modification.
iota_path = os.path.join(sbox.wc_dir, "iota")
exit_code, output, errput = svntest.main.run_svnadmin("setlog",
sbox.repo_dir,
"-r0",
"--bypass-hooks",
iota_path)
if errput:
print("Error: 'setlog' failed")
raise svntest.Failure
# Verify that the revprop value matches what we set when retrieved
# through the client.
svntest.actions.run_and_verify_svn(None,
[ "This is the file 'iota'.\n", "\n" ],
[], "propget", "--revprop", "-r0",
"svn:log", sbox.wc_dir)
# Try an author property modification.
foo_path = os.path.join(sbox.wc_dir, "foo")
svntest.main.file_write(foo_path, "foo")
exit_code, output, errput = svntest.main.run_svnadmin("setrevprop",
sbox.repo_dir,
"-r0", "svn:author",
foo_path)
if errput:
print("Error: 'setrevprop' failed")
raise svntest.Failure
# Verify that the revprop value matches what we set when retrieved
# through the client.
svntest.actions.run_and_verify_svn(None, [ "foo\n" ], [], "propget",
"--revprop", "-r0", "svn:author",
sbox.wc_dir)
def verify_windows_paths_in_repos(sbox):
"verify a repository containing paths like 'c:hi'"
# setup a repo with a directory 'c:hi'
sbox.build(create_wc = False)
repo_url = sbox.repo_url
chi_url = sbox.repo_url + '/c:hi'
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', '-m', 'log_msg',
chi_url)
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
svntest.verify.compare_and_display_lines(
"Error while running 'svnadmin verify'.",
'STDERR', ["* Verified revision 0.\n",
"* Verified revision 1.\n",
"* Verified revision 2.\n"], errput)
#----------------------------------------------------------------------
# Returns the filename of the rev or revprop file (according to KIND)
# numbered REV in REPO_DIR, which must be in the first shard if we're
# using a sharded repository.
def fsfs_file(repo_dir, kind, rev):
if svntest.main.server_minor_version >= 5:
if svntest.main.fsfs_sharding is None:
return os.path.join(repo_dir, 'db', kind, '0', rev)
else:
shard = int(rev) // svntest.main.fsfs_sharding
path = os.path.join(repo_dir, 'db', kind, str(shard), rev)
if svntest.main.fsfs_packing is None or kind == 'revprops':
# we don't pack revprops
return path
elif os.path.exists(path):
# rev exists outside a pack file.
return path
else:
# didn't find the plain file; assume it's in a pack file
return os.path.join(repo_dir, 'db', kind, ('%d.pack' % shard), 'pack')
else:
return os.path.join(repo_dir, 'db', kind, rev)
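# Illustrative note (not part of the original test suite): with the default
# test settings the helper above resolves paths roughly as follows --
#   pre-1.5 server:                        repo/db/revs/3
#   1.5+ server, no sharding:              repo/db/revs/0/3
#   sharding of 2, rev 3 not yet packed:   repo/db/revs/1/3
#   sharding of 2, rev 3 packed:           repo/db/revs/1.pack/pack
# (revprops are never packed, so they always resolve to a plain file).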
def verify_incremental_fsfs(sbox):
"""svnadmin verify detects corruption dump can't"""
# setup a repo with a directory 'c:hi'
sbox.build(create_wc = False)
repo_url = sbox.repo_url
E_url = sbox.repo_url + '/A/B/E'
# Create A/B/E/bravo in r2.
svntest.actions.run_and_verify_svn(None, None, [],
'mkdir', '-m', 'log_msg',
E_url + '/bravo')
# Corrupt r2's reference to A/C by replacing "dir 7-1.0.r1/1568" with
# "dir 7-1.0.r1/1569" (increment offset) and updating the checksum for
# this directory listing to "c9b5a2d26473a4e28088673dda9df804" so that
# the listing itself is valid.
r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
if r2.endswith('pack'):
raise svntest.Skip
fp = open(r2, 'wb')
fp.write("""id: 0-2.0.r2/0
type: dir
count: 0
cpath: /A/B/E/bravo
copyroot: 0 /
PLAIN
K 5
alpha
V 17
file 3-1.0.r1/719
K 4
beta
V 17
file 4-1.0.r1/840
K 5
bravo
V 14
dir 0-2.0.r2/0
END
ENDREP
id: 2-1.0.r2/181
type: dir
pred: 2-1.0.r1/1043
count: 1
text: 2 69 99 99 f63001f7fddd1842d8891474d0982111
cpath: /A/B/E
copyroot: 0 /
PLAIN
K 1
E
V 16
dir 2-1.0.r2/181
K 1
F
V 17
dir 5-1.0.r1/1160
K 6
lambda
V 17
file 6-1.0.r1/597
END
ENDREP
id: 1-1.0.r2/424
type: dir
pred: 1-1.0.r1/1335
count: 1
text: 2 316 95 95 bccb66379b4f825dac12b50d80211bae
cpath: /A/B
copyroot: 0 /
PLAIN
K 1
B
V 16
dir 1-1.0.r2/424
K 1
C
V 17
dir 7-1.0.r1/1569
K 1
D
V 17
dir 8-1.0.r1/3061
K 2
mu
V 18
file i-1.0.r1/1451
END
ENDREP
id: 0-1.0.r2/692
type: dir
pred: 0-1.0.r1/3312
count: 1
text: 2 558 121 121 c9b5a2d26473a4e28088673dda9df804
cpath: /A
copyroot: 0 /
PLAIN
K 1
A
V 16
dir 0-1.0.r2/692
K 4
iota
V 18
file j-1.0.r1/3428
END
ENDREP
id: 0.0.r2/904
type: dir
pred: 0.0.r1/3624
count: 2
text: 2 826 65 65 e44e4151d0d124533338619f082c8c9a
cpath: /
copyroot: 0 /
_0.0.t1-1 add false false /A/B/E/bravo
904 1031
""")
fp.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify", "-r2",
sbox.repo_dir)
svntest.verify.verify_outputs(
message=None, actual_stdout=output, actual_stderr=errput,
expected_stdout=None,
expected_stderr=".*Found malformed header in revision file")
#----------------------------------------------------------------------
def recover_fsfs(sbox):
"recover a repository (FSFS only)"
sbox.build()
current_path = os.path.join(sbox.repo_dir, 'db', 'current')
# Commit up to r3, so we can test various recovery scenarios.
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newer line\n')
svntest.main.run_svn(None, 'ci', sbox.wc_dir, '--quiet', '-m', 'log msg')
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newest line\n')
svntest.main.run_svn(None, 'ci', sbox.wc_dir, '--quiet', '-m', 'log msg')
# Remember the contents of the db/current file.
expected_current_contents = svntest.main.file_read(current_path)
# Move aside the current file for r3.
os.rename(os.path.join(sbox.repo_dir, 'db','current'),
os.path.join(sbox.repo_dir, 'db','was_current'));
# Run 'svnadmin recover' and check that the current file is recreated.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = svntest.main.file_read(current_path)
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
# Now try writing db/current to be one rev lower than it should be.
svntest.main.file_write(current_path, '2\n')
# Run 'svnadmin recover' and check that the current file is fixed.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = svntest.main.file_read(current_path)
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
# Now try writing db/current to be *two* revs lower than it should be.
svntest.main.file_write(current_path, '1\n')
# Run 'svnadmin recover' and check that the current file is fixed.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = svntest.main.file_read(current_path)
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
# Now try writing db/current to be fish revs lower than it should be.
#
# Note: I'm not actually sure it's wise to recover from this, but
# detecting it would require rewriting fs_fs.c:get_youngest() to
# check the actual contents of its buffer, since atol() will happily
# convert "fish" to 0.
svntest.main.file_write(current_path, 'fish\n')
# Run 'svnadmin recover' and check that the current file is fixed.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
actual_current_contents = svntest.main.file_read(current_path)
svntest.verify.compare_and_display_lines(
"Contents of db/current is unexpected.",
'db/current', expected_current_contents, actual_current_contents)
#----------------------------------------------------------------------
def load_with_parent_dir(sbox):
"'svnadmin load --parent-dir' reparents mergeinfo"
## See http://subversion.tigris.org/issues/show_bug.cgi?id=2983. ##
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'mergeinfo_included.dump')
dumpfile = svntest.main.file_read(dumpfile_location)
# Create 'sample' dir in sbox.repo_url, and load the dump stream there.
svntest.actions.run_and_verify_svn(None,
['\n', 'Committed revision 1.\n'],
[], "mkdir", sbox.repo_url + "/sample",
"-m", "Create sample dir")
load_and_verify_dumpstream(sbox, [], [], None, dumpfile, '--parent-dir',
'/sample')
# Verify the svn:mergeinfo properties for '--parent-dir'
svntest.actions.run_and_verify_svn(None,
[sbox.repo_url +
"/sample/branch - /sample/trunk:5-7\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample/branch')
svntest.actions.run_and_verify_svn(None,
[sbox.repo_url +
"/sample/branch1 - " +
"/sample/branch:6-9\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample/branch1')
# Create 'sample-2' dir in sbox.repo_url, and load the dump stream again.
# This time, don't include a leading slash on the --parent-dir argument.
# See issue #3547.
svntest.actions.run_and_verify_svn(None,
['\n', 'Committed revision 11.\n'],
[], "mkdir", sbox.repo_url + "/sample-2",
"-m", "Create sample-2 dir")
load_and_verify_dumpstream(sbox, [], [], None, dumpfile, '--parent-dir',
'sample-2')
# Verify the svn:mergeinfo properties for '--parent-dir'.
svntest.actions.run_and_verify_svn(None,
[sbox.repo_url +
"/sample-2/branch - " +
"/sample-2/trunk:15-17\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample-2/branch')
svntest.actions.run_and_verify_svn(None,
[sbox.repo_url +
"/sample-2/branch1 - " +
"/sample-2/branch:16-19\n"],
[], 'propget', 'svn:mergeinfo', '-R',
sbox.repo_url + '/sample-2/branch1')
#----------------------------------------------------------------------
def set_uuid(sbox):
"test 'svnadmin setuuid'"
sbox.build(create_wc=False)
# Squirrel away the original repository UUID.
exit_code, output, errput = svntest.main.run_svnlook('uuid', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
orig_uuid = output[0].rstrip()
# Try setting a new, bogus UUID.
svntest.actions.run_and_verify_svnadmin(None, None, '^.*Malformed UUID.*$',
'setuuid', sbox.repo_dir, 'abcdef')
# Try generating a brand new UUID.
svntest.actions.run_and_verify_svnadmin(None, [], None,
'setuuid', sbox.repo_dir)
exit_code, output, errput = svntest.main.run_svnlook('uuid', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
new_uuid = output[0].rstrip()
if new_uuid == orig_uuid:
print("Error: new UUID matches the original one")
raise svntest.Failure
# Now, try setting the UUID back to the original value.
svntest.actions.run_and_verify_svnadmin(None, [], None,
'setuuid', sbox.repo_dir, orig_uuid)
exit_code, output, errput = svntest.main.run_svnlook('uuid', sbox.repo_dir)
if errput:
raise SVNUnexpectedStderr(errput)
new_uuid = output[0].rstrip()
if new_uuid != orig_uuid:
print("Error: new UUID doesn't match the original one")
raise svntest.Failure
#----------------------------------------------------------------------
def reflect_dropped_renumbered_revs(sbox):
"reflect dropped renumbered revs in svn:mergeinfo"
## See http://subversion.tigris.org/issues/show_bug.cgi?id=3020. ##
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'with_merges.dump')
dumpfile = svntest.main.file_read(dumpfile_location)
# Create 'toplevel' dir in sbox.repo_url
svntest.actions.run_and_verify_svn(None, ['\n', 'Committed revision 1.\n'],
[], "mkdir", sbox.repo_url + "/toplevel",
"-m", "Create toplevel dir")
# Load the dump stream in sbox.repo_url
load_and_verify_dumpstream(sbox,[],[], None, dumpfile)
# Load the dump stream in toplevel dir
load_and_verify_dumpstream(sbox,[],[], None, dumpfile, '--parent-dir',
'/toplevel')
# Verify the svn:mergeinfo properties
svntest.actions.run_and_verify_svn(None, ["/trunk:1-4\n"],
[], 'propget', 'svn:mergeinfo',
sbox.repo_url + '/branch2')
svntest.actions.run_and_verify_svn(None, ["/branch1:5-9\n"],
[], 'propget', 'svn:mergeinfo',
sbox.repo_url + '/trunk')
svntest.actions.run_and_verify_svn(None, ["/toplevel/trunk:1-13\n"],
[], 'propget', 'svn:mergeinfo',
sbox.repo_url + '/toplevel/branch2')
svntest.actions.run_and_verify_svn(None, ["/toplevel/branch1:14-18\n"],
[], 'propget', 'svn:mergeinfo',
sbox.repo_url + '/toplevel/trunk')
svntest.actions.run_and_verify_svn(None, ["/toplevel/trunk:1-12\n"],
[], 'propget', 'svn:mergeinfo',
sbox.repo_url + '/toplevel/branch1')
svntest.actions.run_and_verify_svn(None, ["/trunk:1-3\n"],
[], 'propget', 'svn:mergeinfo',
sbox.repo_url + '/branch1')
#----------------------------------------------------------------------
def fsfs_recover_handle_missing_revs_or_revprops_file(sbox):
"""fsfs recovery checks missing revs / revprops files"""
# Set up a repository containing the greek tree.
sbox.build()
# Commit up to r3, so we can test various recovery scenarios.
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newer line\n')
svntest.main.run_svn(None, 'ci', sbox.wc_dir, '--quiet', '-m', 'log msg')
svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newest line\n')
svntest.main.run_svn(None, 'ci', sbox.wc_dir, '--quiet', '-m', 'log msg')
rev_3 = fsfs_file(sbox.repo_dir, 'revs', '3')
rev_was_3 = rev_3 + '.was'
# Move aside the revs file for r3.
os.rename(rev_3, rev_was_3)
# Verify 'svnadmin recover' fails when youngest has a revprops
# file but no revs file.
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin recover' is unexpected.", None, errput, None,
".*Expected current rev to be <= %s but found 3"
# For example, if svntest.main.fsfs_sharding == 2, then rev_3 would
# be the pack file for r2:r3, and the error message would report "<= 1".
% (rev_3.endswith('pack') and '[012]' or '2')):
raise svntest.Failure
# Restore the r3 revs file, thus repairing the repository.
os.rename(rev_was_3, rev_3)
revprop_3 = fsfs_file(sbox.repo_dir, 'revprops', '3')
revprop_was_3 = revprop_3 + '.was'
# Move aside the revprops file for r3.
os.rename(revprop_3, revprop_was_3)
# Verify 'svnadmin recover' fails when youngest has a revs file
# but no revprops file (issue #2992).
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin recover' is unexpected.", None, errput, None,
".*Revision 3 has a revs file but no revprops file"):
raise svntest.Failure
# Restore the r3 revprops file, thus repairing the repository.
os.rename(revprop_was_3, revprop_3)
# Change revprops file to a directory for revision 3
os.rename(revprop_3, revprop_was_3)
os.mkdir(revprop_3)
# Verify 'svnadmin recover' fails when youngest has a revs file
# but revprops file is not a file (another aspect of issue #2992).
exit_code, output, errput = svntest.main.run_svnadmin("recover",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin recover' is unexpected.", None, errput, None,
".*Revision 3 has a non-file where its revprops file should be.*"):
raise svntest.Failure
#----------------------------------------------------------------------
def create_in_repo_subdir(sbox):
"'svnadmin create /path/to/repo/subdir'"
repo_dir = sbox.repo_dir
wc_dir = sbox.wc_dir
svntest.main.safe_rmtree(repo_dir, 1)
svntest.main.safe_rmtree(wc_dir)
# This should succeed
svntest.main.create_repos(repo_dir)
try:
# This should fail
subdir = os.path.join(repo_dir, 'Z')
svntest.main.create_repos(subdir)
except svntest.main.SVNRepositoryCreateFailure:
return
# No SVNRepositoryCreateFailure raised?
raise svntest.Failure
def verify_with_invalid_revprops(sbox):
"svnadmin verify detects invalid revprops file"
repo_dir = sbox.repo_dir
svntest.main.safe_rmtree(repo_dir, 1)
# This should succeed
svntest.main.create_repos(repo_dir)
# Run a test verify
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin verify' is unexpected.", None, errput, None,
".*Verified revision 0*"):
raise svntest.Failure
# Empty the revprops file
rp_file = open(os.path.join(repo_dir, 'db', 'revprops', '0', '0'), 'w')
rp_file.write('')
rp_file.close()
exit_code, output, errput = svntest.main.run_svnadmin("verify",
sbox.repo_dir)
if svntest.verify.verify_outputs(
"Output of 'svnadmin verify' is unexpected.", None, errput, None,
".*Malformed file"):
raise svntest.Failure
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
extra_headers,
extra_blockcontent,
inconsistent_headers,
empty_date,
dump_copied_dir,
dump_move_dir_modify_child,
dump_quiet,
hotcopy_dot,
hotcopy_format,
setrevprop,
verify_windows_paths_in_repos,
SkipUnless(verify_incremental_fsfs, svntest.main.is_fs_type_fsfs),
SkipUnless(recover_fsfs, svntest.main.is_fs_type_fsfs),
load_with_parent_dir,
set_uuid,
reflect_dropped_renumbered_revs,
SkipUnless(fsfs_recover_handle_missing_revs_or_revprops_file,
svntest.main.is_fs_type_fsfs),
create_in_repo_subdir,
SkipUnless(verify_with_invalid_revprops,
svntest.main.is_fs_type_fsfs),
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| gpl-2.0 |
plantigrade/geni-tools | src/gcf/sfa/trust/gid.py | 4 | 10204 | #----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
##
# Implements SFA GID. GIDs are based on certificates, and the GID class is a
# descendant of the certificate class.
##
from __future__ import absolute_import
import xmlrpclib
import uuid
from .certificate import Certificate
from ..util.faults import GidInvalidParentHrn, GidParentHrn
from ..util.sfalogging import logger
from ..util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
##
# Create a new uuid. Returns the UUID as a string.
def create_uuid():
return str(uuid.uuid4().int)
##
# GID is a tuple:
# (uuid, urn, public_key)
#
# UUID is a unique identifier and is created by the python uuid module
# (or the utility function create_uuid() in gid.py).
#
# HRN is a human readable name. It is a dotted form similar to a backward domain
# name. For example, planetlab.us.arizona.bakers.
#
# URN is a human readable identifier of form:
# "urn:publicid:IDN+toplevelauthority[:sub-auth.]*[\res. type]\ +object name"
# For example, urn:publicid:IDN+planetlab:us:arizona+user+bakers
#
# PUBLIC_KEY is the public key of the principal identified by the UUID/HRN.
# It is a Keypair object as defined in the cert.py module.
#
# It is expected that there is a one-to-one pairing between UUIDs and HRN,
# but it is uncertain how this would be enforced or if it needs to be enforced.
#
# These fields are encoded using xmlrpc into the subjectAltName field of the
# x509 certificate. Note: Call encode() once the fields have been filled in
# to perform this encoding.
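# Illustrative sketch (not part of the original module; names and addresses are
# made up): a GID is typically built by an authority roughly like this, assuming
# key material and signing are handled by the Certificate base class as
# elsewhere in SFA:
#
#   gid = GID(create=True, subject="plc.example.alice",
#             urn="urn:publicid:IDN+plc:example+user+alice",
#             uuid=create_uuid(), email="alice@example.org")
#   gid.encode()   # pack uuid/urn/email into subjectAltName before signing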
class GID(Certificate):
##
# Create a new GID object
#
# @param create If true, create the X509 certificate
# @param subject If subject!=None, create the X509 cert and set the subject name
# @param string If string!=None, load the GID from a string
# @param filename If filename!=None, load the GID from a file
# @param lifeDays life of GID in days - default is 1825==5 years
# @param email Email address to put in subjectAltName - default is None
def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825, email=None):
self.uuid = None
self.hrn = None
self.urn = None
self.email = None # for adding to the SubjectAltName
Certificate.__init__(self, lifeDays, create, subject, string, filename)
if subject:
logger.debug("Creating GID for subject: %s" % subject)
if uuid:
self.uuid = int(uuid)
if hrn:
self.hrn = hrn
self.urn = hrn_to_urn(hrn, 'unknown')
if urn:
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
if email:
self.set_email(email)
def set_uuid(self, uuid):
if isinstance(uuid, str):
self.uuid = int(uuid)
else:
self.uuid = uuid
def get_uuid(self):
if not self.uuid:
self.decode()
return self.uuid
def set_hrn(self, hrn):
self.hrn = hrn
def get_hrn(self):
if not self.hrn:
self.decode()
return self.hrn
def set_urn(self, urn):
self.urn = urn
self.hrn, type = urn_to_hrn(urn)
def get_urn(self):
if not self.urn:
self.decode()
return self.urn
# Will be stuffed into subjectAltName
def set_email(self, email):
self.email = email
def get_email(self):
if not self.email:
self.decode()
return self.email
def get_type(self):
if not self.urn:
self.decode()
_, t = urn_to_hrn(self.urn)
return t
##
# Encode the GID fields and package them into the subject-alt-name field
# of the X509 certificate. This must be called prior to signing the
# certificate. It may only be called once per certificate.
def encode(self):
if self.urn:
urn = self.urn
else:
urn = hrn_to_urn(self.hrn, None)
str = "URI:" + urn
if self.uuid:
str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn
if self.email:
str += ", " + "email:" + self.email
self.set_data(str, 'subjectAltName')
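    # Illustrative example (not in the original source): after encode() the
    # subjectAltName data has the form
    #   URI:urn:publicid:IDN+plc:example+user+alice, URI:urn:uuid:<uuid-urn>, email:alice@example.org
    # which is exactly what decode() below parses back into urn, uuid and email.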
##
# Decode the subject-alt-name field of the X509 certificate into the
# fields of the GID. This is automatically called by the various get_*()
# functions in this class.
def decode(self):
data = self.get_data('subjectAltName')
dict = {}
if data:
if data.lower().startswith('uri:http://<params>'):
dict = xmlrpclib.loads(data[11:])[0][0]
else:
spl = data.split(', ')
for val in spl:
if val.lower().startswith('uri:urn:uuid:'):
dict['uuid'] = uuid.UUID(val[4:]).int
elif val.lower().startswith('uri:urn:publicid:idn+'):
dict['urn'] = val[4:]
elif val.lower().startswith('email:'):
# FIXME: Ensure there isn't cruft in that address...
# EG look for email:copy,....
dict['email'] = val[6:]
self.uuid = dict.get("uuid", None)
self.urn = dict.get("urn", None)
self.hrn = dict.get("hrn", None)
self.email = dict.get("email", None)
if self.urn:
self.hrn = urn_to_hrn(self.urn)[0]
##
# Dump the credential to stdout.
#
# @param indent specifies a number of spaces to indent the output
# @param dump_parents If true, also dump the parents of the GID
def dump(self, *args, **kwargs):
print self.dump_string(*args,**kwargs)
def dump_string(self, indent=0, dump_parents=False):
result=" "*(indent-2) + "GID\n"
result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"
result += " "*indent + "urn:" + str(self.get_urn()) +"\n"
result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"
if self.get_email() is not None:
result += " "*indent + "email:" + str(self.get_email()) + "\n"
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
if self.parent and dump_parents:
result += " "*indent + "parent:\n"
result += self.parent.dump_string(indent+4, dump_parents)
return result
##
# Verify the chain of authenticity of the GID. First perform the checks
# of the certificate class (verifying that each parent signs the child,
# etc). In addition, GIDs also confirm that the parent's HRN is a prefix
# of the child's HRN, and the parent is of type 'authority'.
#
# Verifying these prefixes prevents a rogue authority from signing a GID
# for a principal that is not a member of that authority. For example,
# planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.
def verify_chain(self, trusted_certs = None):
# do the normal certificate verification stuff
trusted_root = Certificate.verify_chain(self, trusted_certs)
if self.parent:
# make sure the parent's hrn is a prefix of the child's hrn
if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
# Parent must also be an authority (of some type) to sign a GID
# There are multiple types of authority - accept them all here
if not self.parent.get_type().find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
# Then recurse up the chain - ensure the parent is a trusted
# root or is in the namespace of a trusted root
self.parent.verify_chain(trusted_certs)
else:
# make sure that the trusted root's hrn is a prefix of the child's
trusted_gid = GID(string=trusted_root.save_to_string())
trusted_type = trusted_gid.get_type()
trusted_hrn = trusted_gid.get_hrn()
#if trusted_type == 'authority':
# trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
cur_hrn = self.get_hrn()
if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert: %s" % (trusted_hrn, cur_hrn))
# There are multiple types of authority - accept them all here
if not trusted_type.find('authority') == 0:
raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
return
| mit |
gmaxwell/bitcoin | contrib/devtools/optimize-pngs.py | 111 | 3391 | #!/usr/bin/env python
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
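# Note (added for clarity, not in the original script): file_hash() changes when
# any byte of the file changes (e.g. pngcrush stripping metadata), while
# content_hash() only changes if the decoded RGBA pixels change -- the latter is
# what the post-crush corruption check below relies on.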
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
#verify
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| mit |
bsautermeister/tensorlight | tensorlight/utils/path.py | 1 | 1706 | import os
import fnmatch
def get_filenames(root_dir, pattern, include_root=True):
"""Gets a list of files of a given directory matching a
specified pattern, by using a resursive search.
Parameters
----------
root_dir: str
The directory to recursively look into.
pattern: str
The file pattern search string, such as '*.jpg'.
include_root: Boolean, optional
Whether to include the root path or just return the
filenames.
Returns
----------
matches: list(string)
Returns a list of filenames that match this pattern.
"""
matches = []
for root, dirnames, filenames in os.walk(root_dir):
for filename in fnmatch.filter(filenames, pattern):
if include_root:
filename = os.path.join(root, filename)
matches.append(filename)
return matches
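# Illustrative usage (hypothetical paths, not part of the original module):
#   jpgs = get_filenames('./data', '*.jpg')                      # full paths
#   names = get_filenames('./data', '*.jpg', include_root=False) # names only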
def get_subdirnames(root_dir):
"""Gets the immediate subdirectory names of a folder.
Parameters
----------
root_dir: str
The directory to recursively look into.
Returns
----------
A list of strings with the folder names.
"""
return [d for d in os.listdir(root_dir)
if os.path.isdir(os.path.join(root_dir, d))]
def get_subdirpaths(root_dir):
"""Gets the path to all subdirectories given a folder.
Parameters
----------
root_dir: str
The directory to recursively look into.
Returns
----------
A list of strings with the folder paths.
"""
subdir_paths = []
for d in os.listdir(root_dir):
path = os.path.join(root_dir, d)
if os.path.isdir(path):
subdir_paths.append(path)
return subdir_paths | mit |
twobraids/socorro | socorro/unittest/lib/testVerTools.py | 11 | 1953 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import socorro.lib.ver_tools as vtl
phs = vtl._padding_high_string
pl = vtl._padding_list
tests = [('3', [3, phs, 0, phs] + pl * 3, '3'),
('3.', [3, phs, 0, phs] + pl * 3, '3'),
('3.0', [3, phs, 0, phs] + pl * 3, '3'),
('3.0.0', [3, phs, 0, phs] + pl * 3, '3'),
('3.5', [3, phs, 0, phs,
5, phs, 0, phs] + pl * 2, '3.5'),
('3.5pre', [3, phs, 0, phs,
5, 'pre', 0, phs] + pl * 2, '3.5pre'),
('3.5b3', [3, phs, 0, phs,
5, 'b', 3, phs] + pl * 2, '3.5b3'),
('3.6.4plugin3', [3, phs, 0, phs,
6, phs, 0, phs,
4, 'plugin', 3, phs] + pl, '3.6.4plugin3'),
]
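# Note (added for clarity, not in the original tests): each dotted component
# normalizes to four slots (number, string, number, string) and versions are
# padded to four components, so '3', '3.0' and '3.0.0' normalize identically,
# while a 'pre'/'b' suffix makes a version sort below the plain release.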
def testNormalize():
for ver, expected, ver2 in tests:
got = vtl.normalize(ver)
assert got == expected, "expected %s, but got %s" % (expected, got)
def testDenomalize():
for ver, norm, expected in tests:
got = vtl.denormalize(norm)
assert got == expected, "expected %s, but got %s" % (expected, got)
def testCompare():
got = vtl.compare('3', '3.1')
assert got == -1, "expected %s, but got %s" % (-1, got)
got = vtl.compare('3', '3.0')
assert got == 0, "expected %s, but got %s" % (0, got)
got = vtl.compare('3', '3.0pre')
assert got == 1, "expected %s, but got %s" % (1, got)
got = vtl.compare('3.5b2', '3.5b1')
assert got == 1, "expected %s, but got %s" % (1, got)
got = vtl.compare('3.5', '3.5b1')
assert got == 1, "expected %s, but got %s" % (1, got)
got = vtl.compare('3.5.1', '3.5.1b3')
assert got == 1, "expected %s, but got %s" % (1, got)
| mpl-2.0 |
thehyve/variant | eggs/django_debug_toolbar-0.8.5-py2.7.egg/debug_toolbar/views.py | 11 | 7918 | """
Helper views for the debug toolbar. These are dynamically installed when the
debug toolbar is displayed, and typically can do Bad Things, so hooking up these
views in any other way is generally not advised.
"""
import os
import django.views.static
from django.conf import settings
from django.db import connection
from django.http import HttpResponseBadRequest
from django.shortcuts import render_to_response
from django.utils import simplejson
from django.utils.hashcompat import sha_constructor
class InvalidSQLError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
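# Note (added for clarity, not in the original source): each sql_* view below
# only runs a query if the caller also supplies a digest of
# SECRET_KEY + sql + params (SHA-1 via sha_constructor). The toolbar embeds
# that digest when it renders the SQL panel, so arbitrary queries cannot be
# executed just by crafting the GET parameters.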
def debug_media(request, path):
root = getattr(settings, 'DEBUG_TOOLBAR_MEDIA_ROOT', None)
if root is None:
parent = os.path.abspath(os.path.dirname(__file__))
root = os.path.join(parent, 'media', 'debug_toolbar')
return django.views.static.serve(request, path, root)
def sql_select(request):
"""
Returns the output of the SQL SELECT statement.
Expected GET variables:
sql: urlencoded sql with positional arguments
params: JSON encoded parameter values
duration: time for SQL to execute passed in from toolbar just for redisplay
hash: the hash of (secret + sql + params) for tamper checking
"""
from debug_toolbar.panels.sql import reformat_sql
sql = request.GET.get('sql', '')
params = request.GET.get('params', '')
hash = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest()
if hash != request.GET.get('hash', ''):
return HttpResponseBadRequest('Tamper alert') # SQL Tampering alert
if sql.lower().strip().startswith('select'):
params = simplejson.loads(params)
cursor = connection.cursor()
cursor.execute(sql, params)
headers = [d[0] for d in cursor.description]
result = cursor.fetchall()
cursor.close()
context = {
'result': result,
'sql': reformat_sql(cursor.db.ops.last_executed_query(cursor, sql, params)),
'duration': request.GET.get('duration', 0.0),
'headers': headers,
}
return render_to_response('debug_toolbar/panels/sql_select.html', context)
raise InvalidSQLError("Only 'select' queries are allowed.")
def sql_explain(request):
"""
Returns the output of the SQL EXPLAIN on the given query.
Expected GET variables:
sql: urlencoded sql with positional arguments
params: JSON encoded parameter values
duration: time for SQL to execute passed in from toolbar just for redisplay
hash: the hash of (secret + sql + params) for tamper checking
"""
from debug_toolbar.panels.sql import reformat_sql
sql = request.GET.get('sql', '')
params = request.GET.get('params', '')
hash = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest()
if hash != request.GET.get('hash', ''):
return HttpResponseBadRequest('Tamper alert') # SQL Tampering alert
if sql.lower().strip().startswith('select'):
params = simplejson.loads(params)
cursor = connection.cursor()
if settings.DATABASE_ENGINE == "sqlite3":
# SQLite's EXPLAIN dumps the low-level opcodes generated for a query;
# EXPLAIN QUERY PLAN dumps a more human-readable summary
# See http://www.sqlite.org/lang_explain.html for details
cursor.execute("EXPLAIN QUERY PLAN %s" % (sql,), params)
else:
cursor.execute("EXPLAIN %s" % (sql,), params)
headers = [d[0] for d in cursor.description]
result = cursor.fetchall()
cursor.close()
context = {
'result': result,
'sql': reformat_sql(cursor.db.ops.last_executed_query(cursor, sql, params)),
'duration': request.GET.get('duration', 0.0),
'headers': headers,
}
return render_to_response('debug_toolbar/panels/sql_explain.html', context)
raise InvalidSQLError("Only 'select' queries are allowed.")
def sql_profile(request):
"""
Returns the output of running the SQL and getting the profiling statistics.
Expected GET variables:
sql: urlencoded sql with positional arguments
params: JSON encoded parameter values
duration: time for SQL to execute passed in from toolbar just for redisplay
hash: the hash of (secret + sql + params) for tamper checking
"""
from debug_toolbar.panels.sql import reformat_sql
sql = request.GET.get('sql', '')
params = request.GET.get('params', '')
hash = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest()
if hash != request.GET.get('hash', ''):
return HttpResponseBadRequest('Tamper alert') # SQL Tampering alert
if sql.lower().strip().startswith('select'):
params = simplejson.loads(params)
cursor = connection.cursor()
result = None
headers = None
result_error = None
try:
cursor.execute("SET PROFILING=1") # Enable profiling
cursor.execute(sql, params) # Execute SELECT
cursor.execute("SET PROFILING=0") # Disable profiling
# The Query ID should always be 1 here but I'll subselect to get the last one just in case...
cursor.execute("SELECT * FROM information_schema.profiling WHERE query_id=(SELECT query_id FROM information_schema.profiling ORDER BY query_id DESC LIMIT 1)")
headers = [d[0] for d in cursor.description]
result = cursor.fetchall()
except:
result_error = "Profiling is either not available or not supported by your database."
cursor.close()
context = {
'result': result,
'result_error': result_error,
'sql': reformat_sql(cursor.db.ops.last_executed_query(cursor, sql, params)),
'duration': request.GET.get('duration', 0.0),
'headers': headers,
}
return render_to_response('debug_toolbar/panels/sql_profile.html', context)
raise InvalidSQLError("Only 'select' queries are allowed.")
def template_source(request):
"""
Return the source of a template, syntax-highlighted by Pygments if
it's available.
"""
from django.template import TemplateDoesNotExist
from django.utils.safestring import mark_safe
from django.conf import settings
template_name = request.GET.get('template', None)
if template_name is None:
return HttpResponseBadRequest('"template" key is required')
try: # Django 1.2 ...
from django.template.loader import find_template_loader, make_origin
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
for loader in loaders:
try:
source, display_name = loader.load_template_source(template_name)
origin = make_origin(display_name, loader, template_name, settings.TEMPLATE_DIRS)
break
except TemplateDoesNotExist:
source = "Template Does Not Exist: %s" % (template_name,)
except (ImportError, AttributeError): # Django 1.1 ...
from django.template.loader import find_template_source
source, origin = find_template_source(template_name)
try:
from pygments import highlight
from pygments.lexers import HtmlDjangoLexer
from pygments.formatters import HtmlFormatter
source = highlight(source, HtmlDjangoLexer(), HtmlFormatter())
source = mark_safe(source)
source.pygmentized = True
except ImportError:
pass
return render_to_response('debug_toolbar/panels/template_source.html', {
'source': source,
'template_name': template_name
})
| apache-2.0 |
liyy7/scrapy | scrapy/spiders/crawl.py | 56 | 3521 | """
This module implements CrawlSpider, the recommended spider to use for
scraping typical web sites that require crawling pages.
See documentation in docs/topics/spiders.rst
"""
import copy
from scrapy.http import Request, HtmlResponse
from scrapy.utils.spider import iterate_spider_output
from scrapy.spiders import Spider
def identity(x):
return x
class Rule(object):
def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity):
self.link_extractor = link_extractor
self.callback = callback
self.cb_kwargs = cb_kwargs or {}
self.process_links = process_links
self.process_request = process_request
if follow is None:
self.follow = False if callback else True
else:
self.follow = follow
class CrawlSpider(Spider):
rules = ()
def __init__(self, *a, **kw):
super(CrawlSpider, self).__init__(*a, **kw)
self._compile_rules()
def parse(self, response):
return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True)
def parse_start_url(self, response):
return []
def process_results(self, response, results):
return results
def _requests_to_follow(self, response):
if not isinstance(response, HtmlResponse):
return
seen = set()
for n, rule in enumerate(self._rules):
links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = Request(url=link.url, callback=self._response_downloaded)
r.meta.update(rule=n, link_text=link.text)
yield rule.process_request(r)
def _response_downloaded(self, response):
rule = self._rules[response.meta['rule']]
return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow)
def _parse_response(self, response, callback, cb_kwargs, follow=True):
if callback:
cb_res = callback(response, **cb_kwargs) or ()
cb_res = self.process_results(response, cb_res)
for requests_or_item in iterate_spider_output(cb_res):
yield requests_or_item
if follow and self._follow_links:
for request_or_item in self._requests_to_follow(response):
yield request_or_item
def _compile_rules(self):
def get_method(method):
if callable(method):
return method
elif isinstance(method, basestring):
return getattr(self, method, None)
self._rules = [copy.copy(r) for r in self.rules]
for rule in self._rules:
rule.callback = get_method(rule.callback)
rule.process_links = get_method(rule.process_links)
rule.process_request = get_method(rule.process_request)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(CrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
spider._follow_links = crawler.settings.getbool(
'CRAWLSPIDER_FOLLOW_LINKS', True)
return spider
def set_crawler(self, crawler):
super(CrawlSpider, self).set_crawler(crawler)
self._follow_links = crawler.settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True)
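# A minimal usage sketch (illustrative only, not part of this module): it assumes
# scrapy's LinkExtractor; the domain, URL patterns and callback name are invented.
#
#   from scrapy.linkextractors import LinkExtractor
#
#   class ExampleSpider(CrawlSpider):
#       name = 'example'
#       start_urls = ['http://example.com/']
#       rules = (
#           # pages matching /item/ are parsed by parse_item (follow defaults to False)
#           Rule(LinkExtractor(allow=r'/item/'), callback='parse_item'),
#           # category pages have no callback, so follow defaults to True
#           Rule(LinkExtractor(allow=r'/category/')),
#       )
#
#       def parse_item(self, response):
#           yield {'url': response.url}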
| bsd-3-clause |
cunnie/bin | VMTranslator.py | 1 | 12282 | #!/usr/local/bin/python2.7
import fileinput
import glob
import os
import sys
import datetime
class Cmd_arithmetic:
def __init__(self, code):
self.code = code
self.counter = -1
def generate(self):
self.counter += 1
return self.code.replace('%d', str(self.counter))
class Cmd_push:
cmd_push_asm = """ @%s // segment
D=%i
@%d // index
    A=D+A // A now holds the address to pull from
D=M // D holds the value we're gonna push onto SP
@SP
A=M
M=D
@SP // increment SP
M=M+1
"""
cmd_push_asm_constant = """ @%d
D=A
@SP
A=M
M=D
@SP // increment SP
M=M+1
"""
def __init__(self, segment='constant', index='0'):
self.code = self.cmd_push_asm
self.segment = segment
self.index = index
self.indirect = 'M'
if self.segment == 'local':
self.segment = 'LCL'
elif self.segment == 'argument':
self.segment = 'ARG'
elif self.segment == 'this':
self.segment = 'THIS'
elif self.segment == 'that':
self.segment = 'THAT'
elif self.segment == 'temp':
self.indirect = 'A'
self.segment = '5'
elif self.segment == 'pointer':
self.indirect = 'A'
self.segment = '3'
elif self.segment == 'static':
self.segment = '15'
elif self.segment == 'constant':
self.code = self.cmd_push_asm_constant
def generate(self):
return self.code.replace('%s', self.segment).replace('%d', self.index).replace('%i', self.indirect)
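# Illustrative note (hypothetical call, not part of the original translator):
# generate() fills the %s/%d/%i placeholders in the templates above, so
#   Cmd_push(segment='local', index='2').generate()
# emits Hack assembly that loads the LCL base pointer (D=M), adds the index 2,
# dereferences that address and pushes the value onto the stack, whereas a
# 'constant' push skips the dereference and pushes the literal value itself.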
class Cmd_pop:
code = """ @{segment} // segment
D={a_or_m}
@{index} // index
D=D+A // D holds the address to which to pop
@SP
A=M
M=D // top of the stack has the address to which to pop
@SP
A=M-1
D=M // D holds the value we're going to pop
@SP
A=M
A=M // climbing the indirections
M=D // Done!
@SP // decrement SP
M=M-1
"""
def __init__(self, segment='constant', index='0'):
a_or_m = 'M'
if segment == 'local':
segment = 'LCL'
elif segment == 'argument':
segment = 'ARG'
elif segment == 'this':
segment = 'THIS'
elif segment == 'that':
segment = 'THAT'
elif segment == 'temp':
a_or_m = 'A'
segment = '5'
elif segment == 'pointer':
a_or_m = 'A'
segment = '3'
elif segment == 'static':
segment = '15'
self.named_placeholders = {
'segment': segment,
'index': index,
'a_or_m': a_or_m,
}
def generate(self):
return Cmd_pop.code.format(**self.named_placeholders)
class Cmd_goto:
def __init__(self, code, label="BRIAN_YOUR_CODE_HAS_A_MISTAKE"):
self.code = code
self.label = label
def generate(self):
return self.code.replace('%s', self.label)
class Cmd_function:
# (f) // Declare a label for the function entry
# // repeat k times: k == number of local variables
# PUSH 0
push_0 = """ @SP
D=A // D is 0, we take advantage of the fact that @SP == 0
A=M
AM=D // *SP = 0
M=M+1 // SP++
"""
def __init__(self, name="BRIAN_YOUR_CODE_HAS_A_MISTAKE", num_args=0):
self.name = name
self.args = num_args
def generate(self):
code = "(%s)\n".replace('%s', self.name)
for arg in range(0, int(self.args)):
code += self.push_0
return code
class Cmd_return:
# R13-R15 "These predefined symbols can be used for any purpose."
code = """ // FRAME = LCL
@LCL
D=M
@R13
M=D // FRAME (LCL) is in register 13
// RET = *(FRAME-5)
@5
A=D-A
D=M // D is RET
@R14
M=D // RET is in register 14
// *ARG = pop()
@SP
M=M-1 // SP--
A=M
D=M
@ARG
A=M
M=D
// SP = ARG+1
@ARG
D=M+1
@SP
M=D
// THAT = *(FRAME-1)
@R13
AM=M-1 // FRAME--
D=M
@THAT
M=D
// THIS = *(FRAME-2)
@R13
AM=M-1 // FRAME--
D=M
@THIS
M=D
// ARG = *(FRAME-3)
@R13
AM=M-1 // FRAME--
D=M
@ARG
M=D
// LCL = *(FRAME-4)
@R13
AM=M-1 // FRAME--
D=M
@LCL
M=D
// goto RET
@R14
A=M
0;JMP
"""
def __init__(self):
return
def generate(self):
return Cmd_return.code
class Cmd_call:
nonce = 0
code = """ @{function_name}.{nonce} // push return-address
D=A
@SP
A=M
M=D // *(SP) = D
@SP
M=M+1 // SP++
@LCL // push LCL
{push_on_stack}
@ARG // push ARG
{push_on_stack}
@THIS // push THIS
{push_on_stack}
@THAT // push THAT
{push_on_stack}
@SP // ARG = SP-n-5
D=M
@{num_args}
D=D-A
@5
D=D-A
@ARG
M=D
@SP // LCL = SP
D=M
@LCL
M=D
@{function_name}
0;JMP
({function_name}.{nonce})
"""
push_on_stack = """D=M
@SP
A=M
M=D // *(SP) = D
@SP
M=M+1 // SP++"""
def __init__(self, function_name="BRIAN_YOUR_CODE_HAS_A_MISTAKE", num_args=0):
self.function_name = function_name
self.num_args = num_args
self.named_placeholders = {
'push_on_stack': Cmd_call.push_on_stack,
'function_name': self.function_name,
'num_args': self.num_args,
'nonce': Cmd_call.nonce,
}
Cmd_call.nonce += 1
def generate(self):
return Cmd_call.code.format(**self.named_placeholders)
cmd_add = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
M=M+D // Add that guy to D
"""
cmd_sub = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
M=M-D // Sub D from that guy
"""
cmd_neg = """ @SP
M=M-1
A=M
D=-M // D is value at top of the stack
M=D
@SP
M=M+1
"""
cmd_eq = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
D=M-D // Sub D from that guy
@EQ_TRUE_%d
D;JEQ
(EQ_FALSE_%d) // NOT eq, set D to false (0)
@0
D=A
@EQ_DONE_%d
0;JMP
(EQ_TRUE_%d) // eq, set D to true (-1)
D=D-1 // D is 0, set D to true (-1)
(EQ_DONE_%d)
@SP
M=M-1
A=M
M=D
@SP
M=M+1
"""
cmd_lt = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
D=M-D // Sub D from that guy
@LT_TRUE_%d
D;JLT
(LT_FALSE_%d) // NOT lt, set D to false (0)
@0
D=A
@LT_DONE_%d
0;JMP
(LT_TRUE_%d) // lt, set D to true (-1)
@0
D=A-1 // set D to true (-1)
(LT_DONE_%d)
@SP
M=M-1
A=M
M=D
@SP
M=M+1
"""
cmd_gt = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
D=M-D // Sub D from that guy
@GT_TRUE_%d
D;JGT
(GT_FALSE_%d) // NOT gt, set D to false (0)
@0
D=A
@GT_DONE_%d
0;JMP
(GT_TRUE_%d) // gt, set D to true (-1)
@0
D=A-1 // set D to true (-1)
(GT_DONE_%d)
@SP
M=M-1
A=M
M=D
@SP
M=M+1
"""
cmd_and = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
M=D&M // Add D to that guy
"""
cmd_or = """ @SP
M=M-1
A=M
D=M // D is value at top of the stack
@SP
A=M-1 // Point to next highest on stack
M=D|M // Add D to that guy
"""
cmd_not = """ @SP
M=M-1
A=M
D=!M // D is value at top of the stack
M=D
@SP
M=M+1
"""
cmd_goto = """ @%s
0;JMP
"""
cmd_if_goto = """ @SP
@SP
M=M-1
A=M
D=M // D holds the value we've popped
@%s
D;JNE
"""
def parse(line):
# strip comments
no_comments = line.split('#', 1)[0]
no_comments = no_comments.split('//', 1)[0]
tokens = no_comments.split()
return tokens
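# Illustrative checks (added for clarity; the VM lines below are made up):
# parse() strips '#' and '//' comments and tokenizes on whitespace.
assert parse('push constant 7 // comment') == ['push', 'constant', '7']
assert parse('   // a line holding only a comment') == []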
def writecode(asm_file, tokens):
if not tokens:
return
asm_file.write('//')
for token in tokens:
asm_file.write(' ' + str(token))
asm_file.write('\n')
if tokens[0] == 'add':
asm_file.write(Cmd_arithmetic(cmd_add).generate())
elif tokens[0] == 'sub':
asm_file.write(Cmd_arithmetic(cmd_sub).generate())
elif tokens[0] == 'neg':
asm_file.write(Cmd_arithmetic(cmd_neg).generate())
elif tokens[0] == 'eq':
asm_file.write(Cmd_arithmetic(cmd_eq).generate())
elif tokens[0] == 'lt':
asm_file.write(Cmd_arithmetic(cmd_lt).generate())
elif tokens[0] == 'gt':
asm_file.write(Cmd_arithmetic(cmd_gt).generate())
elif tokens[0] == 'and':
asm_file.write(Cmd_arithmetic(cmd_and).generate())
elif tokens[0] == 'or':
asm_file.write(Cmd_arithmetic(cmd_or).generate())
elif tokens[0] == 'not':
asm_file.write(Cmd_arithmetic(cmd_not).generate())
elif tokens[0] == 'push':
asm_file.write(Cmd_push(segment=tokens[1], index=tokens[2]).generate())
elif tokens[0] == 'pop':
asm_file.write(Cmd_pop(segment=tokens[1], index=tokens[2]).generate())
elif tokens[0] == 'label':
asm_file.write("(" + tokens[1] + ")\n")
elif tokens[0] == 'goto':
asm_file.write(Cmd_goto(cmd_goto, label=tokens[1]).generate())
elif tokens[0] == 'if-goto':
asm_file.write(Cmd_goto(cmd_if_goto, label=tokens[1]).generate())
elif tokens[0] == 'function':
asm_file.write(Cmd_function(name=tokens[1], num_args=tokens[2]).generate())
elif tokens[0] == 'return':
asm_file.write(Cmd_return().generate())
elif tokens[0] == 'call':
asm_file.write(Cmd_call(function_name=tokens[1], num_args=tokens[2]).generate())
else:
sys.exit(cmd_name + " I can't recognize these tokens: " + '[%s]' % ', '.join(map(str, tokens)))
def writeInit(asm_file):
asm_file.write(""" @256
D=A
@SP
M=D
""")
writecode(asm_file, parse('call Sys.init 0'))
def banner():
asm_file.write("// Brian Cunnie's output for Nand to Tetris\n")
asm_file.write(datetime.datetime.now().strftime("// Compiled: %Y-%m-%d %H:%M\n\n"))
def path_debug():
sys.stderr.write('\ndirectory: ' + directory +
'\ninput_filenames' + str(input_filenames) +
'\noutput filename: ' + asm_filename + '\n')
cmd_name = sys.argv[0].split('/')[-1]
directory = os.environ['PWD']
input_filenames = glob.glob('*.vm')
asm_filename = os.path.join(directory, os.path.basename(os.environ['PWD']) + '.asm')
sys.stderr.write('\nsys.argv: ' + str(sys.argv))
if len(sys.argv) > 2:
sys.exit(cmd_name + " error: pass me one arg, the name of the file to compile, or a directory, or no argument")
if len(sys.argv) == 2:
file_or_directory = sys.argv[1]
if os.path.isfile(file_or_directory):
directory = os.path.dirname(file_or_directory)
input_filenames = [file_or_directory]
asm_filename = file_or_directory.replace('.vm', '.asm')
elif os.path.isdir(file_or_directory):
directory = file_or_directory.rstrip('/')
input_filenames = glob.glob(os.path.join(directory, '*.vm'))
asm_filename = os.path.join(directory, os.path.basename(directory) + '.asm')
else:
sys.exit(cmd_name + " error: " + file_or_directory + " isn't a file or directory!")
path_debug()
try:
asm_file = open(asm_filename, "w")
except:
sys.exit(cmd_name + " error. I couldn't open " + asm_filename + " for writing!")
banner()
# If there's only one input file, don't bother writing bootstrap code
if len(input_filenames) > 1:
writeInit(asm_file)
try:
for line in fileinput.input(input_filenames):
writecode(asm_file, parse(line))
except:
sys.exit(cmd_name + " error. I couldn't open " + str(input_filenames) + " for reading!")
| unlicense |
ShassAro/ShassAro | Bl_project/blVirtualEnv/lib/python2.7/site-packages/django/contrib/gis/geoip/libgeoip.py | 106 | 1094 | import os
from ctypes import CDLL
from ctypes.util import find_library
from django.conf import settings
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = dict((key, getattr(settings, key))
for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
if hasattr(settings, key))
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None)
# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
lib_name = None
else:
# TODO: Is this really the library name for Windows?
lib_name = 'GeoIP'
# Getting the path to the GeoIP library.
if lib_name:
lib_path = find_library(lib_name)
if lib_path is None:
raise RuntimeError('Could not find the GeoIP library (tried "%s"). '
'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)
# Getting the C `free` for the platform.
if os.name == 'nt':
libc = CDLL('msvcrt')
else:
libc = CDLL(None)
free = libc.free
| gpl-2.0 |
jwlawson/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 6 | 10430 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
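# Illustrative toy run (hypothetical input, not part of the original script):
# build_dataset(['the', 'quick', 'fox', 'the'], 3) would yield
#   dictionary -> {'UNK': 0, 'the': 1, 'quick': 2}  (or 'fox' instead of
#                 'quick', since their counts tie)
#   data       -> [1, 2, 0, 1]   ('fox' falls outside the top n_words-1
#                 words, so it is coded as UNK = 0)
#   count[0]   -> ['UNK', 1]     (one out-of-vocabulary occurrence)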
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
venzozhang/GProject | utils/grid.py | 189 | 39708 | #!/usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import cairo
import sys
import re
import gtk
class DataRange:
def __init__(self, start = 0, end = 0, value = ''):
self.start = start
self.end = end
self.value = value
class EventString:
def __init__(self, at = 0, value = ''):
self.at = at
self.value = value
class EventFloat:
def __init__(self, at = 0, value = 0.0):
self.at = at
self.value = value
class EventInt:
def __init__(self, at = 0, value = 0.0):
self.at = at
self.value = value
def ranges_cmp(a, b):
diff = a.start - b.start
if diff < 0:
return -1
elif diff > 0:
return +1
else:
return 0
def events_cmp(a, b):
diff = a.at - b.at
if diff < 0:
return -1
elif diff > 0:
return +1
else:
return 0
class TimelineDataRange:
def __init__(self, name = ''):
self.name = name
self.ranges = []
return
def __search(self, key):
l = 0
u = len(self.ranges)-1
while l <= u:
i = int((l + u) / 2)
if key >= self.ranges[i].start and key <= self.ranges[i].end:
return i
elif key < self.ranges[i].start:
u = i - 1
else:
# key > self.ranges[i].end
l = i + 1
return - 1
def add_range(self, range):
self.ranges.append(range)
def get_all(self):
return self.ranges
def get_ranges(self, start, end):
s = self.__search(start)
e = self.__search(end)
if s == -1 and e == -1:
return []
elif s == -1:
return self.ranges[0:e + 1]
elif e == -1:
return self.ranges[s:len(self.ranges)]
else:
return self.ranges[s:e + 1]
def get_ranges_bounds(self, start, end):
s = self.__search(start)
e = self.__search(end)
if s == -1 and e == -1:
return(0, 0)
elif s == -1:
return(0, e + 1)
elif e == -1:
return(s, len(self.ranges))
else:
return(s, e + 1)
def sort(self):
self.ranges.sort(ranges_cmp)
def get_bounds(self):
if len(self.ranges) > 0:
lo = self.ranges[0].start
hi = self.ranges[len(self.ranges)-1].end
return(lo, hi)
else:
return(0, 0)
class TimelineEvent:
def __init__(self, name = ''):
self.name = name
self.events = []
def __search(self, key):
l = 0
u = len(self.events)-1
while l <= u:
i = int((l + u) / 2)
if key == self.events[i].at:
return i
elif key < self.events[i].at:
u = i - 1
else:
# key > self.events[i].at
l = i + 1
return l
def add_event(self, event):
self.events.append(event)
def get_events(self, start, end):
s = self.__search(start)
e = self.__search(end)
return self.events[s:e + 1]
def get_events_bounds(self, start, end):
s = self.__search(start)
e = self.__search(end)
return(s, e + 1)
def sort(self):
self.events.sort(events_cmp)
def get_bounds(self):
if len(self.events) > 0:
lo = self.events[0].at
hi = self.events[-1].at
return(lo, hi)
else:
return(0, 0)
class Timeline:
def __init__(self, name = ''):
self.ranges = []
self.event_str = []
self.event_int = []
self.name = name
def get_range(self, name):
for range in self.ranges:
if range.name == name:
return range
timeline = TimelineDataRange(name)
self.ranges.append(timeline)
return timeline
def get_event_str(self, name):
for event_str in self.event_str:
if event_str.name == name:
return event_str
timeline = TimelineEvent(name)
self.event_str.append(timeline)
return timeline
def get_event_int(self, name):
for event_int in self.event_int:
if event_int.name == name:
return event_int
timeline = TimelineEvent(name)
self.event_int.append(timeline)
return timeline
def get_ranges(self):
return self.ranges
def get_events_str(self):
return self.event_str
def get_events_int(self):
return self.event_int
def sort(self):
for range in self.ranges:
range.sort()
for event in self.event_int:
event.sort()
for event in self.event_str:
event.sort()
def get_bounds(self):
lo = 0
hi = 0
for range in self.ranges:
(range_lo, range_hi) = range.get_bounds()
if range_lo < lo:
lo = range_lo
if range_hi > hi:
hi = range_hi
for event_str in self.event_str:
(ev_lo, ev_hi) = event_str.get_bounds()
if ev_lo < lo:
lo = ev_lo
if ev_hi > hi:
hi = ev_hi
for event_int in self.event_int:
(ev_lo, ev_hi) = event_int.get_bounds()
if ev_lo < lo:
lo = ev_lo
if ev_hi > hi:
hi = ev_hi
return(lo, hi)
class Timelines:
def __init__(self):
self.timelines = []
def get(self, name):
for timeline in self.timelines:
if timeline.name == name:
return timeline
timeline = Timeline(name)
self.timelines.append(timeline)
return timeline
def get_all(self):
return self.timelines
def sort(self):
for timeline in self.timelines:
timeline.sort()
def get_bounds(self):
lo = 0
hi = 0
for timeline in self.timelines:
(t_lo, t_hi) = timeline.get_bounds()
if t_lo < lo:
lo = t_lo
if t_hi > hi:
hi = t_hi
return(lo, hi)
def get_all_range_values(self):
range_values = {}
for timeline in self.timelines:
for ranges in timeline.get_ranges():
for ran in ranges.get_all():
range_values[ran.value] = 1
return range_values.keys()
class Color:
def __init__(self, r = 0.0, g = 0.0, b = 0.0):
self.r = r
self.g = g
self.b = b
def set(self, r, g, b):
self.r = r
self.g = g
self.b = b
class Colors:
# XXX add more
default_colors = [Color(1, 0, 0), Color(0, 1, 0), Color(0, 0, 1), Color(1, 1, 0), Color(1, 0, 1), Color(0, 1, 1)]
def __init__(self):
self.__colors = {}
def add(self, name, color):
self.__colors[name] = color
def lookup(self, name):
if not self.__colors.has_key(name):
self.add(name, self.default_colors.pop())
return self.__colors.get(name)
class TopLegendRenderer:
def __init__(self):
self.__padding = 10
def set_padding(self, padding):
self.__padding = padding
def set_legends(self, legends, colors):
self.__legends = legends
self.__colors = colors
def layout(self, width):
self.__width = width
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
line_height = 0
total_height = self.__padding
line_used = self.__padding
for legend in self.__legends:
(t_width, t_height) = ctx.text_extents(legend)[2:4]
item_width = self.__padding + self.__padding + t_width + self.__padding
item_height = t_height + self.__padding
if item_height > line_height:
line_height = item_height
if line_used + item_width > self.__width:
line_used = self.__padding + item_width
total_height += line_height
else:
line_used += item_width
x = line_used - item_width
total_height += line_height
self.__height = total_height
def get_height(self):
return self.__height
def draw(self, ctx):
i = 0
line_height = 0
total_height = self.__padding
line_used = self.__padding
for legend in self.__legends:
(t_width, t_height) = ctx.text_extents(legend)[2:4]
item_width = self.__padding + self.__padding + t_width + self.__padding
item_height = t_height + self.__padding
if item_height > line_height:
line_height = item_height
if line_used + item_width > self.__width:
line_used = self.__padding + item_width
total_height += line_height
else:
line_used += item_width
x = line_used - item_width
ctx.rectangle(x, total_height, self.__padding, self.__padding)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(2)
ctx.stroke_preserve()
ctx.set_source_rgb(self.__colors[i].r,
self.__colors[i].g,
self.__colors[i].b)
ctx.fill()
ctx.move_to(x + self.__padding*2, total_height + t_height)
ctx.set_source_rgb(0, 0, 0)
ctx.show_text(legend)
i += 1
return
class TimelinesRenderer:
def __init__(self):
self.padding = 10
return
def get_height(self):
return self.height
def set_timelines(self, timelines, colors):
self.timelines = timelines
self.colors = colors
def set_render_range(self, start, end):
self.start = start
self.end = end
def get_data_x_start(self):
return self.padding / 2 + self.left_width + self.padding + self.right_width + self.padding / 2
def layout(self, width):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
left_width = 0
right_width = 0
left_n_lines = 0
range_n = 0
eventint_n = 0
eventstr_n = 0
for timeline in self.timelines.get_all():
left_n_lines += 1
t_width = ctx.text_extents(timeline.name)[2]
left_width = max(left_width, t_width)
for rang in timeline.get_ranges():
t_width = ctx.text_extents(rang.name)[2]
right_width = max(right_width, t_width)
range_n += 1
for events_int in timeline.get_events_int():
t_width = ctx.text_extents(events_int.name)[2]
right_width = max(right_width, t_width)
eventint_n += 1
for events_str in timeline.get_events_str():
t_width = ctx.text_extents(events_str.name)[2]
right_width = max(right_width, t_width)
eventstr_n += 1
left_height = left_n_lines * max_text_height + (left_n_lines - 1) * self.padding
right_n_lines = range_n + eventint_n + eventstr_n
right_height = (right_n_lines - 1) * self.padding + right_n_lines * max_text_height
right_data_height = (eventint_n + eventstr_n) * (max_text_height + 5) + range_n * 10
right_data_height += (right_n_lines - 1) * self.padding
height = max(left_height, right_height)
height = max(height, right_data_height)
self.left_width = left_width
self.right_width = right_width
self.max_text_height = max_text_height
self.width = width
self.height = height + self.padding
def draw_line(self, ctx, x, y, width, height):
ctx.move_to(x, y)
ctx.rel_line_to(width, height)
ctx.close_path()
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.set_line_width(1.0)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
def draw_events(self, ctx, events, x, y, width, height):
if (self.grey_background % 2) == 0:
ctx.rectangle(x, y - self.padding / 2,
width, height + self.padding)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
last_x_drawn = int(x)
(lo, hi) = events.get_events_bounds(self.start, self.end)
for event in events.events[lo:hi]:
real_x = int(x + (event.at - self.start) * width / (self.end - self.start))
if real_x > last_x_drawn + 2:
ctx.rectangle(real_x, y, 1, 1)
ctx.set_source_rgb(1, 0, 0)
ctx.stroke()
ctx.move_to(real_x, y + self.max_text_height)
ctx.set_source_rgb(0, 0, 0)
ctx.show_text(str(event.value))
last_x_drawn = real_x
self.grey_background += 1
def draw_ranges(self, ctx, ranges, x, y, width, height):
if (self.grey_background % 2) == 0:
ctx.rectangle(x, y - self.padding / 2,
width, height + self.padding)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
last_x_drawn = int(x - 1)
(lo, hi) = ranges.get_ranges_bounds(self.start, self.end)
for data_range in ranges.ranges[lo:hi]:
s = max(data_range.start, self.start)
e = min(data_range.end, self.end)
x_start = int(x + (s - self.start) * width / (self.end - self.start))
x_end = int(x + (e - self.start) * width / (self.end - self.start))
if x_end > last_x_drawn:
ctx.rectangle(x_start, y, x_end - x_start, 10)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke_preserve()
color = self.colors.lookup(data_range.value)
ctx.set_source_rgb(color.r, color.g, color.b)
ctx.fill()
last_x_drawn = x_end
self.grey_background += 1
def draw(self, ctx):
timeline_top = 0
top_y = self.padding / 2
left_x_start = self.padding / 2
left_x_end = left_x_start + self.left_width
right_x_start = left_x_end + self.padding
right_x_end = right_x_start + self.right_width
data_x_start = right_x_end + self.padding / 2
data_x_end = self.width
data_width = data_x_end - data_x_start
cur_y = top_y
self.draw_line(ctx, 0, 0, self.width, 0)
self.grey_background = 1
for timeline in self.timelines.get_all():
(y_bearing, t_width, t_height) = ctx.text_extents(timeline.name)[1:4]
ctx.move_to(left_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(timeline.name);
for events_int in timeline.get_events_int():
(y_bearing, t_width, t_height) = ctx.text_extents(events_int.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(events_int.name)
self.draw_events(ctx, events_int, data_x_start, cur_y, data_width, self.max_text_height + 5)
cur_y += self.max_text_height + 5 + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
for events_str in timeline.get_events_str():
(y_bearing, t_width, t_height) = ctx.text_extents(events_str.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(events_str.name)
self.draw_events(ctx, events_str, data_x_start, cur_y, data_width, self.max_text_height + 5)
cur_y += self.max_text_height + 5 + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
for ranges in timeline.get_ranges():
(y_bearing, t_width, t_height) = ctx.text_extents(ranges.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(ranges.name)
self.draw_ranges(ctx, ranges, data_x_start, cur_y, data_width, 10)
cur_y += self.max_text_height + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
self.draw_line(ctx, 0, cur_y - self.padding / 2,
self.width, 0)
bot_y = cur_y - self.padding / 2
self.draw_line(ctx, left_x_end + self.padding / 2, 0,
0, bot_y)
self.draw_line(ctx, right_x_end + self.padding / 2, 0,
0, bot_y)
return
class ScaleRenderer:
def __init__(self):
self.__top = 0
return
def set_bounds(self, lo, hi):
self.__lo = lo
self.__hi = hi
def get_position(self, x):
real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
return real_x
def set_top(self):
self.__top = 1
def set_bot(self):
self.__top = 0
def layout(self, width):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
# calculate scale delta
data_delta = self.__hi - self.__lo
closest = 1
while (closest*10) < data_delta:
closest *= 10
if (data_delta / closest) == 0:
delta = closest
elif(data_delta / closest) == 1:
delta = closest / 10
else:
delta = closest
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
self.__delta = delta
self.__width = width
# calculate text height
max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
self.max_text_height = max_text_height
height = max_text_height + 10
self.__height = height
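    # Worked example (illustrative only): the delta picked in layout() above is
    # the major-tick spacing. A displayed range of 3500 time units gives
    # closest = 1000 and delta = 1000, so major ticks land on multiples of 1000
    # and the subticks drawn in draw() (which uses delta / 10) fall every 100 units.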
def get_height(self):
return self.__height
def draw(self, ctx):
delta = self.__delta
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
if self.__top == 1:
s = -1
else:
s = 1
# print scale points
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1.0)
ticks = range(int(start), int(end + delta), int(delta))
for x in ticks:
real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
ctx.move_to(real_x, 0)
ctx.line_to(real_x, 5*s)
ctx.close_path()
ctx.stroke()
(t_y_bearing, t_width, t_height) = ctx.text_extents(str(x))[1:4]
if self.__top:
text_delta = t_height + t_y_bearing
else:
text_delta = -t_y_bearing
ctx.move_to(real_x - t_width / 2, (5 + 5 + text_delta)*s)
ctx.show_text(str(x))
# draw subticks
delta /= 10
if delta > 0:
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
for x in range(int(start), int(end + delta), int(delta)):
real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
ctx.move_to(real_x, 0)
ctx.line_to(real_x, 3*s)
ctx.close_path()
ctx.stroke()
class GraphicRenderer:
def __init__(self, start, end):
self.__start = float(start)
self.__end = float(end)
self.__mid_scale = ScaleRenderer()
self.__mid_scale.set_top()
self.__bot_scale = ScaleRenderer()
self.__bot_scale.set_bounds(start, end)
self.__bot_scale.set_bot()
self.__width = 1
self.__height = 1
def get_width(self):
return self.__width
def get_height(self):
return self.__height
# return x, y, width, height
def get_data_rectangle(self):
y_start = self.__top_legend.get_height()
x_start = self.__data.get_data_x_start()
return(x_start, y_start, self.__width - x_start, self.__data.get_height())
def scale_data(self, x):
x_start = self.__data.get_data_x_start()
x_scaled = x / (self.__width - x_start) * (self.__r_end - self.__r_start)
return x_scaled
# return x, y, width, height
def get_selection_rectangle(self):
y_start = self.__top_legend.get_height() + self.__data.get_height() + self.__mid_scale.get_height() + 20
y_height = self.__bot_scale.get_height() + 20
x_start = self.__bot_scale.get_position(self.__r_start)
x_end = self.__bot_scale.get_position(self.__r_end)
return(x_start, y_start, x_end - x_start, y_height)
def scale_selection(self, x):
x_scaled = x / self.__width * (self.__end - self.__start)
return x_scaled
def set_range(self, start, end):
s = min(start, end)
e = max(start, end)
start = max(self.__start, s)
end = min(self.__end, e)
self.__r_start = start
self.__r_end = end
self.__data.set_render_range(start, end)
self.__mid_scale.set_bounds(start, end)
self.layout(self.__width, self.__height)
def get_range(self):
return(self.__r_start, self.__r_end)
def set_data(self, data):
self.__data = data
def set_top_legend(self, top_legend):
self.__top_legend = top_legend
def layout(self, width, height):
self.__width = width
self.__height = height
self.__top_legend.layout(width)
top_legend_height = self.__top_legend.get_height()
self.__data.layout(width)
self.__mid_scale.layout(width - self.__data.get_data_x_start())
self.__bot_scale.layout(width)
return
def __x_pixel(self, x, width):
new_x = (x - self.__start) * width / (self.__end - self.__start)
return new_x
def draw(self, ctx):
# default background is white
ctx.save()
ctx.set_source_rgb(1, 1, 1)
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.rectangle(0, 0, self.__width, self.__height)
ctx.fill()
# top legend
ctx.save()
self.__top_legend.draw(ctx)
top_legend_height = self.__top_legend.get_height()
ctx.restore()
# separation line
ctx.move_to(0, top_legend_height)
ctx.line_to(self.__width, top_legend_height)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
# data
ctx.save()
ctx.translate(0,
top_legend_height)
self.__data.draw(ctx)
ctx.restore()
# scale below data
ctx.save()
ctx.translate(self.__data.get_data_x_start(),
top_legend_height + self.__data.get_height() + self.__mid_scale.get_height())
self.__mid_scale.draw(ctx)
ctx.restore()
height_used = top_legend_height + self.__data.get_height() + self.__mid_scale.get_height()
# separation between scale and left pane
ctx.move_to(self.__data.get_data_x_start(), height_used)
ctx.rel_line_to(0, -self.__mid_scale.get_height())
ctx.close_path()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(2)
ctx.stroke()
# separation below scale
ctx.move_to(0, height_used)
ctx.line_to(self.__width, height_used)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
select_start = self.__bot_scale.get_position(self.__r_start)
select_end = self.__bot_scale.get_position(self.__r_end)
# left connection between top scale and bottom scale
ctx.move_to(0, height_used);
ctx.line_to(self.__data.get_data_x_start(), height_used)
ctx.line_to(select_start, height_used + 20)
ctx.line_to(0, height_used + 20)
ctx.line_to(0, height_used)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke_preserve()
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
# right connection between top scale and bottom scale
ctx.move_to(self.__width, height_used)
ctx.line_to(self.__width, height_used + 20)
ctx.line_to(select_end, height_used + 20)
ctx.line_to(self.__width, height_used)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke_preserve()
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
height_used += 20
# unused area background
unused_start = self.__bot_scale.get_position(self.__r_start)
unused_end = self.__bot_scale.get_position(self.__r_end)
unused_height = self.__bot_scale.get_height() + 20
ctx.rectangle(0, height_used,
unused_start,
unused_height)
ctx.rectangle(unused_end,
height_used,
self.__width - unused_end,
unused_height)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
# border line around bottom scale
ctx.move_to(unused_end, height_used)
ctx.line_to(self.__width, height_used)
ctx.line_to(self.__width, height_used + unused_height)
ctx.line_to(0, height_used + unused_height)
ctx.line_to(0, height_used)
ctx.line_to(unused_start, height_used)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
ctx.move_to(unused_start, height_used)
ctx.line_to(unused_end, height_used)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.stroke()
# unused area dot borders
ctx.save()
ctx.move_to(max(unused_start, 2), height_used)
ctx.rel_line_to(0, unused_height)
ctx.move_to(min(unused_end, self.__width - 2), height_used)
ctx.rel_line_to(0, unused_height)
ctx.set_dash([5], 0)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()
ctx.restore()
# bottom scale
ctx.save()
ctx.translate(0, height_used)
self.__bot_scale.draw(ctx)
ctx.restore()
class GtkGraphicRenderer(gtk.DrawingArea):
def __init__(self, data):
super(GtkGraphicRenderer, self).__init__()
self.__data = data
self.__moving_left = False
self.__moving_right = False
self.__moving_both = False
self.__moving_top = False
self.__force_full_redraw = True
self.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("expose_event", self.expose)
self.connect('size-allocate', self.size_allocate)
self.connect('motion-notify-event', self.motion_notify)
self.connect('button-press-event', self.button_press)
self.connect('button-release-event', self.button_release)
def set_smaller_zoom(self):
(start, end) = self.__data.get_range()
self.__data.set_range(start, start + (end - start)*2)
self.__force_full_redraw = True
self.queue_draw()
def set_bigger_zoom(self):
(start, end) = self.__data.get_range()
self.__data.set_range(start, start + (end - start) / 2)
self.__force_full_redraw = True
self.queue_draw()
def output_png(self, filename):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.__data.get_width(),
self.__data.get_height())
        ctx = cairo.Context(surface)  # draw into the surface that is written to disk
self.__data.draw(ctx)
surface.write_to_png(filename)
def button_press(self, widget, event):
(x, y, width, height) = self.__data.get_selection_rectangle()
(d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
if event.y > y and event.y < y + height:
if abs(event.x - x) < 5:
self.__moving_left = True
return True
if abs(event.x - (x + width)) < 5:
self.__moving_right = True
return True
if event.x > x and event.x < x + width:
self.__moving_both = True
self.__moving_both_start = event.x
self.__moving_both_cur = event.x
return True
if event.y > d_y and event.y < (d_y + d_height):
if event.x > d_x and event.x < (d_x + d_width):
self.__moving_top = True
self.__moving_top_start = event.x
self.__moving_top_cur = event.x
return True
return False
def button_release(self, widget, event):
if self.__moving_left:
self.__moving_left = False
left = self.__data.scale_selection(self.__moving_left_cur)
right = self.__data.get_range()[1]
self.__data.set_range(left, right)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_right:
self.__moving_right = False
right = self.__data.scale_selection(self.__moving_right_cur)
left = self.__data.get_range()[0]
self.__data.set_range(left, right)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_both:
self.__moving_both = False
delta = self.__data.scale_selection(self.__moving_both_cur - self.__moving_both_start)
(left, right) = self.__data.get_range()
self.__data.set_range(left + delta, right + delta)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_top:
self.__moving_top = False
return False
def motion_notify(self, widget, event):
(x, y, width, height) = self.__data.get_selection_rectangle()
if self.__moving_left:
if event.x <= 0:
self.__moving_left_cur = 0
elif event.x >= x + width:
self.__moving_left_cur = x + width
else:
self.__moving_left_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_right:
if event.x >= self.__width:
                self.__moving_right_cur = self.__width  # clamp the drag position to the right edge
elif event.x < x:
self.__moving_right_cur = x
else:
self.__moving_right_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_both:
cur_e = self.__width - (x + width - self.__moving_both_start)
cur_s = (self.__moving_both_start - x)
if event.x < cur_s:
self.__moving_both_cur = cur_s
elif event.x > cur_e:
self.__moving_both_cur = cur_e
else:
self.__moving_both_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_top:
self.__moving_top_cur = event.x
delta = self.__data.scale_data(self.__moving_top_start - self.__moving_top_cur)
(left, right) = self.__data.get_range()
self.__data.set_range(left + delta, right + delta)
self.__force_full_redraw = True
self.__moving_top_start = event.x
self.queue_draw()
return True
(d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
if event.y > y and event.y < y + height:
if abs(event.x - x) < 5 or abs(event.x - (x + width)) < 5:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW))
return True
if event.x > x and event.x < x + width:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
if event.y > d_y and event.y < (d_y + d_height):
if event.x > d_x and event.x < (d_x + d_width):
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
widget.window.set_cursor(None)
return False
def size_allocate(self, widget, allocation):
self.__width = allocation.width
self.__height = allocation.height
self.__data.layout(allocation.width, allocation.height)
self.__force_full_redraw = True
self.queue_draw()
def expose(self, widget, event):
if self.__force_full_redraw:
self.__buffer_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.__data.get_width(),
self.__data.get_height())
ctx = cairo.Context(self.__buffer_surface)
self.__data.draw(ctx)
self.__force_full_redraw = False
ctx = widget.window.cairo_create()
ctx.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
ctx.clip()
ctx.set_source_surface(self.__buffer_surface)
ctx.paint()
(x, y, width, height) = self.__data.get_selection_rectangle()
if self.__moving_left:
ctx.move_to(max(self.__moving_left_cur, 2), y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
if self.__moving_right:
ctx.move_to(min(self.__moving_right_cur, self.__width - 2), y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
if self.__moving_both:
delta_x = self.__moving_both_cur - self.__moving_both_start
left_x = x + delta_x
ctx.move_to(x + delta_x, y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.move_to(x + width + delta_x, y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()
return False
class MainWindow:
def __init__(self):
return
def run(self, graphic):
window = gtk.Window()
self.__window = window
window.set_default_size(200, 200)
vbox = gtk.VBox()
window.add(vbox)
render = GtkGraphicRenderer(graphic)
self.__render = render
vbox.pack_end(render, True, True, 0)
hbox = gtk.HBox()
vbox.pack_start(hbox, False, False, 0)
smaller_zoom = gtk.Button("Zoom Out")
smaller_zoom.connect("clicked", self.__set_smaller_cb)
hbox.pack_start(smaller_zoom)
bigger_zoom = gtk.Button("Zoom In")
bigger_zoom.connect("clicked", self.__set_bigger_cb)
hbox.pack_start(bigger_zoom)
output_png = gtk.Button("Output Png")
output_png.connect("clicked", self.__output_png_cb)
hbox.pack_start(output_png)
window.connect('destroy', gtk.main_quit)
window.show_all()
#gtk.bindings_activate(gtk.main_quit, 'q', 0)
gtk.main()
def __set_smaller_cb(self, widget):
self.__render.set_smaller_zoom()
def __set_bigger_cb(self, widget):
self.__render.set_bigger_zoom()
def __output_png_cb(self, widget):
dialog = gtk.FileChooserDialog("Output Png", self.__window,
gtk.FILE_CHOOSER_ACTION_SAVE, ("Save", 1))
self.__dialog = dialog
dialog.set_default_response(1)
dialog.connect("response", self.__dialog_response_cb)
dialog.show()
return
def __dialog_response_cb(self, widget, response):
if response == 1:
filename = self.__dialog.get_filename()
self.__render.output_png(filename)
widget.hide()
return
def read_data(filename):
timelines = Timelines()
colors = Colors()
fh = open(filename)
m1 = re.compile('range ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
m2 = re.compile('event-str ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+)')
m3 = re.compile('event-int ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
m4 = re.compile('color ([^ ]+) #([a-fA-F0-9]{2,2})([a-fA-F0-9]{2,2})([a-fA-F0-9]{2,2})')
for line in fh.readlines():
m = m1.match(line)
if m:
line_name = m.group(1)
timeline = timelines.get(m.group(1))
rang = timeline.get_range(m.group(2))
data_range = DataRange()
data_range.value = m.group(3)
data_range.start = int(m.group(4))
data_range.end = int(m.group(5))
rang.add_range(data_range)
continue
m = m2.match(line)
if m:
line_name = m.group(1)
timeline = timelines.get(m.group(1))
ev = timeline.get_event_str(m.group(2))
event = EventString()
event.value = m.group(3)
event.at = int(m.group(4))
ev.add_event(event)
continue
m = m3.match(line)
if m:
line_name = m.group(1)
timeline = timelines.get(m.group(1))
ev = timeline.get_event_int(m.group(2))
event = EventInt()
event.value = int(m.group(3))
event.at = int(m.group(4))
ev.add_event(event)
continue
m = m4.match(line)
if m:
r = int(m.group(2), 16)
g = int(m.group(3), 16)
b = int(m.group(4), 16)
color = Color(r / 255, g / 255, b / 255)
colors.add(m.group(1), color)
continue
timelines.sort()
return (colors, timelines)
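# Hypothetical input snippet (invented for illustration, not shipped with this
# script) in the line formats the regexes above accept:
#
#   color busy #ff0000
#   range node0 state busy 10 25
#   event-str node0 log boot 12
#   event-int node0 queue-size 3 15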
def main():
(colors, timelines) = read_data(sys.argv[1])
(lower_bound, upper_bound) = timelines.get_bounds()
graphic = GraphicRenderer(lower_bound, upper_bound)
top_legend = TopLegendRenderer()
range_values = timelines.get_all_range_values()
range_colors = []
for range_value in range_values:
range_colors.append(colors.lookup(range_value))
top_legend.set_legends(range_values,
range_colors)
graphic.set_top_legend(top_legend)
data = TimelinesRenderer()
data.set_timelines(timelines, colors)
graphic.set_data(data)
# default range
range_mid = (upper_bound - lower_bound) / 2
range_width = (upper_bound - lower_bound) / 10
range_lo = range_mid - range_width / 2
range_hi = range_mid + range_width / 2
graphic.set_range(range_lo, range_hi)
main_window = MainWindow()
main_window.run(graphic)
main()
| gpl-2.0 |
ptorrestr/t2db_worker | t2db_worker/tests/test_parser.py | 1 | 1225 | #!/usr/bin/env python3
# api.py test
import unittest
from t2db_worker.parser import statusTweet
from t2db_worker.parser import statusUser
from t2db_worker.parser import getElement
from t2db_worker.parser import getRaw
from t2db_worker.parser import ParserStatus
from t2db_worker.tests.test_api import createApiStreaming
def getOneStatus():
api = createApiStreaming()
tweetIterator = api.getStream("obama")
for tweet in tweetIterator:
status = tweet
break
return status
status = None
class TestParserStatus(unittest.TestCase):
def setUp(self):
global status
if status is None:
status = getOneStatus()
def test_getTweet(self):
ps = ParserStatus(status)
rawTweet = ps.getTweet()
self.assertIsNotNone(rawTweet)
def test_getTweetFail(self):
badStatus = {}
ps = ParserStatus(badStatus)
self.assertRaises(Exception, ps.getTweet)
def test_getUser(self):
ps = ParserStatus(status)
rawUser = ps.getUser()
self.assertIsNotNone(rawUser)
def test_getUserFail(self):
badStatus = {}
ps = ParserStatus(badStatus)
self.assertRaises(Exception, ps.getUser)
| gpl-2.0 |
lordkman/burnman | misc/benchmarks/benchmark.py | 4 | 29408 | from __future__ import absolute_import
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
import burnman
import burnman.eos.birch_murnaghan as bm
import burnman.eos.birch_murnaghan_4th as bm4
import burnman.eos.mie_grueneisen_debye as mgd
import burnman.eos.slb as slb
import burnman.eos.vinet as vinet
import matplotlib.image as mpimg
def check_birch_murnaghan():
"""
Recreates Stixrude and Lithgow-Bertelloni (2005) Figure 1, bulk and shear modulus without thermal corrections
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.844e-6,
'K_0': 259.0e9,
'Kprime_0': 4.0,
'G_0': 175.0e9,
'Gprime_0': 1.7,
'molar_mass': .0,
}
test_mineral.set_method('bm3')
pressure = np.linspace(0., 140.e9, 100)
volume = np.empty_like(pressure)
bulk_modulus = np.empty_like(pressure)
shear_modulus = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm.volume(pressure[i], test_mineral.params)
bulk_modulus[i] = bm.bulk_modulus(volume[i], test_mineral.params)
shear_modulus[i] = bm.shear_modulus_third_order(
volume[i], test_mineral.params) # third order is used for the plot we are comparing against
# compare with figure 1
plt.plot(pressure / 1.e9, bulk_modulus /
1.e9, pressure / 1.e9, shear_modulus / 1.e9)
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig1.png')
plt.imshow(fig1, extent=[0, 140, 0, 800], aspect='auto')
plt.plot(pressure / 1.e9, bulk_modulus / 1.e9,
'g+', pressure / 1.e9, shear_modulus / 1.e9, 'g+')
plt.ylim(0, 800)
plt.xlim(0, 140)
plt.xlabel("Pressure (GPa)")
plt.ylabel("Modulus (GPa)")
plt.title(
"Comparing with Figure 1 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_birch_murnaghan_4th():
"""
Recreates the formulation of the 4th order Birch-Murnaghan EOS as in Ahmad and Alkammash, 2012; Figure 1.
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 10.e-6,
'K_0': 72.7e9,
'Kprime_0': 4.14,
'Kprime_prime_0': -0.0484e-9,
}
test_mineral.set_method('bm4')
pressure = np.linspace(0., 90.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm4.volume_fourth_order(
pressure[i], test_mineral.params) / test_mineral.params.get('V_0')
# compare with figure 1
plt.plot(pressure / 1.e9, volume)
fig1 = mpimg.imread('../../burnman/data/input_figures/Ahmad.png')
plt.imshow(fig1, extent=[0., 90., .65, 1.], aspect='auto')
plt.plot(pressure / 1.e9, volume, marker='o',
color='r', linestyle='', label='BM4')
plt.legend(loc='lower left')
plt.xlim(0., 90.)
plt.ylim(.65, 1.)
plt.xlabel("Volume/V0")
plt.ylabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Ahmad et al., (2012)")
plt.show()
def check_vinet():
"""
Recreates Dewaele et al., 2006, Figure 1, fitting a Vinet EOS to Fe data
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.75e-6,
'K_0': 163.4e9,
'Kprime_0': 5.38,
}
test_mineral.set_method('vinet')
pressure = np.linspace(17.7e9, 300.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = vinet.volume(pressure[i], test_mineral.params)
# compare with figure 1
plt.plot(pressure / 1.e9, volume / 6.02e-7)
fig1 = mpimg.imread('../../burnman/data/input_figures/Dewaele.png')
plt.imshow(fig1, extent=[0., 300., 6.8, 11.8], aspect='auto')
plt.plot(pressure / 1.e9, volume / 6.02e-7, marker='o',
color='r', linestyle='', label='Vinet Fit')
plt.legend(loc='lower left')
plt.xlim(0., 300.)
plt.ylim(6.8, 11.8)
plt.ylabel("Volume (Angstroms^3/atom")
plt.xlabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Dewaele et al., (2006)")
plt.show()
def check_mgd_shim_duffy_kenichi():
"""
    Attempts to recreate Shim, Duffy and Kenichi (2002)
"""
plt.close()
# Create gold material from Table 1
gold = burnman.Mineral()
gold.params = {'name': 'gold',
'V_0': 10.22e-6,
'K_0': 167.0e9,
'Kprime_0': 5.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 1.0,
'Debye_0': 170.,
'grueneisen_0': 2.97, # this does better with gr = 2.93. Why?
'q_0': 1.0}
gold.set_method('mgd3')
# Total pressures, pulled from Table 2
ref_pressures = [
np.array([0., 3.55, 7.55, 12.06, 17.16, 22.91, 29.42, 36.77, 45.11, 54.56, 65.29, 77.50, 91.42, 107.32, 125.51, 146.38, 170.38, 198.07])]
ref_pressures.append(
np.array([4.99, 8.53, 12.53, 17.04, 22.13, 27.88, 34.38, 41.73, 50.06, 59.50, 70.22, 82.43, 96.33, 112.22, 130.40, 151.25, 175.24, 202.90]))
ref_pressures.append(
np.array([12.14, 15.69, 19.68, 24.19, 29.28, 35.03, 41.53, 48.88, 57.20, 66.64, 77.37, 89.57, 103.47, 119.35, 137.53, 158.38, 182.36, 210.02]))
ref_pressures.append(
np.array([19.30, 22.84, 26.84, 31.35, 36.44, 42.19, 48.68, 56.03, 64.35, 73.80, 84.52, 96.72, 110.62, 126.50, 144.68, 165.53, 189.51, 217.17]))
eos = mgd.MGD3()
pressures = np.empty_like(ref_pressures)
ref_dv = np.linspace(0.0, 0.34, len(pressures[0]))
ref_volumes = (1 - ref_dv) * gold.params['V_0']
T = np.array([300., 1000., 2000., 3000.])
for t in range(len(pressures)):
for i in range(len(pressures[t])):
pressures[t][i] = eos.pressure(T[t], ref_volumes[i], gold.params)
plt.plot(ref_dv, (pressures[t] / 1.e9 - ref_pressures[t]))
plt.ylim(-1, 1)
plt.ylabel("Difference in pressure (GPa)")
plt.xlabel("1-dV/V")
plt.title("Comparing with Shim, Duffy, and Kenichi (2002)")
plt.show()
def check_mgd_fei_mao_shu_hu():
"""
    Benchmark against Fei, Mao, Shu and Hu (1991)
"""
mgfeo = burnman.Mineral()
mgfeo.params = {'name': 'MgFeO',
'V_0': 11.657e-6,
'K_0': 157.0e9,
'Kprime_0': 4.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 2.0,
'Debye_0': 500.,
'grueneisen_0': 1.50,
'q_0': 1.1}
mgfeo.set_method('mgd3')
# pulled from table 1
temperatures = np.array(
[300, 300, 483, 483, 483, 590, 593, 593, 593, 700, 600, 500, 650, 600,
600, 650, 700, 737, 727, 673, 600, 543, 565, 585, 600, 628, 654, 745, 768, 747, 726, 700, 676])
volumes = np.array(
[77.418, 72.327, 74.427, 73.655, 72.595, 74.1, 73.834, 73.101, 70.845, 73.024, 72.630, 68.644, 72.969, 72.324, 71.857,
72.128, 73.283, 73.337, 72.963, 71.969, 69.894, 67.430, 67.607, 67.737, 68.204, 68.518, 68.955, 70.777, 72.921, 72.476, 72.152, 71.858, 71.473])
# change from cubic angstroms per unit cell to cubic meters per mol of
# molecules.
volumes = volumes / 1.e30 * 6.022141e23 / 4.0
ref_pressures = np.array(
[0.0, 12.23, 7.77, 9.69, 12.54, 9.21, 9.90, 11.83, 18.35, 12.68, 13.15, 25.16, 12.53, 14.01, 15.34,
14.86, 11.99, 12.08, 13.03, 15.46, 21.44, 29.98, 29.41, 29.05, 27.36, 26.38, 24.97, 19.49, 13.39, 14.48, 15.27, 15.95, 16.94])
ref_pressures = ref_pressures
pressures = np.empty_like(volumes)
eos = mgd.MGD3()
for i in range(len(temperatures)):
pressures[i] = eos.pressure(temperatures[i], volumes[i], mgfeo.params)
plt.scatter(temperatures, (pressures / 1.e9 - ref_pressures))
plt.ylim(-1, 1)
plt.title("Comparing with Fei, Mao, Shu, and Hu (1991)")
plt.xlabel("Temperature (K) at various volumes")
plt.ylabel("Difference in total pressure (GPa)")
plt.show()
def check_slb_fig3():
"""
Benchmark grueneisen parameter against figure 3 of Stixrude and Lithgow-Bertelloni (2005b)
"""
perovskite = burnman.Mineral()
    perovskite.params = {'name': 'perovskite',
'V_0': burnman.tools.molar_volume_from_unit_cell_volume(168.27, 4.),
'grueneisen_0': 1.63,
'q_0': 1.7}
volume = np.linspace(0.6, 1.0, 100)
grueneisen_slb = np.empty_like(volume)
grueneisen_mgd = np.empty_like(volume)
q_slb = np.empty_like(volume)
q_mgd = np.empty_like(volume)
slb_eos = slb.SLB2()
mgd_eos = mgd.MGD2()
# calculate its thermal properties
for i in range(len(volume)):
# call with dummy pressure and temperatures, they do not change it
grueneisen_slb[i] = slb_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
grueneisen_mgd[i] = mgd_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
q_slb[i] = slb_eos.volume_dependent_q(
1. / volume[i], perovskite.params)
q_mgd[i] = perovskite.params['q_0']
    # compare with figure 3
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig3.png')
plt.imshow(fig1, extent=[0.6, 1.0, 0.35, 2.0], aspect='auto')
plt.plot(volume, grueneisen_slb, 'g+', volume, grueneisen_mgd, 'b+')
plt.plot(volume, q_slb, 'g+', volume, q_mgd, 'b+')
plt.xlim(0.6, 1.0)
plt.ylim(0.35, 2.0)
plt.ylabel("Grueneisen parameter")
plt.xlabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 3 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_slb_fig7_txt():
"""
Calculates all values for forsterite and benchmarks with values from Stixrude and Lithgow-Bertelloni (personal communication)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.603e-6,
'K_0': 127.955e9,
'Kprime_0': 4.232,
'G_0': 81.6e9,
'Gprime_0': 1.4,
'molar_mass': .140695,
'n': 7.0,
'Debye_0': 809.183,
'grueneisen_0': .993,
'q_0': 2.093,
'F_0': -1.1406e5,
'eta_s_0': 2.364}
forsterite.set_method('slb3')
data = np.loadtxt(
"../../burnman/data/input_minphys/slb_fig7.txt", skiprows=2)
temperature = np.array(data[:, 2])
pressure = np.array(data[:, 0])
rho = np.array(data[:, 3])
rho_comp = np.empty_like(rho)
Kt = np.array(data[:, 4])
Kt_comp = np.empty_like(Kt)
Ks = np.array(data[:, 5])
Ks_comp = np.empty_like(Ks)
G = np.array(data[:, 6])
G_comp = np.empty_like(G)
VB = np.array(data[:, 7])
VB_comp = np.empty_like(VB)
VS = np.array(data[:, 8])
VS_comp = np.empty_like(VS)
VP = np.array(data[:, 9])
VP_comp = np.empty_like(VP)
vol = np.array(data[:, 10])
vol_comp = np.empty_like(vol)
alpha = np.array(data[:, 11])
alpha_comp = np.empty_like(alpha)
Cp = np.array(data[:, 12])
Cp_comp = np.empty_like(Cp)
gr = np.array(data[:, 13])
gr_comp = np.empty_like(gr)
gibbs = np.array(data[:, 14])
gibbs_comp = np.empty_like(gibbs)
entropy = np.array(data[:, 15])
entropy_comp = np.empty_like(gibbs)
enthalpy = np.array(data[:, 16])
enthalpy_comp = np.empty_like(gibbs)
for i in range(len(temperature)):
forsterite.set_state(pressure[i], temperature[i])
rho_comp[i] = 100. * (forsterite.density / 1000. - rho[i]) / rho[i]
Kt_comp[i] = 100. * (
forsterite.isothermal_bulk_modulus / 1.e9 - Kt[i]) / Kt[i]
Ks_comp[i] = 100. * (
forsterite.adiabatic_bulk_modulus / 1.e9 - Ks[i]) / Ks[i]
G_comp[i] = 100. * (forsterite.shear_modulus / 1.e9 - G[i]) / G[i]
VB_comp[i] = 100. * (forsterite.v_phi / 1000. - VB[i]) / VB[i]
VS_comp[i] = 100. * (forsterite.v_s / 1000. - VS[i]) / VS[i]
VP_comp[i] = 100. * (forsterite.v_p / 1000. - VP[i]) / VP[i]
vol_comp[i] = 100. * (forsterite.molar_volume * 1.e6 - vol[i]) / vol[i]
alpha_comp[i] = 100. * (
forsterite.thermal_expansivity / 1.e-5 - alpha[i]) / (alpha[-1])
Cp_comp[i] = 100. * (forsterite.heat_capacity_p /
forsterite.params['molar_mass'] / 1000. - Cp[i]) / (Cp[-1])
gr_comp[i] = (forsterite.grueneisen_parameter - gr[i]) / gr[i]
gibbs_comp[i] = 100. * (
forsterite.molar_gibbs / 1.e6 - gibbs[i]) / gibbs[i]
entropy_comp[i] = 100. * (
forsterite.molar_entropy - entropy[i]) / (entropy[i] if entropy[i] != 0. else 1.)
enthalpy_comp[i] = 100. * (
forsterite.molar_enthalpy / 1.e6 - enthalpy[i]) / (enthalpy[i] if enthalpy[i] != 0. else 1.)
plt.plot(temperature, rho_comp, label=r'$\rho$')
    plt.plot(temperature, Kt_comp, label=r'$K_T$')
    plt.plot(temperature, Ks_comp, label=r'$K_S$')
plt.plot(temperature, G_comp, label=r'$G$')
plt.plot(temperature, VS_comp, label=r'$V_S$')
plt.plot(temperature, VP_comp, label=r'$V_P$')
plt.plot(temperature, VB_comp, label=r'$V_\phi$')
plt.plot(temperature, vol_comp, label=r'$V$')
plt.plot(temperature, alpha_comp, label=r'$\alpha$')
plt.plot(temperature, Cp_comp, label=r'$c_P$')
plt.plot(temperature, gr_comp, label=r'$\gamma$')
plt.plot(temperature, gibbs_comp, label=r'Gibbs')
plt.plot(temperature, enthalpy_comp, label=r'Enthalpy')
plt.plot(temperature, entropy_comp, label=r'Entropy')
plt.xlim([0, 2750])
plt.ylim([-0.001, 0.001])
plt.xticks([0, 800, 1600, 2200])
plt.xlabel("Temperature (K)")
plt.ylabel("Percent Difference from HeFESTo")
plt.legend(loc="center right")
# plt.savefig("output_figures/benchmark1.pdf")
plt.show()
def check_slb_fig7():
"""
Calculates all values for forsterite and benchmarks with figure 7 from Stixrude and Lithgow-Bertelloni (2005)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.60e-6,
'K_0': 128.0e9,
'Kprime_0': 4.2,
'G_0': 82.0e9,
'Gprime_0': 1.4,
'n': 7.0,
'molar_mass': .140695,
'Debye_0': 809.,
'grueneisen_0': .99,
'q_0': 2.1,
'eta_s_0': 2.4}
forsterite.set_method('slb3')
temperature = np.linspace(0., 2000., 200)
volume = np.empty_like(temperature)
bulk_modulus = np.empty_like(temperature)
shear_modulus = np.empty_like(temperature)
heat_capacity = np.empty_like(temperature)
pressure = 1.0e5
forsterite.set_state(pressure, 300.)
Ks_0 = forsterite.adiabatic_bulk_modulus
# calculate its thermal properties
for i in range(len(temperature)):
forsterite.set_state(pressure, temperature[i])
volume[i] = forsterite.molar_volume / forsterite.params['V_0']
bulk_modulus[i] = forsterite.adiabatic_bulk_modulus / Ks_0
shear_modulus[i] = forsterite.shear_modulus / forsterite.params['G_0']
heat_capacity[i] = forsterite.heat_capacity_p / forsterite.params['n']
# compare with figure 7
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_vol.png')
plt.imshow(fig1, extent=[0, 2200, 0.99, 1.08], aspect='auto')
plt.plot(temperature, volume, 'g+')
plt.ylim(0.99, 1.08)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_Cp.png')
plt.imshow(fig1, extent=[0, 2200, 0., 70.], aspect='auto')
plt.plot(temperature, heat_capacity, 'g+')
plt.ylim(0, 70)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Heat Capacity Cp")
plt.title(
"Comparing with adiabatic_bulk_modulus7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_K.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, bulk_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Bulk Modulus K/K0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_G.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, shear_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Shear Modulus G/G0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_averaging():
"""
    Reproduce Figure 1a from Watt et al. (1976) to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# MgO bulk and shear moduli taken from Landolt-Boernstein
# - Group III Condensed Matter Volume 41B, 1999, pp 1-3
K2 = 152. # Bulk modulus, GPa
G2 = 155. # Shear modulus, GPa
# AgCl bulk and shear moduli (estimated from plot)
G1 = G2 * 0.07
K1 = K2 * 0.27
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a1.png')
plt.imshow(fig, extent=[0, 1.0, 0.25, 1.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus / K2, 'g-')
plt.plot(volumes, r_bulk_modulus / K2, 'g-')
plt.plot(volumes, vrh_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsu_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsl_bulk_modulus / K2, 'g-')
plt.ylim(0.25, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a2.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 1.0], aspect='auto')
plt.plot(volumes, v_shear_modulus / G2, 'g-')
plt.plot(volumes, r_shear_modulus / G2, 'g-')
plt.plot(volumes, vrh_shear_modulus / G2, 'g-')
plt.plot(volumes, hsu_shear_modulus / G2, 'g-')
plt.plot(volumes, hsl_shear_modulus / G2, 'g-')
plt.ylim(0.0, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
# also check against some numerical values given in Berryman (1995) for
# porous glass
K = 46.3
G = 30.5
# the value for porosity=0.46 in the table appears to be a typo. Remove
# it here
porosity = np.array(
[0.0, 0.05, 0.11, 0.13, 0.25, 0.33, 0.36, 0.39, 0.44, 0.50, 0.70])
berryman_bulk_modulus = np.array(
[46.3, 41.6, 36.6, 35.1, 27.0, 22.5, 21.0, 19.6, 17.3, 14.8, 7.7]) # 15.5 probably a typo?
hsu_bulk_modulus_vals = np.empty_like(porosity)
for i in range(len(porosity)):
hsu_bulk_modulus_vals[i] = hashin_shtrikman_upper.average_bulk_moduli(
[porosity[i], 1.0 - porosity[i]], [0.0, K], [0.0, G])
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [0.0, K], [0.0, G])
fig = mpimg.imread('../../burnman/data/input_figures/berryman_fig4.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 50.0], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.scatter(porosity, hsu_bulk_modulus_vals, c='r')
plt.scatter(porosity, berryman_bulk_modulus, c='y')
plt.ylim(0.0, 50.0)
plt.xlim(0, 1.0)
plt.xlabel("Porosity")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 4 of Berryman (1995)")
plt.show()
def check_averaging_2():
"""
Reproduce Figure 1 from Hashin and Shtrikman (1963) to check the
Hashin-Shtrikman bounds for an elastic composite
"""
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# These values are from Hashin and Shtrikman (1963)
K1 = 25.0
K2 = 60.7
G1 = 11.5
G2 = 41.8
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig1_K.png')
plt.imshow(fig, extent=[0, 1.0, 1.1, K2 + 0.3], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.ylim(K1, K2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Hashin and Shtrikman (1963)")
plt.show()
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig2_G.png')
plt.imshow(fig, extent=[0, 1.0, 0.3, G2], aspect='auto')
plt.plot(volumes, hsu_shear_modulus, 'g-')
plt.plot(volumes, hsl_shear_modulus, 'g-')
plt.ylim(G1, G2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 2 of Hashin and Shtrikman (1963)")
plt.show()
def check_averaging_3():
"""
Reproduce Figure 3 from Avseth et al. (2010) to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
hs_av_bulk_modulus = np.empty_like(volumes)
hs_av_shear_modulus = np.empty_like(volumes)
# Quartz bulk and shear moduli
K2 = 37.
G2 = 45.
# Fluid bulk and shear moduli
G1 = 0.00001
K1 = 2.35
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hs_av_bulk_modulus[i] = 0.5 * hsl_bulk_modulus[
i] + 0.5 * hsu_bulk_modulus[i]
hs_av_shear_modulus[i] = 0.5 * hsl_shear_modulus[
i] + 0.5 * hsu_shear_modulus[i]
fig = mpimg.imread(
'../../burnman/data/input_figures/Avseth_et_al_2010_fig3_K.png')
plt.imshow(fig, extent=[0, 1.0, 0., 40.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus, 'g-')
plt.plot(volumes, r_bulk_modulus, 'g-')
plt.plot(volumes, vrh_bulk_modulus, 'g-')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.plot(volumes, hs_av_bulk_modulus, 'g-')
plt.ylim(0., 40.00)
plt.xlim(0., 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 3 of Avseth et al., 2010")
plt.show()
if __name__ == "__main__":
check_averaging()
check_averaging_2()
check_averaging_3()
check_birch_murnaghan()
check_birch_murnaghan_4th()
check_vinet()
check_slb_fig7()
check_slb_fig3()
check_mgd_shim_duffy_kenichi()
check_mgd_fei_mao_shu_hu()
check_slb_fig7_txt()
| gpl-2.0 |
klim-iv/phantomjs-qt5 | src/qt/qtbase/util/local_database/enumdata.py | 3 | 38516 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
# language_list and country_list reflect the current values of enums in qlocale.h
# If new xml language files are available in CLDR, these languages and countries
# need to be *appended* to this list.
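# For example, appending a new language means adding the next unused numeric key at the
# *end* of language_list (hypothetical entry, shown only to illustrate the shape):
#     312 : [ "SomeNewLanguage", "xx" ],
# so that the numeric values already mirrored in qlocale.h keep their meaning.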
language_list = {
0 : [ "AnyLanguage", " " ],
1 : [ "C", " " ],
2 : [ "Abkhazian", "ab" ],
3 : [ "Oromo", "om" ], # macrolanguage
4 : [ "Afar", "aa" ],
5 : [ "Afrikaans", "af" ],
6 : [ "Albanian", "sq" ], # macrolanguage
7 : [ "Amharic", "am" ],
8 : [ "Arabic", "ar" ], # macrolanguage
9 : [ "Armenian", "hy" ],
10 : [ "Assamese", "as" ],
11 : [ "Aymara", "ay" ], # macrolanguage
12 : [ "Azerbaijani", "az" ], # macrolanguage
13 : [ "Bashkir", "ba" ],
14 : [ "Basque", "eu" ],
15 : [ "Bengali", "bn" ],
16 : [ "Dzongkha", "dz" ],
17 : [ "Bihari", "bh" ],
18 : [ "Bislama", "bi" ],
19 : [ "Breton", "br" ],
20 : [ "Bulgarian", "bg" ],
21 : [ "Burmese", "my" ],
22 : [ "Belarusian", "be" ],
23 : [ "Khmer", "km" ],
24 : [ "Catalan", "ca" ],
25 : [ "Chinese", "zh" ], # macrolanguage
26 : [ "Corsican", "co" ],
27 : [ "Croatian", "hr" ],
28 : [ "Czech", "cs" ],
29 : [ "Danish", "da" ],
30 : [ "Dutch", "nl" ],
31 : [ "English", "en" ],
32 : [ "Esperanto", "eo" ],
33 : [ "Estonian", "et" ], # macrolanguage
34 : [ "Faroese", "fo" ],
35 : [ "Fijian", "fj" ],
36 : [ "Finnish", "fi" ],
37 : [ "French", "fr" ],
38 : [ "Western Frisian", "fy" ],
39 : [ "Gaelic", "gd" ],
40 : [ "Galician", "gl" ],
41 : [ "Georgian", "ka" ],
42 : [ "German", "de" ],
43 : [ "Greek", "el" ],
44 : [ "Greenlandic", "kl" ],
45 : [ "Guarani", "gn" ], # macrolanguage
46 : [ "Gujarati", "gu" ],
47 : [ "Hausa", "ha" ],
48 : [ "Hebrew", "he" ],
49 : [ "Hindi", "hi" ],
50 : [ "Hungarian", "hu" ],
51 : [ "Icelandic", "is" ],
52 : [ "Indonesian", "id" ],
53 : [ "Interlingua", "ia" ],
54 : [ "Interlingue", "ie" ],
55 : [ "Inuktitut", "iu" ], # macrolanguage
56 : [ "Inupiak", "ik" ], # macrolanguage
57 : [ "Irish", "ga" ],
58 : [ "Italian", "it" ],
59 : [ "Japanese", "ja" ],
60 : [ "Javanese", "jv" ],
61 : [ "Kannada", "kn" ],
62 : [ "Kashmiri", "ks" ],
63 : [ "Kazakh", "kk" ],
64 : [ "Kinyarwanda", "rw" ],
65 : [ "Kirghiz", "ky" ],
66 : [ "Korean", "ko" ],
67 : [ "Kurdish", "ku" ], # macrolanguage
68 : [ "Rundi", "rn" ],
69 : [ "Lao", "lo" ],
70 : [ "Latin", "la" ],
71 : [ "Latvian", "lv" ], # macrolanguage
72 : [ "Lingala", "ln" ],
73 : [ "Lithuanian", "lt" ],
74 : [ "Macedonian", "mk" ],
75 : [ "Malagasy", "mg" ], # macrolanguage
76 : [ "Malay", "ms" ], # macrolanguage
77 : [ "Malayalam", "ml" ],
78 : [ "Maltese", "mt" ],
79 : [ "Maori", "mi" ],
80 : [ "Marathi", "mr" ],
81 : [ "Marshallese", "mh" ],
82 : [ "Mongolian", "mn" ], # macrolanguage
83 : [ "Nauru", "na" ],
84 : [ "Nepali", "ne" ], # macrolanguage
85 : [ "NorwegianBokmal", "nb" ],
86 : [ "Occitan", "oc" ],
87 : [ "Oriya", "or" ], # macrolanguage
88 : [ "Pashto", "ps" ], # macrolanguage
89 : [ "Persian", "fa" ], # macrolanguage
90 : [ "Polish", "pl" ],
91 : [ "Portuguese", "pt" ],
92 : [ "Punjabi", "pa" ],
93 : [ "Quechua", "qu" ], # macrolanguage
94 : [ "Romansh", "rm" ],
95 : [ "Romanian", "ro" ],
96 : [ "Russian", "ru" ],
97 : [ "Samoan", "sm" ],
98 : [ "Sango", "sg" ],
99 : [ "Sanskrit", "sa" ],
100 : [ "Serbian", "sr" ],
101 : [ "Ossetic", "os" ],
102 : [ "Southern Sotho", "st" ],
103 : [ "Tswana", "tn" ],
104 : [ "Shona", "sn" ],
105 : [ "Sindhi", "sd" ],
106 : [ "Sinhala", "si" ],
107 : [ "Swati", "ss" ],
108 : [ "Slovak", "sk" ],
109 : [ "Slovenian", "sl" ],
110 : [ "Somali", "so" ],
111 : [ "Spanish", "es" ],
112 : [ "Sundanese", "su" ],
113 : [ "Swahili", "sw" ], # macrolanguage
114 : [ "Swedish", "sv" ],
115 : [ "Sardinian", "sc" ], # macrolanguage
116 : [ "Tajik", "tg" ],
117 : [ "Tamil", "ta" ],
118 : [ "Tatar", "tt" ],
119 : [ "Telugu", "te" ],
120 : [ "Thai", "th" ],
121 : [ "Tibetan", "bo" ],
122 : [ "Tigrinya", "ti" ],
123 : [ "Tongan", "to" ],
124 : [ "Tsonga", "ts" ],
125 : [ "Turkish", "tr" ],
126 : [ "Turkmen", "tk" ],
127 : [ "Tahitian", "ty" ],
128 : [ "Uighur", "ug" ],
129 : [ "Ukrainian", "uk" ],
130 : [ "Urdu", "ur" ],
131 : [ "Uzbek", "uz" ], # macrolanguage
132 : [ "Vietnamese", "vi" ],
133 : [ "Volapuk", "vo" ],
134 : [ "Welsh", "cy" ],
135 : [ "Wolof", "wo" ],
136 : [ "Xhosa", "xh" ],
137 : [ "Yiddish", "yi" ], # macrolanguage
138 : [ "Yoruba", "yo" ],
139 : [ "Zhuang", "za" ], # macrolanguage
140 : [ "Zulu", "zu" ],
141 : [ "NorwegianNynorsk", "nn" ],
142 : [ "Bosnian", "bs" ],
143 : [ "Divehi", "dv" ],
144 : [ "Manx", "gv" ],
145 : [ "Cornish", "kw" ],
146 : [ "Akan", "ak" ], # macrolanguage
147 : [ "Konkani", "kok" ],
148 : [ "Ga", "gaa" ],
149 : [ "Igbo", "ig" ],
150 : [ "Kamba", "kam" ],
151 : [ "Syriac", "syr" ],
152 : [ "Blin", "byn" ],
153 : [ "Geez", "gez" ],
154 : [ "Koro", "kfo" ],
155 : [ "Sidamo", "sid" ],
156 : [ "Atsam", "cch" ],
157 : [ "Tigre", "tig" ],
158 : [ "Jju", "kaj" ],
159 : [ "Friulian", "fur" ],
160 : [ "Venda", "ve" ],
161 : [ "Ewe", "ee" ],
162 : [ "Walamo", "wal" ],
163 : [ "Hawaiian", "haw" ],
164 : [ "Tyap", "kcg" ],
165 : [ "Nyanja", "ny" ],
166 : [ "Filipino", "fil" ],
167 : [ "Swiss German", "gsw" ],
168 : [ "Sichuan Yi", "ii" ],
169 : [ "Kpelle", "kpe" ],
170 : [ "Low German", "nds" ],
171 : [ "South Ndebele", "nr" ],
172 : [ "Northern Sotho", "nso" ],
173 : [ "Northern Sami", "se" ],
174 : [ "Taroko", "trv" ],
175 : [ "Gusii", "guz" ],
176 : [ "Taita", "dav" ],
177 : [ "Fulah", "ff" ], # macrolanguage
178 : [ "Kikuyu", "ki" ],
179 : [ "Samburu", "saq" ],
180 : [ "Sena", "seh" ],
181 : [ "North Ndebele", "nd" ],
182 : [ "Rombo", "rof" ],
183 : [ "Tachelhit", "shi" ],
184 : [ "Kabyle", "kab" ],
185 : [ "Nyankole", "nyn" ],
186 : [ "Bena", "bez" ],
187 : [ "Vunjo", "vun" ],
188 : [ "Bambara", "bm" ],
189 : [ "Embu", "ebu" ],
190 : [ "Cherokee", "chr" ],
191 : [ "Morisyen", "mfe" ],
192 : [ "Makonde", "kde" ],
193 : [ "Langi", "lag" ],
194 : [ "Ganda", "lg" ],
195 : [ "Bemba", "bem" ],
196 : [ "Kabuverdianu", "kea" ],
197 : [ "Meru", "mer" ],
198 : [ "Kalenjin", "kln" ],
199 : [ "Nama", "naq" ],
200 : [ "Machame", "jmc" ],
201 : [ "Colognian", "ksh" ],
202 : [ "Masai", "mas" ],
203 : [ "Soga", "xog" ],
204 : [ "Luyia", "luy" ],
205 : [ "Asu", "asa" ],
206 : [ "Teso", "teo" ],
207 : [ "Saho", "ssy" ],
208 : [ "Koyra Chiini", "khq" ],
209 : [ "Rwa", "rwk" ],
210 : [ "Luo", "luo" ],
211 : [ "Chiga", "cgg" ],
212 : [ "Central Morocco Tamazight", "tzm" ],
213 : [ "Koyraboro Senni", "ses" ],
214 : [ "Shambala", "ksb" ],
215 : [ "Bodo", "brx" ],
216 : [ "Avaric", "av" ],
217 : [ "Chamorro", "ch" ],
218 : [ "Chechen", "ce" ],
219 : [ "Church", "cu" ], # macrolanguage
220 : [ "Chuvash", "cv" ],
221 : [ "Cree", "cr" ], # macrolanguage
222 : [ "Haitian", "ht" ],
223 : [ "Herero", "hz" ],
224 : [ "Hiri Motu", "ho" ],
225 : [ "Kanuri", "kr" ], # macrolanguage
226 : [ "Komi", "kv" ], # macrolanguage
227 : [ "Kongo", "kg" ], # macrolanguage
228 : [ "Kwanyama", "kj" ],
229 : [ "Limburgish", "li" ],
230 : [ "LubaKatanga", "lu" ],
231 : [ "Luxembourgish", "lb" ],
232 : [ "Navaho", "nv" ],
233 : [ "Ndonga", "ng" ],
234 : [ "Ojibwa", "oj" ], # macrolanguage
235 : [ "Pali", "pi" ], # macrolanguage
236 : [ "Walloon", "wa" ],
237 : [ "Aghem", "agq" ],
238 : [ "Basaa", "bas" ],
239 : [ "Zarma", "dje" ],
240 : [ "Duala", "dua" ],
241 : [ "JolaFonyi", "dyo" ],
242 : [ "Ewondo", "ewo" ],
243 : [ "Bafia", "ksf" ],
244 : [ "MakhuwaMeetto", "mgh" ],
245 : [ "Mundang", "mua" ],
246 : [ "Kwasio", "nmg" ],
247 : [ "Nuer", "nus" ],
248 : [ "Sakha", "sah" ],
249 : [ "Sangu", "sbp" ],
250 : [ "Congo Swahili", "swc" ],
251 : [ "Tasawaq", "twq" ],
252 : [ "Vai", "vai" ],
253 : [ "Walser", "wae" ],
254 : [ "Yangben", "yav" ],
255 : [ "Avestan", "ae" ],
256 : [ "Asturian", "ast" ],
257 : [ "Ngomba", "jgo" ],
258 : [ "Kako", "kkj" ],
259 : [ "Meta", "mgo" ],
260 : [ "Ngiemboon", "nnh" ],
261 : [ "Aragonese", "an" ],
262 : [ "Akkadian", "akk" ],
263 : [ "AncientEgyptian", "egy" ],
264 : [ "AncientGreek", "grc" ],
265 : [ "Aramaic", "arc" ],
266 : [ "Balinese", "ban" ],
267 : [ "Bamun", "bax" ],
268 : [ "BatakToba", "bbc" ],
269 : [ "Buginese", "bug" ],
270 : [ "Buhid", "bku" ],
271 : [ "Carian", "xcr" ],
272 : [ "Chakma", "ccp" ],
273 : [ "ClassicalMandaic", "myz" ],
274 : [ "Coptic", "cop" ],
275 : [ "Dogri", "doi" ], # macrolanguage
276 : [ "EasternCham", "cjm" ],
277 : [ "EasternKayah", "eky" ],
278 : [ "Etruscan", "ett" ],
279 : [ "Gothic", "got" ],
280 : [ "Hanunoo", "hnn" ],
281 : [ "Ingush", "inh" ],
282 : [ "LargeFloweryMiao", "hmd" ],
283 : [ "Lepcha", "lep" ],
284 : [ "Limbu", "lif" ],
285 : [ "Lisu", "lis" ],
286 : [ "Lu", "khb" ],
287 : [ "Lycian", "xlc" ],
288 : [ "Lydian", "xld" ],
289 : [ "Mandingo", "man" ], # macrolanguage
290 : [ "Manipuri", "mni" ],
291 : [ "Meroitic", "xmr" ],
292 : [ "NorthernThai", "nod" ],
293 : [ "OldIrish", "sga" ],
294 : [ "OldNorse", "non" ],
295 : [ "OldPersian", "peo" ],
296 : [ "OldTurkish", "otk" ],
297 : [ "Pahlavi", "pal" ],
298 : [ "Parthian", "xpr" ],
299 : [ "Phoenician", "phn" ],
300 : [ "PrakritLanguage", "pra" ],
301 : [ "Rejang", "rej" ],
302 : [ "Sabaean", "xsa" ],
303 : [ "Samaritan", "smp" ],
304 : [ "Santali", "sat" ],
305 : [ "Saurashtra", "saz" ],
306 : [ "Sora", "srb" ],
307 : [ "Sylheti", "syl" ],
308 : [ "Tagbanwa", "tbw" ],
309 : [ "TaiDam", "blt" ],
310 : [ "TaiNua", "tdd" ],
311 : [ "Ugaritic", "uga" ]
}
country_list = {
0 : [ "AnyCountry", "ZZ" ],
1 : [ "Afghanistan", "AF" ],
2 : [ "Albania", "AL" ],
3 : [ "Algeria", "DZ" ],
4 : [ "AmericanSamoa", "AS" ],
5 : [ "Andorra", "AD" ],
6 : [ "Angola", "AO" ],
7 : [ "Anguilla", "AI" ],
8 : [ "Antarctica", "AQ" ],
9 : [ "AntiguaAndBarbuda", "AG" ],
10 : [ "Argentina", "AR" ],
11 : [ "Armenia", "AM" ],
12 : [ "Aruba", "AW" ],
13 : [ "Australia", "AU" ],
14 : [ "Austria", "AT" ],
15 : [ "Azerbaijan", "AZ" ],
16 : [ "Bahamas", "BS" ],
17 : [ "Bahrain", "BH" ],
18 : [ "Bangladesh", "BD" ],
19 : [ "Barbados", "BB" ],
20 : [ "Belarus", "BY" ],
21 : [ "Belgium", "BE" ],
22 : [ "Belize", "BZ" ],
23 : [ "Benin", "BJ" ],
24 : [ "Bermuda", "BM" ],
25 : [ "Bhutan", "BT" ],
26 : [ "Bolivia", "BO" ],
27 : [ "BosniaAndHerzegowina", "BA" ],
28 : [ "Botswana", "BW" ],
29 : [ "BouvetIsland", "BV" ],
30 : [ "Brazil", "BR" ],
31 : [ "BritishIndianOceanTerritory", "IO" ],
32 : [ "Brunei", "BN" ],
33 : [ "Bulgaria", "BG" ],
34 : [ "BurkinaFaso", "BF" ],
35 : [ "Burundi", "BI" ],
36 : [ "Cambodia", "KH" ],
37 : [ "Cameroon", "CM" ],
38 : [ "Canada", "CA" ],
39 : [ "CapeVerde", "CV" ],
40 : [ "CaymanIslands", "KY" ],
41 : [ "CentralAfricanRepublic", "CF" ],
42 : [ "Chad", "TD" ],
43 : [ "Chile", "CL" ],
44 : [ "China", "CN" ],
45 : [ "ChristmasIsland", "CX" ],
46 : [ "CocosIslands", "CC" ],
47 : [ "Colombia", "CO" ],
48 : [ "Comoros", "KM" ],
49 : [ "CongoKinshasa", "CD" ],
50 : [ "CongoBrazzaville", "CG" ],
51 : [ "CookIslands", "CK" ],
52 : [ "CostaRica", "CR" ],
53 : [ "IvoryCoast", "CI" ],
54 : [ "Croatia", "HR" ],
55 : [ "Cuba", "CU" ],
56 : [ "Cyprus", "CY" ],
57 : [ "CzechRepublic", "CZ" ],
58 : [ "Denmark", "DK" ],
59 : [ "Djibouti", "DJ" ],
60 : [ "Dominica", "DM" ],
61 : [ "DominicanRepublic", "DO" ],
62 : [ "EastTimor", "TL" ],
63 : [ "Ecuador", "EC" ],
64 : [ "Egypt", "EG" ],
65 : [ "ElSalvador", "SV" ],
66 : [ "EquatorialGuinea", "GQ" ],
67 : [ "Eritrea", "ER" ],
68 : [ "Estonia", "EE" ],
69 : [ "Ethiopia", "ET" ],
70 : [ "FalklandIslands", "FK" ],
71 : [ "FaroeIslands", "FO" ],
72 : [ "Fiji", "FJ" ],
73 : [ "Finland", "FI" ],
74 : [ "France", "FR" ],
75 : [ "Guernsey", "GG" ],
76 : [ "FrenchGuiana", "GF" ],
77 : [ "FrenchPolynesia", "PF" ],
78 : [ "FrenchSouthernTerritories", "TF" ],
79 : [ "Gabon", "GA" ],
80 : [ "Gambia", "GM" ],
81 : [ "Georgia", "GE" ],
82 : [ "Germany", "DE" ],
83 : [ "Ghana", "GH" ],
84 : [ "Gibraltar", "GI" ],
85 : [ "Greece", "GR" ],
86 : [ "Greenland", "GL" ],
87 : [ "Grenada", "GD" ],
88 : [ "Guadeloupe", "GP" ],
89 : [ "Guam", "GU" ],
90 : [ "Guatemala", "GT" ],
91 : [ "Guinea", "GN" ],
92 : [ "GuineaBissau", "GW" ],
93 : [ "Guyana", "GY" ],
94 : [ "Haiti", "HT" ],
95 : [ "HeardAndMcDonaldIslands", "HM" ],
96 : [ "Honduras", "HN" ],
97 : [ "HongKong", "HK" ],
98 : [ "Hungary", "HU" ],
99 : [ "Iceland", "IS" ],
100 : [ "India", "IN" ],
101 : [ "Indonesia", "ID" ],
102 : [ "Iran", "IR" ],
103 : [ "Iraq", "IQ" ],
104 : [ "Ireland", "IE" ],
105 : [ "Israel", "IL" ],
106 : [ "Italy", "IT" ],
107 : [ "Jamaica", "JM" ],
108 : [ "Japan", "JP" ],
109 : [ "Jordan", "JO" ],
110 : [ "Kazakhstan", "KZ" ],
111 : [ "Kenya", "KE" ],
112 : [ "Kiribati", "KI" ],
113 : [ "NorthKorea", "KP" ],
114 : [ "SouthKorea", "KR" ],
115 : [ "Kuwait", "KW" ],
116 : [ "Kyrgyzstan", "KG" ],
117 : [ "Laos", "LA" ],
118 : [ "Latvia", "LV" ],
119 : [ "Lebanon", "LB" ],
120 : [ "Lesotho", "LS" ],
121 : [ "Liberia", "LR" ],
122 : [ "Libya", "LY" ],
123 : [ "Liechtenstein", "LI" ],
124 : [ "Lithuania", "LT" ],
125 : [ "Luxembourg", "LU" ],
126 : [ "Macau", "MO" ],
127 : [ "Macedonia", "MK" ],
128 : [ "Madagascar", "MG" ],
129 : [ "Malawi", "MW" ],
130 : [ "Malaysia", "MY" ],
131 : [ "Maldives", "MV" ],
132 : [ "Mali", "ML" ],
133 : [ "Malta", "MT" ],
134 : [ "MarshallIslands", "MH" ],
135 : [ "Martinique", "MQ" ],
136 : [ "Mauritania", "MR" ],
137 : [ "Mauritius", "MU" ],
138 : [ "Mayotte", "YT" ],
139 : [ "Mexico", "MX" ],
140 : [ "Micronesia", "FM" ],
141 : [ "Moldova", "MD" ],
142 : [ "Monaco", "MC" ],
143 : [ "Mongolia", "MN" ],
144 : [ "Montserrat", "MS" ],
145 : [ "Morocco", "MA" ],
146 : [ "Mozambique", "MZ" ],
147 : [ "Myanmar", "MM" ],
148 : [ "Namibia", "NA" ],
149 : [ "Nauru", "NR" ],
150 : [ "Nepal", "NP" ],
151 : [ "Netherlands", "NL" ],
152 : [ "CuraSao", "CW" ],
153 : [ "NewCaledonia", "NC" ],
154 : [ "NewZealand", "NZ" ],
155 : [ "Nicaragua", "NI" ],
156 : [ "Niger", "NE" ],
157 : [ "Nigeria", "NG" ],
158 : [ "Niue", "NU" ],
159 : [ "NorfolkIsland", "NF" ],
160 : [ "NorthernMarianaIslands", "MP" ],
161 : [ "Norway", "NO" ],
162 : [ "Oman", "OM" ],
163 : [ "Pakistan", "PK" ],
164 : [ "Palau", "PW" ],
165 : [ "PalestinianTerritories", "PS" ],
166 : [ "Panama", "PA" ],
167 : [ "PapuaNewGuinea", "PG" ],
168 : [ "Paraguay", "PY" ],
169 : [ "Peru", "PE" ],
170 : [ "Philippines", "PH" ],
171 : [ "Pitcairn", "PN" ],
172 : [ "Poland", "PL" ],
173 : [ "Portugal", "PT" ],
174 : [ "PuertoRico", "PR" ],
175 : [ "Qatar", "QA" ],
176 : [ "Reunion", "RE" ],
177 : [ "Romania", "RO" ],
178 : [ "Russia", "RU" ],
179 : [ "Rwanda", "RW" ],
180 : [ "SaintKittsAndNevis", "KN" ],
181 : [ "SaintLucia", "LC" ],
182 : [ "SaintVincentAndTheGrenadines", "VC" ],
183 : [ "Samoa", "WS" ],
184 : [ "SanMarino", "SM" ],
185 : [ "SaoTomeAndPrincipe", "ST" ],
186 : [ "SaudiArabia", "SA" ],
187 : [ "Senegal", "SN" ],
188 : [ "Seychelles", "SC" ],
189 : [ "SierraLeone", "SL" ],
190 : [ "Singapore", "SG" ],
191 : [ "Slovakia", "SK" ],
192 : [ "Slovenia", "SI" ],
193 : [ "SolomonIslands", "SB" ],
194 : [ "Somalia", "SO" ],
195 : [ "SouthAfrica", "ZA" ],
196 : [ "SouthGeorgiaAndTheSouthSandwichIslands", "GS" ],
197 : [ "Spain", "ES" ],
198 : [ "SriLanka", "LK" ],
199 : [ "SaintHelena", "SH" ],
200 : [ "SaintPierreAndMiquelon", "PM" ],
201 : [ "Sudan", "SD" ],
202 : [ "Suriname", "SR" ],
203 : [ "SvalbardAndJanMayenIslands", "SJ" ],
204 : [ "Swaziland", "SZ" ],
205 : [ "Sweden", "SE" ],
206 : [ "Switzerland", "CH" ],
207 : [ "Syria", "SY" ],
208 : [ "Taiwan", "TW" ],
209 : [ "Tajikistan", "TJ" ],
210 : [ "Tanzania", "TZ" ],
211 : [ "Thailand", "TH" ],
212 : [ "Togo", "TG" ],
213 : [ "Tokelau", "TK" ],
214 : [ "Tonga", "TO" ],
215 : [ "TrinidadAndTobago", "TT" ],
216 : [ "Tunisia", "TN" ],
217 : [ "Turkey", "TR" ],
218 : [ "Turkmenistan", "TM" ],
219 : [ "TurksAndCaicosIslands", "TC" ],
220 : [ "Tuvalu", "TV" ],
221 : [ "Uganda", "UG" ],
222 : [ "Ukraine", "UA" ],
223 : [ "UnitedArabEmirates", "AE" ],
224 : [ "UnitedKingdom", "GB" ],
225 : [ "UnitedStates", "US" ],
226 : [ "UnitedStatesMinorOutlyingIslands", "UM" ],
227 : [ "Uruguay", "UY" ],
228 : [ "Uzbekistan", "UZ" ],
229 : [ "Vanuatu", "VU" ],
230 : [ "VaticanCityState", "VA" ],
231 : [ "Venezuela", "VE" ],
232 : [ "Vietnam", "VN" ],
233 : [ "BritishVirginIslands", "VG" ],
234 : [ "UnitedStatesVirginIslands", "VI" ],
235 : [ "WallisAndFutunaIslands", "WF" ],
236 : [ "WesternSahara", "EH" ],
237 : [ "Yemen", "YE" ],
238 : [ "CanaryIslands", "IC" ],
239 : [ "Zambia", "ZM" ],
240 : [ "Zimbabwe", "ZW" ],
241 : [ "ClippertonIsland", "CP" ],
242 : [ "Montenegro", "ME" ],
243 : [ "Serbia", "RS" ],
244 : [ "Saint Barthelemy", "BL" ],
245 : [ "Saint Martin", "MF" ],
246 : [ "LatinAmericaAndTheCaribbean", "419" ],
247 : [ "AscensionIsland", "AC" ],
248 : [ "AlandIslands", "AX" ],
249 : [ "DiegoGarcia", "DG" ],
250 : [ "CeutaAndMelilla", "EA" ],
251 : [ "IsleOfMan", "IM" ],
252 : [ "Jersey", "JE" ],
253 : [ "TristanDaCunha", "TA" ],
254 : [ "SouthSudan", "SS" ],
255 : [ "Bonaire", "BQ" ],
256 : [ "SintMaarten", "SX" ],
257 : [ "Kosovo", "XK" ]
}
script_list = {
0 : [ "AnyScript", "Zzzz" ],
1 : [ "Arabic", "Arab" ],
2 : [ "Cyrillic", "Cyrl" ],
3 : [ "Deseret", "Dsrt" ],
4 : [ "Gurmukhi", "Guru" ],
5 : [ "Simplified Han", "Hans" ],
6 : [ "Traditional Han", "Hant" ],
7 : [ "Latin", "Latn" ],
8 : [ "Mongolian", "Mong" ],
9 : [ "Tifinagh", "Tfng" ],
10 : [ "Armenian", "Armn" ],
11 : [ "Bengali", "Beng" ],
12 : [ "Cherokee", "Cher" ],
13 : [ "Devanagari", "Deva" ],
14 : [ "Ethiopic", "Ethi" ],
15 : [ "Georgian", "Geor" ],
16 : [ "Greek", "Grek" ],
17 : [ "Gujarati", "Gujr" ],
18 : [ "Hebrew", "Hebr" ],
19 : [ "Japanese", "Jpan" ],
20 : [ "Khmer", "Khmr" ],
21 : [ "Kannada", "Knda" ],
22 : [ "Korean", "Kore" ],
23 : [ "Lao", "Laoo" ],
24 : [ "Malayalam", "Mlym" ],
25 : [ "Myanmar", "Mymr" ],
26 : [ "Oriya", "Orya" ],
27 : [ "Tamil", "Taml" ],
28 : [ "Telugu", "Telu" ],
29 : [ "Thaana", "Thaa" ],
30 : [ "Thai", "Thai" ],
31 : [ "Tibetan", "Tibt" ],
32 : [ "Sinhala", "Sinh" ],
33 : [ "Syriac", "Syrc" ],
34 : [ "Yi", "Yiii" ],
35 : [ "Vai", "Vaii" ],
36 : [ "Avestan", "Avst" ],
37 : [ "Balinese", "Bali" ],
38 : [ "Bamum", "Bamu" ],
39 : [ "Batak", "Batk" ],
40 : [ "Bopomofo", "Bopo" ],
41 : [ "Brahmi", "Brah" ],
42 : [ "Buginese", "Bugi" ],
43 : [ "Buhid", "Buhd" ],
44 : [ "CanadianAboriginal", "Cans" ],
45 : [ "Carian", "Cari" ],
46 : [ "Chakma", "Cakm" ],
47 : [ "Cham", "Cham" ],
48 : [ "Coptic", "Copt" ],
49 : [ "Cypriot", "Cprt" ],
50 : [ "Egyptian Hieroglyphs", "Egyp" ],
51 : [ "Fraser", "Lisu" ],
52 : [ "Glagolitic", "Glag" ],
53 : [ "Gothic", "Goth" ],
54 : [ "Han", "Hani" ],
55 : [ "Hangul", "Hang" ],
56 : [ "Hanunoo", "Hano" ],
57 : [ "Imperial Aramaic", "Armi" ],
58 : [ "Inscriptional Pahlavi", "Phli" ],
59 : [ "Inscriptional Parthian", "Prti" ],
60 : [ "Javanese", "Java" ],
61 : [ "Kaithi", "Kthi" ],
62 : [ "Katakana", "Kana" ],
63 : [ "Kayah Li", "Kali" ],
64 : [ "Kharoshthi", "Khar" ],
65 : [ "Lanna", "Lana" ],
66 : [ "Lepcha", "Lepc" ],
67 : [ "Limbu", "Limb" ],
68 : [ "Linear B", "Linb" ],
69 : [ "Lycian", "Lyci" ],
70 : [ "Lydian", "Lydi" ],
71 : [ "Mandaean", "Mand" ],
72 : [ "Meitei Mayek", "Mtei" ],
73 : [ "Meroitic", "Mero" ],
74 : [ "Meroitic Cursive", "Merc" ],
75 : [ "Nko", "Nkoo" ],
76 : [ "New Tai Lue", "Talu" ],
77 : [ "Ogham", "Ogam" ],
78 : [ "Ol Chiki", "Olck" ],
79 : [ "Old Italic", "Ital" ],
80 : [ "Old Persian", "Xpeo" ],
81 : [ "Old South Arabian", "Sarb" ],
82 : [ "Orkhon", "Orkh" ],
83 : [ "Osmanya", "Osma" ],
84 : [ "Phags Pa", "Phag" ],
85 : [ "Phoenician", "Phnx" ],
86 : [ "Pollard Phonetic", "Plrd" ],
87 : [ "Rejang", "Rjng" ],
88 : [ "Runic", "Runr" ],
89 : [ "Samaritan", "Samr" ],
90 : [ "Saurashtra", "Saur" ],
91 : [ "Sharada", "Shrd" ],
92 : [ "Shavian", "Shaw" ],
93 : [ "Sora Sompeng", "Sora" ],
94 : [ "Cuneiform", "Xsux" ],
95 : [ "Sundanese", "Sund" ],
96 : [ "Syloti Nagri", "Sylo" ],
97 : [ "Tagalog", "Tglg" ],
98 : [ "Tagbanwa", "Tagb" ],
99 : [ "Tai Le", "Tale" ],
100 : [ "Tai Viet", "Tavt" ],
101 : [ "Takri", "Takr" ],
102 : [ "Ugaritic", "Ugar" ],
103 : [ "Braille", "Brai" ],
104 : [ "Hiragana", "Hira" ]
# ### : [ "Blissymbols", "Blis" ],
# ### : [ "Linear A", "Lina" ],
# ### : [ "Naxi Geba", "Nkgb" ],
# ### : [ "Pahawh Hmong", "Hmng" ],
# ### : [ "Varang Kshiti", "Wara" ],
}
def countryCodeToId(code):
if not code:
return 0
for country_id in country_list:
if country_list[country_id][1] == code:
return country_id
return -1
def languageCodeToId(code):
if not code:
return 0
for language_id in language_list:
if language_list[language_id][1] == code:
return language_id
return -1
def scriptCodeToId(code):
if not code:
return 0
for script_id in script_list:
if script_list[script_id][1] == code:
return script_id
return -1
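# Usage sketch (values read off the tables above):
#   languageCodeToId("nb") -> 85 (NorwegianBokmal)
#   countryCodeToId("NO") -> 161 (Norway)
#   scriptCodeToId("Latn") -> 7 (Latin)
# An empty code maps to 0 (the Any* entry); an unknown code maps to -1.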
| bsd-3-clause |
sdgdsffdsfff/Cmdb_Puppet | cmdb/simplecmdb/wsgi.py | 3 | 1431 | """
WSGI config for simplecmdb project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "simplecmdb.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simplecmdb.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
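# A minimal sketch of what such a middleware wrapper could look like (hypothetical and
# kept commented out so this module's behaviour is unchanged):
#
# class LoggingMiddleware(object):
#     def __init__(self, application):
#         self.application = application
#
#     def __call__(self, environ, start_response):
#         # inspect environ (e.g. environ['PATH_INFO']) before delegating to Django
#         return self.application(environ, start_response)
#
# application = LoggingMiddleware(application)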
| epl-1.0 |
tengqm/kubernetes | hack/verify-publishing-bot.py | 66 | 5145 | #!/usr/bin/env python
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import fnmatch
import os
import sys
import json
def get_gomod_dependencies(rootdir, components):
all_dependencies = {}
for component in components:
with open(os.path.join(rootdir, component, "go.mod")) as f:
print(component + " dependencies")
all_dependencies[component] = []
lines = list(set(f))
lines.sort()
for line in lines:
for dep in components:
if dep == component:
continue
if ("k8s.io/" + dep + " =>") not in line:
continue
print("\t"+dep)
if dep not in all_dependencies[component]:
all_dependencies[component].append(dep)
return all_dependencies
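# Note: the matching above relies on each staging go.mod containing replace-style lines
# that name sibling staging repos, e.g. (illustrative line, not taken from a real file):
#   k8s.io/api => ../api
# Any k8s.io/<component> that never appears in such a line is not recorded as a dependency.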
def get_rules_dependencies(rules_file):
import yaml
with open(rules_file) as f:
data = yaml.safe_load(f)
return data
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
components = []
for component in os.listdir(rootdir + '/staging/src/k8s.io/'):
components.append(component)
components.sort()
rules_file = "/staging/publishing/rules.yaml"
try:
import yaml
except ImportError:
print("Please install missing pyyaml module and re-run %s" % sys.argv[0])
sys.exit(1)
rules_dependencies = get_rules_dependencies(rootdir + rules_file)
gomod_dependencies = get_gomod_dependencies(rootdir + '/staging/src/k8s.io/', components)
processed_repos = []
for rule in rules_dependencies["rules"]:
branch = rule["branches"][0]
# If this no longer exists in master
if rule["destination"] not in gomod_dependencies:
# Make sure we don't include a rule to publish it from master
for branch in rule["branches"]:
if branch["name"] == "master":
raise Exception("cannot find master branch for destination %s" % rule["destination"])
# And skip validation of publishing rules for it
continue
if branch["name"] != "master":
raise Exception("cannot find master branch for destination %s" % rule["destination"])
if branch["source"]["branch"] != "master":
raise Exception("cannot find master source branch for destination %s" % rule["destination"])
# we specify the go version for all master branches through `default-go-version`
# so ensure we don't specify explicit go version for master branch in rules
if "go" in branch:
raise Exception("go version must not be specified for master branch for destination %s" % rule["destination"])
print("processing : %s" % rule["destination"])
if rule["destination"] not in gomod_dependencies:
raise Exception("missing go.mod for %s" % rule["destination"])
processed_repos.append(rule["destination"])
processed_deps = []
for dep in set(gomod_dependencies[rule["destination"]]):
found = False
if "dependencies" in branch:
for dep2 in branch["dependencies"]:
processed_deps.append(dep2["repository"])
if dep2["branch"] != "master":
raise Exception("Looking for master branch and found : %s for destination", dep2,
rule["destination"])
if dep2["repository"] == dep:
found = True
else:
raise Exception(
"Please add %s as dependencies under destination %s in %s" % (gomod_dependencies[rule["destination"]], rule["destination"], rules_file))
if not found:
raise Exception("Please add %s as a dependency under destination %s in %s" % (dep, rule["destination"], rules_file))
else:
print(" found dependency %s" % dep)
extraDeps = set(processed_deps) - set(gomod_dependencies[rule["destination"]])
if len(extraDeps) > 0:
raise Exception("extra dependencies in rules for %s: %s" % (rule["destination"], ','.join(str(s) for s in extraDeps)))
items = set(gomod_dependencies.keys()) - set(processed_repos)
if len(items) > 0:
raise Exception("missing rules for %s" % ','.join(str(s) for s in items))
print("Done.")
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
chokribr/PIST | modules/bibformat/lib/elements/bfe_place.py | 39 | 1151 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints imprint publication place
"""
__revision__ = "$Id$"
def format_element(bfo):
"""
Prints the imprint publication place as HTML
@see: date.py, publisher.py, reprints.py, imprint.py, pagination.py
"""
place = bfo.field('260__a')
if place != "sine loco":
return place
| gpl-2.0 |
tmpkus/photivo | scons-local-2.2.0/SCons/Tool/sunar.py | 14 | 2593 | """engine.SCons.Tool.sunar
Tool-specific initialization for Solaris (Forte) ar (library archive). If CC
exists, static libraries should be built with it, so that template
instantiations can be resolved.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunar.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
if env.Detect('CC'):
env['AR'] = 'CC'
env['ARFLAGS'] = SCons.Util.CLVar('-xar')
env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
else:
env['AR'] = 'ar'
env['ARFLAGS'] = SCons.Util.CLVar('r')
env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
def exists(env):
return env.Detect('CC') or env.Detect('ar')
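# Usage sketch (assumes a Solaris host with the Forte/Sun Studio CC or ar on PATH; shown
# as a comment because tool modules are normally loaded by SCons rather than run directly):
#   env = Environment(tools=['default', 'sunar'])
#   env.StaticLibrary('mylib', ['a.cpp', 'b.cpp'])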
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
ticosax/django | django/core/files/utils.py | 901 | 1230 | class FileProxyMixin(object):
"""
    A mixin class used to forward file methods to an underlying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
def __iter__(self):
return iter(self.file)
| bsd-3-clause |
lemarcudal/sha_thedivision | test/Lib/encodings/iso2022_kr.py | 816 | 1053 | #
# iso2022_kr.py: Python Unicode Codec for ISO2022_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_kr')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_kr',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
mj10777/QGIS | python/PyQt/PyQt5/QtSvg.py | 45 | 1033 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QtSvg.py
---------------------
Date : March 2016
Copyright : (C) 2016 by Juergen E. Fischer
Email : jef at norbit dot de
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Juergen E. Fischer'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Juergen E. Fischer'
from PyQt5.QtSvg import *
| gpl-2.0 |
miarmak/CloudFerry | fabfile.py | 1 | 1795 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from fabric.api import task, env
from cloudferrylib.scheduler.namespace import Namespace
from cloudferrylib.scheduler.scheduler import Scheduler
import cfglib
from cloudferrylib.utils import utils as utl
from cloudferrylib.utils import utils
from cloudferrylib.scheduler.scenario import Scenario
from cloud import cloud_ferry
from dry_run import chain
env.forward_agent = True
env.user = 'root'
LOG = utl.get_log(__name__)
@task
def migrate(name_config=None, name_instance=None, debug=False):
"""
:name_config - name of config yaml-file, example 'config.yaml'
"""
if debug:
utl.configure_logging("DEBUG")
cfglib.collector_configs_plugins()
cfglib.init_config(name_config)
utils.init_singletones(cfglib.CONF)
env.key_filename = cfglib.CONF.migrate.key_filename
cloud = cloud_ferry.CloudFerry(cfglib.CONF)
cloud.migrate(Scenario())
@task
def get_info(name_config, debug=False):
if debug:
utl.configure_logging("DEBUG")
LOG.info("Init getting information")
namespace = Namespace({'name_config': name_config})
scheduler = Scheduler(namespace)
@task
def dry_run():
chain.process_test_chain()
if __name__ == '__main__':
migrate(None)
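# Editor-added illustrative sketch, not from the CloudFerry sources: the tasks
# above are normally driven through Fabric's CLI, for example (the config file
# name is an assumption taken from the docstring):
#
#   fab migrate:name_config=config.yaml,debug=True
#   fab get_info:config.yaml
#   fab dry_run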
| apache-2.0 |
jtoppins/beaker | Server/bkr/server/alembic/versions/41763e5d07cb_fix_job_product_id_type.py | 1 | 1187 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Fix job.product_id type
Revision ID: 41763e5d07cb
Revises: 2c03c52950bf
Create Date: 2014-10-10 13:22:26.918643
"""
# revision identifiers, used by Alembic.
revision = '41763e5d07cb'
down_revision = '2c03c52950bf'
from alembic import op
import sqlalchemy as sa
def upgrade():
column_info, = [info for info in sa.inspect(op.get_bind()).get_columns('job')
if info['name'] == 'product_id']
if not isinstance(column_info['type'], sa.Integer):
op.execute("""
UPDATE job
SET product_id = NULL
WHERE product_id NOT IN (SELECT id FROM product)
""")
op.execute("""
ALTER TABLE job
MODIFY product_id INT DEFAULT NULL,
ADD CONSTRAINT job_product_id_fk
FOREIGN KEY (product_id)
REFERENCES product (id)
""")
def downgrade():
pass # no downgrade as this was a schema mistake
| gpl-2.0 |
moonso/MultiQC | multiqc/modules/star/star_module.py | 1 | 7164 | #!/usr/bin/env python
""" MultiQC module to parse output from STAR """
from __future__ import print_function
from collections import OrderedDict
import logging
import re
from multiqc import config, BaseMultiqcModule
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(name='STAR', anchor='star',
href="https://github.com/alexdobin/STAR",
info="is an ultrafast universal RNA-seq aligner.")
# Find and load any STAR reports
self.star_data = dict()
for f in self.find_log_files('Log.final.out'):
parsed_data = self.parse_star_report(f['f'])
if parsed_data is not None:
s_name = f['s_name'].split('Log.final.out', 1)[0]
if s_name in self.star_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.star_data[s_name] = parsed_data
if len(self.star_data) == 0:
log.debug("Could not find any reports in {}".format(config.analysis_dir))
raise UserWarning
log.info("Found {} reports".format(len(self.star_data)))
# Write parsed report data to a file
self.write_csv_file(self.star_data, 'multiqc_star.txt')
self.sections = list()
# Basic Stats Table
# Report table is immutable, so just updating it works
self.star_stats_table()
# Alignment bar plot
# Only one section, so add to the intro
self.intro += self.star_alignment_chart()
def parse_star_report (self, raw_data):
""" Parse the final STAR log file. """
regexes = {
'total_reads': r"Number of input reads \|\s+(\d+)",
'avg_input_read_length': r"Average input read length \|\s+([\d\.]+)",
'uniquely_mapped': r"Uniquely mapped reads number \|\s+(\d+)",
'uniquely_mapped_percent': r"Uniquely mapped reads % \|\s+([\d\.]+)",
'avg_mapped_read_length': r"Average mapped length \|\s+([\d\.]+)",
'num_splices': r"Number of splices: Total \|\s+(\d+)",
'num_annotated_splices': r"Number of splices: Annotated \(sjdb\) \|\s+(\d+)",
'num_GTAG_splices': r"Number of splices: GT/AG \|\s+(\d+)",
'num_GCAG_splices': r"Number of splices: GC/AG \|\s+(\d+)",
'num_ATAC_splices': r"Number of splices: AT/AC \|\s+(\d+)",
'num_noncanonical_splices': r"Number of splices: Non-canonical \|\s+(\d+)",
'mismatch_rate': r"Mismatch rate per base, % \|\s+([\d\.]+)",
'deletion_rate': r"Deletion rate per base \|\s+([\d\.]+)",
'deletion_length': r"Deletion average length \|\s+([\d\.]+)",
'insertion_rate': r"Insertion rate per base \|\s+([\d\.]+)",
'insertion_length': r"Insertion average length \|\s+([\d\.]+)",
'multimapped': r"Number of reads mapped to multiple loci \|\s+(\d+)",
'multimapped_percent': r"% of reads mapped to multiple loci \|\s+([\d\.]+)",
'multimapped_toomany': r"Number of reads mapped to too many loci \|\s+(\d+)",
'multimapped_toomany_percent': r"% of reads mapped to too many loci \|\s+([\d\.]+)",
'unmapped_mismatches_percent': r"% of reads unmapped: too many mismatches \|\s+([\d\.]+)",
'unmapped_tooshort_percent': r"% of reads unmapped: too short \|\s+([\d\.]+)",
'unmapped_other_percent': r"% of reads unmapped: other \|\s+([\d\.]+)",
}
parsed_data = {}
for k, r in regexes.items():
r_search = re.search(r, raw_data, re.MULTILINE)
if r_search:
parsed_data[k] = float(r_search.group(1))
# Figure out the numbers for unmapped as for some reason only the percentages are given
try:
total_mapped = parsed_data['uniquely_mapped'] + parsed_data['multimapped'] + parsed_data['multimapped_toomany']
unmapped_count = parsed_data['total_reads'] - total_mapped
total_unmapped_percent = parsed_data['unmapped_mismatches_percent'] + parsed_data['unmapped_tooshort_percent'] + parsed_data['unmapped_other_percent']
parsed_data['unmapped_mismatches'] = int(round(unmapped_count * (parsed_data['unmapped_mismatches_percent'] / total_unmapped_percent), 0))
parsed_data['unmapped_tooshort'] = int(round(unmapped_count * (parsed_data['unmapped_tooshort_percent'] / total_unmapped_percent), 0))
parsed_data['unmapped_other'] = int(round(unmapped_count * (parsed_data['unmapped_other_percent'] / total_unmapped_percent), 0))
except KeyError:
pass
if len(parsed_data) == 0: return None
return parsed_data
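    # Editor-added illustrative sketch: a STAR Log.final.out report contains
    # lines such as the two below (figures made up), which the regexes above
    # turn into {'total_reads': 31962244.0, 'uniquely_mapped_percent': 87.19, ...}.
    #
    #                          Number of input reads |  31962244
    #                   Uniquely mapped reads % |  87.19%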
def star_stats_table(self):
""" Take the parsed stats from the STAR report and add them to the
basic stats table at the top of the report """
config.general_stats['headers']['uniquely_mapped_percent'] = '<th class="chroma-col" data-chroma-scale="YlGn" data-chroma-max="100" data-chroma-min="0"><span data-toggle="tooltip" title="STAR: % Uniquely mapped reads">% Mapped</span></th>'
config.general_stats['headers']['uniquely_mapped'] = '<th class="chroma-col" data-chroma-scale="PuRd" data-chroma-min="0"><span data-toggle="tooltip" title="STAR: Uniquely mapped reads (millions)">M Mapped</span></th>'
for sn, data in self.star_data.items():
config.general_stats['rows'][sn]['uniquely_mapped_percent'] = '<td class="text-right">{:.1f}%</td>'.format(data['uniquely_mapped_percent'])
config.general_stats['rows'][sn]['uniquely_mapped'] = '<td class="text-right">{:.1f}</td>'.format(data['uniquely_mapped']/1000000)
def star_alignment_chart (self):
""" Make the HighCharts HTML to plot the alignment rates """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['uniquely_mapped'] = { 'color': '#437bb1', 'name': 'Uniquely mapped' }
keys['multimapped'] = { 'color': '#7cb5ec', 'name': 'Mapped to multiple loci' }
keys['multimapped_toomany'] = { 'color': '#f7a35c', 'name': 'Mapped to too many loci' }
keys['unmapped_mismatches'] = { 'color': '#e63491', 'name': 'Unmapped: too many mismatches' }
keys['unmapped_tooshort'] = { 'color': '#b1084c', 'name': 'Unmapped: too short' }
keys['unmapped_other'] = { 'color': '#7f0000', 'name': 'Unmapped: other' }
# Config for the plot
config = {
'id': 'star_alignment_plot',
'title': 'STAR Alignment Scores',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return self.plot_bargraph(self.star_data, keys, config)
| mit |
anurag-ks/Melissa-Core | tests/test_actions_db.py | 2 | 9683 | """test for actions_db module."""
import os
import unittest
from StringIO import StringIO
try: # py3
from unittest import mock
except ImportError: # py2
import mock
import pytest
import sqlite3
def test_simple_import():
"""test simple import.
the first error raised because IO is disable when testing.
IO error is on profile_populator module.
"""
with pytest.raises(IOError):
from melissa import actions_db # NOQA
def test_import_and_mock_populator():
"""test mock profile_populator module when import this module.
this still raise error because profile.json is missing.
IO error is raised on profile module
"""
with pytest.raises(IOError):
with mock.patch('melissa.profile_populator.profile_populator'):
from melissa import actions_db # NOQA
@mock.patch(
'melissa.profile_loader.load_profile',
return_value={
'actions_db_file': ':memory:',
'modules': 'melissa.actions',
}
)
class WithProfileTest(unittest.TestCase):
"""test case using temp profile."""
def setUp(self):
"""setup func."""
# execscript argument
self.exec_script_arg = (
'\n '
'CREATE TABLE synonyms ('
'\n synonym varchar(50) PRIMARY KEY,'
'\n word varchar(50)'
'\n );'
'\n\n CREATE TABLE words ('
'\n word varchar(50),'
'\n word_group varchar(255),'
'\n word_order integer'
'\n );'
'\n\n CREATE INDEX word_index ON words (word);'
'\n\n CREATE TABLE word_groups ('
'\n word_group varchar(255),'
'\n function varchar(255),'
'\n word_count integer'
'\n );'
'\n\n CREATE INDEX word_group_index '
'ON word_groups (word_group);'
'\n\n CREATE TABLE functions ('
'\n function varchar(255) PRIMARY KEY,'
'\n priority integer'
'\n );'
'\n\n CREATE TABLE expression ('
'\n word varchar(50) PRIMARY KEY,'
'\n word_order integer'
'\n );'
'\n '
)
def test_actions_db_import(self, m_load_profile):
"""test run."""
from melissa import actions_db # NOQA
def test_create_actions_db_mocked_inputs(self, m_load_profile):
"""test run create_actions_db with mocked inputs."""
from melissa.actions_db import create_actions_db
mock_con = mock.Mock()
mock_cur = mock.Mock()
create_actions_db(mock_con, mock_cur)
mock_con.commit.assert_called_once_with()
mock_cur.executescript.assert_called_once_with(self.exec_script_arg)
def test_create_actions_db_raise_sqlite3_error(self, m_load_profile):
"""test run create_actions_db and raise sqlite3 error."""
from melissa.actions_db import create_actions_db
mock_con = mock.Mock()
mock_cur = mock.Mock()
mock_err_msg = 'Error'
mock_con.commit.side_effect = sqlite3.Error(mock_err_msg)
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
with pytest.raises(SystemExit):
create_actions_db(mock_con, mock_cur)
assert 'Error {}:\n'.format(mock_err_msg) in mock_stdout.getvalue()
def test_assemble_actions_db(self, m_load_profile):
"""test run func."""
from melissa.actions_db import assemble_actions_db
assemble_actions_db()
def test_assemble_actions_db_mock_sqlite(self, m_load_profile):
"""test run func and mock libs."""
from melissa.actions_db import assemble_actions_db
with mock.patch('melissa.actions_db.sqlite3') as mock_sq:
assemble_actions_db()
# preparation
sql_cmd_basename = 'test_actions_db_sql_command.txt'
script_folder = os.path.dirname(os.path.realpath(__file__))
sql_cmd_path = os.path.join(
script_folder, 'test_data', sql_cmd_basename)
with open(sql_cmd_path) as f:
sql_cmds = f.read().splitlines()
# testing
assert len(mock_sq.mock_calls) == 240
# connect
assert mock_sq.connect.call_count == 1
mock_sq.connect.assert_called_once_with(
':memory:', check_same_thread=False)
# connect().cursor
mock_sq.connect.return_value.cursor.assert_called_once_with()
# connect().cursor().executescript
(mock_sq.connect.return_value.cursor.return_value
.executescript.assert_called_once_with(self.exec_script_arg))
# connect().commit
mock_sq.connect.return_value.commit.assert_called_with()
# test sql commands.
call_result = (
mock_sq.connect.return_value.cursor.return_value
.execute.mock_calls)
call_result_args = [x[1][0] for x in call_result]
non_exist_expected_call = [
x for x in sql_cmds if x not in call_result_args]
not_expected_call = [
x for x in call_result_args if x not in sql_cmds]
# connect().cursor().execute
err_msg = (
'Expected calls which are not exist on actual call:\n{}\n'
'Actual calls which are not expected:\n{}'
)
err_msg = err_msg.format(
'\n'.join(non_exist_expected_call),
'\n'.join(not_expected_call),
)
assert len(call_result_args) == len(sql_cmds), err_msg
for cmd in sql_cmds:
assert mock.call(cmd) in call_result, err_msg
def test_insert_words_mock_input_and_name_input(self, m_load_profile):
"""test run insert_words with mock input."""
m_name = mock.Mock()
input_string = 'name'
for mock_name in (m_name, input_string):
mock_con = mock.Mock()
mock_cur = mock.Mock()
mock_words = mock.Mock()
mock_priority = mock.Mock()
from melissa.actions_db import insert_words
with mock.patch('sys.stdout', new_callable=StringIO) \
as mock_stdout:
insert_words(
mock_con, mock_cur, mock_name, mock_words, mock_priority)
assert (
"Invalid WORDS type '<class 'mock.mock.Mock'>' "
"for module {}".format(mock_name)
) in mock_stdout.getvalue()
def test_insert_words_mock_input_and_words_list(self, m_load_profile):
"""test run insert_words with mock input and words as list."""
mock_name = 'how'
mock_con = mock.Mock()
mock_cur = mock.Mock()
mock_words = ['how', 'are', 'you']
mock_priority = mock.Mock()
from melissa.actions_db import insert_words
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
insert_words(
mock_con, mock_cur, mock_name, mock_words, mock_priority)
cur_exec_calls = [
mock.call(
"INSERT INTO functions (function, priority) "
"values ('{} handle',{})".format(
mock_name, mock_priority)),
]
for word in mock_words:
cur_exec_calls.append(mock.call(
"INSERT INTO words (word, word_group, word_order) "
"values ('{w}','{w}',0)".format(w=word)
))
cur_exec_calls.append(mock.call(
"INSERT INTO word_groups "
"(word_group, function, word_count) "
"values ('{}','{} handle',1)".format(
word, mock_name, mock_priority)
))
assert len(mock_cur.execute.mock_calls) == 7
for call in cur_exec_calls:
assert call in mock_cur.execute.mock_calls
mock_con.commit.assert_called_once_with()
assert '' in mock_stdout.getvalue()
def test_insert_words_mock_input_and_words_dict(self, m_load_profile):
"""test run insert_words with mock input and words as dict."""
mock_name = 'define'
mock_con = mock.Mock()
mock_cur = mock.Mock()
mock_words = {'define_subject': {'groups': ['define']}}
mock_priority = mock.Mock()
from melissa.actions_db import insert_words
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
insert_words(
mock_con, mock_cur, mock_name, mock_words, mock_priority)
cur_exec_calls = [
mock.call(
"INSERT INTO functions (function, priority) "
"values ('define define_subject',0)"),
            mock.call(
"INSERT INTO word_groups "
"(word_group, function, word_count) "
"values ('define','define define_subject',1)"),
mock.call(
"INSERT INTO words (word, word_group, word_order) "
"values ('define','define',0)")
]
mock_con.commit.assert_called_once_with()
assert len(mock_cur.execute.mock_calls) == 3
for call in cur_exec_calls:
assert call in mock_cur.execute.mock_calls
assert '' in mock_stdout.getvalue()
| mit |
molobrakos/home-assistant | homeassistant/components/nuheat/__init__.py | 7 | 1024 | """Support for NuHeat thermostats."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_DEVICES
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import discovery
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'nuheat'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_DEVICES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the NuHeat thermostat component."""
import nuheat
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
devices = conf.get(CONF_DEVICES)
api = nuheat.NuHeat(username, password)
api.authenticate()
hass.data[DOMAIN] = (api, devices)
discovery.load_platform(hass, "climate", DOMAIN, {}, config)
return True
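# Editor-added illustrative sketch: the schema above maps to a configuration.yaml
# entry along these lines; the account and device serial number are assumptions.
#
#   nuheat:
#     username: you@example.com
#     password: !secret nuheat_password
#     devices:
#       - "12345678"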
| apache-2.0 |
luthfii/xsched | tools/qemu-xen/scripts/simpletrace.py | 150 | 5917 | #!/usr/bin/env python
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/tracing.txt
import struct
import re
import inspect
from tracetool import _read_events, Event
from tracetool.backend.simple import is_string
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
def read_header(fobj, hfmt):
'''Read a trace record header'''
hlen = struct.calcsize(hfmt)
hdr = fobj.read(hlen)
if len(hdr) != hlen:
return None
return struct.unpack(hfmt, hdr)
def get_record(edict, rechdr, fobj):
"""Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6)."""
if rechdr is None:
return None
rec = (rechdr[0], rechdr[1])
if rechdr[0] != dropped_event_id:
event_id = rechdr[0]
event = edict[event_id]
for type, name in event.args:
if is_string(type):
l = fobj.read(4)
(len,) = struct.unpack('=L', l)
s = fobj.read(len)
rec = rec + (s,)
else:
(value,) = struct.unpack('=Q', fobj.read(8))
rec = rec + (value,)
else:
(value,) = struct.unpack('=Q', fobj.read(8))
rec = rec + (value,)
return rec
def read_record(edict, fobj):
"""Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6)."""
rechdr = read_header(fobj, rec_header_fmt)
return get_record(edict, rechdr, fobj) # return tuple of record elements
def read_trace_file(edict, fobj):
"""Deserialize trace records from a file, yielding record tuples (event_num, timestamp, arg1, ..., arg6)."""
header = read_header(fobj, log_header_fmt)
if header is None or \
header[0] != header_event_id or \
header[1] != header_magic:
raise ValueError('Not a valid trace file!')
if header[2] != 0 and \
header[2] != 2:
raise ValueError('Unknown version of tracelog format!')
log_version = header[2]
if log_version == 0:
raise ValueError('Older log format, not supported with this QEMU release!')
while True:
rec = read_record(edict, fobj)
if rec is None:
break
yield rec
class Analyzer(object):
"""A trace file analyzer which processes trace records.
An analyzer can be passed to run() or process(). The begin() method is
invoked, then each trace record is processed, and finally the end() method
is invoked.
If a method matching a trace event name exists, it is invoked to process
that trace record. Otherwise the catchall() method is invoked."""
def begin(self):
"""Called at the start of the trace."""
pass
def catchall(self, event, rec):
"""Called if no specific method for processing a trace event has been found."""
pass
def end(self):
"""Called at the end of the trace."""
pass
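# Editor-added illustrative sketch of a minimal Analyzer subclass; it relies only
# on catchall(), so it works for any trace log, and the printout format is an
# assumption.
#
#   class EventCounter(Analyzer):
#       def begin(self):
#           self.counts = {}
#       def catchall(self, event, rec):
#           self.counts[event.name] = self.counts.get(event.name, 0) + 1
#       def end(self):
#           for name, count in self.counts.items():
#               print '%s %d' % (name, count)
#
#   # e.g. process('trace-events', 'trace-file', EventCounter())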
def process(events, log, analyzer):
"""Invoke an analyzer on each event in a log."""
if isinstance(events, str):
events = _read_events(open(events, 'r'))
if isinstance(log, str):
log = open(log, 'rb')
enabled_events = []
dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
edict = {dropped_event_id: dropped_event}
for e in events:
if 'disable' not in e.properties:
enabled_events.append(e)
for num, event in enumerate(enabled_events):
edict[num] = event
def build_fn(analyzer, event):
if isinstance(event, str):
return analyzer.catchall
fn = getattr(analyzer, event.name, None)
if fn is None:
return analyzer.catchall
event_argcount = len(event.args)
fn_argcount = len(inspect.getargspec(fn)[0]) - 1
if fn_argcount == event_argcount + 1:
# Include timestamp as first argument
return lambda _, rec: fn(*rec[1:2 + event_argcount])
else:
# Just arguments, no timestamp
return lambda _, rec: fn(*rec[2:2 + event_argcount])
analyzer.begin()
fn_cache = {}
for rec in read_trace_file(edict, log):
event_num = rec[0]
event = edict[event_num]
if event_num not in fn_cache:
fn_cache[event_num] = build_fn(analyzer, event)
fn_cache[event_num](event, rec)
analyzer.end()
def run(analyzer):
"""Execute an analyzer on a trace file given on the command-line.
This function is useful as a driver for simple analysis scripts. More
advanced scripts will want to call process() instead."""
import sys
if len(sys.argv) != 3:
sys.stderr.write('usage: %s <trace-events> <trace-file>\n' % sys.argv[0])
sys.exit(1)
events = _read_events(open(sys.argv[1], 'r'))
process(events, sys.argv[2], analyzer)
if __name__ == '__main__':
class Formatter(Analyzer):
def __init__(self):
self.last_timestamp = None
def catchall(self, event, rec):
i = 1
timestamp = rec[1]
if self.last_timestamp is None:
self.last_timestamp = timestamp
delta_ns = timestamp - self.last_timestamp
self.last_timestamp = timestamp
fields = [event.name, '%0.3f' % (delta_ns / 1000.0)]
for type, name in event.args:
if is_string(type):
fields.append('%s=%s' % (name, rec[i + 1]))
else:
fields.append('%s=0x%x' % (name, rec[i + 1]))
i += 1
print ' '.join(fields)
run(Formatter())
| gpl-2.0 |
jcadduono/android_kernel_oneplus_msm8996 | tools/perf/python/twatch.py | 1565 | 1316 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
revanorion/NetworkSecurityProject | phpmyadmin/vendor/guzzle/guzzle/docs/conf.py | 469 | 3047 | import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'leftbar.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Guzzle Sphinx theme setup ------------------------------------------------
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
"index_template": "index.html",
"project_nav_name": "Guzzle",
"github_user": "guzzle",
"github_repo": "guzzle",
"disqus_comments_shortname": "guzzle",
"google_analytics_account": "UA-22752917-1"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Guzzle.tex', u'Guzzle Documentation',
u'Michael Dowling', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'guzzle', u'Guzzle Documentation',
[u'Michael Dowling'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Guzzle', u'Guzzle Documentation',
u'Michael Dowling', 'Guzzle', 'One line description of project.',
'Miscellaneous'),
]
| apache-2.0 |
seblat/coala-bears | bears/shell/ShellCheckBear.py | 13 | 1730 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
from dependency_management.requirements.AnyOneOfRequirements import (
AnyOneOfRequirements)
from dependency_management.requirements.CabalRequirement import (
CabalRequirement)
@linter(executable='shellcheck', output_format='regex',
output_regex=r'.+:(?P<line>\d+):(?P<column>\d+): '
r'(?P<severity>error|warning|info): (?P<message>.+)')
class ShellCheckBear:
"""
Check bash/shell scripts for syntactical problems (with understandable
messages), semantical problems as well as subtle caveats and pitfalls.
A gallery of bad code that can be detected is available at
<https://github.com/koalaman/shellcheck/blob/master/README.md>.
"""
LANGUAGES = {'sh', 'bash', 'ksh', 'dash'}
REQUIREMENTS = {AnyOneOfRequirements(
[CabalRequirement('shellcheck', '0.4.1'),
DistributionRequirement('shellcheck')
]
),
}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Security', 'Undefined Element', 'Unused Code'}
@staticmethod
def create_arguments(filename, file, config_file, shell: str='sh',
shellcheck_ignore: list=None):
"""
:param shell: Target shell being used.
:param shellcheck_ignore: List of linting rules that should be ignored.
"""
args = ('--f', 'gcc', '-s', shell, filename)
if shellcheck_ignore:
args += ('-e', ','.join(shellcheck_ignore))
return args
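# Editor-added illustrative sketch: with coala the bear is usually enabled from a
# .coafile section such as the one below; the section name, file glob and ignore
# codes are assumptions.
#
#   [all.shell]
#   files = **/*.sh
#   bears = ShellCheckBear
#   shell = bash
#   shellcheck_ignore = SC1090, SC2034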
| agpl-3.0 |
ewdurbin/raven-python | raven/contrib/bottle/utils.py | 25 | 1045 | """
raven.contrib.bottle.utils
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
logger = logging.getLogger(__name__)
def get_data_from_request(request):
urlparts = _urlparse.urlsplit(request.url)
try:
form_dict = request.forms.dict
# we only are about the most recent one
formdata = dict([(k, form_dict[k][-1]) for k in form_dict])
except Exception:
formdata = {}
data = {
'request': {
'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': formdata,
'headers': dict(get_headers(request.environ)),
'env': dict(get_environ(request.environ)),
}
}
return data
| bsd-3-clause |
punkkeks/OctoPrint | src/octoprint/server/api/job.py | 29 | 1955 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from flask import request, make_response, jsonify
from octoprint.server import printer, NO_CONTENT
from octoprint.server.util.flask import restricted_access, get_json_command_from_request
from octoprint.server.api import api
import octoprint.util as util
@api.route("/job", methods=["POST"])
@restricted_access
def controlJob():
if not printer.is_operational():
return make_response("Printer is not operational", 409)
valid_commands = {
"start": [],
"restart": [],
"pause": [],
"cancel": []
}
command, data, response = get_json_command_from_request(request, valid_commands)
if response is not None:
return response
activePrintjob = printer.is_printing() or printer.is_paused()
if command == "start":
if activePrintjob:
return make_response("Printer already has an active print job, did you mean 'restart'?", 409)
printer.start_print()
elif command == "restart":
if not printer.is_paused():
return make_response("Printer does not have an active print job or is not paused", 409)
printer.start_print()
elif command == "pause":
if not activePrintjob:
return make_response("Printer is neither printing nor paused, 'pause' command cannot be performed", 409)
printer.toggle_pause_print()
elif command == "cancel":
if not activePrintjob:
return make_response("Printer is neither printing nor paused, 'cancel' command cannot be performed", 409)
printer.cancel_print()
return NO_CONTENT
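# Editor-added illustrative sketch: a client pauses the running job with a JSON
# POST along these lines (the /api prefix and X-Api-Key header follow the usual
# OctoPrint conventions and are assumptions here):
#
#   POST /api/job
#   Content-Type: application/json
#   X-Api-Key: <your key>
#
#   {"command": "pause"}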
@api.route("/job", methods=["GET"])
def jobState():
currentData = printer.get_current_data()
return jsonify({
"job": currentData["job"],
"progress": currentData["progress"],
"state": currentData["state"]["text"]
}) | agpl-3.0 |
pawelld/webias | webias/gnosis/xml/pickle/util/_flags.py | 3 | 1685 | PARANOIA = 1 # default: security model of xml_pickle-0.51
def setParanoia(val):
global PARANOIA
PARANOIA = val
def getParanoia(): return PARANOIA
# 0: Put refs in XML when ident objs found, instead of entire objs
# 1: never put references in XML
DEEPCOPY = 0
def setDeepCopy(val):
global DEEPCOPY
DEEPCOPY = val
def getDeepCopy(): return DEEPCOPY
TYPE_IN_BODY = {}
def setInBody(typename,val):
global TYPE_IN_BODY
TYPE_IN_BODY[typename] = val
# return setting for type, defaulting to 0
def getInBody(typename): return TYPE_IN_BODY.get(typename) or 0
CURRENT_PARSER = "DOM"
def setParser(name):
"Set current parser, by name"
global CURRENT_PARSER
CURRENT_PARSER = name
def getParser(): return CURRENT_PARSER
def enumParsers():
"Return available parsers as a dictionary of (name: parser)"
dict = {}
try:
from gnosis.xml.pickle.parsers._dom import thing_from_dom
dict['DOM'] = thing_from_dom
except: pass
try:
from gnosis.xml.pickle.parsers._sax import thing_from_sax
dict['SAX'] = thing_from_sax
except: pass
try:
from gnosis.xml.pickle.parsers._cexpat import thing_from_cexpat
dict['cEXPAT'] = thing_from_cexpat
except: pass
return dict
VERBOSE_XML = 0
def setVerbose(val):
"""Setting verbose to 0 will turn off some fields that
aren't technically necessary (for example, the family field
won't be included unless a mutated type is present).
You need to set this to 1 if you're talking to a parser that
requires that all fields be present."""
global VERBOSE_XML
VERBOSE_XML = val
def getVerbose(): return VERBOSE_XML
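# Editor-added illustrative sketch: these module-level switches are typically
# flipped before pickling or unpickling; the import path and chosen values are
# assumptions.
#
#   from gnosis.xml.pickle.util import setDeepCopy, setVerbose, enumParsers, setParser
#   setDeepCopy(1)      # always embed full objects, never emit references
#   setVerbose(1)       # emit all fields, even ones that are not strictly needed
#   if 'SAX' in enumParsers():
#       setParser('SAX')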
| agpl-3.0 |
PKRoma/poedit | deps/boost/tools/build/test/tag.py | 6 | 3299 | #!/usr/bin/python
# Copyright (C) 2003. Pedro Ferreira
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
###############################################################################
#
# test_folder_with_dot_in_name()
# ------------------------------
#
###############################################################################
def test_folder_with_dot_in_name(t):
"""
Regression test: the 'tag' feature did not work in directories that had a
dot in their name.
"""
t.write("version-1.32.0/jamroot.jam", """\
project test : requirements <tag>@$(__name__).tag ;
rule tag ( name : type ? : property-set )
{
# Do nothing, just make sure the rule is invoked OK.
ECHO The tag rule has been invoked. ;
}
exe a : a.cpp ;
""")
t.write("version-1.32.0/a.cpp", "int main() {}\n")
t.run_build_system(subdir="version-1.32.0")
t.expect_addition("version-1.32.0/bin/$toolset/debug*/a.exe")
t.expect_output_lines("The tag rule has been invoked.")
###############################################################################
#
# test_tag_property()
# -------------------
#
###############################################################################
def test_tag_property(t):
"""Basic tag property test."""
t.write("jamroot.jam", """\
import virtual-target ;
rule tag ( name : type ? : property-set )
{
local tags ;
switch [ $(property-set).get <variant> ]
{
case debug : tags += d ;
case release : tags += r ;
}
switch [ $(property-set).get <link> ]
{
case shared : tags += s ;
case static : tags += t ;
}
if $(tags)
{
return [ virtual-target.add-prefix-and-suffix $(name)_$(tags:J="")
: $(type) : $(property-set) ] ;
}
}
# Test both fully-qualified and local name of the rule
exe a : a.cpp : <tag>@$(__name__).tag ;
lib b : a.cpp : <tag>@tag ;
stage c : a ;
""")
t.write("a.cpp", """\
int main() {}
#ifdef _MSC_VER
__declspec (dllexport) void x () {}
#endif
""")
file_list = (
BoostBuild.List("bin/$toolset/debug*/a_ds.exe") +
BoostBuild.List("bin/$toolset/debug*/b_ds.dll") +
BoostBuild.List("c/a_ds.exe") +
BoostBuild.List("bin/$toolset/release*/a_rs.exe") +
BoostBuild.List("bin/$toolset/release*/b_rs.dll") +
BoostBuild.List("c/a_rs.exe") +
BoostBuild.List("bin/$toolset/debug*/a_dt.exe") +
BoostBuild.List("bin/$toolset/debug*/b_dt.lib") +
BoostBuild.List("c/a_dt.exe") +
BoostBuild.List("bin/$toolset/release*/a_rt.exe") +
BoostBuild.List("bin/$toolset/release*/b_rt.lib") +
BoostBuild.List("c/a_rt.exe"))
variants = ["debug", "release", "link=static,shared"]
t.run_build_system(variants)
t.expect_addition(file_list)
t.run_build_system(variants + ["clean"])
t.expect_removal(file_list)
###############################################################################
#
# main()
# ------
#
###############################################################################
t = BoostBuild.Tester(use_test_config=False)
test_tag_property(t)
test_folder_with_dot_in_name(t)
t.cleanup()
| mit |
cculianu/bitcoin-abc | test/functional/p2p_invalid_messages.py | 1 | 8071 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import struct
from test_framework import messages
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
command = b'badmsg'
def __init__(self, *, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
return messages.ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.command, self.str_data)
class InvalidMessagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
"""
. Test msg header
0. Send a bunch of large (2MB) messages of an unrecognized type. Check to see
that it isn't an effective DoS against the node.
1. Send an oversized (2MB+) message and check that we're disconnected.
2. Send a few messages with an incorrect data size in the header, ensure the
messages are ignored.
"""
self.test_magic_bytes()
self.test_checksum()
self.test_size()
self.test_command()
node = self.nodes[0]
self.node = node
node.add_p2p_connection(P2PDataStore())
conn2 = node.add_p2p_connection(P2PDataStore())
# 2MB, per MAX_PROTOCOL_MESSAGE_LENGTH
msg_limit = 2 * 1024 * 1024
        # Account for the compact-size length prefix (5 bytes at this message size)
valid_data_limit = msg_limit - 5
#
# 0.
#
# Send as large a message as is valid, ensure we aren't disconnected but
# also can't exhaust resources.
#
msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
assert len(msg_at_size.serialize()) == msg_limit
self.log.info(
"Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
# Run a bunch of times to test for memory exhaustion.
for _ in range(80):
node.p2p.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be
# connected.
self.log.info("Waiting for node to drop junk messages.")
node.p2p.sync_with_ping(timeout=320)
assert node.p2p.is_connected
#
# 1.
#
# Send an oversized message, ensure we're disconnected.
#
msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
assert len(msg_over_size.serialize()) == (msg_limit + 1)
with node.assert_debug_log(["Oversized header detected"]):
# An unknown message type (or *any* message type) over
# MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
node.p2p.send_message(msg_over_size)
node.p2p.wait_for_disconnect(timeout=4)
node.disconnect_p2ps()
conn = node.add_p2p_connection(P2PDataStore())
conn.wait_for_verack()
#
# 2.
#
# Send messages with an incorrect data size in the header.
#
actual_size = 100
msg = msg_unrecognized(str_data="b" * actual_size)
# TODO: handle larger-than cases. I haven't been able to pin down what
# behavior to expect.
for wrong_size in (2, 77, 78, 79):
self.log.info(
"Sending a message with incorrect size of {}".format(wrong_size))
# Unmodified message should submit okay.
node.p2p.send_and_ping(msg)
# A message lying about its data size results in a disconnect when the incorrect
# data size is less than the actual size.
#
# TODO: why does behavior change at 78 bytes?
#
node.p2p.send_raw_message(
self._tweak_msg_data_size(
msg, wrong_size))
# For some reason unknown to me, we sometimes have to push additional data to the
# peer in order for it to realize a disconnect.
try:
node.p2p.send_message(messages.msg_ping(nonce=123123))
except IOError:
pass
node.p2p.wait_for_disconnect(timeout=10)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
# Node is still up.
conn = node.add_p2p_connection(P2PDataStore())
conn.sync_with_ping()
def test_magic_bytes(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
conn.magic_bytes = b'\x00\x11\x22\x32'
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
conn.send_message(messages.msg_ping(nonce=0xff))
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_checksum(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['ProcessMessages(badmsg, 2 bytes): CHECKSUM ERROR expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
# magic
4 +
# command
12 +
# len
4
)
# modify checksum
msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
def test_size(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
# magic
4 +
# command
12
)
# modify len to MAX_SIZE + 1
msg = msg[:cut_len] + \
struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_command(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']):
msg = msg_unrecognized(str_data="d")
msg.command = b'\xff' * 12
msg = conn.build_message(msg)
# Modify command
msg = msg[:7] + b'\x00' + msg[7 + 1:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def _tweak_msg_data_size(self, message, wrong_size):
"""
Return a raw message based on another message but with an incorrect data size in
the message header.
"""
raw_msg = self.node.p2p.build_message(message)
bad_size_bytes = struct.pack("<I", wrong_size)
num_header_bytes_before_size = 4 + 12
# Replace the correct data size in the message with an incorrect one.
raw_msg_with_wrong_size = (
raw_msg[:num_header_bytes_before_size] +
bad_size_bytes +
raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
)
assert len(raw_msg) == len(raw_msg_with_wrong_size)
return raw_msg_with_wrong_size
if __name__ == '__main__':
InvalidMessagesTest().main()
| mit |
whix/python | PyBeaner/0007/code_lines.py | 36 | 1086 | # coding=utf-8
__author__ = 'PyBeaner'
import os
import fnmatch
total_lines = 0
code_lines = 0
empty_lines = 0
comment_lines = 0
def count_line(line):
line = line.lstrip()
global comment_lines, empty_lines, total_lines, code_lines
total_lines += 1
if line.startswith("#"):
comment_lines += 1
elif not line:
empty_lines += 1
else:
code_lines += 1
def scan_dir(directory, suffix="*.py"):
directory = os.path.abspath(directory)
print("Scanning files in %s ..." % directory)
for cur_dir, dirs, files in os.walk(directory):
for file in files:
if not fnmatch.fnmatch(file, suffix):
continue
file_path = os.path.join(cur_dir, file)
with open(file_path, errors="replace") as f:
for line in f:
count_line(line)
if __name__ == '__main__':
scan_dir(r"../..")
print("Total lines:%d" % total_lines)
print("Code lines:%d" % code_lines)
print("Empty lines:%d" % empty_lines)
print("Comment lines:%d" % comment_lines)
| mit |
facekapow/runtime | deps/v8/tools/release/auto_tag.py | 86 | 5863 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import sys
from common_includes import *
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
# TODO(machenbach): Remove after the git switch.
if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
print "This script is disabled until after the v8 git migration."
return True
self.CommonPrepare()
self.PrepareBranch()
self.GitCheckout("master")
self.vc.Pull()
class GetTags(Step):
MESSAGE = "Get all V8 tags."
def RunStep(self):
self.GitCreateBranch(self._config["BRANCHNAME"])
self["tags"] = self.vc.GetTags()
class GetOldestUntaggedVersion(Step):
MESSAGE = "Check if there's a version on bleeding edge without a tag."
def RunStep(self):
tags = set(self["tags"])
self["candidate"] = None
self["candidate_version"] = None
self["next"] = None
self["next_version"] = None
# Iterate backwards through all automatic version updates.
for git_hash in self.GitLog(
format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines():
# Get the version.
if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
continue
self.ReadAndPersistVersion()
version = self.ArrayToVersion("")
# Strip off trailing patch level (tags don't include tag level 0).
if version.endswith(".0"):
version = version[:-2]
# Clean up checked-out version file.
self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
if version in tags:
if self["candidate"]:
# Revision "git_hash" is tagged already and "candidate" was the next
# newer revision without a tag.
break
else:
print("Stop as %s is the latest version and it has been tagged." %
version)
self.CommonCleanup()
return True
else:
# This is the second oldest version without a tag.
self["next"] = self["candidate"]
self["next_version"] = self["candidate_version"]
# This is the oldest version without a tag.
self["candidate"] = git_hash
self["candidate_version"] = version
if not self["candidate"] or not self["candidate_version"]:
print "Nothing found to tag."
self.CommonCleanup()
return True
print("Candidate for tagging is %s with version %s" %
(self["candidate"], self["candidate_version"]))
class GetLKGRs(Step):
MESSAGE = "Get the last lkgrs."
def RunStep(self):
revision_url = "https://v8-status.appspot.com/revisions?format=json"
status_json = self.ReadURL(revision_url, wait_plan=[5, 20])
self["lkgrs"] = [entry["revision"]
for entry in json.loads(status_json) if entry["status"]]
class CalculateTagRevision(Step):
MESSAGE = "Calculate the revision to tag."
def LastLKGR(self, min_rev, max_rev):
"""Finds the newest lkgr between min_rev (inclusive) and max_rev
(exclusive).
"""
for lkgr in self["lkgrs"]:
# LKGRs are reverse sorted.
if int(min_rev) <= int(lkgr) and int(lkgr) < int(max_rev):
return lkgr
return None
def RunStep(self):
# Get the lkgr after the tag candidate and before the next tag candidate.
candidate_svn = self.vc.GitSvn(self["candidate"])
if self["next"]:
next_svn = self.vc.GitSvn(self["next"])
else:
# Don't include the version change commit itself if there is no upper
# limit yet.
candidate_svn = str(int(candidate_svn) + 1)
next_svn = sys.maxint
lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
if not lkgr_svn:
print "There is no lkgr since the candidate version yet."
self.CommonCleanup()
return True
# Let's check if the lkgr is at least three hours old.
self["lkgr"] = self.vc.SvnGit(lkgr_svn)
if not self["lkgr"]:
print "Couldn't find git hash for lkgr %s" % lkgr_svn
self.CommonCleanup()
return True
lkgr_utc_time = int(self.GitLog(n=1, format="%at", git_hash=self["lkgr"]))
current_utc_time = self._side_effect_handler.GetUTCStamp()
if current_utc_time < lkgr_utc_time + 10800:
print "Candidate lkgr %s is too recent for tagging." % lkgr_svn
self.CommonCleanup()
return True
print "Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"])
class MakeTag(Step):
MESSAGE = "Tag the version."
def RunStep(self):
if not self._options.dry_run:
self.GitReset(self["lkgr"])
# FIXME(machenbach): Make this work with the git repo.
self.vc.Tag(self["candidate_version"],
"svn/bleeding_edge",
"This won't work!")
class CleanUp(Step):
MESSAGE = "Clean up."
def RunStep(self):
self.CommonCleanup()
class AutoTag(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("--dry_run", help="Don't tag the new version.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.dry_run and not options.author:
print "Specify your chromium.org email with -a"
return False
options.wait_for_lgtm = False
options.force_readline_defaults = True
options.force_upload = True
return True
def _Config(self):
return {
"BRANCHNAME": "auto-tag-v8",
"PERSISTFILE_BASENAME": "/tmp/v8-auto-tag-tempfile",
}
def _Steps(self):
return [
Preparation,
GetTags,
GetOldestUntaggedVersion,
GetLKGRs,
CalculateTagRevision,
MakeTag,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(AutoTag().Run())
| apache-2.0 |
julbright/beautifulhue | beautifulhue/api/_light.py | 3 | 4500 | from beautifulhue.libs.http import Request
class Light:
def __init__(self, bridge, user, debug):
self.bridge = bridge
self.user = user
self.debug = debug
def get(self, resource, debug=False):
"""
@summary: Get all lights, get new lights, or get a specific light as\
determined by the resource object.
@TODO: Fix resource variable scope issue that manifests when making\
multiple light.get(resource) calls.
"""
request = Request()
services = {
'all':{'service':'lights'},
'new':{'service':'lights/new'}
}
if (isinstance(resource['which'], int)):
resource['id'] = resource['which']
resource['which'] = 'one'
if (resource['which'] == 'one'):
services['one'] = {'service':'lights/{id}'.format(id=resource['id'])}
service = services[resource['which']]['service']
path = 'api/{username}/{service}'.format(
username=self.user['name'],
service=service
)
url = 'http://{bridge_ip}/{path}'.format(bridge_ip=self.bridge['ip'],
path=path)
status, content = request.get(url, resource)
if service == 'lights':
lights = []
for (k, v) in content.items():
v['id'] = int(k)
lights.append(v)
if resource.has_key('verbose') and resource['verbose']:
_lights = []
for light in lights:
path = 'api/{username}/lights/{id}'.format(
username=self.user['name'],
id=light['id']
)
url = 'http://{bridge_ip}/{path}'.format(bridge_ip=self.bridge['ip'],
path=path)
status, content = request.get(url, resource)
_lights.append(content)
content = _lights
else:
content = lights
if debug:
return dict(info=status, resource=content)
else:
return dict(resource=content)
def find(self, resource, debug=False):
"""
@summary: Search for new lights.
"""
request = Request()
services = {
'new':{'service':'lights'}
}
service = services[resource['which']]['service']
path = 'api/{username}/{service}'.format(
username=self.user['name'],
service=service
)
url = 'http://{bridge_ip}/{path}'.format(bridge_ip=self.bridge['ip'],
path=path)
status, content = request.post(url)
if debug:
return dict(info=status, resource=content)
else:
return dict(resource=content)
def update(self, resource, debug=False):
"""
@summary: Rename lights, or set a light's state, as determined by the\
resource object.
"""
request = Request()
if (resource['data'].has_key('attr')):
service = 'lights/{id}'.format(id=resource['which'])
data = resource['data']['attr']
elif (resource['data'].has_key('state')):
service = 'lights/{id}/state'.format(id=resource['which'])
data = resource['data']['state']
else:
raise Exception('Unknown data type.')
path = 'api/{username}/{service}'.format(
username=self.user['name'],
service=service
)
url = 'http://{bridge_ip}/{path}'.format(bridge_ip=self.bridge['ip'],
path=path)
status, content = request.put(url, data)
if debug:
return dict(info=status, resource=content)
else:
return dict(resource=content)
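# Editor-added illustrative usage sketch: resource dictionaries passed to get()
# and update() look roughly like the ones below; the light id and attribute
# values are assumptions, and light refers to an already constructed Light
# instance.
#
#   light.get({'which': 'all', 'verbose': True})
#   light.get({'which': 1})
#   light.update({'which': 1, 'data': {'state': {'on': True, 'bri': 128}}})
#   light.update({'which': 1, 'data': {'attr': {'name': 'Hallway'}}})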
| mit |
sternshus/Arelle | arelle/CntlrCmdLine.py | 1 | 70763 | '''
Created on Oct 3, 2010
This module is Arelle's controller in command line non-interactive mode
(This module can be a pattern for custom integration of Arelle into an application.)
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import gettext, time, datetime, os, shlex, sys, traceback, fnmatch, threading, json, logging
from optparse import OptionParser, SUPPRESS_HELP
import re
from arelle import (Cntlr, FileSource, ModelDocument, XmlUtil, Version,
ViewFileDTS, ViewFileFactList, ViewFileFactTable, ViewFileConcepts,
ViewFileFormulae, ViewFileRelationshipSet, ViewFileTests, ViewFileRssFeed,
ViewFileRoleTypes,
ModelManager)
from arelle.ModelValue import qname
from arelle.Locale import format_string
from arelle.ModelFormulaObject import FormulaOptions
from arelle import PluginManager
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
from arelle.WebCache import proxyTuple
import logging
from lxml import etree
win32file = win32api = win32process = pywintypes = None
STILL_ACTIVE = 259 # MS Windows process status constants
PROCESS_QUERY_INFORMATION = 0x400
def main():
"""Main program to initiate application from command line or as a separate process (e.g, java Runtime.getRuntime().exec). May perform
a command line request, or initiate a web server on specified local port.
:param argv: Command line arguments. (Currently supported arguments can be displayed by the parameter *--help*.)
:type message: [str]
"""
envArgs = os.getenv("ARELLE_ARGS")
if envArgs:
args = shlex.split(envArgs)
else:
args = sys.argv[1:]
gettext.install("arelle") # needed for options messages
parseAndRun(args)
def xbrlTurtleGraphModel(furi='~/sternshus/gcs/data/1000623/000100062316000141/0001000623-16-000141-xbrl.zip'):
args = ['--plugins', 'xbrlDB', '-f', furi, '--keepOpen', '--store-to-XBRL-DB',
'rdfTurtleFile,None,None,None,turtle.rdf,None,rdfDB']
gettext.install("arelle") # needed for options messages
success, model, graph = parseAndRun(args)
uri = model.uriDir
return (success, uri, model, graph)
def wsgiApplication(extraArgs=[]): # for example call wsgiApplication(["--plugins=EdgarRenderer"])
return parseAndRun( ["--webserver=::wsgi"] + extraArgs )
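# Editor-added illustrative sketch: the same parseAndRun() entry point backs the
# plain command line, for example (script name, filing file name and option mix
# are assumptions):
#
#   python arelleCmdLine.py -f filing-instance.xml --validate --efm --facts facts.csv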
def parseAndRun(args):
"""interface used by Main program and py.test (arelle_test.py)
"""
try:
from arelle import webserver
hasWebServer = True
except ImportError:
hasWebServer = False
cntlr = CntlrCmdLine() # need controller for plug ins to be loaded
usage = "usage: %prog [options]"
parser = OptionParser(usage,
version="Arelle(r) {0}bit {1}".format(cntlr.systemWordSize, Version.version),
conflict_handler="resolve") # allow reloading plug-in options without errors
parser.add_option("-f", "--file", dest="entrypointFile",
help=_("FILENAME is an entry point, which may be "
"an XBRL instance, schema, linkbase file, "
"inline XBRL instance, testcase file, "
"testcase index file. FILENAME may be "
"a local file or a URI to a web located file. "
"For multiple instance filings may be | separated file names or JSON list "
"of file/parameter dicts [{\"file\":\"filepath\"}, {\"file\":\"file2path\"} ...]."))
parser.add_option("--username", dest="username",
help=_("user name if needed (with password) for web file retrieval"))
parser.add_option("--password", dest="password",
help=_("password if needed (with user name) for web retrieval"))
# special option for web interfaces to suppress closing an opened modelXbrl
parser.add_option("--keepOpen", dest="keepOpen", action="store_true", help=SUPPRESS_HELP)
parser.add_option("-i", "--import", dest="importFiles",
help=_("FILENAME is a list of files to import to the DTS, such as "
"additional formula or label linkbases. "
"Multiple file names are separated by a '|' character. "))
parser.add_option("-d", "--diff", dest="diffFile",
help=_("FILENAME is a second entry point when "
"comparing (diffing) two DTSes producing a versioning report."))
parser.add_option("-r", "--report", dest="versReportFile",
help=_("FILENAME is the filename to save as the versioning report."))
parser.add_option("-v", "--validate",
action="store_true", dest="validate",
help=_("Validate the file according to the entry "
"file type. If an XBRL file, it is validated "
"according to XBRL validation 2.1, calculation linkbase validation "
"if either --calcDecimals or --calcPrecision are specified, and "
"SEC EDGAR Filing Manual (if --efm selected) or Global Filer Manual "
"disclosure system validation (if --gfm=XXX selected). "
"If a test suite or testcase, the test case variations "
"are individually so validated. "
"If formulae are present they will be validated and run unless --formula=none is specified. "
))
parser.add_option("--calcDecimals", action="store_true", dest="calcDecimals",
help=_("Specify calculation linkbase validation inferring decimals."))
parser.add_option("--calcdecimals", action="store_true", dest="calcDecimals", help=SUPPRESS_HELP)
parser.add_option("--calcPrecision", action="store_true", dest="calcPrecision",
help=_("Specify calculation linkbase validation inferring precision."))
parser.add_option("--calcprecision", action="store_true", dest="calcPrecision", help=SUPPRESS_HELP)
parser.add_option("--efm", action="store_true", dest="validateEFM",
help=_("Select Edgar Filer Manual (U.S. SEC) disclosure system validation (strict)."))
parser.add_option("--gfm", action="store", dest="disclosureSystemName", help=SUPPRESS_HELP)
parser.add_option("--disclosureSystem", action="store", dest="disclosureSystemName",
help=_("Specify a disclosure system name and"
" select disclosure system validation. "
"Enter --disclosureSystem=help for list of names or help-verbose for list of names and descriptions. "))
parser.add_option("--disclosuresystem", action="store", dest="disclosureSystemName", help=SUPPRESS_HELP)
parser.add_option("--hmrc", action="store_true", dest="validateHMRC",
help=_("Select U.K. HMRC disclosure system validation."))
parser.add_option("--utr", action="store_true", dest="utrValidate",
help=_("Select validation with respect to Unit Type Registry."))
parser.add_option("--utrUrl", action="store", dest="utrUrl",
help=_("Override disclosure systems Unit Type Registry location (URL or file path)."))
parser.add_option("--utrurl", action="store", dest="utrUrl", help=SUPPRESS_HELP)
parser.add_option("--infoset", action="store_true", dest="infosetValidate",
help=_("Select validation with respect testcase infosets."))
parser.add_option("--labelLang", action="store", dest="labelLang",
help=_("Language for labels in following file options (override system settings)"))
parser.add_option("--labellang", action="store", dest="labelLang", help=SUPPRESS_HELP)
parser.add_option("--labelRole", action="store", dest="labelRole",
help=_("Label role for labels in following file options (instead of standard label)"))
parser.add_option("--labelrole", action="store", dest="labelRole", help=SUPPRESS_HELP)
parser.add_option("--DTS", "--csvDTS", action="store", dest="DTSFile",
help=_("Write DTS tree into FILE (may be .csv or .html)"))
parser.add_option("--facts", "--csvFacts", action="store", dest="factsFile",
help=_("Write fact list into FILE"))
parser.add_option("--factListCols", action="store", dest="factListCols",
help=_("Columns for fact list file"))
parser.add_option("--factTable", "--csvFactTable", action="store", dest="factTableFile",
help=_("Write fact table into FILE"))
parser.add_option("--concepts", "--csvConcepts", action="store", dest="conceptsFile",
help=_("Write concepts into FILE"))
parser.add_option("--pre", "--csvPre", action="store", dest="preFile",
help=_("Write presentation linkbase into FILE"))
parser.add_option("--cal", "--csvCal", action="store", dest="calFile",
help=_("Write calculation linkbase into FILE"))
parser.add_option("--dim", "--csvDim", action="store", dest="dimFile",
help=_("Write dimensions (of definition) linkbase into FILE"))
parser.add_option("--formulae", "--htmlFormulae", action="store", dest="formulaeFile",
help=_("Write formulae linkbase into FILE"))
parser.add_option("--viewArcrole", action="store", dest="viewArcrole",
help=_("Write linkbase relationships for viewArcrole into viewFile"))
parser.add_option("--viewarcrole", action="store", dest="viewArcrole", help=SUPPRESS_HELP)
parser.add_option("--viewFile", action="store", dest="viewFile",
help=_("Write linkbase relationships for viewArcrole into viewFile"))
parser.add_option("--viewfile", action="store", dest="viewFile", help=SUPPRESS_HELP)
parser.add_option("--roleTypes", action="store", dest="roleTypesFile",
help=_("Write defined role types into FILE"))
parser.add_option("--roletypes", action="store", dest="roleTypesFile", help=SUPPRESS_HELP)
parser.add_option("--arcroleTypes", action="store", dest="arcroleTypesFile",
help=_("Write defined arcrole types into FILE"))
parser.add_option("--arcroletypes", action="store", dest="arcroleTypesFile", help=SUPPRESS_HELP)
parser.add_option("--testReport", "--csvTestReport", action="store", dest="testReport",
help=_("Write test report of validation (of test cases) into FILE"))
parser.add_option("--testreport", "--csvtestreport", action="store", dest="testReport", help=SUPPRESS_HELP)
parser.add_option("--testReportCols", action="store", dest="testReportCols",
help=_("Columns for test report file"))
parser.add_option("--testreportcols", action="store", dest="testReportCols", help=SUPPRESS_HELP)
parser.add_option("--rssReport", action="store", dest="rssReport",
help=_("Write RSS report into FILE"))
parser.add_option("--rssreport", action="store", dest="rssReport", help=SUPPRESS_HELP)
parser.add_option("--rssReportCols", action="store", dest="rssReportCols",
help=_("Columns for RSS report file"))
parser.add_option("--rssreportcols", action="store", dest="rssReportCols", help=SUPPRESS_HELP)
parser.add_option("--skipDTS", action="store_true", dest="skipDTS",
help=_("Skip DTS activities (loading, discovery, validation), useful when an instance needs only to be parsed."))
parser.add_option("--skipdts", action="store_true", dest="skipDTS", help=SUPPRESS_HELP)
parser.add_option("--skipLoading", action="store", dest="skipLoading",
help=_("Skip loading discovered or schemaLocated files matching pattern (unix-style file name patterns separated by '|'), useful when not all linkbases are needed."))
parser.add_option("--skiploading", action="store", dest="skipLoading", help=SUPPRESS_HELP)
parser.add_option("--logFile", action="store", dest="logFile",
help=_("Write log messages into file, otherwise they go to standard output. "
"If file ends in .xml it is xml-formatted, otherwise it is text. "))
parser.add_option("--logfile", action="store", dest="logFile", help=SUPPRESS_HELP)
parser.add_option("--logFormat", action="store", dest="logFormat",
help=_("Logging format for messages capture, otherwise default is \"[%(messageCode)s] %(message)s - %(file)s\"."))
parser.add_option("--logformat", action="store", dest="logFormat", help=SUPPRESS_HELP)
parser.add_option("--logLevel", action="store", dest="logLevel",
help=_("Minimum level for messages capture, otherwise the message is ignored. "
"Current order of levels are debug, info, info-semantic, warning, warning-semantic, warning, assertion-satisfied, inconsistency, error-semantic, assertion-not-satisfied, and error. "))
parser.add_option("--loglevel", action="store", dest="logLevel", help=SUPPRESS_HELP)
parser.add_option("--logLevelFilter", action="store", dest="logLevelFilter",
help=_("Regular expression filter for logLevel. "
"(E.g., to not match *-semantic levels, logLevelFilter=(?!^.*-semantic$)(.+). "))
parser.add_option("--loglevelfilter", action="store", dest="logLevelFilter", help=SUPPRESS_HELP)
parser.add_option("--logCodeFilter", action="store", dest="logCodeFilter",
help=_("Regular expression filter for log message code."))
parser.add_option("--logcodefilter", action="store", dest="logCodeFilter", help=SUPPRESS_HELP)
parser.add_option("--statusPipe", action="store", dest="statusPipe", help=SUPPRESS_HELP)
parser.add_option("--monitorParentProcess", action="store", dest="monitorParentProcess", help=SUPPRESS_HELP)
parser.add_option("--outputAttribution", action="store", dest="outputAttribution", help=SUPPRESS_HELP)
parser.add_option("--outputattribution", action="store", dest="outputAttribution", help=SUPPRESS_HELP)
parser.add_option("--showOptions", action="store_true", dest="showOptions", help=SUPPRESS_HELP)
parser.add_option("--parameters", action="store", dest="parameters", help=_("Specify parameters for formula and validation (name=value[,name=value])."))
parser.add_option("--parameterSeparator", action="store", dest="parameterSeparator", help=_("Specify parameters separator string (if other than comma)."))
parser.add_option("--parameterseparator", action="store", dest="parameterSeparator", help=SUPPRESS_HELP)
parser.add_option("--formula", choices=("validate", "run", "none"), dest="formulaAction",
help=_("Specify formula action: "
"validate - validate only, without running, "
"run - validate and run, or "
"none - prevent formula validation or running when also specifying -v or --validate. "
"if this option is not specified, -v or --validate will validate and run formulas if present"))
parser.add_option("--formulaParamExprResult", action="store_true", dest="formulaParamExprResult", help=_("Specify formula tracing."))
parser.add_option("--formulaparamexprresult", action="store_true", dest="formulaParamExprResult", help=SUPPRESS_HELP)
parser.add_option("--formulaParamInputValue", action="store_true", dest="formulaParamInputValue", help=_("Specify formula tracing."))
parser.add_option("--formulaparaminputvalue", action="store_true", dest="formulaParamInputValue", help=SUPPRESS_HELP)
parser.add_option("--formulaCallExprSource", action="store_true", dest="formulaCallExprSource", help=_("Specify formula tracing."))
parser.add_option("--formulacallexprsource", action="store_true", dest="formulaCallExprSource", help=SUPPRESS_HELP)
parser.add_option("--formulaCallExprCode", action="store_true", dest="formulaCallExprCode", help=_("Specify formula tracing."))
parser.add_option("--formulacallexprcode", action="store_true", dest="formulaCallExprCode", help=SUPPRESS_HELP)
parser.add_option("--formulaCallExprEval", action="store_true", dest="formulaCallExprEval", help=_("Specify formula tracing."))
parser.add_option("--formulacallexpreval", action="store_true", dest="formulaCallExprEval", help=SUPPRESS_HELP)
parser.add_option("--formulaCallExprResult", action="store_true", dest="formulaCallExprResult", help=_("Specify formula tracing."))
parser.add_option("--formulacallexprtesult", action="store_true", dest="formulaCallExprResult", help=SUPPRESS_HELP)
parser.add_option("--formulaVarSetExprEval", action="store_true", dest="formulaVarSetExprEval", help=_("Specify formula tracing."))
parser.add_option("--formulavarsetexpreval", action="store_true", dest="formulaVarSetExprEval", help=SUPPRESS_HELP)
parser.add_option("--formulaVarSetExprResult", action="store_true", dest="formulaVarSetExprResult", help=_("Specify formula tracing."))
parser.add_option("--formulavarsetexprresult", action="store_true", dest="formulaVarSetExprResult", help=SUPPRESS_HELP)
parser.add_option("--formulaVarSetTiming", action="store_true", dest="timeVariableSetEvaluation", help=_("Specify showing times of variable set evaluation."))
parser.add_option("--formulavarsettiming", action="store_true", dest="timeVariableSetEvaluation", help=SUPPRESS_HELP)
parser.add_option("--formulaAsserResultCounts", action="store_true", dest="formulaAsserResultCounts", help=_("Specify formula tracing."))
parser.add_option("--formulaasserresultcounts", action="store_true", dest="formulaAsserResultCounts", help=SUPPRESS_HELP)
parser.add_option("--formulaSatisfiedAsser", action="store_true", dest="formulaSatisfiedAsser", help=_("Specify formula tracing."))
parser.add_option("--formulasatisfiedasser", action="store_true", dest="formulaSatisfiedAsser", help=SUPPRESS_HELP)
parser.add_option("--formulaUnsatisfiedAsser", action="store_true", dest="formulaUnsatisfiedAsser", help=_("Specify formula tracing."))
parser.add_option("--formulaunsatisfiedasser", action="store_true", dest="formulaUnsatisfiedAsser", help=SUPPRESS_HELP)
parser.add_option("--formulaUnsatisfiedAsserError", action="store_true", dest="formulaUnsatisfiedAsserError", help=_("Specify formula tracing."))
parser.add_option("--formulaunsatisfiedassererror", action="store_true", dest="formulaUnsatisfiedAsserError", help=SUPPRESS_HELP)
parser.add_option("--formulaFormulaRules", action="store_true", dest="formulaFormulaRules", help=_("Specify formula tracing."))
parser.add_option("--formulaformularules", action="store_true", dest="formulaFormulaRules", help=SUPPRESS_HELP)
parser.add_option("--formulaVarsOrder", action="store_true", dest="formulaVarsOrder", help=_("Specify formula tracing."))
parser.add_option("--formulavarsorder", action="store_true", dest="formulaVarsOrder", help=SUPPRESS_HELP)
parser.add_option("--formulaVarExpressionSource", action="store_true", dest="formulaVarExpressionSource", help=_("Specify formula tracing."))
parser.add_option("--formulavarexpressionsource", action="store_true", dest="formulaVarExpressionSource", help=SUPPRESS_HELP)
parser.add_option("--formulaVarExpressionCode", action="store_true", dest="formulaVarExpressionCode", help=_("Specify formula tracing."))
parser.add_option("--formulavarexpressioncode", action="store_true", dest="formulaVarExpressionCode", help=SUPPRESS_HELP)
parser.add_option("--formulaVarExpressionEvaluation", action="store_true", dest="formulaVarExpressionEvaluation", help=_("Specify formula tracing."))
parser.add_option("--formulavarexpressionevaluation", action="store_true", dest="formulaVarExpressionEvaluation", help=SUPPRESS_HELP)
parser.add_option("--formulaVarExpressionResult", action="store_true", dest="formulaVarExpressionResult", help=_("Specify formula tracing."))
parser.add_option("--formulavarexpressionresult", action="store_true", dest="formulaVarExpressionResult", help=SUPPRESS_HELP)
parser.add_option("--formulaVarFilterWinnowing", action="store_true", dest="formulaVarFilterWinnowing", help=_("Specify formula tracing."))
parser.add_option("--formulavarfilterwinnowing", action="store_true", dest="formulaVarFilterWinnowing", help=SUPPRESS_HELP)
parser.add_option("--formulaVarFiltersResult", action="store_true", dest="formulaVarFiltersResult", help=_("Specify formula tracing."))
parser.add_option("--formulavarfiltersresult", action="store_true", dest="formulaVarFiltersResult", help=SUPPRESS_HELP)
parser.add_option("--formulaRunIDs", action="store", dest="formulaRunIDs", help=_("Specify formula/assertion IDs to run, separated by a '|' character."))
parser.add_option("--formularunids", action="store", dest="formulaRunIDs", help=SUPPRESS_HELP)
parser.add_option("--uiLang", action="store", dest="uiLang",
help=_("Language for user interface (override system settings, such as program messages). Does not save setting."))
parser.add_option("--uilang", action="store", dest="uiLang", help=SUPPRESS_HELP)
parser.add_option("--proxy", action="store", dest="proxy",
help=_("Modify and re-save proxy settings configuration. "
"Enter 'system' to use system proxy setting, 'none' to use no proxy, "
"'http://[user[:password]@]host[:port]' "
" (e.g., http://192.168.1.253, http://example.com:8080, http://joe:secret@example.com:8080), "
" or 'show' to show current setting, ." ))
parser.add_option("--internetConnectivity", choices=("online", "offline"), dest="internetConnectivity",
help=_("Specify internet connectivity: online or offline"))
parser.add_option("--internetconnectivity", action="store", dest="internetConnectivity", help=SUPPRESS_HELP)
parser.add_option("--internetTimeout", type="int", dest="internetTimeout",
help=_("Specify internet connection timeout in seconds (0 means unlimited)."))
parser.add_option("--internettimeout", type="int", action="store", dest="internetTimeout", help=SUPPRESS_HELP)
parser.add_option("--internetRecheck", choices=("weekly", "daily", "never"), dest="internetRecheck",
help=_("Specify rechecking cache files (weekly is default)"))
parser.add_option("--internetrecheck", choices=("weekly", "daily", "never"), action="store", dest="internetRecheck", help=SUPPRESS_HELP)
parser.add_option("--internetLogDownloads", action="store_true", dest="internetLogDownloads",
help=_("Log info message for downloads to web cache."))
parser.add_option("--internetlogdownloads", action="store_true", dest="internetLogDownloads", help=SUPPRESS_HELP)
parser.add_option("--xdgConfigHome", action="store", dest="xdgConfigHome",
help=_("Specify non-standard location for configuration and cache files (overrides environment parameter XDG_CONFIG_HOME)."))
parser.add_option("--plugins", action="store", dest="plugins",
help=_("Modify plug-in configuration. "
"Re-save unless 'temp' is in the module list. "
"Enter 'show' to show current plug-in configuration. "
"Commands show, and module urls are '|' separated: "
"+url to add plug-in by its url or filename, ~name to reload a plug-in by its name, -name to remove a plug-in by its name, "
"relative URLs are relative to installation plug-in directory, "
" (e.g., '+http://arelle.org/files/hello_web.py', '+C:\Program Files\Arelle\examples\plugin\hello_dolly.py' to load, "
"or +../examples/plugin/hello_dolly.py for relative use of examples directory, "
"~Hello Dolly to reload, -Hello Dolly to remove). "
"If + is omitted from .py file nothing is saved (same as temp). "
"Packaged plug-in urls are their directory's url. " ))
parser.add_option("--packages", action="store", dest="packages",
help=_("Modify taxonomy packages configuration. "
"Re-save unless 'temp' is in the module list. "
"Enter 'show' to show current packages configuration. "
"Commands show, and module urls are '|' separated: "
"+url to add package by its url or filename, ~name to reload package by its name, -name to remove a package by its name, "
"URLs are full absolute paths. "
"If + is omitted from package file nothing is saved (same as temp). " ))
parser.add_option("--packageManifestName", action="store", dest="packageManifestName",
help=_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). "
"Uses unix file name pattern matching. "
"Multiple manifest files are supported in archive (such as oasis catalogs). "
"(Replaces search for either .taxonomyPackage.xml or catalog.xml). " ))
parser.add_option("--abortOnMajorError", action="store_true", dest="abortOnMajorError", help=_("Abort process on major error, such as when load is unable to find an entry or discovered file."))
parser.add_option("--showEnvironment", action="store_true", dest="showEnvironment", help=_("Show Arelle's config and cache directory and host OS environment parameters."))
parser.add_option("--showenvironment", action="store_true", dest="showEnvironment", help=SUPPRESS_HELP)
parser.add_option("--collectProfileStats", action="store_true", dest="collectProfileStats", help=_("Collect profile statistics, such as timing of validation activities and formulae."))
if hasWebServer:
parser.add_option("--webserver", action="store", dest="webserver",
help=_("start web server on host:port[:server] for REST and web access, e.g., --webserver locahost:8080, "
"or specify nondefault a server name, such as cherrypy, --webserver locahost:8080:cherrypy. "
"(It is possible to specify options to be defaults for the web server, such as disclosureSystem and validations, but not including file names.) "))
pluginOptionsIndex = len(parser.option_list)
# install any dynamic plugins so their command line options can be parsed if present
for i, arg in enumerate(args):
if arg.startswith('--plugins'):
if len(arg) > 9 and arg[9] == '=':
preloadPlugins = arg[10:]
elif i < len(args) - 1:
preloadPlugins = args[i+1]
else:
preloadPlugins = ""
for pluginCmd in preloadPlugins.split('|'):
cmd = pluginCmd.strip()
if cmd not in ("show", "temp") and len(cmd) > 0 and cmd[0] not in ('-', '~', '+'):
moduleInfo = PluginManager.addPluginModule(cmd)
if moduleInfo:
cntlr.preloadedPlugins[cmd] = moduleInfo
PluginManager.reset()
break
# add plug-in options
for optionsExtender in pluginClassMethods("CntlrCmdLine.Options"):
optionsExtender(parser)
pluginLastOptionIndex = len(parser.option_list)
parser.add_option("-a", "--about",
action="store_true", dest="about",
help=_("Show product version, copyright, and license."))
if not args and cntlr.isGAE:
args = ["--webserver=::gae"]
elif cntlr.isCGI:
args = ["--webserver=::cgi"]
elif cntlr.isMSW:
# if called from java on Windows any empty-string arguments are lost, see:
# http://bugs.java.com/view_bug.do?bug_id=6518827
# insert needed arguments
sourceArgs = args
args = []
namedOptions = set()
optionsWithArg = set()
for option in parser.option_list:
names = str(option).split('/')
namedOptions.update(names)
if option.action == "store":
optionsWithArg.update(names)
priorArg = None
for arg in sourceArgs:
if priorArg in optionsWithArg and arg in namedOptions:
# probable java/MSFT interface bug 6518827
args.append('') # add empty string argument
# remove quoting if arguments quoted according to http://bugs.java.com/view_bug.do?bug_id=6518827
if r'\"' in arg: # e.g., [{\"foo\":\"bar\"}] -> [{"foo":"bar"}]
arg = arg.replace(r'\"', '"')
args.append(arg)
priorArg = arg
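        # Illustrative effect of the workaround above (hypothetical arguments):
        # ["--logCodeFilter", "--validate"] becomes ["--logCodeFilter", "", "--validate"],
        # restoring the empty-string argument that the Java-launched Windows process dropped
        # (bug 6518827 referenced above).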
(options, leftoverArgs) = parser.parse_args(args)
if options.about:
print(_("\narelle(r) {0}bit {1}\n\n"
"An open source XBRL platform\n"
"(c) 2010-2015 Mark V Systems Limited\n"
"All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
"Licensed under the Apache License, Version 2.0 (the \"License\"); "
"you may not \nuse this file except in compliance with the License. "
"You may obtain a copy \nof the License at "
"'http://www.apache.org/licenses/LICENSE-2.0'\n\n"
"Unless required by applicable law or agreed to in writing, software \n"
"distributed under the License is distributed on an \"AS IS\" BASIS, \n"
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n"
"See the License for the specific language governing permissions and \n"
"limitations under the License."
"\n\nIncludes:"
"\n Python(r) {3[0]}.{3[1]}.{3[2]} (c) 2001-2013 Python Software Foundation"
"\n PyParsing (c) 2003-2013 Paul T. McGuire"
"\n lxml {4[0]}.{4[1]}.{4[2]} (c) 2004 Infrae, ElementTree (c) 1999-2004 by Fredrik Lundh"
"{2}"
"\n May include installable plug-in modules with author-specific license terms"
).format(cntlr.systemWordSize, Version.version,
_("\n Bottle (c) 2011-2013 Marcel Hellkamp") if hasWebServer else "",
sys.version_info, etree.LXML_VERSION))
elif options.disclosureSystemName in ("help", "help-verbose"):
text = _("Disclosure system choices: \n{0}").format(' \n'.join(cntlr.modelManager.disclosureSystem.dirlist(options.disclosureSystemName)))
try:
print(text)
except UnicodeEncodeError:
print(text.encode("ascii", "replace").decode("ascii"))
elif len(leftoverArgs) != 0 and (not hasWebServer or options.webserver is None):
parser.error(_("unrecognized arguments: {}".format(', '.join(leftoverArgs))))
elif (options.entrypointFile is None and
((not options.proxy) and (not options.plugins) and
(not any(pluginOption for pluginOption in parser.option_list[pluginOptionsIndex:pluginLastOptionIndex])) and
(not hasWebServer or options.webserver is None))):
parser.error(_("incorrect arguments, please try\n python CntlrCmdLine.py --help"))
elif hasWebServer and options.webserver:
# webserver incompatible with file operations
if any((options.entrypointFile, options.importFiles, options.diffFile, options.versReportFile,
options.factsFile, options.factListCols, options.factTableFile,
options.conceptsFile, options.preFile, options.calFile, options.dimFile, options.formulaeFile, options.viewArcrole, options.viewFile,
options.roleTypesFile, options.arcroleTypesFile
)):
parser.error(_("incorrect arguments with --webserver, please try\n python CntlrCmdLine.py --help"))
else:
# note that web server logging does not strip time stamp, use logFormat if that is desired
cntlr.startLogging(logFileName='logToBuffer')
from arelle import CntlrWebMain
app = CntlrWebMain.startWebserver(cntlr, options)
if options.webserver == '::wsgi':
return app
else:
# parse and run the FILENAME
cntlr.startLogging(logFileName=(options.logFile or "logToPrint"),
logFormat=(options.logFormat or "[%(messageCode)s] %(message)s - %(file)s"),
logLevel=(options.logLevel or "DEBUG"),
logToBuffer=getattr(options, "logToBuffer", False)) # e.g., used by EdgarRenderer to require buffered logging
success = cntlr.run(options)
return success
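# A small shim, apparently used when re-running under a web server (see the options.webserver
# handling in CntlrCmdLine.run below): plug-ins loaded after the initial parse call add_option
# on this object, which simply ensures each declared dest exists on the already-parsed options.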
class ParserForDynamicPlugins:
def __init__(self, options):
self.options = options
def add_option(self, *args, **kwargs):
if 'dest' in kwargs:
_dest = kwargs['dest']
if not hasattr(self.options, _dest):
setattr(self.options, _dest, kwargs.get('default',None))
class CntlrCmdLine(Cntlr.Cntlr):
"""
.. class:: CntlrCmdLin()
Initialization sets up for platform via Cntlr.Cntlr.
"""
def __init__(self, logFileName=None):
super(CntlrCmdLine, self).__init__(hasGui=False)
self.preloadedPlugins = {}
def run(self, options, sourceZipStream=None, responseZipStream=None):
"""Process command line arguments or web service request, such as to load and validate an XBRL document, or start web server.
When a web server has been requested, this method may be called multiple times, once for each web service (REST) request that requires processing.
Otherwise (when called for a command line request) this method is called only once for the command line arguments request.
:param options: OptionParser options from parse_args of main argv arguments (when called from command line) or corresponding arguments from web service (REST) request.
:type options: optparse.Values
"""
if options.statusPipe or options.monitorParentProcess:
try:
global win32file, win32api, win32process, pywintypes
import win32file, win32api, win32process, pywintypes
except ImportError: # win32 not installed
self.addToLog("--statusPipe {} cannot be installed, packages for win32 missing".format(options.statusPipe))
options.statusPipe = options.monitorParentProcess = None
if options.statusPipe:
try:
self.statusPipe = win32file.CreateFile("\\\\.\\pipe\\{}".format(options.statusPipe),
win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, win32file.FILE_FLAG_NO_BUFFERING, None)
self.showStatus = self.showStatusOnPipe
self.lastStatusTime = 0.0
self.parentProcessHandle = None
except pywintypes.error: # named pipe doesn't exist
self.addToLog("--statusPipe {} has not been created by calling program".format(options.statusPipe))
if options.monitorParentProcess:
try:
self.parentProcessHandle = win32api.OpenProcess(PROCESS_QUERY_INFORMATION, False, int(options.monitorParentProcess))
def monitorParentProcess():
if win32process.GetExitCodeProcess(self.parentProcessHandle) != STILL_ACTIVE:
sys.exit()
_t = threading.Timer(10.0, monitorParentProcess)
_t.daemon = True
_t.start()
monitorParentProcess()
except ImportError: # win32 not installed
self.addToLog("--monitorParentProcess {} cannot be installed, packages for win32api and win32process missing".format(options.monitorParentProcess))
except (ValueError, pywintypes.error): # parent process doesn't exist
self.addToLog("--monitorParentProcess Process {} Id is invalid".format(options.monitorParentProcess))
sys.exit()
if options.showOptions: # debug options
for optName, optValue in sorted(options.__dict__.items(), key=lambda optItem: optItem[0]):
self.addToLog("Option {0}={1}".format(optName, optValue), messageCode="info")
self.addToLog("sys.argv {0}".format(sys.argv), messageCode="info")
if options.uiLang: # set current UI Lang (but not config setting)
self.setUiLanguage(options.uiLang)
if options.proxy:
if options.proxy != "show":
proxySettings = proxyTuple(options.proxy)
self.webCache.resetProxies(proxySettings)
self.config["proxySettings"] = proxySettings
self.saveConfig()
self.addToLog(_("Proxy configuration has been set."), messageCode="info")
useOsProxy, urlAddr, urlPort, user, password = self.config.get("proxySettings", proxyTuple("none"))
if useOsProxy:
self.addToLog(_("Proxy configured to use {0}.").format(
_('Microsoft Windows Internet Settings') if sys.platform.startswith("win")
else (_('Mac OS X System Configuration') if sys.platform in ("darwin", "macos")
else _('environment variables'))), messageCode="info")
elif urlAddr:
self.addToLog(_("Proxy setting: http://{0}{1}{2}{3}{4}").format(
user if user else "",
":****" if password else "",
"@" if (user or password) else "",
urlAddr,
":{0}".format(urlPort) if urlPort else ""), messageCode="info")
else:
self.addToLog(_("Proxy is disabled."), messageCode="info")
if options.plugins:
resetPlugins = False
savePluginChanges = True
showPluginModules = False
for pluginCmd in options.plugins.split('|'):
cmd = pluginCmd.strip()
if cmd == "show":
showPluginModules = True
elif cmd == "temp":
savePluginChanges = False
elif cmd.startswith("+"):
moduleInfo = PluginManager.addPluginModule(cmd[1:])
if moduleInfo:
self.addToLog(_("Addition of plug-in {0} successful.").format(moduleInfo.get("name")),
messageCode="info", file=moduleInfo.get("moduleURL"))
resetPlugins = True
if "CntlrCmdLine.Options" in moduleInfo["classMethods"]:
addedPluginWithCntlrCmdLineOptions = True
else:
self.addToLog(_("Unable to load plug-in."), messageCode="info", file=cmd[1:])
elif cmd.startswith("~"):
if PluginManager.reloadPluginModule(cmd[1:]):
self.addToLog(_("Reload of plug-in successful."), messageCode="info", file=cmd[1:])
resetPlugins = True
else:
self.addToLog(_("Unable to reload plug-in."), messageCode="info", file=cmd[1:])
elif cmd.startswith("-"):
if PluginManager.removePluginModule(cmd[1:]):
self.addToLog(_("Deletion of plug-in successful."), messageCode="info", file=cmd[1:])
resetPlugins = True
else:
self.addToLog(_("Unable to delete plug-in."), messageCode="info", file=cmd[1:])
else: # assume it is a module or package (may also have been loaded before for option parsing)
savePluginChanges = False
if cmd in self.preloadedPlugins:
moduleInfo = self.preloadedPlugins[cmd] # already loaded, add activation message to log below
else:
moduleInfo = PluginManager.addPluginModule(cmd)
if moduleInfo:
resetPlugins = True
if moduleInfo:
self.addToLog(_("Activation of plug-in {0} successful, version {1}.").format(moduleInfo.get("name"), moduleInfo.get("version")),
messageCode="info", file=moduleInfo.get("moduleURL"))
else:
self.addToLog(_("Unable to load \"%(name)s\" as a plug-in or \"%(name)s\" is not recognized as a plugin command. "),
messageCode="arelle:pluginParameterError",
messageArgs={"name": cmd, "file": cmd}, level=logging.ERROR)
if resetPlugins:
PluginManager.reset()
if savePluginChanges:
PluginManager.save(self)
if options.webserver: # options may need reparsing dynamically
_optionsParser = ParserForDynamicPlugins(options)
# add plug-in options
for optionsExtender in pluginClassMethods("CntlrCmdLine.Options"):
optionsExtender(_optionsParser)
if showPluginModules:
self.addToLog(_("Plug-in modules:"), messageCode="info")
for i, moduleItem in enumerate(sorted(PluginManager.pluginConfig.get("modules", {}).items())):
moduleInfo = moduleItem[1]
self.addToLog(_("Plug-in: {0}; author: {1}; version: {2}; status: {3}; date: {4}; description: {5}; license {6}.").format(
moduleItem[0], moduleInfo.get("author"), moduleInfo.get("version"), moduleInfo.get("status"),
moduleInfo.get("fileDate"), moduleInfo.get("description"), moduleInfo.get("license")),
messageCode="info", file=moduleInfo.get("moduleURL"))
if options.packages:
from arelle import PackageManager
savePackagesChanges = True
showPackages = False
for packageCmd in options.packages.split('|'):
cmd = packageCmd.strip()
if cmd == "show":
showPackages = True
elif cmd == "temp":
savePackagesChanges = False
elif cmd.startswith("+"):
packageInfo = PackageManager.addPackage(self, cmd[1:], options.packageManifestName)
if packageInfo:
self.addToLog(_("Addition of package {0} successful.").format(packageInfo.get("name")),
messageCode="info", file=packageInfo.get("URL"))
else:
self.addToLog(_("Unable to load package."), messageCode="info", file=cmd[1:])
elif cmd.startswith("~"):
if PackageManager.reloadPackageModule(self, cmd[1:]):
self.addToLog(_("Reload of package successful."), messageCode="info", file=cmd[1:])
else:
self.addToLog(_("Unable to reload package."), messageCode="info", file=cmd[1:])
elif cmd.startswith("-"):
if PackageManager.removePackageModule(self, cmd[1:]):
self.addToLog(_("Deletion of package successful."), messageCode="info", file=cmd[1:])
else:
self.addToLog(_("Unable to delete package."), messageCode="info", file=cmd[1:])
else: # assume it is a module or package
savePackagesChanges = False
packageInfo = PackageManager.addPackage(self, cmd, options.packageManifestName)
if packageInfo:
self.addToLog(_("Activation of package {0} successful.").format(packageInfo.get("name")),
messageCode="info", file=packageInfo.get("URL"))
resetPlugins = True
else:
self.addToLog(_("Unable to load package \"%(name)s\". "),
messageCode="arelle:packageLoadingError",
messageArgs={"name": cmd, "file": cmd}, level=logging.ERROR)
if PackageManager.packagesConfigChanged:
PackageManager.rebuildRemappings(self)
if savePackagesChanges:
PackageManager.save(self)
else:
PackageManager.packagesConfigChanged = False
if showPackages:
self.addToLog(_("Taxonomy packages:"), messageCode="info")
for packageInfo in PackageManager.orderedPackagesConfig()["packages"]:
self.addToLog(_("Package: {0}; version: {1}; status: {2}; date: {3}; description: {4}.").format(
packageInfo.get("name"), packageInfo.get("version"), packageInfo.get("status"),
packageInfo.get("fileDate"), packageInfo.get("description")),
messageCode="info", file=packageInfo.get("URL"))
if options.showEnvironment:
self.addToLog(_("Config directory: {0}").format(self.configDir))
self.addToLog(_("Cache directory: {0}").format(self.userAppDir))
for envVar in ("XDG_CONFIG_HOME",):
if envVar in os.environ:
self.addToLog(_("XDG_CONFIG_HOME={0}").format(os.environ[envVar]))
return True
self.modelManager.customTransforms = None # clear out prior custom transforms
self.modelManager.loadCustomTransforms()
# run utility command line options that don't depend on entrypoint Files
hasUtilityPlugin = False
for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Utility.Run"):
hasUtilityPlugin = True
try:
pluginXbrlMethod(self, options, sourceZipStream=sourceZipStream, responseZipStream=responseZipStream)
except SystemExit: # terminate operation, plug in has terminated all processing
return True # success
# if no entrypointFile is applicable, quit now
if options.proxy or options.plugins or hasUtilityPlugin:
if not (options.entrypointFile or sourceZipStream):
return True # success
self.username = options.username
self.password = options.password
if options.disclosureSystemName:
self.modelManager.validateDisclosureSystem = True
self.modelManager.disclosureSystem.select(options.disclosureSystemName)
if options.validateEFM:
self.addToLog(_("both --efm and --disclosureSystem validation are requested, ignoring --efm only"),
messageCode="info", file=options.entrypointFile)
elif options.validateEFM:
self.modelManager.validateDisclosureSystem = True
self.modelManager.disclosureSystem.select("efm")
elif options.validateHMRC:
self.modelManager.validateDisclosureSystem = True
self.modelManager.disclosureSystem.select("hmrc")
else:
self.modelManager.disclosureSystem.select(None) # just load ordinary mappings
self.modelManager.validateDisclosureSystem = False
if options.utrUrl: # override disclosureSystem utrUrl
self.modelManager.disclosureSystem.utrUrl = options.utrUrl
# can be set now because the utr is first loaded at validation time
if options.skipDTS: # skip DTS loading, discovery, etc
self.modelManager.skipDTS = True
if options.skipLoading: # skip loading matching files (list of unix patterns)
self.modelManager.skipLoading = re.compile(
'|'.join(fnmatch.translate(f) for f in options.skipLoading.split('|')))
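        # For example (illustrative patterns): --skipLoading "*-label.xml|*-pres.xml" compiles to a
        # regex matching either unix-style file name pattern, so those discovered files are not loaded.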
# disclosure system sets logging filters, override disclosure filters, if specified by command line
if options.logLevelFilter:
self.setLogLevelFilter(options.logLevelFilter)
if options.logCodeFilter:
self.setLogCodeFilter(options.logCodeFilter)
if options.calcDecimals:
if options.calcPrecision:
self.addToLog(_("both --calcDecimals and --calcPrecision validation are requested, proceeding with --calcDecimals only"),
messageCode="info", file=options.entrypointFile)
self.modelManager.validateInferDecimals = True
self.modelManager.validateCalcLB = True
elif options.calcPrecision:
self.modelManager.validateInferDecimals = False
self.modelManager.validateCalcLB = True
if options.utrValidate:
self.modelManager.validateUtr = True
if options.infosetValidate:
self.modelManager.validateInfoset = True
if options.abortOnMajorError:
self.modelManager.abortOnMajorError = True
if options.collectProfileStats:
self.modelManager.collectProfileStats = True
if options.outputAttribution:
self.modelManager.outputAttribution = options.outputAttribution
if options.internetConnectivity == "offline":
self.webCache.workOffline = True
elif options.internetConnectivity == "online":
self.webCache.workOffline = False
if options.internetTimeout is not None:
self.webCache.timeout = (options.internetTimeout or None) # use None if zero specified to disable timeout
if options.internetLogDownloads:
self.webCache.logDownloads = True
fo = FormulaOptions()
if options.parameters:
parameterSeparator = (options.parameterSeparator or ',')
fo.parameterValues = dict(((qname(key, noPrefixIsNoNamespace=True),(None,value))
for param in options.parameters.split(parameterSeparator)
for key,sep,value in (param.partition('='),) ) )
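            # For example (illustrative names and values): --parameters "p1=2014,p2=us-gaap" yields
            # roughly {qname("p1"): (None, "2014"), qname("p2"): (None, "us-gaap")}, which is later
            # typed via fo.typedParameters() before formula evaluation below.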
if options.formulaParamExprResult:
fo.traceParameterExpressionResult = True
if options.formulaParamInputValue:
fo.traceParameterInputValue = True
if options.formulaCallExprSource:
fo.traceCallExpressionSource = True
if options.formulaCallExprCode:
fo.traceCallExpressionCode = True
if options.formulaCallExprEval:
fo.traceCallExpressionEvaluation = True
if options.formulaCallExprResult:
fo.traceCallExpressionResult = True
if options.formulaVarSetExprEval:
fo.traceVariableSetExpressionEvaluation = True
if options.formulaVarSetExprResult:
fo.traceVariableSetExpressionResult = True
if options.formulaAsserResultCounts:
fo.traceAssertionResultCounts = True
if options.formulaSatisfiedAsser:
fo.traceSatisfiedAssertions = True
if options.formulaUnsatisfiedAsser:
fo.traceUnsatisfiedAssertions = True
if options.formulaUnsatisfiedAsserError:
fo.errorUnsatisfiedAssertions = True
if options.formulaFormulaRules:
fo.traceFormulaRules = True
if options.formulaVarsOrder:
fo.traceVariablesOrder = True
if options.formulaVarExpressionSource:
fo.traceVariableExpressionSource = True
if options.formulaVarExpressionCode:
fo.traceVariableExpressionCode = True
if options.formulaVarExpressionEvaluation:
fo.traceVariableExpressionEvaluation = True
if options.formulaVarExpressionResult:
fo.traceVariableExpressionResult = True
if options.timeVariableSetEvaluation:
fo.timeVariableSetEvaluation = True
if options.formulaVarFilterWinnowing:
fo.traceVariableFilterWinnowing = True
if options.formulaVarFiltersResult:
fo.traceVariableFiltersResult = True
if options.formulaVarFiltersResult:
fo.traceVariableFiltersResult = True
if options.formulaRunIDs:
fo.runIDs = options.formulaRunIDs
self.modelManager.formulaOptions = fo
        success = True
        # defaults in case no entry point is processed or no CntlrCmdLine.Xbrl.Run plug-in runs
        modelXbrl = g = None
# entrypointFile may be absent (if input is a POSTED zip or file name ending in .zip)
# or may be a | separated set of file names
_entryPoints = []
if options.entrypointFile:
_f = options.entrypointFile
try: # may be a json list
_entryPoints = json.loads(_f)
except ValueError:
# is it malformed json?
if _f.startswith("[{") or _f.endswith("]}") or '"file:"' in _f:
self.addToLog(_("File name parameter appears to be malformed JSON: {0}").format(_f),
messageCode="FileNameFormatError",
level=logging.ERROR)
success = False
else: # try as file names separated by '|'
for f in (_f or '').split('|'):
if not sourceZipStream and not isHttpUrl(f) and not os.path.isabs(f):
f = os.path.normpath(os.path.join(os.getcwd(), f)) # make absolute normed path
_entryPoints.append({"file":f})
filesource = None # file source for all instances if not None
if sourceZipStream:
filesource = FileSource.openFileSource(None, self, sourceZipStream)
elif len(_entryPoints) == 1: # check if a zip and need to discover entry points
filesource = FileSource.openFileSource(_entryPoints[0].get("file",None), self)
_entrypointFiles = _entryPoints
if filesource and not filesource.selection:
if filesource.isArchive:
_entrypointFiles = []
for _archiveFile in (filesource.dir or ()): # .dir might be none if IOerror
filesource.select(_archiveFile)
if ModelDocument.Type.identify(filesource, filesource.url) in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
_entrypointFiles.append({"file":filesource.url})
elif os.path.isdir(filesource.url):
_entrypointFiles = []
for _file in os.listdir(filesource.url):
_path = os.path.join(filesource.url, _file)
if os.path.isfile(_path) and ModelDocument.Type.identify(filesource, _path) in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
_entrypointFiles.append({"file":_path})
for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Filing.Start"):
pluginXbrlMethod(self, options, filesource, _entrypointFiles, sourceZipStream=sourceZipStream, responseZipStream=responseZipStream)
for _entrypoint in _entrypointFiles:
_entrypointFile = _entrypoint.get("file", None) if isinstance(_entrypoint,dict) else _entrypoint
if filesource and filesource.isArchive:
filesource.select(_entrypointFile)
else:
filesource = FileSource.openFileSource(_entrypointFile, self, sourceZipStream)
self.entrypointFile = _entrypointFile
timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
firstStartedAt = startedAt = time.time()
modelDiffReport = None
modelXbrl = None
try:
if filesource:
modelXbrl = self.modelManager.load(filesource, _("views loading"))
except ModelDocument.LoadingException:
pass
except Exception as err:
self.addToLog(_("Entry point loading Failed to complete request: \n{0} \n{1}").format(
err,
traceback.format_tb(sys.exc_info()[2])),
messageCode="Exception",
level=logging.ERROR)
success = False # loading errors, don't attempt to utilize loaded DTS
if modelXbrl and modelXbrl.modelDocument:
loadTime = time.time() - startedAt
modelXbrl.profileStat(_("load"), loadTime)
self.addToLog(format_string(self.modelManager.locale,
_("loaded in %.2f secs at %s"),
(loadTime, timeNow)),
messageCode="info", file=self.entrypointFile)
if options.importFiles:
for importFile in options.importFiles.split("|"):
fileName = importFile.strip()
if sourceZipStream is not None and not (fileName.startswith('http://') or os.path.isabs(fileName)):
fileName = os.path.dirname(modelXbrl.uri) + os.sep + fileName # make relative to sourceZipStream
ModelDocument.load(modelXbrl, fileName, isSupplemental=True)
loadTime = time.time() - startedAt
self.addToLog(format_string(self.modelManager.locale,
_("import in %.2f secs at %s"),
(loadTime, timeNow)),
messageCode="info", file=importFile)
modelXbrl.profileStat(_("import"), loadTime)
if modelXbrl.errors:
success = False # loading errors, don't attempt to utilize loaded DTS
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
pluginXbrlMethod(self, options, modelXbrl)
else: # not a test case, probably instance or DTS
for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Xbrl.Loaded"):
pluginXbrlMethod(self, options, modelXbrl, _entrypoint)
else:
success = False
if success and options.diffFile and options.versReportFile:
try:
diffFilesource = FileSource.FileSource(options.diffFile,self)
startedAt = time.time()
modelXbrl2 = self.modelManager.load(diffFilesource, _("views loading"))
if modelXbrl2.errors:
if not options.keepOpen:
modelXbrl2.close()
success = False
else:
loadTime = time.time() - startedAt
modelXbrl.profileStat(_("load"), loadTime)
self.addToLog(format_string(self.modelManager.locale,
_("diff comparison DTS loaded in %.2f secs"),
loadTime),
messageCode="info", file=self.entrypointFile)
startedAt = time.time()
modelDiffReport = self.modelManager.compareDTSes(options.versReportFile)
diffTime = time.time() - startedAt
modelXbrl.profileStat(_("diff"), diffTime)
self.addToLog(format_string(self.modelManager.locale,
_("compared in %.2f secs"),
diffTime),
messageCode="info", file=self.entrypointFile)
except ModelDocument.LoadingException:
success = False
except Exception as err:
success = False
self.addToLog(_("[Exception] Failed to doad diff file: \n{0} \n{1}").format(
err,
traceback.format_tb(sys.exc_info()[2])))
if success:
try:
modelXbrl = self.modelManager.modelXbrl
hasFormulae = modelXbrl.hasFormulae
isAlreadyValidated = False
for pluginXbrlMethod in pluginClassMethods("ModelDocument.IsValidated"):
if pluginXbrlMethod(modelXbrl): # e.g., streaming extensions already has validated
isAlreadyValidated = True
if options.validate and not isAlreadyValidated:
startedAt = time.time()
if options.formulaAction: # don't automatically run formulas
modelXbrl.hasFormulae = False
self.modelManager.validate()
if options.formulaAction: # restore setting
modelXbrl.hasFormulae = hasFormulae
self.addToLog(format_string(self.modelManager.locale,
_("validated in %.2f secs"),
time.time() - startedAt),
messageCode="info", file=self.entrypointFile)
if (options.formulaAction in ("validate", "run") and # do nothing here if "none"
not isAlreadyValidated): # formulas can't run if streaming has validated the instance
from arelle import ValidateXbrlDimensions, ValidateFormula
startedAt = time.time()
if not options.validate:
ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
                    # set up fresh parameters from formula options
modelXbrl.parameters = fo.typedParameters()
ValidateFormula.validate(modelXbrl, compileOnly=(options.formulaAction != "run"))
self.addToLog(format_string(self.modelManager.locale,
_("formula validation and execution in %.2f secs")
if options.formulaAction == "run"
else _("formula validation only in %.2f secs"),
time.time() - startedAt),
messageCode="info", file=self.entrypointFile)
if options.testReport:
ViewFileTests.viewTests(self.modelManager.modelXbrl, options.testReport, options.testReportCols)
if options.rssReport:
ViewFileRssFeed.viewRssFeed(self.modelManager.modelXbrl, options.rssReport, options.rssReportCols)
if options.DTSFile:
ViewFileDTS.viewDTS(modelXbrl, options.DTSFile)
if options.factsFile:
ViewFileFactList.viewFacts(modelXbrl, options.factsFile, labelrole=options.labelRole, lang=options.labelLang, cols=options.factListCols)
if options.factTableFile:
ViewFileFactTable.viewFacts(modelXbrl, options.factTableFile, labelrole=options.labelRole, lang=options.labelLang)
if options.conceptsFile:
ViewFileConcepts.viewConcepts(modelXbrl, options.conceptsFile, labelrole=options.labelRole, lang=options.labelLang)
if options.preFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.preFile, "Presentation Linkbase", "http://www.xbrl.org/2003/arcrole/parent-child", labelrole=options.labelRole, lang=options.labelLang)
if options.calFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.calFile, "Calculation Linkbase", "http://www.xbrl.org/2003/arcrole/summation-item", labelrole=options.labelRole, lang=options.labelLang)
if options.dimFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.dimFile, "Dimensions", "XBRL-dimensions", labelrole=options.labelRole, lang=options.labelLang)
if options.formulaeFile:
ViewFileFormulae.viewFormulae(modelXbrl, options.formulaeFile, "Formulae", lang=options.labelLang)
if options.viewArcrole and options.viewFile:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, options.viewFile, os.path.basename(options.viewArcrole), options.viewArcrole, labelrole=options.labelRole, lang=options.labelLang)
if options.roleTypesFile:
ViewFileRoleTypes.viewRoleTypes(modelXbrl, options.roleTypesFile, "Role Types", isArcrole=False, lang=options.labelLang)
if options.arcroleTypesFile:
ViewFileRoleTypes.viewRoleTypes(modelXbrl, options.arcroleTypesFile, "Arcrole Types", isArcrole=True, lang=options.labelLang)
for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Xbrl.Run"):
g = pluginXbrlMethod(self, options, modelXbrl, _entrypoint)
except (IOError, EnvironmentError) as err:
self.addToLog(_("[IOError] Failed to save output:\n {0}").format(err),
messageCode="IOError",
file=options.entrypointFile,
level=logging.CRITICAL)
success = False
except Exception as err:
self.addToLog(_("[Exception] Failed to complete request: \n{0} \n{1}").format(
err,
traceback.format_tb(sys.exc_info()[2])),
messageCode=err.__class__.__name__,
file=options.entrypointFile,
level=logging.CRITICAL)
success = False
if modelXbrl:
modelXbrl.profileStat(_("total"), time.time() - firstStartedAt)
if options.collectProfileStats and modelXbrl:
modelXbrl.logProfileStats()
if not options.keepOpen:
if modelDiffReport:
self.modelManager.close(modelDiffReport)
elif modelXbrl:
self.modelManager.close(modelXbrl)
if options.validate:
for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Filing.Validate"):
pluginXbrlMethod(self, options, filesource, _entrypointFiles, sourceZipStream=sourceZipStream, responseZipStream=responseZipStream)
for pluginXbrlMethod in pluginClassMethods("CntlrCmdLine.Filing.End"):
pluginXbrlMethod(self, options, filesource, _entrypointFiles, sourceZipStream=sourceZipStream, responseZipStream=responseZipStream)
self.username = self.password = None #dereference password
if options.statusPipe and getattr(self, "statusPipe", None) is not None:
win32file.WriteFile(self.statusPipe, b" ") # clear status
win32file.FlushFileBuffers(self.statusPipe)
win32file.SetFilePointer(self.statusPipe, 0, win32file.FILE_BEGIN) # hangs on close without this
win32file.CloseHandle(self.statusPipe)
self.statusPipe = None # dereference
return (success, modelXbrl, g)
# default web authentication password
def internet_user_password(self, host, realm):
return (self.username, self.password)
# special show status for named pipes
def showStatusOnPipe(self, message, clearAfter=None):
# now = time.time() # seems ok without time-limiting writes to the pipe
if self.statusPipe is not None: # max status updates 3 per second now - 0.3 > self.lastStatusTime and
# self.lastStatusTime = now
try:
if self.parentProcessHandle is not None:
if win32process.GetExitCodeProcess(self.parentProcessHandle) != STILL_ACTIVE:
sys.exit()
win32file.WriteFile(self.statusPipe, (message or "").encode("utf8"))
win32file.FlushFileBuffers(self.statusPipe)
win32file.SetFilePointer(self.statusPipe, 0, win32file.FILE_BEGIN) # hangs on close without this
except Exception as ex:
#with open("Z:\\temp\\trace.log", "at", encoding="utf-8") as fh:
# fh.write("Status pipe exception {} {}\n".format(type(ex), ex))
                sys.exit()
if __name__ == "__main__":
'''
if '--COMserver' in sys.argv:
from arelle import CntlrComServer
CntlrComServer.main()
else:
main()
'''
main()
| apache-2.0 |
CyberTaoFlow/scirius | rules/south_migrations/0002_auto__chg_field_reference_value.py | 2 | 4939 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Reference.value'
db.alter_column(u'rules_reference', 'value', self.gf('django.db.models.fields.CharField')(max_length=1000))
def backwards(self, orm):
# Changing field 'Reference.value'
db.alter_column(u'rules_reference', 'value', self.gf('django.db.models.fields.CharField')(max_length=100))
models = {
u'rules.category': {
'Meta': {'object_name': 'Category'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 12, 0, 0)'}),
'descr': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rules.Source']"})
},
u'rules.reference': {
'Meta': {'object_name': 'Reference'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'rules.rule': {
'Meta': {'object_name': 'Rule'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rules.Category']"}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '10000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['rules.Reference']", 'symmetrical': 'False', 'blank': 'True'}),
'rev': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'unique': 'True'}),
'state': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'rules.ruleset': {
'Meta': {'object_name': 'Ruleset'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['rules.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {}),
'descr': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['rules.SourceAtVersion']", 'symmetrical': 'False'}),
'suppressed_rules': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['rules.Rule']", 'symmetrical': 'False', 'blank': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
u'rules.source': {
'Meta': {'object_name': 'Source'},
'created_date': ('django.db.models.fields.DateTimeField', [], {}),
'datatype': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '400'})
},
u'rules.sourceatversion': {
'Meta': {'object_name': 'SourceAtVersion'},
'git_version': ('django.db.models.fields.CharField', [], {'default': "'HEAD'", 'max_length': '42'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rules.Source']"}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 12, 0, 0)', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '42'})
}
}
complete_apps = ['rules'] | gpl-3.0 |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/fileinput.py | 224 | 14143 | """Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
"""input([files[, inplace[, backup[, mode[, openhook]]]]])
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
    for use during iteration. The parameters to this function will be passed
along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.fileno()
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
"""class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
Class FileInput is the implementation of the module; its methods
    filename(), lineno(), filelineno(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, basestring):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
self._buffer = []
self._bufindex = 0
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not hasattr(openhook, '__call__'):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __iter__(self):
return self
def next(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
line = self.readline()
if not line:
raise StopIteration
return line
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
try:
return self.next()
except StopIteration:
raise IndexError, "end of input reached"
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or os.extsep+"bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except OSError:
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import codecs
def openhook(filename, mode):
return codecs.open(filename, mode, encoding)
return openhook
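# Hedged usage sketch (not part of the original module).  It illustrates the
# two idioms described in the module docstring; 'example.txt' is a placeholder
# file name chosen only for illustration.
def _example_usage():
    # 1. Read one or more files as a single stream, decoding via an open hook.
    for line in input(['example.txt'], openhook=hook_encoded('utf-8')):
        sys.stdout.write("%s:%d: %s" % (filename(), filelineno(), line))
    close()
    # 2. In-place filtering: with inplace=1, stdout is redirected into the
    #    input file, so every line written here rewrites that file.
    for line in input(['example.txt'], inplace=1, backup='.bak'):
        sys.stdout.write(line.replace('foo', 'bar'))
    close()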
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
| apache-2.0 |
stylianos-kampakis/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
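# Hedged sketch (not part of the original example): the same meshgrid /
# decision_function pattern used by Controller.decision_surface, shown as a
# stand-alone helper.  The four toy points below are assumptions chosen only
# for illustration.
def _decision_surface_demo():
    X = np.array([[-10.0, -10.0], [10.0, 10.0], [-10.0, 10.0], [10.0, -10.0]])
    y = np.array([1, 1, -1, -1])
    clf = svm.SVC(kernel='rbf', C=1.0, gamma=0.01).fit(X, y)
    xx = np.arange(x_min, x_max + 1, 1)
    yy = np.arange(y_min, y_max + 1, 1)
    X1, X2 = np.meshgrid(xx, yy)
    # Evaluate the decision function on the grid and reshape back to 2-D.
    Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)
    return X1, X2, Z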
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
dya2/python-for-android | python3-alpha/python3-src/Lib/test/test_exception_variations.py | 182 | 4036 |
from test.support import run_unittest
import unittest
class ExceptionTestCase(unittest.TestCase):
def test_try_except_else_finally(self):
hit_except = False
hit_else = False
hit_finally = False
try:
raise Exception('nyaa!')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
self.assertFalse(hit_else)
def test_try_except_else_finally_no_exception(self):
hit_except = False
hit_else = False
hit_finally = False
try:
pass
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
self.assertTrue(hit_else)
def test_try_except_finally(self):
hit_except = False
hit_finally = False
try:
raise Exception('yarr!')
except:
hit_except = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
def test_try_except_finally_no_exception(self):
hit_except = False
hit_finally = False
try:
pass
except:
hit_except = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
def test_try_except(self):
hit_except = False
try:
raise Exception('ahoy!')
except:
hit_except = True
self.assertTrue(hit_except)
def test_try_except_no_exception(self):
hit_except = False
try:
pass
except:
hit_except = True
self.assertFalse(hit_except)
def test_try_except_else(self):
hit_except = False
hit_else = False
try:
raise Exception('foo!')
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_else)
self.assertTrue(hit_except)
def test_try_except_else_no_exception(self):
hit_except = False
hit_else = False
try:
pass
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_except)
self.assertTrue(hit_else)
def test_try_finally_no_exception(self):
hit_finally = False
try:
pass
finally:
hit_finally = True
self.assertTrue(hit_finally)
def test_nested(self):
hit_finally = False
hit_inner_except = False
hit_inner_finally = False
try:
try:
raise Exception('inner exception')
except:
hit_inner_except = True
finally:
hit_inner_finally = True
finally:
hit_finally = True
self.assertTrue(hit_inner_except)
self.assertTrue(hit_inner_finally)
self.assertTrue(hit_finally)
def test_nested_else(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
try:
try:
pass
except:
hit_inner_except = True
else:
hit_inner_else = True
raise Exception('outer exception')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_inner_except)
self.assertTrue(hit_inner_else)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_main():
run_unittest(ExceptionTestCase)
if __name__ == '__main__':
test_main()
| apache-2.0 |
JulyKikuAkita/PythonPrac | cs15211/PathSumII.py | 1 | 5669 | __source__ = 'https://leetcode.com/problems/path-sum-ii/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/path-sum-ii.py
# Time: O(n)
# Space: O(h), h is height of binary tree
# DFS
#
# Description: Leetcode # 113. Path Sum II
#
# Given a binary tree and a sum, find all root-to-leaf paths where each path's sum equals the given sum.
#
# For example:
# Given the below binary tree and sum = 22,
# 5
# / \
# 4 8
# / / \
# 11 13 4
# / \ / \
# 7 2 5 1
# return
# [
# [5,4,11,2],
# [5,8,4,5]
# ]
#
#
# Companies
# Bloomberg
# Related Topics
# Tree Depth-first Search
# Similar Questions
# Path Sum Binary Tree Paths Path Sum III
#
import unittest
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @param sum, an integer
# @return a list of lists of integers
def pathSum(self, root, sum):
return self.pathSumRecu([], [], root, sum)
def pathSumRecu(self, result, cur, root, sum):
if root is None:
return result
if root.left is None and root.right is None and root.val == sum:
result.append(cur + [root.val])
return result
cur.append(root.val)
self.pathSumRecu(result, cur, root.left, sum - root.val)
self.pathSumRecu(result, cur, root.right, sum - root.val)
cur.pop()
return result
class Solution2:
# @param root, a tree node
# @param sum, an integer
# @return a list of lists of integers
def pathSum(self, root, sum):
result = []
self.pathSumRecu(result, [], root, sum)
return result
def pathSumRecu(self, result, cur, root, sum):
if root is None:
            return result  # a bare return here would hand None back to the caller
if root.left is None and root.right is None and root.val == sum:
result.append(cur + [root.val])
return result
self.pathSumRecu(result, cur + [root.val], root.left, sum - root.val)
self.pathSumRecu(result, cur + [root.val], root.right, sum - root.val)
return result
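# Hedged usage sketch (added for illustration): build the full tree from the
# problem statement above and check that both root-to-leaf paths are found.
def _demo_full_tree():
    root = TreeNode(5)
    root.left = TreeNode(4)
    root.right = TreeNode(8)
    root.left.left = TreeNode(11)
    root.left.left.left = TreeNode(7)
    root.left.left.right = TreeNode(2)
    root.right.left = TreeNode(13)
    root.right.right = TreeNode(4)
    root.right.right.left = TreeNode(5)
    root.right.right.right = TreeNode(1)
    assert Solution().pathSum(root, 22) == [[5, 4, 11, 2], [5, 8, 4, 5]]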
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)
root.left.left.left = TreeNode(7)
root.left.left.right = TreeNode(2)
print Solution().pathSum(root, 22)
print Solution2().pathSum(root, 77)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# DFS
# 2ms 61.15%
class Solution {
public List<List<Integer>> pathSum(TreeNode root, int sum) {
List<List<Integer>> result = new ArrayList<>();
if (root == null) {
return result;
}
pathSum(root, sum, result, new ArrayList<>());
return result;
}
private void pathSum(TreeNode root, int sum, List<List<Integer>> result, List<Integer> list) {
sum -= root.val;
list.add(root.val);
if (root.left == null && root.right == null) {
if (sum == 0) {
result.add(new ArrayList<>(list));
}
} else {
if (root.left != null) {
pathSum(root.left, sum, result, list);
}
if (root.right != null) {
pathSum(root.right, sum, result, list);
}
}
list.remove(list.size() - 1);
}
}
# 2ms 61.15%
class Solution {
public List<List<Integer>> pathSum(TreeNode root, int sum) {
List<List<Integer>> res = new ArrayList<>();
List<Integer> path = new ArrayList<>();
dfs(root, sum, res, path);
return res;
}
public void dfs(TreeNode root, int sum, List<List<Integer>> res, List<Integer> path){
if(root==null) return;
path.add(root.val);
if(root.left==null && root.right==null ){
if(root.val==sum)
res.add(new ArrayList<Integer>(path));
return;
}
if(root.left!=null) {
dfs(root.left,sum-root.val,res,path);
path.remove(path.size()-1);
}
if(root.right!=null) {
dfs(root.right,sum-root.val,res,path);
path.remove(path.size()-1);
}
}
}
# Iterative DFS (postorder traversal with an explicit stack)
# 6ms 9.98%
class Solution {
public List<List<Integer>> pathSum(TreeNode root, int sum) {
List<List<Integer>> res = new ArrayList<>();
List<Integer> path = new ArrayList<>();
Stack<TreeNode> stack = new Stack<TreeNode>();
int SUM = 0;
TreeNode cur = root;
TreeNode pre = null;
while(cur!=null || !stack.isEmpty()){
while(cur!=null){
stack.push(cur);
path.add(cur.val);
SUM+=cur.val;
cur=cur.left;
}
cur = stack.peek();
if(cur.right!=null && cur.right!=pre){
cur = cur.right;
continue;
}
if(cur.left==null && cur.right==null && SUM==sum)
res.add(new ArrayList<Integer>(path));
pre = cur;
stack.pop();
path.remove(path.size()-1);
SUM-=cur.val;
cur = null;
}
return res;
}
}
''' | apache-2.0 |
neraliu/tpjs | src/breakpad/src/tools/gyp/test/configurations/inheritance/gyptest-inheritance.py | 430 | 1047 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('configurations.gyp')
test.set_configuration('Release')
test.build('configurations.gyp')
test.run_built_executable('configurations',
stdout=('Base configuration\n'
'Common configuration\n'
'Common2 configuration\n'
'Release configuration\n'))
test.set_configuration('Debug')
test.build('configurations.gyp')
test.run_built_executable('configurations',
stdout=('Base configuration\n'
'Common configuration\n'
'Common2 configuration\n'
'Debug configuration\n'))
test.pass_test()
| bsd-3-clause |
gkc1000/pyscf | pyscf/pbc/dft/__init__.py | 1 | 1454 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.pbc.dft.gen_grid import UniformGrids, BeckeGrids
from pyscf.pbc.dft import rks
from pyscf.pbc.dft import uks
from pyscf.pbc.dft import roks
from pyscf.pbc.dft import krks
from pyscf.pbc.dft import kuks
from pyscf.pbc.dft import kroks
UKS = uks.UKS
ROKS = roks.ROKS
KRKS = krks.KRKS
KUKS = kuks.KUKS
KROKS = kroks.KROKS
def RKS(cell, *args, **kwargs):
if cell.spin == 0:
return rks.RKS(cell, *args, **kwargs)
else:
return roks.ROKS(cell, *args, **kwargs)
RKS.__doc__ = rks.RKS.__doc__
def KS(cell, *args, **kwargs):
if cell.spin == 0:
return rks.RKS(cell, *args, **kwargs)
else:
return uks.UKS(cell, *args, **kwargs)
def KKS(cell, *args, **kwargs):
if cell.spin == 0:
return krks.KRKS(cell, *args, **kwargs)
else:
return kuks.KUKS(cell, *args, **kwargs)
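# Hedged usage sketch (not part of the module): the spin-based dispatch above
# lets callers always go through KS()/KKS() and get the right mean-field class
# back.  The xc functional below is an assumption chosen for illustration only.
def _example_ks(cell):
    mf = KS(cell)       # rks.RKS when cell.spin == 0, uks.UKS otherwise
    mf.xc = 'lda,vwn'   # exchange-correlation functional
    return mf.kernel()  # run the SCF cycle and return the converged energy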
| apache-2.0 |
vmindru/ansible | lib/ansible/modules/storage/netapp/_na_cdot_volume.py | 59 | 15187 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_volume
short_description: Manage NetApp cDOT volumes
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_volume) instead.
description:
- Create or destroy volumes on NetApp cDOT
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
infinite:
description:
- Set True if the volume is an Infinite Volume.
type: bool
default: 'no'
online:
description:
- Whether the specified volume is online, or not.
type: bool
default: 'yes'
aggregate_name:
description:
- The name of the aggregate the flexvol should exist on. Required when C(state=present).
size:
description:
- The size of the volume in (size_unit). Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
vserver:
description:
- Name of the vserver to use.
required: true
junction_path:
description:
    - Junction path at which to mount the volume.
required: false
version_added: '2.6'
export_policy:
description:
- Export policy to set for the specified junction path.
required: false
default: default
version_added: '2.6'
snapshot_policy:
description:
- Snapshot policy to set for the specified volume.
required: false
default: default
version_added: '2.6'
'''
EXAMPLES = """
- name: Create FlexVol
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
aggregate_name: aggr1
size: 20
size_unit: mb
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
junction_path: /ansibleVolume
export_policy: all_nfs_networks
snapshot_policy: daily
- name: Make FlexVol offline
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
online: False
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
is_online=dict(required=False, type='bool', default=True, aliases=['online']),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
aggregate_name=dict(type='str'),
vserver=dict(required=True, type='str', default=None),
junction_path=dict(required=False, type='str', default=None),
export_policy=dict(required=False, type='str', default='default'),
snapshot_policy=dict(required=False, type='str', default='default'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['aggregate_name', 'size'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.is_infinite = p['is_infinite']
self.is_online = p['is_online']
self.size_unit = p['size_unit']
self.vserver = p['vserver']
self.junction_path = p['junction_path']
self.export_policy = p['export_policy']
self.snapshot_policy = p['snapshot_policy']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.aggregate_name = p['aggregate_name']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_volume(self):
"""
Return details about the volume
:param:
name : Name of the volume
:return: Details about the volume. None if not found.
:rtype: dict
"""
volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
volume_id_attributes.add_new_child('name', self.name)
volume_attributes.add_child_elem(volume_id_attributes)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_attributes)
volume_info.add_child_elem(query)
result = self.server.invoke_successfully(volume_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
volume_attributes = result.get_child_by_name(
'attributes-list').get_child_by_name(
'volume-attributes')
# Get volume's current size
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes')
current_size = volume_space_attributes.get_child_content('size')
# Get volume's state (online/offline)
volume_state_attributes = volume_attributes.get_child_by_name(
'volume-state-attributes')
current_state = volume_state_attributes.get_child_content('state')
is_online = None
if current_state == "online":
is_online = True
elif current_state == "offline":
is_online = False
return_value = {
'name': self.name,
'size': current_size,
'is_online': is_online,
}
return return_value
def create_volume(self):
create_parameters = {'volume': self.name,
'containing-aggr-name': self.aggregate_name,
'size': str(self.size),
}
if self.junction_path:
create_parameters['junction-path'] = str(self.junction_path)
if self.export_policy != 'default':
create_parameters['export-policy'] = str(self.export_policy)
if self.snapshot_policy != 'default':
create_parameters['snapshot-policy'] = str(self.snapshot_policy)
volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-create', **create_parameters)
try:
self.server.invoke_successfully(volume_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
exception=traceback.format_exc())
def delete_volume(self):
if self.is_infinite:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy-async', **{'volume-name': self.name})
else:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy', **{'name': self.name, 'unmount-and-offline':
'true'})
try:
self.server.invoke_successfully(volume_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def rename_volume(self):
"""
Rename the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename-async',
**{'volume-name': self.name, 'new-volume-name': str(
self.name)})
else:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename', **{'volume': self.name, 'new-volume-name': str(
self.name)})
try:
self.server.invoke_successfully(volume_rename,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def resize_volume(self):
"""
Re-size the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size-async',
**{'volume-name': self.name, 'new-size': str(
self.size)})
else:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size', **{'volume': self.name, 'new-size': str(
self.size)})
try:
self.server.invoke_successfully(volume_resize,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def change_volume_state(self):
"""
Change volume's state (offline/online).
Note: 'is_infinite' needs to be set to True in order to change the
state of an Infinite Volume.
"""
state_requested = None
if self.is_online:
# Requested state is 'online'.
state_requested = "online"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online',
**{'name': self.name})
else:
# Requested state is 'offline'.
state_requested = "offline"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline',
**{'name': self.name})
try:
self.server.invoke_successfully(volume_change_state,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
(self.name, state_requested, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
volume_exists = False
rename_volume = False
resize_volume = False
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
if str(volume_detail['size']) != str(self.size):
resize_volume = True
changed = True
if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
changed = True
if self.is_online is False:
# Volume is online, but requested state is offline
pass
else:
# Volume is offline but requested state is online
pass
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
else:
if resize_volume:
self.resize_volume()
if volume_detail['is_online'] is not \
None and volume_detail['is_online'] != \
self.is_online:
self.change_volume_state()
# Ensure re-naming is the last change made.
if rename_volume:
self.rename_volume()
elif self.state == 'absent':
self.delete_volume()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTVolume()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
Shouqun/node-gn | tools/depot_tools/third_party/boto/ses/exceptions.py | 89 | 1827 | """
Various exceptions that are specific to the SES module.
"""
from boto.exception import BotoServerError
class SESError(BotoServerError):
"""
Sub-class all SES-related errors from here. Don't raise this error
directly from anywhere. The only thing this gets us is the ability to
catch SESErrors separately from the more generic, top-level
BotoServerError exception.
"""
pass
class SESAddressNotVerifiedError(SESError):
"""
Raised when a "Reply-To" address has not been validated in SES yet.
"""
pass
class SESIdentityNotVerifiedError(SESError):
"""
Raised when an identity (domain or address) has not been verified in SES yet.
"""
pass
class SESDomainNotConfirmedError(SESError):
"""
"""
pass
class SESAddressBlacklistedError(SESError):
"""
After you attempt to send mail to an address, and delivery repeatedly
fails, said address is blacklisted for at least 24 hours. The blacklisting
eventually expires, and you are able to attempt delivery again. If you
attempt to send mail to a blacklisted email, this is raised.
"""
pass
class SESDailyQuotaExceededError(SESError):
"""
Your account's daily (rolling 24 hour total) allotment of outbound emails
has been exceeded.
"""
pass
class SESMaxSendingRateExceededError(SESError):
"""
Your account's requests/second limit has been exceeded.
"""
pass
class SESDomainEndsWithDotError(SESError):
"""
Recipient's email address' domain ends with a period/dot.
"""
pass
class SESLocalAddressCharacterError(SESError):
"""
An address contained a control or whitespace character.
"""
pass
class SESIllegalAddressError(SESError):
"""
Raised when an illegal address is encountered.
"""
pass
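# Hedged usage sketch (not part of the module): these exceptions are raised by
# SESConnection calls, so callers typically wrap send_email() like this.  The
# connection object and the addresses below are placeholders for illustration.
def _example_send(conn, body):
    try:
        return conn.send_email('sender@example.com', 'Example subject', body,
                               ['recipient@example.com'])
    except SESAddressBlacklistedError:
        # Delivery to this address has failed repeatedly; wait for the
        # blacklisting window to expire instead of retrying immediately.
        return None
    except SESDailyQuotaExceededError:
        # The 24-hour sending allotment is used up; try again later.
        return None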
| mit |
civisanalytics/ansible | lib/ansible/modules/windows/win_psexec.py | 22 | 4681 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_psexec
version_added: '2.3'
short_description: Runs commands (remotely) as another (privileged) user
description:
- Run commands (remotely) through the PsExec service
- Run commands as another (domain) user (with elevated privileges)
options:
command:
description:
- The command line to run through PsExec (limited to 260 characters).
required: true
executable:
description:
- The location of the PsExec utility (in case it is not located in your PATH).
default: psexec.exe
hostnames:
description:
- The hostnames to run the command.
- If not provided, the command is run locally.
username:
description:
- The (remote) user to run the command as.
- If not provided, the current user is used.
password:
description:
- The password for the (remote) user to run the command as.
      - This is mandatory in order to authenticate yourself.
chdir:
description:
- Run the command from this (remote) directory.
noprofile:
description:
- Run the command without loading the account's profile.
default: False
elevated:
description:
- Run the command with elevated privileges.
default: False
interactive:
description:
- Run the program so that it interacts with the desktop on the remote system.
default: False
limited:
description:
- Run the command as limited user (strips the Administrators group and allows only privileges assigned to the Users group).
default: False
system:
description:
- Run the remote command in the System account.
default: False
priority:
description:
- Used to run the command at a different priority.
choices:
- background
- low
- belownormal
- abovenormal
- high
- realtime
timeout:
description:
- The connection timeout in seconds
wait:
description:
- Wait for the application to terminate.
- Only use for non-interactive applications.
default: True
requires: [ psexec ]
author: Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
# Test the PsExec connection to the local system (target node) with your user
- win_psexec:
command: whoami.exe
# Run regedit.exe locally (on target node) as SYSTEM and interactively
- win_psexec:
command: regedit.exe
interactive: yes
system: yes
# Run the setup.exe installer on multiple servers using the Domain Administrator
- win_psexec:
command: E:\setup.exe /i /IACCEPTEULA
hostnames:
- remote_server1
- remote_server2
username: DOMAIN\Administrator
password: some_password
priority: high
# Run PsExec from custom location C:\Program Files\sysinternals\
- win_psexec:
command: netsh advfirewall set allprofiles state off
executable: C:\Program Files\sysinternals\psexec.exe
hostnames: [ remote_server ]
password: some_password
priority: low
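# Hedged example (added for illustration, not from the original docs):
# run a command remotely and register its output for use by later tasks
- win_psexec:
    command: ipconfig.exe /all
    hostnames: [ remote_server ]
    username: DOMAIN\Administrator
    password: some_password
    wait: yes
  register: psexec_output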
'''
RETURN = r'''
cmd:
description: The complete command line used by the module, including PsExec call and additional options.
returned: always
type: string
sample: psexec.exe \\remote_server -u DOMAIN\Administrator -p some_password E:\setup.exe
rc:
description: The return code for the command
returned: always
type: int
sample: 0
stdout:
description: The standard output from the command
returned: always
type: string
sample: Success.
stderr:
description: The error output from the command
returned: always
type: string
sample: Error 15 running E:\setup.exe
msg:
description: Possible error message on failure
returned: failed
type: string
sample: The 'password' parameter is a required parameter.
changed:
description: Whether or not any changes were made.
returned: always
type: bool
sample: True
'''
| gpl-3.0 |
w4-sjcho/grpc | src/python/grpcio_test/grpc_test/framework/interfaces/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
chainer/chainer | docs/source/conf.py | 4 | 14917 | # -*- coding: utf-8 -*-
#
# Chainer documentation build configuration file, created by
# sphinx-quickstart on Sun May 10 12:22:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import pkg_resources
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import _docstring_check
import _autosummary_check
__version__ = pkg_resources.get_distribution('chainer').version
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
rtd_version = os.environ.get('READTHEDOCS_VERSION')
if rtd_version == 'latest':
tag = 'master'
else:
tag = 'v{}'.format(__version__)
extlinks = {
'blob': ('https://github.com/chainer/chainer/blob/{}/%s'.format(tag), ''),
'tree': ('https://github.com/chainer/chainer/tree/{}/%s'.format(tag), ''),
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.linkcode',
'_napoleon_patch',
]
try:
import sphinxcontrib.spelling # noqa
extensions.append('sphinxcontrib.spelling')
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chainer'
copyright = u'2015, Preferred Networks, inc. and Preferred Infrastructure, inc.'
author = u'Preferred Networks, inc. and Preferred Infrastructure, inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Napoleon settings
napoleon_use_ivar = True
napoleon_include_special_with_doc = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/modified_theme.css'
if on_rtd:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/css/modified_theme.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Chainerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Chainer.tex', u'Chainer Documentation',
u'Preferred Networks, inc. and Preferred Infrastructure, inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chainer', u'Chainer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Chainer', u'Chainer Documentation',
author, 'Chainer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'cupy': ('https://docs-cupy.chainer.org/en/latest/', None),
'chainercv': ('https://chainercv.readthedocs.io/en/latest/', None),
}
doctest_global_setup = '''
import os
import numpy as np
import chainer
from chainer.backends import cuda
from chainer.backends.cuda import cupy
from chainer import Function, gradient_check, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.testing import doctest_helper
from chainer.training import extensions
import chainerx
import onnx_chainer
np.random.seed(0)
'''
spelling_lang = 'en_US'
spelling_word_list_filename = 'spelling_wordlist.txt'
def setup(app):
app.connect('autodoc-process-docstring', _autodoc_process_docstring)
app.connect('build-finished', _build_finished)
def _autodoc_process_docstring(app, what, name, obj, options, lines):
_docstring_check.check(app, what, name, obj, options, lines)
def _build_finished(app, exception):
if exception is None:
_autosummary_check.check(app, exception)
def _import_object_from_name(module_name, fullname):
obj = sys.modules.get(module_name)
if obj is None:
return None
for comp in fullname.split('.'):
obj = getattr(obj, comp)
return obj
def _is_egg_directory(path):
return (path.endswith('.egg') and
os.path.isdir(os.path.join(path, 'EGG-INFO')))
def _is_git_root(path):
return os.path.isdir(os.path.join(path, '.git'))
_source_root = None
def _find_source_root(source_abs_path):
    # Note that the READTHEDOCS* environment variables cannot be used, because
    # they are not set in the Docker environment.
global _source_root
if _source_root is None:
dir = os.path.dirname(source_abs_path)
while True:
if _is_egg_directory(dir) or _is_git_root(dir):
# Reached the root directory
_source_root = dir
break
dir_ = os.path.dirname(dir)
if len(dir_) == len(dir):
raise RuntimeError('Couldn\'t parse root directory from '
'source file: {}'.format(source_abs_path))
dir = dir_
return _source_root
def _get_source_relative_path(source_abs_path):
return os.path.relpath(source_abs_path, _find_source_root(source_abs_path))
def _get_sourcefile_and_linenumber(obj):
# Retrieve the original function wrapped by contextlib.contextmanager
if callable(obj):
closure = getattr(obj, '__closure__', None)
if closure is not None:
obj = closure[0].cell_contents
# Get the source file name and line number at which obj is defined.
try:
filename = inspect.getsourcefile(obj)
except TypeError:
# obj is not a module, class, function, ..etc.
return None, None
# inspect can return None for cython objects
if filename is None:
return None, None
# Get the source line number
_, linenum = inspect.getsourcelines(obj)
return filename, linenum
def linkcode_resolve(domain, info):
if domain != 'py' or not info['module']:
return None
if 1 == int(os.environ.get('CHAINER_DOCS_SKIP_LINKCODE', 0)):
return None
# Import the object from module path
obj = _import_object_from_name(info['module'], info['fullname'])
# If it's not defined in the internal module, return None.
mod = inspect.getmodule(obj)
if mod is None:
return None
if not (mod.__name__ == 'chainer' or mod.__name__.startswith('chainer.')):
return None
# Retrieve source file name and line number
filename, linenum = _get_sourcefile_and_linenumber(obj)
if filename is None or linenum is None:
return None
filename = os.path.realpath(filename)
relpath = _get_source_relative_path(filename)
return 'https://github.com/chainer/chainer/blob/{}/{}#L{}'.format(
tag, relpath, linenum)
| mit |
av8ramit/tensorflow | tensorflow/contrib/predictor/predictor_factories.py | 10 | 5194 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factory functions for `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.predictor import contrib_estimator_predictor
from tensorflow.contrib.predictor import core_estimator_predictor
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.python.estimator import estimator as core_estimator
def from_contrib_estimator(estimator,
prediction_input_fn,
input_alternative_key=None,
output_alternative_key=None,
graph=None):
"""Constructs a `Predictor` from a `tf.contrib.learn.Estimator`.
Args:
estimator: an instance of `tf.contrib.learn.Estimator`.
prediction_input_fn: a function that takes no arguments and returns an
instance of `InputFnOps`.
input_alternative_key: Optional. Specify the input alternative used for
prediction.
output_alternative_key: Specify the output alternative used for
prediction. Not needed for single-headed models but required for
multi-headed models.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a core `Estimator` instead of a contrib
`Estimator`.
"""
if isinstance(estimator, core_estimator.Estimator):
    raise TypeError('Expected estimator to be of type '
'tf.contrib.learn.Estimator, but got type '
'tf.python.estimator.Estimator. You likely want to call '
'from_estimator.')
return contrib_estimator_predictor.ContribEstimatorPredictor(
estimator,
prediction_input_fn,
input_alternative_key=input_alternative_key,
output_alternative_key=output_alternative_key,
graph=graph)
def from_estimator(estimator,
serving_input_receiver_fn,
output_key=None,
graph=None):
"""Constructs a `Predictor` from a `tf.python.estimator.Estimator`.
Args:
    estimator: an instance of `tf.python.estimator.Estimator`.
serving_input_receiver_fn: a function that takes no arguments and returns
an instance of `ServingInputReceiver` compatible with `estimator`.
output_key: Optional string specifying the export output to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
Returns:
An initialized `Predictor`.
Raises:
TypeError: if `estimator` is a contrib `Estimator` instead of a core
`Estimator`.
"""
if isinstance(estimator, contrib_estimator.Estimator):
    raise TypeError('Expected estimator to be of type '
'tf.python.estimator.Estimator, but got type '
'tf.contrib.learn.Estimator. You likely want to call '
'from_contrib_estimator.')
return core_estimator_predictor.CoreEstimatorPredictor(
estimator, serving_input_receiver_fn, output_key=output_key, graph=graph)
def from_saved_model(export_dir,
signature_def_key=None,
signature_def=None,
tags=None,
graph=None):
"""Constructs a `Predictor` from a `SavedModel` on disk.
Args:
export_dir: a path to a directory containing a `SavedModel`.
signature_def_key: Optional string specifying the signature to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
      `signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
tags: Optional. Tags that will be used to retrieve the correct
`SignatureDef`. Defaults to `DEFAULT_TAGS`.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
Returns:
An initialized `Predictor`.
Raises:
ValueError: More than one of `signature_def_key` and `signature_def` is
specified.
"""
return saved_model_predictor.SavedModelPredictor(
export_dir,
signature_def_key=signature_def_key,
signature_def=signature_def,
tags=tags,
graph=graph)
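# A minimal usage sketch (illustrative export directory and feed key, not part of the
# original module): load a SavedModel exported by an Estimator and run a prediction.
#
#   predictor = from_saved_model('/tmp/exported_model')
#   predictions = predictor({'x': [[1.0, 2.0, 3.0]]})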
| apache-2.0 |
sujeet4github/MyLangUtils | LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/pip/_vendor/cachecontrol/_cmd.py | 488 | 1320 | import logging
from pip._vendor import requests
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.cache import DictCache
from pip._vendor.cachecontrol.controller import logger
from argparse import ArgumentParser
def setup_logging():
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
logger.addHandler(handler)
def get_session():
adapter = CacheControlAdapter(
DictCache(),
cache_etags=True,
serializer=None,
heuristic=None,
)
sess = requests.Session()
sess.mount('http://', adapter)
sess.mount('https://', adapter)
sess.cache_controller = adapter.controller
return sess
def get_args():
parser = ArgumentParser()
parser.add_argument('url', help='The URL to try and cache')
return parser.parse_args()
def main(args=None):
args = get_args()
sess = get_session()
# Make a request to get a response
resp = sess.get(args.url)
# Turn on logging
setup_logging()
# try setting the cache
sess.cache_controller.cache_response(resp.request, resp.raw)
# Now try to get it
if sess.cache_controller.cached_request(resp.request):
print('Cached!')
else:
print('Not cached :(')
if __name__ == '__main__':
main()
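# Example invocation (illustrative URL; assumes pip's vendored module path is importable):
#   python -m pip._vendor.cachecontrol._cmd https://example.com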
| gpl-3.0 |
cytec/SickRage | lib/pyasn1/debug.py | 185 | 1541 | import sys
from pyasn1.compat.octets import octs2ints
from pyasn1 import error
from pyasn1 import __version__
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Debug:
defaultPrinter = sys.stderr.write
def __init__(self, *flags):
self._flags = flagNone
self._printer = self.defaultPrinter
self('running pyasn1 version %s' % __version__)
for f in flags:
if f not in flagMap:
raise error.PyAsn1Error('bad debug flag %s' % (f,))
self._flags = self._flags | flagMap[f]
self('debug category \'%s\' enabled' % f)
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer('DBG: %s\n' % msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(l):
global logger
logger = l
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
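# Example usage from application code (illustrative; 'decoder' could also be 'encoder' or 'all'):
#   from pyasn1 import debug
#   debug.setLogger(debug.Debug('decoder'))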
| gpl-3.0 |
soarpenguin/ansible | contrib/inventory/apstra_aos.py | 23 | 20483 | #!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
- Copy both files (.py and .ini) in your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
2 modes are currently supported: **device based** or **blueprint based**:
- For **Device based**, the list of devices is taken from the global device list;
  the serial ID will be used as the inventory_hostname
- For **Blueprint based**, the list of devices is taken from the given blueprint;
  the Node name will be used as the inventory_hostname
Input parameters can be provided either with the ini file or by using Environment Variables:
The following Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the Environment Variables
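For reference, a minimal apstra_aos.ini could look like this (values are illustrative;
blueprint and blueprint_interface are only relevant for blueprint mode):
    [aos]
    aos_server = 192.168.59.250
    port = 8888
    username = admin
    password = admin
    blueprint = my-blueprint
    blueprint_interface = true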
Tested with Apstra AOS 1.1
This script has been inspired by the cobbler.py inventory. Thanks!
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import json
import os
import re
import sys
try:
import argparse
HAS_ARGPARSE = True
except ImportError:
HAS_ARGPARSE = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
if not HAS_ARGPARSE:
raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
# Initialize inventory
self.inventory = dict() # A list of groups and the hosts in that group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
# 2 modes are supported: device based or blueprint based
        # - For device based, the list of devices is taken from the global device list
        #   the serial ID will be used as the inventory_hostname
        # - For Blueprint based, the list of devices is taken from the given blueprint
# the Node name will be used as the inventory_hostname
# ----------------------------------------------------
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
# Check for additional attribute to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
                        # each link has 2 sides [0, 1], and it's unknown which one matches this device
                        # at first we assume the first side (0) matches and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
                                # if we haven't matched the first time, prepare the peer_id
# for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
else:
for device in aos.Devices:
                # If not reachable, create by key and
                # If reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
# Check if device is associated with a blueprint
# if it's create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
        # Try to read all parameters from the file; if not available, try from ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
pass
try:
self.aos_server_port = config.get('aos', 'port')
except:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
pass
try:
self.aos_username = config.get('aos', 'username')
except:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
pass
try:
self.aos_password = config.get('aos', 'password')
except:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
pass
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
pass
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except:
pass
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
# Check if the host exist, if not initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
        Clean up group name by:
        - Replacing all non-alphanumeric characters with underscores
- Converting to lowercase
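        For example (illustrative), 'Leaf Switches-01' becomes 'leaf_switches_01'.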
"""
        rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
return clean_group
def add_device_status_to_var(self, device_name, device):
if 'status' in device.value.keys():
for key, value in device.value['status'].items():
self.add_var_to_host(device.name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()
| gpl-3.0 |