repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
chengsoonong/acton | acton/database.py | 1 | 72157 | """Wrapper class for databases."""
from abc import ABC, abstractmethod
from inspect import Traceback
import json
import logging
import os.path
import tempfile
from typing import Iterable, List, Sequence
import warnings
import time
from acton.proto.acton_pb2 import Database as DatabasePB
import astropy.io.ascii as io_ascii
import astropy.io.fits as io_fits
import astropy.table
import h5py
import numpy
import pandas
import sklearn.preprocessing
from numpy.random import multivariate_normal
LabelEncoderPB = DatabasePB.LabelEncoder
def product(seq: Iterable[int]):
    """Computes the product of an iterable of ints.

    Arguments
    ---------
    seq
        List of ints.

    Returns
    -------
    int
        Product.
    """
    # Multiplicative identity, so an empty sequence yields 1.
    result = 1
    for factor in seq:
        result *= factor
    return result
def serialise_encoder(
        encoder: sklearn.preprocessing.LabelEncoder) -> LabelEncoderPB:
    """Serialises a LabelEncoder as a protobuf.

    Parameters
    ----------
    encoder
        LabelEncoder.

    Returns
    -------
    LabelEncoderPB
        Protobuf representing the LabelEncoder.
    """
    proto = LabelEncoderPB()

    # An encoder that has not been fitted has no classes_ attribute;
    # serialise it as an empty protobuf.
    classes = getattr(encoder, 'classes_', None)
    if classes is None:
        return proto

    # Each class label is mapped to its integer position in classes_.
    for index, label in enumerate(classes):
        entry = proto.encoding.add()
        entry.class_label = str(label)
        entry.class_int = index

    return proto
class Database(ABC):
    """Base class for database wrappers.

    Subclasses are used as context managers and must implement the abstract
    read/write methods below.
    """

    @abstractmethod
    def __enter__(self):
        return self

    @abstractmethod
    def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
        pass

    @abstractmethod
    def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Parameters
        ----------
        ids
            Iterable of IDs.

        Returns
        -------
        numpy.ndarray
            N x D array of feature vectors.
        """

    @abstractmethod
    def read_labels(self,
                    labeller_ids: Sequence[int],
                    instance_ids: Sequence[int]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.

        Returns
        -------
        numpy.ndarray
            T x N x F array of label vectors.
        """

    @abstractmethod
    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        """Writes feature vectors to the database.

        Parameters
        ----------
        ids
            Iterable of IDs.
        features
            N x D array of feature vectors. The ith row corresponds to the
            ith ID in `ids`.
        """

    @abstractmethod
    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        """Writes label vectors to the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.
        labels
            T x N x D array of label vectors. The ith row corresponds to the
            ith labeller ID in `labeller_ids` and the jth column corresponds
            to the jth instance ID in `instance_ids`.
        """

    @abstractmethod
    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """

    @abstractmethod
    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """

    @abstractmethod
    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
class HDF5Database(Database):
    """Database wrapping an HDF5 file as a context manager.

    Attributes
    ----------
    path : str
        Path to HDF5 file.
    _h5_file : h5py.File
        HDF5 file object. Only set between __enter__ and __exit__.
    """

    def __init__(self, path: str):
        self.path = path

    def __enter__(self):
        self._open_hdf5()
        return self

    def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
        # Close and drop the handle so _assert_open fails after exit.
        self._h5_file.close()
        delattr(self, '_h5_file')

    def _assert_open(self):
        """Asserts that the HDF5 file is ready to be read to/written from.

        Raises
        ------
        AssertionError
        """
        assert hasattr(self, '_h5_file'), ('HDF5 database must be used as a '
                                           'context manager.')

    def _open_hdf5(self):
        """Opens the HDF5 file and creates it if it doesn't exist.

        Notes
        -----
        The HDF5 file will be stored in self._h5_file.
        """
        try:
            # Open an existing file read/write.
            self._h5_file = h5py.File(self.path, 'r+')
        except OSError:
            # The file could not be opened (typically: it does not exist).
            # Create it, let the subclass lay out its schema, then reopen
            # read/write.
            # NOTE(review): _setup_hdf5 is only defined on subclasses —
            # this base class appears not to be instantiated directly;
            # confirm.
            with h5py.File(self.path, 'w') as h5_file:
                self._setup_hdf5(h5_file)
            self._h5_file = h5py.File(self.path, 'r+')
class ManagedHDF5Database(HDF5Database):
    """Database using an HDF5 file.

    Notes
    -----
    This database uses an internal schema. For reading files from disk, use
    another Database.

    Attributes
    ----------
    path : str
        Path to HDF5 file.
    label_dtype : str
        Data type of labels.
    feature_dtype : str
        Data type of features.
    _h5_file : h5py.File
        Opened HDF5 file.
    _sync_attrs : List[str]
        List of instance attributes to sync with the HDF5 file's attributes.
    """

    def __init__(self, path: str, label_dtype: str=None,
                 feature_dtype: str=None):
        """
        Parameters
        ----------
        path
            Path to HDF5 file.
        label_dtype
            Data type of labels. If not provided then it will be read from
            the database file; if the database file does not exist then the
            default type of 'float32' will be used.
        feature_dtype
            Data type of features. If not provided then it will be read from
            the database file; if the database file does not exist then the
            default type of 'float32' will be used.
        """
        super().__init__(path)
        self.label_dtype = label_dtype
        self._default_label_dtype = 'float32'
        self.feature_dtype = feature_dtype
        self._default_feature_dtype = 'float32'

        # List of attributes to keep in sync with the HDF5 file.
        self._sync_attrs = ['label_dtype', 'feature_dtype']

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        proto.class_name = 'ManagedHDF5Database'
        db_kwargs = {
            'label_dtype': self.label_dtype,
            'feature_dtype': self.feature_dtype}
        for key, value in db_kwargs.items():
            kwarg = proto.kwarg.add()
            kwarg.key = key
            kwarg.value = json.dumps(value)
        # No encoder for a managed DB - assume that labels are encoded
        # already.
        return proto

    def _open_hdf5(self):
        """Opens the HDF5 file and creates it if it doesn't exist.

        Notes
        -----
        The HDF5 file will be stored in self._h5_file.
        """
        super()._open_hdf5()

        # Load attrs from the HDF5 file for any we haven't specified.
        for attr in self._sync_attrs:
            if getattr(self, attr) is None:
                setattr(self, attr, self._h5_file.attrs[attr])

        self._validate_hdf5()

    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        """Writes feature vectors to the database.

        Parameters
        ----------
        ids
            Iterable of IDs.
        features
            N x D array of feature vectors. The ith row corresponds to the
            ith ID in `ids`.
        """
        self._assert_open()

        # Input validation.
        if len(ids) != len(features):
            raise ValueError('Must have same number of IDs and features.')

        if self._h5_file.attrs['n_features'] == -1:
            # This is the first time we've stored features, so make a record
            # of the dimensionality.
            self._h5_file.attrs['n_features'] = features.shape[1]
        elif self._h5_file.attrs['n_features'] != features.shape[1]:
            raise ValueError(
                'Expected features to have dimensionality {}, got {}'.format(
                    self._h5_file.attrs['n_features'], features.shape[1]))

        # Early termination.
        if not ids:
            return

        # Cast the features to the right type.
        if features.dtype != self.feature_dtype:
            warnings.warn('Casting features from type {} to type {}.'.format(
                features.dtype, self.feature_dtype))
            features = features.astype(self.feature_dtype)

        # Resize the feature array if we need to store more IDs than before.
        max_id = max(ids) + 1
        if max_id > self._h5_file['features'].shape[0]:
            self._h5_file['features'].resize(
                (max_id, self._h5_file.attrs['n_features']))

        # Store the feature vectors.
        # TODO(MatthewJA): Vectorise this. This could be tricky as HDF5
        # doesn't fully support NumPy's fancy indexing.
        for id_, feature in zip(ids, features):
            self._h5_file['features'][id_, :] = feature

        # Add the IDs to the database.
        known_ids = set(self.get_known_instance_ids())
        new_ids = [i for i in ids if i not in known_ids]
        n_new_ids = len(new_ids)
        if n_new_ids:
            # Bug fix: only resize/assign when there are new IDs. With
            # n_new_ids == 0 the slice [-0:] selects the *whole* dataset,
            # and assigning an empty array to it fails (write_labels below
            # already had this guard; this method did not).
            n_old_ids = self._h5_file['instance_ids'].shape[0]
            self._h5_file['instance_ids'].resize((n_old_ids + n_new_ids,))
            self._h5_file['instance_ids'][-n_new_ids:] = numpy.array(
                new_ids, dtype=int)

    def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Parameters
        ----------
        ids
            Iterable of IDs.

        Returns
        -------
        numpy.ndarray
            N x D array of feature vectors.

        Raises
        ------
        KeyError
            If IDs are requested but no features are stored.
        """
        self._assert_open()

        if self._h5_file.attrs['n_features'] == -1 and ids:
            raise KeyError('No features stored in database.')

        # Read the whole dataset into memory and fancy-index with NumPy:
        # h5py's own fancy indexing requires sorted, unique indices, which
        # `ids` need not be. (ds[()] replaces the deprecated ds.value
        # accessor, removed in h5py 3.x.)
        features = self._h5_file['features'][()][ids, :]
        features = numpy.asarray(
            features, dtype=self._h5_file.attrs['feature_dtype'])
        return features

    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        """Writes label vectors to the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.
        labels
            T x N x D array of label vectors. The ith row corresponds to the
            ith labeller ID in `labeller_ids` and the jth column corresponds
            to the jth instance ID in `instance_ids`.
        """
        self._assert_open()

        # Input validation.
        if len(labeller_ids) != labels.shape[0]:
            raise ValueError(
                'labels array has incorrect number of labellers:'
                ' expected {}, got {}.'.format(len(labeller_ids),
                                               labels.shape[0]))

        if len(instance_ids) != labels.shape[1]:
            raise ValueError(
                'labels array has incorrect number of instances:'
                ' expected {}, got {}.'.format(len(instance_ids),
                                               labels.shape[1]))

        if self._h5_file.attrs['label_dim'] == -1:
            # This is the first time we've stored labels, so make a record
            # of the dimensionality.
            self._h5_file.attrs['label_dim'] = labels.shape[2]
        elif self._h5_file.attrs['label_dim'] != labels.shape[2]:
            raise ValueError(
                'Expected labels to have dimensionality {}, got {}'.format(
                    self._h5_file.attrs['label_dim'], labels.shape[2]))

        # Early termination.
        if not labeller_ids or not instance_ids:
            return

        # Cast the labels to the right type.
        if labels.dtype != self.label_dtype:
            warnings.warn('Casting labels from type {} to type {}.'.format(
                labels.dtype, self.label_dtype))
            labels = labels.astype(self.label_dtype)

        # Resize the label array if necessary.
        max_labeller_id = max(labeller_ids) + 1
        max_instance_id = max(instance_ids) + 1
        if (max_labeller_id > self._h5_file['labels'].shape[0] or
                max_instance_id > self._h5_file['labels'].shape[1]):
            self._h5_file['labels'].resize(
                (max_labeller_id, max_instance_id,
                 self._h5_file.attrs['label_dim']))

        # Store the labels.
        # TODO(MatthewJA): Vectorise this.
        for labeller_idx, labeller_id in enumerate(labeller_ids):
            for instance_idx, instance_id in enumerate(instance_ids):
                label = labels[labeller_idx, instance_idx]
                self._h5_file['labels'][
                    labeller_id, instance_id, :] = label

        logging.debug(
            'New label array size: {}'.format(self._h5_file['labels'].shape))

        # Add the instance IDs to the database.
        known_instance_ids = set(self.get_known_instance_ids())
        new_instance_ids = [i for i in instance_ids
                            if i not in known_instance_ids]
        n_new_instance_ids = len(new_instance_ids)
        n_old_instance_ids = self._h5_file['instance_ids'].shape[0]
        if n_new_instance_ids:
            self._h5_file['instance_ids'].resize(
                (n_old_instance_ids + n_new_instance_ids,))
            self._h5_file['instance_ids'][-n_new_instance_ids:] = numpy.array(
                new_instance_ids, dtype=int)

        # Add the labeller IDs to the database.
        known_labeller_ids = set(self.get_known_labeller_ids())
        new_labeller_ids = [i for i in labeller_ids
                            if i not in known_labeller_ids]
        n_new_labeller_ids = len(new_labeller_ids)
        n_old_labeller_ids = self._h5_file['labeller_ids'].shape[0]
        if n_new_labeller_ids:
            self._h5_file['labeller_ids'].resize(
                (n_old_labeller_ids + n_new_labeller_ids,))
            self._h5_file['labeller_ids'][-n_new_labeller_ids:] = numpy.array(
                new_labeller_ids, dtype=int)

    def read_labels(self,
                    labeller_ids: Sequence[int],
                    instance_ids: Sequence[int]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.

        Returns
        -------
        numpy.ndarray
            T x N x F array of label vectors.

        Raises
        ------
        KeyError
            If labels are requested but none are stored.
        """
        self._assert_open()

        if self._h5_file.attrs['label_dim'] == -1 and (
                labeller_ids or instance_ids):
            raise KeyError('No labels stored in database.')

        # ds[()] replaces the deprecated ds.value accessor (removed in
        # h5py 3.x); NumPy then handles the (possibly unsorted) indexing.
        labels = self._h5_file['labels'][()][labeller_ids][:, instance_ids, :]
        labels = numpy.asarray(labels, dtype=self._h5_file.attrs['label_dtype'])
        return labels

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        self._assert_open()
        return [id_ for id_ in self._h5_file['instance_ids']]

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        self._assert_open()
        return [id_ for id_ in self._h5_file['labeller_ids']]

    def _setup_hdf5(self, h5_file: h5py.File):
        """Sets up an HDF5 file to work as a database.

        Parameters
        ----------
        h5_file
            HDF5 file to set up. Must be opened in write mode.
        """
        if self.label_dtype is None:
            self.label_dtype = self._default_label_dtype
        if self.feature_dtype is None:
            self.feature_dtype = self._default_feature_dtype
        # All datasets start empty and are grown on first write; the -1
        # attribute values mean "dimensionality not yet recorded".
        h5_file.create_dataset('features', shape=(0, 0),
                               dtype=self.feature_dtype,
                               maxshape=(None, None))
        h5_file.create_dataset('labels', shape=(0, 0, 0),
                               dtype=self.label_dtype,
                               maxshape=(None, None, None))
        h5_file.create_dataset('instance_ids', shape=(0,),
                               dtype=int, maxshape=(None,))
        h5_file.create_dataset('labeller_ids', shape=(0,),
                               dtype=int, maxshape=(None,))
        h5_file.attrs['label_dtype'] = self.label_dtype
        h5_file.attrs['feature_dtype'] = self.feature_dtype
        h5_file.attrs['n_features'] = -1
        h5_file.attrs['label_dim'] = -1

    def _validate_hdf5(self):
        """Checks that self._h5_file has the correct schema.

        Raises
        ------
        ValueError
        """
        try:
            assert 'features' in self._h5_file
            assert 'labels' in self._h5_file
            assert 'instance_ids' in self._h5_file
            assert 'labeller_ids' in self._h5_file
            assert len(self._h5_file['features'].shape) == 2
            assert len(self._h5_file['labels'].shape) == 3
            assert len(self._h5_file['instance_ids'].shape) == 1
            assert len(self._h5_file['labeller_ids'].shape) == 1
        except AssertionError:
            raise ValueError(
                'File {} is not a valid database.'.format(self.path))

        for attr in self._sync_attrs:
            assert getattr(self, attr) is not None
            if self._h5_file.attrs[attr] != getattr(self, attr):
                raise ValueError('Incompatible {}: expected {}, got {}'.format(
                    attr, getattr(self, attr), self._h5_file.attrs[attr]))
class GraphDatabase(HDF5Database):
    """Database handling knowledge graph factorisation.

    Attributes
    ----------
    path : str
        Path to HDF5 file.
    """

    def __init__(self, path: str):
        """
        Parameters
        ----------
        path
            Path to HDF5 file.
        """
        super().__init__(path)

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        # Bug fix: this method previously copied ManagedHDF5Database's
        # implementation verbatim — it set class_name to
        # 'ManagedHDF5Database' and serialised label_dtype/feature_dtype
        # kwargs, attributes GraphDatabase never defines, so it always
        # raised AttributeError.
        proto.class_name = 'GraphDatabase'
        # No encoder for a graph DB - assume that labels are encoded already.
        return proto

    # NOTE(review): the previous _open_hdf5 override here was byte-identical
    # to HDF5Database._open_hdf5, so it has been removed in favour of the
    # inherited implementation. _validate_hdf5 below is defined but never
    # called by _open_hdf5 — confirm whether validation should be wired in.

    def _setup_hdf5(self, h5_file: h5py.File):
        """Sets up an HDF5 file to work as a database.

        Parameters
        ----------
        h5_file
            HDF5 file to set up. Must be opened in write mode.
        """
        # All datasets start empty and are grown on first write; the -1
        # attribute values mean "dimensionality not yet recorded".
        h5_file.create_dataset('features_E',
                               shape=(0, 0, 0),
                               maxshape=(None, None, None))
        h5_file.create_dataset('features_R',
                               shape=(0, 0, 0, 0),
                               maxshape=(None, None, None, None))
        h5_file.create_dataset('labels',
                               shape=(0, 0, 0),
                               maxshape=(None, None, None))
        h5_file.attrs['n_entities'] = -1
        h5_file.attrs['n_relations'] = -1
        h5_file.attrs['n_dim'] = -1
        h5_file.attrs['n_particles'] = -1

    def _validate_hdf5(self):
        """Checks that self._h5_file has the correct schema.

        Raises
        ------
        ValueError
        """
        try:
            assert 'features_E' in self._h5_file
            assert 'features_R' in self._h5_file
            assert 'labels' in self._h5_file
            assert len(self._h5_file['features_E'].shape) == 3
            assert len(self._h5_file['features_R'].shape) == 4
            assert len(self._h5_file['labels'].shape) == 3
        except AssertionError:
            raise ValueError(
                'File {} is not a valid database.'.format(self.path))

    def write_labels(self, labels: numpy.ndarray):
        """Writes label vectors to the database.

        Parameters
        ----------
        labels
            K x N x N array of label vectors.
            K is the number of relations, N is the number of entities.
        """
        self._assert_open()

        # Input validation.
        if self._h5_file.attrs['n_relations'] == -1:
            # This is the first time we've stored labels, so make a record
            # of the number of relations.
            self._h5_file.attrs['n_relations'] = labels.shape[0]
        elif self._h5_file.attrs['n_relations'] != labels.shape[0]:
            raise ValueError(
                'Expected number of relations {}, got {}'.format(
                    self._h5_file.attrs['n_relations'], labels.shape[0]))

        if self._h5_file.attrs['n_entities'] == -1:
            # This is the first time we've stored labels, so make a record
            # of the number of entities.
            self._h5_file.attrs['n_entities'] = labels.shape[1]
        elif self._h5_file.attrs['n_entities'] != labels.shape[1]:
            raise ValueError(
                'Expected number of entities {}, got {}'.format(
                    self._h5_file.attrs['n_entities'], labels.shape[1]))

        # Resize the label array if necessary.
        if (labels.shape[0] > self._h5_file['labels'].shape[0] or
                labels.shape[1] > self._h5_file['labels'].shape[1] or
                labels.shape[2] > self._h5_file['labels'].shape[2]):
            self._h5_file['labels'].resize(labels.shape)

        # Store the labels.
        # TODO(MatthewJA): Vectorise this.
        for relation_idx in range(labels.shape[0]):
            self._h5_file['labels'][relation_idx, :] = labels[relation_idx, :]

        logging.debug(
            'New label array size: {}'.format(self._h5_file['labels'].shape))

    def write_features(self,
                       features_E: numpy.ndarray,
                       features_R: numpy.ndarray):
        """Writes feature vectors to the database.

        Parameters
        ----------
        features_E
            P x N x D array of entity feature vectors.
            P is the number of particles.
            N is the number of entities.
            D is the number of latent variable dimensions.
        features_R
            P x K x D x D array of relation feature vectors.
            P is the number of particles.
            K is the number of relations.
            D is the number of latent variable dimensions.
        """
        self._assert_open()

        n_particles = features_E.shape[0]
        assert features_E.shape[0] == features_R.shape[0]
        n_entities = features_E.shape[1]
        n_relations = features_R.shape[1]
        n_dim = features_E.shape[2]
        assert features_E.shape[2] == features_R.shape[2] == features_R.shape[3]

        # Input validation. A -1 attribute means this dimension hasn't been
        # recorded yet; otherwise the stored value must match.
        if self._h5_file.attrs['n_relations'] == -1:
            self._h5_file.attrs['n_relations'] = n_relations
        elif self._h5_file.attrs['n_relations'] != n_relations:
            raise ValueError(
                'Expected number of relations {}, got {}'.format(
                    self._h5_file.attrs['n_relations'], n_relations))

        if self._h5_file.attrs['n_entities'] == -1:
            self._h5_file.attrs['n_entities'] = n_entities
        elif self._h5_file.attrs['n_entities'] != n_entities:
            raise ValueError(
                'Expected number of entities {}, got {}'.format(
                    self._h5_file.attrs['n_entities'], n_entities))

        if self._h5_file.attrs['n_dim'] == -1:
            self._h5_file.attrs['n_dim'] = n_dim
        elif self._h5_file.attrs['n_dim'] != n_dim:
            raise ValueError(
                'Expected number of latent dimensions {}, got {}'.format(
                    self._h5_file.attrs['n_dim'], n_dim))

        if self._h5_file.attrs['n_particles'] == -1:
            self._h5_file.attrs['n_particles'] = n_particles
        elif self._h5_file.attrs['n_particles'] != n_particles:
            raise ValueError(
                'Expected number of particles {}, got {}'.format(
                    self._h5_file.attrs['n_particles'], n_particles))

        # Resize the feature arrays if we need to store more than before.
        if (features_E.shape[0] > self._h5_file['features_E'].shape[0] or
                features_E.shape[1] > self._h5_file['features_E'].shape[1] or
                features_E.shape[2] > self._h5_file['features_E'].shape[2]):
            self._h5_file['features_E'].resize(features_E.shape)

        if (features_R.shape[0] > self._h5_file['features_R'].shape[0] or
                features_R.shape[1] > self._h5_file['features_R'].shape[1] or
                features_R.shape[2] > self._h5_file['features_R'].shape[2]):
            self._h5_file['features_R'].resize(features_R.shape)

        # Store the feature vectors.
        # TODO(MatthewJA): Vectorise this. This could be tricky as HDF5
        # doesn't fully support NumPy's fancy indexing.
        for id_, feature in enumerate(features_E):
            self._h5_file['features_E'][id_, :] = feature

        for id_, feature in enumerate(features_R):
            self._h5_file['features_R'][id_, :, :] = feature

        logging.debug(
            'New feature E array size: {}'.format(
                self._h5_file['features_E'].shape))
        logging.debug(
            'New feature R array size: {}'.format(
                self._h5_file['features_R'].shape))

    def read_labels(self,
                    instance_ids: Sequence[tuple]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        instance_ids
            Sequence of (relation, entity, entity) index triples to read.
            An empty sequence reads all labels at once.

        Returns
        -------
        numpy.ndarray
            Array of label vectors.

        Raises
        ------
        KeyError
            If no labels are stored.
        """
        self._assert_open()

        n_entities = self._h5_file.attrs['n_entities']
        n_relations = self._h5_file.attrs['n_relations']
        if n_entities == -1 or n_relations == -1:
            raise KeyError('No labels stored in database.')

        # ds[()] replaces the deprecated ds.value accessor (removed in
        # h5py 3.x).
        all_labels = self._h5_file['labels'][()]
        if len(instance_ids) == 0:
            return numpy.asarray(all_labels)

        labels = [all_labels[r_k, e_i, e_j]
                  for r_k, e_i, e_j in instance_ids]
        return numpy.asarray(labels)

    def read_features(self) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Returns
        -------
        features_E : numpy.ndarray
            P x N x D array of entity feature vectors.
        features_R : numpy.ndarray
            P x K x D x D array of relation feature vectors.

        Raises
        ------
        KeyError
            If no features are stored.
        """
        self._assert_open()

        if self._h5_file.attrs['n_particles'] == -1:
            raise KeyError('No features stored in database.')

        # ds[()] replaces the deprecated ds.value accessor. (The previous
        # version pre-allocated zero arrays here that were immediately
        # overwritten — and the entity allocation even passed n_dim as the
        # dtype argument — so that dead code has been removed.)
        features_E = numpy.asarray(self._h5_file['features_E'][()])
        features_R = numpy.asarray(self._h5_file['features_R'][()])
        return features_E, features_R

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        self._assert_open()
        # NOTE(review): this schema (_setup_hdf5) never creates an
        # 'instance_ids' dataset, so this raises KeyError on a file created
        # by this class — confirm intended behaviour.
        return [id_ for id_ in self._h5_file['instance_ids']]

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        self._assert_open()
        # NOTE(review): 'labeller_ids' is likewise absent from this class's
        # schema — see get_known_instance_ids.
        return [id_ for id_ in self._h5_file['labeller_ids']]
class HDF5Reader(HDF5Database):
    """Reads HDF5 databases.

    Attributes
    ----------
    feature_cols : List[str]
        List of feature datasets.
    label_col : str
        Name of label dataset.
    n_features : int
        Number of features.
    n_instances : int
        Number of instances.
    n_labels : int
        Number of labels per instance.
    path : str
        Path to HDF5 file.
    encode_labels : bool
        Whether to encode labels as integers.
    label_encoder : sklearn.preprocessing.LabelEncoder
        Encodes labels as integers.
    _h5_file : h5py.File
        HDF5 file object.
    _is_multidimensional : bool
        Whether the features are in a multidimensional dataset.
    """

    def __init__(self, path: str, feature_cols: List[str], label_col: str,
                 encode_labels: bool=True,
                 label_encoder: sklearn.preprocessing.LabelEncoder=None):
        """
        Parameters
        ----------
        path
            Path to HDF5 file.
        feature_cols
            List of feature datasets. If only one feature dataset is
            specified, this dataset is allowed to be a multidimensional
            dataset and contain multiple features.
        label_col
            Name of label dataset.
        encode_labels
            Whether to encode labels as integers.
        label_encoder
            Encodes labels as integers. If not specified, the label column
            will be read and a label encoding generated.
        """
        super().__init__(path)
        if not feature_cols:
            raise ValueError('Must specify feature columns for HDF5.')

        self.feature_cols = feature_cols
        self.label_col = label_col
        self.encode_labels = encode_labels
        self.label_encoder = label_encoder
        if self.label_encoder and not self.encode_labels:
            raise ValueError('label_encoder specified but encode_labels is '
                             'False')
        if self.label_encoder is None:
            self.label_encoder = sklearn.preprocessing.LabelEncoder()

        # Peek at the file once to work out dimensions and whether the
        # features live in one multidimensional dataset or one dataset per
        # column.
        with h5py.File(self.path, 'r') as data:
            is_multidimensional = any(len(data[f_col].shape) > 1 or
                                      not product(data[f_col].shape[1:]) == 1
                                      for f_col in feature_cols)
            if is_multidimensional and len(feature_cols) != 1:
                raise ValueError(
                    'Feature arrays and feature columns cannot be mixed. '
                    'To read in features from a multidimensional dataset, '
                    'only specify one feature column name.')

            self._is_multidimensional = is_multidimensional

            self.n_instances = data[label_col].shape[0]
            if len(data[label_col].shape) == 1:
                self.n_labels = 1
            else:
                assert len(data[label_col].shape) == 2
                self.n_labels = data[label_col].shape[1]

            if is_multidimensional:
                self.n_features = data[feature_cols[0]].shape[1]
            else:
                self.n_features = len(feature_cols)

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        proto.class_name = 'HDF5Reader'
        db_kwargs = {
            'feature_cols': self.feature_cols,
            'label_col': self.label_col,
            'encode_labels': self.encode_labels}
        for key, value in db_kwargs.items():
            kwarg = proto.kwarg.add()
            kwarg.key = key
            kwarg.value = json.dumps(value)
        proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
        return proto

    def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Parameters
        ----------
        ids
            Iterable of IDs.

        Returns
        -------
        numpy.ndarray
            N x D array of feature vectors.
        """
        # TODO(MatthewJA): Optimise this.
        self._assert_open()

        # For each ID, get the corresponding features.
        if self._is_multidimensional:
            # If there are duplicates in ids, then h5py's fancy indexing
            # will crash with an OSError (and a very cryptic error
            # message...). To get around this, we'll first get all the
            # unique IDs.
            unique_ids = []
            unique_ids_set = set()  # For lookups.
            id_to_index = {}  # For reconstructing the features.
            for id_ in ids:
                if id_ not in unique_ids_set:
                    unique_ids.append(id_)
                    unique_ids_set.add(id_)
                    id_to_index[id_] = len(unique_ids) - 1
            # Then index with just the unique IDs.
            features_ = self._h5_file[self.feature_cols[0]][unique_ids]
            # Finally, reconstruct the features array.
            features = numpy.zeros((len(ids), features_.shape[1]))
            for index, id_ in enumerate(ids):
                index_ = id_to_index[id_]
                features[index, :] = features_[index_, :]
            return features
        else:
            # Allocate output array.
            features = numpy.zeros((len(ids), len(self.feature_cols)))
            # Read each feature column: one scalar dataset per column name.
            # Bug fix: rows were previously indexed with the raw instance
            # IDs (features[ids, feature_idx]) rather than by position, and
            # every column was read from the first feature dataset
            # (self._h5_file[self.feature_cols[0]][feature_name]) — both
            # break whenever IDs are not exactly 0..N-1 or more than one
            # column is requested.
            for feature_idx, feature_name in enumerate(self.feature_cols):
                features[:, feature_idx] = self._h5_file[feature_name][ids]
            return numpy.nan_to_num(features)

    def read_labels(self,
                    labeller_ids: Sequence[int],
                    instance_ids: Sequence[int]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.

        Returns
        -------
        numpy.ndarray
            T x N x F array of label vectors.
        """
        self._assert_open()

        if len(labeller_ids) > 1:
            raise NotImplementedError('Multiple labellers not yet supported.')

        # TODO(MatthewJA): Optimise this.
        # For each ID, get the corresponding labels. If there are duplicates
        # in ids, then h5py's fancy indexing will crash with an OSError (and
        # a very cryptic error message...). To get around this, we'll first
        # get all the unique IDs.
        unique_ids = []
        unique_ids_set = set()  # For lookups.
        id_to_index = {}  # For reconstructing the labels.
        for id_ in instance_ids:
            if id_ not in unique_ids_set:
                unique_ids.append(id_)
                unique_ids_set.add(id_)
                id_to_index[id_] = len(unique_ids) - 1
        # Then index with just the unique IDs.
        labels_ = self._h5_file[self.label_col][unique_ids].reshape(
            (1, len(unique_ids), -1))
        # Finally, reconstruct the labels array.
        labels = numpy.zeros(
            (1, len(instance_ids), labels_.shape[2]),
            dtype=labels_.dtype)
        for index, id_ in enumerate(instance_ids):
            index_ = id_to_index[id_]
            labels[0, index, :] = labels_[0, index_, :]

        if labels.shape[2] != 1:
            raise NotImplementedError('Multidimensional labels not currently '
                                      'supported.')

        # Encode labels.
        # NOTE(review): fit_transform refits the encoder on every read, so
        # the integer mapping can differ between calls that see different
        # label subsets — confirm callers always read all labels at once.
        if self.encode_labels:
            labels = numpy.apply_along_axis(
                self.label_encoder.fit_transform,
                axis=1,
                arr=labels.reshape(labels.shape[:2])
            ).reshape(labels.shape)

        return labels

    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        """Unsupported: this reader is read-only."""
        raise PermissionError('Cannot write to read-only database.')

    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        """Unsupported: this reader is read-only."""
        raise PermissionError('Cannot write to read-only database.')

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        self._assert_open()
        # Instances are implicitly identified by their row index.
        return [i for i in range(self.n_instances)]

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        raise NotImplementedError()
class ASCIIReader(Database):
    """Reads ASCII databases.

    On ``__enter__`` the whole ASCII table is copied into a temporary
    on-disk HDF5 database (ManagedHDF5Database); all subsequent reads are
    delegated to that database.

    Attributes
    ----------
    feature_cols : List[str]
        List of feature columns.
    label_col : str
        Name of label column.
    max_id_length : int
        Maximum length of IDs.
    n_features : int
        Number of features.
    n_instances : int
        Number of instances.
    n_labels : int
        Number of labels per instance.
    path : str
        Path to ASCII file.
    encode_labels : bool
        Whether to encode labels as integers.
    label_encoder : sklearn.preprocessing.LabelEncoder
        Encodes labels as integers.
    _db : Database
        Underlying ManagedHDF5Database.
    _db_filepath : str
        Path of underlying HDF5 database.
    _tempdir : str
        Temporary directory where the underlying HDF5 database is stored.
    """

    def __init__(self, path: str, feature_cols: List[str], label_col: str,
                 encode_labels: bool=True,
                 label_encoder: sklearn.preprocessing.LabelEncoder=None):
        """
        Parameters
        ----------
        path
            Path to ASCII file.
        feature_cols
            List of feature columns.
        label_col
            Name of label column.
        encode_labels
            Whether to encode labels as integers.
        label_encoder
            Encodes labels as integers. If not specified, the label column will
            be read and a label encoding generated.

        Raises
        ------
        ValueError
            If label_encoder is specified while encode_labels is False.
        """
        self.path = path
        self.feature_cols = feature_cols
        self.label_col = label_col
        self.encode_labels = encode_labels
        self.label_encoder = label_encoder
        if self.label_encoder and not self.encode_labels:
            raise ValueError('label_encoder specified but encode_labels is '
                             'False')

        if self.label_encoder is None:
            self.label_encoder = sklearn.preprocessing.LabelEncoder()

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        # Must match the key this class is registered under in DATABASES.
        proto.class_name = 'ASCIIReader'
        db_kwargs = {
            'feature_cols': self.feature_cols,
            'label_col': self.label_col,
            'encode_labels': self.encode_labels}
        for key, value in db_kwargs.items():
            kwarg = proto.kwarg.add()
            kwarg.key = key
            kwarg.value = json.dumps(value)
        proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
        return proto

    def _db_from_ascii(self,
                       db: Database,
                       data: astropy.table.Table,
                       feature_cols: Sequence[str],
                       label_col: str,
                       ids: Sequence[int]):
        """Reads an ASCII table into a database.

        Notes
        -----
        The entire file is copied into memory.

        Arguments
        ---------
        db
            Database.
        data
            ASCII table.
        feature_cols
            List of column names of the features. If empty, all non-label and
            non-ID columns will be used.
        label_col
            Column name of the labels.
        ids
            List of instance IDs.
        """
        # Read in features.
        columns = data.keys()
        if not feature_cols:
            # If there are no features given, use all columns.
            feature_cols = [c for c in columns if c != label_col]

        # This converts the features from a table to an array.
        # NOTE(review): assumes every feature column is float64-typed —
        # the view would misinterpret other dtypes. TODO confirm upstream.
        features = data[feature_cols].as_array()
        features = features.view(numpy.float64).reshape(features.shape + (-1,))

        # Read in labels, shaped T x N x F with a single labeller (T = 1)
        # and one label per instance (F = 1).
        labels = numpy.array(
            data[label_col]).reshape((1, -1, 1))

        # We want to support multiple labellers in the future, but currently
        # don't. So every labeller is the same, ID = 0.
        labeller_ids = [0]

        # Encode labels.
        if self.encode_labels:
            labels = numpy.apply_along_axis(
                self.label_encoder.fit_transform,
                axis=1,
                arr=labels.reshape(labels.shape[:2])
            ).reshape(labels.shape)

        # Write to database.
        db.write_features(ids, features)
        db.write_labels(labeller_ids, ids, labels)

    def __enter__(self):
        self._tempdir = tempfile.TemporaryDirectory(prefix='acton')
        # Read the whole file into a DB.
        self._db_filepath = os.path.join(self._tempdir.name, 'db.h5')

        data = io_ascii.read(self.path)
        ids = list(range(len(data[self.label_col])))
        # Size the HDF5 string dtype to the longest label representation.
        max_label_len = max(len(str(i)) for i in data[self.label_col])
        label_dtype = '<S{}'.format(max_label_len)
        self._db = ManagedHDF5Database(
            self._db_filepath,
            label_dtype=label_dtype,
            feature_dtype='float64')
        self._db.__enter__()
        # BUG FIX: previous code first tried
        # `_db_from_ascii(..., encode_labels=False)` inside a
        # `try/except TypeError`, but `_db_from_ascii` accepts no such
        # keyword, so that call always raised TypeError and only the
        # fallback ever executed. Call the live path directly; label
        # encoding is governed by `self.encode_labels` inside
        # `_db_from_ascii`.
        self._db_from_ascii(self._db, data, self.feature_cols,
                            self.label_col, ids)
        return self

    def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
        self._db.__exit__(exc_type, exc_val, exc_tb)
        self._tempdir.cleanup()
        delattr(self, '_db')

    def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Parameters
        ----------
        ids
            Iterable of IDs.

        Returns
        -------
        numpy.ndarray
            N x D array of feature vectors.
        """
        return self._db.read_features(ids)

    def read_labels(self,
                    labeller_ids: Sequence[int],
                    instance_ids: Sequence[int]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.

        Returns
        -------
        numpy.ndarray
            T x N x F array of label vectors.
        """
        # N.B. Labels are encoded in _db_from_ascii.
        return self._db.read_labels(labeller_ids, instance_ids)

    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        raise NotImplementedError('Cannot write to read-only database.')

    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        raise NotImplementedError('Cannot write to read-only database.')

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        return self._db.get_known_instance_ids()

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        return self._db.get_known_labeller_ids()
class GraphReader(Database):
    """Reads ASCII databases for graph based structure

    Input file:
        List of known facts,
        formatted as relation_id \tab entity_id1 \tab entity_id2,
        means entity_id1 has relation_id relation with entity_id2,
        Both entity-id and relation-id start from 0.

    Output labels:
        K x N x N ndarrays,
        where K is the number of relations,
        N is the number of entities.
        0 represents invalid facts, 1 represents valid facts.

    Output features:
        E is N x D latent features of the entities.
        R is K x D x D latent features of the relations.
        Features are initially random/gibbs sampled,
        will be sequentially updated after getting labels

    Attributes
    ----------
    path : str
        Path to ASCII file.
    _db : Database
        Underlying GraphDatabase.
    _db_filepath : str
        Path of underlying HDF5 database.
    _tempdir : str
        Temporary directory where the underlying HDF5 database is stored.
    n_dim
        Number of latent features (size of latent dimension).
    n_particles:
        Number of particles for Thompson sampling.
    gibbs_init
        Indicates how to sample features (gibbs/random).
    var_r
        variance of prior of R
    var_e
        variance of prior of E
    var_x
        variance of X
    obs_mask
        Mask tensor of observed triples.
    given_r
        whether there is any R given for initialization
    """

    def __init__(self, path: str, n_dim: int, n_particles: int = 5,
                 gibbs_init: bool = True, var_r: int = 1, var_e: int = 1,
                 var_x: float = 0.01, obs_mask: numpy.ndarray = None,
                 given_r: numpy.ndarray = None):
        """
        Parameters
        ----------
        path
            Path to ASCII file.
        n_dim
            Number of latent features (size of latent dimension).
        n_particles:
            Number of particles for Thompson sampling.
        gibbs_init
            Indicates how to sample features (gibbs/random).
        var_r
            variance of prior of R
        var_e
            variance of prior of E
        var_x
            variance of X
        obs_mask
            Mask tensor of observed triples.
        given_r
            Given features R if any
        """
        self.path = path
        self.n_dim = n_dim
        self.n_particles = n_particles
        self.gibbs_init = gibbs_init
        self.var_r = var_r
        self.var_e = var_e
        self.var_x = var_x
        self.obs_mask = obs_mask
        self.given_r = given_r

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        # BUG FIX: was 'LabelOnlyASCIIReader', which has no entry in the
        # DATABASES registry, so a serialised GraphReader could never be
        # resolved back to a class. Use the registered name.
        proto.class_name = 'GraphReader'
        db_kwargs = {
            'n_dim': self.n_dim,
            'n_particles': self.n_particles, }
        for key, value in db_kwargs.items():
            kwarg = proto.kwarg.add()
            kwarg.key = key
            kwarg.value = json.dumps(value)
        return proto

    def _db_from_ascii(self,
                       db: Database,
                       data: astropy.table.Table,
                       ):
        """Reads an ASCII table into a database.

        Notes
        -----
        The entire file is copied into memory.

        Arguments
        ---------
        db
            Database.
        data
            ASCII table.
        """
        # triples: relation_id entity_id1 entity_id2
        # e.g. (0,2,4) represents entity 2 and 4 have relation 0
        triples = data.as_array()
        # BUG FIX: `numpy.int` (an alias of the builtin `int`) was removed
        # in NumPy 1.24; the builtin is the exact equivalent.
        triples = triples.view(int).reshape((triples.shape[0], 3))
        self.n_relations = max(triples[:, 0]) + 1
        self.n_entities = max(triples[:, 1]) + 1
        # Head and tail entity IDs are assumed to cover the same range.
        assert self.n_entities == max(triples[:, -1]) + 1

        # only support one labeller
        # construct label tensor X = {0,1}^{K x N x N}
        X = numpy.zeros((self.n_relations, self.n_entities, self.n_entities))
        for i in triples:
            X[i[0], i[1], i[2]] = 1

        # Initialize features E,R
        self.E = list()
        self.R = list()
        self.RE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim])
        self.RTE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim])

        if self.obs_mask is None:
            self.obs_mask = numpy.zeros_like(X)
        else:
            logging.info(
                "Initial Total, Positive, Negative Obs : %d / %d / %d",
                numpy.sum(self.obs_mask),
                numpy.sum(X[self.obs_mask == 1]),
                numpy.sum(self.obs_mask) - numpy.sum(X[self.obs_mask == 1]))

        # cur_obs holds the observed entries of X (zero elsewhere).
        cur_obs = numpy.zeros_like(X)
        for k in range(self.n_relations):
            cur_obs[k][self.obs_mask[k] == 1] = X[k][self.obs_mask[k] == 1]

        self.obs_sum = numpy.sum(numpy.sum(self.obs_mask, 1), 1)
        self.valid_relations = numpy.nonzero(numpy.sum(numpy.sum(X, 1), 1))[0]

        # Scratch buffers reused by _sample_entity (one row per possible
        # observed (relation, direction) pair for one entity).
        self.features = numpy.zeros(
            [2 * self.n_entities * self.n_relations, self.n_dim])
        self.xi = numpy.zeros([2 * self.n_entities * self.n_relations])

        # cur_obs[cur_obs.nonzero()] = 1
        if self.gibbs_init and numpy.sum(self.obs_sum) != 0:
            # initialize latent variables with gibbs sampling
            E = numpy.random.random([self.n_entities, self.n_dim])
            R = numpy.random.random([self.n_relations, self.n_dim, self.n_dim])

            for gi in range(20):
                tic = time.time()
                if self.given_r is None:
                    self._sample_relations(
                        cur_obs, self.obs_mask, E, R, self.var_r)
                    self._sample_entities(
                        cur_obs, self.obs_mask, E, R, self.var_e)
                else:
                    # R is given: only the entity features are resampled.
                    self._sample_entities(
                        cur_obs, self.obs_mask, E, R, self.var_e)
                logging.info("Gibbs Init %d: %f", gi, time.time() - tic)

            # Every particle starts from the same Gibbs-initialised state.
            for p in range(self.n_particles):
                self.E.append(E.copy())
                self.R.append(R.copy())
        else:
            # random initialization
            for p in range(self.n_particles):
                self.E.append(numpy.random.random(
                    [self.n_entities, self.n_dim]))
                self.R.append(numpy.random.random(
                    [self.n_relations, self.n_dim, self.n_dim]))
        self.E = numpy.asarray(self.E)
        self.R = numpy.asarray(self.R)

        # Write to database.
        db.write_features(self.E, self.R)
        db.write_labels(X)

    def __enter__(self):
        self._tempdir = tempfile.TemporaryDirectory(prefix='acton')
        # Read the whole file into a DB.
        self._db_filepath = os.path.join(self._tempdir.name, 'db.h5')

        data = io_ascii.read(self.path)
        self._db = GraphDatabase(self._db_filepath)
        self._db.__enter__()
        self._db_from_ascii(self._db, data)
        return self

    def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
        self._db.__exit__(exc_type, exc_val, exc_tb)
        self._tempdir.cleanup()
        delattr(self, '_db')

    def read_features(self) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Returns
        -------
        E
            numpy.ndarray
            P x N x D array of feature vectors.
        R
            list
            each element is numpy.ndarray
            P x K x D x D array of feature vectors.
        """
        return self._db.read_features()

    def read_labels(self,
                    instance_ids: Sequence[tuple]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        instance_ids
            Iterable of instance ID triples.

        Returns
        -------
        numpy.ndarray
            array of label vectors.
        """
        # N.B. Labels are encoded in _db_from_ascii.
        return self._db.read_labels(instance_ids)

    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        raise NotImplementedError('Cannot write to read-only database.')

    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        raise NotImplementedError('Cannot write to read-only database.')

    def _sample_entities(self, X, mask, E, R, var_e, sample_idx=None):
        """Gibbs-sample entity features E in place, one entity at a time."""
        RE = self.RE
        RTE = self.RTE
        for k in range(self.n_relations):
            RE[k] = numpy.dot(R[k], E.T).T
            RTE[k] = numpy.dot(R[k].T, E.T).T

        if sample_idx is None:
            sample_idx = range(self.n_entities)

        for i in sample_idx:
            self._sample_entity(X, mask, E, R, i, var_e, RE, RTE)
            # Refresh the cached projections for the entity just resampled.
            for k in range(self.n_relations):
                RE[k][i] = numpy.dot(R[k], E[i])
                RTE[k][i] = numpy.dot(R[k].T, E[i])

    def _sample_entity(self, X, mask, E, R, i, var_e, RE=None, RTE=None):
        """Draw E[i] from its Gaussian conditional given observed triples."""
        # Observed triples with entity i as head (rows) and as tail (cols).
        nz_r = mask[:, i, :].nonzero()
        nz_c = mask[:, :, i].nonzero()
        nnz_r = nz_r[0].size
        nnz_c = nz_c[0].size
        nnz_all = nnz_r + nnz_c
        self.features[:nnz_r] = RE[nz_r]
        self.features[nnz_r:nnz_all] = RTE[nz_c]
        self.xi[:nnz_r] = X[:, i, :][nz_r]
        self.xi[nnz_r:nnz_all] = X[:, :, i][nz_c]
        _xi = self.xi[:nnz_all] * self.features[:nnz_all].T
        xi = numpy.sum(_xi, 1) / self.var_x

        # Posterior precision: prior plus data term.
        _lambda = numpy.identity(self.n_dim) / var_e
        _lambda += numpy.dot(
            self.features[:nnz_all].T,
            self.features[:nnz_all]) / self.var_x

        inv_lambda = numpy.linalg.inv(_lambda)
        mu = numpy.dot(inv_lambda, xi)
        E[i] = multivariate_normal(mu, inv_lambda)
        # (Removed a dead `numpy.mean(numpy.diag(inv_lambda))` whose result
        # only fed the disabled debug logging below.)
        # logging.debug('Mean variance E, %d, %f', i, mean_var)

    def _sample_relations(self, X, mask, E, R, var_r):
        """Gibbs-sample relation features R in place."""
        EXE = numpy.kron(E, E)

        for k in self.valid_relations:
            if self.obs_sum[k] != 0:
                self._sample_relation(X, mask, E, R, k, EXE, var_r)
            else:
                # No observations for this relation: draw from the prior.
                R[k] = numpy.random.normal(
                    0, var_r, size=[self.n_dim, self.n_dim])

    def _sample_relation(self, X, mask, E, R, k, EXE, var_r):
        """Draw R[k] from its Gaussian conditional given observed triples."""
        _lambda = numpy.identity(self.n_dim ** 2) / var_r
        xi = numpy.zeros(self.n_dim ** 2)

        kron = EXE[mask[k].flatten() == 1]

        if kron.shape[0] != 0:
            _lambda += numpy.dot(kron.T, kron)
            xi += numpy.sum(X[k, mask[k] == 1].flatten() * kron.T, 1)

        _lambda /= self.var_x
        # mu = numpy.linalg.solve(_lambda, xi) / self.var_x
        inv_lambda = numpy.linalg.inv(_lambda)
        mu = numpy.dot(inv_lambda, xi) / self.var_x
        # R[k] = normal(mu, _lambda).reshape([self.n_dim, self.n_dim])
        R[k] = multivariate_normal(
            mu, inv_lambda).reshape([self.n_dim, self.n_dim])
        # (Removed a dead `numpy.mean(numpy.diag(inv_lambda))`; see note in
        # _sample_entity.)
        # logging.info('Mean variance R, %d, %f', k, mean_var)

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        return self._db.get_known_instance_ids()

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        return self._db.get_known_labeller_ids()
class PandasReader(Database):
    """Reads databases stored as pandas DataFrames inside HDF5 files.

    Attributes
    ----------
    feature_cols : List[str]
        List of feature datasets.
    label_col : str
        Name of label dataset.
    key : str
        Pandas key of the stored DataFrame.
    n_features : int
        Number of features.
    n_instances : int
        Number of instances.
    path : str
        Path to HDF5 file.
    encode_labels : bool
        Whether to encode labels as integers.
    label_encoder : sklearn.preprocessing.LabelEncoder
        Encodes labels as integers.
    _df : pandas.DataFrame
        Pandas dataframe.
    """

    def __init__(self, path: str, feature_cols: List[str], label_col: str,
                 key: str, encode_labels: bool=True,
                 label_encoder: sklearn.preprocessing.LabelEncoder=None):
        """
        Parameters
        ----------
        path
            Path to HDF5 file.
        feature_cols
            List of feature columns. If none are specified, then all non-label,
            non-ID columns will be used.
        label_col
            Name of label dataset.
        key
            Pandas key.
        encode_labels
            Whether to encode labels as integers.
        label_encoder
            Encodes labels as integers. If not specified, the label column will
            be read and a label encoding generated.

        Raises
        ------
        ValueError
            If label_encoder is specified while encode_labels is False.
        """
        self.path = path
        self.feature_cols = feature_cols
        self.label_col = label_col
        self.key = key
        self._df = pandas.read_hdf(self.path, self.key)
        self.encode_labels = encode_labels
        self.label_encoder = label_encoder

        if self.label_encoder and not self.encode_labels:
            raise ValueError('label_encoder specified but encode_labels is '
                             'False')

        if self.label_encoder is None:
            self.label_encoder = sklearn.preprocessing.LabelEncoder()

        # Default to every non-label column when no features were named.
        if not self.feature_cols:
            self.feature_cols = [column for column in self._df.keys()
                                 if column != self.label_col]

        self.n_instances = len(self._df[self.label_col])
        self.n_features = len(self.feature_cols)

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        proto.class_name = 'PandasReader'
        constructor_kwargs = {
            'feature_cols': self.feature_cols,
            'label_col': self.label_col,
            'key': self.key,
            'encode_labels': self.encode_labels}
        for name, value in constructor_kwargs.items():
            entry = proto.kwarg.add()
            entry.key = name
            entry.value = json.dumps(value)
        proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
        return proto

    def __enter__(self):
        return self

    def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
        delattr(self, '_df')

    def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Parameters
        ----------
        ids
            Iterable of IDs.

        Returns
        -------
        numpy.ndarray
            N x D array of feature vectors.
        """
        # TODO(MatthewJA): Optimise this.
        feature_matrix = numpy.zeros((len(ids), self.n_features))
        # Copy each requested row out of the DataFrame, one at a time.
        for row_index, instance_id in enumerate(ids):
            record = self._df.iloc[instance_id]
            feature_matrix[row_index] = [record[column]
                                         for column in self.feature_cols]
        return feature_matrix

    def read_labels(self,
                    labeller_ids: Sequence[int],
                    instance_ids: Sequence[int]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.

        Returns
        -------
        numpy.ndarray
            T x N x 1 array of label vectors.
        """
        # Use a stored label to decide the output dtype.
        label_dtype = type(self._df.iloc[0][self.label_col])

        labels = numpy.zeros(
            (len(labeller_ids), len(instance_ids), 1),
            dtype=label_dtype)

        if len(labeller_ids) > 1:
            raise NotImplementedError('Multiple labellers not yet supported.')

        # Fetch each requested instance's label for the single labeller.
        for row_index, instance_id in enumerate(instance_ids):
            record = self._df.iloc[int(instance_id)]
            labels[0, row_index, 0] = record[self.label_col]

        if labels.shape[2] != 1:
            raise NotImplementedError('Multidimensional labels not currently '
                                      'supported.')

        # Encode labels as integers if requested.
        if self.encode_labels:
            flat = labels.reshape(labels.shape[:2])
            labels = numpy.apply_along_axis(
                self.label_encoder.fit_transform,
                axis=1,
                arr=flat,
            ).reshape(labels.shape)

        return labels

    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        raise PermissionError('Cannot write to read-only database.')

    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        raise PermissionError('Cannot write to read-only database.')

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        return list(range(self.n_instances))

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        raise NotImplementedError()
class FITSReader(Database):
    """Reads FITS databases.

    Attributes
    ----------
    hdu_index : int
        Index of HDU in the FITS file.
    feature_cols : List[str]
        List of feature columns.
    label_col : str
        Name of label column.
    n_features : int
        Number of features.
    n_instances : int
        Number of instances.
    path : str
        Path to FITS file.
    encode_labels : bool
        Whether to encode labels as integers.
    label_encoder : sklearn.preprocessing.LabelEncoder
        Encodes labels as integers.
    _hdulist : astropy.io.fits.HDUList
        FITS HDUList.
    """

    def __init__(self, path: str, feature_cols: List[str], label_col: str,
                 hdu_index: int=1, encode_labels: bool=True,
                 label_encoder: sklearn.preprocessing.LabelEncoder=None):
        """
        Parameters
        ----------
        path
            Path to FITS file.
        feature_cols
            List of feature columns. If none are specified, then all non-label,
            non-ID columns will be used.
        label_col
            Name of label dataset.
        hdu_index
            Index of HDU in the FITS file. Default is 1, i.e., the first
            extension in the FITS file.
        encode_labels
            Whether to encode labels as integers.
        label_encoder
            Encodes labels as integers. If not specified, the label column will
            be read and a label encoding generated.

        Raises
        ------
        ValueError
            If label_encoder is specified while encode_labels is False.
        """
        self.path = path
        self.feature_cols = feature_cols
        self.label_col = label_col
        self.hdu_index = hdu_index
        self.encode_labels = encode_labels
        self.label_encoder = label_encoder

        if self.label_encoder and not self.encode_labels:
            raise ValueError('label_encoder specified but encode_labels is '
                             'False')

        if self.label_encoder is None:
            self.label_encoder = sklearn.preprocessing.LabelEncoder()

        # These will be set when the FITS file is opened.
        self.n_instances = None
        self.n_features = None

    def to_proto(self) -> DatabasePB:
        """Serialises this database as a protobuf.

        Returns
        -------
        DatabasePB
            Protobuf representing this database.
        """
        proto = DatabasePB()
        proto.path = self.path
        proto.class_name = 'FITSReader'
        constructor_kwargs = {
            'feature_cols': self.feature_cols,
            'label_col': self.label_col,
            'hdu_index': self.hdu_index,
            'encode_labels': self.encode_labels}
        for name, value in constructor_kwargs.items():
            entry = proto.kwarg.add()
            entry.key = name
            entry.value = json.dumps(value)
        proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder))
        return proto

    def __enter__(self):
        self._hdulist = io_fits.open(self.path)

        # If we haven't specified columns, use all except the label column.
        all_columns = self._hdulist[self.hdu_index].columns.names
        if not self.feature_cols:
            self.feature_cols = [name for name in all_columns
                                 if name != self.label_col]

        self.n_features = len(self.feature_cols)
        label_data = self._hdulist[self.hdu_index].data[self.label_col]
        self.n_instances = label_data.ravel().shape[0]
        return self

    def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback):
        self._hdulist.close()
        delattr(self, '_hdulist')

    def read_features(self, ids: Sequence[int]) -> numpy.ndarray:
        """Reads feature vectors from the database.

        Parameters
        ----------
        ids
            Iterable of IDs.

        Returns
        -------
        numpy.ndarray
            N x D array of feature vectors.
        """
        # TODO(MatthewJA): Optimise this.
        features = numpy.zeros((len(ids), self.n_features))
        table = self._hdulist[self.hdu_index].data
        # Fill one output column per feature column.
        for feature_index, column_name in enumerate(self.feature_cols):
            features[:, feature_index] = table[column_name][ids]
        return features

    def read_labels(self,
                    labeller_ids: Sequence[int],
                    instance_ids: Sequence[int]) -> numpy.ndarray:
        """Reads label vectors from the database.

        Parameters
        ----------
        labeller_ids
            Iterable of labeller IDs.
        instance_ids
            Iterable of instance IDs.

        Returns
        -------
        numpy.ndarray
            T x N x 1 array of label vectors.
        """
        all_labels = self._hdulist[self.hdu_index].data[self.label_col]
        labels = all_labels[instance_ids].reshape((1, -1, 1))

        # Encode labels as integers if requested.
        if self.encode_labels:
            flat = labels.reshape(labels.shape[:2])
            labels = numpy.apply_along_axis(
                self.label_encoder.fit_transform,
                axis=1,
                arr=flat,
            ).reshape(labels.shape)

        return labels

    def write_features(self, ids: Sequence[int], features: numpy.ndarray):
        raise PermissionError('Cannot write to read-only database.')

    def write_labels(self,
                     labeller_ids: Sequence[int],
                     instance_ids: Sequence[int],
                     labels: numpy.ndarray):
        raise PermissionError('Cannot write to read-only database.')

    def get_known_instance_ids(self) -> List[int]:
        """Returns a list of known instance IDs.

        Returns
        -------
        List[int]
            A list of known instance IDs.
        """
        return list(range(self.n_instances))

    def get_known_labeller_ids(self) -> List[int]:
        """Returns a list of known labeller IDs.

        Returns
        -------
        List[int]
            A list of known labeller IDs.
        """
        raise NotImplementedError()
# For safe string-based access to database classes.
# Maps the class_name stored by each reader's to_proto() back to the
# concrete class, so deserialisation is a dict lookup rather than eval.
DATABASES = {
    'ASCIIReader': ASCIIReader,
    'GraphReader': GraphReader,
    'HDF5Reader': HDF5Reader,
    'FITSReader': FITSReader,
    'ManagedHDF5Database': ManagedHDF5Database,
    'GraphDatabase': GraphDatabase,
    'PandasReader': PandasReader,
}
| bsd-3-clause |
wimberosa/samba | source4/scripting/python/samba/netcmd/dbcheck.py | 1 | 4646 | #!/usr/bin/env python
#
# Samba4 AD database checker
#
# Copyright (C) Andrew Tridgell 2011
# Copyright Giampaolo Lauria 2011 <lauria2@yahoo.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ldb, sys
import samba.getopt as options
from samba.auth import system_session
from samba.samdb import SamDB
from samba.netcmd import (
Command,
CommandError,
Option
)
from samba.dbchecker import dbcheck
class cmd_dbcheck(Command):
    """check local AD database for errors"""

    synopsis = "%prog [<DN>] [options]"

    takes_args = ["DN?"]

    takes_options = [
        Option("--scope", dest="scope", default="SUB",
            help="Pass search scope that builds DN list. Options: SUB, ONE, BASE"),
        Option("--fix", dest="fix", default=False, action='store_true',
            help='Fix any errors found'),
        Option("--yes", dest="yes", default=False, action='store_true',
            help="don't confirm changes, just do them all as a single transaction"),
        Option("--cross-ncs", dest="cross_ncs", default=False, action='store_true',
            help="cross naming context boundaries"),
        Option("-v", "--verbose", dest="verbose", action="store_true", default=False,
            help="Print more details of checking"),
        Option("--quiet", dest="quiet", action="store_true", default=False,
            help="don't print details of checking"),
        Option("--attrs", dest="attrs", default=None, help="list of attributes to check (space separated)"),
        Option("--reindex", dest="reindex", default=False, action="store_true", help="force database re-index"),
        Option("-H", "--URL", help="LDB URL for database or target server (defaults to local SAM database)",
               type=str, metavar="URL", dest="H"),
        ]

    def run(self, DN=None, H=None, verbose=False, fix=False, yes=False,
            cross_ncs=False, quiet=False,
            scope="SUB", credopts=None, sambaopts=None, versionopts=None,
            attrs=None, reindex=False):
        # Run the database check, optionally fixing errors inside a single
        # transaction (--yes --fix). Exits non-zero if errors remain.
        lp = sambaopts.get_loadparm()

        over_ldap = H is not None and H.startswith('ldap')

        # Remote LDAP access needs real credentials; local tdb access does not.
        if over_ldap:
            creds = credopts.get_credentials(lp, fallback_machine=True)
        else:
            creds = None

        samdb = SamDB(session_info=system_session(), url=H,
                      credentials=creds, lp=lp)

        # When checking over LDAP, open a second local connection so schema
        # lookups don't go over the wire.
        if H is None or not over_ldap:
            samdb_schema = samdb
        else:
            samdb_schema = SamDB(session_info=system_session(), url=None,
                                 credentials=creds, lp=lp)

        # Translate the user-supplied scope string into an ldb scope constant.
        scope_map = { "SUB": ldb.SCOPE_SUBTREE, "BASE": ldb.SCOPE_BASE, "ONE":ldb.SCOPE_ONELEVEL }
        scope = scope.upper()
        if not scope in scope_map:
            raise CommandError("Unknown scope %s" % scope)
        search_scope = scope_map[scope]

        # Deleted objects must be visible to the checker.
        controls = ['show_deleted:1']
        if over_ldap:
            # Page results so large remote databases don't exceed size limits.
            controls.append('paged_results:1:1000')
        if cross_ncs:
            controls.append("search_options:1:2")

        if not attrs:
            attrs = ['*']
        else:
            attrs = attrs.split()

        # With --yes --fix, wrap all fixes in one transaction so a failure
        # rolls everything back.
        started_transaction = False
        if yes and fix:
            samdb.transaction_start()
            started_transaction = True
        try:
            chk = dbcheck(samdb, samdb_schema=samdb_schema, verbose=verbose,
                    fix=fix, yes=yes, quiet=quiet, in_transaction=started_transaction)

            if reindex:
                self.outf.write("Re-indexing...\n")
                error_count = 0
                if chk.reindex_database():
                    self.outf.write("completed re-index OK\n")
            else:
                error_count = chk.check_database(DN=DN, scope=search_scope,
                        controls=controls, attrs=attrs)
        except:
            # Bare except is deliberate: cancel the transaction on ANY
            # failure, then re-raise the original exception.
            if started_transaction:
                samdb.transaction_cancel()
            raise

        if started_transaction:
            samdb.transaction_commit()

        # Non-zero exit status signals remaining (unfixed) errors to callers.
        if error_count != 0:
            sys.exit(1)
| gpl-3.0 |
leoc/home-assistant | tests/components/automation/test_time.py | 2 | 12787 | """The tests for the time automation."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.bootstrap import _setup_component
import homeassistant.util.dt as dt_util
import homeassistant.components.automation as automation
from tests.common import fire_time_changed, get_test_home_assistant
class TestAutomationTime(unittest.TestCase):
"""Test the event automation."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.hass.config.components.append('group')
        # Every call to the stub 'test.automation' service lands here, so
        # each test can assert on how many times its trigger fired.
        self.calls = []

        def record_call(service):
            self.calls.append(service)

        self.hass.services.register('test', 'automation', record_call)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        # Shut down the test Home Assistant instance created in setUp.
        self.hass.stop()
    def test_if_fires_when_hour_matches(self):
        """Test for firing if hour is matching."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'hours': 0,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        # Simulate the clock reaching hour 0: exactly one call expected.
        fire_time_changed(self.hass, dt_util.utcnow().replace(hour=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))

        # Once automations are turned off, the same trigger must not fire.
        automation.turn_off(self.hass)
        self.hass.block_till_done()

        fire_time_changed(self.hass, dt_util.utcnow().replace(hour=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_when_minute_matches(self):
        """Test for firing if minutes are matching."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'minutes': 0,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        # Simulate the clock reaching minute 0: exactly one call expected.
        fire_time_changed(self.hass, dt_util.utcnow().replace(minute=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_when_second_matches(self):
        """Test for firing if seconds are matching."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'seconds': 0,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        # Simulate the clock reaching second 0: exactly one call expected.
        fire_time_changed(self.hass, dt_util.utcnow().replace(second=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_when_all_matches(self):
        """Test for firing if everything matches."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'hours': 1,
                    'minutes': 2,
                    'seconds': 3,
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        # Fire at exactly 01:02:03 so all three components match at once.
        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=1, minute=2, second=3))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_periodic_seconds(self):
        """Test for firing periodically every second."""
        # '/2' means "every 2 seconds" (second divisible by 2).
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'seconds': "/2",
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=0, minute=0, second=2))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_periodic_minutes(self):
        """Test for firing periodically every minute."""
        # '/2' means "every 2 minutes" (minute divisible by 2).
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'minutes': "/2",
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=0, minute=2, second=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_periodic_hours(self):
        """Test for firing periodically every hour."""
        # '/2' means "every 2 hours" (hour divisible by 2).
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'hours': "/2",
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=2, minute=0, second=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
    def test_if_fires_using_after(self):
        """Test for firing after."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'after': '5:00:00',
                },
                'action': {
                    'service': 'test.automation',
                    # Template exposes trigger data so we can verify the
                    # platform name and the firing hour reached the action.
                    'data_template': {
                        'some': '{{ trigger.platform }} - '
                                '{{ trigger.now.hour }}'
                    },
                }
            }
        })

        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=5, minute=0, second=0))

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual('time - 5', self.calls[0].data['some'])
    def test_if_not_working_if_no_values_in_conf_provided(self):
        """Test for failure if no configuration."""
        # A time trigger with no hours/minutes/seconds/after must be
        # rejected at setup time.
        assert not _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=5, minute=0, second=0))

        self.hass.block_till_done()
        self.assertEqual(0, len(self.calls))
    def test_if_not_fires_using_wrong_after(self):
        """YAML translates time values to total seconds.

        This should break the before rule.
        """
        # An integer 'after' (already converted to total seconds by YAML)
        # is invalid and must fail validation at setup time.
        assert not _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'time',
                    'after': 3605,
                    # Total seconds. Hour = 3600 second
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })

        fire_time_changed(self.hass, dt_util.utcnow().replace(
            hour=1, minute=0, second=5))

        self.hass.block_till_done()
        self.assertEqual(0, len(self.calls))
    def test_if_action_before(self):
        """Test a time condition of 'before 10:00' gating an event trigger."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'before': '10:00',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        before_10 = dt_util.now().replace(hour=8)
        after_10 = dt_util.now().replace(hour=14)
        # 08:00 satisfies 'before 10:00': the action runs once.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=before_10):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(1, len(self.calls))
        # 14:00 fails the condition: the call count stays at 1.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=after_10):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(1, len(self.calls))
    def test_if_action_after(self):
        """Test a time condition of 'after 10:00' gating an event trigger."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'after': '10:00',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        before_10 = dt_util.now().replace(hour=8)
        after_10 = dt_util.now().replace(hour=14)
        # 08:00 fails 'after 10:00': no call is recorded.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=before_10):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(0, len(self.calls))
        # 14:00 satisfies the condition: exactly one call in total.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=after_10):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(1, len(self.calls))
    def test_if_action_one_weekday(self):
        """Test a single-weekday time condition ('mon')."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'weekday': 'mon',
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        # Normalize "now" back to the Monday of the current week so the
        # test is independent of the day it runs on.
        days_past_monday = dt_util.now().weekday()
        monday = dt_util.now() - timedelta(days=days_past_monday)
        tuesday = monday + timedelta(days=1)
        # Monday matches the condition: one call.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=monday):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(1, len(self.calls))
        # Tuesday does not match: the count stays at 1.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=tuesday):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(1, len(self.calls))
    def test_if_action_list_weekday(self):
        """Test a multi-weekday time condition (['mon', 'tue'])."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event'
                },
                'condition': {
                    'platform': 'time',
                    'weekday': ['mon', 'tue'],
                },
                'action': {
                    'service': 'test.automation'
                }
            }
        })
        # Normalize to the Monday of the current week so the test does not
        # depend on the actual day it runs on.
        days_past_monday = dt_util.now().weekday()
        monday = dt_util.now() - timedelta(days=days_past_monday)
        tuesday = monday + timedelta(days=1)
        wednesday = tuesday + timedelta(days=1)
        # Monday is in the list: first call.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=monday):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(1, len(self.calls))
        # Tuesday is in the list too: second call.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=tuesday):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(2, len(self.calls))
        # Wednesday is not listed: the count stays at 2.
        with patch('homeassistant.helpers.condition.dt_util.now',
                   return_value=wednesday):
            self.hass.bus.fire('test_event')
            self.hass.block_till_done()
            self.assertEqual(2, len(self.calls))
| mit |
moondrop-entertainment/django-nonrel-drawp | django/core/mail/backends/filebased.py | 394 | 2485 | """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
    """Email backend that writes each session's messages to a log file.

    The target directory comes from the ``file_path`` keyword argument,
    falling back to the ``EMAIL_FILE_PATH`` setting. The directory is
    created if missing and must be writable; otherwise
    ``ImproperlyConfigured`` is raised. (Python 2 code: note
    ``basestring`` and the ``except OSError, err`` syntax.)
    """
    def __init__(self, *args, **kwargs):
        # Lazily-computed absolute path of this backend's log file.
        self._fname = None
        if 'file_path' in kwargs:
            self.file_path = kwargs.pop('file_path')
        else:
            self.file_path = getattr(settings, 'EMAIL_FILE_PATH',None)
        # Make sure self.file_path is a string.
        if not isinstance(self.file_path, basestring):
            raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
        self.file_path = os.path.abspath(self.file_path)
        # Make sure that self.file_path is a directory if it exists.
        if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
            raise ImproperlyConfigured('Path for saving email messages exists, but is not a directory: %s' % self.file_path)
        # Try to create it, if it does not exist.
        elif not os.path.exists(self.file_path):
            try:
                os.makedirs(self.file_path)
            except OSError, err:
                raise ImproperlyConfigured('Could not create directory for saving email messages: %s (%s)' % (self.file_path, err))
        # Make sure that self.file_path is writable.
        if not os.access(self.file_path, os.W_OK):
            raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
        # Finally, call super().
        # Since we're using the console-based backend as a base,
        # force the stream to be None, so we don't default to stdout
        kwargs['stream'] = None
        super(EmailBackend, self).__init__(*args, **kwargs)
    def _get_filename(self):
        """Return a unique file name, computed once per backend instance."""
        if self._fname is None:
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            # abs(id(self)) disambiguates backends opened within the same
            # second.
            fname = "%s-%s.log" % (timestamp, abs(id(self)))
            self._fname = os.path.join(self.file_path, fname)
        return self._fname
    def open(self):
        """Open the log file for appending; return True only if newly opened."""
        if self.stream is None:
            self.stream = open(self._get_filename(), 'a')
            return True
        return False
    def close(self):
        """Close the stream if open; always reset it to None afterwards."""
        try:
            if self.stream is not None:
                self.stream.close()
        finally:
            self.stream = None
| bsd-3-clause |
simoesusp/Hexacoptero | Denis/MissionPlanner trabalho/LogAnalyzer/py2exe/tests/TestMotorBalance.py | 43 | 2389 | from LogAnalyzer import Test,TestResult
import DataflashLog
from VehicleType import VehicleType
class TestBalanceTwist(Test):
    '''test for badly unbalanced copter, including yaw twist'''
    def __init__(self):
        Test.__init__(self)
        self.name = "Motor Balance"
    def run(self, logdata, verbose):
        """Compare per-motor RCOU output averages and flag large spreads.

        Python 2 code: backtick repr, and map/filter/zip returning lists.
        Sets self.result to WARN above a 75-PWM spread, FAIL above 150.
        """
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD
        # Motor balance is only meaningful for copters.
        if logdata.vehicleType != VehicleType.Copter:
            self.result.status = TestResult.StatusType.NA
            return
        self.result.status = TestResult.StatusType.UNKNOWN
        if not "RCOU" in logdata.channels:
            return
        ch = []
        for i in range(8):
            # Channel naming differs across firmware versions
            # (Chan1 / Ch1 / C1); accept any of them.
            for prefix in "Chan", "Ch", "C":
                if prefix+`(i+1)` in logdata.channels["RCOU"]:
                    ch.append(map(lambda x: x[1], logdata.channels["RCOU"][prefix+`(i+1)`].listData))
        # Transpose: one tuple of per-motor outputs per log sample.
        ch = zip(*ch)
        num_channels = 0
        for i in range(len(ch)):
            # Keep only plausible PWM values for each sample.
            ch[i] = filter(lambda x: (x>0 and x<3000), ch[i])
            if num_channels < len(ch[i]):
                num_channels = len(ch[i])
        if num_channels < 2:
            return
        try:
            # NOTE(review): operator precedence makes this
            # RC3_MIN + (THR_MIN / (RC3_MAX - RC3_MIN) / 1000.0); the
            # parenthesisation looks suspect -- confirm against the
            # intended minimum-throttle formula before relying on it.
            min_throttle = logdata.parameters["RC3_MIN"] + logdata.parameters["THR_MIN"] / (logdata.parameters["RC3_MAX"]-logdata.parameters["RC3_MIN"])/1000.0
        except KeyError as e:
            # Newer parameter names (MOT_PWM_*) when RC3_*/THR_MIN absent.
            min_throttle = logdata.parameters["MOT_PWM_MIN"] / (logdata.parameters["MOT_PWM_MAX"]-logdata.parameters["RC3_MIN"])/1000.0
        # Discard samples where the craft is (near) unpowered.
        ch = filter(lambda x:sum(x)/num_channels > min_throttle, ch)
        if len(ch) == 0:
            return
        avg_all = map(lambda x:sum(x)/num_channels,ch)
        avg_all = sum(avg_all)/len(avg_all)
        avg_ch = []
        for i in range(num_channels):
            avg = map(lambda x: x[i],ch)
            avg = sum(avg)/len(avg)
            avg_ch.append(avg)
        self.result.statusMessage = "Motor channel averages = %s\nAverage motor output = %.0f\nDifference between min and max motor averages = %.0f" % (str(avg_ch),avg_all,abs(min(avg_ch)-max(avg_ch)))
        self.result.status = TestResult.StatusType.GOOD
        # Spread thresholds: > 75 PWM -> WARN, > 150 PWM -> FAIL.
        if abs(min(avg_ch)-max(avg_ch)) > 75:
            self.result.status = TestResult.StatusType.WARN
        if abs(min(avg_ch)-max(avg_ch)) > 150:
            self.result.status = TestResult.StatusType.FAIL
| gpl-3.0 |
dhorelik/django-cms | cms/south_migrations/0061_revers_id_unique.py | 63 | 17555 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the composite unique constraint."""
        # Adding unique constraint on 'Page', fields ['reverse_id', 'site', 'publisher_is_draft']
        # (the FK column is 'site_id' at the database level).
        db.create_unique(u'cms_page', ['reverse_id', 'site_id', 'publisher_is_draft'])
    def backwards(self, orm):
        """Reverse the migration: drop the composite unique constraint."""
        # Removing unique constraint on 'Page', fields ['reverse_id', 'site', 'publisher_is_draft']
        db.delete_unique(u'cms_page', ['reverse_id', 'site_id', 'publisher_is_draft'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause |
rense/django-rest-framework | tests/test_negotiation.py | 81 | 1649 | from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.negotiation import DefaultContentNegotiation
from rest_framework.renderers import BaseRenderer
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
# Module-level request factory shared by the test cases below.
factory = APIRequestFactory()
class MockJSONRenderer(BaseRenderer):
    """Stub renderer that only declares the JSON media type."""
    media_type = 'application/json'
class MockHTMLRenderer(BaseRenderer):
    """Stub renderer that only declares the HTML media type."""
    media_type = 'text/html'
class NoCharsetSpecifiedRenderer(BaseRenderer):
    """Stub renderer with a custom media type; presumably used to exercise
    the no-explicit-charset path elsewhere -- not referenced in this chunk."""
    media_type = 'my/media'
class TestAcceptedMediaType(TestCase):
    """Content negotiation: which media type DefaultContentNegotiation picks
    for various Accept headers, given JSON and HTML renderers."""
    def setUp(self):
        # JSON renderer listed first, so it is the default preference.
        self.renderers = [MockJSONRenderer(), MockHTMLRenderer()]
        self.negotiator = DefaultContentNegotiation()
    def select_renderer(self, request):
        """Helper: negotiate against this test's fixed renderer list."""
        return self.negotiator.select_renderer(request, self.renderers)
    def test_client_without_accept_use_renderer(self):
        # No Accept header: fall back to the first renderer's media type.
        request = Request(factory.get('/'))
        accepted_renderer, accepted_media_type = self.select_renderer(request)
        self.assertEqual(accepted_media_type, 'application/json')
    def test_client_underspecifies_accept_use_renderer(self):
        # Wildcard Accept: the server's renderer preference wins.
        request = Request(factory.get('/', HTTP_ACCEPT='*/*'))
        accepted_renderer, accepted_media_type = self.select_renderer(request)
        self.assertEqual(accepted_media_type, 'application/json')
    def test_client_overspecifies_accept_use_client(self):
        # Accept more specific than the renderer's media type: the client's
        # full parameterized value (including indent) is preserved.
        request = Request(factory.get('/', HTTP_ACCEPT='application/json; indent=8'))
        accepted_renderer, accepted_media_type = self.select_renderer(request)
        self.assertEqual(accepted_media_type, 'application/json; indent=8')
| bsd-2-clause |
dictoss/proto | cassandra/client_cassandra_cql.py | 1 | 3553 | #!/usr/bin/python3
#
# cassandra client test program
#
import os
import sys
import datetime
import json
import cassandra
from cassandra.cluster import Cluster
from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, BatchStatement, PreparedStatement
# Contact point(s) of the Cassandra cluster this test program talks to.
CASSANDRA_CLUSTER = ['192.168.22.112']
def _get_client_version():
    """Print the version string of the installed cassandra-driver package."""
    version = cassandra.__version__
    print('cassandra.__version__ = {}'.format(version))
def _select_users(keyspace):
    """Read the ``users`` table: dump all rows, then run a prepared lookup.

    Parameters
    ----------
    keyspace : str
        Keyspace to select before querying.

    Errors are printed but not re-raised (best-effort demo behaviour,
    preserved from the original). Fixes over the original: the bare
    ``except:`` is narrowed to ``Exception`` so SystemExit and
    KeyboardInterrupt propagate, and both the session and the cluster are
    always shut down (previously the session leaked on error and the
    cluster was never shut down at all).
    """
    _cluster = Cluster(contact_points=CASSANDRA_CLUSTER, port=9042)
    _session = None
    try:
        _session = _cluster.connect()
        _session.set_keyspace(keyspace)
        _session.row_factory = dict_factory
        # fetch all records
        _rows = _session.execute('''SELECT * FROM users;''')
        for r in _rows:
            print(r)
        # fetch via a prepared statement, one userid at a time
        _user_lookup_stmt = _session.prepare(
            "SELECT * FROM users WHERE userid=?")
        _lookup_result = []
        for o in ['dictoss']:
            _user = _session.execute(_user_lookup_stmt, [o])
            print(_user)
            print(list(_user))
            _lookup_result.append(_user)
    except Exception:
        print('EXCEPT: {}({})'.format(sys.exc_info()[0], sys.exc_info()[1]))
    finally:
        # Always release driver resources, even when the query failed.
        if _session is not None:
            _session.shutdown()
        _cluster.shutdown()
def _select2_users(keyspace):
    """Same as ``_select_users`` but with a context-managed session.

    Parameters
    ----------
    keyspace : str
        Keyspace to select before querying.

    Fixes over the original: the bare ``except:`` is narrowed to
    ``Exception`` (so SystemExit/KeyboardInterrupt propagate) and the
    cluster itself is shut down in ``finally`` (the ``with`` block only
    closes the session).
    """
    _cluster = Cluster(contact_points=CASSANDRA_CLUSTER, port=9042)
    try:
        with _cluster.connect() as _session:
            _session.set_keyspace(keyspace)
            _session.row_factory = dict_factory
            # fetch all records
            _rows = _session.execute('''SELECT * FROM users;''')
            for r in _rows:
                print(r)
            # fetch via a prepared statement, one userid at a time
            _user_lookup_stmt = _session.prepare(
                "SELECT * FROM users WHERE userid=?")
            _lookup_result = []
            for o in ['dictoss']:
                _user = _session.execute(_user_lookup_stmt, [o])
                print(_user)
                print(list(_user))
                _lookup_result.append(_user)
    except Exception:
        print('EXCEPT: {}({})'.format(sys.exc_info()[0], sys.exc_info()[1]))
    finally:
        # The 'with' block shuts the session; the cluster needs its own
        # shutdown to stop the control connection and worker threads.
        _cluster.shutdown()
def _upsert_users(keyspace):
    """Insert two demo rows into ``users`` via a batch of prepared binds.

    Parameters
    ----------
    keyspace : str
        Keyspace to select before inserting.

    Fixes over the original: bare ``except:`` narrowed to ``Exception``,
    session and cluster always shut down in ``finally`` (previously the
    session leaked on error and the cluster was never shut down), and
    dead commented-out alternative statements removed.
    """
    _cluster = Cluster(contact_points=CASSANDRA_CLUSTER, port=9042)
    _session = None
    try:
        _session = _cluster.connect()
        _session.set_keyspace(keyspace)
        _session.row_factory = dict_factory
        _add_users = [
            {'userid': 'dummy1', 'first_name': '11', 'last_name': 'dummy', 'emails': set('a')},
            {'userid': 'dummy2', 'first_name': '22', 'last_name': 'dummy', 'emails': set('b')}
        ]
        _prepare_insert = _session.prepare(
            "INSERT INTO users (userid, first_name, last_name, emails) VALUES (?, ?, ?, ?)")
        # NOTE(review): consistency_level=0 is ConsistencyLevel.ANY --
        # confirm that is intended rather than e.g. LOCAL_ONE.
        _batch = BatchStatement(consistency_level=0)
        for user in _add_users:
            print(user)
            _batch.add(_prepare_insert, [user['userid'], user['first_name'], user['last_name'], user['emails']])
        _session.execute(_batch)
    except Exception:
        print('EXCEPT insert: {}({})'.format(sys.exc_info()[0], sys.exc_info()[1]))
    finally:
        # Always release driver resources, even when the insert failed.
        if _session is not None:
            _session.shutdown()
        _cluster.shutdown()
def main():
    """Exercise the cassandra client helpers against the demo keyspace."""
    print('cassandra client test')
    _get_client_version()
    keyspace = 'mykeyspace'
    _select2_users(keyspace)
    _upsert_users(keyspace)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| bsd-2-clause |
wgcv/SWW-Crashphone | lib/python2.7/site-packages/django/template/debug.py | 42 | 3778 | from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
from django.utils.timezone import template_localtime
class DebugLexer(Lexer):
    """Template lexer that annotates every token with its source span.

    Behaves like ``Lexer`` except that each produced token carries a
    ``source`` attribute of ``(origin, (start, end))`` so template errors
    can be traced back to the exact substring that produced them.

    The original class also defined an ``__init__`` that merely forwarded
    its arguments to ``super()`` unchanged; that redundant pass-through
    has been removed (the inherited constructor is identical).
    """
    def tokenize(self):
        "Return a list of tokens from a given template_string"
        result, upto = [], 0
        for match in tag_re.finditer(self.template_string):
            start, end = match.span()
            if start > upto:
                # Literal text between (or before) template tags.
                result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
                upto = start
            # The tag itself, delimited by tag_re.
            result.append(self.create_token(self.template_string[start:end], (start, end), True))
            upto = end
        last_bit = self.template_string[upto:]
        if last_bit:
            # Trailing literal text after the final tag.
            result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
        return result
    def create_token(self, token_string, source, in_tag):
        """Create a token via the base class and attach its source span."""
        token = super(DebugLexer, self).create_token(token_string, in_tag)
        token.source = self.origin, source
        return token
class DebugParser(Parser):
    """Template parser that tracks open block tags and attaches template
    source information to the nodes and exceptions it produces."""
    def __init__(self, lexer):
        super(DebugParser, self).__init__(lexer)
        # Stack of (command, source) for currently-open block tags, used
        # to report which tag was left unclosed.
        self.command_stack = []
    def enter_command(self, command, token):
        """Record that a block tag was opened at *token*'s position."""
        self.command_stack.append((command, token.source))
    def exit_command(self):
        """Pop the most recently opened block tag (it was closed)."""
        self.command_stack.pop()
    def error(self, token, msg):
        """Return a TemplateSyntaxError located at *token*'s source span."""
        return self.source_error(token.source, msg)
    def source_error(self, source, msg):
        """Return a TemplateSyntaxError carrying the given source span."""
        e = TemplateSyntaxError(msg)
        e.django_template_source = source
        return e
    def create_nodelist(self):
        # Debug variant so rendering errors get source info attached.
        return DebugNodeList()
    def create_variable_node(self, contents):
        # Debug variant so variable-resolution errors get source info.
        return DebugVariableNode(contents)
    def extend_nodelist(self, nodelist, node, token):
        # Tag each node with the source span of the token that produced it.
        node.source = token.source
        super(DebugParser, self).extend_nodelist(nodelist, node, token)
    def unclosed_block_tag(self, parse_until):
        """Raise for the innermost block tag that was never closed."""
        command, source = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
        raise self.source_error(source, msg)
    def compile_filter_error(self, token, e):
        # Attach source info only if a deeper frame has not already done so.
        if not hasattr(e, 'django_template_source'):
            e.django_template_source = token.source
    def compile_function_error(self, token, e):
        # Same convention as compile_filter_error.
        if not hasattr(e, 'django_template_source'):
            e.django_template_source = token.source
class DebugNodeList(NodeList):
    """NodeList that tags exceptions raised while rendering a node with
    that node's template source span, then re-raises them."""
    def render_node(self, node, context):
        try:
            return node.render(context)
        except Exception as e:
            # Only annotate once: a deeper frame may already have set it.
            if not hasattr(e, 'django_template_source'):
                e.django_template_source = node.source
            raise
class DebugVariableNode(VariableNode):
    """VariableNode that resolves, localizes and escapes its output, and
    annotates any rendering exception with template source information."""
    def render(self, context):
        try:
            output = self.filter_expression.resolve(context)
            # Timezone conversion and locale formatting happen before the
            # value is coerced to text.
            output = template_localtime(output, use_tz=context.use_tz)
            output = localize(output, use_l10n=context.use_l10n)
            output = force_text(output)
        except UnicodeDecodeError:
            # Undecodable bytes render as empty output rather than erroring.
            return ''
        except Exception as e:
            if not hasattr(e, 'django_template_source'):
                e.django_template_source = self.source
            raise
        # Escape unless the value is already marked safe; values explicitly
        # marked EscapeData are escaped even with autoescape off.
        if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
            return conditional_escape(output)
        else:
            return output
| apache-2.0 |
repotvsupertuga/repo | script.module.stream.tvsupertuga.addon/resources/lib/sources/en/phdmovies.py | 7 | 6565 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    def __init__(self):
        # Scraper metadata consumed by the hosting framework.
        self.priority = 1
        self.language = ['en']
        self.domains = ['perfecthdmovies.pw']
        self.base_link = 'http://www.perfecthdmovies.pw'
        # Search RSS feed path; %s is the urlencoded query string.
        self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
u = post.split('Download%252BLinks.png', 1)[-1]
u = client.parseDOM(u, 'div', attrs={'style': '.+?'})
u = [re.findall('<a href="(.+?)"', i) for i in u]
u = [i[0] for i in u if i]
items += [(t, i) for i in u]
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if 'hindi' in fmt and not 'dual' in fmt: raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)(?:Gb|mb))', name)[-1]
div = 1 if size.endswith('Gb') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 |
blighj/django | tests/staticfiles_tests/test_storage.py | 24 | 26035 | import os
import shutil
import sys
import tempfile
import unittest
from io import StringIO
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands.collectstatic import \
Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.management import call_command
from django.test import override_settings
from .cases import CollectionTestCase
from .settings import TEST_ROOT
def hashed_file_path(test, path):
    """Render *path* through the {% static %} snippet and strip STATIC_URL.

    Returns the storage-relative (hashed) file path.
    """
    snippet = test.static_template_snippet(path)
    rendered = test.render_template(snippet)
    return rendered.replace(settings.STATIC_URL, '')
class TestHashedFiles:
    """Mixin of tests shared by the hashed-name storage backends.

    Subclasses combine this with CollectionTestCase plus a
    STATICFILES_STORAGE override (cached or manifest storage).
    """
    # Expose the module-level helper so tests can call
    # self.hashed_file_path(path).
    hashed_file_path = hashed_file_path
    def setUp(self):
        # Remember the pass limit so tests that lower it can restore it.
        self._max_post_process_passes = storage.staticfiles_storage.max_post_process_passes
        super().setUp()
    def tearDown(self):
        # Clear hashed files to avoid side effects among tests.
        storage.staticfiles_storage.hashed_files.clear()
        storage.staticfiles_storage.max_post_process_passes = self._max_post_process_passes
    def assertPostCondition(self):
        """
        Assert post conditions for a test are met. Must be manually called at
        the end of each test.
        """
        pass
    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
        self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
        self.assertStaticRenders("cached/styles.css", "/static/cached/styles.5e0040571e1a.css")
        self.assertStaticRenders("path/", "/static/path/")
        self.assertStaticRenders("path/?query", "/static/path/?query")
        self.assertPostCondition()
    def test_template_tag_simple_content(self):
        # References inside a CSS file are rewritten to hashed names.
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.5e0040571e1a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()
    def test_path_ignored_completely(self):
        # Fragment-only, scheme-prefixed and protocol-relative URLs must be
        # left untouched by post-processing.
        relpath = self.hashed_file_path("cached/css/ignored.css")
        self.assertEqual(relpath, "cached/css/ignored.554da52152af.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'#foobar', content)
            self.assertIn(b'http:foobar', content)
            self.assertIn(b'https:foobar', content)
            self.assertIn(b'data:foobar', content)
            self.assertIn(b'chrome:foobar', content)
            self.assertIn(b'//foobar', content)
        self.assertPostCondition()
    def test_path_with_querystring(self):
        # The query string survives hashing of the path part.
        relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
        self.assertEqual(relpath, "cached/styles.5e0040571e1a.css?spam=eggs")
        with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()
    def test_path_with_fragment(self):
        # The URL fragment survives hashing of the path part.
        relpath = self.hashed_file_path("cached/styles.css#eggs")
        self.assertEqual(relpath, "cached/styles.5e0040571e1a.css#eggs")
        with storage.staticfiles_storage.open("cached/styles.5e0040571e1a.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()
    def test_path_with_querystring_and_fragment(self):
        relpath = self.hashed_file_path("cached/css/fragments.css")
        self.assertEqual(relpath, "cached/css/fragments.c4e6753b52d3.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#path/to/../../fonts/font.svg', content)
            self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
            self.assertIn(b'#default#VML', content)
        self.assertPostCondition()
    def test_template_tag_absolute(self):
        # Absolute /static/... references are rewritten to hashed names too.
        relpath = self.hashed_file_path("cached/absolute.css")
        self.assertEqual(relpath, "cached/absolute.eb04def9f9a4.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/cached/styles.css", content)
            self.assertIn(b"/static/cached/styles.5e0040571e1a.css", content)
            self.assertNotIn(b"/static/styles_root.css", content)
            self.assertIn(b"/static/styles_root.401f2509a628.css", content)
            self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
        self.assertPostCondition()
    def test_template_tag_absolute_root(self):
        """
        Like test_template_tag_absolute, but for a file in STATIC_ROOT (#26249).
        """
        relpath = self.hashed_file_path("absolute_root.css")
        self.assertEqual(relpath, "absolute_root.f821df1b64f7.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/styles_root.css", content)
            self.assertIn(b"/static/styles_root.401f2509a628.css", content)
        self.assertPostCondition()
    def test_template_tag_relative(self):
        # Relative url()/@import references are rewritten in place.
        relpath = self.hashed_file_path("cached/relative.css")
        self.assertEqual(relpath, "cached/relative.c3e9e1ea6f2e.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"../cached/styles.css", content)
            self.assertNotIn(b'@import "styles.css"', content)
            self.assertNotIn(b'url(img/relative.png)', content)
            self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
            self.assertIn(b"../cached/styles.5e0040571e1a.css", content)
        self.assertPostCondition()
    def test_import_replacement(self):
        "See #18050"
        relpath = self.hashed_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.f53576679e5a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"""import url("styles.5e0040571e1a.css")""", relfile.read())
        self.assertPostCondition()
    def test_template_tag_deep_relative(self):
        relpath = self.hashed_file_path("cached/css/window.css")
        self.assertEqual(relpath, "cached/css/window.5d5c10836967.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b'url(img/window.png)', content)
            self.assertIn(b'url("img/window.acae32e4532b.png")', content)
        self.assertPostCondition()
    def test_template_tag_url(self):
        # Fully-qualified URLs are never rewritten.
        relpath = self.hashed_file_path("cached/url.css")
        self.assertEqual(relpath, "cached/url.902310b73412.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"https://", relfile.read())
        self.assertPostCondition()
    @override_settings(
        STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'loop')],
        STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    )
    def test_import_loop(self):
        # Two files importing each other must abort post-processing instead
        # of looping forever.
        finders.get_finder.cache_clear()
        err = StringIO()
        with self.assertRaisesMessage(RuntimeError, 'Max post-process passes exceeded'):
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue())
        self.assertPostCondition()
    def test_post_processing(self):
        """
        post_processing behaves correctly.
        Files that are alterable should always be post-processed; files that
        aren't should be skipped.
        collectstatic has already been called once in setUp() for this testcase,
        therefore we check by verifying behavior on a second run.
        """
        collectstatic_args = {
            'interactive': False,
            'verbosity': 0,
            'link': False,
            'clear': False,
            'dry_run': False,
            'post_process': True,
            'use_default_ignore_patterns': True,
            'ignore_patterns': ['*.ignoreme'],
        }
        collectstatic_cmd = CollectstaticCommand()
        collectstatic_cmd.set_options(**collectstatic_args)
        stats = collectstatic_cmd.collect()
        self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
        self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
        self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
        self.assertPostCondition()
    def test_css_import_case_insensitive(self):
        # @IMPORT (any case) is rewritten like @import.
        relpath = self.hashed_file_path("cached/styles_insensitive.css")
        self.assertEqual(relpath, "cached/styles_insensitive.3fa427592a53.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)
        self.assertPostCondition()
    @override_settings(
        STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
        STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    )
    def test_post_processing_failure(self):
        """
        post_processing indicates the origin of the error when it fails.
        """
        finders.get_finder.cache_clear()
        err = StringIO()
        with self.assertRaises(Exception):
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
        self.assertPostCondition()
@override_settings(
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
)
class TestCollectionCachedStorage(TestHashedFiles, CollectionTestCase):
    """
    Tests for the Cache busting storage
    """
    def test_cache_invalidation(self):
        name = "cached/styles.css"
        hashed_name = "cached/styles.5e0040571e1a.css"
        # check if the cache is filled correctly as expected
        cache_key = storage.staticfiles_storage.hash_key(name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(self.hashed_file_path(name), cached_name)
        # clearing the cache to make sure we re-set it correctly in the url method
        storage.staticfiles_storage.hashed_files.clear()
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertIsNone(cached_name)
        self.assertEqual(self.hashed_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, hashed_name)
        # Check files that had to be hashed multiple times since their content
        # includes other files that were hashed.
        name = 'cached/relative.css'
        hashed_name = 'cached/relative.c3e9e1ea6f2e.css'
        cache_key = storage.staticfiles_storage.hash_key(name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertIsNone(cached_name)
        self.assertEqual(self.hashed_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, hashed_name)
    def test_cache_key_memcache_validation(self):
        """
        Handle cache key creation correctly, see #17861.
        """
        # Long name with control/non-ASCII bytes: the derived cache key must
        # still pass memcached's key validation.
        name = (
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
            "/some crazy/\x16\xb4"
        )
        cache_key = storage.staticfiles_storage.hash_key(name)
        cache_validator = BaseCache({})
        cache_validator.validate_key(cache_key)
        self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
    def test_corrupt_intermediate_files(self):
        configured_storage = storage.staticfiles_storage
        # Clear cache to force rehashing of the files
        configured_storage.hashed_files.clear()
        # Simulate a corrupt chain of intermediate files by ensuring they don't
        # resolve before the max post-process count, which would normally be
        # high enough.
        configured_storage.max_post_process_passes = 1
        # File without intermediates that can be rehashed without a problem.
        self.hashed_file_path('cached/css/img/window.png')
        # File with too many intermediates to rehash with the low max
        # post-process passes.
        err_msg = "The name 'cached/styles.css' could not be hashed with %r." % (configured_storage._wrapped,)
        with self.assertRaisesMessage(ValueError, err_msg):
            self.hashed_file_path('cached/styles.css')
@override_settings(
    STATICFILES_STORAGE='staticfiles_tests.storage.ExtraPatternsCachedStaticFilesStorage',
)
class TestExtraPatternsCachedStorage(CollectionTestCase):
    """Tests for a storage subclass that registers extra URL patterns."""
    def setUp(self):
        storage.staticfiles_storage.hashed_files.clear()  # avoid cache interference
        super().setUp()
    def cached_file_path(self, path):
        # Render the static template tag and strip STATIC_URL, leaving the
        # storage-relative hashed path.
        fullpath = self.render_template(self.static_template_snippet(path))
        return fullpath.replace(settings.STATIC_URL, '')
    def test_multi_extension_patterns(self):
        """
        With storage classes having several file extension patterns, only the
        files matching a specific file pattern should be affected by the
        substitution (#19670).
        """
        # CSS files shouldn't be touched by JS patterns.
        relpath = self.cached_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.f53576679e5a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b'import url("styles.5e0040571e1a.css")', relfile.read())
        # Confirm JS patterns have been applied to JS files.
        relpath = self.cached_file_path("cached/test.js")
        self.assertEqual(relpath, "cached/test.388d7a790d46.js")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b'JS_URL("import.f53576679e5a.css")', relfile.read())
@override_settings(
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
)
class TestCollectionManifestStorage(TestHashedFiles, CollectionTestCase):
    """
    Tests for the Cache busting storage
    """
    def setUp(self):
        super().setUp()
        # Add a throwaway static dir with one file so the clear/delete tests
        # below have something to remove.
        temp_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(temp_dir, 'test'))
        self._clear_filename = os.path.join(temp_dir, 'test', 'cleared.txt')
        with open(self._clear_filename, 'w') as f:
            f.write('to be deleted in one test')
        self.patched_settings = self.settings(
            STATICFILES_DIRS=settings.STATICFILES_DIRS + [temp_dir])
        self.patched_settings.enable()
        self.addCleanup(shutil.rmtree, temp_dir)
        self._manifest_strict = storage.staticfiles_storage.manifest_strict
    def tearDown(self):
        self.patched_settings.disable()
        if os.path.exists(self._clear_filename):
            os.unlink(self._clear_filename)
        # Restore manifest_strict in case a test flipped it.
        storage.staticfiles_storage.manifest_strict = self._manifest_strict
        super().tearDown()
    def assertPostCondition(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        # The in-memory version of the manifest matches the one on disk
        # since a properly created manifest should cover all filenames.
        if hashed_files:
            manifest = storage.staticfiles_storage.load_manifest()
            self.assertEqual(hashed_files, manifest)
    def test_manifest_exists(self):
        filename = storage.staticfiles_storage.manifest_name
        path = storage.staticfiles_storage.path(filename)
        self.assertTrue(os.path.exists(path))
    def test_loaded_cache(self):
        self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
        manifest_content = storage.staticfiles_storage.read_manifest()
        self.assertIn(
            '"version": "%s"' % storage.staticfiles_storage.manifest_version,
            manifest_content
        )
    def test_parse_cache(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        manifest = storage.staticfiles_storage.load_manifest()
        self.assertEqual(hashed_files, manifest)
    def test_clear_empties_manifest(self):
        cleared_file_name = storage.staticfiles_storage.clean_name(os.path.join('test', 'cleared.txt'))
        # collect the additional file
        self.run_collectstatic()
        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertIn(cleared_file_name, hashed_files)
        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertIn(cleared_file_name, manifest_content)
        original_path = storage.staticfiles_storage.path(cleared_file_name)
        self.assertTrue(os.path.exists(original_path))
        # delete the original file form the app, collect with clear
        os.unlink(self._clear_filename)
        self.run_collectstatic(clear=True)
        self.assertFileNotFound(original_path)
        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertNotIn(cleared_file_name, hashed_files)
        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertNotIn(cleared_file_name, manifest_content)
    def test_missing_entry(self):
        missing_file_name = 'cached/missing.css'
        configured_storage = storage.staticfiles_storage
        self.assertNotIn(missing_file_name, configured_storage.hashed_files)
        # File name not found in manifest
        with self.assertRaisesMessage(ValueError, "Missing staticfiles manifest entry for '%s'" % missing_file_name):
            self.hashed_file_path(missing_file_name)
        configured_storage.manifest_strict = False
        # File doesn't exist on disk
        err_msg = "The file '%s' could not be found with %r." % (missing_file_name, configured_storage._wrapped)
        with self.assertRaisesMessage(ValueError, err_msg):
            self.hashed_file_path(missing_file_name)
        content = StringIO()
        content.write('Found')
        configured_storage.save(missing_file_name, content)
        # File exists on disk
        self.hashed_file_path(missing_file_name)
@override_settings(
    STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
)
class TestCollectionSimpleCachedStorage(CollectionTestCase):
    """
    Tests for the Cache busting storage
    """
    # SimpleCachedStaticFilesStorage uses a fixed 'deploy12345' tag instead
    # of a content hash, so the expected names are deterministic.
    hashed_file_path = hashed_file_path
    def setUp(self):
        storage.staticfiles_storage.hashed_files.clear()  # avoid cache interference
        super().setUp()
    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
        self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
        self.assertStaticRenders("path/", "/static/path/")
        self.assertStaticRenders("path/?query", "/static/path/?query")
    def test_template_tag_simple_content(self):
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.deploy12345.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.deploy12345.css", content)
class CustomStaticFilesStorage(storage.StaticFilesStorage):
    """
    Used in TestStaticFilePermissions
    """

    def __init__(self, *args, **kwargs):
        # Force fixed permission modes regardless of the caller's settings.
        kwargs.update(
            file_permissions_mode=0o640,
            directory_permissions_mode=0o740,
        )
        super().__init__(*args, **kwargs)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports chmod.")
class TestStaticFilePermissions(CollectionTestCase):
    """Verify collectstatic applies the configured file/dir permissions."""
    command_params = {
        'interactive': False,
        'verbosity': 0,
        'ignore_patterns': ['*.ignoreme'],
    }
    def setUp(self):
        # Fix the umask so the default-permission expectations are stable.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        super().setUp()
    def tearDown(self):
        os.umask(self.old_umask)
        super().tearDown()
    # Don't run collectstatic command in this test class.
    def run_collectstatic(self, **kwargs):
        pass
    @override_settings(
        FILE_UPLOAD_PERMISSIONS=0o655,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
    )
    def test_collect_static_files_permissions(self):
        call_command('collectstatic', **self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o655)
        self.assertEqual(dir_mode, 0o765)
    @override_settings(
        FILE_UPLOAD_PERMISSIONS=None,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=None,
    )
    def test_collect_static_files_default_permissions(self):
        # With no explicit settings, modes fall back to the process umask.
        call_command('collectstatic', **self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o666 & ~self.umask)
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
    @override_settings(
        FILE_UPLOAD_PERMISSIONS=0o655,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
        STATICFILES_STORAGE='staticfiles_tests.test_storage.CustomStaticFilesStorage',
    )
    def test_collect_static_files_subclass_of_static_storage(self):
        # Storage-level permission modes win over the FILE_UPLOAD_* settings.
        call_command('collectstatic', **self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o640)
        self.assertEqual(dir_mode, 0o740)
@override_settings(
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
)
class TestCollectionHashedFilesCache(CollectionTestCase):
    """
    Files referenced from CSS use the correct final hashed name regardless of
    the order in which the files are post-processed.
    """
    hashed_file_path = hashed_file_path
    def setUp(self):
        super().setUp()
        # Temp static dir holding the fixture files created per test.
        self._temp_dir = temp_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(temp_dir, 'test'))
        self.addCleanup(shutil.rmtree, temp_dir)
    def _get_filename_path(self, filename):
        return os.path.join(self._temp_dir, 'test', filename)
    def test_file_change_after_collectstatic(self):
        # Create initial static files.
        file_contents = (
            ('foo.png', 'foo'),
            ('bar.css', 'url("foo.png")\nurl("xyz.png")'),
            ('xyz.png', 'xyz'),
        )
        for filename, content in file_contents:
            with open(self._get_filename_path(filename), 'w') as f:
                f.write(content)
        with self.modify_settings(STATICFILES_DIRS={'append': self._temp_dir}):
            finders.get_finder.cache_clear()
            err = StringIO()
            # First collectstatic run.
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
            relpath = self.hashed_file_path('test/bar.css')
            with storage.staticfiles_storage.open(relpath) as relfile:
                content = relfile.read()
                self.assertIn(b'foo.acbd18db4cc2.png', content)
                self.assertIn(b'xyz.d16fb36f0911.png', content)
            # Change the contents of the png files.
            for filename in ('foo.png', 'xyz.png'):
                with open(self._get_filename_path(filename), 'w+b') as f:
                    f.write(b"new content of file to change its hash")
            # The hashes of the png files in the CSS file are updated after
            # a second collectstatic.
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
            relpath = self.hashed_file_path('test/bar.css')
            with storage.staticfiles_storage.open(relpath) as relfile:
                content = relfile.read()
                self.assertIn(b'foo.57a5cb9ba68d.png', content)
                self.assertIn(b'xyz.57a5cb9ba68d.png', content)
| bsd-3-clause |
cyberphox/MissionPlanner | Lib/site-packages/numpy/distutils/command/install_clib.py | 96 | 1249 | import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
from numpy.distutils.misc_util import get_cmd
class install_clib(Command):
    """distutils command that copies C libraries built by build_clib
    into the installation tree (under install_lib)."""
    description = "Command to install installable C libraries"
    user_options = []
    def initialize_options(self):
        # install_dir is resolved from the 'install' command in
        # finalize_options; outfiles accumulates installed paths for
        # get_outputs().
        self.install_dir = None
        self.outfiles = []
    def finalize_options(self):
        # Install next to the Python modules (install_lib).
        self.set_undefined_options('install', ('install_lib', 'install_dir'))
    def run (self):
        build_clib_cmd = get_cmd("build_clib")
        build_dir = build_clib_cmd.build_clib
        # We need the compiler to get the library name -> filename association
        if not build_clib_cmd.compiler:
            compiler = new_compiler(compiler=None)
            compiler.customize(self.distribution)
        else:
            compiler = build_clib_cmd.compiler
        for l in self.distribution.installed_libraries:
            target_dir = os.path.join(self.install_dir, l.target_dir)
            # Platform-specific filename (e.g. libfoo.a / foo.lib).
            name = compiler.library_filename(l.name)
            source = os.path.join(build_dir, name)
            self.mkpath(target_dir)
            self.outfiles.append(self.copy_file(source, target_dir)[0])
    def get_outputs(self):
        # Paths of all files installed by run().
        return self.outfiles
| gpl-3.0 |
steven-cutting/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/internals.py | 9 | 15857 | # Natural Language Toolkit: Internal utility functions
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
import subprocess, os.path, re, warnings, textwrap
import types
######################################################################
# Regular Expression Processing
######################################################################
def convert_regexp_to_nongrouping(pattern):
    """
    Convert all grouping parenthases in the given regexp pattern to
    non-grouping parenthases, and return the result. E.g.:
        >>> convert_regexp_to_nongrouping('ab(c(x+)(z*))?d')
        'ab(?:c(?:x+)(?:z*))?d'
    @type pattern: C{str}
    @rtype: C{str}
    """
    # Back-references cannot survive the rewrite, so reject them up front.
    for token in re.findall(r'\\.|\(\?P=', pattern):
        if token == '(?P=' or token[1] in '0123456789':
            raise ValueError('Regular expressions with back-references '
                             'are not supported: %r' % pattern)

    def _degroup(match):
        # Turn '(' or '(?P<name>' into '(?:'; leave anything else alone.
        return re.sub('^\((\?P<[^>]*>)?$', '(?:', match.group())

    # Scan the pattern: skip backslashed characters and extension groups,
    # rewrite named groups and plain grouping parens via _degroup.
    return re.sub(r'''(?x)
        \\. |            # Backslashed character
        \(\?P<[^>]*> |   # Named group
        \(\? |           # Extension group
        \(               # Grouping parenthasis''', _degroup, pattern)
##########################################################################
# Java Via Command-Line
##########################################################################
# Full path to the java executable, set by config_java(); None until then.
_java_bin = None
# Extra command-line options prepended to every java() invocation.
_java_options = []
def config_java(bin=None, options=None):
    """
    Configure nltk's java interface, by letting nltk know where it can
    find the C{java} binary, and what extra options (if any) should be
    passed to java when it is run.
    @param bin: The full path to the C{java} binary. If not specified,
        then nltk will search the system for a C{java} binary; and if
        one is not found, it will raise a C{LookupError} exception.
    @type bin: C{string}
    @param options: A list of options that should be passed to the
        C{java} binary when it is called. A common value is
        C{['-Xmx512m']}, which tells the C{java} binary to increase
        the maximum heap size to 512 megabytes. If no options are
        specified, then do not modify the options list.
    @type options: C{list} of C{string}
    """
    global _java_bin, _java_options
    if bin is not None:
        if not os.path.exists(bin):
            raise ValueError('Could not find java binary at %r' % bin)
        _java_bin = bin
    if options is not None:
        # Accept a single space-separated string as a convenience.
        if isinstance(options, basestring):
            options = options.split()
        _java_options = list(options)
    # Check the JAVAHOME environment variable.
    for env_var in ['JAVAHOME', 'JAVA_HOME']:
        if _java_bin is None and env_var in os.environ:
            paths = [os.path.join(os.environ[env_var], 'java'),
                     os.path.join(os.environ[env_var], 'bin', 'java')]
            for path in paths:
                if os.path.exists(path):
                    _java_bin = path
                    print '[Found java: %s]' % path
    # If we're on a POSIX system, try using the 'which' command to
    # find a java binary.
    if _java_bin is None and os.name == 'posix':
        try:
            p = subprocess.Popen(['which', 'java'], stdout=subprocess.PIPE)
            stdout, stderr = p.communicate()
            path = stdout.strip()
            if path.endswith('java') and os.path.exists(path):
                _java_bin = path
                print '[Found java: %s]' % path
        except:
            pass
    if _java_bin is None:
        raise LookupError('Unable to find java! Use config_java() '
                          'or set the JAVAHOME environment variable.')
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None):
    """
    Execute the given java command, by opening a subprocess that calls
    C{java}. If java has not yet been configured, it will be configured
    by calling L{config_java()} with no arguments.
    @param cmd: The java command that should be called, formatted as
        a list of strings. Typically, the first string will be the name
        of the java class; and the remaining strings will be arguments
        for that java class.
    @type cmd: C{list} of C{string}
    @param classpath: A C{':'} separated list of directories, JAR
        archives, and ZIP archives to search for class files.
    @type classpath: C{string}
    @param stdin, stdout, stderr: Specify the executed programs'
        standard input, standard output and standard error file
        handles, respectively. Valid values are C{subprocess.PIPE},
        an existing file descriptor (a positive integer), an existing
        file object, and C{None}. C{subprocess.PIPE} indicates that a
        new pipe to the child should be created. With C{None}, no
        redirection will occur; the child's file handles will be
        inherited from the parent. Additionally, stderr can be
        C{subprocess.STDOUT}, which indicates that the stderr data
        from the applications should be captured into the same file
        handle as for stdout.
    @return: A tuple C{(stdout, stderr)}, containing the stdout and
        stderr outputs generated by the java command if the C{stdout}
        and C{stderr} parameters were set to C{subprocess.PIPE}; or
        C{None} otherwise.
    @raise OSError: If the java command returns a nonzero return code.
    """
    # Guard against a common mistake: a bare command string would be
    # silently split by Popen in unexpected ways.
    if isinstance(cmd, basestring):
        raise TypeError('cmd should be a list of strings')
    # Make sure we know where a java binary is.
    if _java_bin is None:
        config_java()
    # Construct the full command string.
    cmd = list(cmd)
    if classpath is not None:
        cmd = ['-cp', classpath] + cmd
    cmd = [_java_bin] + _java_options + cmd
    # Call java via a subprocess
    p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
    (stdout, stderr) = p.communicate()
    # Check the return code.
    if p.returncode != 0:
        print stderr
        raise OSError('Java command failed!')
    return (stdout, stderr)
# Ad-hoc smoke test for config_java()/java() against a local Weka install;
# permanently disabled by the false condition so it never runs on import.
if 0:
    #config_java(options='-Xmx512m')
    # Write:
    #java('weka.classifiers.bayes.NaiveBayes',
    #     ['-d', '/tmp/names.model', '-t', '/tmp/train.arff'],
    #     classpath='/Users/edloper/Desktop/weka/weka.jar')
    # Read:
    (a,b) = java(['weka.classifiers.bayes.NaiveBayes',
                  '-l', '/tmp/names.model', '-T', '/tmp/test.arff',
                  '-p', '0'],#, '-distribution'],
                 classpath='/Users/edloper/Desktop/weka/weka.jar')
######################################################################
# Parsing
######################################################################
class ParseError(ValueError):
    """
    Exception raised by the parse_* helpers when parsing fails.

    @param expected: Description of what was expected at the point of
        failure.
    @param position: The index into the input string at which parsing
        failed.
    """
    def __init__(self, expected, position):
        super(ParseError, self).__init__(expected, position)
        self.expected = expected
        self.position = position

    def __str__(self):
        return 'Expected {0} at {1}'.format(self.expected, self.position)
# Open quote plus optional u/r modifiers of a Python string literal.
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
def parse_str(s, start_position):
    """
    If a Python string literal begins at the specified position in the
    given string, then return a tuple C{(val, end_position)}
    containing the value of the string literal and the position where
    it ends.  Otherwise, raise a L{ParseError}.
    """
    # Read the open quote, and any modifiers.
    m = _STRING_START_RE.match(s, start_position)
    if not m: raise ParseError('open quote', start_position)
    quotemark = m.group(1)

    # Find the close quote, skipping over backslash escapes.
    _STRING_END_RE = re.compile(r'\\|%s' % quotemark)
    position = m.end()
    while True:
        match = _STRING_END_RE.search(s, position)
        if not match: raise ParseError('close quote', position)
        if match.group(0) == '\\': position = match.end()+1
        else: break

    # Parse it, using eval.  (The evaluated span is delimited by the
    # matched quotemarks, so it should only ever be a string literal --
    # presumably safe, but worth confirming for untrusted input.)
    # Strings with invalid escape sequences might raise ValueError.
    try:
        return eval(s[start_position:match.end()]), match.end()
    except ValueError as e:
        # Bug fix: the original referenced an undefined name ``start``
        # here, which turned a parse failure into a NameError.
        raise ParseError('valid string (%s)' % e, start_position)
# An optionally-signed run of decimal digits.
_PARSE_INT_RE = re.compile(r'-?\d+')
def parse_int(s, start_position):
    """
    If an integer begins at the specified position in the given
    string, then return a tuple C{(val, end_position)} containing the
    value of the integer and the position where it ends.  Otherwise,
    raise a L{ParseError}.
    """
    match = _PARSE_INT_RE.match(s, start_position)
    if match is None:
        raise ParseError('integer', start_position)
    return int(match.group()), match.end()
# Optional sign, integer part, optional fractional part.
_PARSE_NUMBER_VALUE = re.compile(r'-?(\d*)([.]?\d*)?')
def parse_number(s, start_position):
    """
    If an integer or float begins at the specified position in the
    given string, then return a tuple C{(val, end_position)}
    containing the value of the number and the position where it ends.
    Otherwise, raise a L{ParseError}.
    """
    m = _PARSE_NUMBER_VALUE.match(s, start_position)
    if not m or not (m.group(1) or m.group(2)):
        raise ParseError('number', start_position)
    if m.group(2):
        try:
            return float(m.group()), m.end()
        except ValueError:
            # Bug fix: a bare '.' (or '-.') matches the regex and passes
            # the guard above, but float('.') raises ValueError; report it
            # as a ParseError like every other malformed number.
            raise ParseError('number', start_position)
    else:
        return int(m.group()), m.end()
######################################################################
# Check if a method has been overridden
######################################################################
def overridden(method):
    """
    @return: True if C{method} overrides some method with the same
    name in a base class.  This is typically used when defining
    abstract base classes or interfaces, to allow subclasses to define
    either of two related methods:

        >>> class EaterI:
        ...     '''Subclass must define eat() or batch_eat().'''
        ...     def eat(self, food):
        ...         if overridden(self.batch_eat):
        ...             return self.batch_eat([food])[0]
        ...         else:
        ...             raise NotImplementedError()
        ...     def batch_eat(self, foods):
        ...         return [self.eat(food) for food in foods]

    @type method: instance method
    """
    # [xx] breaks on classic classes!
    if not isinstance(method, types.MethodType) or method.im_class is None:
        raise TypeError('Expected an instance method.')
    name = method.__name__
    # Count how many classes along the MRO define this name themselves;
    # more than one definition means the method is overridden somewhere.
    definitions = [klass.__dict__[name]
                   for klass in _mro(method.im_class)
                   if name in klass.__dict__]
    return len(definitions) > 1
def _mro(cls):
    """
    Return the I{method resolution order} for C{cls} -- i.e., a list
    containing C{cls} and all its base classes, in the order in which
    they would be checked by C{getattr}.  For new-style classes, this
    is just cls.__mro__.  For classic classes, this can be obtained by
    a depth-first left-to-right traversal of C{__bases__}.
    """
    if isinstance(cls, type):
        # New-style classes record their MRO directly.
        return cls.__mro__
    # Classic classes: depth-first, left-to-right over the bases.
    order = [cls]
    for base in cls.__bases__:
        order.extend(_mro(base))
    return order
######################################################################
# Deprecation decorator & base class
######################################################################
# [xx] dedent msg first if it comes from a docstring.
def _add_deprecated_field(obj, message):
    """Add a @deprecated field to a given object's docstring."""
    indent = ''
    if obj.__doc__:
        # Separate the new field from the existing docstring with a
        # blank line, and mirror the docstring's own indentation.
        obj.__doc__ = obj.__doc__.rstrip() + '\n\n'
        indents = re.findall(r'(?<=\n)[ ]+(?!\s)', obj.__doc__.expandtabs())
        if indents:
            indent = min(indents)
    else:
        # No docstring yet: start from an empty one.
        obj.__doc__ = ''
    obj.__doc__ += textwrap.fill('@deprecated: %s' % message,
                                 initial_indent=indent,
                                 subsequent_indent=indent+'    ')
def deprecated(message):
    """
    A decorator used to mark functions as deprecated.  This will cause
    a warning to be printed the when the function is used.  Usage:

        >>> @deprecated('Use foo() instead')
        >>> def bar(x):
        ...     print(x/10)
    """
    def decorator(func):
        text = ("Function %s() has been deprecated. %s"
                % (func.__name__, message))
        text = '\n' + textwrap.fill(text, initial_indent=' ',
                                    subsequent_indent=' ')

        def wrapper(*args, **kwargs):
            warnings.warn(text, category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)

        # Copy the old function's name, docstring, & dict.
        wrapper.__dict__.update(func.__dict__)
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        wrapper.__deprecated__ = True
        # Add a @deprecated field to the docstring.
        _add_deprecated_field(wrapper, message)
        return wrapper
    return decorator
class Deprecated(object):
    """
    A base class used to mark deprecated classes.  A typical usage is to
    alert users that the name of a class has changed:

        >>> class OldClassName(Deprecated, NewClassName):
        ...     "Use NewClassName instead."

    The docstring of the deprecated class will be used in the
    deprecation warning message.
    """
    def __new__(cls, *args, **kwargs):
        # Figure out which class is the deprecated one.
        dep_cls = None
        for base in _mro(cls):
            if Deprecated in base.__bases__:
                dep_cls = base
                break
        assert dep_cls, 'Unable to determine which base is deprecated.'

        # Bug fix: the original read ``dep_cls.__doc__ or ''.strip()``,
        # which -- because of operator precedence -- never stripped the
        # docstring (''.strip() is just '').
        doc = (dep_cls.__doc__ or '').strip()
        # If there's a @deprecated field, strip off the field marker.
        doc = re.sub(r'\A\s*@deprecated:', r'', doc)
        # Strip off any indentation.
        doc = re.sub(r'(?m)^\s*', '', doc)
        # Construct a 'name' string.
        name = 'Class %s' % dep_cls.__name__
        if cls != dep_cls:
            name += ' (base class for %s)' % cls.__name__
        # Put it all together, and wrap it.
        msg = '%s has been deprecated. %s' % (name, doc)
        msg = '\n' + textwrap.fill(msg, initial_indent=' ',
                                   subsequent_indent=' ')
        warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
        # Do the actual work of __new__.  (Passing *args through is
        # Python-2 behaviour; object.__new__ rejects extra arguments on
        # Python 3 -- confirm before porting.)
        return object.__new__(cls, *args, **kwargs)
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
    """A counter that auto-increments each time its value is read."""

    def __init__(self, initial_value=0):
        self._count = initial_value

    def get(self):
        """Bump the counter and return its new value."""
        self._count = self._count + 1
        return self._count
| gpl-3.0 |
happy56/kivy | kivy/adapters/dictadapter.py | 3 | 5733 | '''
DictAdapter
===========
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
:class:`~kivy.adapters.dictadapter.DictAdapter` is an adapter around a python
dictionary of records. It extends the list-like capabilities of
:class:`~kivy.adapters.listadapter.ListAdapter`.
If you wish to have a bare-bones list adapter, without selection, use
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`.
'''
__all__ = ('DictAdapter', )
from kivy.properties import ListProperty, DictProperty
from kivy.adapters.listadapter import ListAdapter
class DictAdapter(ListAdapter):
    ''':class:`~kivy.adapters.dictadapter.DictAdapter` is an adapter around a
    python dictionary of records. It extends the list-like capabilities of
    :class:`~kivy.adapters.listadapter.ListAdapter`.
    '''

    sorted_keys = ListProperty([])
    '''The sorted_keys list property contains a list of hashable objects (can
    be strings) that will be used directly if no args_converter function is
    provided. If there is an args_converter, the record received from a
    lookup in the data, using key from sorted_keys, will be passed
    to it, for instantiation of list item view class instances.

    :data:`sorted_keys` is a :class:`~kivy.properties.ListProperty`, default
    to [].
    '''

    data = DictProperty(None)
    '''A dict that indexes records by keys that are equivalent to the keys in
    sorted_keys, or they are a superset of the keys in sorted_keys.
    The values can be strings, class instances, dicts, etc.

    :data:`data` is a :class:`~kivy.properties.DictProperty`, default
    to None.
    '''

    def __init__(self, **kwargs):
        if 'sorted_keys' not in kwargs:
            # No explicit ordering supplied: derive one from the data.
            self.sorted_keys = sorted(kwargs['data'].keys())
        elif type(kwargs['sorted_keys']) not in (tuple, list):
            raise Exception('DictAdapter: sorted_keys must be tuple or list')

        super(DictAdapter, self).__init__(**kwargs)

        self.bind(sorted_keys=self.initialize_sorted_keys)

    def bind_triggers_to_view(self, func):
        self.bind(sorted_keys=func)
        self.bind(data=func)

    # self.data is paramount to self.sorted_keys. If sorted_keys is reset to
    # mismatch data, force a reset of sorted_keys to data.keys(). So, in order
    # to do a complete reset of data and sorted_keys, data must be reset
    # first, followed by a reset of sorted_keys, if needed.
    def initialize_sorted_keys(self, *args):
        if any(key not in self.data for key in self.sorted_keys):
            # At least one key went stale -- rebuild the order from data.
            self.sorted_keys = sorted(self.data.keys())

        self.delete_cache()
        self.initialize_selection()

    # Override ListAdapter.update_for_new_data().
    def update_for_new_data(self, *args):
        self.initialize_sorted_keys()

    # Note: this is not len(self.data).
    def get_count(self):
        return len(self.sorted_keys)

    def get_data_item(self, index):
        if 0 <= index < len(self.sorted_keys):
            return self.data[self.sorted_keys[index]]
        return None

    # [TODO] Also make methods for scroll_to_sel_start, scroll_to_sel_end,
    #        scroll_to_sel_middle.

    def trim_left_of_sel(self, *args):
        '''Cut list items with indices in sorted_keys that are less than the
        index of the first selected item, if there is selection.

        sorted_keys will be updated by update_for_new_data().
        '''
        if not self.selection:
            return
        selected_keys = [sel.text for sel in self.selection]
        first_sel_index = self.sorted_keys.index(selected_keys[0])
        desired_keys = self.sorted_keys[first_sel_index:]
        self.data = dict((key, self.data[key]) for key in desired_keys)

    def trim_right_of_sel(self, *args):
        '''Cut list items with indices in sorted_keys that are greater than
        the index of the last selected item, if there is selection.

        sorted_keys will be updated by update_for_new_data().
        '''
        if not self.selection:
            return
        selected_keys = [sel.text for sel in self.selection]
        last_sel_index = self.sorted_keys.index(selected_keys[-1])
        desired_keys = self.sorted_keys[:last_sel_index + 1]
        self.data = dict((key, self.data[key]) for key in desired_keys)

    def trim_to_sel(self, *args):
        '''Cut list items with indices in sorted_keys that are less than or
        greater than the index of the last selected item, if there is
        selection. This preserves intervening list items within the selected
        range.

        sorted_keys will be updated by update_for_new_data().
        '''
        if not self.selection:
            return
        selected_keys = [sel.text for sel in self.selection]
        first_sel_index = self.sorted_keys.index(selected_keys[0])
        last_sel_index = self.sorted_keys.index(selected_keys[-1])
        desired_keys = self.sorted_keys[first_sel_index:last_sel_index + 1]
        self.data = dict((key, self.data[key]) for key in desired_keys)

    def cut_to_sel(self, *args):
        '''Same as trim_to_sel, but intervening list items within the selected
        range are cut also, leaving only list items that are selected.

        sorted_keys will be updated by update_for_new_data().
        '''
        if not self.selection:
            return
        selected_keys = [sel.text for sel in self.selection]
        self.data = dict((key, self.data[key]) for key in selected_keys)
| lgpl-3.0 |
taotie12010/bigfour | lms/djangoapps/shoppingcart/context_processor.py | 173 | 1679 | """
This is the shoppingcart context_processor module.
Currently the only context_processor detects whether request.user has a cart that should be displayed in the
navigation. We want to do this in the context_processor to
1) keep database accesses out of templates (this led to a transaction bug with user email changes)
2) because navigation.html is "called" by being included in other templates, there is no "views.py" in which to put this logic.
"""
from .models import Order, PaidCourseRegistration, CourseRegCodeItem
from .utils import is_shopping_cart_enabled
def user_has_cart_context_processor(request):
    """
    Checks if request has an authenticated user. If so, checks if request.user has a cart that should
    be displayed. Anonymous users don't.

    Adds `display_shopping_cart` to the context
    """
    def should_display_shopping_cart():
        """
        Returns a boolean if the user has an items in a cart whereby the shopping cart should be
        displayed to the logged in user
        """
        # Guard clauses preserve the original short-circuit order, so the
        # (optimized) cart queries only run for logged-in users with the
        # shopping cart feature enabled.
        if not request.user.is_authenticated():
            return False
        if not is_shopping_cart_enabled():
            return False
        if not Order.does_user_have_cart(request.user):
            return False
        # Cart must actually hold PaidCourseRegistrations or CourseRegCodeItems.
        return Order.user_cart_has_items(
            request.user,
            [PaidCourseRegistration, CourseRegCodeItem]
        )

    return {'should_display_shopping_cart_func': should_display_shopping_cart}
| agpl-3.0 |
samratashok87/Rammbock | utest/test_templates/test_protocol.py | 3 | 1141 | from unittest import TestCase
from Rammbock.templates.containers import Protocol
from Rammbock.templates.primitives import UInt, PDU
class TestProtocol(TestCase):
    """Unit tests for Protocol header-length calculation and PDU validation.

    Note: ``assertEquals`` (a deprecated alias, removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        self._protocol = Protocol('Test')

    def test_header_length(self):
        self._protocol.add(UInt(1, 'name1', None))
        self.assertEqual(self._protocol.header_length(), 1)

    def test_header_length_with_pdu(self):
        self._protocol.add(UInt(1, 'name1', None))
        self._protocol.add(UInt(2, 'name2', 5))
        self._protocol.add(UInt(2, 'length', None))
        self._protocol.add(PDU('length'))
        self.assertEqual(self._protocol.header_length(), 5)

    def test_verify_undefined_length(self):
        # Adding a PDU whose length expression references an undefined
        # field must be rejected.
        self._protocol.add(UInt(1, 'name1', None))
        self._protocol.add(UInt(2, 'name2', 5))
        self.assertRaises(Exception, self._protocol.add, PDU('length'))

    def test_verify_calculated_length(self):
        self._protocol.add(UInt(1, 'name1', 1))
        self._protocol.add(UInt(2, 'length', None))
        self._protocol.add(PDU('length-8'))
        self.assertEqual(self._protocol.header_length(), 3)
| apache-2.0 |
Lh4cKg/sl4a | python/src/Lib/plat-mac/Carbon/Events.py | 81 | 2232 | # Generated from 'Events.h'
# Classic Mac OS event codes (EventRecord.what values).
nullEvent = 0
mouseDown = 1
mouseUp = 2
keyDown = 3
keyUp = 4
autoKey = 5
updateEvt = 6
diskEvt = 7
activateEvt = 8
osEvt = 15
kHighLevelEvent = 23
# Event masks: one bit per event code, for WaitNextEvent-style filtering.
mDownMask = 1 << mouseDown
mUpMask = 1 << mouseUp
keyDownMask = 1 << keyDown
keyUpMask = 1 << keyUp
autoKeyMask = 1 << autoKey
updateMask = 1 << updateEvt
diskMask = 1 << diskEvt
activMask = 1 << activateEvt
highLevelEventMask = 0x0400
osMask = 1 << osEvt
everyEvent = 0xFFFF
# Masks for unpacking the EventRecord.message field of key events.
charCodeMask = 0x000000FF
keyCodeMask = 0x0000FF00
adbAddrMask = 0x00FF0000
# osEvtMessageMask = (unsigned long)0xFF000000
# osEvt sub-messages and their flags.
mouseMovedMessage = 0x00FA
suspendResumeMessage = 0x0001
resumeFlag = 1
convertClipboardFlag = 2
# Bit numbers within the EventRecord.modifiers field.
activeFlagBit = 0
btnStateBit = 7
cmdKeyBit = 8
shiftKeyBit = 9
alphaLockBit = 10
optionKeyBit = 11
controlKeyBit = 12
rightShiftKeyBit = 13
rightOptionKeyBit = 14
rightControlKeyBit = 15
# Corresponding modifier masks, derived from the bit numbers above.
activeFlag = 1 << activeFlagBit
btnState = 1 << btnStateBit
cmdKey = 1 << cmdKeyBit
shiftKey = 1 << shiftKeyBit
alphaLock = 1 << alphaLockBit
optionKey = 1 << optionKeyBit
controlKey = 1 << controlKeyBit
rightShiftKey = 1 << rightShiftKeyBit
rightOptionKey = 1 << rightOptionKeyBit
rightControlKey = 1 << rightControlKeyBit
# Character codes delivered in key events.
kNullCharCode = 0
kHomeCharCode = 1
kEnterCharCode = 3
kEndCharCode = 4
kHelpCharCode = 5
kBellCharCode = 7
kBackspaceCharCode = 8
kTabCharCode = 9
kLineFeedCharCode = 10
kVerticalTabCharCode = 11
kPageUpCharCode = 11
kFormFeedCharCode = 12
kPageDownCharCode = 12
kReturnCharCode = 13
kFunctionKeyCharCode = 16
kCommandCharCode = 17
kCheckCharCode = 18
kDiamondCharCode = 19
kAppleLogoCharCode = 20
kEscapeCharCode = 27
kClearCharCode = 27
kLeftArrowCharCode = 28
kRightArrowCharCode = 29
kUpArrowCharCode = 30
kDownArrowCharCode = 31
kSpaceCharCode = 32
kDeleteCharCode = 127
kBulletCharCode = 165
kNonBreakingSpaceCharCode = 202
# Unicode code points for menu-glyph symbols.
kShiftUnicode = 0x21E7
kControlUnicode = 0x2303
kOptionUnicode = 0x2325
kCommandUnicode = 0x2318
kPencilUnicode = 0x270E
kCheckUnicode = 0x2713
kDiamondUnicode = 0x25C6
kBulletUnicode = 0x2022
kAppleLogoUnicode = 0xF8FF
# Legacy network/driver/application-defined event codes and masks.
networkEvt = 10
driverEvt = 11
app1Evt = 12
app2Evt = 13
app3Evt = 14
app4Evt = 15
networkMask = 0x0400
driverMask = 0x0800
app1Mask = 0x1000
app2Mask = 0x2000
app3Mask = 0x4000
app4Mask = 0x8000
| apache-2.0 |
vegarwe/luma | luma/base/gui/design/MainWindowDesign.py | 3 | 10734 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/debris/devel/repo/git/luma-fixes/resources/forms/MainWindowDesign.ui'
#
# Created: Wed May 25 21:41:09 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 only exists under PyQt4's API v1; fall back to an
# identity function when it is missing (API v2 / Python 3 builds).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    """UI scaffolding for Luma's main window, generated by pyuic4.

    Per the header warning, this file is regenerated from the .ui form;
    edit the Designer form rather than this code.
    """

    def setupUi(self, MainWindow):
        """Instantiate all widgets/actions and wire up signal connections."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(700, 500)
        MainWindow.setWindowOpacity(1.0)
        # Central widget: a grid layout holding the main tab container.
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.mainTabs = QtGui.QTabWidget(self.centralwidget)
        self.mainTabs.setObjectName(_fromUtf8("mainTabs"))
        self.gridLayout.addWidget(self.mainTabs, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and its top-level menus.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 700, 23))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName(_fromUtf8("menuFile"))
        self.menuEdit = QtGui.QMenu(self.menubar)
        self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
        self.menuLanguage = QtGui.QMenu(self.menuEdit)
        self.menuLanguage.setObjectName(_fromUtf8("menuLanguage"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        self.menu_View = QtGui.QMenu(self.menubar)
        self.menu_View.setObjectName(_fromUtf8("menu_View"))
        MainWindow.setMenuBar(self.menubar)
        self.statusBar = QtGui.QStatusBar(MainWindow)
        self.statusBar.setObjectName(_fromUtf8("statusBar"))
        MainWindow.setStatusBar(self.statusBar)
        # Actions (menu entries); checkable ones toggle UI visibility.
        self.actionShowLogger = QtGui.QAction(MainWindow)
        self.actionShowLogger.setCheckable(True)
        self.actionShowLogger.setObjectName(_fromUtf8("actionShowLogger"))
        self.actionQuit = QtGui.QAction(MainWindow)
        self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
        self.actionEditServerList = QtGui.QAction(MainWindow)
        self.actionEditServerList.setObjectName(_fromUtf8("actionEditServerList"))
        self.actionReloadPlugins = QtGui.QAction(MainWindow)
        self.actionReloadPlugins.setObjectName(_fromUtf8("actionReloadPlugins"))
        self.actionConfigurePlugins = QtGui.QAction(MainWindow)
        self.actionConfigurePlugins.setObjectName(_fromUtf8("actionConfigurePlugins"))
        self.actionAboutLuma = QtGui.QAction(MainWindow)
        self.actionAboutLuma.setObjectName(_fromUtf8("actionAboutLuma"))
        self.actionEditSettings = QtGui.QAction(MainWindow)
        self.actionEditSettings.setObjectName(_fromUtf8("actionEditSettings"))
        self.actionShowPluginList = QtGui.QAction(MainWindow)
        self.actionShowPluginList.setCheckable(False)
        self.actionShowPluginList.setObjectName(_fromUtf8("actionShowPluginList"))
        self.actionShowWelcomeTab = QtGui.QAction(MainWindow)
        self.actionShowWelcomeTab.setCheckable(False)
        self.actionShowWelcomeTab.setEnabled(False)
        self.actionShowWelcomeTab.setObjectName(_fromUtf8("actionShowWelcomeTab"))
        self.actionShowToolbar = QtGui.QAction(MainWindow)
        self.actionShowToolbar.setCheckable(True)
        self.actionShowToolbar.setObjectName(_fromUtf8("actionShowToolbar"))
        self.actionShowStatusbar = QtGui.QAction(MainWindow)
        self.actionShowStatusbar.setCheckable(True)
        self.actionShowStatusbar.setChecked(True)
        self.actionShowStatusbar.setObjectName(_fromUtf8("actionShowStatusbar"))
        self.actionFullscreen = QtGui.QAction(MainWindow)
        self.actionFullscreen.setCheckable(True)
        self.actionFullscreen.setObjectName(_fromUtf8("actionFullscreen"))
        self.actionSet_Temporary_Password = QtGui.QAction(MainWindow)
        self.actionSet_Temporary_Password.setObjectName(_fromUtf8("actionSet_Temporary_Password"))
        # Populate the menus with the actions above.
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionQuit)
        self.menuEdit.addAction(self.actionEditServerList)
        self.menuEdit.addAction(self.actionSet_Temporary_Password)
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.menuLanguage.menuAction())
        self.menuEdit.addSeparator()
        self.menuEdit.addAction(self.actionEditSettings)
        self.menuHelp.addAction(self.actionAboutLuma)
        self.menu_View.addAction(self.actionShowPluginList)
        self.menu_View.addAction(self.actionShowWelcomeTab)
        self.menu_View.addSeparator()
        self.menu_View.addAction(self.actionShowStatusbar)
        self.menu_View.addAction(self.actionShowLogger)
        self.menu_View.addSeparator()
        self.menu_View.addAction(self.actionFullscreen)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menu_View.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())

        self.retranslateUi(MainWindow)
        # Old-style signal/slot connections to MainWindow's handler slots.
        QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
        QtCore.QObject.connect(self.actionAboutLuma, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showAboutLuma)
        QtCore.QObject.connect(self.actionConfigurePlugins, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.configurePlugins)
        QtCore.QObject.connect(self.actionReloadPlugins, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.reloadPlugins)
        QtCore.QObject.connect(self.actionShowLogger, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), MainWindow.toggleLoggerWindow)
        QtCore.QObject.connect(self.actionEditServerList, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showServerEditor)
        QtCore.QObject.connect(self.actionEditSettings, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showSettingsDialog)
        QtCore.QObject.connect(self.mainTabs, QtCore.SIGNAL(_fromUtf8("tabCloseRequested(int)")), MainWindow.tabClose)
        QtCore.QObject.connect(self.actionShowPluginList, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showPlugins)
        QtCore.QObject.connect(self.actionShowWelcomeTab, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showWelcome)
        QtCore.QObject.connect(self.actionShowStatusbar, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), MainWindow.toggleStatusbar)
        QtCore.QObject.connect(self.actionFullscreen, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), MainWindow.toggleFullscreen)
        QtCore.QObject.connect(self.actionSet_Temporary_Password, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showTempPasswordDialog)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible (translatable) strings and shortcuts."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Luma", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
        self.menuEdit.setTitle(QtGui.QApplication.translate("MainWindow", "&Edit", None, QtGui.QApplication.UnicodeUTF8))
        self.menuLanguage.setTitle(QtGui.QApplication.translate("MainWindow", "&Language", None, QtGui.QApplication.UnicodeUTF8))
        self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
        self.menu_View.setTitle(QtGui.QApplication.translate("MainWindow", "&View", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowLogger.setText(QtGui.QApplication.translate("MainWindow", "&Logger Window", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowLogger.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+L", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setText(QtGui.QApplication.translate("MainWindow", "&Quit", None, QtGui.QApplication.UnicodeUTF8))
        self.actionQuit.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
        self.actionEditServerList.setText(QtGui.QApplication.translate("MainWindow", "S&erver List", None, QtGui.QApplication.UnicodeUTF8))
        self.actionEditServerList.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Shift+S", None, QtGui.QApplication.UnicodeUTF8))
        self.actionReloadPlugins.setText(QtGui.QApplication.translate("MainWindow", "Reload Plugins", None, QtGui.QApplication.UnicodeUTF8))
        self.actionReloadPlugins.setShortcut(QtGui.QApplication.translate("MainWindow", "F5", None, QtGui.QApplication.UnicodeUTF8))
        self.actionConfigurePlugins.setText(QtGui.QApplication.translate("MainWindow", "Configure Plugins", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAboutLuma.setText(QtGui.QApplication.translate("MainWindow", "&About Luma", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAboutLuma.setShortcut(QtGui.QApplication.translate("MainWindow", "F12", None, QtGui.QApplication.UnicodeUTF8))
        self.actionEditSettings.setText(QtGui.QApplication.translate("MainWindow", "&Settings", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowPluginList.setText(QtGui.QApplication.translate("MainWindow", "&Plugin List", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowPluginList.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+P", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowWelcomeTab.setText(QtGui.QApplication.translate("MainWindow", "&Welcome Tab", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowWelcomeTab.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Shift+W", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowToolbar.setText(QtGui.QApplication.translate("MainWindow", "Toolbar", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShowStatusbar.setText(QtGui.QApplication.translate("MainWindow", "&Statusbar", None, QtGui.QApplication.UnicodeUTF8))
        self.actionFullscreen.setText(QtGui.QApplication.translate("MainWindow", "&Fullscreen", None, QtGui.QApplication.UnicodeUTF8))
        self.actionFullscreen.setShortcut(QtGui.QApplication.translate("MainWindow", "F11", None, QtGui.QApplication.UnicodeUTF8))
        self.actionSet_Temporary_Password.setText(QtGui.QApplication.translate("MainWindow", "Temporary &Password", None, QtGui.QApplication.UnicodeUTF8))
| gpl-2.0 |
Heteroskedastic/chills-pos | chills_pos/pos/views.py | 1 | 1514 | from django.conf import settings
from django.contrib.staticfiles.views import serve
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from chills_pos.helpers.utils import PermissionRequiredMixin
from pos.models import Customer
@method_decorator(csrf_exempt, name='dispatch')
class IndexView(PermissionRequiredMixin, View):
    """Hands both GET and POST over to the static single-page app bundle."""

    permission_required = ()

    def _to_spa(self):
        # Both verbs land on the same static entry point.
        return redirect('/static/index.html')

    def get(self, request, *args, **kwargs):
        return self._to_spa()

    def post(self, request, *args, **kwargs):
        return self._to_spa()
@method_decorator(csrf_exempt, name='dispatch')
class LoginView(View):
    """Render the login page, or bounce already-authenticated users onward."""

    template_name = "login.html"

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated():
            return render(request, self.template_name)
        # Honour ?next=..., falling back to the configured landing page.
        destination = request.GET.get('next') or settings.LOGIN_REDIRECT_URL
        return redirect(destination)
class CustomerCardsView(PermissionRequiredMixin, View):
    """Group customers by unit name and render the printable card sheet."""

    permission_required = 'pos.view_customer'

    def get(self, request, *args, **kwargs):
        by_unit = {}
        for customer in Customer.objects.order_by('unit', 'id'):
            # Customers without a unit group under a falsy key.
            unit_name = customer.unit and customer.unit.name
            by_unit.setdefault(unit_name, []).append(customer)
        return render(request, "pos/customer-cards.html",
                      {'unit_customers': by_unit})
| mit |
xzYue/odoo | addons/mrp/tests/test_multicompany.py | 374 | 2660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMrpMulticompany(common.TransactionCase):
    """Regression test: default stock locations must be readable by a user
    whose active company is the secondary company."""

    def setUp(self):
        super(TestMrpMulticompany, self).setUp()
        cr, uid = self.cr, self.uid

        # Useful models
        self.ir_model_data = self.registry('ir.model.data')
        self.res_users = self.registry('res.users')
        self.stock_location = self.registry('stock.location')

        group_user_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'base.group_user')
        group_stock_manager_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.group_stock_manager')
        company_2_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.res_company_1')
        # Employee + stock manager whose only company is the second one.
        self.multicompany_user_id = self.res_users.create(cr, uid,
            {'name': 'multicomp', 'login': 'multicomp',
             'groups_id': [(6, 0, [group_user_id, group_stock_manager_id])],
             'company_id': company_2_id, 'company_ids': [(6,0,[company_2_id])]})

    def test_00_multicompany_user(self):
        """check no error on getting default mrp.production values in multicompany setting"""
        cr, uid, context = self.cr, self.multicompany_user_id, {}
        fields = ['location_src_id', 'location_dest_id']
        # NOTE(review): default_get is queried for 'location_id' (not
        # 'location_src_id'), so the 'location_src_id' branch of the loop
        # below can never fire -- confirm which field names were intended.
        defaults = self.stock_location.default_get(cr, uid, ['location_id', 'location_dest_id', 'type'], context)
        for field in fields:
            if defaults.get(field):
                try:
                    # Raises if record rules make the default location
                    # unreadable for the multi-company user.
                    self.stock_location.check_access_rule(cr, uid, [defaults[field]], 'read', context)
                except Exception, exc:
                    assert False, "unreadable location %s: %s" % (field, exc)
| agpl-3.0 |
dgzurita/odoo | addons/l10n_be_intrastat/l10n_be_intrastat.py | 258 | 7828 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_invoice(osv.osv):
    """Extend invoices with the fields required for Belgian Intrastat
    declarations."""
    _inherit = "account.invoice"

    _columns = {
        'incoterm_id': fields.many2one(
            'stock.incoterms', 'Incoterm',
            help="International Commercial Terms are a series of predefined commercial terms "
                 "used in international transactions."),
        'intrastat_transaction_id': fields.many2one(
            'l10n_be_intrastat.transaction', 'Intrastat Transaction Type',
            help="Intrastat nature of transaction"),
        'transport_mode_id': fields.many2one(
            'l10n_be_intrastat.transport_mode', 'Intrastat Transport Mode'),
        # Delivery country for sales, origin country for purchases;
        # restricted to countries flagged as intrastat members.
        'intrastat_country_id': fields.many2one(
            'res.country', 'Intrastat Country',
            help='Intrastat country, delivery for sales, origin for purchases',
            domain=[('intrastat','=',True)]),
    }
class intrastat_region(osv.osv):
    """Referential of Intrastat regions (one record per unique code)."""
    _name = 'l10n_be_intrastat.region'

    _columns = {
        'code': fields.char('Code', required=True),
        'country_id': fields.many2one('res.country', 'Country'),
        'name': fields.char('Name', translate=True),
        'description': fields.char('Description'),
    }

    _sql_constraints = [
        ('l10n_be_intrastat_regioncodeunique', 'UNIQUE (code)', 'Code must be unique.'),
    ]
class intrastat_transaction(osv.osv):
    """Referential of Intrastat natures of transaction; records are
    read-only reference data, displayed by their code."""
    _name = 'l10n_be_intrastat.transaction'
    _rec_name = 'code'

    _columns = {
        'code': fields.char('Code', required=True, readonly=True),
        'description': fields.text('Description', readonly=True),
    }

    _sql_constraints = [
        ('l10n_be_intrastat_trcodeunique', 'UNIQUE (code)', 'Code must be unique.'),
    ]
class intrastat_transport_mode(osv.osv):
    """Referential of Intrastat transport modes (read-only reference data)."""
    _name = 'l10n_be_intrastat.transport_mode'

    _columns = {
        'code': fields.char('Code', required=True, readonly=True),
        'name': fields.char('Description', readonly=True),
    }

    _sql_constraints = [
        ('l10n_be_intrastat_trmodecodeunique', 'UNIQUE (code)', 'Code must be unique.'),
    ]
class product_category(osv.osv):
    _name = "product.category"
    _inherit = "product.category"

    _columns = {
        'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat Code'),
    }

    def get_intrastat_recursively(self, cr, uid, category, context=None):
        """Return the intrastat code id of *category*, climbing up the
        category tree until a code is found.

        :param category: browse record of a product category
        :return: id of a report.intrastat.code, or None when no ancestor
                 defines one
        """
        if category.intrastat_id:
            return category.intrastat_id.id
        if category.parent_id:
            return self.get_intrastat_recursively(cr, uid, category.parent_id, context=context)
        return None
class product_product(osv.osv):
    _name = "product.product"
    _inherit = "product.product"

    def get_intrastat_recursively(self, cr, uid, id, context=None):
        """Return the intrastat code id of the product, falling back to the
        product's category tree when the product itself defines none."""
        product = self.browse(cr, uid, id, context=context)
        if product.intrastat_id:
            return product.intrastat_id.id
        if product.categ_id:
            category_model = self.pool['product.category']
            return category_model.get_intrastat_recursively(
                cr, uid, product.categ_id, context=context)
        return None
class purchase_order(osv.osv):
    _inherit = "purchase.order"

    def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
        """Carry the incoterm over from the purchase order and guess the
        intrastat origin country of the goods."""
        vals = super(purchase_order, self)._prepare_invoice(
            cr, uid, order, line_ids, context=context)
        if order.incoterm_id:
            vals['incoterm_id'] = order.incoterm_id.id
        # Products presumably originate from the supplier's country.
        supplier_country = order.partner_id.country_id
        if supplier_country:
            vals['intrastat_country_id'] = supplier_country.id
        return vals
class report_intrastat_code(osv.osv):
    """Add a translatable description to the generic intrastat code model."""
    _inherit = "report.intrastat.code"

    _columns = {
        'description': fields.text('Description', translate=True),
    }
class res_company(osv.osv):
    """Company-level defaults used when producing the Intrastat declaration."""
    _inherit = "res.company"

    _columns = {
        'region_id': fields.many2one('l10n_be_intrastat.region', 'Intrastat region'),
        'transport_mode_id': fields.many2one('l10n_be_intrastat.transport_mode',
                                             'Default transport mode'),
        'incoterm_id': fields.many2one('stock.incoterms', 'Default incoterm for Intrastat',
                                       help="International Commercial Terms are a series of "
                                            "predefined commercial terms used in international "
                                            "transactions."),
    }
class sale_order(osv.osv):
    _inherit = "sale.order"

    def _prepare_invoice(self, cr, uid, saleorder, lines, context=None):
        """Carry the incoterm over from the sale order and guess the
        intrastat destination country of the invoiced goods."""
        vals = super(sale_order, self)._prepare_invoice(
            cr, uid, saleorder, lines, context=context)
        if saleorder.incoterm:
            vals['incoterm_id'] = saleorder.incoterm.id
        # Destination country: shipping address first, then the customer
        # itself, then the invoicing address (first match wins).
        candidates = (saleorder.partner_shipping_id,
                      saleorder.partner_id,
                      saleorder.partner_invoice_id)
        for partner in candidates:
            if partner.country_id:
                vals['intrastat_country_id'] = partner.country_id.id
                break
        return vals
class stock_warehouse(osv.osv):
    """Attach an Intrastat region to each warehouse and resolve the region
    applicable to an arbitrary stock location."""
    _inherit = "stock.warehouse"

    _columns = {
        'region_id': fields.many2one('l10n_be_intrastat.region', 'Intrastat region'),
    }

    def get_regionid_from_locationid(self, cr, uid, location_id, context=None):
        """Return the region id of the first warehouse (with a region set)
        whose main stock location is an ancestor-or-self of *location_id*,
        or None when no such warehouse exists."""
        location_model = self.pool['stock.location']
        location = location_model.browse(cr, uid, location_id, context=context)
        # Nested-set lookup: ancestors (including self) are exactly the
        # locations whose [parent_left, parent_right] interval encloses ours.
        location_ids = location_model.search(cr, uid,
            [('parent_left', '<=', location.parent_left),
             ('parent_right', '>=', location.parent_right)],
            context=context)
        warehouse_ids = self.search(cr, uid,
            [('lot_stock_id', 'in', location_ids),
             ('region_id', '!=', False)],
            context=context)
        warehouses = self.browse(cr, uid, warehouse_ids, context=context)
        if warehouses and warehouses[0]:
            return warehouses[0].region_id.id
        return None
| agpl-3.0 |
ThomasWollmann/bioconda-recipes | scripts/build-packages.py | 3 | 8142 | #!/usr/bin/env python
import os
import glob
import subprocess as sp
import argparse
import sys
from collections import defaultdict, Iterable
from itertools import product, chain
import networkx as nx
import nose
from conda_build.metadata import MetaData
import yaml
def flatten_dict(mapping):
    """Expand a ``{key: value-or-values}`` mapping into per-key pair lists.

    Each scalar value (strings included) is treated as a one-element list,
    so every key yields a list of ``(key, value)`` pairs.  Feeding the
    result to :func:`itertools.product` enumerates every combination of
    one value per key (see :class:`EnvMatrix`).

    The parameter used to be named ``dict``, shadowing the builtin; it is
    renamed here (positional call sites are unaffected).
    """
    for key, values in mapping.items():
        if isinstance(values, str) or not isinstance(values, Iterable):
            values = [values]
        yield [(key, value) for value in values]
class EnvMatrix:
    """Iterate over all environment-variable combinations described by a
    YAML file mapping variable names to a value or a list of values."""

    def __init__(self, path):
        with open(path) as f:
            # safe_load: the matrix file is plain data; the full yaml.load
            # would allow arbitrary-object construction from the file.
            self.env = yaml.safe_load(f)

    def __iter__(self):
        """Yield one full copy of os.environ per combination, with the
        matrix variables applied on top."""
        for env in product(*flatten_dict(self.env)):
            e = dict(os.environ)
            e.update(env)
            yield e
def get_metadata(recipes):
    """Lazily wrap each recipe path in a conda-build MetaData object."""
    yield from map(MetaData, recipes)
def get_deps(metadata, build=True):
    """Yield the bare package name of every build- (default) or
    run-requirement of *metadata*, dropping version constraints."""
    section = "build" if build else "run"
    for requirement in metadata.get_value("requirements/{}".format(section), []):
        yield requirement.split()[0]
def get_dag(recipes):
    """Build the dependency DAG between the given recipes.

    Returns a tuple ``(dag, name2recipe)``: ``dag`` is a networkx.DiGraph
    whose nodes are package names and whose edges point from a dependency
    to its dependent; ``name2recipe`` maps a package name to the recipe
    folder(s) providing it.
    """
    metadata = list(get_metadata(recipes))
    # One package name can be provided by several recipe folders
    # (e.g. versioned subfolders), hence list values.
    name2recipe = defaultdict(list)
    for meta, recipe in zip(metadata, recipes):
        name2recipe[meta.get_value("package/name")].append(recipe)

    def get_inner_deps(dependencies):
        # Keep only dependencies that are themselves built from these recipes.
        for dep in dependencies:
            name = dep.split()[0]
            if name in name2recipe:
                yield name

    dag = nx.DiGraph()
    dag.add_nodes_from(meta.get_value("package/name") for meta in metadata)
    for meta in metadata:
        name = meta.get_value("package/name")
        # Edge dep -> name: build and run deps must be built before `name`.
        dag.add_edges_from((dep, name) for dep in set(get_inner_deps(chain(get_deps(meta), get_deps(meta, build=False)))))
    #nx.relabel_nodes(dag, name2recipe, copy=False)
    return dag, name2recipe
def conda_index():
    """Refresh the local conda channel indices for both build platforms."""
    sp.run(
        ["conda", "index",
         "/anaconda/conda-bld/linux-64",
         "/anaconda/conda-bld/osx-64"],
        check=True, stdout=sp.PIPE)
def build_recipe(recipe, env_matrix, testonly=False):
    """Build (or, with *testonly*, only test) *recipe* once per environment
    in *env_matrix*.

    Raises AssertionError when any environment fails.  Relies on the
    module-global ``args`` for verbosity.
    """
    def build(env):
        # One conda-build invocation under *env*; returns True on success.
        try:
            out = None if args.verbose else sp.PIPE
            build_args = []
            if testonly:
                build_args.append("--test")
            else:
                build_args += ["--no-anaconda-upload", "--skip-existing"]
            sp.run(["conda", "build", "--quiet", recipe] + build_args,
                   stderr=out, stdout=out, check=True, universal_newlines=True,
                   env=env)
            return True
        except sp.CalledProcessError as e:
            # Output was captured; re-print it so the CI log shows the failure.
            if e.stdout is not None:
                print(e.stdout)
                print(e.stderr)
            return False
    conda_index()
    # use list to enforce all builds
    success = all(list(map(build, env_matrix)))
    if not success:
        # fail when any environment failed to build
        assert False, "At least one build of recipe {} failed.".format(recipe)
def filter_recipes(recipes, env_matrix):
    """Yield only the recipes that still need building.

    Runs ``conda build --skip-existing --output`` in every environment and
    drops a recipe when every environment reports it as skipped (already
    built, or build/skip set in its meta.yaml).
    """
    def msgs(env):
        # One status line per recipe; drop conda-build noise plus the
        # first and last chatter lines.
        p = sp.run(
            ["conda", "build", "--skip-existing", "--output"] + recipes,
            check=True, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True,
            env=env
        )
        return [msg for msg in p.stdout.split("\n") if "Ignoring non-recipe" not in msg][1:-1]
    skip = lambda msg: "already built, skipping" in msg or "defines build/skip" in msg
    try:
        # zip aligns each recipe with its status line from every environment.
        for item in zip(recipes, *map(msgs, env_matrix)):
            recipe = item[0]
            msg = item[1:]
            if not all(map(skip, msg)):
                yield recipe
    except sp.CalledProcessError as e:
        print(e.stderr, file=sys.stderr)
        exit(1)
def get_recipes(package="*"):
    """Yield recipe folders (directories holding a meta.yaml) under the
    repository's recipes/<package> tree, one or two levels deep."""
    base = os.path.join(args.repository, "recipes", package)
    for pattern in (os.path.join(base, "meta.yaml"),
                    os.path.join(base, "*", "meta.yaml")):
        for meta_path in glob.glob(pattern):
            yield os.path.dirname(meta_path)
def test_recipes():
    """Nose test generator: yield one build/test task per recipe of the
    subdag shard selected via the SUBDAGS/SUBDAG environment variables,
    then (on the CI master branch) upload the built packages."""
    if args.packages:
        recipes = [recipe for package in args.packages for recipe in get_recipes(package)]
    else:
        recipes = list(get_recipes())
    env_matrix = EnvMatrix(args.env_matrix)
    if not args.testonly:
        # filter out recipes that don't need to be build
        recipes = list(filter_recipes(recipes, env_matrix))
    # Build dag of recipes
    dag, name2recipes = get_dag(recipes)
    print("Packages to build", file=sys.stderr)
    print(*nx.nodes(dag), file=sys.stderr, sep="\n")
    # CI sharding: SUBDAGS = total number of shards, SUBDAG = this shard.
    subdags_n = int(os.environ.get("SUBDAGS", 1))
    subdag_i = int(os.environ.get("SUBDAG", 0))
    # Get connected subdags and sort by nodes
    if args.testonly:
        # use each node as a subdag (they are grouped into equal sizes below)
        subdags = sorted([[n] for n in nx.nodes(dag)])
    else:
        # take connected components as subdags
        subdags = sorted(map(sorted, nx.connected_components(dag.to_undirected())))
    # chunk subdags such that we have at most subdags_n many
    if subdags_n < len(subdags):
        chunks = [[n for subdag in subdags[i::subdags_n] for n in subdag]
            for i in range(subdags_n)]
    else:
        chunks = subdags
    if subdag_i >= len(chunks):
        print("Nothing to be done.")
        return
    # merge subdags of the selected chunk
    subdag = dag.subgraph(chunks[subdag_i])
    # ensure that packages which need a build are built in the right order
    recipes = [recipe for package in nx.topological_sort(subdag) for recipe in name2recipes[package]]
    print("Building/testing subdag {} of recipes in order:".format(subdag_i), file=sys.stderr)
    print(*recipes, file=sys.stderr, sep="\n")
    if args.testonly:
        for recipe in recipes:
            yield build_recipe, recipe, env_matrix, True
    else:
        # build packages
        for recipe in recipes:
            yield build_recipe, recipe, env_matrix
        # upload builds, but only from CI runs of master (never from PRs)
        if os.environ.get("TRAVIS_BRANCH") == "master" and os.environ.get(
                "TRAVIS_PULL_REQUEST") == "false":
            for recipe in recipes:
                # Distinct package paths produced across the environments.
                packages = {
                    sp.run(["conda", "build", "--output", recipe],
                           stdout=sp.PIPE, env=env,
                           check=True).stdout.strip().decode()
                    for env in env_matrix
                }
                for package in packages:
                    if os.path.exists(package):
                        try:
                            sp.run(["anaconda", "-t",
                                    os.environ.get("ANACONDA_TOKEN"),
                                    "upload", package], stdout=sp.PIPE, stderr=sp.STDOUT, check=True)
                        except sp.CalledProcessError as e:
                            print(e.stdout.decode(), file=sys.stderr)
                            if b"already exists" in e.stdout:
                                # ignore error assuming that it is caused by existing package
                                pass
                            else:
                                raise e
if __name__ == "__main__":
    p = argparse.ArgumentParser(description="Build bioconda packages")
    p.add_argument("--env-matrix", required=True, help="Path to environment variable matrix.")
    p.add_argument("--repository",
                   default="/bioconda-recipes",
                   help="Path to checkout of bioconda recipes repository.")
    p.add_argument("--packages",
                   nargs="+",
                   help="A specific package to build.")
    p.add_argument("--testonly", action="store_true", help="Test packages instead of building.")
    p.add_argument("-v", "--verbose",
                   help="Make output more verbose for local debugging",
                   default=False,
                   action="store_true")
    # NOTE(review): `global` at module level is a no-op; kept for fidelity.
    global args
    args = p.parse_args()
    # Pin the conda-build version the recipes are known to work with.
    sp.run(["pip", "install", "git+https://github.com/conda/conda-build.git@1.20.3"], check=True)
    # Log toolchain versions to ease debugging of CI builds.
    sp.run(["gcc", "--version"], check=True)
    try:
        sp.run(["ldd", "--version"], check=True)
    except:
        pass
    # Delegate to nose: test_recipes() yields (callable, args...) tuples.
    nose.main(argv=sys.argv[:1], defaultTest="__main__")
| mit |
rafaelmds/fatiando | gallery/datasets/hawaii_gravity.py | 6 | 2395 | """
Hawaii gravity data
-------------------
The :mod:`fatiando.datasets` package includes some data sets to make it easier
to try things out in Fatiando.
This example shows the gravity data from Hawaii.
"""
from __future__ import print_function
from fatiando.datasets import fetch_hawaii_gravity
import numpy as np
import matplotlib.pyplot as plt
# Load the gravity data from Hawaii
data = fetch_hawaii_gravity()

# The data are packaged in a dictionary. Look at the keys to see what is
# available.
print('Data keys:', data.keys())

# There are some metadata included
print('\nMetadata:\n')
print(data['metadata'])

# Let's plot all of it using the UTM x and y coordinates (converted to km)
shape = data['shape']
X, Y = data['x'].reshape(shape)/1000, data['y'].reshape(shape)/1000

fig = plt.figure(figsize=(14, 8))
plt.rcParams['font.size'] = 10

# Slot 1 of the 2x3 grid is deliberately left empty.
ax = plt.subplot(2, 3, 2)
ax.set_title('Raw gravity of Hawaii')
tmp = ax.contourf(Y, X, data['gravity'].reshape(shape), 60,
                  cmap='Reds')
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')

ax = plt.subplot(2, 3, 3)
ax.set_title('Topography')
# Symmetric color limits so the diverging colormap is centred on zero.
scale = np.abs([data['topography'].min(), data['topography'].max()]).max()
tmp = ax.contourf(Y, X, data['topography'].reshape(shape), 60,
                  cmap='terrain', vmin=-scale, vmax=scale)
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('m')

ax = plt.subplot(2, 3, 4)
ax.set_title('Gravity disturbance')
scale = np.abs([data['disturbance'].min(), data['disturbance'].max()]).max()
tmp = ax.contourf(Y, X, data['disturbance'].reshape(shape), 60,
                  cmap='RdBu_r', vmin=-scale, vmax=scale)
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')

# The disturbance without the effects of topography (calculated using the
# Bouguer plate)
ax = plt.subplot(2, 3, 5)
ax.set_title('Topography-free disturbance (Bouguer)')
tmp = ax.contourf(Y, X, data['topo-free-bouguer'].reshape(shape), 60,
                  cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')

# The disturbance without the effects of topography (calculated using a
# tesseroid model of the topography)
ax = plt.subplot(2, 3, 6)
ax.set_title('Topography-free disturbance (full)')
tmp = ax.contourf(Y, X, data['topo-free'].reshape(shape), 60,
                  cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0, aspect=30).set_label('mGal')

plt.tight_layout()
plt.show()
| bsd-3-clause |
manazhao/tf_recsys | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x'  # Name of the input feature.


def main(unused_argv):
    """Train and evaluate a DNNClassifier on Iris data round-tripped
    through an HDF5 file."""
    # Load dataset.
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)

    # Note that we are saving and load iris data as h5 format as a simple
    # demonstration here.
    h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
    h5f.create_dataset('X_train', data=x_train)
    h5f.create_dataset('X_test', data=x_test)
    h5f.create_dataset('y_train', data=y_train)
    h5f.create_dataset('y_test', data=y_test)
    h5f.close()

    # Read the four splits back as plain numpy arrays.
    h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
    x_train = np.array(h5f['X_train'])
    x_test = np.array(h5f['X_test'])
    y_train = np.array(h5f['y_train'])
    y_test = np.array(h5f['y_test'])

    # Build 3 layer DNN with 10, 20, 10 units respectively.
    feature_columns = [
        tf.feature_column.numeric_column(
            X_FEATURE, shape=np.array(x_train).shape[1:])]
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

    # Train.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=200)

    # Predict.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
    predictions = classifier.predict(input_fn=test_input_fn)
    y_predicted = np.array(list(p['class_ids'] for p in predictions))
    y_predicted = y_predicted.reshape(np.array(y_test).shape)

    # Score with sklearn.
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy (sklearn): {0:f}'.format(score))

    # Score with tensorflow.
    scores = classifier.evaluate(input_fn=test_input_fn)
    print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
    tf.app.run()
| apache-2.0 |
ebukoz/thrive | erpnext/accounts/report/tds_payable_monthly/tds_payable_monthly.py | 5 | 6066 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate
def execute(filters=None):
    """Report entry point: return (columns, rows) for the TDS payable
    monthly report."""
    # The invoice list is precomputed per user by get_tds_invoices() and cached.
    filters["invoices"] = frappe.cache().hget("invoices", frappe.session.user)
    validate_filters(filters)
    set_filters(filters)
    columns = get_columns(filters)
    if not filters["invoices"]:
        return columns, []
    return columns, get_result(filters)
def validate_filters(filters):
    ''' Validate if dates are properly set '''
    # Reject an inverted reporting period.
    if filters.to_date < filters.from_date:
        frappe.throw(_("From Date must be before To Date"))
def set_filters(filters):
    """Narrow ``filters["invoices"]`` to the selected supplier and/or
    purchase invoice; keep the full list when nothing matches."""
    if not filters["invoices"]:
        filters["invoices"] = get_tds_invoices()

    # Keep invoices satisfying every filter the user actually provided.
    matching = [
        d for d in filters["invoices"]
        if (not filters.supplier or d.supplier == filters.supplier)
        and (not filters.purchase_invoice or d.name == filters.purchase_invoice)
    ]
    if matching:
        filters["invoices"] = matching

    filters.naming_series = frappe.db.get_single_value('Buying Settings', 'supp_master_name')
def get_result(filters):
    """Assemble one report row per invoice on which TDS was deducted
    within the selected period."""
    supplier_map, tds_docs = get_supplier_map(filters)
    gle_map = get_gle_map(filters)
    out = []
    for d in gle_map:
        tds_deducted, total_amount_credited = 0, 0
        supplier = supplier_map[d]
        tds_doc = tds_docs[supplier.tax_withholding_category]
        # TDS account configured for the report's company, if any.
        account_list = [i.account for i in tds_doc.accounts if i.company == filters.company]
        if account_list:
            account = account_list[0]
        for k in gle_map[d]:
            # NOTE(review): k.party is compared to the supplier detail row
            # object, not to a name string -- confirm against GL Entry data.
            if k.party == supplier_map[d] and k.credit > 0:
                total_amount_credited += k.credit
            elif account_list and k.account == account and k.credit > 0:
                # A credit on the TDS account is the tax actually withheld.
                tds_deducted = k.credit
                total_amount_credited += k.credit
        # Withholding rate applicable to the entry's fiscal year.
        rate = [i.tax_withholding_rate for i in tds_doc.rates
            if i.fiscal_year == gle_map[d][0].fiscal_year]
        if rate and len(rate) > 0 and tds_deducted:
            rate = rate[0]
            # Only include entries whose posting date falls in the period.
            if getdate(filters.from_date) <= gle_map[d][0].posting_date \
                and getdate(filters.to_date) >= gle_map[d][0].posting_date:
                row = [supplier.pan, supplier.name]
                if filters.naming_series == 'Naming Series':
                    row.append(supplier.supplier_name)
                row.extend([tds_doc.name, supplier.supplier_type, rate, total_amount_credited,
                    tds_deducted, gle_map[d][0].posting_date, "Purchase Invoice", d])
                out.append(row)
    return out
def get_supplier_map(filters):
    """Map each purchase invoice to its supplier detail and pre-fetch all
    applicable Tax Withholding Category documents.

    Returns ``(supplier_map, tds_docs)`` where ``supplier_map`` is
    ``{invoice_name: supplier_row}`` and ``tds_docs`` is
    ``{tax_withholding_category: Tax Withholding Category doc}``.
    """
    supplier_map, tds_docs = {}, {}
    # Older schemas store the PAN under tax_id.
    pan = "pan" if frappe.db.has_column("Supplier", "pan") else "tax_id"
    supplier_detail = frappe.db.get_all('Supplier',
        {"name": ["in", [d.supplier for d in filters["invoices"]]]},
        ["tax_withholding_category", "name", pan+" as pan", "supplier_type", "supplier_name"])

    # Index once instead of scanning supplier_detail per invoice (was O(n*m)).
    detail_by_supplier = {row.name: row for row in supplier_detail}
    for d in filters["invoices"]:
        supplier_map[d.get("name")] = detail_by_supplier[d.get("supplier")]

    for d in supplier_detail:
        category = d.get("tax_withholding_category")
        if category not in tds_docs:
            tds_docs[category] = frappe.get_doc("Tax Withholding Category", category)
    return supplier_map, tds_docs
def get_gle_map(filters):
    # create gle_map of the form
    # {"purchase_invoice": list of dict of all gle created for this invoice}
    entries = frappe.db.get_all('GL Entry',
        {"voucher_no": ["in", [d.get("name") for d in filters["invoices"]]]},
        ["fiscal_year", "credit", "debit", "account", "voucher_no", "posting_date"])
    gle_map = {}
    for entry in entries:
        gle_map.setdefault(entry.voucher_no, []).append(entry)
    return gle_map
def get_columns(filters):
    """Report column definitions; the supplier-name column is added only
    when suppliers are keyed by a naming series rather than by name."""
    # Older schemas store the PAN under tax_id.
    pan = "pan" if frappe.db.has_column("Supplier", "pan") else "tax_id"
    columns = [
        {
            "label": _(frappe.unscrub(pan)),
            "fieldname": pan,
            "fieldtype": "Data",
            "width": 90
        },
        {
            "label": _("Supplier"),
            "options": "Supplier",
            "fieldname": "supplier",
            "fieldtype": "Link",
            "width": 180
        }]

    if filters.naming_series == 'Naming Series':
        columns.append({
            "label": _("Supplier Name"),
            "fieldname": "supplier_name",
            "fieldtype": "Data",
            "width": 180
        })

    columns.extend([
        {
            "label": _("Section Code"),
            "options": "Tax Withholding Category",
            "fieldname": "section_code",
            "fieldtype": "Link",
            "width": 180
        },
        {
            "label": _("Entity Type"),
            "fieldname": "entity_type",
            "fieldtype": "Data",
            "width": 180
        },
        {
            "label": _("TDS Rate %"),
            "fieldname": "tds_rate",
            "fieldtype": "Percent",
            "width": 90
        },
        {
            "label": _("Total Amount Credited"),
            "fieldname": "total_amount_credited",
            "fieldtype": "Float",
            "width": 90
        },
        {
            "label": _("Amount of TDS Deducted"),
            "fieldname": "tds_deducted",
            "fieldtype": "Float",
            "width": 90
        },
        {
            "label": _("Date of Transaction"),
            "fieldname": "transaction_date",
            "fieldtype": "Date",
            "width": 90
        },
        {
            "label": _("Transaction Type"),
            "fieldname": "transaction_type",
            "width": 90
        },
        {
            # Dynamic link: resolves against the transaction_type column.
            "label": _("Reference No."),
            "fieldname": "ref_no",
            "fieldtype": "Dynamic Link",
            "options": "transaction_type",
            "width": 90
        }
    ])
    return columns
@frappe.whitelist()
def get_tds_invoices():
    """Fetch purchase invoices of all TDS-applicable suppliers and cache
    them for the current session user."""
    tds_suppliers = [row.name for row in frappe.db.get_list(
        "Supplier", {"tax_withholding_category": ["!=", ""]}, ["name"])]
    invoices = [inv for inv in frappe.db.get_list(
        "Purchase Invoice", {"supplier": ["in", tds_suppliers]},
        ["name", "supplier"]) if inv.supplier]
    frappe.cache().hset("invoices", frappe.session.user, invoices)
    return invoices
| gpl-3.0 |
we7/vamp-aubio-plugins | aubio/tests/python/src/utils/hist.py | 4 | 2279 | from template import aubio_unit_template
from localaubio import *
import random
# Test fixture parameters.
buf_size = 2048
channels = 1
# Randomised histogram bounds: flow in [0, 101), fhig in [100, 1001).
flow = float(random.randint(0, 100) + random.random())
fhig = float(random.randint(100, 1000) + random.random())
nelems = 1000
class hist_unit(aubio_unit_template):
    """Unit tests for aubio_hist over a randomised [flow, fhig] range."""

    def setUp(self):
        self.o = new_aubio_hist(flow, fhig, nelems, channels)

    def tearDown(self):
        del_aubio_hist(self.o)

    def _check_constant_mean(self, constant, expected):
        """Feed a single sample of value *constant* and check the mean.

        A value inside the histogram range lands in exactly one of the
        nelems bins (mean 1./nelems); an out-of-range value contributes
        nothing (mean 0).
        """
        input = new_fvec(buf_size, channels)
        fvec_write_sample(input, constant, 0, 0)
        aubio_hist_do_notnull(self.o, input)
        self.assertCloseEnough(expected, aubio_hist_mean(self.o))
        del_fvec(input)

    def test_hist(self):
        """ create and delete hist """
        pass

    def test_hist_zeroes(self):
        """ test hist on zeroes """
        input = new_fvec(buf_size, channels)
        aubio_hist_do_notnull(self.o, input)
        aubio_hist_weight(self.o)
        self.assertEqual(0., aubio_hist_mean(self.o))
        del_fvec(input)

    def test_hist_impulse_top(self):
        """ impulse just below the upper bound falls in one bin """
        self._check_constant_mean(fhig - 1., 1./nelems)

    def test_hist_impulse_over(self):
        """ impulse above the upper bound is out of range, ignored """
        self._check_constant_mean(fhig + 1., 0.)

    def test_hist_impulse_bottom(self):
        """ impulse just above the lower bound falls in one bin """
        self._check_constant_mean(flow + 1., 1./nelems)

    def test_hist_impulse_under(self):
        """ impulse below the lower bound is out of range, ignored """
        self._check_constant_mean(flow - 1., 0.)
if __name__ == '__main__': unittest.main()  # run all hist_unit tests when executed directly
| gpl-2.0 |
sigmaris/python-gssapi | gssapi_ez_setup.py | 6 | 11837 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "1.1.5"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _check_call_py24(cmd, *args, **kwargs):
    """Fallback for Python 2.4, which lacks subprocess.check_call.

    Mirrors check_call's contract: run *cmd* and raise on a non-zero exit
    status.  Note the exception class is defined locally, so callers can
    only catch it as a generic Exception.
    """
    res = subprocess.call(cmd, *args, **kwargs)
    class CalledProcessError(Exception):
        pass
    if not res == 0:
        msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
        raise CalledProcessError(msg)
# Install the shim only when the real check_call is missing (Python 2.4).
vars(subprocess).setdefault('check_call', _check_call_py24)
def _install(tarball, install_args=()):
    """Unpack the setuptools *tarball* into a temp dir and run
    ``setup.py install`` there.  Returns 2 on failure, None on success."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the tarball contains a single top folder)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        # Always restore the cwd and drop the temporary tree.
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* into a temp dir and build a bdist_egg into *to_dir*.
    Raises IOError when the expected *egg* file was not produced."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the tarball contains a single top folder)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        # Always restore the cwd and drop the temporary tree.
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Download/build the setuptools egg (unless cached in *to_dir*), put
    it first on sys.path and import setuptools from it."""
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)

    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']

    import setuptools
    # Tell setuptools to install itself from this egg when asked to.
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure setuptools >= *version* is importable, downloading it if
    needed.  Exits the process when a conflicting setuptools has already
    been imported and cannot be replaced in-process."""
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("setuptools>=" + version)
        return
    except pkg_resources.VersionConflict:
        # py2.4-compatible spelling of `except ... as e`.
        e = sys.exc_info()[1]
        if was_imported:
            sys.stderr.write(
                "The required version of setuptools (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U setuptools'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return _do_download(version, download_base, to_dir,
                                download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    ps_script = (
        "(new-object System.Net.WebClient)"
        ".DownloadFile(%r, %r)" % (url, target)
    )
    subprocess.check_call(['powershell', '-Command', ps_script])
def has_powershell():
    """True when running on Windows and `powershell` can execute a command."""
    if platform.system() != 'Windows':
        return False
    probe = ['powershell', '-Command', 'echo test']
    sink = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(probe, stdout=sink, stderr=sink)
        except:
            return False
    finally:
        sink.close()
    return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Download *url* to *target* with curl (validates TLS trust)."""
    subprocess.check_call(['curl', url, '--silent', '--output', target])
def has_curl():
    """True when a working `curl` binary is on the PATH."""
    probe = ['curl', '--version']
    sink = open(os.path.devnull, 'wb')
    try:
        try:
            subprocess.check_call(probe, stdout=sink, stderr=sink)
        except:
            return False
    finally:
        sink.close()
    return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Download *url* to *target* with wget (validates TLS trust)."""
    subprocess.check_call(['wget', url, '--quiet', '--output-document', target])
def has_wget():
    """Return True if a working ``wget`` executable is on the PATH."""
    cmd = ['wget', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            return False
    return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """
    Use Python to download the file, even though it cannot authenticate the
    connection.
    """
    # urllib moved between Python 2 and 3; try the Python 3 location first.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        dst = open(target, "wb")
        dst.write(data)
    finally:
        # Close whichever handles were actually opened; either may still be
        # None if urlopen()/open() raised.
        if src:
            src.close()
        if dst:
            dst.close()
# Pure-Python fallback that needs no external tool, hence always viable.
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the first viable downloader, preferring validating ones.

    ``download_file_insecure`` is always viable, so in practice this never
    falls off the end of the candidate list.
    """
    candidates = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    for candidate in candidates:
        if candidate.viable():
            return candidate
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=os.curdir, delay=15,
        downloader_factory=get_best_downloader):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # NOTE(review): `delay` is accepted but never used in this body --
    # presumably kept for backward compatibility; confirm against callers.
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    tgz_name = "setuptools-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall for old Pythons; `self` is a TarFile.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode; real permissions are
            # restored afterwards, children-first.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # list.sort(key=..., reverse=...) appeared in 2.4; fall back to a
        # cmp-style sorter (cmp() only exists on Python 2).
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            # Python-2 style retrieval of the active exception.
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """
    Parse the command line for options
    """
    # optparse (not argparse) keeps compatibility with very old Pythons.
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    Returns the exit status of the ``setup.py install`` run, suitable for
    passing to ``sys.exit``.
    """
    options = _parse_args()
    # BUGFIX: forward `version` -- previously download_setuptools() was
    # called without it, so a caller-supplied version was silently ignored
    # and DEFAULT_VERSION was always fetched.
    tarball = download_setuptools(version=version,
                                  download_base=options.download_base,
                                  downloader_factory=options.downloader_factory)
    return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
    sys.exit(main())
| mit |
aeternallife/zookeeper | src/contrib/zkpython/src/python/setup.py | 128 | 1754 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup, Extension
# Root of the ZooKeeper source tree, relative to this setup.py.
zookeeper_basedir = "../../../"
# C extension module wrapping the multi-threaded ZooKeeper client library.
zookeepermodule = Extension("zookeeper",
                            sources=["src/c/zookeeper.c"],
                            include_dirs=[zookeeper_basedir + "/src/c/include",
                                          zookeeper_basedir + "/build/c",
                                          zookeeper_basedir + "/src/c/generated"],
                            libraries=["zookeeper_mt"],
                            # Search both in-tree build outputs and the
                            # system-wide install location for the library.
                            library_dirs=[zookeeper_basedir + "/src/c/.libs/",
                                          zookeeper_basedir + "/build/c/.libs/",
                                          zookeeper_basedir + "/build/test/test-cppunit/.libs",
                                          "/usr/local/lib"
                                          ])
setup( name="ZooKeeper",
       version = "0.4",
       description = "ZooKeeper Python bindings",
       ext_modules=[zookeepermodule] )
| apache-2.0 |
crowdhackathon-transport/optimizers | crowdstance-api/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 404 | 5976 | # urllib3/fields.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.
    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if not filename:
        return default
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or default
def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.
    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.
    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    # Fast path: if the value has no quote/backslash/CR/LF and encodes to
    # ASCII, a plain quoted form `name="value"` is sufficient.
    if not any(ch in value for ch in '"\\\r\n'):
        result = '%s="%s"' % (name, value)
        try:
            result.encode('ascii')
        except UnicodeEncodeError:
            # Non-ASCII: fall through to RFC 2231 encoding below.
            pass
        else:
            return result
    if not six.PY3:  # Python 2:
        # encode_rfc2231 expects bytes on Python 2.
        value = value.encode('utf-8')
    # Produces e.g. utf-8''%E2%82%AC style encoding; the `*=` marker signals
    # an RFC 2231 extended parameter.
    value = email.utils.encode_rfc2231(value, 'utf-8')
    value = '%s*=%s' % (name, value)
    return value
class RequestField(object):
    """
    A data container for request body parameters.
    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """
    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        # Copy so later header mutation never aliases the caller's dict.
        self.headers = {}
        if headers:
            self.headers = dict(headers)
    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
        Supports constructing :class:`~urllib3.fields.RequestField` from parameter
        of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
        tuple where the MIME type is optional. For example: ::
            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',
        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # 2-tuple: derive the MIME type from the filename.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Bare value: a simple form field with no filename.
            filename = None
            content_type = None
            data = value
        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)
        return request_param
    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.
        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)
    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.
        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.
        :param header_parts:
            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format as
            `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()
        for name, value in iterable:
            # Falsy values (None, '') are simply omitted from the header.
            if value:
                parts.append(self._render_part(name, value))
        return '; '.join(parts)
    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []
        # Emit the well-known headers first, in a fixed order, then any
        # remaining custom headers in dict order.
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))
        # Trailing blank line terminates the header block (CRLF CRLF).
        lines.append('\r\n')
        return '\r\n'.join(lines)
    def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
        """
        Makes this request field into a multipart request field.
        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.
        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        self.headers['Content-Disposition'] = content_disposition or 'form-data'
        # join(['', rendered]) prepends '; ' to the rendered name/filename
        # parts (or appends nothing when both are empty).
        self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
| mit |
lmjohns3/py-viewer | popglove/viewer.py | 1 | 11110 | '''This module contains OpenGL code for rendering a world.'''
from __future__ import division
import climate
import contextlib
import numpy as np
import os
import pyglet
# normally this import should work; the try/except here is so the documentation
# will build on readthedocs.org!
try:
from pyglet.gl import *
except ImportError:
pass
logging = climate.get_logger(__name__)
@contextlib.contextmanager
def gl_context(scale=None, translate=None, rotate=None, mat=None, color=None):
    '''Context manager that pushes a GL matrix (and optionally color) for the
    duration of the block and restores the previous state afterwards.

    Transformations are applied in the order: mat, translate, rotate, scale.
    '''
    last_color = None
    if color is not None:
        # Save the current color so it can be restored on exit.
        last_color = vec(0.0, 0.0, 0.0, 0.0)
        glGetFloatv(GL_CURRENT_COLOR, last_color)
        glColor4f(*color)
    glPushMatrix()
    if mat is not None:
        glMultMatrixf(vec(*mat))
    if translate is not None:
        glTranslatef(*translate)
    if rotate is not None:
        glRotatef(*rotate)
    if scale is not None:
        glScalef(*scale)
    yield
    glPopMatrix()
    # BUGFIX: only restore when a color was actually set. Previously
    # last_color was initialized to vec(0,0,0,0) unconditionally, so exiting
    # the context with no `color` argument reset the GL color to transparent
    # black, clobbering the caller's color state.
    if last_color is not None:
        glColor4f(*last_color)
def vec(*args):
    '''Pack the given numbers into a ctypes array of GLfloat.'''
    array_type = GLfloat * len(args)
    return array_type(*args)
def build_vertex_list(idx, vtx, nrm):
    '''Create an indexed pyglet vertex list from flat vertex/normal arrays.

    ``vtx`` and ``nrm`` are flat sequences of xyz triples; ``idx`` indexes
    into those triples.
    '''
    return pyglet.graphics.vertex_list_indexed(
        len(vtx) // 3, idx, ('v3f/static', vtx), ('n3f/static', nrm))
def box_vertices():
    '''Return (indices, vertices, normals) for a cube spanning [-1, 1]^3.

    Vertices are the 8 cube corners; each normal is the corresponding
    corner direction normalized to unit length. Indices form 12 triangles
    (two per face).
    '''
    corners = np.array([
        [1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1],
        [-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]], 'f')
    lengths = np.sqrt((corners * corners).sum(axis=1))
    normals = corners / lengths[:, None]
    indices = [
        0, 3, 2, 0, 2, 1, 4, 5, 7, 7, 5, 6,  # x
        0, 1, 4, 4, 1, 5, 6, 2, 3, 6, 3, 7,  # y
        0, 4, 7, 0, 7, 3, 1, 2, 5, 5, 2, 6,  # z
    ]
    return indices, corners.flatten(), normals.flatten()
def sphere_vertices(n=3):
    '''Return (indices, vertices, normals) for a unit sphere.

    Built by recursively subdividing an octahedron ``n`` times, pushing each
    new edge midpoint back onto the unit sphere. For a unit sphere the
    per-vertex normal equals the vertex itself, so the same flat array is
    returned for both vertices and normals.
    '''
    faces = [[0, 1, 2], [0, 5, 1], [0, 2, 4], [0, 4, 5],
             [3, 2, 1], [3, 4, 2], [3, 5, 4], [3, 1, 5]]
    points = list(np.array([
        [1, 0, 0], [0, 1, 0], [0, 0, 1],
        [-1, 0, 0], [0, -1, 0], [0, 0, -1]], 'f'))
    for _ in range(n):
        subdivided = []
        for ai, bi, ci in faces:
            a, b, c = points[ai], points[bi], points[ci]
            # Midpoints of the three edges, renormalized onto the sphere.
            for edge in (a + b, b + c, c + a):
                points.append(edge / np.linalg.norm(edge))
            ab, bc, ca = len(points) - 3, len(points) - 2, len(points) - 1
            # One corner triangle per original vertex plus the centre one.
            subdivided.extend(
                [[ai, ab, ca], [bi, bc, ab], [ci, ca, bc], [ab, bc, ca]])
        faces = subdivided
    flat = np.array(points, 'f').flatten()
    return np.array(faces).flatten(), flat, flat
def cylinder_vertices(n=14):
    '''Return (indices, vertices, normals) for a radius-1 cylinder along z.

    The cylinder spans z in [-1, 1] with ``n`` angular samples around the
    circumference. Vertices start with the two cap centres followed by the
    seam pair at angle 0; each loop step adds a new top/bottom pair and the
    triangles (cap fans plus two side triangles) joining it to the previous
    pair.
    '''
    indices = []
    vertices = [0, 0, 1, 0, 0, -1, 1, 0, 1, 1, 0, -1]
    normals = [0, 0, 1, 0, 0, -1, 1, 0, 0, 1, 0, 0]
    angles = np.linspace(0, 2 * np.pi, n)
    for step in range(len(angles) - 1):
        prev = 2 * (step + 1)
        cur = 2 * (step + 2)
        indices.extend([0, prev, cur, prev, prev + 1, cur,
                        cur, prev + 1, cur + 1, cur + 1, prev + 1, 1])
        x, y = np.cos(angles[step + 1]), np.sin(angles[step + 1])
        vertices.extend([x, y, 1, x, y, -1])
        normals.extend([x, y, 0, x, y, 0])
    return indices, vertices, normals
class EventLoop(pyglet.app.EventLoop):
    '''Custom pyglet event loop that ticks the clock on every iteration.

    Replaces the default pyglet loop so scheduled callbacks (step/render)
    are driven directly from this loop.
    '''
    def run(self):
        '''Run until :attr:`has_exit` is set (e.g. by ``pyglet.app.exit``).'''
        self.has_exit = False
        self._legacy_setup()
        platform_event_loop = pyglet.app.platform_event_loop
        platform_event_loop.start()
        self.dispatch_event('on_enter')
        self.is_running = True
        while not self.has_exit:
            # Fire due scheduled callbacks, then sleep until the next one.
            self.clock.tick()
            platform_event_loop.step(self.clock.get_sleep_time(True))
        self.is_running = False
        self.dispatch_event('on_exit')
        platform_event_loop.stop()
# use our event loop implementation rather than the default pyglet one.
pyglet.options['debug_gl'] = False
pyglet.app.event_loop = EventLoop()
class View(object):
    '''A POD class for, in this case, holding view parameters.
    Any keyword arguments passed to the constructor will be set as attributes on
    the instance. This is used in the :class:`Window` class for holding
    parameters related to the view (i.e., zoom, translation, etc.).
    '''
    def __init__(self, **kwargs):
        # Every keyword becomes a plain instance attribute.
        self.__dict__.update(kwargs)
class Window(pyglet.window.Window):
    '''This class wraps pyglet's Window for simple rendering of an OpenGL world.
    Default key bindings:
    - ESCAPE: close the window
    - SPACE: toggle pause
    - S: save a frame
    Parameters
    ----------
    paused : bool, optional
        Start the window with time paused. Defaults to False.
    floor_z : float, optional
        Height for a checkerboard floor in the rendered world. Defaults to 0.
        Set this to None to disable the floor.
    width : int, optional
        Initial width of the window. Defaults to 1027.
    height : int, optional
        Initial height of the window. Defaults to 603.
    Attributes
    ----------
    saved_frames : str
        Saved frames will be stored in this directory.
    paused : bool
        Current paused state of the renderer.
    frame_no : bool
        Number of the currently rendered frame, starting at 0. Increases by one
        with each call to :func:`render`.
    view : :class:`View`
        An object holding view parameters for the renderer.
    '''
    def __init__(self, dt=1. / 30, paused=False, save_frames=None, floor_z=0,
                 width=1200, height=675, resizable=True):
        # first, set up the pyglet screen, window, and display variables.
        platform = pyglet.window.get_platform()
        display = platform.get_default_display()
        screen = display.get_default_screen()
        try:
            # Prefer an antialiased, double-buffered config with depth+alpha.
            config = screen.get_best_config(Config(
                alpha_size=8,
                depth_size=24,
                double_buffer=True,
                sample_buffers=1,
                samples=4))
        except pyglet.window.NoSuchConfigException:
            # Fall back to whatever the platform offers.
            config = screen.get_best_config(Config())
        super(Window, self).__init__(
            width=width, height=height, resizable=resizable, vsync=False, config=config)
        # then, set up our own view parameters.
        self.step_dt = self.render_dt = dt
        self.frame_no = 0
        self.paused = paused
        self.save_frames = save_frames
        self.view = View(zoom=4.666, ty=0.23, tz=-0.5, ry=27, rz=-50)
        self.on_resize(self.width, self.height)
        # Global GL state: blending, lighting, culling, smoothing.
        glEnable(GL_BLEND)
        glEnable(GL_COLOR_MATERIAL)
        glEnable(GL_CULL_FACE)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_LIGHTING)
        glEnable(GL_NORMALIZE)
        glEnable(GL_POLYGON_SMOOTH)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glDepthFunc(GL_LEQUAL)
        glCullFace(GL_BACK)
        glFrontFace(GL_CCW)
        glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
        glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
        glShadeModel(GL_SMOOTH)
        # One white directional-ish light above the scene.
        glLightfv(GL_LIGHT0, GL_AMBIENT, vec(0.2, 0.2, 0.2, 1.0))
        glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1.0, 1.0, 1.0, 1.0))
        glLightfv(GL_LIGHT0, GL_POSITION, vec(3.0, 3.0, 10.0, 1.0))
        glEnable(GL_LIGHT0)
        # Prebuilt unit primitives reused by the draw_* helpers.
        self.box = build_vertex_list(*box_vertices())
        self.sphere = build_vertex_list(*sphere_vertices())
        self.cylinder = build_vertex_list(*cylinder_vertices(32))
        self.floor = None
        if floor_z is not None:
            # set up a chessboard floor.
            BLK = [150, 150, 150] * 6
            WHT = [160, 160, 160] * 6
            N = 20
            z = floor_z
            vtx = []
            # Two triangles per 1x1 tile over a 2N x 2N grid.
            for i in range(N, -N, -1):
                for j in range(-N, N, 1):
                    vtx.extend((j, i, z, j, i-1, z, j+1, i, z,
                                j+1, i, z, j, i-1, z, j+1, i-1, z))
            self.floor = pyglet.graphics.vertex_list(
                len(vtx) // 3,
                ('v3f/static', vtx),
                ('c3B/static', ((BLK + WHT) * N + (WHT + BLK) * N) * N),
                ('n3i/static', [0, 0, 1] * (len(vtx) // 3)))
    def _update_view(self):
        '''Recompute the modelview matrix from the current view parameters.'''
        # http://njoubert.com/teaching/cs184_fa08/section/sec09_camera.pdf
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(1, 0, 0, 0, 0, 0, 0, 0, 1)
        glTranslatef(-self.view.zoom, self.view.ty, self.view.tz)
        glRotatef(self.view.ry, 0, 1, 0)
        glRotatef(self.view.rz, 0, 0, 1)
    def on_mouse_scroll(self, x, y, dx, dy):
        '''Zoom the camera in (scroll up) or out (scroll down).'''
        if dy == 0:
            return
        self.view.zoom *= 1.1 ** (-1 if dy > 0 else 1)
        self._update_view()
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        '''Pan with the left button; rotate with any other button.'''
        if buttons == pyglet.window.mouse.LEFT:
            # pan
            self.view.ty += 0.03 * dx
            self.view.tz += 0.03 * dy
        else:
            # roll
            self.view.ry += 0.2 * -dy
            self.view.rz += 0.2 * dx
        self._update_view()
    def on_resize(self, width, height):
        '''Reset the viewport and projection to match the new window size.'''
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # NOTE(review): relies on `glu` being exported by pyglet.gl's star
        # import (gluLookAt above is called unprefixed) -- confirm both
        # names resolve for the pyglet version in use.
        glu.gluPerspective(45, float(width) / height, 1, 100)
        self._update_view()
    def on_key_press(self, key, modifiers):
        '''Dispatch key presses: subclass hook first, then defaults.'''
        keymap = pyglet.window.key
        # Give subclasses first refusal via grab_key_press.
        if self.grab_key_press(key, modifiers, keymap):
            return
        if key == keymap.ESCAPE:
            pyglet.app.exit()
        if key == keymap.SPACE:
            self.paused = False if self.paused else True
        if key == keymap.S and self.save_frames:
            self.save_frame()
    def save_frame(self, dt=None):
        '''Write the current color buffer to a numbered PNG in save_frames.

        ``dt`` is ignored; it exists so this can also be used directly as a
        pyglet scheduled callback.
        '''
        bn = 'frame-{:05d}.png'.format(self.frame_no)
        fn = os.path.join(self.save_frames, bn)
        logging.info('saving frame %s', fn)
        pyglet.image.get_buffer_manager().get_color_buffer().save(fn)
    def _render(self, dt):
        '''Internal per-frame draw: floor, then subclass render(), then flip.'''
        self.switch_to()
        self.clear()
        if self.floor is not None:
            self.floor.draw(GL_TRIANGLES)
        self.render(dt)
        self.flip()
    def _step(self, dt):
        '''Internal per-tick update; advances frame_no unless paused.'''
        if not self.paused:
            self.frame_no += 1
            self.step(dt)
    def draw_sphere(self, *args, **kwargs):
        '''Draw the unit sphere under a temporary gl_context transform.'''
        with gl_context(*args, **kwargs):
            self.sphere.draw(GL_TRIANGLES)
    def draw_box(self, *args, **kwargs):
        '''Draw the unit box under a temporary gl_context transform.'''
        with gl_context(*args, **kwargs):
            self.box.draw(GL_TRIANGLES)
    def draw_cylinder(self, *args, **kwargs):
        '''Draw the unit cylinder under a temporary gl_context transform.'''
        with gl_context(*args, **kwargs):
            self.cylinder.draw(GL_TRIANGLES)
    def draw_lines(self, vertices, color=None):
        '''Draw line segments from consecutive pairs of xyz vertices.'''
        if color is not None:
            glColor4f(*color)
        glBegin(GL_LINES)
        for v in vertices:
            glVertex3f(*v)
        glEnd()
    # Expose glColor4f directly as a convenience method.
    set_color = glColor4f
    def exit(self):
        '''Stop the pyglet application loop.'''
        pyglet.app.exit()
    def run(self, movie=False):
        '''Schedule step/render (and optional frame saving) and run the app.'''
        pyglet.clock.schedule_interval(self._step, self.step_dt)
        pyglet.clock.schedule_interval(self._render, self.render_dt)
        if movie and self.save_frames:
            pyglet.clock.schedule_interval(self.save_frame, self.render_dt)
        pyglet.app.run()
    def grab_key_press(self, key, modifiers, keymap):
        '''Subclass hook: return truthy to consume a key press.'''
        pass
    def step(self, dt):
        '''Subclass hook: advance the simulation by dt seconds.'''
        pass
    def render(self, dt):
        '''Subclass hook: draw the world for the current frame.'''
        pass
| mit |
migasfree/migasfree-backend | migasfree/client/views/__init__.py | 1 | 1230 | # -*- coding: UTF-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <jachavar@gmail.com>
# Copyright (c) 2015-2021 Alberto Gacías <alberto@migasfree.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .safe import (
SafeComputerViewSet, SafeSynchronizationView, SafeEndOfTransmissionView,
)
from .public import (
PackagerKeysView, ProjectKeysView,
RepositoriesKeysView,
)
from .token import (
ComputerViewSet, ErrorViewSet,
FaultDefinitionViewSet, FaultViewSet,
PackageHistoryViewSet, NotificationViewSet, MigrationViewSet,
StatusLogViewSet, SynchronizationViewSet, UserViewSet,
MessageViewSet,
)
| gpl-3.0 |
bwrsandman/OpenUpgrade | openerp/tools/mail.py | 125 | 29474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import cgi
import logging
import lxml.html
import lxml.html.clean as clean
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
import openerp
from openerp.loglevels import ustr
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
# Tags removed together with their entire content (scripts, embeds, ...).
tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
# Tags stripped while keeping their children in place.
tags_to_remove = ['html', 'body', 'font']
# allow new semantic HTML5 tags
allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment])
# Attribute whitelist used in strict mode: lxml defaults plus style and the
# data-oe-*/data-* attributes the OpenERP editor relies on.
safe_attrs = clean.defs.safe_attrs | frozenset(
    ['style',
     'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translate', 'data-oe-nodeid',
     'data-snippet-id', 'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id'
     ])
def html_sanitize(src, silent=True, strict=False, strip_style=False):
if not src:
return src
src = ustr(src, errors='replace')
logger = logging.getLogger(__name__ + '.html_sanitize')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
# html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
src = src.replace('<%', cgi.escape('<%'))
src = src.replace('%>', cgi.escape('%>'))
kwargs = {
'page_structure': True,
'style': strip_style, # True = remove style tags/attrs
'forms': True, # remove form tags
'remove_unknown_tags': False,
'allow_tags': allowed_tags,
'comments': False,
'processing_instructions': False
}
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
kwargs.update({
'kill_tags': tags_to_kill,
'remove_tags': tags_to_remove,
})
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
if strict:
if etree.LXML_VERSION >= (3, 1, 0):
# lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
kwargs.update({
'safe_attrs_only': True,
'safe_attrs': safe_attrs,
})
else:
kwargs['safe_attrs_only'] = False # keep oe-data attributes + style
kwargs['frames'] = False, # do not remove frames (embbed video in CMS blogs)
try:
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = clean.Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace('%24', '$')
cleaned = cleaned.replace('%7B', '{')
cleaned = cleaned.replace('%7D', '}')
cleaned = cleaned.replace('%20', ' ')
cleaned = cleaned.replace('%5B', '[')
cleaned = cleaned.replace('%5D', ']')
cleaned = cleaned.replace('<%', '<%')
cleaned = cleaned.replace('%>', '%>')
except etree.ParserError, e:
if 'empty' in str(e):
return ""
if not silent:
raise
logger.warning('ParserError obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>ParserError when sanitizing</p>'
except Exception:
if not silent:
raise
logger.warning('unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>Unknown error when sanitizing</p>'
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if cleaned.startswith('<div>') and cleaned.endswith('</div>'):
cleaned = cleaned[5:-6]
return cleaned
#----------------------------------------------------------
# HTML Cleaner
#----------------------------------------------------------
def html_email_clean(html, remove=False, shorten=False, max_length=300, expand_options=None,
protect_sections=False):
""" html_email_clean: clean the html by doing the following steps:
- try to strip email quotes, by removing blockquotes or having some client-
specific heuristics
- try to strip signatures
- shorten the html to a maximum number of characters if requested
Some specific use case:
- MsOffice: ``div.style = border-top:solid;`` delimitates the beginning of
a quote; detecting by finding WordSection1 of MsoNormal
- Hotmail: ``hr.stopSpelling`` delimitates the beginning of a quote; detect
Hotmail by funding ``SkyDrivePlaceholder``
:param string html: sanitized html; tags like html or head should not
be present in the html string. This method therefore
takes as input html code coming from a sanitized source,
like fields.html.
:param boolean remove: remove the html code that is unwanted; otherwise it
is only flagged and tagged
:param boolean shorten: shorten the html; every excessing content will
be flagged as to remove
:param int max_length: if shortening, maximum number of characters before
shortening
:param dict expand_options: options for the read more link when shortening
the content.The used keys are the following:
- oe_expand_container_tag: class applied to the
container of the whole read more link
- oe_expand_container_class: class applied to the
link container (default: oe_mail_expand)
- oe_expand_container_content: content of the
container (default: ...)
- oe_expand_separator_node: optional separator, like
adding ... <br /><br /> <a ...>read more</a> (default: void)
- oe_expand_a_href: href of the read more link itself
(default: #)
- oe_expand_a_class: class applied to the <a> containing
the link itself (default: oe_mail_expand)
- oe_expand_a_content: content of the <a> (default: read more)
The formatted read more link is the following:
<cont_tag class="oe_expand_container_class">
oe_expand_container_content
if expand_options.get('oe_expand_separator_node'):
<oe_expand_separator_node/>
<a href="oe_expand_a_href" class="oe_expand_a_class">
oe_expand_a_content
</a>
</span>
"""
def _replace_matching_regex(regex, source, replace=''):
""" Replace all matching expressions in source by replace """
if not source:
return source
dest = ''
idx = 0
for item in re.finditer(regex, source):
dest += source[idx:item.start()] + replace
idx = item.end()
dest += source[idx:]
return dest
def _create_node(tag, text, tail=None, attrs={}):
new_node = etree.Element(tag)
new_node.text = text
new_node.tail = tail
for key, val in attrs.iteritems():
new_node.set(key, val)
return new_node
def _insert_new_node(node, index, new_node_tag, new_node_text, new_node_tail=None, new_node_attrs={}):
new_node = _create_node(new_node_tag, new_node_text, new_node_tail, new_node_attrs)
node.insert(index, new_node)
return new_node
def _tag_matching_regex_in_text(regex, node, new_node_tag='span', new_node_attrs={}):
text = node.text or ''
if not re.search(regex, text):
return
cur_node = node
node.text = ''
idx, iteration = 0, 0
for item in re.finditer(regex, text):
if iteration == 0:
cur_node.text = text[idx:item.start()]
else:
_insert_new_node(node, (iteration - 1) * 2 + 1, new_node_tag, text[idx:item.start()])
new_node = _insert_new_node(node, iteration * 2, new_node_tag, text[item.start():item.end()], None, new_node_attrs)
cur_node = new_node
idx = item.end()
iteration += 1
new_node = _insert_new_node(node, -1, new_node_tag, text[idx:] + (cur_node.tail or ''), None, {})
    def _truncate_node(node, position, simplify_whitespaces=True):
        """ Truncate a node text at a given position. This algorithm will shorten
        at the end of the word whose ending character exceeds position.
        :param int position: character index in node.text after which to cut
        :param bool simplify_whitespaces: whether to try to count all successive
                                          whitespaces as one character. This
                                          option should not be True when trying
                                          to keep 'pre' consistency.
        """
        if node.text is None:
            node.text = ''
        truncate_idx = -1
        if simplify_whitespaces:
            # Count whitespace-separated words until the cumulative length
            # reaches *position*, then cut just after that word.
            cur_char_nbr = 0
            word = None
            node_words = node.text.strip(' \t\r\n').split()
            for word in node_words:
                cur_char_nbr += len(word)
                if cur_char_nbr >= position:
                    break
            if word:
                # NOTE(review): str.find returns the FIRST occurrence of
                # *word*, which may cut early when the word repeats -- confirm.
                truncate_idx = node.text.find(word) + len(word)
        else:
            truncate_idx = position
        if truncate_idx == -1 or truncate_idx > len(node.text):
            truncate_idx = len(node.text)
        # compose new text bits
        innertext = node.text[0:truncate_idx]
        outertext = node.text[truncate_idx:]
        node.text = innertext
        # create <span> ... <a href="#">read more</a></span> node
        # (tag/content/class all overridable through expand_options)
        read_more_node = _create_node(
            expand_options.get('oe_expand_container_tag', 'span'),
            expand_options.get('oe_expand_container_content', ' ... '),
            None,
            {'class': expand_options.get('oe_expand_container_class', 'oe_mail_expand')}
        )
        if expand_options.get('oe_expand_separator_node'):
            # optional separator element (e.g. <br/>) before the link
            read_more_separator_node = _create_node(
                expand_options.get('oe_expand_separator_node'),
                '',
                None,
                {}
            )
            read_more_node.append(read_more_separator_node)
        read_more_link_node = _create_node(
            'a',
            expand_options.get('oe_expand_a_content', _('read more')),
            None,
            {
                'href': expand_options.get('oe_expand_a_href', '#'),
                'class': expand_options.get('oe_expand_a_class', 'oe_mail_expand'),
            }
        )
        read_more_node.append(read_more_link_node)
        # create outertext node holding the truncated-away remainder
        overtext_node = _create_node('span', outertext)
        # tag node so post-processing hides (or removes) the overflow
        overtext_node.set('in_overlength', '1')
        # add newly created nodes in dom
        node.append(read_more_node)
        node.append(overtext_node)
if expand_options is None:
expand_options = {}
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
# Pre processing
# ------------------------------------------------------------
# TDE TODO: --- MAIL ORIGINAL ---: '[\-]{4,}([^\-]*)[\-]{4,}'
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
html = doctype.sub(r"", html)
# html: ClEditor seems to love using <div><br /><div> -> replace with <br />
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)', re.IGNORECASE)
html = _replace_matching_regex(br_div_tags, html, '<br />')
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
for node in root.iter():
# remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
if node.tail:
tail_node = _create_node('span', node.tail)
node.tail = None
node.addnext(tail_node)
# form node and tag text-based quotes and signature
_tag_matching_regex_in_text(quote_tags, node, 'span', {'text_quote': '1'})
_tag_matching_regex_in_text(signature, node, 'span', {'text_signature': '1'})
# Processing
# ------------------------------------------------------------
# tree: tag nodes
# signature_begin = False # try dynamic signature recognition
quote_begin = False
overlength = False
overlength_section_id = None
overlength_section_count = 0
cur_char_nbr = 0
for node in root.iter():
# comments do not need processing
# note: bug in node.get(value, default) for HtmlComments, default never returned
if node.tag == etree.Comment:
continue
# do not take into account multiple spaces that are displayed as max 1 space in html
node_text = ' '.join((node.text and node.text.strip(' \t\r\n') or '').split())
# root: try to tag the client used to write the html
if 'WordSection1' in node.get('class', '') or 'MsoNormal' in node.get('class', ''):
root.set('msoffice', '1')
if 'SkyDrivePlaceholder' in node.get('class', '') or 'SkyDrivePlaceholder' in node.get('id', ''):
root.set('hotmail', '1')
# protect sections by tagging section limits and blocks contained inside sections, using an increasing id to re-find them later
if node.tag == 'section':
overlength_section_count += 1
node.set('section_closure', str(overlength_section_count))
if node.getparent() is not None and (node.getparent().get('section_closure') or node.getparent().get('section_inner')):
node.set('section_inner', str(overlength_section_count))
# state of the parsing: flag quotes and tails to remove
if quote_begin:
node.set('in_quote', '1')
node.set('tail_remove', '1')
# state of the parsing: flag when being in over-length content, depending on section content if defined (only when having protect_sections)
if overlength:
if not overlength_section_id or int(node.get('section_inner', overlength_section_count + 1)) > overlength_section_count:
node.set('in_overlength', '1')
node.set('tail_remove', '1')
# find quote in msoffice / hotmail / blockquote / text quote and signatures
if root.get('msoffice') and node.tag == 'div' and 'border-top:solid' in node.get('style', ''):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if root.get('hotmail') and node.tag == 'hr' and ('stopSpelling' in node.get('class', '') or 'stopSpelling' in node.get('id', '')):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if node.tag == 'blockquote' or node.get('text_quote') or node.get('text_signature'):
# here no quote_begin because we want to be able to remove some quoted
# text without removing all the remaining context
node.set('in_quote', '1')
if node.getparent() is not None and node.getparent().get('in_quote'):
# inside a block of removed text but not in quote_begin (see above)
node.set('in_quote', '1')
# shorten:
# if protect section:
# 1/ find the first parent not being inside a section
# 2/ add the read more link
# else:
# 1/ truncate the text at the next available space
# 2/ create a 'read more' node, next to current node
# 3/ add the truncated text in a new node, next to 'read more' node
node_text = (node.text or '').strip().strip('\n').strip()
if shorten and not overlength and cur_char_nbr + len(node_text) > max_length:
node_to_truncate = node
while node_to_truncate.getparent() is not None:
if node_to_truncate.get('in_quote'):
node_to_truncate = node_to_truncate.getparent()
elif protect_sections and (node_to_truncate.getparent().get('section_inner') or node_to_truncate.getparent().get('section_closure')):
node_to_truncate = node_to_truncate.getparent()
overlength_section_id = node_to_truncate.get('section_closure')
else:
break
overlength = True
node_to_truncate.set('truncate', '1')
if node_to_truncate == node:
node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
else:
node_to_truncate.set('truncate_position', str(len(node.text or '')))
cur_char_nbr += len(node_text)
# Tree modification
# ------------------------------------------------------------
for node in root.iter():
if node.get('truncate'):
_truncate_node(node, int(node.get('truncate_position', '0')), node.tag != 'pre')
# Post processing
# ------------------------------------------------------------
to_remove = []
for node in root.iter():
if node.get('in_quote') or node.get('in_overlength'):
# copy the node tail into parent text
if node.tail and not node.get('tail_remove'):
parent = node.getparent()
parent.tail = node.tail + (parent.tail or '')
to_remove.append(node)
if node.get('tail_remove'):
node.tail = ''
# clean node
for attribute_name in ['in_quote', 'tail_remove', 'in_overlength', 'msoffice', 'hotmail', 'truncate', 'truncate_position']:
node.attrib.pop(attribute_name, None)
for node in to_remove:
if remove:
node.getparent().remove(node)
else:
if not expand_options.get('oe_expand_a_class', 'oe_mail_expand') in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
node_class = node.get('class', '') + ' oe_mail_cleaned'
node.set('class', node_class)
# html: \n that were tail of elements have been encapsulated into <span> -> back to \n
html = etree.tostring(root, pretty_print=False)
linebreaks = re.compile(r'<span[^>]*>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
html = _replace_matching_regex(linebreaks, html, '\n')
return html
#----------------------------------------------------------
# HTML/Text management
#----------------------------------------------------------
def html2plaintext(html, body_id=None, encoding='utf-8'):
    """ From an HTML text, convert the HTML to plain text.
    If @param body_id is provided then this is the tag where the
    body (not necessarily <body>) starts.

    :param html: HTML markup (str or unicode)
    :param body_id: optional id of the element to use as the body root
    :param encoding: encoding passed to etree.tostring() when re-serialising
    :return: plain-text rendering, with link URLs collected as footnotes

    Fix: the entity-decoding replace() calls had lost their entity names
    (they read ``replace('>', '>')`` etc., i.e. no-ops, and a destructive
    whitespace strip) -- restored to '&#13;', '&gt;', '&lt;', '&amp;'.
    """
    ## (c) Fry-IT, www.fry-it.com, 2007
    ## <peter@fry-it.com>
    ## download here: http://www.peterbe.com/plog/html2plaintext
    html = ustr(html)
    tree = etree.fromstring(html, parser=etree.HTMLParser())
    if body_id is not None:
        source = tree.xpath('//*[@id=%s]' % (body_id,))
    else:
        source = tree.xpath('//body')
    if len(source):
        tree = source[0]
    # turn links into "text [N]" markers and collect the URLs so they can
    # be appended as numbered footnotes at the end
    url_index = []
    i = 0
    for link in tree.findall('.//a'):
        url = link.get('href')
        if url:
            i += 1
            link.tag = 'span'
            link.text = '%s [%s]' % (link.text, i)
            url_index.append(url)
    html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
    html = html.replace('<strong>', '*').replace('</strong>', '*')
    html = html.replace('<b>', '*').replace('</b>', '*')
    html = html.replace('<h3>', '*').replace('</h3>', '*')
    html = html.replace('<h2>', '**').replace('</h2>', '**')
    html = html.replace('<h1>', '**').replace('</h1>', '**')
    html = html.replace('<em>', '/').replace('</em>', '/')
    html = html.replace('<tr>', '\n')
    html = html.replace('</p>', '\n')
    html = re.sub(r'<br\s*/?>', '\n', html)
    html = re.sub(r'<.*?>', ' ', html)
    html = html.replace(' ' * 2, ' ')
    # decode the entities etree.tostring() escaped in text content;
    # '&amp;' must come last so '&amp;gt;' is not double-decoded
    html = html.replace('&gt;', '>')
    html = html.replace('&lt;', '<')
    html = html.replace('&amp;', '&')
    # strip all lines
    html = '\n'.join([x.strip() for x in html.splitlines()])
    html = html.replace('\n' * 2, '\n')
    for i, url in enumerate(url_index):
        if i == 0:
            html += '\n\n'
        html += ustr('[%s] %s\n') % (i + 1, url)
    return html
def plaintext2html(text, container_tag=False):
    """ Convert plaintext into html. Content of the text is escaped to manage
    html entities, using cgi.escape().
    - all \n,\r are replaced by <br />
    - enclose content into <p>
    - 2 or more consecutive <br /> are considered as paragraph breaks
    :param string container_tag: container of the html; by default the
        content is embedded into a <div>
    """
    text = cgi.escape(ustr(text))
    # 1. normalise both newline flavours to explicit <br/> tags
    text = text.replace('\n', '<br/>').replace('\r', '<br/>')
    # 2-3. runs of two or more <br/> mark paragraph boundaries
    br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})')
    pieces = ['<p>']
    pos = 0
    for match in re.finditer(br_tags, text):
        pieces.append(text[pos:match.start()])
        pieces.append('</p><p>')
        pos = match.end()
    pieces.append(text[pos:])
    pieces.append('</p>')
    final = ''.join(pieces)
    # 4. optional outer container
    if container_tag:
        final = '<%s>%s</%s>' % (container_tag, final, container_tag)
    return ustr(final)
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
    """ Append extra content at the end of an HTML snippet, trying
    to locate the end of the HTML document (</body>, </html>, or
    EOF), and converting the provided content in html unless ``plaintext``
    is False.
    Content conversion can be done in two ways:
    - wrapping it into a pre (preserve=True)
    - use plaintext2html (preserve=False, using container_tag to wrap the
      whole content)
    A side-effect of this method is to coerce all HTML tags to
    lowercase in ``html``, and strip enclosing <html> or <body> tags in
    content if ``plaintext`` is False.
    :param str html: html tagsoup (doesn't have to be XHTML)
    :param str content: extra content to append
    :param bool plaintext: whether content is plaintext and should
        be wrapped in a <pre/> tag.
    :param bool preserve: if content is plaintext, wrap it into a <pre>
        instead of converting it into html
    """
    html = ustr(html)
    if plaintext and preserve:
        content = u'\n<pre>%s</pre>\n' % ustr(content)
    elif plaintext:
        content = '\n%s\n' % plaintext2html(content, container_tag)
    else:
        # content is already html: drop any document-level wrapper tags
        content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
        content = u'\n%s\n' % ustr(content)
    # Force all tags to lowercase so the </body>/</html> search below works
    html = re.sub(r'(</?)\W*(\w+)([ >])',
        lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
    # insert just before </body> (preferred) or </html>, else append at EOF
    insert_location = html.find('</body>')
    if insert_location == -1:
        insert_location = html.find('</html>')
    if insert_location == -1:
        return '%s%s' % (html, content)
    return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6})""", re.VERBOSE)
# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}$""", re.VERBOSE)
# matches a bracketed record number such as "[42]"; group(1) = the number
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
# matches "Set-<name>: <value>" command lines (case-insensitive)
command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE)
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?[^>]*@([^>]*)>", re.UNICODE)
def generate_tracking_message_id(res_id):
    """Returns a string that can be used in the Message-ID RFC822 header field
    Used to track the replies related to a given object thanks to the "In-Reply-To"
    or "References" fields that Mail User Agents will set.
    """
    # Prefer an OS-backed random source; the call itself may raise
    # NotImplementedError when os.urandom is unavailable, so it stays
    # inside the try block.
    try:
        rnd_value = random.SystemRandom().random()
    except NotImplementedError:
        rnd_value = random.random()
    random_part = ("%.15f" % rnd_value)[2:]
    return "<%.15f.%s-openerp-%s@%s>" % (
        time.time(), random_part, res_id, socket.gethostname())
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
               attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None,
               smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None):
    """Low-level function for sending an email (deprecated).
    :deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead.
    :param email_from: A string used to fill the `From` header, if falsy,
                       config['email_from'] is used instead. Also used for
                       the `Reply-To` header if `reply_to` is not provided
    :param email_to: a sequence of addresses to send the mail to.
    :return: result of ir.mail_server.send_email(), or False on failure.
    :raise Exception: when no cursor was passed and none can be derived
                      from the current thread's database name.
    """
    # If not cr, get cr from current thread database
    local_cr = None
    if not cr:
        db_name = getattr(threading.currentThread(), 'dbname', None)
        if db_name:
            local_cr = cr = openerp.registry(db_name).cursor()
        else:
            raise Exception("No database cursor found, please pass one explicitly")
    # Send Email
    try:
        mail_server_pool = openerp.registry(cr.dbname)['ir.mail_server']
        res = False
        # Pack Message into MIME Object
        email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to,
                   attachments, message_id, references, openobject_id, subtype, headers=headers)
        res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None,
                       smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
                       smtp_encryption=('ssl' if ssl else None), smtp_debug=debug)
    except Exception:
        _logger.exception("tools.email_send failed to deliver email")
        return False
    finally:
        # only close the cursor if this function opened it itself
        if local_cr:
            cr.close()
    return res
def email_split(text):
    """ Return a list of the email addresses found in ``text`` """
    if not text:
        return []
    # getaddresses() returns '' when email parsing fails, and sometimes
    # returns emails without at least '@'. The '@' is strictly required
    # in RFC2822's `addr-spec`, so those entries are filtered out.
    pairs = getaddresses([text])
    return [email for _name, email in pairs if email and '@' in email]
| agpl-3.0 |
adderall/regulations-site | regulations/tests/views_diff_tests.py | 7 | 8352 | from unittest import TestCase
from django.test import RequestFactory
from mock import patch
from regulations.views.diff import *
class ChromeSectionDiffViewTests(TestCase):
    """Tests for the diff view helpers (extract_sections, diff_toc) and
    for ChromeSectionDiffView.add_diff_content."""
    def test_extract_sections_subparts(self):
        # Sections nested under a subpart must be flattened into the result.
        sub = [{'section_id': '8888-1', 'index': ['8888', '1']},
               {'section_id': '8888-3', 'index': ['8888', '3']}]
        toc = [{
            'section_id': '8888-Subpart-A',
            'index': ['8888', 'Subpart', 'A'],
            'sub_toc': sub},
            {'section_id': '8888-Interp', 'index': ['8888', 'Interp']}]
        sections = extract_sections(toc)
        self.assertEqual(['8888', '1'], sections[0]['index'])
        self.assertEqual(['8888', '3'], sections[1]['index'])
        self.assertEqual(['8888', 'Interp'], sections[2]['index'])
    def test_extract_sections(self):
        # A flat TOC passes through unchanged, in order.
        toc = [{'section_id': '8888-1', 'index': ['8888', '1']},
               {'section_id': '8888-3', 'index': ['8888', '3']}]
        sections = extract_sections(toc)
        self.assertEqual(['8888', '1'], sections[0]['index'])
        self.assertEqual(['8888', '3'], sections[1]['index'])
    def test_diff_toc(self):
        """Integration test."""
        old_toc = [{'section_id': '8888-1', 'index': ['8888', '1'],
                    'is_section':True},
                   {'section_id': '8888-3', 'index': ['8888', '3'],
                    'is_section':True},
                   {'section_id': '8888-4', 'index': ['8888', '4'],
                    'is_section':True},
                   {'section_id': '8888-A', 'index': ['8888', 'A'],
                    'is_appendix':True},
                   {'section_id': '8888-B', 'index': ['8888', 'B'],
                    'is_appendix':True},
                   {'section_id': '8888-Interp', 'index': ['8888', 'Interp'],
                    'is_supplement':True}]
        diff = {
            '8888-2': {'op': 'added',
                       'node': {'title': '8888.2', 'label': ['8888', '2']}},
            '8888-C': {'op': 'added',
                       'node': {'title': 'App C', 'label': ['8888', 'C']}},
            '8888-1-a': {'op': 'modified'},
            '8888-B': {'op': 'deleted'},
            '8888-3-b': {'op': 'deleted'},
            '8888-B-1': {'op': 'modified'}
        }
        result = diff_toc('oldold', 'newnew', old_toc, diff, 'from_ver')
        # 6 original entries + 2 added (8888-2, 8888-C) = 8, sorted in order
        self.assertEqual(8, len(result))
        # child ops (e.g. 8888-1-a) roll up to 'modified' on the parent
        self.assertTrue('8888-1' in result[0]['url'])
        self.assertTrue('?from_version=from_ver' in result[0]['url'])
        self.assertEqual('8888-1', result[0]['section_id'])
        self.assertEqual('modified', result[0]['op'])
        self.assertTrue('8888-2' in result[1]['url'])
        self.assertTrue('?from_version=from_ver' in result[1]['url'])
        self.assertEqual('8888-2', result[1]['section_id'])
        self.assertEqual('added', result[1]['op'])
        self.assertTrue('8888-3' in result[2]['url'])
        self.assertTrue('?from_version=from_ver' in result[2]['url'])
        self.assertEqual('8888-3', result[2]['section_id'])
        self.assertEqual('modified', result[2]['op'])
        # untouched entries carry no 'op' key at all
        self.assertTrue('8888-4' in result[3]['url'])
        self.assertTrue('?from_version=from_ver' in result[3]['url'])
        self.assertEqual('8888-4', result[3]['section_id'])
        self.assertFalse('op' in result[3])
        self.assertTrue('8888-A' in result[4]['url'])
        self.assertTrue('?from_version=from_ver' in result[4]['url'])
        self.assertEqual('8888-A', result[4]['section_id'])
        self.assertFalse('op' in result[4])
        self.assertTrue('8888-B' in result[5]['url'])
        self.assertTrue('?from_version=from_ver' in result[5]['url'])
        self.assertEqual('8888-B', result[5]['section_id'])
        self.assertEqual('deleted', result[5]['op'])
        self.assertTrue('8888-C' in result[6]['url'])
        self.assertTrue('?from_version=from_ver' in result[6]['url'])
        self.assertEqual('8888-C', result[6]['section_id'])
        self.assertEqual('added', result[6]['op'])
        self.assertTrue('8888-Interp' in result[7]['url'])
        self.assertEqual('8888-Interp', result[7]['section_id'])
        self.assertFalse('op' in result[7])
        for el in result:
            self.assertTrue('oldold', el['url'])
            self.assertTrue('newnew', el['url'])
    def test_interp_headers(self):
        # The interp-tree template must not escape markup in the header.
        from django.template import loader, Context
        t = loader.get_template('regulations/interp-tree.html')
        context_dict = {'interp': {
            'header': '<ins>My header</ins>', 'section_header': True}}
        response = t.render(Context(context_dict))
        tags_preserved_header = '<h3 tabindex=\"0\"> <ins>My header</ins></h3>'
        self.assertTrue(tags_preserved_header in response)
    @patch('regulations.views.diff.fetch_toc')
    @patch('regulations.views.diff.SectionUrl')
    def test_add_main_content(self, SectionUrl, fetch_toc):
        # add_diff_content must copy the diff versions into the context and
        # pick the first leaf of the Interp subtree as 'first_subterp'.
        fetch_toc.return_value = [
            {'index': ['1111', '222'], 'is_section': True},
            {'index': ['1111', 'B'], 'is_appendix': True},
            {'index': ['1111', 'Interp'], 'is_supplement': True,
             'sub_toc': [
                 {'index': ['1111', 'Interp', 'h1']},
                 {'index': ['1111', 'Subpart', 'Interp']},
                 {'index': ['1111', 'A', 'Interp']}]}]
        context = {
            'main_content_context': {'newer_version': '1', 'TOC': 'toc'},
            'label_id': '111-222',
            'version': '2'}
        request = RequestFactory().get('?new_version=1')
        csdv = ChromeSectionDiffView()
        csdv.request = request
        csdv.add_diff_content(context)
        self.assertEqual(context['from_version'], '2')
        self.assertEqual(context['left_version'], '2')
        self.assertEqual(context['right_version'], '1')
        self.assertEqual(context['TOC'], 'toc')
        self.assertTrue('first_subterp' in context)
        self.assertEqual(['1111', 'Interp', 'h1'],
                         context['first_subterp']['index'])
        self.assertTrue('url' in context['first_subterp'])
class PartialSectionDiffViewTests(TestCase):
    """Tests for PartialSectionDiffView.footer_nav: previous/next links are
    built from TOC order, carry both versions and the from_version query."""
    def test_footer_nav(self):
        view = PartialSectionDiffView()
        toc = [{'section_id': '9898-1'}, {'section_id': '9898-5'},
               {'section_id': '9898-A'}, {'section_id': '9898-Interp'}]
        # a label absent from the TOC yields no navigation at all
        self.assertEqual({}, view.footer_nav(
            '9898-2', toc, 'old', 'new', 'from'))
        # first entry: only a 'next' link
        result = view.footer_nav('9898-1', toc, 'old', 'new', 'from')
        self.assertFalse('previous' in result)
        self.assertTrue('9898-5' in result['next']['url'])
        self.assertTrue('old' in result['next']['url'])
        self.assertTrue('new' in result['next']['url'])
        self.assertTrue('?from_version=from' in result['next']['url'])
        # middle entries: both directions present
        result = view.footer_nav('9898-5', toc, 'old', 'new', 'from')
        self.assertTrue('9898-1' in result['previous']['url'])
        self.assertTrue('old' in result['previous']['url'])
        self.assertTrue('new' in result['previous']['url'])
        self.assertTrue('?from_version=from' in result['previous']['url'])
        self.assertTrue('9898-A' in result['next']['url'])
        self.assertTrue('old' in result['next']['url'])
        self.assertTrue('new' in result['next']['url'])
        self.assertTrue('?from_version=from' in result['next']['url'])
        result = view.footer_nav('9898-A', toc, 'old', 'new', 'from')
        self.assertTrue('9898-5' in result['previous']['url'])
        self.assertTrue('old' in result['previous']['url'])
        self.assertTrue('new' in result['previous']['url'])
        self.assertTrue('?from_version=from' in result['previous']['url'])
        self.assertTrue('9898-Interp' in result['next']['url'])
        self.assertTrue('old' in result['next']['url'])
        self.assertTrue('new' in result['next']['url'])
        self.assertTrue('?from_version=from' in result['next']['url'])
        # last entry: only a 'previous' link
        result = view.footer_nav('9898-Interp', toc, 'old', 'new', 'from')
        self.assertTrue('9898-A' in result['previous']['url'])
        self.assertTrue('old' in result['previous']['url'])
        self.assertTrue('new' in result['previous']['url'])
        self.assertTrue('?from_version=from' in result['previous']['url'])
        self.assertFalse('next' in result)
| cc0-1.0 |
daisymax/nvda | source/appModules/wlmail.py | 2 | 2452 | #appModules/wlmail.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2010 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
import controlTypes
import api
import winUser
from keyboardHandler import KeyboardInputGesture
from NVDAObjects.IAccessible.MSHTML import MSHTML
import msimn
class AboutBlankDocument(MSHTML):
	"""A document called about:blank which hosts the HTML message composer document using viewlink.
	Unfortunately, there doesn't seem to be any way to access the real (editable) viewlink document.
	Therefore, we need to ignore this about:blank document so the user can access the editable document.
	"""
	# Make sure a buffer doesn't get created for this document.
	# Otherwise, the viewLink document beneath it will be treated as part of this buffer and won't be accessible.
	role = controlTypes.ROLE_UNKNOWN
	def event_gainFocus(self):
		# This document is useless to us, so don't bother to report it.
		return
class AppModule(appModuleHandler.AppModule):
	"""App module for Windows Live Mail / Outlook Express-style windows."""
	def chooseNVDAObjectOverlayClasses(self, obj, clsList):
		# Route specific windows to overlay classes: the about:blank host
		# document, message-rule list items, and message-list items.
		if obj.windowClassName == "Internet Explorer_Server" and obj.role == controlTypes.ROLE_DOCUMENT and obj.HTMLNode and obj.HTMLNode.document.url=="about:blank":
			clsList.insert(0, AboutBlankDocument)
		elif obj.windowClassName=="SysListView32" and obj.windowControlID in (128,129,130) and obj.role==controlTypes.ROLE_LISTITEM:
			clsList.insert(0,msimn.MessageRuleListItem)
		elif obj.windowClassName=="SysListView32" and obj.role==controlTypes.ROLE_LISTITEM and obj.parent.name=="Outlook Express Message List":
			clsList.insert(0,msimn.MessageListItem)
	def event_gainFocus(self,obj,nextHandler):
		nextHandler()
		#Force focus to move to something sane when landing on a plain text message window
		if obj.windowClassName=="ME_DocHost" and obj.windowControlID==1000 and obj.role==controlTypes.ROLE_PANE:
			firstChild=obj.firstChild
			if firstChild:
				# NOTE(review): this re-reads obj.firstChild rather than
				# descending via firstChild.firstChild, so the second lookup
				# is redundant as written -- confirm the intended depth.
				firstChild=obj.firstChild
				if firstChild:
					firstChild.setFocus()
					return
		if obj.windowClassName=="ATH_Note" and obj.event_objectID==winUser.OBJID_CLIENT and obj.IAccessibleChildID==0:
			api.processPendingEvents()
			# only redirect if focus actually stayed on this pane
			if obj==api.getFocusObject() and controlTypes.STATE_FOCUSED in obj.states:
				return KeyboardInputGesture.fromName("shift+tab").send()
| gpl-2.0 |
DooMLoRD/android_kernel_sony_msm8974ab | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Command-line handling: an optional [comm] argument restricts the
# summary to syscalls made by that process name.
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]
# syscall id -> count; autodict creates missing entries on demand
syscalls = autodict()
def trace_begin():
	# Called once by perf before any events are delivered.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf after the last event; emit the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Handler invoked by perf for each raw_syscalls:sys_enter event;
	# ``id`` is the syscall number. Skip events from other processes
	# when a [comm] filter was given.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# autodict yields a non-int placeholder for unseen ids; seed it
		syscalls[id] = 1
def print_syscall_totals():
	# Print a two-column table of syscall counts, most frequent first.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
		"-----------"),
	# sort by (count, id) descending so ties order by syscall number
	for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
		reverse = True):
		print "%-40s %10d\n" % (syscall_name(id), val),
gqwest-erp/server | openerp/addons/claim_from_delivery/__init__.py | 68 | 1032 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
orion1024/Sick-Beard | lib/hachoir_parser/misc/msoffice_summary.py | 90 | 12537 | """
Microsoft Document summaries structures.
Documents
---------
- Apache POI (HPSF Internals):
http://poi.apache.org/hpsf/internals.html
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (FieldSet, ParserError,
RootSeekableFieldSet, SeekableFieldSet,
Bit, Bits, NullBits,
UInt8, UInt16, UInt32, TimestampWin64, TimedeltaWin64, Enum,
Bytes, RawBytes, NullBytes, String,
Int8, Int32, Float32, Float64, PascalString32)
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.tools import createDict
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_parser.common.win32 import GUID, PascalStringWin32, CODEPAGE_CHARSET
from lib.hachoir_parser.image.bmp import BmpHeader, parseImageData
MAX_SECTION_COUNT = 100
OS_MAC = 1
OS_NAME = {
0: "Windows 16-bit",
1: "Macintosh",
2: "Windows 32-bit",
}
class OSConfig:
    """Select the text codecs to use according to the platform that
    produced the document (big-endian sections come from Macintosh)."""
    def __init__(self, big_endian):
        if big_endian:
            self.charset = "MacRoman"
            self.utf16 = "UTF-16-BE"
            return
        # FIXME: Don't guess the charset, use ISO-8859-1 or UTF-8
        #self.charset = "ISO-8859-1"
        self.charset = None
        self.utf16 = "UTF-16-LE"
class PropertyIndex(FieldSet):
    """(id, offset) entry of one property inside an OLE2 summary section.
    The numeric id is displayed against a different enumeration depending
    on whether the enclosing section is a document summary or a component
    summary (decided from the grandparent field's name in createFields).
    """
    TAG_CODEPAGE = 1
    # ids shared by both summary kinds
    COMMON_PROPERTY = {
        0: "Dictionary",
        1: "CodePage",
        0x80000000: "LOCALE_SYSTEM_DEFAULT",
        0x80000003: "CASE_SENSITIVE",
    }
    # ids specific to the document summary stream
    DOCUMENT_PROPERTY = {
        2: "Category",
        3: "PresentationFormat",
        4: "NumBytes",
        5: "NumLines",
        6: "NumParagraphs",
        7: "NumSlides",
        8: "NumNotes",
        9: "NumHiddenSlides",
        10: "NumMMClips",
        11: "Scale",
        12: "HeadingPairs",
        13: "DocumentParts",
        14: "Manager",
        15: "Company",
        16: "LinksDirty",
        17: "DocSumInfo_17",
        18: "DocSumInfo_18",
        19: "DocSumInfo_19",
        20: "DocSumInfo_20",
        21: "DocSumInfo_21",
        22: "DocSumInfo_22",
        23: "DocSumInfo_23",
    }
    DOCUMENT_PROPERTY.update(COMMON_PROPERTY)
    # ids specific to the component (summary information) stream
    COMPONENT_PROPERTY = {
        2: "Title",
        3: "Subject",
        4: "Author",
        5: "Keywords",
        6: "Comments",
        7: "Template",
        8: "LastSavedBy",
        9: "RevisionNumber",
        10: "TotalEditingTime",
        11: "LastPrinted",
        12: "CreateTime",
        13: "LastSavedTime",
        14: "NumPages",
        15: "NumWords",
        16: "NumCharacters",
        17: "Thumbnail",
        18: "AppName",
        19: "Security",
    }
    COMPONENT_PROPERTY.update(COMMON_PROPERTY)
    def createFields(self):
        # choose the enumeration from the grandparent field set's name
        if self["../.."].name.startswith("doc_summary"):
            enum = self.DOCUMENT_PROPERTY
        else:
            enum = self.COMPONENT_PROPERTY
        yield Enum(UInt32(self, "id"), enum)
        # byte offset of the property content within the section
        yield UInt32(self, "offset")
    def createDescription(self):
        return "Property: %s" % self["id"].display
class Bool(Int8):
    """Boolean stored as a signed byte: -1 means True, anything else False."""
    def createValue(self):
        return Int8.createValue(self) == -1
class Thumbnail(FieldSet):
    """
    Thumbnail.
    Documents:
    - See Jakarta POI
      http://poi.apache.org/hpsf/thumbnails.html
      http://www.penguin-soft.com/penguin/developer/poi/
      org/apache/poi/hpsf/Thumbnail.html#CF_BITMAP
    - How To Extract Thumbnail Images
      http://sparks.discreet.com/knowledgebase/public/
      solutions/ExtractThumbnailImg.htm
    """
    FORMAT_CLIPBOARD = -1
    FORMAT_NAME = {
        -1: "Windows clipboard",
        -2: "Macintosh clipboard",
        -3: "GUID that contains format identifier",
        0: "No data",
        2: "Bitmap",
        3: "Windows metafile format",
        8: "Device Independent Bitmap (DIB)",
        14: "Enhanced Windows metafile",
    }
    DIB_BMP = 8
    DIB_FORMAT = {
        2: "Bitmap Obsolete (old BMP)",
        3: "Windows metafile format (WMF)",
        8: "Device Independent Bitmap (BMP)",
        14: "Enhanced Windows metafile (EMF)",
    }
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # total field-set size in bits comes from the leading size field
        self._size = self["size"].value * 8
    def createFields(self):
        yield filesizeHandler(UInt32(self, "size"))
        yield Enum(Int32(self, "format"), self.FORMAT_NAME)
        if self["format"].value == self.FORMAT_CLIPBOARD:
            # clipboard data carries its own (DIB) format tag
            yield Enum(UInt32(self, "dib_format"), self.DIB_FORMAT)
            if self["dib_format"].value == self.DIB_BMP:
                yield BmpHeader(self, "bmp_header")
                size = (self.size - self.current_size) // 8
                yield parseImageData(self, "pixels", size, self["bmp_header"])
                return
        # anything not parsed above is kept as raw bytes
        size = (self.size - self.current_size) // 8
        if size:
            yield RawBytes(self, "data", size)
class PropertyContent(FieldSet):
    """Value of one OLE2 summary property: a 32-bit type tag (12-bit type
    plus vector flag) followed by the typed payload, optionally a vector."""
    # Type tag of a code-page (8 bit) Pascal string
    TYPE_LPSTR = 30
    # Type tag -> (name, field handler class or None when unsupported)
    TYPE_INFO = {
        0: ("EMPTY", None),
        1: ("NULL", None),
        2: ("UInt16", UInt16),
        3: ("UInt32", UInt32),
        4: ("Float32", Float32),
        5: ("Float64", Float64),
        6: ("CY", None),
        7: ("DATE", None),
        8: ("BSTR", None),
        9: ("DISPATCH", None),
        10: ("ERROR", None),
        11: ("BOOL", Bool),
        12: ("VARIANT", None),
        13: ("UNKNOWN", None),
        14: ("DECIMAL", None),
        16: ("I1", None),
        17: ("UI1", None),
        18: ("UI2", None),
        19: ("UI4", None),
        20: ("I8", None),
        21: ("UI8", None),
        22: ("INT", None),
        23: ("UINT", None),
        24: ("VOID", None),
        25: ("HRESULT", None),
        26: ("PTR", None),
        27: ("SAFEARRAY", None),
        28: ("CARRAY", None),
        29: ("USERDEFINED", None),
        30: ("LPSTR", PascalString32),
        31: ("LPWSTR", PascalString32),
        64: ("FILETIME", TimestampWin64),
        65: ("BLOB", None),
        66: ("STREAM", None),
        67: ("STORAGE", None),
        68: ("STREAMED_OBJECT", None),
        69: ("STORED_OBJECT", None),
        70: ("BLOB_OBJECT", None),
        71: ("THUMBNAIL", Thumbnail),
        72: ("CLSID", None),
        0x1000: ("Vector", None),
    }
    TYPE_NAME = createDict(TYPE_INFO, 0)
    def createFields(self):
        self.osconfig = self.parent.osconfig
        # NOTE(review): "if True" looks like a debugging leftover that keeps
        # the 12-bit type + vector-flag layout; the else branch is dead code.
        if True:
            yield Enum(Bits(self, "type", 12), self.TYPE_NAME)
            yield Bit(self, "is_vector")
            yield NullBits(self, "padding", 32-12-1)
        else:
            yield Enum(Bits(self, "type", 32), self.TYPE_NAME)
        tag = self["type"].value
        kw = {}
        try:
            handler = self.TYPE_INFO[tag][1]
            if handler == PascalString32:
                # 8-bit strings use the stream's code page, wide strings UTF-16
                osconfig = self.osconfig
                if tag == self.TYPE_LPSTR:
                    kw["charset"] = osconfig.charset
                else:
                    kw["charset"] = osconfig.utf16
            elif handler == TimestampWin64:
                # "TotalEditingTime" is a duration, not an absolute timestamp
                if self.description == "TotalEditingTime":
                    handler = TimedeltaWin64
        except LookupError:
            handler = None
        if not handler:
            raise ParserError("OLE2: Unable to parse property of type %s" \
                % self["type"].display)
        if self["is_vector"].value:
            # Vector payload: 32-bit element count, then that many items
            yield UInt32(self, "count")
            for index in xrange(self["count"].value):
                yield handler(self, "item[]", **kw)
        else:
            yield handler(self, "value", **kw)
            # Expose the single value as this field set's own value
            self.createValue = lambda: self["value"].value
PropertyContent.TYPE_INFO[12] = ("VARIANT", PropertyContent)
class SummarySection(SeekableFieldSet):
    """One section of a summary stream: a property index followed by the
    property values it points at."""
    def __init__(self, *args):
        SeekableFieldSet.__init__(self, *args)
        # Section size (in bits) comes from its own leading "size" field
        self._size = self["size"].value * 8
    def createFields(self):
        self.osconfig = self.parent.osconfig
        yield UInt32(self, "size")
        yield UInt32(self, "property_count")
        for index in xrange(self["property_count"].value):
            yield PropertyIndex(self, "property_index[]")
        for index in xrange(self["property_count"].value):
            findex = self["property_index[%u]" % index]
            # Property payloads live at the offsets recorded in the index
            self.seekByte(findex["offset"].value)
            field = PropertyContent(self, "property[]", findex["id"].display)
            yield field
            if not self.osconfig.charset \
            and findex['id'].value == PropertyIndex.TAG_CODEPAGE:
                # The first CODEPAGE property fixes the charset used by
                # later 8-bit strings in this stream
                codepage = field['value'].value
                if codepage in CODEPAGE_CHARSET:
                    self.osconfig.charset = CODEPAGE_CHARSET[codepage]
                else:
                    self.warning("Unknown codepage: %r" % codepage)
class SummaryIndex(FieldSet):
    """Header index entry: stream name (16 bytes) and section byte offset."""
    static_size = 20*8  # 16-byte name + 32-bit offset
    def createFields(self):
        yield String(self, "name", 16)
        yield UInt32(self, "offset")
class BaseSummary:
    """Shared layout of a summary stream: header, section index, sections."""
    endian = LITTLE_ENDIAN
    def __init__(self):
        # NOTE(review): "\xFF\xFE" maps to BIG_ENDIAN here although the
        # field's description labels 0xFF 0xFE as the Intel (little endian)
        # marker; presumably this compensates for the order in which the
        # marker bytes are read -- confirm against real documents.
        if self["endian"].value == "\xFF\xFE":
            self.endian = BIG_ENDIAN
        elif self["endian"].value == "\xFE\xFF":
            self.endian = LITTLE_ENDIAN
        else:
            raise ParserError("OLE2: Invalid endian value")
        self.osconfig = OSConfig(self["os_type"].value == OS_MAC)
    def createFields(self):
        yield Bytes(self, "endian", 2, "Endian (0xFF 0xFE for Intel)")
        yield UInt16(self, "format", "Format (0)")
        yield UInt8(self, "os_version")
        yield UInt8(self, "os_revision")
        yield Enum(UInt16(self, "os_type"), OS_NAME)
        yield GUID(self, "format_id")
        yield UInt32(self, "section_count")
        # Guard against absurd counts coming from corrupt files
        if MAX_SECTION_COUNT < self["section_count"].value:
            raise ParserError("OLE2: Too much sections (%s)" % self["section_count"].value)
        section_indexes = []
        for index in xrange(self["section_count"].value):
            section_index = SummaryIndex(self, "section_index[]")
            yield section_index
            section_indexes.append(section_index)
        # Sections are stored at the offsets recorded in the index entries
        for section_index in section_indexes:
            self.seekByte(section_index["offset"].value)
            yield SummarySection(self, "section[]")
        size = (self.size - self.current_size) // 8
        if 0 < size:
            yield NullBytes(self, "end_padding", size)
class SummaryParser(BaseSummary, HachoirParser, RootSeekableFieldSet):
    """Stand-alone parser for a Microsoft Office summary stream."""
    PARSER_TAGS = {
        "description": "Microsoft Office summary",
    }
    def __init__(self, stream, **kw):
        RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
        HachoirParser.__init__(self, stream, **kw)
        BaseSummary.__init__(self)
    def validate(self):
        # BaseSummary.__init__ already rejected invalid endian markers
        return True
class SummaryFieldSet(BaseSummary, FieldSet):
    """Summary stream embedded as a field set inside a larger parser."""
    def __init__(self, parent, name, description=None, size=None):
        FieldSet.__init__(self, parent, name, description=description, size=size)
        BaseSummary.__init__(self)
class CompObj(FieldSet):
    """OLE2 "CompObj" stream: clipboard format and program identification."""
    # NOTE(review): not referenced by createFields; presumably a reference
    # table of known OS version codes.
    OS_VERSION = {
        0x0a03: "Windows 3.1",
    }
    def createFields(self):
        # Header
        yield UInt16(self, "version", "Version (=1)")
        yield textHandler(UInt16(self, "endian", "Endian (0xFF 0xFE for Intel)"), hexadecimal)
        yield UInt8(self, "os_version")
        yield UInt8(self, "os_revision")
        yield Enum(UInt16(self, "os_type"), OS_NAME)
        yield Int32(self, "unused", "(=-1)")
        yield GUID(self, "clsid")
        # User type
        yield PascalString32(self, "user_type", strip="\0")
        # Clipboard format (fixed 4-byte code on Mac, Pascal string elsewhere)
        if self["os_type"].value == OS_MAC:
            yield Int32(self, "unused[]", "(=-2)")
            yield String(self, "clipboard_format", 4)
        else:
            yield PascalString32(self, "clipboard_format", strip="\0")
        if self.current_size == self.size:
            return
        #-- OLE 2.01 ---
        # Program ID
        yield PascalString32(self, "prog_id", strip="\0")
        if self["os_type"].value != OS_MAC:
            # Magic number
            yield textHandler(UInt32(self, "magic", "Magic number (0x71B239F4)"), hexadecimal)
            # Unicode versions of the three strings above
            yield PascalStringWin32(self, "user_type_unicode", strip="\0")
            yield PascalStringWin32(self, "clipboard_format_unicode", strip="\0")
            yield PascalStringWin32(self, "prog_id_unicode", strip="\0")
        size = (self.size - self.current_size) // 8
        if size:
            yield NullBytes(self, "end_padding", size)
| gpl-3.0 |
mdhaman/superdesk-core | apps/duplication/archive_translate.py | 1 | 3967 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2016 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from apps.archive.archive import SOURCE as ARCHIVE
from apps.content import push_content_notification
from apps.auth import get_user_id
from superdesk import get_resource_service
from superdesk.errors import SuperdeskApiError, InvalidStateTransitionError
from superdesk.metadata.item import CONTENT_STATE, ITEM_STATE
from superdesk.metadata.packages import RESIDREF
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.workflow import is_workflow_state_transition_valid
from superdesk.utc import utcnow
from apps.packages import PackageService
from flask_babel import _
package_service = PackageService()
class TranslateResource(Resource):
    """REST endpoint (POST archive/translate) that creates a translated
    copy of an archive item."""
    endpoint_name = 'translate'
    resource_title = endpoint_name
    schema = {
        # guid of the item to translate
        'guid': {
            'type': 'string',
            'required': True
        },
        # target language code
        'language': {
            'type': 'string',
            'required': True
        },
        # optional desk where the translated item should land
        'desk': Resource.rel('desks', nullable=True),
    }
    url = 'archive/translate'
    resource_methods = ['POST']
    item_methods = []
    privileges = {'POST': 'translate'}
class TranslateService(BaseService):
    """Creates translated copies of archive items, recursing into packages
    and keeping every translation linked through ``translation_id``."""

    def _translate_item(self, guid, language, task=None, service=None, **kwargs):
        """Duplicate item *guid* into *language* and return the new item id.

        :param guid: id of the source item
        :param language: target language code
        :param task: optional task dict (desk/stage/user) for the new item
        :param service: resource service holding the item (default: archive)
        :raises SuperdeskApiError: when the item cannot be found
        :raises InvalidStateTransitionError: when the item state forbids it
        """
        if not service:
            service = ARCHIVE
        archive_service = get_resource_service(service)
        macros_service = get_resource_service('macros')
        published_service = get_resource_service('published')
        item = archive_service.find_one(req=None, _id=guid)
        if not item:
            # Fixed grammar of the user-facing error message
            # ("Fail to found item" -> "Failed to find item").
            raise SuperdeskApiError.notFoundError(
                _('Failed to find item with guid: {guid}').format(guid=guid))
        if not is_workflow_state_transition_valid('translate', item[ITEM_STATE]):
            raise InvalidStateTransitionError()
        if item.get('language') == language:
            # Already in the target language: nothing to do.
            return guid
        if package_service.is_package(item):
            # Translate every referenced item first, rewriting the refs.
            refs = package_service.get_item_refs(item)
            for ref in refs:
                ref[RESIDREF] = self._translate_item(ref[RESIDREF], language,
                                                     service=ref.get('location'),
                                                     task=task)
        if not item.get('translation_id'):
            # First translation: the original becomes the root of the group.
            archive_service.system_update(item['_id'], {'translation_id': item['_id']}, item)
            item['translation_id'] = item['_id']
            published_service.update_published_items(item['_id'], 'translation_id', item['_id'])
        macros_service.execute_translation_macro(
            item, item.get('language', None), language)
        item['language'] = language
        item['translated_from'] = guid
        item['versioncreated'] = utcnow()
        item['firstcreated'] = utcnow()
        if task:
            item['task'] = task
        _id = archive_service.duplicate_item(item, operation='translate')
        if kwargs.get('notify', True):
            push_content_notification([item])
        return _id

    def create(self, docs, **kwargs):
        """Translate each posted doc; returns the list of new item ids."""
        ids = []
        for doc in docs:
            task = None
            if doc.get('desk'):
                desk = get_resource_service('desks').find_one(req=None, _id=doc['desk']) or {}
                task = dict(desk=desk.get('_id'), stage=desk.get('working_stage'), user=get_user_id())
            ids.append(self._translate_item(doc['guid'], doc['language'], task, **kwargs))
        return ids
superdesk.workflow_action(
name='translate',
exclude_states=[CONTENT_STATE.SPIKED, CONTENT_STATE.KILLED, CONTENT_STATE.RECALLED],
privileges=['archive', 'translate']
)
| agpl-3.0 |
iawells/gluon | gluon/common/particleGenerator/DataBaseModelGenerator.py | 2 | 6227 | #!/usr/bin/python
from __future__ import print_function
import sys
import re
import yaml
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
class DataBaseModelProcessor(object):
    """Generates SQLAlchemy model classes from a parsed API model.

    Usage: add_model(parsed_dict), build_sqla_models(), then fetch the
    generated classes with get_table_class(name).
    """

    def __init__(self):
        # table name -> generated SQLAlchemy declarative class
        self.db_models = {}

    def add_model(self, model):
        """Store the model description (dict: table name -> table spec)."""
        self.data = model

    def get_table_class(self, table_name):
        """Return the generated SQLAlchemy class for *table_name*.

        :raises Exception: with a readable message for unknown tables.
        """
        try:
            return self.db_models[table_name]
        except KeyError:
            # BUGFIX: a failed dict lookup raises KeyError, not ValueError,
            # so the old "except ValueError" never matched and callers saw
            # a bare KeyError instead of this message.
            raise Exception('Unknown table name %s' % table_name)

    def build_sqla_models(self, base=None):
        """Make SQLAlchemy classes for each of the elements in the data read"""
        if not base:
            base = declarative_base()
        if not self.data:
            raise Exception('Cannot create Database Model from empty model.')

        def de_camel(s):
            # CamelCase -> snake_case, used for table names
            s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
            return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

        # Make a model class that we've never thought of before.
        # (.items() works on both Python 2 and 3, unlike .iteritems())
        # First pass: make sure every table has a primary key so foreign
        # key references in the second pass can be resolved.
        for table_name, table_data in self.data.items():
            self.get_primary_key(table_data)
        for table_name, table_data in self.data.items():
            try:
                attrs = {}
                for col_name, col_desc in table_data['attributes'].items():
                    try:
                        options = {}
                        args = []
                        # Step 1: deal with object xrefs
                        if col_desc['type'] in self.data:
                            # This is a foreign key reference. Make the column
                            # like the FK, but drop the primary from it and
                            # use the local one.
                            tgt_name = col_desc['type']
                            tgt_data = self.data[tgt_name]
                            primary_col = tgt_data['primary']
                            repl_col_desc = \
                                dict(tgt_data['attributes'][primary_col])
                            if 'primary' in repl_col_desc:
                                # The FK will be a primary, doesn't mean we are
                                del repl_col_desc['primary']
                            # May still be the local PK if we used to be,
                            # though
                            if col_desc.get('primary'):
                                repl_col_desc['primary'] = True
                            # Set the SQLA col option to make clear what's
                            # going on
                            args.append(sa.ForeignKey('%s.%s' %
                                                      (de_camel(tgt_name),
                                                       primary_col)))
                            # The col creation code will now duplicate the FK
                            # column nicely
                            col_desc = repl_col_desc
                        # Step 2: convert our special types to ones a DB likes
                        if col_desc['type'] == 'uuid':
                            # UUIDs, from a DB perspective, are a form of
                            # string
                            repl_col_desc = dict(col_desc)
                            repl_col_desc['type'] = 'string'
                            repl_col_desc['length'] = 64
                            col_desc = repl_col_desc
                        # Step 3: with everything DB-ready, spit out the table
                        # definition
                        if col_desc.get('primary', False):
                            options['primary_key'] = True
                            # Save the information about the primary key as
                            # well in the object
                            attrs['_primary_key'] = col_name
                        required = col_desc.get('required', False)
                        options['nullable'] = not required
                        if col_desc['type'] == 'string':
                            attrs[col_name] = sa.Column(sa.String(
                                col_desc['length']), *args, **options)
                        elif col_desc['type'] == 'integer':
                            attrs[col_name] = sa.Column(sa.Integer(), *args,
                                                        **options)
                        elif col_desc['type'] == 'boolean':
                            attrs[col_name] = sa.Column(sa.Boolean(), *args,
                                                        **options)
                        elif col_desc['type'] == 'enum':
                            attrs[col_name] = sa.Column(
                                sa.Enum(*col_desc['values']), *args,
                                **options)
                        else:
                            raise Exception('Unknown column type %s' %
                                            col_desc['type'])
                    except:
                        # intentional bare except: annotate and re-raise
                        print('During processing of attribute ', col_name,
                              file=sys.stderr)
                        raise
                if '_primary_key' not in attrs:
                    raise Exception("One and only one primary key has to "
                                    "be given to each column")
                attrs['__tablename__'] = de_camel(table_name)
                attrs['__name__'] = table_name
                self.db_models[table_name] = type(table_name, (base,), attrs)
            except:
                # intentional bare except: annotate and re-raise
                print('During processing of table ', table_name,
                      file=sys.stderr)
                raise

    @classmethod
    def get_primary_key(cls, table_data):
        """Return the table's primary key column name, adding a default
        'uuid' primary key column when none is declared."""
        primary = []
        for k, v in table_data['attributes'].items():
            if 'primary' in v:
                primary = k
                break
        # If not specified, a UUID is used as the PK
        if not primary:
            table_data['attributes']['uuid'] = \
                {'type': 'string', 'length': 36, 'primary': True,
                 'required': True}
            primary = 'uuid'
        table_data['primary'] = primary
        return primary
| apache-2.0 |
alfredgamulo/cloud-custodian | tools/c7n_gcp/c7n_gcp/resources/mlengine.py | 2 | 1923 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import jmespath
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
@resources.register('ml-model')
class MLModel(QueryResourceManager):
    """GCP Resource
    https://cloud.google.com/ai-platform/prediction/docs/reference/rest/v1/projects.models
    """
    class resource_type(TypeInfo):
        service = 'ml'
        version = 'v1'
        component = 'projects.models'
        enum_spec = ('list', 'models[]', None)
        scope = 'project'
        scope_key = 'parent'
        scope_template = 'projects/{}'
        name = id = 'name'
        default_report_fields = [
            id, name, "description", "onlinePredictionLogging"]
        # get() needs the audit-log event to recover the resource name
        get_requires_event = True
    @staticmethod
    def get(client, event):
        # Resolve the model from the name recorded in the audit-log response
        return client.execute_query(
            'get', {'name': jmespath.search(
                'protoPayload.response.name', event
            )})
@resources.register('ml-job')
class MLJob(QueryResourceManager):
    """GCP Resource
    https://cloud.google.com/ai-platform/prediction/docs/reference/rest/v1/projects.jobs
    """
    class resource_type(TypeInfo):
        service = 'ml'
        version = 'v1'
        component = 'projects.jobs'
        enum_spec = ('list', 'jobs[]', None)
        scope = 'project'
        scope_key = 'parent'
        scope_template = 'projects/{}'
        name = id = 'jobId'
        default_report_fields = [
            "jobId", "status", "createTime", "endTime"]
        # get() needs the audit-log event to recover project id and job id
        get_requires_event = True
    @staticmethod
    def get(client, event):
        # Rebuild the fully qualified job name from the audit-log event
        return client.execute_query(
            'get', {'name': 'projects/{}/jobs/{}'.format(
                jmespath.search('resource.labels.project_id', event),
                jmespath.search('protoPayload.response.jobId', event))})
| apache-2.0 |
inkerra/cinder | cinder/openstack/common/scheduler/weights/__init__.py | 6 | 1305 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler host weights
"""
from cinder.openstack.common.scheduler import weight
class WeighedHost(weight.WeighedObject):
    """A host paired with the weight the weighers assigned to it."""
    def to_dict(self):
        # Compact representation used for logging / serialization
        return {
            'weight': self.weight,
            'host': self.obj.host,
        }
    def __repr__(self):
        return ("WeighedHost [host: %s, weight: %s]" %
                (self.obj.host, self.weight))
class BaseHostWeigher(weight.BaseWeigher):
    """Base class for host weights."""
    # No host-specific behaviour yet; exists so the handler below can
    # filter plugins to host weighers only.
    pass
class HostWeightHandler(weight.BaseWeightHandler):
    """Loads host weighers from *namespace* and yields WeighedHost objects."""
    object_class = WeighedHost
    def __init__(self, namespace):
        super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace)
| apache-2.0 |
indictranstech/Das_frappe | frappe/website/utils.py | 26 | 4682 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re, os
from werkzeug.urls import url_parse, url_unparse
def delete_page_cache(path):
    """Drop cached website data for *path*, or flush all page caches
    when no path is given."""
    cache = frappe.cache()
    cache_groups = ("page_context", "website_page", "sitemap_options")
    if path:
        for group in cache_groups:
            cache.hdel(group, path)
    else:
        for group in cache_groups:
            cache.delete_key(group)
def find_first_image(html):
    """Return the ``src`` of the first <img> tag in *html*, or None."""
    m = re.finditer("""<img[^>]*src\s?=\s?['"]([^'"]*)['"]""", html)
    try:
        # Use the builtin next() so this works on both Python 2.6+ and 3;
        # the old generator method m.next() does not exist on Python 3.
        return next(m).groups()[0]
    except StopIteration:
        # no <img> tag found
        return None
def can_cache(no_cache=False):
    """Return True when page caching is allowed: not disabled site-wide,
    not disabled for this request, and not explicitly bypassed."""
    return not (frappe.conf.disable_website_cache or getattr(frappe.local, "no_cache", False) or no_cache)
def get_comment_list(doctype, name):
    """Return comments for the given document as a list of dicts
    (comment, comment_by_fullname, creation, comment_by), oldest first."""
    return frappe.db.sql("""select
        comment, comment_by_fullname, creation, comment_by
        from `tabComment` where comment_doctype=%s
        and ifnull(comment_type, "Comment")="Comment"
        and comment_docname=%s order by creation""", (doctype, name), as_dict=1) or []
def get_home_page():
    """Return the route of the current user's home page, resolved in order:
    role_home_page hook, home_page hook, Website Settings, then "login".
    The result is cached per user."""
    def _get_home_page():
        role_home_page = frappe.get_hooks("role_home_page")
        home_page = None
        # First role with a configured home page wins (last hook entry)
        for role in frappe.get_roles():
            if role in role_home_page:
                home_page = role_home_page[role][-1]
                break
        if not home_page:
            home_page = frappe.get_hooks("home_page")
            if home_page:
                home_page = home_page[-1]
        if not home_page:
            home_page = frappe.db.get_value("Website Settings", None, "home_page") or "login"
        return home_page
    return frappe.cache().hget("home_page", frappe.session.user, _get_home_page)
def is_signup_enabled():
    """Return True unless signup is disabled in Website Settings.
    The answer is cached on frappe.local for the duration of the request."""
    if getattr(frappe.local, "is_signup_enabled", None) is None:
        frappe.local.is_signup_enabled = True
        if frappe.utils.cint(frappe.db.get_value("Website Settings",
            "Website Settings", "disable_signup")):
            frappe.local.is_signup_enabled = False
    return frappe.local.is_signup_enabled
def cleanup_page_name(title):
    """make page name from title"""
    slug = title.lower()
    # strip punctuation, map path-ish characters to hyphens,
    # collapse whitespace into single hyphens
    slug = re.sub('[~!@#$%^&*+()<>,."\'\?]', '', slug)
    slug = re.sub('[:/]', '-', slug)
    slug = '-'.join(slug.split())
    # replace repeating hyphens
    return re.sub(r"(-)\1+", r"\1", slug)
def get_shade(color, percent):
    """Return a lighter/darker shade of *color* (hex, rgb or rgba notation),
    shifted by *percent* of full scale, in the same notation."""
    color, color_format = detect_color_format(color)
    r, g, b, a = color
    avg = (float(int(r) + int(g) + int(b)) / 3)
    # switch dark and light shades
    if avg > 128:
        percent = -percent
    # stronger diff for darker shades
    if percent < 25 and avg < 64:
        percent = percent * 2
    new_color = []
    for channel_value in (r, g, b):
        new_color.append(get_shade_for_channel(channel_value, percent))
    r, g, b = new_color
    return format_color(r, g, b, a, color_format)
def detect_color_format(color):
    """Parse *color* into ([r, g, b, a], format) where format is 'rgba',
    'rgb' or 'hex'. Channel values keep their source type: strings for
    rgb()/rgba() input, ints for hex input; alpha defaults to 1."""
    if color.startswith("rgba"):
        channels = [part.strip() for part in color[5:-1].split(",")]
        return channels, "rgba"
    if color.startswith("rgb"):
        channels = [part.strip() for part in color[4:-1].split(",")] + [1]
        return channels, "rgb"
    # otherwise assume hex, with or without a leading '#'
    value = color
    if value.startswith("#"):
        value = value[1:]
    if len(value) == 3:
        # expand the short form, e.g. "fab" -> "ffaabb"
        value = "{0}{0}{1}{1}{2}{2}".format(*tuple(value))
    channels = [int(value[0:2], 16), int(value[2:4], 16), int(value[4:6], 16), 1]
    return channels, "hex"
def get_shade_for_channel(channel_value, percent):
    """Shift a single 0-255 colour channel by *percent* of full scale,
    clamping the result into the valid 0-255 range."""
    shifted = int(channel_value) + int(int('ff', 16) * (float(percent) / 100))
    return max(0, min(255, shifted))
def format_color(r, g, b, a, color_format):
    """Render channel values back into the notation named by *color_format*
    ('rgba', 'rgb', anything else falls back to hex)."""
    if color_format == "rgba":
        return "rgba({0}, {1}, {2}, {3})".format(r, g, b, a)
    if color_format == "rgb":
        return "rgb({0}, {1}, {2})".format(r, g, b)
    # assume hex
    return "#{0}{1}{2}".format(convert_to_hex(r), convert_to_hex(g), convert_to_hex(b))
def convert_to_hex(channel_value):
    """Return *channel_value* (0-255) as a two-digit lowercase hex string."""
    return "{0:02x}".format(channel_value)
def abs_url(path):
    """Deconstructs and Reconstructs a URL into an absolute URL or a URL relative from root '/'"""
    if not path:
        return
    if path.startswith(("http://", "https://")):
        # already absolute
        return path
    return path if path.startswith("/") else "/" + path
def get_full_index(doctype="Web Page"):
    """Returns full index of the website (on Web Page) upto the n-th level"""
    all_routes = []
    def get_children(parent):
        # Children of a route, in sidebar (idx) order
        children = frappe.db.get_all(doctype, ["parent_website_route", "page_name", "title"],
            {"parent_website_route": parent}, order_by="idx asc")
        for d in children:
            d.url = abs_url(os.path.join(d.parent_website_route or "", d.page_name))
            # Recurse only into routes not seen yet (guards against cycles)
            if d.url not in all_routes:
                d.children = get_children(d.url[1:])
                all_routes.append(d.url)
        return children
    return get_children("")
| mit |
dya2/python-for-android | python3-alpha/python3-src/Lib/encodings/utf_32.py | 180 | 5128 | """
Python 'utf-32' Codec
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_32_encode
def decode(input, errors='strict'):
    """Decode UTF-32 data in one shot (final=True), honouring the BOM."""
    output, consumed = codecs.utf_32_decode(input, errors, True)
    return output, consumed
class IncrementalEncoder(codecs.IncrementalEncoder):
    """UTF-32 incremental encoder.

    The first encode() call emits a BOM and pins the byte order to the
    platform's native order; subsequent calls use the pinned encoder.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.encoder = None

    def _native_encoder(self):
        # BOM-less encoder matching this platform's byte order
        if sys.byteorder == 'little':
            return codecs.utf_32_le_encode
        return codecs.utf_32_be_encode

    def encode(self, input, final=False):
        if self.encoder is None:
            # first chunk: write BOM + data, then pin the byte order
            data = codecs.utf_32_encode(input, self.errors)[0]
            self.encoder = self._native_encoder()
            return data
        return self.encoder(input, self.errors)[0]

    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.encoder = None

    def getstate(self):
        # state info we return to the caller:
        # 0: stream is in natural order for this platform
        # 2: endianness hasn't been determined yet
        # (we're never writing in unnatural order)
        return 2 if self.encoder is None else 0

    def setstate(self, state):
        self.encoder = None if state else self._native_encoder()
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """UTF-32 incremental decoder: sniffs the BOM on the first chunk,
    then delegates to the byte-order specific decoder."""
    def __init__(self, errors='strict'):
        codecs.BufferedIncrementalDecoder.__init__(self, errors)
        # byte-order specific decode function; None until the BOM is seen
        self.decoder = None
    def _buffer_decode(self, input, errors, final):
        if self.decoder is None:
            (output, consumed, byteorder) = \
                codecs.utf_32_ex_decode(input, errors, 0, final)
            if byteorder == -1:
                self.decoder = codecs.utf_32_le_decode
            elif byteorder == 1:
                self.decoder = codecs.utf_32_be_decode
            elif consumed >= 4:
                # enough bytes for a BOM, but none was found
                raise UnicodeError("UTF-32 stream does not start with BOM")
            return (output, consumed)
        return self.decoder(input, self.errors, final)
    def reset(self):
        codecs.BufferedIncrementalDecoder.reset(self)
        self.decoder = None
    def getstate(self):
        # additional state info from the base class must be None here,
        # as it isn't passed along to the caller
        state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
        # additional state info we pass to the caller:
        # 0: stream is in natural order for this platform
        # 1: stream is in unnatural order
        # 2: endianness hasn't been determined yet
        if self.decoder is None:
            return (state, 2)
        addstate = int((sys.byteorder == "big") !=
                       (self.decoder is codecs.utf_32_be_decode))
        return (state, addstate)
    def setstate(self, state):
        # state[1] will be ignored by BufferedIncrementalDecoder.setstate()
        codecs.BufferedIncrementalDecoder.setstate(self, state)
        state = state[1]
        if state == 0:
            self.decoder = (codecs.utf_32_be_decode
                            if sys.byteorder == "big"
                            else codecs.utf_32_le_decode)
        elif state == 1:
            self.decoder = (codecs.utf_32_le_decode
                            if sys.byteorder == "big"
                            else codecs.utf_32_be_decode)
        else:
            self.decoder = None
class StreamWriter(codecs.StreamWriter):
    """UTF-32 stream writer: emits a BOM with the first write, then keeps
    writing in the platform's native byte order."""
    def __init__(self, stream, errors='strict'):
        self.encoder = None
        codecs.StreamWriter.__init__(self, stream, errors)
    def reset(self):
        codecs.StreamWriter.reset(self)
        self.encoder = None
    def encode(self, input, errors='strict'):
        if self.encoder is None:
            # first chunk: include the BOM, then pin the native-order encoder
            result = codecs.utf_32_encode(input, errors)
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
    """UTF-32 stream reader: determines byte order from the BOM on the
    first decode, then rebinds self.decode to the specific decoder."""
    def reset(self):
        codecs.StreamReader.reset(self)
        try:
            # drop the instance-level decode set below so the BOM is
            # sniffed again on the next read
            del self.decode
        except AttributeError:
            pass
    def decode(self, input, errors='strict'):
        (object, consumed, byteorder) = \
            codecs.utf_32_ex_decode(input, errors, 0, False)
        if byteorder == -1:
            self.decode = codecs.utf_32_le_decode
        elif byteorder == 1:
            self.decode = codecs.utf_32_be_decode
        elif consumed>=4:
            raise UnicodeError("UTF-32 stream does not start with BOM")
        return (object, consumed)
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the 'utf-32' codec."""
    return codecs.CodecInfo(
        name='utf-32',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
Saurabh7/shogun | tests/integration/generator/featop.py | 22 | 4276 | """
Common operations on features
"""
import shogun.Features as features
import shogun.Preprocessor as preproc
WORDSTRING_ORDER=3
WORDSTRING_GAP=0
WORDSTRING_REVERSE=False
def get_features(fclass, ftype, data, *args, **kwargs):
    """Dispatch feature construction by feature class.

    @param fclass Feature class: simple, string, string_complex or wd
    @param ftype Feature type, e.g. Real, Byte
    @param data Train/test data for feature creation
    @return Dict with features train/test
    """
    if fclass == 'simple':
        return get_simple(ftype, data, *args, **kwargs)
    elif fclass == 'string':
        return get_string(ftype, data, *args, **kwargs)
    elif fclass == 'string_complex':
        return get_string_complex(ftype, data, *args, **kwargs)
    elif fclass == 'wd':
        return get_wd(data, *args, **kwargs)
    else:
        # Parenthesised raise works on both Python 2 and 3; the old
        # "raise ValueError, msg" form is a syntax error on Python 3.
        raise ValueError('Unknown feature class %s.' % fclass)
def get_simple (ftype, data, alphabet=features.DNA, sparse=False):
    """Return SimpleFeatures.
    @param ftype Feature type, e.g. Real, Byte
    @param data Train/test data for feature creation
    @param alphabet Alphabet for feature creation
    @param sparse Is feature sparse?
    @return Dict with SimpleFeatures train/test
    """
    # NOTE(review): feature class names are assembled via eval(); any
    # unexpected ftype only fails at runtime.
    if ftype=='Byte' or ftype=='Char':
        # Byte/Char features need an alphabet and a copied feature matrix
        train=eval('features.'+ftype+'Features(alphabet)')
        test=eval('features.'+ftype+'Features(alphabet)')
        train.copy_feature_matrix(data['train'])
        test.copy_feature_matrix(data['test'])
    else:
        train=eval('features.'+ftype+"Features(data['train'])")
        test=eval('features.'+ftype+"Features(data['test'])")
    if sparse:
        # wrap the dense features into their sparse counterparts
        sparse_train=eval('features.Sparse'+ftype+'Features()')
        sparse_train.obtain_from_simple(train)
        sparse_test=eval('features.Sparse'+ftype+'Features()')
        sparse_test.obtain_from_simple(test)
        return {'train':sparse_train, 'test':sparse_test}
    else:
        return {'train':train, 'test':test}
def get_string (ftype, data, alphabet=features.DNA):
    """Return StringFeatures.
    @param ftype Feature type, e.g. Real, Byte
    @param data Train/test data for feature creation
    @param alphabet Alphabet for feature creation
    @return Dict with StringFeatures train/test
    """
    train=eval('features.String'+ftype+"Features(data['train'], alphabet)")
    test=eval('features.String'+ftype+"Features(data['test'], alphabet)")
    return {'train':train, 'test':test}
def get_string_complex (ftype, data, alphabet=features.DNA,
    order=WORDSTRING_ORDER, gap=WORDSTRING_GAP, reverse=WORDSTRING_REVERSE):
    """Return complex StringFeatures.
    @param ftype Feature type, e.g. RealFeature, ByteFeature
    @param data Train/test data for feature creation
    @param alphabet Alphabet for feature creation
    @param order Order of the feature
    @param gap Gap of the feature
    @param reverse Is feature reverse?
    @return Dict with complex StringFeatures train/test
    """
    feats={}
    # Build char features first, then convert to the requested type
    charfeat=features.StringCharFeatures(data['train'], alphabet)
    feat=eval('features.String'+ftype+'Features(alphabet)')
    feat.obtain_from_char(charfeat, order-1, order, gap, reverse)
    feats['train']=feat
    charfeat=features.StringCharFeatures(data['test'], alphabet)
    feat=eval('features.String'+ftype+'Features(alphabet)')
    feat.obtain_from_char(charfeat, order-1, order, gap, reverse)
    feats['test']=feat
    if ftype=='Word' or ftype=='Ulong':
        # Word/Ulong strings additionally need a sorting preprocessor
        name='Sort'+ftype+'String'
        return add_preproc(name, feats)
    else:
        return feats
def get_wd (data, order=WORDSTRING_ORDER):
    """Return WDFeatures.
    @param data Train/test data for feature creation
    @param order Order of the feature
    @return Dict with WDFeatures train/test
    """
    feats={}
    # DNA char strings -> raw byte strings -> weighted degree features
    charfeat=features.StringCharFeatures(data['train'], features.DNA)
    bytefeat=features.StringByteFeatures(features.RAWDNA)
    bytefeat.obtain_from_char(charfeat, 0, 1, 0, False)
    feats['train']=features.WDFeatures(bytefeat, order, order)
    charfeat=features.StringCharFeatures(data['test'], features.DNA)
    bytefeat=features.StringByteFeatures(features.RAWDNA)
    bytefeat.obtain_from_char(charfeat, 0, 1, 0, False)
    feats['test']=features.WDFeatures(bytefeat, order, order)
    return feats
def add_preproc(name, feats, *args):
    """Add a preprocessor to the given features.
    @param name Name of the preprocessor
    @param feats Features train/test
    @param *args Variable argument list of the preprocessor
    @return Dict with features having a preprocessor applied
    """
    fun = eval('preproc.' + name)
    # Use a local name that does not shadow the module imported as
    # "preproc"; the old local variable "preproc" hid the module.
    instance = fun(*args)
    instance.init(feats['train'])
    feats['train'].add_preprocessor(instance)
    feats['train'].apply_preprocessor()
    feats['test'].add_preprocessor(instance)
    feats['test'].apply_preprocessor()
    return feats
| mit |
prarthitm/edxplatform | common/test/acceptance/pages/studio/textbook_upload.py | 10 | 3051 | """
Course Textbooks page.
"""
import requests
from path import Path as path
from common.test.acceptance.pages.common.utils import click_css
from common.test.acceptance.pages.studio.course_page import CoursePage
class TextbookUploadPage(CoursePage):
    """
    Course Textbooks page.
    """
    url_path = "textbooks"

    def is_browser_on_page(self):
        # The page is considered loaded once the textbook list is visible
        return self.q(css='.textbooks-list').visible

    def open_add_textbook_form(self):
        """
        Open new textbook form by clicking on new textbook button.
        """
        self.q(css='.nav-item .new-button').click()

    def get_element_text(self, selector):
        """
        Return the text of the css selector.
        """
        return self.q(css=selector)[0].text

    def set_input_field_value(self, selector, value):
        """
        Set the value of input field by selector.
        """
        self.q(css=selector)[0].send_keys(value)

    def upload_pdf_file(self, file_name):
        """
        Uploads a pdf textbook.
        """
        # If the pdf upload section has not yet been toggled on, click on the upload pdf button
        test_dir = path(__file__).abspath().dirname().dirname().dirname().dirname()  # pylint:disable=no-value-for-parameter
        file_path = test_dir + '/data/uploads/' + file_name
        click_css(self, ".edit-textbook .action-upload", require_notification=False)
        self.wait_for_element_visibility(".upload-dialog input", "Upload modal opened")
        file_input = self.q(css=".upload-dialog input").results[0]
        file_input.send_keys(file_path)
        click_css(self, ".wrapper-modal-window-assetupload .action-upload", require_notification=False)
        self.wait_for_element_absence(".modal-window-overlay", "Upload modal closed")

    def click_textbook_submit_button(self):
        """
        Submit the new textbook form and check if it is rendered properly.
        """
        self.wait_for_element_visibility('#edit_textbook_form button[type="submit"]', 'Save button visibility')
        self.q(css='#edit_textbook_form button[type="submit"]').first.click()
        self.wait_for_element_absence(".wrapper-form", "Add/Edit form closed")

    def is_view_live_link_worked(self):
        """
        Check if the view live button of textbook is working fine
        (True when the linked page responds with HTTP 200).
        """
        try:
            self.wait_for(lambda: len(self.q(css='.textbook a.view').attrs('href')) > 0, "href value present")
            response = requests.get(self.q(css='.textbook a.view').attrs('href')[0])
        except requests.exceptions.ConnectionError:
            return False
        return response.status_code == 200

    def upload_new_textbook(self):
        """
        Fills out form to upload a new textbook
        """
        self.open_add_textbook_form()
        self.upload_pdf_file('textbook.pdf')
        self.set_input_field_value('.edit-textbook #textbook-name-input', 'book_1')
        self.set_input_field_value('.edit-textbook #chapter1-name', 'chap_1')
        self.click_textbook_submit_button()
| agpl-3.0 |
onitake/ansible | lib/ansible/modules/remote_management/hpilo/hpilo_boot.py | 47 | 6630 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hpilo_boot
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Boot system using specific media through HP iLO interface
description:
- "This module boots a system through its HP iLO interface. The boot media
can be one of: cdrom, floppy, hdd, network or usb."
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
media:
description:
- The boot media to boot the system from
default: network
choices: [ "cdrom", "floppy", "hdd", "network", "normal", "rbsu", "usb" ]
image:
description:
- The URL of a cdrom, floppy or usb boot media image.
protocol://username:password@hostname:port/filename
- protocol is either 'http' or 'https'
- username:password is optional
- port is optional
state:
description:
- The state of the boot media.
- "no_boot: Do not boot from the device"
- "boot_once: Boot from the device once and then not thereafter"
- "boot_always: Boot from the device each time the server is rebooted"
- "connect: Connect the virtual media device and set to boot_always"
- "disconnect: Disconnects the virtual media device and set to no_boot"
- "poweroff: Power off the server"
default: boot_once
choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
force:
description:
- Whether to force a reboot (even when the system is already booted).
- As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
default: no
type: bool
ssl_version:
description:
- Change the ssl_version used.
default: TLSv1
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
version_added: '2.4'
requirements:
- hpilo
notes:
- To use a USB key image you need to specify floppy as boot media.
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
hpilo_boot:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
media: cdrom
image: http://some-web-server/iso/boot.iso
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- name: Power off a server
hpilo_boot:
host: YOUR_ILO_HOST
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
state: poweroff
delegate_to: localhost
'''
RETURN = '''
# Default return values
'''
import time
import warnings
try:
import hpilo
HAS_HPILO = True
except ImportError:
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def main():
    """Ansible entry point: set boot media and/or power state via HP iLO.

    Reads the module parameters, connects to the iLO interface, optionally
    attaches virtual media, then boots or powers off the server as requested.
    Exits through ``module.exit_json``/``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            login=dict(type='str', default='Administrator'),
            password=dict(type='str', default='admin', no_log=True),
            media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
            image=dict(type='str'),
            state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
            force=dict(type='bool', default=False),
            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
        )
    )

    if not HAS_HPILO:
        module.fail_json(msg='The hpilo python module is required')

    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    media = module.params['media']
    image = module.params['image']
    state = module.params['state']
    force = module.params['force']

    # Map e.g. 'TLSv1_2' onto ssl.PROTOCOL_TLSv1_2: upper() normalises the
    # user input, then replace('V', 'v') restores the lowercase 'v' that the
    # ssl constant names use.
    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))

    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)

    changed = False
    status = {}
    power_status = 'UNKNOWN'

    if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):

        # Workaround for: Error communicating with iLO: Problem manipulating EV
        try:
            ilo.set_one_time_boot(media)
        except hpilo.IloError:
            time.sleep(60)
            ilo.set_one_time_boot(media)

        # TODO: Verify if image URL exists/works
        if image:
            ilo.insert_virtual_media(media, image)
            changed = True

        if media == 'cdrom':
            ilo.set_vm_status('cdrom', state, True)
            status = ilo.get_vm_status()
            changed = True
        elif media in ('floppy', 'usb'):
            ilo.set_vf_status(state, True)
            status = ilo.get_vf_status()
            changed = True

    # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
    if state in ('boot_once', 'boot_always') or force:
        power_status = ilo.get_host_power_status()

        # Safeguard: never reboot a running server unless force=yes.
        if not force and power_status == 'ON':
            module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)

        if power_status == 'ON':
            ilo.warm_boot_server()
#            ilo.cold_boot_server()
            changed = True
        else:
            ilo.press_pwr_btn()
#            ilo.reset_server()
#            ilo.set_host_power(host_power=True)
            changed = True

    # BUG FIX: the original used "state in ('poweroff')", which is a substring
    # test against the *string* 'poweroff' (the parentheses do not create a
    # tuple), not a membership test. Use a plain equality check instead.
    elif state == 'poweroff':
        power_status = ilo.get_host_power_status()

        if not power_status == 'OFF':
            ilo.hold_pwr_btn()
#            ilo.set_host_power(host_power=False)
            changed = True

    module.exit_json(changed=changed, power=power_status, **status)


if __name__ == '__main__':
    main()
| gpl-3.0 |
a-b/PopClip-Extensions | source/OneNote/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# Detection model for the KOI8-R encoding of Russian text.
Koi8rModel = {
  'charToOrderMap': KOI8R_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "KOI8-R"
}
# Detection model for the windows-1251 encoding of Russian text.
Win1251CyrillicModel = {
  'charToOrderMap': win1251_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "windows-1251"
}
# Detection model for the ISO-8859-5 (Latin/Cyrillic) encoding of Russian text.
Latin5CyrillicModel = {
  'charToOrderMap': latin5_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-5"
}
# Detection model for the Apple MacCyrillic encoding of Russian text.
# FIX: removed the spurious trailing semicolon after the closing brace,
# which is un-Pythonic and inconsistent with the sibling model dicts.
MacCyrillicModel = {
  'charToOrderMap': macCyrillic_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "MacCyrillic"
}
# Detection model for the IBM866 (DOS Cyrillic) encoding of Russian text.
Ibm866Model = {
  'charToOrderMap': IBM866_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM866"
}
# Detection model for the IBM855 (DOS Cyrillic) encoding of Russian text.
Ibm855Model = {
  'charToOrderMap': IBM855_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM855"
}
# flake8: noqa
| mit |
landonb/chjson | test_chjson.py | 1 | 21792 | #!/usr/bin/python
# -*- coding: utf-8 -*-
## this test suite is an almost verbatim copy of the jsontest.py test suite
## found in json-py available from http://sourceforge.net/projects/json-py/
##
## Copyright (C) 2005 Patrick D. Logan
## Contact mailto:patrickdlogan@stardecisions.com
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
import itertools
import unittest
import chjson
_exception = chjson.DecodeError
# The object tests should be order-independent. They're not.
# i.e. they should test for existence of keys and values
# with read/write invariance.
def _removeWhitespace(str):
return str.replace(" ", "")
class JsonTest(unittest.TestCase):
# *** [pdl]'s tests copiedish from jsontest.py.
def testReadEmptyObject(self):
obj = chjson.decode("{}")
self.assertEqual({}, obj)
def testWriteEmptyObject(self):
s = chjson.encode({})
self.assertEqual("{}", _removeWhitespace(s))
def testReadStringValue(self):
obj = chjson.decode('{ "name" : "Patrick" }')
self.assertEqual({ "name" : "Patrick" }, obj)
def testReadEscapedQuotationMark(self):
obj = chjson.decode(r'"\""')
self.assertEqual(r'"', obj)
def testReadEscapedSolidus(self):
obj = chjson.decode(r'"\/"')
self.assertEqual(r'/', obj)
def testReadEscapedReverseSolidus(self):
obj = chjson.decode(r'"\\"')
self.assertEqual('\\', obj)
def testReadEscapedEndingQuote(self):
self.assertRaises(chjson.DecodeError, self._testReadEscapedEndingQuote)
def _testReadEscapedEndingQuote(self):
chjson.decode('"\\"')
def testReadEscapedBackspace(self):
obj = chjson.decode(r'"\b"')
self.assertEqual("\b", obj)
def testReadEscapedFormfeed(self):
obj = chjson.decode(r'"\f"')
self.assertEqual("\f", obj)
def testReadEscapedNewline(self):
obj = chjson.decode(r'"\n"')
self.assertEqual("\n", obj)
def testReadEscapedCarriageReturn(self):
obj = chjson.decode(r'"\r"')
self.assertEqual("\r", obj)
def testReadEscapedHorizontalTab(self):
obj = chjson.decode(r'"\t"')
self.assertEqual("\t", obj)
def testReadEscapedHexCharacter(self):
obj = chjson.decode(r'"\u000A"')
self.assertEqual("\n", obj)
obj = chjson.decode(r'"\u1001"')
self.assertEqual(u'\u1001', obj)
def testWriteEscapedQuotationMark(self):
s = chjson.encode(r'"')
self.assertEqual(r'"\""', _removeWhitespace(s))
def testWriteEscapedSolidus(self):
s = chjson.encode(r'/')
self.assertEqual(r'"\/"', _removeWhitespace(s))
# Same as: self.assertEqual('"\\/"', _removeWhitespace(s))
def testWriteNonEscapedSolidus(self):
s = chjson.encode(r'/')
self.assertEqual('"\\/"', _removeWhitespace(s))
def testWriteEscapedReverseSolidus(self):
s = chjson.encode("\\")
self.assertEqual(r'"\\"', _removeWhitespace(s))
def testWriteEscapedBackspace(self):
s = chjson.encode("\b")
self.assertEqual(r'"\b"', _removeWhitespace(s))
def testWriteEscapedFormfeed(self):
if sys.version_info[0] >= 3:
# Hrmm. Return gets interrupted as KeyboardInterrupt:
# File "test_chjson.py", line 111, in testWriteEscapedFormfeed
# s = chjson.encode("\f")
# KeyboardInterrupt
pass
else:
s = chjson.encode("\f")
self.assertEqual(r'"\f"', _removeWhitespace(s))
def testWriteEscapedNewline(self):
s = chjson.encode("\n")
self.assertEqual(r'"\n"', _removeWhitespace(s))
def testWriteEscapedCarriageReturn(self):
s = chjson.encode("\r")
self.assertEqual(r'"\r"', _removeWhitespace(s))
def testWriteEscapedHorizontalTab(self):
s = chjson.encode("\t")
self.assertEqual(r'"\t"', _removeWhitespace(s))
def testWriteEscapedHexCharacter(self):
s = chjson.encode(u'\u1001')
if sys.version_info[0] >= 3:
self.assertEqual(r'"ခ"', _removeWhitespace(s))
else:
#self.assertEqual(r'"\u1001"', _removeWhitespace(s))
self.assertEqual(r'u"\u1001"', _removeWhitespace(s))
def testReadBadEscapedHexCharacter(self):
self.assertRaises(_exception, self.doReadBadEscapedHexCharacter)
def doReadBadEscapedHexCharacter(self):
chjson.decode(r'"\u10K5"')
def testReadBadObjectKey(self):
self.assertRaises(_exception, self.doReadBadObjectKey)
def doReadBadObjectKey(self):
chjson.decode('{ 44 : "age" }')
def testReadBadArray(self):
self.assertRaises(_exception, self.doReadBadArray)
def doReadBadArray(self):
chjson.decode('[1,2,3,,]')
def testReadBadObjectSyntax(self):
self.assertRaises(_exception, self.doReadBadObjectSyntax)
def doReadBadObjectSyntax(self):
chjson.decode('{"age", 44}')
def testWriteStringValue(self):
s = chjson.encode({ "name" : "Patrick" })
self.assertEqual('{"name":"Patrick"}', _removeWhitespace(s))
def testReadIntegerValue(self):
obj = chjson.decode('{ "age" : 44 }')
self.assertEqual({ "age" : 44 }, obj)
def testReadNegativeIntegerValue(self):
obj = chjson.decode('{ "key" : -44 }')
self.assertEqual({ "key" : -44 }, obj)
def testReadFloatValue(self):
obj = chjson.decode('{ "age" : 44.5 }')
self.assertEqual({ "age" : 44.5 }, obj)
def testReadNegativeFloatValue(self):
obj = chjson.decode(' { "key" : -44.5 } ')
self.assertEqual({ "key" : -44.5 }, obj)
def testReadBadNumber(self):
self.assertRaises(_exception, self.doReadBadNumber)
def doReadBadNumber(self):
chjson.decode('-44.4.4')
def testReadSmallObject(self):
obj = chjson.decode('{ "name" : "Patrick", "age":44} ')
self.assertEqual({ "age" : 44, "name" : "Patrick" }, obj)
def testReadEmptyArray(self):
obj = chjson.decode('[]')
self.assertEqual([], obj)
def testWriteEmptyArray(self):
self.assertEqual("[]", _removeWhitespace(chjson.encode([])))
def testReadSmallArray(self):
obj = chjson.decode(' [ "a" , "b", "c" ] ')
self.assertEqual(["a", "b", "c"], obj)
def testWriteSmallArray(self):
self.assertEqual('[1,2,3,4]', _removeWhitespace(chjson.encode([1, 2, 3, 4])))
def testWriteSmallObject(self):
    """Encoding a two-key dict yields one of the two possible key orders."""
    s = chjson.encode({ "name" : "Patrick", "age": 44 })
    # Key order is not guaranteed, so accept either ordering.
    # IMPROVED: use assertIn instead of assertTrue(x in [...]) so a failure
    # reports the offending value instead of just "False is not true".
    self.assertIn(
        _removeWhitespace(s),
        [
            '{"name":"Patrick","age":44}',
            '{"age":44,"name":"Patrick"}',
        ],
    )
def testWriteFloat(self):
n = 3.44556677
self.assertEqual(repr(n), _removeWhitespace(chjson.encode(n)))
def testReadTrue(self):
self.assertEqual(True, chjson.decode("true"))
def testReadFalse(self):
self.assertEqual(False, chjson.decode("false"))
def testReadNull(self):
self.assertEqual(None, chjson.decode("null"))
def testWriteTrue(self):
self.assertEqual("true", _removeWhitespace(chjson.encode(True)))
def testWriteFalse(self):
self.assertEqual("false", _removeWhitespace(chjson.encode(False)))
def testWriteNull(self):
self.assertEqual("null", _removeWhitespace(chjson.encode(None)))
def testReadArrayOfSymbols(self):
self.assertEqual([True, False, None], chjson.decode(" [ true, false,null] "))
def testWriteArrayOfSymbolsFromList(self):
self.assertEqual("[true,false,null]", _removeWhitespace(chjson.encode([True, False, None])))
def testWriteArrayOfSymbolsFromTuple(self):
self.assertEqual("[true,false,null]", _removeWhitespace(chjson.encode((True, False, None))))
def testReadComplexObject(self):
src = '''
{ "name": "Patrick", "age" : 44, "Employed?" : true, "Female?" : false, "grandchildren":null }
'''
obj = chjson.decode(src)
self.assertEqual({"name":"Patrick","age":44,"Employed?":True,"Female?":False,"grandchildren":None}, obj)
def testReadLongArray(self):
src = '''[ "used",
"abused",
"confused",
true, false, null,
1,
2,
[3, 4, 5]]
'''
obj = chjson.decode(src)
self.assertEqual(["used","abused","confused", True, False, None,
1,2,[3,4,5]], obj)
def testReadIncompleteArray(self):
self.assertRaises(_exception, self.doReadIncompleteArray)
def doReadIncompleteArray(self):
chjson.decode('[')
def testReadComplexArray(self):
src = '''
[
{ "name": "Patrick", "age" : 44,
"Employed?" : true, "Female?" : false,
"grandchildren":null },
"used",
"abused",
"confused",
1,
2,
[3, 4, 5]
]
'''
obj = chjson.decode(src)
self.assertEqual([{"name":"Patrick","age":44,"Employed?":True,"Female?":False,"grandchildren":None},
"used","abused","confused",
1,2,[3,4,5]], obj)
def testWriteComplexArray(self):
    """Encode a mixed array whose first element is a five-key object."""
    obj = [{"name":"Patrick","age":44,"Employed?":True,"Female?":False,"grandchildren":None},
           "used","abused","confused",
           1,2,[3,4,5]]
    # HA! This is a hack: Programmatically generate the list of
    # acceptable answers, since order is not predictable.
    # Each kval is one already-encoded key/value pair of the leading object.
    kvals = [
        '"age":44',
        '"Female?":false',
        '"name":"Patrick"',
        '"Employed?":true',
        '"grandchildren":null',
    ]
    # Build every permutation of the object's pairs: 5! = 120 candidate
    # encodings, one of which must match regardless of dict ordering.
    acceptable_answers = set([
        ('[{%s},"used","abused","confused",1,2,[3,4,5]]' % ','.join(x))
        for x in itertools.permutations(kvals)
    ])
    self.assertTrue(_removeWhitespace(chjson.encode(obj)) in acceptable_answers)
def testReadWriteCopies(self):
orig_obj = {'a':' " '}
json_str = chjson.encode(orig_obj)
copy_obj = chjson.decode(json_str)
self.assertEqual(orig_obj, copy_obj)
self.assertEqual(True, orig_obj == copy_obj)
self.assertEqual(False, orig_obj is copy_obj)
def testStringEncoding(self):
s = chjson.encode([1, 2, 3])
if sys.version_info[0] >= 3:
encoded = "[1,2,3]"
else:
encoded = unicode("[1,2,3]", "utf-8")
self.assertEqual(encoded, _removeWhitespace(s))
def testReadEmptyObjectAtEndOfArray(self):
self.assertEqual(["a","b","c",{}],
chjson.decode('["a","b","c",{}]'))
def testReadEmptyObjectMidArray(self):
self.assertEqual(["a","b",{},"c"],
chjson.decode('["a","b",{},"c"]'))
def testReadClosingObjectBracket(self):
self.assertEqual({"a":[1,2,3]}, chjson.decode('{"a":[1,2,3]}'))
def testEmptyObjectInList(self):
obj = chjson.decode('[{}]')
self.assertEqual([{}], obj)
def testObjectWithEmptyList(self):
obj = chjson.decode('{"test": [] }')
self.assertEqual({"test":[]}, obj)
def testObjectWithNonEmptyList(self):
obj = chjson.decode('{"test": [3, 4, 5] }')
self.assertEqual({"test":[3, 4, 5]}, obj)
def testWriteLong(self):
self.assertEqual("12345678901234567890", chjson.encode(12345678901234567890))
def testWriteLongUnicode(self):
# This test causes a buffer overrun in cjson 1.0.5, on UCS4 builds.
# The string length is only resized for wide unicode characters if
# there is less than 12 bytes of space left. Padding with
# narrow-but-escaped characters prevents string resizing.
# Note that u'\U0001D11E\u1234' also breaks, but sometimes goes
# undetected.
s = chjson.encode(u'\U0001D11E\U0001D11E\U0001D11E\U0001D11E'
u'\u1234\u1234\u1234\u1234\u1234\u1234')
if sys.version_info[0] >= 3:
# Wha?
# FIXME: This has got to be wrong.......... or is this just unicode output?
self.assertEqual(
'"𝄞𝄞𝄞𝄞ሴሴሴሴሴሴ"'
, s
)
else:
#self.assertEqual(r'"\U0001d11e\U0001d11e\U0001d11e\U0001d11e'
# r'\u1234\u1234\u1234\u1234\u1234\u1234"', s)
self.assertEqual(r'u"\U0001d11e\U0001d11e\U0001d11e\U0001d11e'
r'\u1234\u1234\u1234\u1234\u1234\u1234"', s)
# *** [lb]'s chjson tests.
def testObjectWithTrailingCommaAndComment(self):
obj = chjson.decode('{"a":123,} // nothing')
self.assertEqual({"a": 123}, obj)
def testObjectWithDashAndTrailingCommaAndComment(self):
obj = chjson.decode('{"a-b": 123,} // nothing')
self.assertEqual({"a-b": 123}, obj)
def testObjectWithEmDashAndTrailingCommaAndComment(self):
obj = chjson.decode('{"a–b":123,} // nothing')
self.assertEqual({"a–b": 123}, obj)
def testObjectWithUuencodedEmDashAndTrailingCommaAndComment(self):
obj = chjson.decode('{"a\–b":123,} // nothing')
# FIXME/EXPLAIN: Is chjson really decoding this correctly? Seems weird.
self.assertEqual({"a\\u2013b": 123}, obj)
def testObjectWithBackslashAndEmDashAndTrailingCommaAndMLComment(self):
obj = chjson.decode('{"a\\–b":123,} /* nothing */ \r\n')
# NOTE: Because of how \ works, sometimes \ and \\ are the same:
# The string that Python reads and passes to chjson interprets
# \ and \\ as the same: just one backslash.
self.assertEqual({'a\\u2013b': 123}, obj)
def testObjectWithBackslashAndEndOfString(self):
self.assertRaises(chjson.DecodeError, self._testObjectWithBackslashAndEndOfString)
def _testObjectWithBackslashAndEndOfString(self):
# This gets interpreted as a string key ('a": ')
# missing a colon (and instead finds a stray 'x').
chjson.decode('{"a\\": "x"}')
def testObjectWithCRNewlineAndCommentAndNewlineAndListTuple(self):
self.assertRaises(chjson.DecodeError, self._testObjectWithCRNewlineAndCommentAndNewlineAndListTuple)
def _testObjectWithCRNewlineAndCommentAndNewlineAndListTuple(self):
chjson.decode('{"a":null, \r // nothing \r"tup":(1,"a",True,),\r }')
def testObjectWithCRNewlineAndCommentAndNewlineAndListListAndCapitalizedTrue(self):
self.assertRaises(
chjson.DecodeError,
self._testObjectWithCRNewlineAndCommentAndNewlineAndListListAndCapitalizedTrue
)
def _testObjectWithCRNewlineAndCommentAndNewlineAndListListAndCapitalizedTrue(self):
chjson.decode('{"a":null, \r // nothing \r"tup":[1,"a",True,],\r }')
def testObjectWithCRNewlineAndCommentAndNewlineAndListListAndLowercaseTrue(self):
obj = chjson.decode('{"a":null, \r // nothing \r"tup":[1,"a",true,],\r }')
self.assertEqual({"a": None, "tup": [1, "a", True],}, obj)
def testObjectWithoutLeadingZeroInNumber(self):
obj = chjson.decode('{"a":.123,} // nothing')
self.assertEqual({"a": 0.123,}, obj)
def testObjectWithEscapeLineContinuationsLoose(self):
obj = chjson.decode('{"string": "blah blah \\\n more blahs \\\r\n",} // nothing')
self.assertEqual({"string": "blah blah \n more blahs \r\n",}, obj)
def testObjectWithEscapeLineContinuations(self):
self.assertRaises(chjson.DecodeError, self._testObjectWithEscapeLineContinuations)
def _testObjectWithEscapeLineContinuations(self):
chjson.decode('{"string": "blah blah \\\n more blahs \\\r\n",} // nothing', strict=True)
def testDecodeWithNewLinesLoose_01(self):
self.assertRaises(chjson.DecodeError, self._testDecodeWithNewLinesLoose_01)
def _testDecodeWithNewLinesLoose_01(self):
chjson.decode('{"string": "blah blah \n more blahs \r\n",} // nothing')
def testDecodeWithNewLinesLoose_02(self):
# chjson accects newlines but they have to be escaped
# (you can't just hit Enter in the middle of typing a string).
obj = chjson.decode('{"string": "blah blah \\n more blahs \\r\\n",} // nothing')
self.assertEqual({"string": "blah blah \n more blahs \r\n",}, obj)
def testDecodeWithNewLinesStrict(self):
self.assertRaises(chjson.DecodeError, self._testDecodeWithNewLinesStrict)
def _testDecodeWithNewLinesStrict(self):
chjson.decode('{"string": "blah blah \n more blahs \r\n"}', strict=True)
def testObjectBasicString_01(self):
obj = chjson.decode(r'"\\"')
self.assertEqual('\\', obj)
obj = chjson.decode("\"\\\\\"")
self.assertEqual('\\', obj)
def testObjectBasicString_02(self):
obj = chjson.decode('"\"')
# This reduces to the empty string because '"\"' is interpreted
# by Python as '""'.
self.assertEqual('', obj)
def testObjectBasicString_03(self):
self.assertRaises(chjson.DecodeError, self._testObjectBasicString_03)
def _testObjectBasicString_03(self):
chjson.decode('"\\"')
def testObjectBasicString_04(self):
obj = chjson.encode("\f")
self.assertEqual('"\\f"', obj)
def testDecodeBasicList(self):
obj = chjson.decode(" [ true, false,null] ")
self.assertEqual([True, False, None], obj)
def testEncodeStringEscapes(self):
# FIXME: How do you account for ordering?
# obj = chjson.encode({"a\"b": 'zz', "22": (1,2),})
# because it's one of these:
# self.assertEqual('{"a\\"b": "zz", "22": [1, 2]}', obj)
# self.assertEqual('{"22": [1, 2], "a\\"b": "zz"}', obj)
obj = chjson.encode({"a\"b": 'zz'})
self.assertEqual('{"a\\"b": "zz"}', obj)
obj = chjson.encode({"22": (1,2),})
self.assertEqual('{"22": [1, 2]}', obj)
def testEncodeUnicodeStringLeader(self):
obj = chjson.encode([u'xx','yy'])
self.assertEqual('["xx", "yy"]', obj)
def testDecodeSolidus_01(self):
obj = chjson.decode('{"string": "\/",}')
self.assertEqual({'string': '/'}, obj)
def testDecodeSolidus_02(self):
obj = chjson.decode('{"string": "o\/g",}')
self.assertEqual({'string': 'o/g'}, obj)
def testDecodeSolidus_03(self):
obj = chjson.decode('{"string": "hello\/goodbye",}')
self.assertEqual({'string': 'hello/goodbye'}, obj)
def testDecodeSolidus_04(self):
obj = chjson.decode('{"string": "hello/goodbye",}')
self.assertEqual({'string': 'hello/goodbye'}, obj)
def testEncodeSolidus_01(self):
obj = chjson.encode("{'string': 'hello/goodbye'}")
self.assertEqual('"{\'string\': \'hello\\/goodbye\'}"', obj)
def testEncodeSolidus_02(self):
obj = chjson.encode("{'string': 'hello\/goodbye'}")
# NOTE: This might look wrong and you might think it should be: 'hello\\/goodbye'
# But [lb]'s understanding of the spec. is that \/ is for
# decoding: it's stripped on input, so if \/ is being encoded,
# it's a backslash which we need to escape, followed by a
# solidus which also needs to be escaped.
self.assertEqual('"{\'string\': \'hello\\\\\\/goodbye\'}"', obj)
def testDecodeStringEscapedSolidusAndTrailingComma(self):
self.assertRaises(chjson.DecodeError, self._testDecodeStringEscapedSolidusAndTrailingComma)
def _testDecodeStringEscapedSolidusAndTrailingComma(self):
obj = chjson.decode('{"string": "hello\/goodbye",}', strict=True)
def testDecodeStringEscapedSolidusAndNoTrailingComma(self):
obj = chjson.decode('{"string": "hello\/goodbye"}', strict=True)
self.assertEqual({"string": "hello/goodbye",}, obj)
def testDecodeObjectWithTrailingOnelineComment(self):
self.assertRaises(chjson.DecodeError, self._testDecodeObjectWithTrailingOnelineComment)
def _testDecodeObjectWithTrailingOnelineComment(self):
obj = chjson.decode('{"string": "blah blah more blahs "} // nothing', strict=True)
def testDecodeLineContinuationsAndOtherEscapes(self):
obj = chjson.decode('{"x\t\\\/": "a green \\\r cow \t mooed \f oh heavens \b\b\b",}')
self.assertEqual({'x\t\\/': 'a green \r cow \t mooed \x0c oh heavens \x08\x08\x08'}, obj)
def testSingleLineCommentAndLineContinuation_1(self):
obj = chjson.decode('{"SQL Statement": "SELECT foo; -- A comment. \\\rSELECT bar;",}')
self.assertEqual({'SQL Statement': 'SELECT foo; -- A comment. \rSELECT bar;'}, obj)
def testSingleLineCommentAndLineContinuation_2(self):
self.assertRaises(chjson.DecodeError, self._testSingleLineCommentAndLineContinuation_2)
def _testSingleLineCommentAndLineContinuation_2(self):
obj = chjson.decode('{"SQL Statement": "SELECT foo; -- A comment. \rSELECT bar;",}')
def main():
unittest.main()
if __name__ == '__main__':
main()
# vim:tw=0:ts=4:sw=4:et
| gpl-3.0 |
aktech/sympy | sympy/series/residues.py | 84 | 2386 | """
This module implements the Residue function and related tools for working
with residues.
"""
from __future__ import print_function, division
from sympy import sympify
from sympy.utilities.timeutils import timethis
@timethis('residue')
def residue(expr, x, x0):
    """
    Finds the residue of ``expr`` at the point x=x0.

    The residue is defined as the coefficient of 1/(x-x0) in the power series
    expansion about x=x0.

    Examples
    ========

    >>> from sympy import Symbol, residue, sin
    >>> x = Symbol("x")
    >>> residue(1/x, x, 0)
    1
    >>> residue(1/x**2, x, 0)
    0
    >>> residue(2/sin(x), x, 0)
    2

    This function is essential for the Residue Theorem [1].

    References
    ==========

    1. http://en.wikipedia.org/wiki/Residue_theorem
    """
    # The current implementation uses series expansion to
    # calculate it. A more general implementation is explained in
    # the section 5.6 of the Bronstein's book {M. Bronstein:
    # Symbolic Integration I, Springer Verlag (2005)}. For purely
    # rational functions, the algorithm is much easier. See
    # sections 2.4, 2.5, and 2.7 (this section actually gives an
    # algorithm for computing any Laurent series coefficient for
    # a rational function). The theory in section 2.4 will help to
    # understand why the resultant works in the general algorithm.
    # For the definition of a resultant, see section 1.4 (and any
    # previous sections for more review).

    from sympy import collect, Mul, Order, S
    expr = sympify(expr)
    # Shift the expansion point to the origin so we always expand at x = 0.
    if x0 != 0:
        expr = expr.subs(x, x + x0)
    # Retry with progressively more terms until the series is accurate
    # enough that the 1/x coefficient cannot be hidden in the Order term.
    for n in [0, 1, 2, 4, 8, 16, 32]:
        if n == 0:
            s = expr.series(x, n=0)
        else:
            s = expr.nseries(x, n=n)
        if s.has(Order) and s.removeO() == 0:
            # bug in nseries
            continue
        if not s.has(Order) or s.getn() >= 0:
            break
    if s.has(Order) and s.getn() < 0:
        raise NotImplementedError('Bug in nseries?')
    s = collect(s.removeO(), x)
    if s.is_Add:
        args = s.args
    else:
        args = [s]
    # Walk the additive terms and accumulate the coefficients of 1/x.
    res = S(0)
    for arg in args:
        c, m = arg.as_coeff_mul(x)
        m = Mul(*m)
        if not (m == 1 or m == x or (m.is_Pow and m.exp.is_Integer)):
            raise NotImplementedError('term of unexpected form: %s' % m)
        if m == 1/x:
            res += c
    return res
| bsd-3-clause |
djhenderson/byterun | tests/vmtest.py | 4 | 2839 | """Testing tools for byterun."""
from __future__ import print_function
import dis
import sys
import textwrap
import types
import unittest
import six
from byterun.pyvm2 import VirtualMachine, VirtualMachineError
# Make this false if you need to run the debugger inside a test.
CAPTURE_STDOUT = ('-s' not in sys.argv)
# Make this false to see the traceback from a failure inside pyvm2.
CAPTURE_EXCEPTION = 1
def dis_code(code):
    """Recursively disassemble `code`.

    Nested code objects found in ``co_consts`` are disassembled first,
    then `code` itself is printed and disassembled.
    """
    nested = (c for c in code.co_consts if isinstance(c, types.CodeType))
    for inner in nested:
        dis_code(inner)

    print("")
    print(code)
    dis.dis(code)
class VmTestCase(unittest.TestCase):
    """Base test case comparing byterun's VM against real CPython."""

    def assert_ok(self, code, raises=None):
        """Run `code` in our VM and in real Python: they behave the same."""
        code = textwrap.dedent(code)
        code = compile(code, "<%s>" % self.id(), "exec", 0, 1)

        # Print the disassembly so we'll see it if the test fails.
        dis_code(code)

        real_stdout = sys.stdout

        # Run the code through our VM.
        vm_stdout = six.StringIO()
        if CAPTURE_STDOUT:              # pragma: no branch
            sys.stdout = vm_stdout
        vm = VirtualMachine()

        vm_value = vm_exc = None
        try:
            vm_value = vm.run_code(code)
        except VirtualMachineError:         # pragma: no cover
            # If the VM code raises an error, show it.
            raise
        except AssertionError:              # pragma: no cover
            # If test code fails an assert, show it.
            raise
        except Exception as e:
            # Otherwise, keep the exception for comparison later.
            if not CAPTURE_EXCEPTION:       # pragma: no cover
                raise
            vm_exc = e
        finally:
            # Always restore stdout visibility of what the VM printed.
            real_stdout.write("-- stdout ----------\n")
            real_stdout.write(vm_stdout.getvalue())

        # Run the code through the real Python interpreter, for comparison.
        py_stdout = six.StringIO()
        sys.stdout = py_stdout

        py_value = py_exc = None
        globs = {}
        try:
            py_value = eval(code, globs, globs)
        except AssertionError:              # pragma: no cover
            raise
        except Exception as e:
            py_exc = e

        sys.stdout = real_stdout

        # The VM and CPython must agree on exception, stdout, and value.
        self.assert_same_exception(vm_exc, py_exc)
        self.assertEqual(vm_stdout.getvalue(), py_stdout.getvalue())
        self.assertEqual(vm_value, py_value)
        if raises:
            self.assertIsInstance(vm_exc, raises)
        else:
            self.assertIsNone(vm_exc)

    def assert_same_exception(self, e1, e2):
        """Exceptions don't implement __eq__, check it ourselves."""
        self.assertEqual(str(e1), str(e2))
        self.assertIs(type(e1), type(e2))
| mit |
fivestars/python-escpos | escpos/exceptions.py | 151 | 1974 | """ ESC/POS Exceptions classes """
import os
class Error(Exception):
    """Base class for ESC/POS errors.

    Each error carries a human-readable ``msg`` and a numeric
    ``resultcode`` intended for use as a process exit status:

       0 = success
      10 = No Barcode type defined
      20 = Barcode size values are out of range
      30 = Barcode text not supplied
      40 = Image height is too large
      50 = No string supplied to be printed
      60 = Invalid pin to send Cash Drawer pulse
    """

    # Default result code; subclasses override this class attribute, and an
    # explicit ``status`` argument overrides both.
    resultcode = 1

    def __init__(self, msg="", status=None):
        Exception.__init__(self)
        self.msg = msg
        if status is not None:
            self.resultcode = status

    def __str__(self):
        return self.msg


class BarcodeTypeError(Error):
    """No (or an unsupported) barcode type was specified."""

    resultcode = 10

    def __str__(self):
        return "No Barcode type is defined"


class BarcodeSizeError(Error):
    """Barcode size values are outside the printable range."""

    resultcode = 20

    def __str__(self):
        return "Barcode size is out of range"


class BarcodeCodeError(Error):
    """No code string was supplied for the barcode."""

    resultcode = 30

    def __str__(self):
        return "Code was not supplied"


class ImageSizeError(Error):
    """The image is taller than the 255px the printer can handle."""

    resultcode = 40

    def __str__(self):
        return "Image height is longer than 255px and can't be printed"


class TextError(Error):
    """``text()`` was called without a string to print."""

    resultcode = 50

    def __str__(self):
        return "Text string must be supplied to the text() method"


class CashDrawerError(Error):
    """An invalid pin was given for the cash-drawer kick pulse."""

    resultcode = 60

    def __str__(self):
        return "Valid pin must be set to send pulse"
| gpl-3.0 |
apyrgio/synnefo | snf-pithos-backend/pithos/backends/lib/hashfiler/context_archipelago.py | 10 | 5223 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os import SEEK_CUR, SEEK_SET
from archipelago.common import (
Request,
string_at,
)
from pithos.workers import monkey
monkey.patch_Request()
_zeros = ''


def zeros(nr):
    """Return a string of ``nr`` NUL ('\\0') characters.

    A module-level cache is kept and resized so that after every call it
    is exactly ``nr`` characters long; repeated calls with the same size
    reuse the same string object.
    """
    global _zeros
    current = len(_zeros)
    if nr > current:
        # Grow the cache with the missing NULs.
        _zeros = _zeros + '\0' * (nr - current)
    elif nr < current:
        # Trim the cache down to the requested length.
        _zeros = _zeros[:nr]
    return _zeros
def file_sync_write_chunks(archipelagoobject, chunksize, offset,
                           chunks, size=None):
    """Write given chunks to the given buffered file object.

    Writes never span across chunk boundaries.
    If size is given stop after or pad until size bytes have been written.
    """
    padding = 0
    # Byte position of the first write; `offset` counts whole chunks.
    cursize = chunksize * offset
    archipelagoobject.seek(cursize)
    for chunk in chunks:
        if padding:
            # The previous chunk was short: pad it up to the chunk
            # boundary before writing the next one.
            archipelagoobject.sync_write(buffer(zeros(chunksize), 0, padding))
        if size is not None and cursize + chunksize >= size:
            # Last chunk allowed by `size`: truncate it and stop.
            chunk = chunk[:chunksize - (cursize - size)]
            archipelagoobject.sync_write(chunk)
            cursize += len(chunk)
            break
        archipelagoobject.sync_write(chunk)
        padding = chunksize - len(chunk)
        # NOTE(review): cursize is not advanced on this (non-final) path,
        # so the `size` cutoff and the final padding below are computed
        # from the initial position only — verify against upstream.
    # Trailing padding needed to reach `size` (<= 0 means nothing to do).
    padding = size - cursize if size is not None else 0
    if padding <= 0:
        return
    q, r = divmod(padding, chunksize)
    for x in xrange(q):
        archipelagoobject.sync_write(zeros(chunksize))
    archipelagoobject.sync_write(buffer(zeros(chunksize), 0, r))
def file_sync_read_chunks(archipelagoobject, chunksize, nr, offset=0):
    """Read and yield groups of chunks from a buffered file object at offset.

    Reads never span across chunksize boundaries.
    Yields at most ``nr`` chunks of up to ``chunksize`` bytes each,
    starting ``offset`` chunks into the object.
    """
    archipelagoobject.seek(offset * chunksize)
    while nr:
        remains = chunksize
        chunk = ''
        # Accumulate partial reads until a full chunk is gathered.
        while 1:
            s = archipelagoobject.sync_read(remains)
            if not s:
                # End of object: yield any partial chunk and stop.
                if chunk:
                    yield chunk
                return
            chunk += s
            remains -= len(s)
            if remains <= 0:
                break
        yield chunk
        nr -= 1
class ArchipelagoObject(object):
    """File-like wrapper around an Archipelago object.

    Provides seek/tell plus synchronous (blocking) reads and writes,
    issuing one Archipelago request per operation via an I/O-context
    pool.
    """

    __slots__ = ("name", "ioctx_pool", "dst_port", "create", "offset")

    def __init__(self, name, ioctx_pool, dst_port=None, create=0):
        self.name = name              # target Archipelago object name
        self.ioctx_pool = ioctx_pool  # pool supplying per-request I/O contexts
        self.create = create          # NOTE(review): stored but never read here
        self.dst_port = dst_port      # destination port for requests
        self.offset = 0               # current file position in bytes

    def __enter__(self):
        return self

    def __exit__(self, exc, arg, trace):
        # No resources to release; never suppress exceptions.
        return False

    def seek(self, offset, whence=SEEK_SET):
        """Move the position; only SEEK_SET and SEEK_CUR are handled."""
        if whence == SEEK_CUR:
            offset += self.offset
        self.offset = offset
        return offset

    def tell(self):
        """Return the current position in bytes."""
        return self.offset

    def truncate(self, size):
        raise NotImplementedError("File truncation is not implemented yet \
in archipelago")

    def sync_write(self, data):
        """Write `data` at the current offset, blocking until completion.

        Raises IOError on request failure; advances the offset on success.
        """
        ioctx = self.ioctx_pool.pool_get()
        req = Request.get_write_request(ioctx, self.dst_port, self.name,
                                        data=data, offset=self.offset,
                                        datalen=len(data))
        req.submit()
        req.wait()
        ret = req.success()
        req.put()
        self.ioctx_pool.pool_put(ioctx)
        if ret:
            self.offset += len(data)
        else:
            raise IOError("archipelago: Write request error")

    def sync_write_chunks(self, chunksize, offset, chunks, size=None):
        """Chunked write helper; see file_sync_write_chunks."""
        return file_sync_write_chunks(self, chunksize, offset, chunks, size)

    def sync_read(self, size):
        """Read up to `size` bytes from the current offset (blocking).

        Loops issuing read requests until `size` bytes are gathered or a
        failed/empty read signals the end of the object.
        """
        read = Request.get_read_request
        data = ''
        datalen = 0
        dsize = size
        while 1:
            ioctx = self.ioctx_pool.pool_get()
            req = read(ioctx, self.dst_port,
                       self.name, size=dsize - datalen, offset=self.offset)
            req.submit()
            req.wait()
            ret = req.success()
            if ret:
                # NOTE(review): assumes the request filled the full
                # dsize - datalen bytes — verify short-read handling.
                s = string_at(req.get_data(), dsize - datalen)
            else:
                s = None
            req.put()
            self.ioctx_pool.pool_put(ioctx)
            if not s:
                break
            data += s
            datalen += len(s)
            self.offset += len(s)
            if datalen >= size:
                break
        return data

    def sync_read_chunks(self, chunksize, nr, offset=0):
        """Chunked read helper; see file_sync_read_chunks."""
        return file_sync_read_chunks(self, chunksize, nr, offset)
| gpl-3.0 |
coderabhishek/scrapy | tests/test_engine.py | 92 | 8653 | """
Scrapy engine tests
This starts a testing web server (using twisted.server.Site) and then crawls it
with the Scrapy crawler.
To view the testing web server in a browser you can start it by running this
module with the ``runserver`` argument::
python test_engine.py runserver
"""
from __future__ import print_function
import sys, os, re
from six.moves.urllib.parse import urlparse
from twisted.internet import reactor, defer
from twisted.web import server, static, util
from twisted.trial import unittest
from scrapy import signals
from scrapy.utils.test import get_crawler
from pydispatch import dispatcher
from tests import tests_datadir
from scrapy.spiders import Spider
from scrapy.item import Item, Field
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
from scrapy.utils.signal import disconnect_all
class TestItem(Item):
    # Scraped item with the fields extracted by TestSpider.parse_item.
    name = Field()
    url = Field()
    price = Field()
class TestSpider(Spider):
    """Spider for the local test site: follows itemNNN.html links and
    scrapes name/price from each item page."""

    name = "scrapytest.org"
    allowed_domains = ["scrapytest.org", "localhost"]

    itemurl_re = re.compile("item\d+.html")
    name_re = re.compile("<h1>(.*?)</h1>", re.M)
    price_re = re.compile(">Price: \$(.*?)<", re.M)

    # Item class instantiated in parse_item (overridden in subclasses).
    item_cls = TestItem

    def parse(self, response):
        """Follow every extracted link whose URL looks like an item page."""
        xlink = LinkExtractor()
        itemre = re.compile(self.itemurl_re)
        for link in xlink.extract_links(response):
            if itemre.search(link.url):
                yield Request(url=link.url, callback=self.parse_item)

    def parse_item(self, response):
        """Build an item from the page body; name/price are optional."""
        item = self.item_cls()
        m = self.name_re.search(response.body)
        if m:
            item['name'] = m.group(1)
        item['url'] = response.url
        m = self.price_re.search(response.body)
        if m:
            item['price'] = m.group(1)
        return item
class TestDupeFilterSpider(TestSpider):
    # Requests are built with the default dont_filter=False, so duplicate
    # start URLs get filtered by the dupe filter.
    def make_requests_from_url(self, url):
        return Request(url)  # dont_filter=False
class DictItemsSpider(TestSpider):
    # Same spider, but yields plain dicts instead of TestItem instances.
    item_cls = dict
def start_test_site(debug=False):
    """Serve tests/data/test_site on a random localhost port.

    Returns the listening twisted port object (use .getHost().port for
    the port number and .stopListening() to shut it down).
    """
    root_dir = os.path.join(tests_datadir, "test_site")
    r = static.File(root_dir)
    # /redirect issues an HTTP redirect to the static /redirected page.
    r.putChild("redirect", util.Redirect("/redirected"))
    r.putChild("redirected", static.Data("Redirected here", "text/plain"))

    # Port 0 lets the OS pick a free port.
    port = reactor.listenTCP(0, server.Site(r), interface="127.0.0.1")
    if debug:
        print("Test server running at http://localhost:%d/ - hit Ctrl-C to finish." \
            % port.getHost().port)
    return port
class CrawlerRun(object):
    """A class to run the crawler and keep track of events occurred"""

    def __init__(self, spider_class):
        self.spider = None
        self.respplug = []          # (response, spider) pairs downloaded
        self.reqplug = []           # (request, spider) pairs scheduled
        self.reqdropped = []        # (request, spider) pairs dropped
        self.itemresp = []          # (item, response) pairs scraped
        self.signals_catched = {}   # signal -> kwargs of its last emission
        self.spider_class = spider_class

    def run(self):
        """Start the test site and crawl it; return a Deferred that fires
        when the engine stops."""
        self.port = start_test_site()
        self.portno = self.port.getHost().port

        start_urls = [self.geturl("/"), self.geturl("/redirect"),
                      self.geturl("/redirect")]  # a duplicate

        # Record every public signal defined by the signals module.
        for name, signal in vars(signals).items():
            if not name.startswith('_'):
                dispatcher.connect(self.record_signal, signal)

        self.crawler = get_crawler(self.spider_class)
        self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
        self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
        self.crawler.signals.connect(self.request_dropped, signals.request_dropped)
        self.crawler.signals.connect(self.response_downloaded, signals.response_downloaded)
        self.crawler.crawl(start_urls=start_urls)
        self.spider = self.crawler.spider

        self.deferred = defer.Deferred()
        dispatcher.connect(self.stop, signals.engine_stopped)
        return self.deferred

    def stop(self):
        """Stop the test site and disconnect everything run() connected."""
        self.port.stopListening()
        for name, signal in vars(signals).items():
            if not name.startswith('_'):
                disconnect_all(signal)
        self.deferred.callback(None)

    def geturl(self, path):
        return "http://localhost:%s%s" % (self.portno, path)

    def getpath(self, url):
        u = urlparse(url)
        return u.path

    def item_scraped(self, item, spider, response):
        self.itemresp.append((item, response))

    def request_scheduled(self, request, spider):
        self.reqplug.append((request, spider))

    def request_dropped(self, request, spider):
        self.reqdropped.append((request, spider))

    def response_downloaded(self, response, spider):
        self.respplug.append((response, spider))

    def record_signal(self, *args, **kwargs):
        """Record a signal and its parameters"""
        signalargs = kwargs.copy()
        sig = signalargs.pop('signal')
        signalargs.pop('sender', None)
        self.signals_catched[sig] = signalargs
class EngineTest(unittest.TestCase):
    """End-to-end engine test against the local test site."""

    @defer.inlineCallbacks
    def test_crawler(self):
        """Crawl with both item flavours, then with the dupe-filter spider."""
        for spider in TestSpider, DictItemsSpider:
            self.run = CrawlerRun(spider)
            yield self.run.run()
            self._assert_visited_urls()
            self._assert_scheduled_requests(urls_to_visit=8)
            self._assert_downloaded_responses()
            self._assert_scraped_items()
            self._assert_signals_catched()

        self.run = CrawlerRun(TestDupeFilterSpider)
        yield self.run.run()
        # One duplicate start URL is filtered, so one request fewer.
        self._assert_scheduled_requests(urls_to_visit=7)
        self._assert_dropped_requests()

    def _assert_visited_urls(self):
        must_be_visited = ["/", "/redirect", "/redirected",
                           "/item1.html", "/item2.html", "/item999.html"]
        urls_visited = set([rp[0].url for rp in self.run.respplug])
        urls_expected = set([self.run.geturl(p) for p in must_be_visited])
        assert urls_expected <= urls_visited, "URLs not visited: %s" % list(urls_expected - urls_visited)

    def _assert_scheduled_requests(self, urls_to_visit=None):
        self.assertEqual(urls_to_visit, len(self.run.reqplug))

        paths_expected = ['/item999.html', '/item2.html', '/item1.html']

        urls_requested = set([rq[0].url for rq in self.run.reqplug])
        urls_expected = set([self.run.geturl(p) for p in paths_expected])
        assert urls_expected <= urls_requested
        # Every scheduled request either got dropped or got a response.
        scheduled_requests_count = len(self.run.reqplug)
        dropped_requests_count = len(self.run.reqdropped)
        responses_count = len(self.run.respplug)
        self.assertEqual(scheduled_requests_count,
                         dropped_requests_count + responses_count)

    def _assert_dropped_requests(self):
        self.assertEqual(len(self.run.reqdropped), 1)

    def _assert_downloaded_responses(self):
        # response tests
        self.assertEqual(8, len(self.run.respplug))
        for response, _ in self.run.respplug:
            if self.run.getpath(response.url) == '/item999.html':
                self.assertEqual(404, response.status)
            if self.run.getpath(response.url) == '/redirect':
                self.assertEqual(302, response.status)

    def _assert_scraped_items(self):
        self.assertEqual(2, len(self.run.itemresp))
        for item, response in self.run.itemresp:
            self.assertEqual(item['url'], response.url)
            if 'item1.html' in item['url']:
                self.assertEqual('Item 1 name', item['name'])
                self.assertEqual('100', item['price'])
            if 'item2.html' in item['url']:
                self.assertEqual('Item 2 name', item['name'])
                self.assertEqual('200', item['price'])

    def _assert_signals_catched(self):
        """All engine/spider lifecycle signals fired with expected args."""
        assert signals.engine_started in self.run.signals_catched
        assert signals.engine_stopped in self.run.signals_catched
        assert signals.spider_opened in self.run.signals_catched
        assert signals.spider_idle in self.run.signals_catched
        assert signals.spider_closed in self.run.signals_catched

        self.assertEqual({'spider': self.run.spider},
                         self.run.signals_catched[signals.spider_opened])
        self.assertEqual({'spider': self.run.spider},
                         self.run.signals_catched[signals.spider_idle])
        self.run.signals_catched[signals.spider_closed].pop('spider_stats', None)  # XXX: remove for scrapy 0.17
        self.assertEqual({'spider': self.run.spider, 'reason': 'finished'},
                         self.run.signals_catched[signals.spider_closed])
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
start_test_site(debug=True)
reactor.run()
| bsd-3-clause |
mejwaller/MothDB | py/db1.py | 1 | 1122 | import mysql.connector
# Connect to the local `mothrecs` MySQL database and list its tables.
cnx = mysql.connector.connect(user='martin', password='',
                              host='127.0.0.1',
                              database='mothrecs')

curA = cnx.cursor()
try:
    showtables = "SHOW TABLES"
    curA.execute(showtables)
    # Parentheses keep this line valid on both Python 2 and 3.
    print(curA.fetchall())
finally:
    # Bug fix: the original closed the connection twice; close the cursor
    # first and the connection exactly once, even if the query fails.
    curA.close()
    cnx.close()
| gpl-2.0 |
bobeirasa/virtualenvs | pygeckozabbix/lib/python2.7/site-packages/requests/packages/charade/utf8prober.py | 205 | 2728 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
    """Charset prober that feeds bytes through a UTF-8 coding state machine."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(UTF8SMModel)
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        self._mCodingSM.reset()
        # Number of complete multi-byte characters seen so far.
        self._mNumOfMBChar = 0

    def get_charset_name(self):
        return "utf-8"

    def feed(self, aBuf):
        """Consume a buffer; return the detection state afterwards."""
        for c in aBuf:
            codingState = self._mCodingSM.next_state(c)
            if codingState == constants.eError:
                # Invalid UTF-8 byte sequence: definitely not UTF-8.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character was consumed; count it if multi-byte.
                if self._mCodingSM.get_current_charlen() >= 2:
                    self._mNumOfMBChar += 1

        if self.get_state() == constants.eDetecting:
            # Enough evidence accumulated: short-circuit to "found".
            if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Confidence grows with each multi-byte character observed:
        # 1 - 0.99 * ONE_CHAR_PROB**n for n < 6, else a flat 0.99.
        unlike = 0.99
        if self._mNumOfMBChar < 6:
            for i in range(0, self._mNumOfMBChar):
                unlike = unlike * ONE_CHAR_PROB
            return 1.0 - unlike
        else:
            return unlike
| mit |
jwinzer/OpenSlides | server/openslides/motions/numbering.py | 7 | 11862 | from collections import defaultdict
from typing import Any, Dict, List, Tuple
from django.db import transaction
from django.db.models import Model
from ..core.config import config
from ..utils.rest_api import ValidationError
from .models import Category, Motion
__all__ = ["numbering"]
def numbering(main_category: Category) -> List[Model]:
    """
    Given the _main category_ by params the numbering of all motions in this or
    any subcategory is done. The numbering behaves as defined by the following rules:
    - The set of the main category with all child categories are _affected categories_.
    - All motions in the affected categories are _affected motions_.
    - All affected motions are numbered with respect to 'category_weight' ordering.
    - Checks, if parents of all affected amendments, are also affected.
      So, all parents of every affected amendment must also be affected. If not,
      an error will be returned.
    - If a category does not have a prefix, the prefix of the first parent with
      one will be taken. This is just checked until the main category is reached.
      So, if the main category does not have a prefix, no prefix will be used.
    - If a motion should get a new identifier that a non-affected motion already
      has, an error will be returned. It is ensured, that all identifiers
      generated with one call will be unique.
    - Identifier of non-amendments: <A><B><C>
      <A>: Categories calculated prefix (see above; note: can be blank)
      <B>: '' if blanks are disabled or <A> is blank, else ' '
      <C>: Motion counter (unique existing counter for all affected motions)
    - Amendments: An amendment will get the following identifier: <A><B><C><D>
      <A>: Parent's _new_ identifier
      <B>: '', if blanks are disabled, else ' '
      <C>: Amendment prefix
      <D>: Amendment counter (counter for amendments of one parent)
    - Both counters may be filled with leading zeros according to `Motion.extend_identifier_number`
    - On errors, ValidationErrors with appropriate content will be raised.

    Returns the list of changed model instances (motions plus their agenda
    items and lists of speakers) so the caller can trigger autoupdates.
    """
    # If the config is false, don't use blanks when building identifier.
    without_blank = not config["motions_identifier_with_blank"]
    # Get child categories (to build affected categories) and precalculate all prefixes.
    child_categories = get_child_categories(main_category)
    category_prefix_mapping = get_category_prefix_mapping(
        main_category, child_categories, without_blank
    )
    # put together to affected_categories
    affected_categories = [main_category]
    affected_categories.extend(child_categories)
    # Get all affected motions
    affected_motions = get_affected_motions(affected_categories)
    # The affected_motion_ids is used for a fast lookup later.
    affected_motion_ids = set([motion.id for motion in affected_motions])
    # Assert, that we do have some motions.
    if len(affected_motions) == 0:
        raise ValidationError({"detail": "No motions were numbered"})
    # To ensure, that amendments will get the _new_ identifier of the parent, the affected
    # motions are split in disjoint lists (keep the ordering right) by their "amendment level"
    # in the motion (amendment) tree. There are at most len(affected_motions) levels.
    # In this step it is also ensured, that every parent of an amendment is an affected motion.
    max_amendment_level, amendment_level_mapping = get_amendment_level_mapping(
        affected_motions, affected_motion_ids, main_category
    )
    # Generate new identifiers.
    new_identifier_mapping = generate_new_identifiers(
        max_amendment_level,
        amendment_level_mapping,
        category_prefix_mapping,
        without_blank,
    )
    # Check, if all new identifiers are not used in any non-affected motion.
    check_new_identifiers_for_conflicts(new_identifier_mapping, affected_motion_ids)
    # Change all identifiers
    return update_identifiers(affected_motions, new_identifier_mapping)
def get_child_categories(main_category: Category) -> List[Category]:
    """Return all (transitive) child categories of main_category.

    The result is ordered like a pre-order (flat-tree) traversal, with
    siblings sorted by their `weight`.
    """
    # -> generate a mapping from a category id to all it's children with respect to `weight`:
    category_children_mapping: Dict[int, List[Category]] = defaultdict(list)
    for category in Category.objects.exclude(parent=None).order_by("weight").all():
        category_children_mapping[category.parent_id].append(category)
    # - collect child categories
    child_categories = []  # they are ordered like a flat tree would be.
    # NOTE: queue aliases the list stored in the mapping, so reverse() below
    # mutates the mapping entry too; harmless, since each entry is consumed
    # exactly once. Children are pushed reversed so that popping from the end
    # yields them back in ascending weight order (depth-first traversal).
    queue = category_children_mapping[main_category.id]
    queue.reverse()
    while len(queue) > 0:
        category = queue.pop()
        child_categories.append(category)
        children = category_children_mapping[category.id]
        if children:
            children.reverse()
            queue.extend(children)
    return child_categories
def get_category_prefix_mapping(
    main_category: Category, child_categories: List[Category], without_blank: bool
) -> Dict[int, str]:
    """Precalculate the identifier prefix of every affected category.

    A category without an own prefix inherits the already computed prefix of
    its parent; the main category is the anchor of that lookup (if it has no
    prefix, the empty string is used). Unless without_blank is set, a
    trailing blank is appended to each non-empty prefix.
    """
    def formatted(raw_prefix):
        # Apply the blank-handling rule to a raw category prefix.
        if not raw_prefix:
            return ""
        return raw_prefix if without_blank else f"{raw_prefix} "
    prefix_mapping: Dict[int, str] = {
        main_category.id: formatted(main_category.prefix)
    }
    # child_categories is an ordered flat tree, so a parent's prefix is
    # always present in the mapping before any of its children are visited.
    for category in child_categories:
        if category.prefix:
            prefix_mapping[category.id] = formatted(category.prefix)
        else:
            prefix_mapping[category.id] = prefix_mapping[category.parent_id]
    return prefix_mapping
def get_affected_motions(affected_categories) -> List[Motion]:
    """Collect all motions of the given categories.

    The motions keep the order of affected_categories and are sorted by
    `category_weight` (ties broken by id) within each category.
    """
    # Affected motions: A list of motions from all categories in the right category order
    # and sorted with `category_weight` per category.
    affected_motions = []
    for category in affected_categories:
        # prefetch related objects that update_identifiers() touches later,
        # to avoid per-motion queries.
        motions = (
            Motion.objects.prefetch_related(
                "agenda_items", "lists_of_speakers", "parent"
            )
            .filter(category=category)
            .order_by("category_weight", "id")
        )
        affected_motions.extend(list(motions))
    return affected_motions
def get_amendment_level_mapping(
    affected_motions, affected_motion_ids, main_category
) -> Tuple[int, Dict[int, List[Motion]]]:
    """Group the affected motions by their amendment level.

    Returns the maximum level encountered and a mapping from level to the
    motions on that level (relative order within a level is preserved).
    Raises a ValidationError as soon as an affected amendment's lead motion
    is not itself an affected motion.
    """
    levels: Dict[int, List[Motion]] = defaultdict(list)
    highest_level = 0
    for motion in affected_motions:
        current_level = motion.amendment_level
        levels[current_level].append(motion)
        highest_level = max(highest_level, current_level)
        parent_id = motion.parent_id
        if parent_id is not None and parent_id not in affected_motion_ids:
            # An amendment's identifier is derived from its parent's *new*
            # identifier, so the parent must be renumbered as well.
            raise ValidationError(
                {
                    "detail": 'Amendment "{0}" cannot be numbered, because '
                    "it's lead motion ({1}) is not in category "
                    "{2} or any subcategory.",
                    "args": [str(motion), str(motion.parent), str(main_category)],
                }
            )
    return highest_level, levels
def generate_new_identifiers(
    max_amendment_level, amendment_level_mapping, category_prefix_mapping, without_blank
) -> Dict[int, Any]:
    """Build the new identifier (and counter value) for every affected motion.

    Lead motions share one global counter; amendments get a per-parent
    counter. Amendments are processed level by level so that a parent's
    *new* identifier is always available before its amendments are handled.
    """
    new_identifier_mapping = {}
    # Lead motions: <category prefix><global counter>
    for counter, lead_motion in enumerate(amendment_level_mapping[0], start=1):
        # without_blank is already baked into the precalculated prefix.
        prefix = category_prefix_mapping[lead_motion.category_id]
        new_identifier_mapping[lead_motion.id] = {
            "identifier": f"{prefix}{Motion.extend_identifier_number(counter)}",
            "number": counter,
        }
    # Amendments: <parent's new identifier>[ ]<amendment prefix>[ ]<counter>
    next_number: Dict[int, int] = defaultdict(
        lambda: 1
    )  # maps amendment parent ids to their next counter value.
    for level in range(1, max_amendment_level + 1):
        for amendment in amendment_level_mapping[level]:
            number = next_number[amendment.parent_id]
            next_number[amendment.parent_id] = number + 1
            parent_identifier = new_identifier_mapping[amendment.parent_id][
                "identifier"
            ]
            amendment_prefix = config["motions_amendments_prefix"]
            if without_blank:
                prefix = f"{parent_identifier}{amendment_prefix}"
            else:
                prefix = f"{parent_identifier} {amendment_prefix} "
            new_identifier_mapping[amendment.id] = {
                "identifier": f"{prefix}{Motion.extend_identifier_number(number)}",
                "number": number,
            }
    return new_identifier_mapping
def check_new_identifiers_for_conflicts(
    new_identifier_mapping, affected_motion_ids
) -> None:
    """Raise a ValidationError if any new identifier is already taken by a
    motion outside the affected set (affected motions will be cleared and
    rewritten, so they cannot conflict)."""
    all_new_identifiers = [
        entry["identifier"] for entry in new_identifier_mapping.values()
    ]
    # Check, if any new identifier exists in any non-affected motion
    conflicting_motions = Motion.objects.exclude(id__in=affected_motion_ids).filter(
        identifier__in=all_new_identifiers
    )
    if conflicting_motions.exists():
        # We do have a conflict. Build a nice error message.
        conflicting_motion = conflicting_motions.first()
        if conflicting_motion.category:
            raise ValidationError(
                {
                    "detail": 'Numbering aborted because the motion identifier "{0}" already exists in category {1}.',
                    "args": [
                        conflicting_motion.identifier,
                        str(conflicting_motion.category),
                    ],
                }
            )
        else:
            raise ValidationError(
                {
                    "detail": 'Numbering aborted because the motion identifier "{0}" already exists.',
                    "args": [conflicting_motion.identifier],
                }
            )
def update_identifiers(affected_motions, new_identifier_mapping) -> List[Model]:
    """Persist the new identifiers and return all changed model instances.

    Runs in two passes inside one transaction: first all old identifiers are
    cleared (otherwise a motion could temporarily collide with another
    affected motion's new identifier and violate uniqueness), then the new
    identifiers are written.
    """
    # Actually update the identifiers now.
    with transaction.atomic():
        changed_instances = []
        # Remove old identifiers, to avoid conflicts within the affected motions
        for motion in affected_motions:
            motion.identifier = None
            # This line is to skip agenda item and list of speakers autoupdate.
            # See agenda/signals.py.
            motion.set_skip_autoupdate_agenda_item_and_list_of_speakers()
            motion.save(skip_autoupdate=True)
        # Set the identifier
        for motion in affected_motions:
            motion.identifier = new_identifier_mapping[motion.id]["identifier"]
            motion.identifier_number = new_identifier_mapping[motion.id]["number"]
            motion.set_skip_autoupdate_agenda_item_and_list_of_speakers()
            motion.save(skip_autoupdate=True)
            # Collect everything the caller must push through the autoupdate.
            changed_instances.append(motion)
            if motion.agenda_item:
                changed_instances.append(motion.agenda_item)
            changed_instances.append(motion.list_of_speakers)
    return changed_instances
| mit |
thomassa/xen-api | scripts/examples/python/shell.py | 25 | 3535 | #!/usr/bin/env python
# Copyright (c) 2006-2008 Citrix Systems.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import atexit
import cmd
import pprint
import readline
import shlex
import string
import sys
import XenAPI
def logout():
    """Best-effort logout of the global XenAPI session at interpreter exit."""
    try:
        session.xenapi.session.logout()
    except:
        # Bare except on purpose: never let logout errors disturb shutdown
        # (the session may never have been established).
        pass
atexit.register(logout)
class Shell(cmd.Cmd):
    """Interactive loop that maps '<Class.method> <args...>' input lines
    directly onto XenAPI requests. NOTE: Python 2 code (see do_EOF)."""
    def __init__(self):
        cmd.Cmd.__init__(self)
        # Include '.' so 'VM.get_all' is completed as a single word.
        self.identchars = string.ascii_letters + string.digits + '_.'
        self.prompt = "xe> "
    def preloop(self):
        cmd.Cmd.preloop(self)
        readline.set_completer_delims(' ')
    def default(self, line):
        """Any unrecognised line is sent to the server as an API request."""
        words = shlex.split(line)
        if len(words) > 0:
            res = session.xenapi_request(words[0], tuple(words[1:]))
            if res is not None and res != '':
                pprint.pprint(res)
        return False
    def completedefault(self, text, line, begidx, endidx):
        """Tab-complete object UUIDs for the first argument of a call."""
        words = shlex.split(line[:begidx])
        clas, func = words[0].split('.')
        # Only the first argument of reference-taking calls is a UUID.
        if len(words) > 1 or \
               func.startswith('get_by_') or \
               func == 'get_all':
            return []
        uuids = session.xenapi_request('%s.get_all' % clas, ())
        return [u + " " for u in uuids if u.startswith(text)]
    def emptyline(self):
        # Do nothing (cmd's default would repeat the previous command).
        pass
    def do_EOF(self, line):
        # Python 2 print statement: emit a newline before exiting on Ctrl-D.
        print
        sys.exit(0)
def munge_types(value):
    """Convert a command-line argument string into a typed value.

    "True"/"False" become booleans, strings of decimal digits become ints,
    and anything else is returned unchanged. (The parameter was renamed from
    'str', which shadowed the builtin; the only call site is positional.)
    """
    if value == "True":
        return True
    elif value == "False":
        return False
    try:
        return int(value)
    except ValueError:
        # Not an integer literal: pass the raw string through. ValueError is
        # the only error int() raises for a str argument (was a bare except).
        return value
if __name__ == "__main__":
    # NOTE: Python 2 only - print statements, the deprecated '<>' operator
    # and old-style 'except X, e' syntax below.
    if len(sys.argv) < 2:
        print "Usage:"
        print sys.argv[0], " <url> <username> <password>"
        sys.exit(1)
    # A url of "-" means "use the local Unix domain socket"; it needs no
    # credentials, otherwise url, username and password are all required.
    if sys.argv[1] <> "-" and len(sys.argv) < 4:
        print "Usage:"
        print sys.argv[0], " <url> <username> <password>"
        sys.exit(1)
    if sys.argv[1] <> "-":
        url = sys.argv[1]
        username = sys.argv[2]
        password = sys.argv[3]
        session = XenAPI.Session(url)
        session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-shell.py")
        cmdAt = 4
    else:
        session = XenAPI.xapi_local()
        session.xenapi.login_with_password("", "", "1.0", "xen-api-scripts-shell.py")
        cmdAt = 2
    # We want to support directly executing the cmd line,
    # where appropriate
    if len(sys.argv) > cmdAt:
        # One-shot mode: run the single request given on the command line.
        cmd = sys.argv[cmdAt]
        params = [munge_types(x) for x in sys.argv[(cmdAt + 1):]]
        try:
            print >> sys.stdout, session.xenapi_request(cmd, tuple(params))
        except XenAPI.Failure, x:
            print >> sys.stderr, x
            sys.exit(2)
        except Exception, e:
            print >> sys.stderr, e
            sys.exit(3)
        sys.exit(0)
    else:
        # Interactive mode.
        Shell().cmdloop('Welcome to the XenServer shell. (Try "VM.get_all")')
| lgpl-2.1 |
hassoon3/odoo | addons/account/wizard/account_move_line_select.py | 385 | 2800 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_move_line_select(osv.osv_memory):
    """
    Account move line select

    Wizard that opens the journal-items list view filtered to the account in
    context['active_id'] and to the periods of the draft fiscal years.
    """
    _name = "account.move.line.select"
    _description = "Account move line select"
    def open_window(self, cr, uid, ids, context=None):
        """Return the act_window dict for the filtered journal items.

        Periods come from context['fiscalyear'] if given, otherwise from all
        draft fiscal years. For consolidation accounts the child accounts
        are included via 'consolidate_children'.
        """
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        account_obj = self.pool.get('account.account')
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        if context is None:
            context = {}
        if 'fiscalyear' not in context:
            fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
        else:
            fiscalyear_ids = [context['fiscalyear']]
        fiscalyears = fiscalyear_obj.browse(cr, uid, fiscalyear_ids, context=context)
        period_ids = []
        if fiscalyears:
            # NOTE(review): 'domain' is only bound inside this branch; if no
            # draft fiscal year exists, the string surgery below raises
            # NameError - TODO confirm whether that situation can occur.
            for fiscalyear in fiscalyears:
                for period in fiscalyear.period_ids:
                    period_ids.append(period.id)
            domain = str(('period_id', 'in', period_ids))
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_tree1')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id])[0]
        result['context'] = {
            'fiscalyear': False,
            'account_id': context['active_id'],
            'active_id': context['active_id'],
        }
        if context['active_id']:
            acc_data = account_obj.browse(cr, uid, context['active_id']).child_consol_ids
            if acc_data:
                result['context'].update({'consolidate_children': True})
            # Splice the period condition into the action's literal domain
            # string: drop the closing bracket, append the tuple, re-close.
            result['domain']=result['domain'][0:-1]+','+domain+result['domain'][-1]
        return result
| agpl-3.0 |
hasinitg/airavata | airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/thrift/protocol/TBinaryProtocol.py | 6 | 6573 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from struct import pack, unpack
from lib.thrift.protocol.TProtocol import *
class TBinaryProtocol(TProtocolBase):
  """Binary implementation of the Thrift protocol driver.

  All integers and doubles are written big-endian via the struct module;
  strings are written as an i32 length prefix followed by the raw bytes.
  """
  # NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
  # positive, converting this into a long. If we hardcode the int value
  # instead it'll stay in 32 bit-land.
  # VERSION_MASK = 0xffff0000
  VERSION_MASK = -65536
  # VERSION_1 = 0x80010000
  VERSION_1 = -2147418112
  TYPE_MASK = 0x000000ff
  def __init__(self, trans, strictRead=False, strictWrite=True):
    # strictRead: reject incoming messages without a version header.
    # strictWrite: emit the versioned (strict) message header on writes.
    TProtocolBase.__init__(self, trans)
    self.strictRead = strictRead
    self.strictWrite = strictWrite
  def writeMessageBegin(self, name, type, seqid):
    """Write a message header; versioned form when strictWrite is set."""
    if self.strictWrite:
      self.writeI32(TBinaryProtocol.VERSION_1 | type)
      self.writeString(name)
      self.writeI32(seqid)
    else:
      self.writeString(name)
      self.writeByte(type)
      self.writeI32(seqid)
  def writeMessageEnd(self):
    pass
  def writeStructBegin(self, name):
    # Structs have no wire representation of their own; only their fields do.
    pass
  def writeStructEnd(self):
    pass
  def writeFieldBegin(self, name, type, id):
    self.writeByte(type)
    self.writeI16(id)
  def writeFieldEnd(self):
    pass
  def writeFieldStop(self):
    # A STOP byte terminates a struct's field list.
    self.writeByte(TType.STOP)
  def writeMapBegin(self, ktype, vtype, size):
    self.writeByte(ktype)
    self.writeByte(vtype)
    self.writeI32(size)
  def writeMapEnd(self):
    pass
  def writeListBegin(self, etype, size):
    self.writeByte(etype)
    self.writeI32(size)
  def writeListEnd(self):
    pass
  def writeSetBegin(self, etype, size):
    self.writeByte(etype)
    self.writeI32(size)
  def writeSetEnd(self):
    pass
  def writeBool(self, bool):
    # Booleans are encoded as a single byte: 1 or 0.
    if bool:
      self.writeByte(1)
    else:
      self.writeByte(0)
  def writeByte(self, byte):
    buff = pack("!b", byte)
    self.trans.write(buff)
  def writeI16(self, i16):
    buff = pack("!h", i16)
    self.trans.write(buff)
  def writeI32(self, i32):
    buff = pack("!i", i32)
    self.trans.write(buff)
  def writeI64(self, i64):
    buff = pack("!q", i64)
    self.trans.write(buff)
  def writeDouble(self, dub):
    buff = pack("!d", dub)
    self.trans.write(buff)
  def writeString(self, str):
    # NOTE: parameter shadows the builtin 'str'; kept for API compatibility.
    # Assumes len(str) equals the number of bytes written (Python 2 str).
    self.writeI32(len(str))
    self.trans.write(str)
  def readMessageBegin(self):
    """Read a message header and return (name, type, seqid).

    A negative leading i32 marks a strict (versioned) header; a positive
    value is the method-name length of the old, unversioned format.
    """
    sz = self.readI32()
    if sz < 0:
      version = sz & TBinaryProtocol.VERSION_MASK
      if version != TBinaryProtocol.VERSION_1:
        raise TProtocolException(
          type=TProtocolException.BAD_VERSION,
          message='Bad version in readMessageBegin: %d' % (sz))
      type = sz & TBinaryProtocol.TYPE_MASK
      name = self.readString()
      seqid = self.readI32()
    else:
      if self.strictRead:
        raise TProtocolException(type=TProtocolException.BAD_VERSION,
                                 message='No protocol version header')
      name = self.trans.readAll(sz)
      type = self.readByte()
      seqid = self.readI32()
    return (name, type, seqid)
  def readMessageEnd(self):
    pass
  def readStructBegin(self):
    pass
  def readStructEnd(self):
    pass
  def readFieldBegin(self):
    # Returns (name, type, id); name is never on the wire, so always None.
    type = self.readByte()
    if type == TType.STOP:
      return (None, type, 0)
    id = self.readI16()
    return (None, type, id)
  def readFieldEnd(self):
    pass
  def readMapBegin(self):
    ktype = self.readByte()
    vtype = self.readByte()
    size = self.readI32()
    return (ktype, vtype, size)
  def readMapEnd(self):
    pass
  def readListBegin(self):
    etype = self.readByte()
    size = self.readI32()
    return (etype, size)
  def readListEnd(self):
    pass
  def readSetBegin(self):
    etype = self.readByte()
    size = self.readI32()
    return (etype, size)
  def readSetEnd(self):
    pass
  def readBool(self):
    byte = self.readByte()
    if byte == 0:
      return False
    return True
  def readByte(self):
    buff = self.trans.readAll(1)
    val, = unpack('!b', buff)
    return val
  def readI16(self):
    buff = self.trans.readAll(2)
    val, = unpack('!h', buff)
    return val
  def readI32(self):
    buff = self.trans.readAll(4)
    val, = unpack('!i', buff)
    return val
  def readI64(self):
    buff = self.trans.readAll(8)
    val, = unpack('!q', buff)
    return val
  def readDouble(self):
    buff = self.trans.readAll(8)
    val, = unpack('!d', buff)
    return val
  def readString(self):
    # i32 length prefix followed by that many raw bytes.
    len = self.readI32()
    str = self.trans.readAll(len)
    return str
class TBinaryProtocolFactory:
  """Factory for plain TBinaryProtocol instances.

  strictRead/strictWrite control whether the version header is required on
  read and emitted on write, respectively.
  """
  def __init__(self, strictRead=False, strictWrite=True):
    self.strictRead = strictRead
    self.strictWrite = strictWrite
  def getProtocol(self, trans):
    """Create a TBinaryProtocol wrapping the given transport."""
    return TBinaryProtocol(trans, self.strictRead, self.strictWrite)
class TBinaryProtocolAccelerated(TBinaryProtocol):
  """C-Accelerated version of TBinaryProtocol.
  This class does not override any of TBinaryProtocol's methods,
  but the generated code recognizes it directly and will call into
  our C module to do the encoding, bypassing this object entirely.
  We inherit from TBinaryProtocol so that the normal TBinaryProtocol
  encoding can happen if the fastbinary module doesn't work for some
  reason. (TODO(dreiss): Make this happen sanely in more cases.)
  In order to take advantage of the C module, just use
  TBinaryProtocolAccelerated instead of TBinaryProtocol.
  NOTE: This code was contributed by an external developer.
  The internal Thrift team has reviewed and tested it,
  but we cannot guarantee that it is production-ready.
  Please feel free to report bugs and/or success stories
  to the public mailing list.
  """
  # Intentionally empty: acceleration is triggered by an isinstance check in
  # generated code, not by overriding any method here.
  pass
class TBinaryProtocolAcceleratedFactory:
  """Factory producing TBinaryProtocolAccelerated instances."""
  def getProtocol(self, trans):
    protocol = TBinaryProtocolAccelerated(trans)
    return protocol
| apache-2.0 |
jhawkesworth/ansible | lib/ansible/modules/cloud/ovirt/ovirt_user.py | 75 | 5137 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_user
short_description: Module to manage users in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage users in oVirt/RHV."
options:
name:
description:
- "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
required: true
state:
description:
- "Should the user be present/absent."
choices: ['present', 'absent']
default: present
authz_name:
description:
- "Authorization provider of the user. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace where the user resides. When using the authorization provider that stores users in the LDAP server,
this attribute equals the naming context of the LDAP server."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add user user1 from authorization provider example.com-authz
- ovirt_user:
name: user1
domain: example.com-authz
# Add user user1 from authorization provider example.com-authz
# In case of Active Directory specify UPN:
- ovirt_user:
name: user1@ad2.example.com
domain: example.com-authz
# Remove user user1 with authorization provider example.com-authz
- ovirt_user:
state: absent
name: user1
authz_name: example.com-authz
'''
RETURN = '''
id:
description: ID of the user which is managed
returned: On success if user is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
user:
description: "Dictionary of all the user attributes. User attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
returned: On success if user is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
ovirt_full_argument_spec,
)
def username(module):
    """Return the oVirt API user name in '<name>@<authz_name>' form."""
    name = module.params['name']
    domain = module.params['authz_name']
    return '%s@%s' % (name, domain)
class UsersModule(BaseModule):
    def build_entity(self):
        """Build the oVirt SDK User entity from the Ansible parameters."""
        return otypes.User(
            domain=otypes.Domain(
                name=self._module.params['authz_name']
            ),
            # user_name is '<name>@<authz_name>'; principal is the plain name
            # (or the UPN when using Active Directory).
            user_name=username(self._module),
            principal=self._module.params['name'],
            namespace=self._module.params['namespace'],
        )
def main():
    """Ansible entry point: ensure the user is present or absent in oVirt/RHV."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        authz_name=dict(required=True, aliases=['domain']),
        namespace=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)
    # Initialize before the try block: if authentication or connection setup
    # fails, the finally clause below must not raise a NameError that would
    # mask the real error reported via fail_json.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        users_service = connection.system_service().users_service()
        users_module = UsersModule(
            connection=connection,
            module=module,
            service=users_service,
        )
        state = module.params['state']
        if state == 'present':
            ret = users_module.create(
                search_params={
                    'usrname': username(module),
                }
            )
        elif state == 'absent':
            ret = users_module.remove(
                search_params={
                    'usrname': username(module),
                }
            )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out / close if a connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
| gpl-3.0 |
fossoult/odoo | addons/mail/res_config.py | 301 | 2233 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import urlparse
from openerp.osv import osv, fields
class project_configuration(osv.TransientModel):
    """Settings wizard field for the mail catch-all alias domain."""
    _inherit = 'base.config.settings'
    _columns = {
        'alias_domain': fields.char('Alias Domain',
            help="If you have setup a catch-all email domain redirected to "
                 "the Odoo server, enter the domain name here."),
    }
    def get_default_alias_domain(self, cr, uid, ids, context=None):
        """Default value: the stored 'mail.catchall.domain' parameter,
        falling back to the host part of 'web.base.url'."""
        alias_domain = self.pool.get("ir.config_parameter").get_param(cr, uid, "mail.catchall.domain", default=None, context=context)
        if alias_domain is None:
            domain = self.pool.get("ir.config_parameter").get_param(cr, uid, "web.base.url", context=context)
            try:
                # Strip scheme and port, keep just the host name.
                alias_domain = urlparse.urlsplit(domain).netloc.split(':')[0]
            except Exception:
                # Malformed base url: leave the field empty.
                pass
        return {'alias_domain': alias_domain or False}
    def set_alias_domain(self, cr, uid, ids, context=None):
        """Persist the wizard value into 'mail.catchall.domain'."""
        config_parameters = self.pool.get("ir.config_parameter")
        for record in self.browse(cr, uid, ids, context=context):
            config_parameters.set_param(cr, uid, "mail.catchall.domain", record.alias_domain or '', context=context)
Jortolsa/l10n-spain | l10n_es_fiscal_year_closing/models/account_fiscalyear.py | 16 | 1417 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api, _
class AccountFiscalyear(models.Model):
    _inherit = "account.fiscalyear"
    @api.v7
    def create_period(self, cr, uid, ids, context=None, interval=1):
        # Old-API shim: delegate to the v8 implementation below.
        recs = self.browse(cr, uid, ids, context)
        recs.create_period(interval=interval)
    @api.v8
    def create_period(self, interval=1):
        """Create the standard periods plus, for each fiscal year, the two
        Spanish special periods at year end: closing ('C') and
        profit-and-loss ('PL'). Both are zero-length (start == stop) and
        flagged as special."""
        res = super(AccountFiscalyear, self).create_period(interval=interval)
        period_model = self.env['account.period']
        for fy in self:
            period_model.create({
                'name': "%s %s" % (_('Closing Period'), fy.code),
                'code': '%s/%s' % (_('C'), fy.code),
                'date_start': fy.date_stop,
                'date_stop': fy.date_stop,
                'special': True,
                'fiscalyear_id': fy.id})
            period_model.create({
                'name': "%s %s" % (_('Profit and loss Period'), fy.code),
                'code': '%s/%s' % (_('PL'), fy.code),
                'date_start': fy.date_stop,
                'date_stop': fy.date_stop,
                'special': True,
                'fiscalyear_id': fy.id})
        return res
renard/ansible | test/units/module_utils/common/validation/test_check_required_together.py | 44 | 1494 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.common.validation import check_required_together
@pytest.fixture
def together_terms():
    # Groups of parameters that must be supplied together (or not at all).
    return [
        ['bananas', 'potatoes'],
        ['cats', 'wolves']
    ]
def test_check_required_together(together_terms):
    """Complete groups (plus unrelated keys) produce no error."""
    params = dict(
        bananas='hello',
        potatoes='this is here too',
        dogs='haha',
    )
    assert check_required_together(together_terms, params) == []
def test_check_required_together_missing(together_terms):
    """A partially supplied group raises, naming the incomplete group."""
    params = dict(bananas='woohoo', wolves='uh oh')
    expected = "parameters are required together: bananas, potatoes"
    with pytest.raises(TypeError) as exc_info:
        check_required_together(together_terms, params)
    assert to_native(exc_info.value) == expected
def test_check_required_together_missing_none():
    """terms=None means 'no constraints' and yields an empty result."""
    params = dict(foo='bar', baz='buzz')
    assert check_required_together(None, params) == []
def test_check_required_together_no_params(together_terms):
    """Passing params=None raises because None cannot be iterated."""
    with pytest.raises(TypeError) as exc_info:
        check_required_together(together_terms, None)
    assert "'NoneType' object is not iterable" in to_native(exc_info.value)
jvehent/cipherscan | cscan/config.py | 2 | 5861 | # Copyright (c) 2016 Hubert Kario <hkario@redhat.com>
# Released under Mozilla Public License Version 2.0
"""Typical Client Hello messages sent by different clients."""
import random
from tlslite.messages import ClientHello
from tlslite.constants import \
ECPointFormat, HashAlgorithm, SignatureAlgorithm
from tlslite.extensions import SNIExtension, SupportedGroupsExtension, \
TLSExtension, SignatureAlgorithmsExtension, NPNExtension, \
ECPointFormatsExtension
from tlslite.utils.cryptomath import numberToByteArray
from .constants import CipherSuite, ExtensionType, GroupName
class HelloConfig(object):
    """Base object for all Client Hello configurations."""
    def __init__(self):
        """Initialize object with default settings."""
        self._name = None
        # Human-readable descriptions of applied tweaks; shown in `name`.
        self.modifications = []
        # Callables applied to the finished ClientHello (hello -> hello).
        self.callbacks = []
        self.version = (3, 3)
        self.record_version = (3, 0)
        self.ciphers = []
        self.extensions = None
        # If None, a fresh 32-byte value is generated for every hello.
        self.random = None
        self.session_id = bytearray(0)
        self.compression_methods = [0]
        self.ssl2 = False
    @property
    def name(self):
        """Return the name of config with all the modifications applied."""
        if self.modifications:
            return "{0} ({1})".format(self._name,
                                      ", ".join(self.modifications))
        else:
            return self._name
    @name.setter
    def name(self, value):
        """Set the base name of the configuration."""
        self._name = value
    def __call__(self, hostname):
        """Generate a client hello object, use hostname in SNI extension."""
        # SNI is special in that we don't want to send it if it is empty
        if self.extensions:
            sni = next((x for x in self.extensions
                        if isinstance(x, SNIExtension)),
                       None)
            if sni:
                if hostname is not None:
                    if sni.serverNames is None:
                        sni.serverNames = []
                    sni.hostNames = [hostname]
                else:
                    # but if we were not provided with a host name, we want
                    # to remove empty extension
                    if sni.serverNames is None:
                        self.extensions = [x for x in self.extensions
                                           if not isinstance(x, SNIExtension)]
        if self.random:
            rand = self.random
        else:
            # we're not doing any crypto with it, just need "something"
            # TODO: place unix time at the beginning
            rand = numberToByteArray(random.getrandbits(256), 32)
        ch = ClientHello(self.ssl2).create(self.version, rand, self.session_id,
                                           self.ciphers,
                                           extensions=self.extensions)
        ch.compression_methods = self.compression_methods
        # Give registered callbacks a chance to post-process the hello.
        for cb in self.callbacks:
            ch = cb(ch)
        return ch
class Firefox_42(HelloConfig):
    """Create Client Hello like Firefox 42."""

    def __init__(self):
        """Set the configuration to Firefox 42.

        The cipher list, extension order and extension payloads below
        mirror what Firefox 42 sends on the wire; do not reorder them.
        """
        super(Firefox_42, self).__init__()
        self._name = "Firefox 42"
        self.version = (3, 3)
        self.record_version = (3, 1)
        # Cipher suites in Firefox's preference order (ECDHE first).
        self.ciphers = [CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
                        CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
                        CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA]
        ext = self.extensions = []
        # Empty SNI placeholder; __call__ fills in (or drops) the host name.
        ext.append(SNIExtension())
        # Secure renegotiation: empty renegotiated_connection field.
        ext.append(TLSExtension(extType=ExtensionType.renegotiation_info)
                   .create(bytearray(1)))
        ext.append(SupportedGroupsExtension().create([GroupName.secp256r1,
                                                      GroupName.secp384r1,
                                                      GroupName.secp521r1]))
        ext.append(ECPointFormatsExtension()
                   .create([ECPointFormat.uncompressed]))
        ext.append(TLSExtension(extType=ExtensionType.session_ticket))
        ext.append(NPNExtension())
        # ALPN: raw payload = 2-byte list length, then length-prefixed names.
        ext.append(TLSExtension(extType=ExtensionType.alpn)
                   .create(bytearray(b'\x00\x15' +
                                     b'\x02' + b'h2' +
                                     b'\x08' + b'spdy/3.1' +
                                     b'\x08' + b'http/1.1')))
        # OCSP stapling request: status_type=ocsp, empty responder id list
        # and empty request extensions.
        ext.append(TLSExtension(extType=ExtensionType.status_request)
                   .create(bytearray(b'\x01' +
                                     b'\x00\x00' +
                                     b'\x00\x00')))
        # signature_algorithms: RSA, then ECDSA, then DSA pairs in the
        # exact hash order Firefox advertises.
        sig_algs = []
        for alg in ['sha256', 'sha384', 'sha512', 'sha1']:
            sig_algs.append((getattr(HashAlgorithm, alg),
                             SignatureAlgorithm.rsa))
        for alg in ['sha256', 'sha384', 'sha512', 'sha1']:
            sig_algs.append((getattr(HashAlgorithm, alg),
                             SignatureAlgorithm.ecdsa))
        for alg in ['sha256', 'sha1']:
            sig_algs.append((getattr(HashAlgorithm, alg),
                             SignatureAlgorithm.dsa))
        ext.append(SignatureAlgorithmsExtension()
                   .create(sig_algs))
| mpl-2.0 |
ryuunosukeyoshi/PartnerPoi-Bot | lib/pycparser/ply/cpp.py | 21 | 33317 | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2017
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
    # Python 2: string values may be either str or unicode.
    STRING_TYPES = (str, unicode)
else:
    STRING_TYPES = str
    # Alias so Python-2-style xrange() loops below keep working on Python 3.
    xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
# Token names exported to PLY -- the minimum set for a C preprocessor.
tokens = (
   'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
# Single-character punctuation/operator tokens handled as literals.
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
    r'\s+'
    # Count newlines so line numbers in diagnostics stay accurate.
    t.lexer.lineno += t.value.count("\n")
    return t
t_CPP_POUND = r'\#'      # '#'  -- directive introducer / stringization
t_CPP_DPOUND = r'\#\#'   # '##' -- token pasting (PLY picks the longer match)
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
    # Hex or decimal literal with optional u/l suffixes; value kept as text.
    return t

t_CPP_INTEGER = CPP_INTEGER
# Floating literal: decimal forms with optional exponent and f/l suffix.
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
    r'\"([^\\\n]|(\\(.|\n)))*?\"'
    # Strings may contain escaped newlines; keep lineno in sync.
    t.lexer.lineno += t.value.count("\n")
    return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
    # Character constant 'c' or wide L'c'; escaped newlines allowed.
    t.lexer.lineno += t.value.count("\n")
    return t
# Comment
def t_CPP_COMMENT1(t):
    r'(/\*(.|\n)*?\*/)'
    # Block comment: re-emit as whitespace so downstream code keeps the
    # same line structure (one '\n' per original newline, else a space).
    ncr = t.value.count("\n")
    t.lexer.lineno += ncr
    # replace with one space or a number of '\n'
    t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
    return t
# Line comment
def t_CPP_COMMENT2(t):
    r'(//.*?(\n|$))'
    # Line comment: replace with a single '\n' so the line still ends.
    t.type = 'CPP_WS'; t.value = '\n'
    return t
def t_error(t):
    # Unknown character: pass it through as a single-character token whose
    # type IS the character, letting the preprocessor treat it literally.
    t.type = t.value[0]
    t.value = t.value[0]
    t.lexer.skip(1)
    return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
# Pattern matching any ANSI C trigraph sequence ("??" + one of =/'()!<>-).
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')

# Final trigraph character -> replacement character.
_trigraph_rep = {
    '=': '#',
    '/': '\\',
    "'": '^',
    '(': '[',
    ')': ']',
    '!': '|',
    '<': '{',
    '>': '}',
    '-': '~',
}

def trigraph(input):
    """Return *input* with every ANSI C trigraph sequence replaced.

    For example ``??=`` becomes ``#`` and ``??/`` becomes a backslash.
    Text without trigraphs is returned unchanged.
    """
    def _substitute(match):
        # The last character of the matched "??x" selects the replacement.
        return _trigraph_rep[match.group()[-1]]

    return _trigraph_pat.sub(_substitute, input)
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
    """Holds one preprocessor macro definition.

    Attributes:
        name     -- macro name (string)
        value    -- replacement token sequence (list of tokens)
        arglist  -- parameter names, or None for an object-like macro
        variadic -- True when the macro takes '...' arguments
        vararg   -- name of the variadic parameter (set only if variadic)
        source   -- file the macro was defined in (filled in later)
    """

    def __init__(self, name, value, arglist=None, variadic=False):
        self.name = name
        self.value = value
        self.arglist = arglist
        self.variadic = variadic
        self.source = None
        if variadic:
            # By convention the variadic parameter is always last.
            self.vararg = arglist[-1]
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
    def __init__(self,lexer=None):
        """Create a preprocessor.

        lexer -- PLY lexer used for tokenization.  Defaults to the
        module-global ``lex.lexer`` (NOTE(review): ``lex`` is only bound
        in the __main__ block of this module -- confirm callers always
        pass a lexer in library use).
        """
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }        # macro name -> Macro
        self.path = []           # include search path
        self.temp_path = []      # transient path of the current include dir
        # Probe the lexer for selected tokens
        self.lexprobe()
        tm = time.localtime()
        # Predefine the standard __DATE__/__TIME__ macros.
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None       # active parsegen() generator; see parse()
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
    def tokenize(self,text):
        """Tokenize *text* with the configured lexer; return a token list."""
        tokens = []
        self.lexer.input(text)
        while True:
            tok = self.lexer.token()
            if not tok: break
            tokens.append(tok)
        return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
    def lexprobe(self):
        """Probe the lexer to learn the token type names it produces.

        Feeds the lexer sample inputs and records the resulting token
        type names in self.t_ID, self.t_INTEGER (+ self.t_INTEGER_TYPE),
        self.t_STRING, self.t_SPACE, self.t_NEWLINE and self.t_WS, so
        the preprocessor works with any suitably featured lexer no
        matter how its tokens are named.  Problems are reported on
        stdout rather than raised.
        """
        # Determine the token type for identifiers
        self.lexer.input("identifier")
        tok = self.lexer.token()
        if not tok or tok.value != "identifier":
            print("Couldn't determine identifier type")
        else:
            self.t_ID = tok.type
        # Determine the token type for integers
        self.lexer.input("12345")
        tok = self.lexer.token()
        if not tok or int(tok.value) != 12345:
            print("Couldn't determine integer type")
        else:
            self.t_INTEGER = tok.type
            # Remember the concrete value type so synthesized integers
            # (e.g. __LINE__) match what the lexer would produce.
            self.t_INTEGER_TYPE = type(tok.value)
        # Determine the token type for strings enclosed in double quotes
        self.lexer.input("\"filename\"")
        tok = self.lexer.token()
        if not tok or tok.value != "\"filename\"":
            print("Couldn't determine string type")
        else:
            self.t_STRING = tok.type
        # Determine the token type for whitespace--if any
        self.lexer.input(" ")
        tok = self.lexer.token()
        if not tok or tok.value != " ":
            self.t_SPACE = None
        else:
            self.t_SPACE = tok.type
        # Determine the token type for newlines
        self.lexer.input("\n")
        tok = self.lexer.token()
        if not tok or tok.value != "\n":
            self.t_NEWLINE = None
            print("Couldn't determine token for newlines")
        else:
            self.t_NEWLINE = tok.type
        self.t_WS = (self.t_SPACE, self.t_NEWLINE)
        # Check for other characters used by the preprocessor
        chars = [ '<','>','#','##','\\','(',')',',','.']
        for c in chars:
            self.lexer.input(c)
            tok = self.lexer.token()
            if not tok or tok.value != c:
                print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping into text into
# a line-by-line format.
# ----------------------------------------------------------------------
    def group_lines(self,input):
        """Generate logical lines of *input* as lists of tokens.

        Trailing whitespace is stripped and any physical line ending in
        a backslash is joined with its successor before tokenizing, so
        continued lines arrive as one logical line.  This is the lowest
        layer of the preprocessor.
        """
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        # Merge backslash-continued lines; continued lines become "" so the
        # overall line count (and thus line numbers) is preserved.
        for i in xrange(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1
        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1
        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            # A whitespace token containing '\n' terminates the logical line.
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []
        if current_line:
            yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
    def collect_args(self,tokenlist):
        """Collect comma-separated macro arguments from *tokenlist*.

        The arguments must be enclosed in parentheses.  Returns a tuple
        (tokencount, args, positions): tokencount is the number of
        tokens consumed, args is a list of whitespace-stripped token
        lists (one per argument) and positions records each argument's
        start index.  Nested parentheses and the commas inside them do
        not split arguments.  On a malformed call the error is reported
        and (0, [], []) is returned.
        """
        args = []
        positions = []
        current_arg = []
        nesting = 1
        tokenlen = len(tokenlist)
        # Search for the opening '('.
        i = 0
        while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
            i += 1
        if (i < tokenlen) and (tokenlist[i].value == '('):
            positions.append(i+1)
        else:
            self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
            return 0, [], []
        i += 1
        while i < tokenlen:
            t = tokenlist[i]
            if t.value == '(':
                current_arg.append(t)
                nesting += 1
            elif t.value == ')':
                nesting -= 1
                if nesting == 0:
                    # Closing ')' of the call itself: flush the final arg.
                    if current_arg:
                        args.append(self.tokenstrip(current_arg))
                        positions.append(i)
                    return i+1,args,positions
                current_arg.append(t)
            elif t.value == ',' and nesting == 1:
                # Top-level comma separates arguments.
                args.append(self.tokenstrip(current_arg))
                positions.append(i+1)
                current_arg = []
            else:
                current_arg.append(t)
            i += 1
        # Missing end argument
        self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
        return 0, [],[]
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
    def macro_prescan(self,macro):
        """Pre-scan a macro's replacement tokens and record patch points.

        Populates macro.patch (argument substitutions, tagged 'e' for
        normal expansion or 'c' for '##' concatenation), macro.str_patch
        (stringization via '#') and macro.var_comma_patch (positions of
        commas to drop when a variadic argument is empty).  Computing
        these once here makes macro_expand_args() fast.
        """
        macro.patch = [] # Standard macro arguments
        macro.str_patch = [] # String conversion expansion
        macro.var_comma_patch = [] # Variadic macro comma patch
        i = 0
        while i < len(macro.value):
            if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
                argnum = macro.arglist.index(macro.value[i].value)
                # Conversion of argument to a string
                if i > 0 and macro.value[i-1].value == '#':
                    # Drop the '#' and remember the stringize site.
                    macro.value[i] = copy.copy(macro.value[i])
                    macro.value[i].type = self.t_STRING
                    del macro.value[i-1]
                    macro.str_patch.append((argnum,i-1))
                    continue
                # Concatenation
                elif (i > 0 and macro.value[i-1].value == '##'):
                    macro.patch.append(('c',argnum,i-1))
                    del macro.value[i-1]
                    continue
                elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
                    macro.patch.append(('c',argnum,i))
                    i += 1
                    continue
                # Standard expansion
                else:
                    macro.patch.append(('e',argnum,i))
            elif macro.value[i].value == '##':
                # ", ## __VA_ARGS__": the comma may need to disappear.
                if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
                        ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
                        (macro.value[i+1].value == macro.vararg):
                    macro.var_comma_patch.append(i-1)
            i += 1
        # Patches are applied back-to-front so earlier indices stay valid.
        macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
    def macro_expand_args(self,macro,args):
        """Return *macro*'s replacement tokens with *args* substituted.

        args is a list of token lists, one per macro parameter.  Uses
        the patch lists built by macro_prescan().  The returned sequence
        is freshly copied; macro.value is not modified.
        """
        # Make a copy of the macro token sequence
        rep = [copy.copy(_x) for _x in macro.value]
        # Make string expansion patches.  These do not alter the length of the
        # replacement sequence.
        str_expansion = {}
        for argnum, i in macro.str_patch:
            if argnum not in str_expansion:
                # Stringize the raw argument text, escaping backslashes.
                str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
            rep[i] = copy.copy(rep[i])
            rep[i].value = str_expansion[argnum]
        # Make the variadic macro comma patch.  If the variadic macro argument
        # is empty, we get rid of the preceding comma.
        comma_patch = False
        if macro.variadic and not args[-1]:
            for i in macro.var_comma_patch:
                rep[i] = None
            comma_patch = True
        # Make all other patches.  The order of these matters.  It is assumed
        # that the patch list has been sorted in reverse order of patch
        # location since replacements will cause the size of the replacement
        # sequence to expand from the patch point.
        expanded = { }
        for ptype, argnum, i in macro.patch:
            # Concatenation.  Argument is left unexpanded
            if ptype == 'c':
                rep[i:i+1] = args[argnum]
            # Normal expansion.  Argument is macro expanded first
            elif ptype == 'e':
                if argnum not in expanded:
                    expanded[argnum] = self.expand_macros(args[argnum])
                rep[i:i+1] = expanded[argnum]
        # Get rid of removed comma if necessary
        if comma_patch:
            rep = [_i for _i in rep if _i]
        return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
    def evalexpr(self,tokens):
        """Evaluate a #if/#elif integral expression; return its value.

        'defined(NAME)' is resolved first, remaining identifiers become
        0L, macros are expanded, C logical operators are mapped to
        Python and the result is computed with eval().

        NOTE(review): eval() here executes arbitrary Python derived from
        preprocessor input -- acceptable only for trusted sources;
        consider a real expression parser for untrusted input.
        """
        # tokens = tokenize(line)
        # Search for defined macros
        i = 0
        while i < len(tokens):
            if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
                j = i + 1
                needparen = False
                result = "0L"
                while j < len(tokens):
                    if tokens[j].type in self.t_WS:
                        j += 1
                        continue
                    elif tokens[j].type == self.t_ID:
                        if tokens[j].value in self.macros:
                            result = "1L"
                        else:
                            result = "0L"
                        if not needparen: break
                    elif tokens[j].value == '(':
                        needparen = True
                    elif tokens[j].value == ')':
                        break
                    else:
                        self.error(self.source,tokens[i].lineno,"Malformed defined()")
                    j += 1
                # Replace the whole defined(...) span with 0L/1L.
                tokens[i].type = self.t_INTEGER
                tokens[i].value = self.t_INTEGER_TYPE(result)
                del tokens[i+1:j+1]
            i += 1
        tokens = self.expand_macros(tokens)
        for i,t in enumerate(tokens):
            if t.type == self.t_ID:
                # Any identifier surviving expansion evaluates to 0.
                tokens[i] = copy.copy(t)
                tokens[i].type = self.t_INTEGER
                tokens[i].value = self.t_INTEGER_TYPE("0L")
            elif t.type == self.t_INTEGER:
                tokens[i] = copy.copy(t)
                # Strip off any trailing suffixes (u/U/l/L)
                tokens[i].value = str(tokens[i].value)
                while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
                    tokens[i].value = tokens[i].value[:-1]
        # Translate C logical operators into Python equivalents.
        expr = "".join([str(x.value) for x in tokens])
        expr = expr.replace("&&"," and ")
        expr = expr.replace("||"," or ")
        expr = expr.replace("!"," not ")
        try:
            result = eval(expr)
        except Exception:
            self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
            result = 0
        return result
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string/
# ----------------------------------------------------------------------
    def parsegen(self,input,source=None):
        """Parse *input* (optionally from file *source*); yield tokens.

        This is the preprocessor's main generator: it groups input into
        logical lines, executes directives (#define, #include, #if
        family, ...) and yields the macro-expanded tokens of every
        enabled non-directive line.
        """
        # Replace trigraph sequences
        t = trigraph(input)
        lines = self.group_lines(t)
        if not source:
            source = ""
        self.define("__FILE__ \"%s\"" % source)
        self.source = source
        chunk = []          # pending non-directive tokens awaiting expansion
        enable = True       # False while inside a false #if/#ifdef branch
        iftrigger = False   # True once some branch of the current #if ran
        ifstack = []        # saved (enable, iftrigger) for nested #if blocks
        for x in lines:
            # Find the first non-whitespace token of the line.
            for i,tok in enumerate(x):
                if tok.type not in self.t_WS: break
            if tok.value == '#':
                # Preprocessor directive
                # insert necessary whitespace instead of eaten tokens
                for tok in x:
                    if tok.type in self.t_WS and '\n' in tok.value:
                        chunk.append(tok)
                dirtokens = self.tokenstrip(x[i+1:])
                if dirtokens:
                    name = dirtokens[0].value
                    args = self.tokenstrip(dirtokens[1:])
                else:
                    name = ""
                    args = []
                if name == 'define':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.define(args)
                elif name == 'include':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        # Save/restore __FILE__ around the included file.
                        oldfile = self.macros['__FILE__']
                        for tok in self.include(args):
                            yield tok
                        self.macros['__FILE__'] = oldfile
                        self.source = source
                elif name == 'undef':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.undef(args)
                elif name == 'ifdef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if not args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'ifndef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'if':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        result = self.evalexpr(args)
                        if not result:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'elif':
                    if ifstack:
                        if ifstack[-1][0]: # We only pay attention if outer "if" allows this
                            if enable: # If already true, we flip enable False
                                enable = False
                            elif not iftrigger: # If False, but not triggered yet, we'll check expression
                                result = self.evalexpr(args)
                                if result:
                                    enable = True
                                    iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
                elif name == 'else':
                    if ifstack:
                        if ifstack[-1][0]:
                            if enable:
                                enable = False
                            elif not iftrigger:
                                enable = True
                                iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
                elif name == 'endif':
                    if ifstack:
                        enable,iftrigger = ifstack.pop()
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
                else:
                    # Unknown preprocessor directive
                    pass
            else:
                # Normal text
                if enable:
                    chunk.extend(x)
        # Flush whatever text remains at end of input.
        for tok in self.expand_macros(chunk):
            yield tok
        chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
    def define(self,tokens):
        """Define a new macro.

        *tokens* is either a string of the form "NAME value" /
        "NAME(args) value" (which is tokenized first) or an
        already-tokenized #define argument list.  Handles object-like,
        function-like and variadic ('...') macros; malformed definitions
        are reported on stdout.
        """
        if isinstance(tokens,STRING_TYPES):
            tokens = self.tokenize(tokens)
        linetok = tokens
        try:
            name = linetok[0]
            if len(linetok) > 1:
                mtype = linetok[1]
            else:
                mtype = None
            if not mtype:
                # Bare "#define NAME" -- empty object-like macro.
                m = Macro(name.value,[])
                self.macros[name.value] = m
            elif mtype.type in self.t_WS:
                # A normal macro
                m = Macro(name.value,self.tokenstrip(linetok[2:]))
                self.macros[name.value] = m
            elif mtype.value == '(':
                # A macro with arguments
                tokcount, args, positions = self.collect_args(linetok[1:])
                variadic = False
                for a in args:
                    if variadic:
                        print("No more arguments may follow a variadic argument")
                        break
                    astr = "".join([str(_i.value) for _i in a])
                    if astr == "...":
                        # Anonymous variadic parameter -> __VA_ARGS__.
                        variadic = True
                        a[0].type = self.t_ID
                        a[0].value = '__VA_ARGS__'
                        variadic = True
                        del a[1:]
                        continue
                    elif astr[-3:] == "..." and a[0].type == self.t_ID:
                        # Named variadic parameter, e.g. "args...".
                        variadic = True
                        del a[1:]
                        # If, for some reason, "." is part of the identifier,
                        # strip off the name for the purposes of macro
                        # expansion.
                        if a[0].value[-3:] == '...':
                            a[0].value = a[0].value[:-3]
                        continue
                    if len(a) > 1 or a[0].type != self.t_ID:
                        print("Invalid macro argument")
                        break
                else:
                    mvalue = self.tokenstrip(linetok[1+tokcount:])
                    # Remove whitespace adjacent to '##' so token pasting
                    # sees its operands directly.
                    i = 0
                    while i < len(mvalue):
                        if i+1 < len(mvalue):
                            if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
                                del mvalue[i]
                                continue
                            elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
                                del mvalue[i+1]
                        i += 1
                    m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
                    self.macro_prescan(m)
                    self.macros[name.value] = m
            else:
                print("Bad macro definition")
        except LookupError:
            print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
    def parse(self,input,source=None,ignore={}):
        """Begin parsing *input*; retrieve tokens afterwards via token().

        ignore -- set/dict of token types to suppress in token().
        NOTE(review): mutable default argument -- safe only because it
        is never mutated here; prefer ignore=None with a fallback.
        """
        self.ignore = ignore
        self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
    def token(self):
        """Return the next non-ignored token, or None at end of input."""
        try:
            while True:
                tok = next(self.parser)
                if tok.type not in self.ignore: return tok
        except StopIteration:
            # Input exhausted: drop the generator and signal completion.
            self.parser = None
            return None
if __name__ == '__main__':
    # Demo driver: preprocess the file named on the command line and
    # print every resulting token.
    import ply.lex as lex
    lexer = lex.lex()
    # Run a preprocessor
    import sys
    f = open(sys.argv[1])
    input = f.read()
    p = Preprocessor(lexer)
    p.parse(input,sys.argv[1])
    while True:
        tok = p.token()
        if not tok: break
        print(p.source, tok)
| gpl-3.0 |
"""
Django settings for django_daisy project.

For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/


def _env_flag(name, default):
    """Return a boolean for an environment flag such as DEBUG=false.

    BUG FIX: os.environ.get(name, True) returned the raw *string* from
    the environment, and any non-empty string (including "False") is
    truthy, so the flag could never be disabled via the environment.
    """
    return os.environ.get(name, default).strip().lower() in ('1', 'true', 'yes', 'on')


# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the environment; the literal below is a development-only fallback
# kept for backward compatibility.
SECRET_KEY = os.environ.get(
    'SECRET_KEY', '$6(x*g_2g9l_*g8peb-@anl5^*8q!1w)k&e&2!i)t6$s8kia94')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = _env_flag('DEBUG', 'true')
TEMPLATE_DEBUG = True

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'compressor',
    'geoposition',
    'authentication',
    'risks',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'django_daisy.urls'
WSGI_APPLICATION = 'django_daisy.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
import dj_database_url
DATABASES = {
    'default': dj_database_url.config(
        default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
    )
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Copenhagen'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)

# Same string-truthiness fix as DEBUG above; default stays disabled.
COMPRESS_ENABLED = _env_flag('COMPRESS_ENABLED', 'false')

TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
    )
}

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
# (CONSISTENCY FIX: this module previously assigned ALLOWED_HOSTS twice --
# an earlier empty list that was silently overwritten by this value.)
ALLOWED_HOSTS = ['*']

# Use the custom account model I built!
AUTH_USER_MODEL = 'authentication.Account'
MasonLeeBack/PolyEngine | thirdparty/vulkan/shaderc/third_party/googletest/googletest/test/gtest_filter_unittest.py | 364 | 21325 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is neither 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing the key (if present) is how "unset" is represented here.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args = None):
  """Runs the test program and returns its output."""
  command_line = [COMMAND] + list(args or [])
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""
  process = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  current_case = ''
  for line in process.output.split('\n'):
    # A "[----] N tests from Case" line updates the current test case name.
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      current_case = case_match.group(1)
      continue
    # A "[ RUN ] Case.Test" line records one executed test.
    run_match = TEST_REGEX.match(line)
    if run_match is not None:
      tests_run.append(current_case + '.' + run_match.group(1))
  return (tests_run, process.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment."""
  # Snapshot before mutating so the original environment can be restored
  # even if 'function' raises.
  saved_env = environ.copy()
  environ.update(extra_env)
  try:
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(saved_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""
  sharding_env = {
      TOTAL_SHARDS_ENV_VAR: str(total_shards),
      SHARD_INDEX_ENV_VAR: str(shard_index),
  }
  return InvokeWithModifiedEnv(sharding_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.

  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""
    # Two-way containment; duplicates are deliberately ignored.
    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""
    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    # Same length AND same set membership => each element appears exactly once.
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(set(set_var), set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""
    global param_tests_present
    if not param_tests_present:
      return list(set(tests_to_run) - set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.
    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """
    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """
    global param_tests_present
    # Probe the binary only once; the result is cached module-wide.
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""
    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""
    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""
    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""
    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""
    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""
    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""
    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""
    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""
    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""
    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""
    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""
    self.RunAndVerify('*z*', [
        'FooTest.Xyz',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""
    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""
    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    """Tests filters with negative (excluded) patterns after '-'."""
    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""
    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    # The flag value ('*One'), not the env var value ('Foo*'), must win.
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""
      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
agronholm/ircproto | examples/twisted_client.py | 1 | 2777 | from __future__ import print_function, unicode_literals
from argparse import ArgumentParser
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, connectionDone, ClientFactory
from ircproto.connection import IRCClientConnection
from ircproto.constants import RPL_MYINFO
from ircproto.events import Reply, Error, Join
class IRCProtocol(Protocol):
    """Twisted protocol that joins a channel, says one message, and quits.

    Wraps a sans-io ``IRCClientConnection``: incoming bytes are fed to it and
    any bytes it queues are flushed back to the transport.
    """

    def __init__(self, nickname, channel, message):
        self.nickname = nickname        # nick to register with
        self.channel = channel          # channel to join after registration
        self.message = message          # single message to deliver
        self.conn = IRCClientConnection()

    def connectionMade(self):
        # Register with the server as soon as the TCP connection is up.
        self.conn.send_command('NICK', self.nickname)
        self.conn.send_command('USER', 'ircproto', '0', 'ircproto example client')
        self.send_outgoing_data()

    def connectionLost(self, reason=connectionDone):
        # Single-shot client: once the connection drops, stop the reactor.
        reactor.stop()

    def dataReceived(self, data):
        close_connection = False
        for event in self.conn.feed_data(data):
            print('<<< ' + event.encode().rstrip())
            if isinstance(event, Reply):
                if event.is_error:
                    self.transport.abortConnection()
                    return
                elif event.code == RPL_MYINFO:
                    # Registration finished -- now we can join the channel.
                    self.conn.send_command('JOIN', self.channel)
            elif isinstance(event, Join):
                # We are in the channel: deliver the message and sign off.
                self.conn.send_command('PRIVMSG', self.channel, self.message)
                self.conn.send_command('QUIT')
                close_connection = True
            elif isinstance(event, Error):
                self.transport.abortConnection()
                return

        self.send_outgoing_data()
        if close_connection:
            self.transport.loseConnection()

    def send_outgoing_data(self):
        # This is more complicated than it should because we want to print all outgoing data here.
        # Normally, self.transport.write(self.conn.data_to_send()) would suffice.
        output = self.conn.data_to_send()
        if output:
            print('>>> ' + output.decode('utf-8').replace('\r\n', '\r\n>>> ').rstrip('> \r\n'))
            self.transport.write(output)
class IRCClientFactory(ClientFactory):
    """Creates one IRCProtocol per connection.

    NOTE(review): reads the module-level ``args`` namespace produced by
    ``parser.parse_args()`` below -- confirm this factory is only ever
    instantiated from this script's __main__ path.
    """

    def buildProtocol(self, addr):
        return IRCProtocol(args.nickname, args.channel, args.message)
# Command-line entry point: parse arguments, connect, and run the reactor.
parser = ArgumentParser(description='A sample IRC client')
parser.add_argument('host', help='address of irc server (foo.bar.baz or foo.bar.baz:port)')
parser.add_argument('nickname', help='nickname to register as')
parser.add_argument('channel', help='channel to join once registered')
parser.add_argument('message', help='message to send once joined')
args = parser.parse_args()

# Split an optional ":port" suffix; fall back to the standard IRC port 6667.
host, _, port = args.host.partition(':')
reactor.connectTCP(host, int(port or 6667), IRCClientFactory())
reactor.run()
| mit |
jorge-marques/shoop | shoop_tests/core/test_taxing_utils.py | 2 | 6140 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from decimal import Decimal
import pytest
from shoop.core.models import Tax
from shoop.core.pricing import TaxfulPrice, TaxlessPrice
from shoop.core.taxing.utils import calculate_compounded_added_taxes
from shoop.utils.money import Money
def tax(code, rate=None, amount=None):
    """Build an unsaved Tax whose name is derived from its code."""
    display_name = 'Tax ' + code
    return Tax(code=code, name=display_name, rate=rate, amount=amount)
def tfprice(value):
    """Shorthand for a tax-inclusive USD price."""
    return TaxfulPrice(value, 'USD')
def tlprice(value):
    """Shorthand for a tax-exclusive USD price."""
    return TaxlessPrice(value, 'USD')
def money(value):
    """Shorthand for a USD Money amount."""
    return Money(value, 'USD')
@pytest.mark.parametrize("price", [tlprice('123.00'), tfprice('123.00')])
def test_compounded_added_taxes_empty(price):
    # With no tax groups, taxful == taxless and no line taxes are produced.
    result = calculate_compounded_added_taxes(price, [])
    assert result.taxful == tfprice(123)
    assert result.taxless == tlprice(123)
    assert result.taxes == []

    # A single empty group behaves the same as no groups at all.
    result2 = calculate_compounded_added_taxes(price, [[]])
    assert result2.taxful == tfprice(123)
    assert result2.taxless == tlprice(123)
    assert result2.taxes == []
@pytest.mark.parametrize("price", [tlprice('100.00'), tfprice('115.00')])
def test_compounded_added_taxes_simple(price):
    # A single 15% tax: the same result must come out whether the input
    # price is given taxless (100) or taxful (115).
    taxes = [[tax('15%', rate=Decimal('0.15'))]]
    result = calculate_compounded_added_taxes(price, taxes)
    assert result.taxful == tfprice('115')
    assert result.taxless == tlprice('100')
    assert len(result.taxes) == 1
    assert result.taxes[0].tax.code == '15%'
    assert result.taxes[0].amount == money('15')
    assert result.taxes[0].base_amount == money('100')
@pytest.mark.parametrize("price", [tlprice('100.00'), tfprice('121.00')])
def test_compounded_added_taxes_simple_added(price):
    # Three taxes in ONE group: all are applied to the same base (100),
    # i.e. they add (15 + 5 + 1 = 21) rather than compound.
    taxes = [[
        tax('15%', rate=Decimal('0.15')),
        tax('5%', rate=Decimal('0.05')),
        tax('1%', rate=Decimal('0.01')),
    ]]
    result = calculate_compounded_added_taxes(price, taxes)
    assert result.taxful == tfprice('121')
    assert result.taxless == tlprice('100')
    assert len(result.taxes) == 3
    assert result.taxes[0].tax.code == '15%'
    assert result.taxes[0].amount == money('15')
    assert result.taxes[0].base_amount == money('100')
    assert result.taxes[1].tax.code == '5%'
    assert result.taxes[1].amount == money('5')
    assert result.taxes[1].base_amount == money('100')
    assert result.taxes[2].tax.code == '1%'
    assert result.taxes[2].amount == money('1')
    assert result.taxes[2].base_amount == money('100')
@pytest.mark.parametrize("price", [tlprice('100.00'), tfprice('121.9575')])
def test_compounded_added_taxes_simple_compound(price):
    # The same three taxes, but in SEPARATE groups: each later group is
    # applied to the previous group's taxful total (100 -> 115 -> 120.75).
    taxes = [
        [tax('15%', rate=Decimal('0.15'))],
        [tax('5%', rate=Decimal('0.05'))],
        [tax('1%', rate=Decimal('0.01'))],
    ]
    result = calculate_compounded_added_taxes(price, taxes)
    assert result.taxful == tfprice('121.9575')
    assert result.taxless == tlprice('100')
    assert len(result.taxes) == 3
    assert result.taxes[0].tax.code == '15%'
    assert result.taxes[0].amount == money('15')
    assert result.taxes[0].base_amount == money('100')
    assert result.taxes[1].tax.code == '5%'
    assert result.taxes[1].amount == money('5.75')
    assert result.taxes[1].base_amount == money('115')
    assert result.taxes[2].tax.code == '1%'
    assert result.taxes[2].amount == money('1.2075')
    assert result.taxes[2].base_amount == money('120.75')
# Four tax groups applied in sequence (compounding between groups, adding
# within a group), mixing rate-based and fixed-amount taxes.
COMPLEX_TAX_GROUPS = [
    [
        tax('1A', rate=Decimal('0.5')),
        tax('1B', rate=Decimal('0.24')),
        tax('1C', amount=money('1.11')),
        tax('1D', amount=money('0.89')),
    ], [
        tax('2A', rate=Decimal('0.1')),
        tax('2B', rate=Decimal('0.01')),
        tax('2C', amount=money('1.25')),
        tax('2D', amount=money('0.25')),
    ], [
        tax('3A', amount=money('0.123')),
    ], [
        tax('4A', rate=Decimal('0.1')),
    ]
]
@pytest.mark.parametrize("price", [tlprice('100'), tfprice('216.6813')])
def test_compounded_added_taxes_complex(price):
    result = calculate_compounded_added_taxes(price, COMPLEX_TAX_GROUPS)
    # Flatten the line taxes for a single table comparison.
    result_taxes = [
        (line_tax.tax.code, line_tax.amount, line_tax.base_amount)
        for line_tax in result.taxes]
    expected_taxes = [
        # code, tax_amount, base_amount
        ('1A', money('50.0'), money('100')),
        ('1B', money('24.00'), money('100')),
        ('1C', money('1.11'), money('100')),
        ('1D', money('0.89'), money('100')),
        ('2A', money('17.60'), money('176.0')),
        ('2B', money('1.760'), money('176.0')),
        ('2C', money('1.25'), money('176.0')),
        ('2D', money('0.25'), money('176.0')),
        ('3A', money('0.123'), money('196.860')),
        ('4A', money('19.6983'), money('196.983')),
    ]
    assert result_taxes == expected_taxes
    assert result.taxless == tlprice('100')
    assert result.taxful == tfprice('216.6813')
# Round-trip check: each (taxless, taxful) pair must convert to the other
# exactly under COMPLEX_TAX_GROUPS, including negative and near-zero prices.
@pytest.mark.parametrize("prices", [
    (tlprice('12345.6789'), tfprice('26233.115950206')),
    (tlprice('10000.00'), tfprice('21249.6273')),
    (tlprice('100.00'), tfprice('216.6813')),
    (tlprice('12.97'), tfprice('31.7825838')),
    (tlprice('10.00'), tfprice('25.4727')),
    (tlprice('1.00'), tfprice('6.35184')),
    (tlprice('0.03'), tfprice('4.2910362')),
    (tlprice('0.02'), tfprice('4.2697908')),
    (tlprice('0.01'), tfprice('4.2485454')),
    (tlprice('0.00'), tfprice('4.2273')),
    (tlprice('-1.00'), tfprice('2.10276')),
    (tlprice('-1.9897483'), tfprice('0.000000146718')),
    (tlprice('-10.00'), tfprice('-17.0181')),
])
def test_compounded_added_taxes_complex2(prices):
    (taxless_price, taxful_price) = prices
    # Starting from the taxless side...
    res1 = calculate_compounded_added_taxes(taxless_price, COMPLEX_TAX_GROUPS)
    assert res1.taxless == taxless_price
    assert res1.taxful == taxful_price
    # ...and from the taxful side must agree.
    res2 = calculate_compounded_added_taxes(taxful_price, COMPLEX_TAX_GROUPS)
    assert res2.taxless == taxless_price
    assert res2.taxful == taxful_price
| agpl-3.0 |
alexkost819/thesis | train.py | 1 | 12313 | """Created on 24 June 2017.
@author: Alex Kost
@description: Training class for CNN and RNN models
Attributes:
DEFAULT_FORMAT (str): Logging format
LOGFILE_NAME (str): Logging file name
OUTPUT_DIR (str): TensorBoard output directory
"""
# Basic Python
import logging
import os
from time import strftime
from math import ceil
# Extended Python
import progressbar
import tensorflow as tf
# Alex Python
from data_processor import DataProcessor
from rnn_model import RNNModel # RNN MODEL
from cnn_model import CNNModel # CNN MODEL
# Progressbar config
progressbar.streams.wrap_stderr()
# Constants
DEFAULT_FORMAT = '%(asctime)s: %(levelname)s: %(message)s'
LOGFILE_NAME = 'train.log'
OUTPUT_DIR = 'output'
class TrainModel(DataProcessor):
"""
TrainModel is a class that builds and trains a provided model.
Attributes:
batch_size (int): number of examples in a single batch
dropout_rate (float): dropout rate; 0.1 == 10% of input units drop out
learning_rate (float): learning rate, used for optimizing
logger (logger object): logging object to write to stream/file
model (TensorFlow model object): Model to train and evaluate
n_checks (int): number of times to check performance while training
n_epochs (int): number of times we go through all data
summary_op (TensorFlow operation): summary operation of all tf.summary objects
"""
def __init__(self, model, n_epochs=20, batch_size=32):
"""Constructor.
Args:
model (TensorFlow model object): Model to train and evaluate
n_epochs (int, optional): number of times we go through all data
batch_size (int, optional): number of examples in a single batch
"""
# TRAINING PARAMETERS
self.n_epochs = n_epochs
self.batch_size = batch_size
# CONSTANT
self.model = model
self.summary_op = None
self.logger = logging.getLogger(__name__)
self.n_checks = 5
# INPUT DATA/LABELS
super(TrainModel, self).__init__(self.model.n_classes, self.model.n_features)
self.preprocess_data_by_label()
# HELPER VARIABLES
self._ex_per_epoch = None
self._steps_per_epoch = None
self._train_length_ex = None
self._train_length_steps = None
self.calculate_helpers()
def calculate_helpers(self):
"""Calculate helper variables for training length."""
self._ex_per_epoch = len(self.train_files)
self._steps_per_epoch = int(ceil(self._ex_per_epoch / float(self.batch_size)))
self._train_length_ex = self._ex_per_epoch * self.n_epochs
self._train_length_steps = self._steps_per_epoch * self.n_epochs
self.logger.debug('self._ex_per_epoch: %d', self._ex_per_epoch)
self.logger.debug('self._steps_per_epoch: %d', self._steps_per_epoch)
self.logger.debug('self._train_length_ex: %d', self._train_length_ex)
self.logger.debug('self._train_length_steps: %d', self._train_length_steps)
def train_model(self, use_tensorboard=True):
"""Train the model.
Args:
use_tensorboard (bool, optional): Description
Returns:
TYPE: Description
"""
# SETUP TENSORBOARD FOR NEW RUN
if use_tensorboard:
checkpoint_prefix, run_dir = self._setup_tensorboard_directories()
saver = tf.train.Saver(tf.global_variables())
else:
self.logger.info('*** NEW RUN ***')
self._log_training_and_model_params()
self.summary_op = tf.summary.merge_all()
# TRAIN
with tf.Session() as sess:
# Initialization
progress_bar = progressbar.ProgressBar(max_value=self._train_length_steps)
sess.run(tf.global_variables_initializer())
if use_tensorboard:
train_writer = tf.summary.FileWriter(run_dir + '/train', sess.graph)
val_writer = tf.summary.FileWriter(run_dir + '/val')
batch_idx = 0
progress_bar.start()
progress_bar.update(0)
self.logger.info("The training shall begin.")
try:
_, acc_test_before, _ = self.evaluate_model_on_data(sess, 'test')
for step in range(self._train_length_steps):
# Reset/increment batch_idx
if step % self._steps_per_epoch == 0:
batch_idx = 0
else:
batch_idx += 1
if use_tensorboard:
do_full_eval = step % ceil(self._train_length_steps / float(self.n_checks)) == 0
do_full_eval = do_full_eval or (step == self._train_length_steps - 1)
if do_full_eval:
# Check training and validation performance
cost_train, acc_train, _ = self.evaluate_model_on_data(sess, 'train')
cost_val, acc_val, summary = self.evaluate_model_on_data(sess, 'val')
# Report information to user
self.logger.info('%d epochs elapsed.', step / self._steps_per_epoch)
self.logger.info('COST: Train: %5.3f / Val: %5.3f', cost_train, cost_val)
self.logger.info('ACCURACY: Train: %5.3f / Val: %5.3f', acc_train, acc_val)
# Save to Tensorboard
val_writer.add_summary(summary, step)
saver.save(sess, checkpoint_prefix, global_step=step)
# # If model is not learning immediately, break out of training
# if acc_val == acc_test_before and step > 100:
# self.logger.info('Stuck on value: %d', acc_val)
# break
# Training step
x_batch, y_batch = self._generate_batch(batch_idx)
_, summary = sess.run([self.model.optimizer, self.summary_op],
feed_dict={self.model.x: x_batch,
self.model.y: y_batch,
self.model.trainable: True})
# Save to Tensorboard, update progress bar
if use_tensorboard:
train_writer.add_summary(summary, step)
progress_bar.update(step)
except KeyboardInterrupt:
self.logger.info('Keyboard Interrupt? Gracefully quitting.')
finally:
progress_bar.finish()
_, acc_test_after, _ = self.evaluate_model_on_data(sess, 'test')
self.logger.info("The training is done.")
self.logger.info('Test accuracy before training: %.3f.', acc_test_before)
self.logger.info('Test accuracy after training: %.3f.', acc_test_after)
if use_tensorboard:
train_writer.close()
val_writer.close()
return acc_test_after
def evaluate_model_on_data(self, sess, dataset_label):
"""Evaluate the model on the entire training data.
Args:
sess (tf.Session object): active session object
dataset_label (string): dataset label
Returns:
float, float: the cost and accuracy of the model based on the dataset.
"""
try:
dataset_dict = {'test': self.test_data,
'train': self.test_data,
'val': self.val_data}
dataset = dataset_dict[dataset_label]
except KeyError:
raise '"dataset" arg must be in dataset dict: {}'.format(dataset_dict.keys())
cost, acc, summary = sess.run([self.model.cost, self.model.accuracy, self.summary_op],
feed_dict={self.model.x: dataset[0],
self.model.y: dataset[1],
self.model.trainable: False})
return cost, acc, summary
@staticmethod
def reset_model():
"""Reset the model to prepare for next run."""
tf.reset_default_graph()
""" Helper Functions """
def _setup_tensorboard_directories(self):
"""Set up TensorBoard directories.
Returns:
checkpoint_prefix, run_dir (string, string): checkpoint prefix, output root folder
"""
timestamp = str(strftime("%Y.%m.%d-%H.%M.%S"))
model_type = self.model.__class__.__name__.replace('Model', '')
model_name = timestamp + '_' + model_type
out_dir = os.path.abspath(os.path.join(os.path.curdir, OUTPUT_DIR))
run_dir = os.path.abspath(os.path.join(out_dir, model_name))
checkpoint_dir = os.path.abspath(os.path.join(run_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Logging the Run
self.logger.info('*** NEW RUN ***')
self.logger.info('filename: %s', model_name)
return checkpoint_prefix, run_dir
def _log_training_and_model_params(self):
    """Record the hyper-parameters of the current run in the log."""
    model_type = self.model.__class__.__name__
    self.logger.info(' *** TRAINING ***')
    self.logger.info(' n_epochs: %d', self.n_epochs)
    self.logger.info(' batch_size: %d', self.batch_size)
    self.logger.info(' *** MODEL ***')
    if 'CNN' in model_type:
        # Convolution-specific hyper-parameters.
        self.logger.info(' num_filt_1: %d', self.model.num_filt_1)
        self.logger.info(' kernel_size: %d', self.model.kernel_size)
        self.logger.info(' num_fc_1: %d', self.model.num_fc_1)
    elif 'RNN' in model_type:
        # Recurrence-specific hyper-parameters.
        self.logger.info(' n_hidden: %d', self.model.n_hidden)
        self.logger.info(' num_fc_1: %d', self.model.num_fc_1)
        self.logger.info(' n_layers: %d', self.model.n_layers)
        # NOTE(review): dropout_rate is assumed to belong to the RNN
        # branch (the original indentation was ambiguous) -- confirm.
        self.logger.info(' dropout_rate: %f', self.model.dropout_rate)
    # Optimizer (Adam) settings, logged for every model type.
    self.logger.info(' learning_rate: %f', self.model.learning_rate)
    self.logger.info(' beta1: %f', self.model.beta1)
    self.logger.info(' beta2: %f', self.model.beta2)
    self.logger.info(' epsilon: %f', self.model.epsilon)
def _generate_batch(self, batch_idx):
    """Return one mini-batch, sliding the batch window through the data.

    Args:
        batch_idx (int): zero-based index of the batch within the epoch.

    Returns:
        (x_batch, y_batch): feature slice and the matching label slice.
    """
    features = self.train_data[0]
    labels = self.train_data[1]
    start_idx = batch_idx * self.batch_size
    # BUG FIX: the original computed ``start_idx + self.batch_size - 1``.
    # Python slicing already excludes the end index, so that produced
    # batches of ``batch_size - 1`` examples and silently skipped one
    # training example per batch.  min() also clamps the final,
    # possibly-short batch to the data length.
    end_idx = min(start_idx + self.batch_size, self._ex_per_epoch)
    if self.n_features > 1:
        # Multi-feature data is laid out (features, examples), so the
        # batch window slides along the second axis.
        x_batch = features[:, start_idx:end_idx]
    else:
        x_batch = features[start_idx:end_idx]
    y_batch = labels[start_idx:end_idx]
    self.logger.debug('batch_idx: %d', batch_idx)
    self.logger.debug('Got training examples %d to %d', start_idx, end_idx)
    return x_batch, y_batch
def main():
    """Build, train and reset each model architecture in turn."""
    for model in (CNNModel(), RNNModel()):
        model.build_model()
        trainer = TrainModel(model, n_epochs=200, batch_size=128)
        trainer.train_model()
        # Clear the graph so the next architecture starts fresh.
        trainer.reset_model()
if __name__ == '__main__':
    # Configure the root logger so that module-level loggers inherit
    # these handlers.
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    # File handler: persists INFO and above to LOGFILE_NAME.
    fh = logging.FileHandler(LOGFILE_NAME)
    fh.setLevel(logging.INFO)
    # Console handler: mirrors INFO and above to the terminal.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # Both handlers share the same message format.
    formatter = logging.Formatter(DEFAULT_FORMAT)
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    main()
| apache-2.0 |
chipx86/reviewboard | reviewboard/scmtools/svn/base.py | 2 | 6825 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.utils.six.moves.urllib.parse import quote
from django.utils.translation import ugettext as _
from reviewboard.scmtools.core import HEAD
from reviewboard.scmtools.errors import SCMError
class Client(object):
    """Base SVN client.

    Defines the interface that concrete Subversion backends implement;
    every operation here raises :py:exc:`NotImplementedError`.
    """

    # Default revision range for get_log(): newest revision ('HEAD')
    # back to the first revision ('1').
    LOG_DEFAULT_START = 'HEAD'
    LOG_DEFAULT_END = '1'

    def __init__(self, config_dir, repopath, username=None, password=None):
        # NOTE(review): config_dir, username and password are accepted but
        # not stored here -- presumably subclasses consume them; confirm
        # before relying on such attributes existing on this base class.
        self.repopath = repopath

    def set_ssl_server_trust_prompt(self, cb):
        # Register a callback used to decide whether an SSL server
        # certificate should be trusted.
        raise NotImplementedError

    def get_file(self, path, revision=HEAD):
        """Returns the contents of a given file at the given revision."""
        raise NotImplementedError

    def get_keywords(self, path, revision=HEAD):
        """Returns a list of SVN keywords for a given path."""
        raise NotImplementedError

    def get_log(self, path, start=None, end=None, limit=None,
                discover_changed_paths=False, limit_to_path=False):
        """Returns log entries at the specified path.

        The log entries will appear ordered from most recent to least,
        with 'start' being the most recent commit in the range.

        If 'start' is not specified, then it will default to 'HEAD'. If
        'end' is not specified, it will default to '1'.

        To limit the commits to the given path, not factoring in history
        from any branch operations, set 'limit_to_path' to True.
        """
        raise NotImplementedError

    def list_dir(self, path):
        """Lists the contents of the specified path.

        The result will be an ordered dictionary of contents, mapping
        filenames or directory names with a dictionary containing:

        * ``path`` - The full path of the file or directory.
        * ``created_rev`` - The revision where the file or directory was
          created.
        """
        raise NotImplementedError

    def diff(self, revision1, revision2, path=None):
        """Returns a diff between two revisions.

        The diff will contain the differences between the two revisions,
        and may optionally be limited to a specific path.

        The returned diff will be returned as a Unicode object.
        """
        raise NotImplementedError

    @property
    def repository_info(self):
        """Metadata about the repository.

        This is a dictionary containing the following keys:

        ``uuid`` (:py:class:`unicode`):
            The UUID of the repository.

        ``root_url`` (:py:class:`unicode`):
            The root URL of the configured repository.

        ``url`` (:py:class:`unicode`):
            The full URL of the configured repository.
        """
        raise NotImplementedError

    def normalize_path(self, path):
        """Normalize a path to a file/directory for a request to Subversion.

        If the path is an absolute path beginning at the base of the
        repository, it will be returned as-is. Otherwise, it will be appended
        onto the repository path, with any leading ``/`` characters on the
        path removed.

        If appending the path, care will be taken to quote special characters
        like a space, ``#``, or ``?``, in order to ensure that they're not
        mangled. There are many characters Subversion does consider valid that
        would normally be quoted, so this isn't true URL quoting.

        All trailing ``/`` characters will also be removed.

        Args:
            path (unicode):
                The path to normalize.

        Returns:
            unicode:
            The normalized path.
        """
        if path.startswith(self.repopath):
            norm_path = path
        else:
            # Some important notes for the quoting below:
            #
            # 1) Subversion requires that we operate off of a URI-based
            #    repository path in order for file lookups to at all work, so
            #    we can be sure we're building a URI here. That means we're
            #    safe to quote.
            #
            # 2) This is largely being mentioned because the original
            #    contribution to fix a lookup issue here with special
            #    characters was written to be compatible with local file
            #    paths. Support for that is a pretty common assumption, but
            #    is unnecessary, so the code here is safe.
            #
            # 3) We can't rely on urllib's standard quoting behavior.
            #    completely. Subversion has a specific table of characters
            #    that must be quoted, and ones that can't be. There is enough
            #    we can leverage from urlquote's own table, but we need to
            #    mark several more as safe.
            #
            # See the "svn_uri_char_validity" look up table and notes here:
            #
            # https://github.com/apache/subversion/blob/trunk/subversion/libsvn_subr/path.c
            #
            # 4) file:// URLs don't allow non-printable characters (character
            #    codes < 32), while non-file:// URLs do. We don't want to
            #    trigger issues in Subversion (earlier versions assume this
            #    is our responsibility), so we validate here.
            #
            # 5) Modern Subversion seems to handle its own normalization now,
            #    from what we can tell. That might not always be true, though,
            #    and we need to support older versions, so we'll continue to
            #    maintain this going forward.
            if self.repopath.startswith('file:'):
                # Validate that this doesn't have any unprintable ASCII
                # characters or older versions of Subversion will throw a
                # fit.
                for c in path:
                    if 0 <= ord(c) < 32:
                        raise SCMError(
                            _('Invalid character code %(code)s found in '
                              'path %(path)r.')
                            % {
                                'code': ord(c),
                                'path': path,
                            })

            # NOTE(review): the safe set below contains a literal apostrophe
            # twice; harmless to urllib's quote(), but looks unintended.
            norm_path = '%s/%s' % (
                self.repopath,
                quote(path.lstrip('/'), safe="!$&'()*+,'-./:=@_~")
            )

        return norm_path.rstrip('/')

    def accept_ssl_certificate(self, path, on_failure=None):
        """If the repository uses SSL, this method is used to determine whether
        the SSL certificate can be automatically accepted.

        If the cert cannot be accepted, the ``on_failure`` callback
        is executed.

        ``on_failure`` signature::

            void on_failure(e:Exception, path:str, cert:dict)
        """
        raise NotImplementedError
| mit |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_3/tests/modeltests/lookup/tests.py | 48 | 29352 | from datetime import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from models import Author, Article, Tag
class LookupTests(TestCase):
    """Exercises QuerySet lookup features against a small fixture set."""

    def setUp(self):
        """Create the Authors, Articles and Tags shared by every test."""
        # Create a few Authors.
        self.au1 = Author(name='Author 1')
        self.au1.save()
        self.au2 = Author(name='Author 2')
        self.au2.save()
        # Create a couple of Articles.
        self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
        self.a1.save()
        self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
        self.a2.save()
        self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
        self.a3.save()
        self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
        self.a4.save()
        self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
        self.a5.save()
        self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
        self.a6.save()
        self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
        self.a7.save()
        # Create a few Tags.
        self.t1 = Tag(name='Tag 1')
        self.t1.save()
        self.t1.articles.add(self.a1, self.a2, self.a3)
        self.t2 = Tag(name='Tag 2')
        self.t2.save()
        self.t2.articles.add(self.a3, self.a4, self.a5)
        self.t3 = Tag(name='Tag 3')
        self.t3.save()
        self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
    """exists() reflects whether any rows currently match."""
    # We can use .exists() to check that there are some
    self.assertTrue(Article.objects.exists())
    for a in Article.objects.all():
        a.delete()
    # There should be none now!
    self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
    """Integer fields accept string values in lookups."""
    # Integer value can be queried using string
    self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
                             ['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
    """Date fields accept string prefixes in lookups, where the backend
    supports it."""
    # A date lookup can be performed using a string search
    self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
        [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
def test_iterator(self):
    """iterator() lazily yields results via database-level iteration."""
    # Each QuerySet gets iterator(), which is a generator that "lazily"
    # returns results using database-level iteration.
    self.assertQuerysetEqual(Article.objects.iterator(),
        [
            'Article 5',
            'Article 6',
            'Article 4',
            'Article 2',
            'Article 3',
            'Article 7',
            'Article 1',
        ],
        transform=attrgetter('headline'))
    # iterator() can be used on any QuerySet.
    self.assertQuerysetEqual(
        Article.objects.filter(headline__endswith='4').iterator(),
        ['Article 4'],
        transform=attrgetter('headline'))
def test_count(self):
    """count() honours filters, slicing, and string-typed date values."""
    # count() returns the number of objects matching search criteria.
    self.assertEqual(Article.objects.count(), 7)
    self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
    self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
    # count() should respect sliced query sets.
    articles = Article.objects.all()
    self.assertEqual(articles.count(), 7)
    self.assertEqual(articles[:4].count(), 4)
    self.assertEqual(articles[1:100].count(), 6)
    self.assertEqual(articles[10:100].count(), 0)
    # Date and date/time lookups can also be done with strings.
    self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
    """in_bulk() maps the given IDs to their objects and validates input."""
    # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
    arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
    self.assertEqual(arts[self.a1.id], self.a1)
    self.assertEqual(arts[self.a2.id], self.a2)
    # Any iterable of IDs is accepted; missing IDs are simply omitted.
    self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
    self.assertEqual(Article.objects.in_bulk(set([self.a3.id])), {self.a3.id: self.a3})
    self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
    self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
    self.assertEqual(Article.objects.in_bulk([1000]), {})
    self.assertEqual(Article.objects.in_bulk([]), {})
    # Non-list arguments and keyword lookups are rejected.
    self.assertRaises(AssertionError, Article.objects.in_bulk, 'foo')
    self.assertRaises(TypeError, Article.objects.in_bulk)
    self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
    """values() returns dicts, and works with extra(), relations and
    iterator()."""
    # values() returns a list of dictionaries instead of object instances --
    # and you can specify which fields you want to retrieve.
    identity = lambda x:x
    self.assertQuerysetEqual(Article.objects.values('headline'),
        [
            {'headline': u'Article 5'},
            {'headline': u'Article 6'},
            {'headline': u'Article 4'},
            {'headline': u'Article 2'},
            {'headline': u'Article 3'},
            {'headline': u'Article 7'},
            {'headline': u'Article 1'},
        ],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
        [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
        transform=identity)
    self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
        [
            {'id': self.a5.id, 'headline': 'Article 5'},
            {'id': self.a6.id, 'headline': 'Article 6'},
            {'id': self.a4.id, 'headline': 'Article 4'},
            {'id': self.a2.id, 'headline': 'Article 2'},
            {'id': self.a3.id, 'headline': 'Article 3'},
            {'id': self.a7.id, 'headline': 'Article 7'},
            {'id': self.a1.id, 'headline': 'Article 1'},
        ],
        transform=identity)
    # You can use values() with iterator() for memory savings,
    # because iterator() uses database-level iteration.
    self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
        [
            {'headline': u'Article 5', 'id': self.a5.id},
            {'headline': u'Article 6', 'id': self.a6.id},
            {'headline': u'Article 4', 'id': self.a4.id},
            {'headline': u'Article 2', 'id': self.a2.id},
            {'headline': u'Article 3', 'id': self.a3.id},
            {'headline': u'Article 7', 'id': self.a7.id},
            {'headline': u'Article 1', 'id': self.a1.id},
        ],
        transform=identity)
    # The values() method works with "extra" fields specified in extra(select).
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
        [
            {'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
            {'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
            {'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
            {'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
            {'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
            {'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
            {'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
        ],
        transform=identity)
    data = {
        'id_plus_one': 'id+1',
        'id_plus_two': 'id+2',
        'id_plus_three': 'id+3',
        'id_plus_four': 'id+4',
        'id_plus_five': 'id+5',
        'id_plus_six': 'id+6',
        'id_plus_seven': 'id+7',
        'id_plus_eight': 'id+8',
    }
    self.assertQuerysetEqual(
        Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
        [{
            'id_plus_one': self.a1.id + 1,
            'id_plus_two': self.a1.id + 2,
            'id_plus_three': self.a1.id + 3,
            'id_plus_four': self.a1.id + 4,
            'id_plus_five': self.a1.id + 5,
            'id_plus_six': self.a1.id + 6,
            'id_plus_seven': self.a1.id + 7,
            'id_plus_eight': self.a1.id + 8,
        }], transform=identity)
    # You can specify fields from forward and reverse relations, just like filter().
    self.assertQuerysetEqual(
        Article.objects.values('headline', 'author__name'),
        [
            {'headline': self.a5.headline, 'author__name': self.au2.name},
            {'headline': self.a6.headline, 'author__name': self.au2.name},
            {'headline': self.a4.headline, 'author__name': self.au1.name},
            {'headline': self.a2.headline, 'author__name': self.au1.name},
            {'headline': self.a3.headline, 'author__name': self.au1.name},
            {'headline': self.a7.headline, 'author__name': self.au2.name},
            {'headline': self.a1.headline, 'author__name': self.au1.name},
        ], transform=identity)
    self.assertQuerysetEqual(
        Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
        [
            {'name': self.au1.name, 'article__headline': self.a1.headline},
            {'name': self.au1.name, 'article__headline': self.a2.headline},
            {'name': self.au1.name, 'article__headline': self.a3.headline},
            {'name': self.au1.name, 'article__headline': self.a4.headline},
            {'name': self.au2.name, 'article__headline': self.a5.headline},
            {'name': self.au2.name, 'article__headline': self.a6.headline},
            {'name': self.au2.name, 'article__headline': self.a7.headline},
        ], transform=identity)
    self.assertQuerysetEqual(
        Author.objects.values('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
        [
            {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
            {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
            {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
            {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
            {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
            {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
            {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
            {'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
            {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
        ], transform=identity)
    # However, an exception FieldError will be raised if you specify
    # a non-existent field name in values() (a field that is neither in the
    # model nor in extra(select)).
    self.assertRaises(FieldError,
        Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
        'id', 'id_plus_two')
    # If you don't specify field names to values(), all are returned.
    self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
        [{
            'id': self.a5.id,
            'author_id': self.au2.id,
            'headline': 'Article 5',
            'pub_date': datetime(2005, 8, 1, 9, 0)
        }], transform=identity)
def test_values_list(self):
    """values_list() returns tuples (or flat values) in field order."""
    # values_list() is similar to values(), except that the results are
    # returned as a list of tuples, rather than a list of dictionaries.
    # Within each tuple, the order of the elements is the same as the order
    # of fields in the values_list() call.
    identity = lambda x:x
    self.assertQuerysetEqual(Article.objects.values_list('headline'),
        [
            (u'Article 5',),
            (u'Article 6',),
            (u'Article 4',),
            (u'Article 2',),
            (u'Article 3',),
            (u'Article 7',),
            (u'Article 1',),
        ], transform=identity)
    self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
        [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.values_list('id', flat=True).order_by('id'),
        [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id+1'})
            .order_by('id').values_list('id'),
        [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id+1'})
            .order_by('id').values_list('id_plus_one', 'id'),
        [
            (self.a1.id+1, self.a1.id),
            (self.a2.id+1, self.a2.id),
            (self.a3.id+1, self.a3.id),
            (self.a4.id+1, self.a4.id),
            (self.a5.id+1, self.a5.id),
            (self.a6.id+1, self.a6.id),
            (self.a7.id+1, self.a7.id)
        ],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id+1'})
            .order_by('id').values_list('id', 'id_plus_one'),
        [
            (self.a1.id, self.a1.id+1),
            (self.a2.id, self.a2.id+1),
            (self.a3.id, self.a3.id+1),
            (self.a4.id, self.a4.id+1),
            (self.a5.id, self.a5.id+1),
            (self.a6.id, self.a6.id+1),
            (self.a7.id, self.a7.id+1)
        ],
        transform=identity)
    self.assertQuerysetEqual(
        Author.objects.values_list('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
        [
            (self.au1.name, self.a1.headline, self.t1.name),
            (self.au1.name, self.a2.headline, self.t1.name),
            (self.au1.name, self.a3.headline, self.t1.name),
            (self.au1.name, self.a3.headline, self.t2.name),
            (self.au1.name, self.a4.headline, self.t2.name),
            (self.au2.name, self.a5.headline, self.t2.name),
            (self.au2.name, self.a5.headline, self.t3.name),
            (self.au2.name, self.a6.headline, self.t3.name),
            (self.au2.name, self.a7.headline, self.t3.name),
        ], transform=identity)
    # flat=True is only valid when a single field is requested.
    self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
def test_get_next_previous_by(self):
    """get_next_by_FOO()/get_previous_by_FOO() walk records by date,
    falling back to ID on ties."""
    # Every DateField and DateTimeField creates get_next_by_FOO() and
    # get_previous_by_FOO() methods. In the case of identical date values,
    # these methods will use the ID as a fallback check. This guarantees
    # that no records are skipped or duplicated.
    self.assertEqual(repr(self.a1.get_next_by_pub_date()),
                     '<Article: Article 2>')
    self.assertEqual(repr(self.a2.get_next_by_pub_date()),
                     '<Article: Article 3>')
    # Extra lookup kwargs restrict the candidates.
    self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
                     '<Article: Article 6>')
    self.assertEqual(repr(self.a3.get_next_by_pub_date()),
                     '<Article: Article 7>')
    self.assertEqual(repr(self.a4.get_next_by_pub_date()),
                     '<Article: Article 6>')
    # a5 has the latest pub_date, so there is no "next" record.
    self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
    self.assertEqual(repr(self.a6.get_next_by_pub_date()),
                     '<Article: Article 5>')
    self.assertEqual(repr(self.a7.get_next_by_pub_date()),
                     '<Article: Article 4>')
    self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
                     '<Article: Article 3>')
    self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
                     '<Article: Article 4>')
    self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
                     '<Article: Article 6>')
    self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
                     '<Article: Article 7>')
    self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
                     '<Article: Article 2>')
    self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
                     '<Article: Article 1>')
def test_escaping(self):
    """SQL wildcard characters in lookup values are escaped automatically."""
    # Underscores, percent signs and backslashes have special meaning in the
    # underlying SQL code, but Django handles the quoting of them automatically.
    a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
    a8.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
        [
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
                             ['<Article: Article_ with underscore>'])
    a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
    a9.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
        [
            '<Article: Article% with percent sign>',
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
                             ['<Article: Article% with percent sign>'])
    a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
    a10.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
                             ['<Article: Article with \ backslash>'])
def test_exclude(self):
    """exclude() is the complement of filter(), including for escaped
    wildcard characters."""
    a8 = Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
    a9 = Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
    a10 = Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
    # exclude() is the opposite of filter() when doing lookups:
    self.assertQuerysetEqual(
        Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
        [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
        [
            '<Article: Article with \\ backslash>',
            '<Article: Article% with percent sign>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
        [
            '<Article: Article with \\ backslash>',
            '<Article: Article% with percent sign>',
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 1>',
        ])
def test_none(self):
    """none() returns an EmptyQuerySet supporting the full QuerySet API."""
    # none() returns an EmptyQuerySet that behaves like any other QuerySet object
    self.assertQuerysetEqual(Article.objects.none(), [])
    self.assertQuerysetEqual(
        Article.objects.none().filter(headline__startswith='Article'), [])
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article').none(), [])
    self.assertEqual(Article.objects.none().count(), 0)
    # update() on an empty queryset touches zero rows.
    self.assertEqual(
        Article.objects.none().update(headline="This should not take effect"), 0)
    self.assertQuerysetEqual(
        [article for article in Article.objects.none().iterator()],
        [])
def test_in(self):
    """__in with an empty list matches nothing; exclude() of it matches all."""
    # using __in with an empty list should return an empty query set
    self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
    self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
        [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
def test_error_messages(self):
    """Bad lookup keywords produce descriptive FieldError messages."""
    # Programming errors are pointed out with nice error messages
    try:
        # Missing the double underscore between field and lookup type.
        Article.objects.filter(pub_date_year='2005').count()
        self.fail('FieldError not raised')
    except FieldError, ex:
        self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
            "into field. Choices are: author, headline, id, pub_date, tag")
    try:
        # Misspelled lookup type ('starts' instead of 'startswith').
        Article.objects.filter(headline__starts='Article')
        self.fail('FieldError not raised')
    except FieldError, ex:
        self.assertEqual(str(ex), "Join on field 'headline' not permitted. "
            "Did you misspell 'starts' for the lookup type?")
def test_regex(self):
    """__regex and __iregex support the usual regular-expression features."""
    # Create some articles with a bit more interesting headlines for testing field lookups:
    for a in Article.objects.all():
        a.delete()
    now = datetime.now()
    a1 = Article(pub_date=now, headline='f')
    a1.save()
    a2 = Article(pub_date=now, headline='fo')
    a2.save()
    a3 = Article(pub_date=now, headline='foo')
    a3.save()
    a4 = Article(pub_date=now, headline='fooo')
    a4.save()
    a5 = Article(pub_date=now, headline='hey-Foo')
    a5.save()
    a6 = Article(pub_date=now, headline='bar')
    a6.save()
    a7 = Article(pub_date=now, headline='AbBa')
    a7.save()
    a8 = Article(pub_date=now, headline='baz')
    a8.save()
    a9 = Article(pub_date=now, headline='baxZ')
    a9.save()
    # zero-or-more
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
        ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
    self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
        [
            '<Article: f>',
            '<Article: fo>',
            '<Article: foo>',
            '<Article: fooo>',
            '<Article: hey-Foo>',
        ])
    # one-or-more
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
        ['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
    # wildcard
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
        ['<Article: foo>', '<Article: fooo>'])
    # leading anchor
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
        ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
    self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
        ['<Article: AbBa>'])
    # trailing anchor
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
        ['<Article: baz>'])
    self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
        ['<Article: baxZ>', '<Article: baz>'])
    # character sets
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
        ['<Article: bar>', '<Article: baz>'])
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
        ['<Article: baxZ>'])
    self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
        ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
    # and more articles:
    a10 = Article(pub_date=now, headline='foobar')
    a10.save()
    a11 = Article(pub_date=now, headline='foobaz')
    a11.save()
    a12 = Article(pub_date=now, headline='ooF')
    a12.save()
    a13 = Article(pub_date=now, headline='foobarbaz')
    a13.save()
    a14 = Article(pub_date=now, headline='zoocarfaz')
    a14.save()
    a15 = Article(pub_date=now, headline='barfoobaz')
    a15.save()
    a16 = Article(pub_date=now, headline='bazbaRFOO')
    a16.save()
    # alternation
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
        [
            '<Article: barfoobaz>',
            '<Article: foobar>',
            '<Article: foobarbaz>',
            '<Article: foobaz>',
        ])
    self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
        [
            '<Article: barfoobaz>',
            '<Article: foobar>',
            '<Article: foobarbaz>',
            '<Article: foobaz>',
            '<Article: ooF>',
        ])
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
        ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
    # greedy matching
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
        [
            '<Article: barfoobaz>',
            '<Article: baz>',
            '<Article: bazbaRFOO>',
            '<Article: foobarbaz>',
            '<Article: foobaz>',
        ])
    self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
        [
            '<Article: bar>',
            '<Article: barfoobaz>',
            '<Article: bazbaRFOO>',
            '<Article: foobar>',
            '<Article: foobarbaz>',
        ])
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
    """__regex supports grouping and backreferences, where the backend
    supports them."""
    # grouping and backreferences
    now = datetime.now()
    a10 = Article(pub_date=now, headline='foobar')
    a10.save()
    a11 = Article(pub_date=now, headline='foobaz')
    a11.save()
    a12 = Article(pub_date=now, headline='ooF')
    a12.save()
    a13 = Article(pub_date=now, headline='foobarbaz')
    a13.save()
    a14 = Article(pub_date=now, headline='zoocarfaz')
    a14.save()
    a15 = Article(pub_date=now, headline='barfoobaz')
    a15.save()
    a16 = Article(pub_date=now, headline='bazbaRFOO')
    a16.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
        ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
| apache-2.0 |
misuzu/torpedomsg | setup.py | 1 | 1685 | import os
import re
from setuptools import setup
def read(*paths):
    """Join *paths* into a single file path and return that file's text."""
    full_path = os.path.join(*paths)
    with open(full_path, 'r') as handle:
        return handle.read()
def find_version(*file_paths):
    """
    Build a path from *file_paths* and search for a ``__version__``
    string inside.
    """
    # Read the file directly rather than importing it, so the package's
    # dependencies are not required at setup time.
    with open(os.path.join(*file_paths), 'r') as handle:
        contents = handle.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Single source of truth for the version: parsed out of the package file
# rather than imported, so setup works without the package's dependencies.
version = find_version('torpedomsg/__init__.py')

setup(
    name='torpedomsg',
    version=version,
    description='Flexible Pub-Sub on top of Tornado',
    long_description=read('README.rst'),
    author='misuzu',
    url='https://github.com/misuzu/torpedomsg',
    download_url = 'https://github.com/misuzu/torpedomsg/tarball/{}'.format(version),
    license='MIT',
    packages=['torpedomsg'],
    # Runtime dependencies, pinned below the next major version.
    install_requires=[
        'cbor<2',
        'tornado<5'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| mit |
TalShafir/ansible | lib/ansible/plugins/lookup/cpm_metering.py | 18 | 5916 | # (c) 2018, Western Telematic Inc. <kenp@wti.com>
# (c) 2012-18 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
lookup: cpm_metering
author: "Western Telematic Inc. (@wtinetworkgear)"
version_added: "2.7"
short_description: Get Power and Current data from WTI OOB/Combo and PDU devices
description:
- "Get Power and Current data from WTI OOB/Combo and PDU devices"
options:
_terms:
description:
- This is the Action to send the module.
required: true
choices: [ "getpower", "getcurrent" ]
cpm_url:
description:
- This is the URL of the WTI device to send the module.
required: true
cpm_username:
description:
- This is the Username of the WTI device to send the module.
cpm_password:
description:
- This is the Password of the WTI device to send the module.
use_https:
description:
- Designates to use an https connection or http connection.
required: false
default: True
choices: [ True, False ]
validate_certs:
description:
- If false, SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
type: bool
default: true
use_proxy:
description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
type: boolean
default: True
startdate:
description:
- Start date of the range to look for power data
required: false
enddate:
description:
- End date of the range to look for power data
required: false
"""
EXAMPLES = """
# Get Power data
- name: Get Power data for a given WTI device
- debug:
var: lookup('cpm_metering',
'getpower',
validate_certs=true,
use_https=true,
cpm_url='rest.wti.com',
cpm_username='restpower',
cpm_password='restfulpowerpass12')
# Get Current data
- name: Get Current data for a given WTI device
- debug:
var: lookup('cpm_metering',
'getcurrent',
validate_certs=true,
use_https=true,
cpm_url='rest.wti.com',
cpm_username='restpower',
cpm_password='restfulpowerpass12')
# Get Power data for a date range
- name: Get Power data for a given WTI device given a certain date range
- debug:
var: lookup('cpm_metering',
'getpower',
validate_certs=true,
use_https=true,
cpm_url='rest.wti.com',
cpm_username='restpower',
cpm_password='restfulpowerpass12',
startdate='08-12-2018'
enddate='08-14-2018')
"""
RETURN = """
_list:
description: The output JSON returned from the commands sent
returned: always
type: str
"""
import base64
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_text, to_bytes, to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
    """Lookup plugin returning power/current metering data from a WTI device."""

    def run(self, terms, variables=None, **kwargs):
        """Resolve each term ('getpower' or 'getcurrent') against the device.

        Returns a list containing one JSON response string per term.
        Raises AnsibleError for unknown terms and for any HTTP, URL, SSL
        or connection failure.
        """
        self.set_options(direct=kwargs)
        ret = []
        # The auth header, the optional date-range query string and the
        # protocol prefix do not depend on the term being processed, so
        # compute them once instead of once per loop iteration.
        auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(self.get_option('cpm_username'), self.get_option('cpm_password')),
                       errors='surrogate_or_strict')))
        # The date filter is applied only when BOTH endpoints are set.
        additional = ""
        if self.get_option("startdate") is not None and (len(self.get_option("startdate")) > 0):
            if self.get_option("enddate") is not None and (len(self.get_option("enddate")) > 0):
                additional = "?startdate=" + self.get_option("startdate") + "&enddate=" + self.get_option("enddate")
        if self.get_option('use_https') is True:
            protocol = "https://"
        else:
            protocol = "http://"
        for term in terms:
            if (term == 'getpower'):
                fullurl = ("%s%s/api/v2/config/power" % (protocol, self.get_option('cpm_url')))
            elif (term == 'getcurrent'):
                fullurl = ("%s%s/api/v2/config/current" % (protocol, self.get_option('cpm_url')))
            else:
                raise AnsibleError("Power command not recognized %s " % (term))
            if (len(additional) > 0):
                fullurl += additional
            display.vvvv("cpm_metering connecting to %s" % fullurl)
            try:
                response = open_url(fullurl, validate_certs=self.get_option('validate_certs'), use_proxy=self.get_option('use_proxy'),
                                    headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
            except HTTPError as e:
                raise AnsibleError("Received HTTP error for %s : %s" % (fullurl, to_native(e)))
            except URLError as e:
                raise AnsibleError("Failed lookup url for %s : %s" % (fullurl, to_native(e)))
            except SSLValidationError as e:
                raise AnsibleError("Error validating the server's certificate for %s: %s" % (fullurl, to_native(e)))
            except ConnectionError as e:
                raise AnsibleError("Error connecting to %s: %s" % (fullurl, to_native(e)))
            ret.append(to_text(response.read()))
        return ret
| gpl-3.0 |
salamer/django | django/utils/baseconv.py | 650 | 2982 | # Copyright (c) 2010 Guilherme Gondim. All rights reserved.
# Copyright (c) 2009 Simon Willison. All rights reserved.
# Copyright (c) 2002 Drew Perttula. All rights reserved.
#
# License:
# Python Software Foundation License version 2
#
# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
# ALL WARRANTIES.
#
# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
# code so it may be used in proprietary projects just like prior ``baseconv``
# distributions.
#
# All trademarks referenced herein are property of their respective holders.
#
"""
Convert numbers from base 10 integers to base X strings and back again.
Sample usage::
>>> base20 = BaseConverter('0123456789abcdefghij')
>>> base20.encode(1234)
'31e'
>>> base20.decode('31e')
1234
>>> base20.encode(-1234)
'-31e'
>>> base20.decode('-31e')
-1234
>>> base11 = BaseConverter('0123456789-', sign='$')
>>> base11.encode('$1234')
'$-22'
>>> base11.decode('$-22')
'$1234'
"""
# Digit alphabets for the ready-made converters defined at module bottom.
BASE2_ALPHABET = '01'
BASE16_ALPHABET = '0123456789ABCDEF'
# Base 56 omits visually confusable characters (0/O, 1/I/l, o).
BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# URL-safe base 64 variant: '-' and '_' instead of '+' and '/'.
BASE64_ALPHABET = BASE62_ALPHABET + '-_'
class BaseConverter(object):
    """Convert integers between base 10 and an arbitrary digit alphabet.

    The number of digits in `digits` defines the base; `sign` is the
    character used to mark negative values and must not appear among the
    digits themselves.
    """

    decimal_digits = '0123456789'

    def __init__(self, digits, sign='-'):
        self.sign = sign
        self.digits = digits
        if sign in self.digits:
            raise ValueError('Sign character found in converter base digits.')

    def __repr__(self):
        return "<BaseConverter: base%s (%s)>" % (len(self.digits), self.digits)

    def encode(self, i):
        """Encode the base-10 value `i` as a string in this converter's base."""
        neg, value = self.convert(i, self.decimal_digits, self.digits, '-')
        return self.sign + value if neg else value

    def decode(self, s):
        """Decode the string `s` from this converter's base into an int."""
        neg, value = self.convert(s, self.digits, self.decimal_digits, self.sign)
        return int('-' + value) if neg else int(value)

    def convert(self, number, from_digits, to_digits, sign):
        """Re-express `number` (written with `from_digits`) using `to_digits`.

        Returns a (negative_flag, digit_string) pair; `sign` is the prefix
        that marks a negative input.
        """
        text = str(number)
        neg = 0
        if text[0] == sign:
            text = text[1:]
            neg = 1
        # Fold the source digits into a plain int magnitude.
        magnitude = 0
        base_from = len(from_digits)
        for ch in text:
            magnitude = magnitude * base_from + from_digits.index(ch)
        # Emit the magnitude using the target alphabet.
        if magnitude == 0:
            encoded = to_digits[0]
        else:
            encoded = ''
            base_to = len(to_digits)
            while magnitude > 0:
                encoded = to_digits[magnitude % base_to] + encoded
                magnitude = int(magnitude // base_to)
        return neg, encoded
# Ready-to-use converter singletons for the common alphabets.
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
# '-' is a digit in the base64 alphabet, so '$' marks negative numbers.
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
| bsd-3-clause |
aaron-fz/neutron_full_sync | neutron/tests/unit/test_metadata_namespace_proxy.py | 8 | 14209 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import socket
import mock
import testtools
import webob
from neutron.agent.metadata import namespace_proxy as ns_proxy
from neutron.common import utils
from neutron.tests import base
class FakeConf(object):
    """Minimal conf stub exposing the options the tests below read."""

    # Keystone authentication settings.
    admin_user = 'neutron'
    admin_password = 'password'
    admin_tenant_name = 'tenant'
    auth_url = 'http://127.0.0.1'
    auth_strategy = 'keystone'
    auth_region = 'region'
    # Nova metadata endpoint the proxy forwards requests to.
    nova_metadata_ip = '9.9.9.9'
    nova_metadata_port = 8775
    # Shared-secret option consumed by the code under test.
    metadata_proxy_shared_secret = 'secret'
class TestUnixDomainHttpConnection(base.BaseTestCase):
    """Tests for ns_proxy.UnixDomainHTTPConnection."""

    def test_connect(self):
        """connect() must open an AF_UNIX stream socket at the configured path."""
        with mock.patch.object(ns_proxy, 'cfg') as cfg:
            cfg.CONF.metadata_proxy_socket = '/the/path'
            with mock.patch('socket.socket') as socket_create:
                conn = ns_proxy.UnixDomainHTTPConnection('169.254.169.254',
                                                         timeout=3)
                conn.connect()
                # Socket creation, timeout and connect must all use the
                # values above, in this order.
                socket_create.assert_has_calls([
                    mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
                    mock.call().settimeout(3),
                    mock.call().connect('/the/path')]
                )
                self.assertEqual(conn.timeout, 3)
class TestNetworkMetadataProxyHandler(base.BaseTestCase):
def setUp(self):
super(TestNetworkMetadataProxyHandler, self).setUp()
self.log_p = mock.patch.object(ns_proxy, 'LOG')
self.log = self.log_p.start()
self.handler = ns_proxy.NetworkMetadataProxyHandler('router_id')
def test_call(self):
req = mock.Mock(headers={})
with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
proxy_req.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
proxy_req.assert_called_once_with(req.remote_addr,
req.method,
req.path_info,
req.query_string,
req.body)
def test_no_argument_passed_to_init(self):
with testtools.ExpectedException(ValueError):
ns_proxy.NetworkMetadataProxyHandler()
def test_call_internal_server_error(self):
req = mock.Mock(headers={})
with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
proxy_req.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
self.assertTrue(proxy_req.called)
def test_proxy_request_router_200(self):
self.handler.router_id = 'router_id'
resp = mock.MagicMock(status=200)
with mock.patch('httplib2.Http') as mock_http:
resp.__getitem__.return_value = "text/plain"
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Router-ID': 'router_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertEqual(retval.headers['Content-Type'], 'text/plain')
self.assertEqual(retval.body, 'content')
def test_proxy_request_network_200(self):
self.handler.network_id = 'network_id'
resp = mock.MagicMock(status=200)
with mock.patch('httplib2.Http') as mock_http:
resp.__getitem__.return_value = "application/json"
mock_http.return_value.request.return_value = (resp, '{}')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertEqual(retval.headers['Content-Type'],
'application/json')
self.assertEqual(retval.body, '{}')
def test_proxy_request_network_404(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=404)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_proxy_request_network_409(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=409)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
retval = self.handler._proxy_request('192.168.1.1',
'POST',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='POST',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertIsInstance(retval, webob.exc.HTTPConflict)
def test_proxy_request_network_500(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=500)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
def test_proxy_request_network_418(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=418)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
with testtools.ExpectedException(Exception):
self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
def test_proxy_request_network_exception(self):
self.handler.network_id = 'network_id'
mock.Mock(status=500)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.side_effect = Exception
with testtools.ExpectedException(Exception):
self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
class TestProxyDaemon(base.BaseTestCase):
def test_init(self):
with mock.patch('neutron.agent.linux.daemon.Pidfile'):
pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id', 'router_id')
self.assertEqual(pd.router_id, 'router_id')
self.assertEqual(pd.network_id, 'net_id')
def test_run(self):
with mock.patch('neutron.agent.linux.daemon.Pidfile'):
with mock.patch('neutron.wsgi.Server') as Server:
pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id',
'router_id')
pd.run()
Server.assert_has_calls([
mock.call('neutron-network-metadata-proxy'),
mock.call().start(mock.ANY, 9697),
mock.call().wait()]
)
def test_main(self):
with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon:
with mock.patch.object(ns_proxy, 'config') as config:
with mock.patch.object(ns_proxy, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg') as utils_cfg:
cfg.CONF.router_id = 'router_id'
cfg.CONF.network_id = None
cfg.CONF.metadata_port = 9697
cfg.CONF.pid_file = 'pidfile'
cfg.CONF.daemonize = True
utils_cfg.CONF.log_opt_values.return_value = None
ns_proxy.main()
self.assertTrue(config.setup_logging.called)
daemon.assert_has_calls([
mock.call('pidfile', 9697,
router_id='router_id',
network_id=None),
mock.call().start()]
)
def test_main_dont_fork(self):
with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon:
with mock.patch.object(ns_proxy, 'config') as config:
with mock.patch.object(ns_proxy, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg') as utils_cfg:
cfg.CONF.router_id = 'router_id'
cfg.CONF.network_id = None
cfg.CONF.metadata_port = 9697
cfg.CONF.pid_file = 'pidfile'
cfg.CONF.daemonize = False
utils_cfg.CONF.log_opt_values.return_value = None
ns_proxy.main()
self.assertTrue(config.setup_logging.called)
daemon.assert_has_calls([
mock.call('pidfile', 9697,
router_id='router_id',
network_id=None),
mock.call().run()]
)
| apache-2.0 |
jayhetee/coveragepy | coverage/test_helpers.py | 45 | 11036 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Mixin classes to help make good tests."""
import atexit
import collections
import contextlib
import os
import random
import shutil
import sys
import tempfile
import textwrap
from coverage.backunittest import TestCase
from coverage.backward import StringIO, to_bytes
class Tee(object):
    """A file-like object that fans every write out to several file-likes."""

    def __init__(self, *files):
        """Create a Tee duplicating writes to each file in `files`."""
        self._files = files
        first = files[0]
        if hasattr(first, "encoding"):
            # Mirror the first file's encoding so callers can query it.
            self.encoding = first.encoding

    def write(self, data):
        """Write `data` to every underlying file."""
        for sink in self._files:
            sink.write(data)

    def flush(self):
        """Flush every underlying file."""
        for sink in self._files:
            sink.flush()

    if 0:
        # Handy when debugging interactively, but it makes some tests fail
        # for unclear reasons, so it stays disabled.
        def __getattr__(self, name):
            return getattr(self._files[0], name)
@contextlib.contextmanager
def change_dir(new_dir):
    """Context manager: chdir into `new_dir`, yield the new cwd, chdir back.

    The original working directory is restored even if the body raises.
    """
    previous = os.getcwd()
    os.chdir(new_dir)
    try:
        yield os.getcwd()
    finally:
        os.chdir(previous)
@contextlib.contextmanager
def saved_sys_path():
    """Snapshot sys.path on entry and restore the snapshot on exit."""
    snapshot = sys.path[:]
    try:
        yield
    finally:
        sys.path = snapshot
def setup_with_context_manager(testcase, cm):
    """Use a contextmanager to setUp a test case.

    If you have a context manager you like::

        with ctxmgr(a, b, c) as v:
            # do something with v

    and you want to have that effect for a test case, call this function from
    your setUp, and it will start the context manager for your test, and end it
    when the test is done::

        def setUp(self):
            self.v = setup_with_context_manager(self, ctxmgr(a, b, c))

        def test_foo(self):
            # do something with self.v

    Returns the value yielded by entering `cm`.
    """
    val = cm.__enter__()
    # Registering __exit__ as a cleanup makes it run even when the test fails.
    testcase.addCleanup(cm.__exit__, None, None, None)
    return val
class ModuleAwareMixin(TestCase):
    """A test case mixin that isolates changes to sys.modules."""

    def setUp(self):
        super(ModuleAwareMixin, self).setUp()
        # Record sys.modules here so we can restore it in cleanup_modules.
        self.old_modules = list(sys.modules)
        self.addCleanup(self.cleanup_modules)

    def cleanup_modules(self):
        """Remove any new modules imported during the test run.

        This lets us import the same source files for more than one test.
        """
        # Snapshot the new keys first; deleting while iterating sys.modules
        # directly would be unsafe.
        for m in [m for m in sys.modules if m not in self.old_modules]:
            del sys.modules[m]
class SysPathAwareMixin(TestCase):
    """A test case mixin that isolates changes to sys.path."""

    def setUp(self):
        super(SysPathAwareMixin, self).setUp()
        # saved_sys_path restores the original sys.path when the test ends.
        setup_with_context_manager(self, saved_sys_path())
class EnvironmentAwareMixin(TestCase):
    """A test case mixin that isolates changes to the environment."""

    def setUp(self):
        super(EnvironmentAwareMixin, self).setUp()
        # Record environment variables that we changed with set_environ.
        self.environ_undos = {}
        self.addCleanup(self.cleanup_environ)

    def set_environ(self, name, value):
        """Set an environment variable `name` to be `value`.

        The environment variable is set, and record is kept that it was set,
        so that `cleanup_environ` can restore its original value.
        """
        if name not in self.environ_undos:
            # Only remember the very first original value; later calls for
            # the same name must not overwrite it.
            self.environ_undos[name] = os.environ.get(name)
        os.environ[name] = value

    def cleanup_environ(self):
        """Undo all the changes made by `set_environ`."""
        for name, value in self.environ_undos.items():
            if value is None:
                # The variable did not exist before the test.
                del os.environ[name]
            else:
                os.environ[name] = value
class StdStreamCapturingMixin(TestCase):
    """A test case mixin that captures stdout and stderr."""

    def setUp(self):
        super(StdStreamCapturingMixin, self).setUp()
        # Capture stdout and stderr so we can examine them in tests.
        # nose keeps stdout from littering the screen, so we can safely Tee it,
        # but it doesn't capture stderr, so we don't want to Tee stderr to the
        # real stderr, since it will interfere with our nice field of dots.
        self.old_stdout = sys.stdout
        self.captured_stdout = StringIO()
        sys.stdout = Tee(sys.stdout, self.captured_stdout)
        self.old_stderr = sys.stderr
        self.captured_stderr = StringIO()
        sys.stderr = self.captured_stderr
        self.addCleanup(self.cleanup_std_streams)

    def cleanup_std_streams(self):
        """Restore stdout and stderr."""
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr

    def stdout(self):
        """Return the data written to stdout during the test."""
        return self.captured_stdout.getvalue()

    def stderr(self):
        """Return the data written to stderr during the test."""
        return self.captured_stderr.getvalue()
class TempDirMixin(SysPathAwareMixin, ModuleAwareMixin, TestCase):
"""A test case mixin that creates a temp directory and files in it.
Includes SysPathAwareMixin and ModuleAwareMixin, because making and using
temp directories like this will also need that kind of isolation.
"""
# Our own setting: most of these tests run in their own temp directory.
# Set this to False in your subclass if you don't want a temp directory
# created.
run_in_temp_dir = True
# Set this if you aren't creating any files with make_file, but still want
# the temp directory. This will stop the test behavior checker from
# complaining.
no_files_in_temp_dir = False
def setUp(self):
super(TempDirMixin, self).setUp()
if self.run_in_temp_dir:
# Create a temporary directory.
self.temp_dir = self.make_temp_dir("test_cover")
self.chdir(self.temp_dir)
# Modules should be importable from this temp directory. We don't
# use '' because we make lots of different temp directories and
# nose's caching importer can get confused. The full path prevents
# problems.
sys.path.insert(0, os.getcwd())
class_behavior = self.class_behavior()
class_behavior.tests += 1
class_behavior.temp_dir = self.run_in_temp_dir
class_behavior.no_files_ok = self.no_files_in_temp_dir
self.addCleanup(self.check_behavior)
def make_temp_dir(self, slug="test_cover"):
"""Make a temp directory that is cleaned up when the test is done."""
name = "%s_%08d" % (slug, random.randint(0, 99999999))
temp_dir = os.path.join(tempfile.gettempdir(), name)
os.makedirs(temp_dir)
self.addCleanup(shutil.rmtree, temp_dir)
return temp_dir
def chdir(self, new_dir):
"""Change directory, and change back when the test is done."""
old_dir = os.getcwd()
os.chdir(new_dir)
self.addCleanup(os.chdir, old_dir)
def check_behavior(self):
"""Check that we did the right things."""
class_behavior = self.class_behavior()
if class_behavior.test_method_made_any_files:
class_behavior.tests_making_files += 1
def make_file(self, filename, text="", newline=None):
"""Create a file for testing.
`filename` is the relative path to the file, including directories if
desired, which will be created if need be.
`text` is the content to create in the file, a native string (bytes in
Python 2, unicode in Python 3).
If `newline` is provided, it is a string that will be used as the line
endings in the created file, otherwise the line endings are as provided
in `text`.
Returns `filename`.
"""
# Tests that call `make_file` should be run in a temp environment.
assert self.run_in_temp_dir
self.class_behavior().test_method_made_any_files = True
text = textwrap.dedent(text)
if newline:
text = text.replace("\n", newline)
# Make sure the directories are available.
dirs, _ = os.path.split(filename)
if dirs and not os.path.exists(dirs):
os.makedirs(dirs)
# Create the file.
with open(filename, 'wb') as f:
f.write(to_bytes(text))
return filename
# We run some tests in temporary directories, because they may need to make
# files for the tests. But this is expensive, so we can change per-class
# whether a temp directory is used or not. It's easy to forget to set that
# option properly, so we track information about what the tests did, and
# then report at the end of the process on test classes that were set
# wrong.
class ClassBehavior(object):
"""A value object to store per-class."""
def __init__(self):
self.tests = 0
self.skipped = 0
self.temp_dir = True
self.no_files_ok = False
self.tests_making_files = 0
self.test_method_made_any_files = False
# Map from class to info about how it ran.
class_behaviors = collections.defaultdict(ClassBehavior)
@classmethod
def report_on_class_behavior(cls):
"""Called at process exit to report on class behavior."""
for test_class, behavior in cls.class_behaviors.items():
bad = ""
if behavior.tests <= behavior.skipped:
bad = ""
elif behavior.temp_dir and behavior.tests_making_files == 0:
if not behavior.no_files_ok:
bad = "Inefficient"
elif not behavior.temp_dir and behavior.tests_making_files > 0:
bad = "Unsafe"
if bad:
if behavior.temp_dir:
where = "in a temp directory"
else:
where = "without a temp directory"
print(
"%s: %s ran %d tests, %d made files %s" % (
bad,
test_class.__name__,
behavior.tests,
behavior.tests_making_files,
where,
)
)
def class_behavior(self):
"""Get the ClassBehavior instance for this test."""
return self.class_behaviors[self.__class__]
# When the process ends, find out about bad classes.
atexit.register(TempDirMixin.report_on_class_behavior)
| apache-2.0 |
krfkeith/enough | gui/draw/pygame_draw/__init__.py | 3 | 1589 | # Copyright (c) 2007 Enough Project.
# See LICENSE for details.
import pygame
def fill(display, color, rect=None):
    """Fill `display` with `color`; restrict the fill to `rect` when given."""
    if rect is not None:
        display.fill(color, rect)
    else:
        display.fill(color)
# Thin aliases over pygame.draw primitives so callers can depend on this
# module instead of importing pygame directly.
circle = pygame.draw.circle
ellipse = pygame.draw.ellipse
arc = pygame.draw.arc
rect = pygame.draw.rect
def lines(surface, color, closed, points, width=1, antialias=False):
    """Draw connected line segments through `points`.

    Antialiased drawing always uses width 1 (pygame's aalines limitation).
    """
    if antialias:
        return pygame.draw.aalines(surface, color, closed, points, 1)
    return pygame.draw.lines(surface, color, closed, points, width)
def line(surface, color, startpos, endpos, width=1, antialias=False):
    """Draw a single line segment from `startpos` to `endpos`.

    Antialiased drawing always uses width 1 (pygame's aaline limitation).
    """
    if antialias:
        return pygame.draw.aaline(surface, color, startpos, endpos, 1)
    return pygame.draw.line(surface, color, startpos, endpos, width)
# Display-creation passthrough.
set_mode = pygame.display.set_mode
# Cache of Font objects keyed by (name, size, bold, underline, italic).
_font_cache = {}
def get_font(name, size, is_bold=False, is_underline=False, is_italic=False):
    """Return a pygame Font for the given face, size and style flags.

    Fonts are cached in the module-level _font_cache so repeated requests
    for the same (name, size, style) combination reuse one Font object.
    """
    # No `global` needed: the cache dict is mutated, never rebound.
    key = (name, size, is_bold, is_underline, is_italic)
    f = _font_cache.get(key)
    if f is None:
        f = pygame.font.Font(name, size)
        f.set_underline(is_underline)
        f.set_bold(is_bold)
        f.set_italic(is_italic)
        _font_cache[key] = f
    return f
def draw_font(surface, font_rendered_surface, pos):
    """Blit a pre-rendered font surface onto `surface` at `pos`."""
    surface.blit(font_rendered_surface, pos)
def lock(surface):
    """Lock `surface` for direct pixel access."""
    surface.lock()
def unlock(surface):
    """Release a lock previously taken with lock()."""
    surface.unlock()
def blit(surface, blit_surface, pos):
    """Copy `blit_surface` onto `surface` at `pos`."""
    surface.blit(blit_surface, pos)
def save(surface, filename):
    """Write `surface` to disk as an image file at `filename`."""
    pygame.image.save(surface, filename)
| gpl-3.0 |
PeterWangIntel/chromium-crosswalk | content/browser/devtools/protocol/devtools_protocol_handler_generator.py | 3 | 25018 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import string
import json
blink_protocol_path = sys.argv[1]
browser_protocol_path = sys.argv[2]
output_cc_path = sys.argv[3]
output_h_path = sys.argv[4]
header = """\
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// Generated by
// content/public/browser/devtools_protocol_handler_generator.py from
// third_party/WebKit/Source/devtools/protocol.json and
// content/browser/devtools/browser_protocol.json
"""
template_h = string.Template(header + """\
#ifndef CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_
#define CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_
#include "content/browser/devtools/protocol/devtools_protocol_client.h"
namespace content {
class DevToolsProtocolDispatcher;
namespace devtools {
extern const char kProtocolVersion[];
bool IsSupportedProtocolVersion(const std::string& version);
template<typename T>
base::Value* CreateValue(const T& param) {
return new base::FundamentalValue(param);
}
template<class T>
base::Value* CreateValue(scoped_ptr<T>& param) {
return param.release();
}
template<class T>
base::Value* CreateValue(scoped_refptr<T> param) {
return param->ToValue().release();
}
template<typename T>
base::Value* CreateValue(const std::vector<T> param) {
base::ListValue* result = new base::ListValue();
for (auto& item : param) {
result->Append(CreateValue(item));
}
return result;
}
template<>
base::Value* CreateValue(const std::string& param);
${types}\
} // namespace devtools
class DevToolsProtocolDispatcher {
public:
using Notifier = DevToolsProtocolClient::RawMessageCallback;
using CommandHandler =
base::Callback<bool(int, scoped_ptr<base::DictionaryValue>)>;
explicit DevToolsProtocolDispatcher(const Notifier& notifier);
~DevToolsProtocolDispatcher();
CommandHandler FindCommandHandler(const std::string& method);
${setters}\
private:
using Response = DevToolsProtocolClient::Response;
using CommandHandlers = std::map<std::string, CommandHandler>;
${methods}\
Notifier notifier_;
DevToolsProtocolClient client_;
CommandHandlers command_handlers_;
${fields}\
};
} // namespace content
#endif // CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_
""")
tmpl_typedef = string.Template("""\
namespace ${domain} {
typedef ${param_type} ${declared_name};
} // namespace ${domain}
""")
tmpl_struct = string.Template("""\
namespace ${domain} {
template<int MASK>
struct ${declared_name}Builder
: base::RefCounted<${declared_name}Builder<MASK>> {
public:
enum {
kAllSet = 0,
${fields_enum}\
};
${methods}\
static scoped_refptr<${declared_name}Builder<kNoneSet>> Create() {
return new ${declared_name}Builder<kNoneSet>();
}
scoped_ptr<base::DictionaryValue> ToValue() {
static_assert(MASK == kAllSet, "required properties missing");
return make_scoped_ptr(dict_->DeepCopy());
}
private:
friend struct ${declared_name}Builder<0>;
${declared_name}Builder() : dict_(new base::DictionaryValue()) {
}
template<class T> T* ThisAs() {
static_assert(sizeof(*this) == sizeof(T), "cannot cast");
return reinterpret_cast<T*>(this);
}
scoped_ptr<base::DictionaryValue> dict_;
};
typedef ${declared_name}Builder<0> ${declared_name};
} // namespace ${domain}
""")
tmpl_builder_setter_req = string.Template("""\
scoped_refptr<${declared_name}Builder<MASK & ~k${Param}>>
set_${param}(${pass_type} ${param}) {
static_assert(MASK & k${Param}, "already set");
dict_->Set("${proto_param}", CreateValue(${param}));
return ThisAs<${declared_name}Builder<MASK & ~k${Param}>>();
}
""")
tmpl_builder_setter_opt = string.Template("""\
scoped_refptr<${declared_name}Builder<MASK>>
set_${param}(${pass_type} ${param}) {
dict_->Set("${proto_param}", CreateValue(${param}));
return this;
}
""")
tmpl_builder_enum = string.Template("""\
k${Param} = 1 << ${ordinal},
""")
tmpl_builder_none_set = string.Template("""\
kNoneSet = ${all_fields}
""")
tmpl_named_enum = string.Template("""\
namespace ${domain} {
${values}\
} // namespace ${domain}
""")
tmpl_inline_enum = string.Template("""\
namespace ${domain} {
namespace ${subdomain} {
${values}\
} // namespace ${subdomain}
} // namespace ${domain}
""")
tmpl_enum_value = string.Template("""\
extern const char k${Enum}${Value}[];
""")
tmpl_enum_value_def = string.Template("""\
const char k${Enum}${Value}[] = "${value}";
""")
tmpl_handler = string.Template("""\
namespace ${domain} {
class ${Domain}Handler;
} // namespace domain
""")
tmpl_client = string.Template("""\
namespace ${domain} {
class Client : public DevToolsProtocolClient {
public:
explicit Client(const RawMessageCallback& raw_message_callback);
~Client() override;
${methods}\
};
} // namespace ${domain}
""")
tmpl_event = string.Template("""\
void ${Command}(
scoped_refptr<${Command}Params> params);
""")
tmpl_response = string.Template("""\
void Send${Command}Response(
DevToolsCommandId command_id,
scoped_refptr<${Command}Response> params);
""")
tmpl_setter = string.Template("""\
void Set${Domain}Handler(
devtools::${domain}::${Domain}Handler* ${domain}_handler);
""")
tmpl_callback = string.Template("""\
bool On${Domain}${Command}(
DevToolsCommandId command_id,
scoped_ptr<base::DictionaryValue> params);
""")
tmpl_field = string.Template("""\
devtools::${domain}::${Domain}Handler* ${domain}_handler_;
""")
template_cc = string.Template(header + """\
#include "content/browser/devtools/protocol/devtools_protocol_handler.h"
#include "base/bind.h"
#include "base/strings/string_number_conversions.h"
${includes}\
namespace content {
DevToolsProtocolDispatcher::DevToolsProtocolDispatcher(
const Notifier& notifier)
: notifier_(notifier),
client_(notifier),
${fields_init} {
}
DevToolsProtocolDispatcher::~DevToolsProtocolDispatcher() {
}
DevToolsProtocolDispatcher::CommandHandler
DevToolsProtocolDispatcher::FindCommandHandler(const std::string& method) {
CommandHandlers::iterator it = command_handlers_.find(method);
return it == command_handlers_.end() ? CommandHandler() : it->second;
}
${methods}\
namespace devtools {
const char kProtocolVersion[] = "${major}.${minor}";
bool IsSupportedProtocolVersion(const std::string& version) {
std::vector<std::string> tokens;
Tokenize(version, ".", &tokens);
int major, minor;
return tokens.size() == 2 &&
base::StringToInt(tokens[0], &major) && major == ${major} &&
base::StringToInt(tokens[1], &minor) && minor <= ${minor};
}
template<>
base::Value* CreateValue(const std::string& param) {
return new base::StringValue(param);
}
${types}\
} // namespace devtools
} // namespace content
""")
tmpl_include = string.Template("""\
#include "content/browser/devtools/protocol/${domain}_handler.h"
""")
tmpl_field_init = string.Template("${domain}_handler_(nullptr)")
tmpl_setter_impl = string.Template("""\
void DevToolsProtocolDispatcher::Set${Domain}Handler(
devtools::${domain}::${Domain}Handler* ${domain}_handler) {
DCHECK(!${domain}_handler_);
${domain}_handler_ = ${domain}_handler;
${initializations}\
}
""")
tmpl_register = string.Template("""\
command_handlers_["${Domain}.${command}"] =
base::Bind(
&DevToolsProtocolDispatcher::On${TargetDomain}${Command},
base::Unretained(this));
""")
tmpl_init_client = string.Template("""\
${domain}_handler_->SetClient(make_scoped_ptr(
new devtools::${domain}::Client(notifier_)));
""")
tmpl_callback_impl = string.Template("""\
bool DevToolsProtocolDispatcher::On${Domain}${Command}(
DevToolsCommandId command_id,
scoped_ptr<base::DictionaryValue> params) {
${prep}\
Response response = ${domain}_handler_->${Command}(${args});
scoped_ptr<base::DictionaryValue> protocol_response;
if (client_.SendError(command_id, response))
return true;
if (response.IsFallThrough())
return false;
scoped_ptr<base::DictionaryValue> result(new base::DictionaryValue());
${wrap}\
client_.SendSuccess(command_id, result.Pass());
return true;
}
""")
tmpl_wrap = string.Template("""\
result->Set("${proto_param}", devtools::CreateValue(out_${param}));
""")
tmpl_callback_async_impl = string.Template("""\
bool DevToolsProtocolDispatcher::On${Domain}${Command}(
DevToolsCommandId command_id,
scoped_ptr<base::DictionaryValue> params) {
${prep}\
Response response = ${domain}_handler_->${Command}(${args});
if (client_.SendError(command_id, response))
return true;
return !response.IsFallThrough();
}
""")
tmpl_prep_req = string.Template("""\
${raw_type} in_${param}${init};
if (!params || !params->Get${Type}("${proto_param}", &in_${param})) {
client_.SendError(command_id, Response::InvalidParams("${proto_param}"));
return true;
}
""")
tmpl_prep_req_list = string.Template("""\
base::ListValue* list_${param} = nullptr;
if (!params || !params->GetList("${proto_param}", &list_${param})) {
client_.SendError(command_id, Response::InvalidParams("${proto_param}"));
return true;
}
std::vector<${item_type}> in_${param};
for (base::ListValue::const_iterator it =
list_${param}->begin(); it != list_${param}->end(); ++it) {
${item_raw_type} item;
if (!(*it)->GetAs${ItemType}(&item)) {
client_.SendError(command_id, Response::InvalidParams("${proto_param}"));
return true;
}
in_${param}.push_back(${item_pass});
}
""")
tmpl_prep_opt = string.Template("""\
${raw_type} in_${param}${init};
bool ${param}_found = params && params->Get${Type}(
"${proto_param}",
&in_${param});
""")
tmpl_prep_output = string.Template("""\
${param_type} out_${param}${init};
""")
tmpl_arg_name = string.Template("in_${param}")
tmpl_arg_req = string.Template("${param_pass}")
tmpl_arg_opt = string.Template(
"${param}_found ? ${param_pass} : nullptr")
tmpl_object_pass = string.Template(
"make_scoped_ptr<base::DictionaryValue>(${name}->DeepCopy())")
tmpl_client_impl = string.Template("""\
namespace ${domain} {
Client::Client(const RawMessageCallback& raw_message_callback)
: DevToolsProtocolClient(raw_message_callback) {
}
Client::~Client() {
}
${methods}\
} // namespace ${domain}
""")
tmpl_event_impl = string.Template("""\
void Client::${Command}(
scoped_refptr<${Command}Params> params) {
SendNotification("${Domain}.${command}",
params->ToValue().Pass());
}
""")
tmpl_response_impl = string.Template("""\
void Client::Send${Command}Response(
DevToolsCommandId command_id,
scoped_refptr<${Command}Response> params) {
SendSuccess(command_id, params->ToValue().Pass());
}
""")
tmpl_typename = string.Template("devtools::${domain}::${declared_name}")
def Capitalize(s):
  """Returns |s| with its first character upper-cased (safe for '')."""
  head, tail = s[:1], s[1:]
  return head.upper() + tail
def Uncamelcase(s):
  """Converts CamelCase |s| to snake_case, keeping acronyms intact.

  An underscore is inserted before an upper-case letter only at a word
  boundary: when the previous character is lower-case, or when the next
  one is (so 'DOMStorage' -> 'dom_storage', not 'd_o_m_storage').
  """
  pieces = []
  for index, ch in enumerate(s):
    if not ch.isupper():
      pieces.append(ch)
      continue
    prev_lower = index > 0 and s[index - 1].islower()
    next_lower = 0 < index < len(s) - 1 and s[index + 1].islower()
    if prev_lower or next_lower:
      pieces.append("_")
    pieces.append(ch.lower())
  return "".join(pieces)
# Index of all named protocol types, keyed "Domain.TypeName".
types = {}
# Load the two protocol descriptions.  Context managers guarantee the file
# handles are closed even if parsing fails (the original used bare
# open(...).read() and leaked the handles); json.load is equivalent to
# json.loads(f.read()).
with open(blink_protocol_path, "r") as blink_file:
  blink_protocol = json.load(blink_file)
with open(browser_protocol_path, "r") as browser_file:
  browser_protocol = json.load(browser_file)
type_decls = []            # C++ type declarations for the generated header.
type_impls = []            # Matching definitions for the generated .cc.
handler_methods = []       # Dispatcher callback declarations.
handler_method_impls = []  # Dispatcher callback definitions.
domain_maps = []           # Per-domain substitution mappings.
redirects = {}             # Target domain -> deferred command registrations.
all_domains = blink_protocol["domains"] + browser_protocol["domains"]
for json_domain in all_domains:
  if "types" in json_domain:
    for json_type in json_domain["types"]:
      types["%s.%s" % (json_domain["domain"], json_type["id"])] = json_type
def DeclareStruct(json_properties, mapping):
  """Emits a builder struct declaration for an object type.

  Appends a tmpl_struct expansion (tmpl_struct is defined earlier in this
  file) to the module-level type_decls list.  Required fields get masked
  setters and enum bits; optional fields get plain setters.
  """
  methods = []
  fields_enum = []
  enum_items = []
  req_fields_num = 0
  for json_prop in json_properties:
    prop_map = mapping.copy()
    prop_map["proto_param"] = json_prop["name"]
    prop_map["param"] = Uncamelcase(json_prop["name"])
    prop_map["Param"] = Capitalize(json_prop["name"])
    prop_map["subdomain"] = Uncamelcase(prop_map["declared_name"])
    # ResolveType may set its own declared_name for nested types; remove
    # ours first and restore it afterwards.
    del prop_map["declared_name"]
    ResolveType(json_prop, prop_map)
    prop_map["declared_name"] = mapping["declared_name"]
    if json_prop.get("optional"):
      methods.append(tmpl_builder_setter_opt.substitute(prop_map))
    else:
      methods.append(tmpl_builder_setter_req.substitute(prop_map))
      enum_items.append("k%s" % prop_map["Param"]);
      fields_enum.append(tmpl_builder_enum.substitute(prop_map,
          ordinal = req_fields_num))
      req_fields_num += 1
  # kNoneSet is the OR of all required-field bits (kAllSet when there are
  # no required fields at all).
  all_fields = "kAllSet"
  if len(enum_items) > 0:
    all_fields = " | ".join(enum_items)
  fields_enum.append(tmpl_builder_none_set.substitute(mapping,
      all_fields = all_fields))
  type_decls.append(tmpl_struct.substitute(mapping,
      methods = "\n".join(methods),
      fields_enum = "".join(fields_enum)))
def DeclareEnum(json, mapping):
  """Emits string-constant declarations/definitions for an enum.

  Named enums go straight into the domain namespace; anonymous ones are
  nested in a subdomain namespace.  Appends to the module-level
  type_decls and type_impls lists.
  """
  values = []
  value_defs = []
  tmpl_enum = tmpl_inline_enum
  if "declared_name" in mapping:
    mapping["Enum"] = mapping["declared_name"]
    tmpl_enum = tmpl_named_enum
  else:
    mapping["Enum"] = Capitalize(mapping["proto_param"])
  for enum_value in json["enum"]:
    values.append(tmpl_enum_value.substitute(mapping,
        Value = Capitalize(enum_value)))
    value_defs.append(tmpl_enum_value_def.substitute(mapping,
        value = enum_value,
        Value = Capitalize(enum_value)))
  type_decls.append(tmpl_enum.substitute(mapping,
      values = "".join(values)))
  type_impls.append(tmpl_enum.substitute(mapping,
      values = "".join(value_defs)))
def ResolveRef(json, mapping):
  """Resolves a "$ref" to a named protocol type and fills |mapping|.

  A ref without a dot refers to a type in the current domain; otherwise it
  is "Domain.Type".  The referenced type is declared at most once: the
  ___type_declared marker is stashed into the shared JSON node.
  """
  dot_pos = json["$ref"].find(".")
  if dot_pos == -1:
    domain_name = mapping["Domain"]
    type_name = json["$ref"]
  else:
    domain_name = json["$ref"][:dot_pos]
    type_name = json["$ref"][dot_pos + 1:]
  json_type = types["%s.%s" % (domain_name, type_name)]
  mapping["declared_name"] = Capitalize(type_name)
  mapping["Domain"] = domain_name
  mapping["domain"] = Uncamelcase(domain_name)
  mapping["param_type"] = tmpl_typename.substitute(mapping)
  ResolveType(json_type, mapping)
  if not "___type_declared" in json_type:
    json_type["___type_declared"] = True;
    if (json_type.get("type") == "object") and ("properties" in json_type):
      DeclareStruct(json_type["properties"], mapping)
    else:
      if ("enum" in json_type):
        DeclareEnum(json_type, mapping)
      # tmpl_typedef is defined earlier in this file (above this chunk).
      type_decls.append(tmpl_typedef.substitute(mapping))
def ResolveArray(json, mapping):
  """Fills |mapping| with C++ type info for an array-typed value.

  Resolves the item type on a copy of the mapping, then derives the
  std::vector<> types and the list-extraction prep code from it.
  """
  items_map = mapping.copy()
  ResolveType(json["items"], items_map)
  if items_map["Type"] == "List":
    # TODO(dgozman) Implement this.
    raise Exception("Nested arrays are not implemented")
  mapping["param_type"] = "std::vector<%s>" % items_map["param_type"]
  mapping["Type"] = "List"
  mapping["pass_type"] = "const %s&" % mapping["param_type"]
  mapping["storage_type"] = "std::vector<%s>" % items_map["storage_type"]
  mapping["raw_type"] = mapping["storage_type"]
  mapping["prep_req"] = tmpl_prep_req_list.substitute(mapping,
      item_type = items_map["storage_type"],
      item_init = items_map["init"],
      item_raw_type = items_map["raw_type"],
      item_pass = items_map["pass_template"].substitute(name="item", opt=""),
      ItemType = items_map["Type"])
  mapping["arg_out"] = "&out_%s" % mapping["param"]
def ResolveObject(json, mapping):
  """Fills |mapping| with C++ type info for an object-typed value.

  Objects with a "properties" list become generated builder structs
  (declared on first use); open-ended objects are passed around as raw
  base::DictionaryValue.
  """
  mapping["Type"] = "Dictionary"
  mapping["storage_type"] = "scoped_ptr<base::DictionaryValue>"
  mapping["raw_type"] = "base::DictionaryValue*"
  mapping["pass_template"] = tmpl_object_pass
  if "properties" in json:
    if not "declared_name" in mapping:
      # Anonymous inline object: synthesize a name from command + param.
      mapping["declared_name"] = ("%s%s" %
          (mapping["Command"], Capitalize(mapping["proto_param"])))
      mapping["param_type"] = ("scoped_refptr<%s>" %
          tmpl_typename.substitute(mapping))
      DeclareStruct(json["properties"], mapping)
    else:
      mapping["param_type"] = ("scoped_refptr<%s>" %
          tmpl_typename.substitute(mapping))
    mapping["pass_type"] = mapping["param_type"]
    mapping["arg_out"] = "&out_%s" % mapping["param"]
  else:
    mapping["param_type"] = "base::DictionaryValue"
    mapping["pass_type"] = "scoped_ptr<base::DictionaryValue>"
    mapping["arg_out"] = "out_%s.get()" % mapping["param"]
  mapping["prep_req"] = tmpl_prep_req.substitute(mapping)
def ResolvePrimitive(json, mapping):
  """Fills |mapping| with C++ type info for a JSON-RPC primitive type.

  Supported types are boolean, integer, number and string; a string with
  an inline "enum" also emits its constants via DeclareEnum.  Raises for
  any other type.
  """
  jsonrpc_type = json["type"]
  if jsonrpc_type == "boolean":
    mapping["param_type"] = "bool"
    mapping["Type"] = "Boolean"
    mapping["init"] = " = false"
  elif jsonrpc_type == "integer":
    mapping["param_type"] = "int"
    mapping["Type"] = "Integer"
    mapping["init"] = " = 0"
  elif jsonrpc_type == "number":
    mapping["param_type"] = "double"
    mapping["Type"] = "Double"
    mapping["init"] = " = 0.0"
  elif jsonrpc_type == "string":
    mapping["param_type"] = "std::string"
    mapping["pass_type"] = "const std::string&"
    mapping["Type"] = "String"
    if "enum" in json and not "declared_name" in mapping:
      if not "subdomain" in mapping:
        mapping["subdomain"] = Uncamelcase(mapping["command"])
      DeclareEnum(json, mapping)
  else:
    # Bug fix: this previously formatted the undefined name |json_type|,
    # so an unsupported type raised NameError instead of this diagnostic.
    raise Exception("Unknown type: %s" % jsonrpc_type)
  mapping["storage_type"] = mapping["param_type"]
  mapping["raw_type"] = mapping["param_type"]
  mapping["prep_req"] = tmpl_prep_req.substitute(mapping)
  if jsonrpc_type != "string":
    # Strings already set a const-ref pass type above; scalars go by value.
    mapping["pass_type"] = mapping["param_type"]
  mapping["arg_out"] = "&out_%s" % mapping["param"]
def ResolveType(json, mapping):
  """Dispatches on a JSON type descriptor and fills |mapping| in place."""
  mapping["init"] = ""
  mapping["pass_template"] = string.Template("${opt}${name}")
  if "$ref" in json:
    ResolveRef(json, mapping)
    return
  if "type" in json:
    kind = json["type"]
    if kind == "array":
      ResolveArray(json, mapping)
    elif kind == "object":
      ResolveObject(json, mapping)
    else:
      ResolvePrimitive(json, mapping)
    return
  raise Exception("Unknown type at %s.%s %s" %
      (mapping["Domain"], mapping["command"], mapping["proto_param"]))
# Accumulators for the generated dispatcher: setter declarations, handler
# member fields, #include lines, and constructor initializers.
setters = []
fields = []
includes = []
fields_init = []
for json_domain in all_domains:
  domain_map = {}
  domain_map["Domain"] = json_domain["domain"]
  domain_map["domain"] = Uncamelcase(json_domain["domain"])
  initializations = []
  client_methods = []
  client_method_impls = []
  domain_empty = True
  domain_needs_client = False
  if "commands" in json_domain:
    for json_command in json_domain["commands"]:
      # Only commands explicitly marked as browser-handled are generated.
      if (not ("handlers" in json_command) or
          not ("browser" in json_command["handlers"])):
        continue
      domain_empty = False
      command_map = domain_map.copy()
      command_map["command"] = json_command["name"]
      command_map["Command"] = Capitalize(json_command["name"])
      if "redirect" in json_command:
        # Handled by another domain's handler; defer the registration until
        # that domain's setter is emitted (second pass below).
        redirect_domain = json_command["redirect"]
        if redirect_domain not in redirects:
          redirects[redirect_domain] = []
        command_map["TargetDomain"] = redirect_domain
        redirects[redirect_domain].append(
            tmpl_register.substitute(command_map))
        continue
      command_map["TargetDomain"] = command_map["Domain"]
      prep = []  # Parameter-unpacking statements.
      args = []  # Argument expressions for the handler call.
      if "parameters" in json_command:
        for json_param in json_command["parameters"]:
          param_map = command_map.copy()
          param_map["proto_param"] = json_param["name"]
          param_map["param"] = Uncamelcase(json_param["name"])
          ResolveType(json_param, param_map)
          if json_param.get("optional"):
            if param_map["Type"] in ["List"]:
              # TODO(vkuzkokov) Implement transformation of base::ListValue
              # to std::vector and base::DictonaryValue to struct.
              raise Exception(
                  "Optional array parameters are not implemented")
            prep.append(tmpl_prep_opt.substitute(param_map))
            param_pass = param_map["pass_template"].substitute(
                name=tmpl_arg_name.substitute(param_map), opt="&")
            args.append(
                tmpl_arg_opt.substitute(param_map, param_pass=param_pass))
          else:
            prep.append(param_map["prep_req"])
            param_pass = param_map["pass_template"].substitute(
                name=tmpl_arg_name.substitute(param_map), opt="")
            args.append(
                tmpl_arg_req.substitute(param_map, param_pass=param_pass))
      if json_command.get("async"):
        # Async commands reply later via the Client; declare the response
        # struct and prepend the command id to the handler arguments.
        domain_needs_client = True
        json_returns = json_command.get("returns", [])
        command_map["declared_name"] = "%sResponse" % command_map["Command"]
        DeclareStruct(json_returns, command_map)
        # TODO(vkuzkokov) Pass async callback instance similar to how
        # InspectorBackendDispatcher does it. This, however, can work
        # only if Blink and Chrome are in the same repo.
        args.insert(0, "command_id")
        handler_method_impls.append(
            tmpl_callback_async_impl.substitute(
                command_map,
                prep="".join(prep),
                args="\n    " + ",\n    ".join(args)))
        client_methods.append(tmpl_response.substitute(command_map))
        client_method_impls.append(tmpl_response_impl.substitute(command_map))
      else:
        wrap = []  # Statements copying out-params into the response dict.
        if "returns" in json_command:
          for json_param in json_command["returns"]:
            param_map = command_map.copy()
            param_map["proto_param"] = json_param["name"]
            param_map["param"] = Uncamelcase(json_param["name"])
            if json_param.get("optional"):
              # TODO(vkuzkokov) Implement Optional<T> for value types.
              raise Exception("Optional return values are not implemented")
            ResolveType(json_param, param_map)
            prep.append(tmpl_prep_output.substitute(param_map))
            args.append(param_map["arg_out"])
            wrap.append(tmpl_wrap.substitute(param_map))
        args_str = ""
        if len(args) > 0:
          args_str = "\n    " + ",\n    ".join(args)
        handler_method_impls.append(
            tmpl_callback_impl.substitute(
                command_map,
                prep="".join(prep),
                args=args_str,
                wrap="".join(wrap)))
      initializations.append(tmpl_register.substitute(command_map))
      handler_methods.append(tmpl_callback.substitute(command_map))
  if "events" in json_domain:
    for json_event in json_domain["events"]:
      # Only events dispatched from the browser process are generated.
      if (not ("handlers" in json_event) or
          not ("browser" in json_event["handlers"])):
        continue
      domain_empty = False
      domain_needs_client = True
      event_map = domain_map.copy()
      event_map["command"] = json_event["name"]
      event_map["Command"] = Capitalize(json_event["name"])
      json_parameters = json_event.get("parameters", [])
      event_map["declared_name"] = "%sParams" % event_map["Command"]
      DeclareStruct(json_parameters, event_map)
      client_methods.append(tmpl_event.substitute(event_map))
      client_method_impls.append(tmpl_event_impl.substitute(event_map))
  if domain_empty:
    continue
  type_decls.append(tmpl_handler.substitute(domain_map))
  setters.append(tmpl_setter.substitute(domain_map))
  fields.append(tmpl_field.substitute(domain_map))
  includes.append(tmpl_include.substitute(domain_map))
  fields_init.append(tmpl_field_init.substitute(domain_map))
  if domain_needs_client:
    type_decls.append(tmpl_client.substitute(
        domain_map, methods="".join(client_methods)))
    initializations.append(tmpl_init_client.substitute(domain_map))
    type_impls.append(tmpl_client_impl.substitute(
        domain_map, methods="\n".join(client_method_impls)))
  domain_map["initializations"] = "".join(initializations)
  domain_maps.append(domain_map)
# Second pass: redirected commands register against their target domain's
# handler, so they can only be appended once every domain was processed.
for domain_map in domain_maps:
  domain = domain_map["Domain"]
  if domain in redirects:
    domain_map["initializations"] += "".join(redirects[domain])
  handler_method_impls.append(tmpl_setter_impl.substitute(domain_map))
# Bug fix: the outputs were opened without "with", so an exception during
# template substitution left the handles open and the files half-written.
# template_h is defined earlier in this file (above this chunk).
with open(output_h_path, "w") as output_h_file:
  output_h_file.write(template_h.substitute(
      {},
      types="\n".join(type_decls),
      setters="".join(setters),
      methods="".join(handler_methods),
      fields="".join(fields)))
with open(output_cc_path, "w") as output_cc_file:
  output_cc_file.write(template_cc.substitute(
      {},
      major=blink_protocol["version"]["major"],
      minor=blink_protocol["version"]["minor"],
      includes="".join(sorted(includes)),
      fields_init=",\n      ".join(fields_init),
      methods="\n".join(handler_method_impls),
      types="\n".join(type_impls)))
| bsd-3-clause |
pwmarcz/django | django/utils/lorem_ipsum.py | 505 | 4960 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
# The canonical opening "lorem ipsum" paragraph, returned verbatim by
# paragraphs() when common=True.
COMMON_P = (
    'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod '
    'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim '
    'veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea '
    'commodo consequat. Duis aute irure dolor in reprehenderit in voluptate '
    'velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint '
    'occaecat cupidatat non proident, sunt in culpa qui officia deserunt '
    'mollit anim id est laborum.'
)
# Pool of Latin words sampled when generating random sentences/words.
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
         'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
         'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
         'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
         'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
         'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
         'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
         'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
         'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
         'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
         'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
         'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
         'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
         'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
         'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
         'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
         'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
         'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
         'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
         'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
         'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
         'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
         'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
         'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
         'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
         'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
         'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
         'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
         'maxime', 'corrupti')
# The first 19 words of the canonical paragraph, used by words(common=True).
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
    'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
    'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Builds one random lorem ipsum sentence.

    The sentence is made of 1-5 comma-separated sections of 3-12 sampled
    words each; it starts with a capital letter and ends with '.' or '?'.
    """
    section_total = random.randint(1, 5)
    sections = []
    for _ in range(section_total):
        sections.append(' '.join(random.sample(WORDS, random.randint(3, 12))))
    body = ', '.join(sections)
    return '%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """
    Builds one random lorem ipsum paragraph of 1-4 sentences.
    """
    sentence_count = random.randint(1, 4)
    return ' '.join([sentence() for _ in range(sentence_count)])
def paragraphs(count, common=True):
    """
    Builds a list of `count` lorem ipsum paragraphs.

    When `common` is True the first entry is the canonical COMMON_P
    paragraph; every other entry is random Latin text from paragraph().
    """
    result = []
    for index in range(count):
        use_common = common and index == 0
        result.append(COMMON_P if use_common else paragraph())
    return result
def words(count, common=True):
    """
    Builds a space-separated string of `count` lorem ipsum words.

    When `common` is True the result starts with the 19 standard
    COMMON_WORDS; any remainder (or everything, when common=False) is
    filled with batches sampled from WORDS.
    """
    chosen = list(COMMON_WORDS) if common else []
    remaining = count - len(chosen)
    if remaining <= 0:
        chosen = chosen[:count]
    else:
        while remaining > 0:
            batch = min(remaining, len(WORDS))
            remaining -= batch
            chosen += random.sample(WORDS, batch)
    return ' '.join(chosen)
| bsd-3-clause |
SR1s/WMS | WMS/views/__init__.py | 1 | 1759 | #-*-coding: utf-8
from flask import session, render_template, url_for, redirect, flash
import functools
from datetime import datetime, timedelta
def verify_login(func):
    """Decorator: require a session authenticated within the last 15 minutes.

    On each request the session timestamp is refreshed (sliding expiry) and
    the wrapped view runs; expired or missing sessions are redirected to the
    login page.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Sessions stay valid for 15 minutes after the last request.
        valid_time = timedelta(0, 60 * 15)
        last_seen = session.get('time', None)
        if last_seen and (datetime.utcnow() - last_seen) < valid_time:
            session['time'] = datetime.utcnow()
        else:
            session['time'] = None
        if session['time']:
            # Bug fix: the original wrapper accepted no arguments, so any
            # view with URL parameters broke; pass them through instead.
            return func(*args, **kwargs)
        return redirect(url_for('accounts.login'))
    return wrapper
def verify_admin(func):
    """Decorator: allow access only when the session privilege equals 255.

    Non-admin (or anonymous) users are flashed an error and redirected to
    the login page.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Bug fix: session.get avoids a KeyError when 'privilege' is absent
        # (anonymous session); also pass view arguments through, matching
        # verify_login.
        if session.get('privilege') == 255:
            return func(*args, **kwargs)
        flash('权限不足,无法访问', 'error')
        return redirect(url_for('accounts.login'))
    return wrapper
def mycmp(x, y):
    """Comparator for garment-size rows (dicts with a 'size' key).

    Known sizes order as XS < S < M < L < XL < XXL < '-'; unknown sizes
    rank with '-', and ties fall back to plain string comparison.

    Bug fixes relative to the original: the rank difference is computed
    with .get() throughout (the original guarded with .get() but then
    indexed directly, raising KeyError for unknown sizes), the always-empty
    `order2` dead code is removed, and the Python-2-only cmp() builtin is
    replaced by the equivalent sign expression.
    """
    size_rank = {'XS': 0, 'S': 1, 'M': 2, 'L': 3, 'XL': 4, 'XXL': 5, '-': 6}
    rank_x = size_rank.get(x['size'], 6)
    rank_y = size_rank.get(y['size'], 6)
    if rank_x != rank_y:
        return rank_x - rank_y
    a, b = x['size'], y['size']
    return (a > b) - (a < b)
def sort_cal_all(c):
    """Sorts the size rows of c in place (via mycmp), pads the list out to
    six rows with empty '-' placeholders, and returns the total amount."""
    c.sort(mycmp)
    total = 0
    for row in c:
        total += row['amount']
    while len(c) < 6:
        c.append(dict(size='-', amount=0))
    return total
def chkstatus(status_code):
    """Maps an order status code to its human-readable (Chinese) label.

    Consistency fix: the fallback label was a plain byte string while the
    other branches returned unicode; all labels are now unicode.
    """
    status_text = {
        0: u'尚未到货完毕',
        1: u'到货完毕',
        -1: u'订单已删除',
    }
    return status_text.get(status_code, u'状态异常')
jaclyniulianetti/jaclyniulianetti.github.io | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py | 1446 | 65937 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
  def setUp(self):
    # Each test redirects MSVSSettings' warning output here for inspection
    # by _ExpectedWarnings().
    self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
  def testValidateMSVSSettings_tool_names(self):
    """Tests that only MSVS tool names are allowed."""
    # 'foo' is not a tool at all and 'ClCompile' is an MSBuild (not MSVS)
    # tool name; both must be reported as unrecognized.
    MSVSSettings.ValidateMSVSSettings(
        {'VCCLCompilerTool': {},
         'VCLinkerTool': {},
         'VCMIDLTool': {},
         'foo': {},
         'VCResourceCompilerTool': {},
         'VCLibrarianTool': {},
         'VCManifestTool': {},
         'ClCompile': {}},
        self.stderr)
    self._ExpectedWarnings([
        'Warning: unrecognized tool foo',
        'Warning: unrecognized tool ClCompile'])
  def testValidateMSVSSettings_settings(self):
    """Tests validation of MSVS settings, including deliberately bad values.

    The fixture below is mostly valid; the handful of out-of-range,
    misspelled or mistyped entries must each produce exactly one of the
    warnings checked at the end.
    """
    MSVSSettings.ValidateMSVSSettings(
        {'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '5',
            'BrowseInformation': 'fdkslj',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '-1',
            'CompileAs': '1',
            'DebugInformationFormat': '2',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',
            'ErrorReporting': '1',
            'ExceptionHandling': '1',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '1',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '1',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'string1;string2',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '1',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},
         'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalDependencies_excluded': 'file3',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '2',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'CLRImageType': '2',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '2',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': '2',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'ErrorReporting': '2',
            'FixedBaseAddress': '2',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '2',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '2',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '2',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '2',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '2',
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a string1'},
         'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': '1',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '1',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'notgood': 'bogus',
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'VCResourceCompilerTool': {
            'AdditionalOptions': 'a string1',
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'notgood2': 'bogus',
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
         'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestResourceFile': 'a_file_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'truel',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}},
        self.stderr)
    # One warning per bad entry above; order is irrelevant.
    self._ExpectedWarnings([
        'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
        'index value (5) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/BrowseInformation, '
        "invalid literal for int() with base 10: 'fdkslj'",
        'Warning: for VCCLCompilerTool/CallingConvention, '
        'index value (-1) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/DebugInformationFormat, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
        'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
        'Warning: for VCLinkerTool/TargetMachine, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCMIDLTool/notgood',
        'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
        'Warning: for VCManifestTool/UpdateFileHashes, '
        "expected bool; got 'truel'"
        ''])
  def testValidateMSBuildSettings_settings(self):
    """Tests that invalid MSBuild settings each produce a warning.

    Feeds ValidateMSBuildSettings one entry per known setting of every
    MSBuild tool section.  A handful of entries are deliberately malformed
    (marked inline below); each must produce exactly one warning on
    self.stderr, and nothing else may warn.
    """
    MSVSSettings.ValidateMSBuildSettings(
        {'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'false',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'BuildingInIDE': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'CompileAsManaged': 'true',
            'CreateHotpatchableImage': 'true',
            'DebugInformationFormat': 'ProgramDatabase',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',  # invalid: misspelled setting name
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'SyncCThrow',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Precise',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'FunctionLevelLinking': 'false',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'false',
            'MinimalRebuild': 'true',
            'MultiProcessorCompilation': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Disabled',
            'PrecompiledHeader': 'NotUsing',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'PreprocessOutputPath': 'a string1',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'false',
            'ProcessorNumber': '33',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TreatSpecificWarningsAsErrors': 'string1;string2',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UseUnicodeForAssemblerListing': 'true',
            'WarningLevel': 'TurnOffAllWarnings',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},  # invalid: unknown setting name
         'Link': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'BuildingInIDE': 'true',
            'CLRImageType': 'ForceIJWImage',
            'CLRSupportLastError': 'Enabled',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'CreateHotPatchableImage': 'X86Image',
            'DataExecutionPrevention': 'false',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': 'NotSet',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'FixedBaseAddress': 'false',
            'ForceFileOutput': 'Enabled',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'a_file_list',
            'ImageHasSafeExceptionHandlers': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'false',
            'LinkDLL': 'true',
            'LinkErrorReporting': 'SendErrorReport',
            'LinkStatus': 'true',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'MSDOSStubFileName': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'false',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'PreventDllBinding': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SectionAlignment': '33',
            'SetChecksum': 'true',
            'ShowProgress': 'LinkVerboseREF',
            'SpecifySectionAttributes': 'a string1',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Console',
            'SupportNobindOfDelayLoadedDLL': 'true',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TrackerLogDirectory': 'a_folder',
            'TreatLinkerWarningAsErrors': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'AsInvoker',
            'UACUIAccess': 'true',
            'Version': 'a string1'},
         'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'Culture': '0x236',
            'IgnoreStandardIncludePath': 'true',
            'NullTerminateStrings': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ApplicationConfigurationMode': 'true',
            'ClientStubFile': 'a_file_name',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': 'Signed',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'EnableCustom',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateClientFiles': 'Stub',
            'GenerateServerFiles': 'None',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'LocaleID': '33',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'ServerStubFile': 'a_file_name',
            'StructMemberAlignment': 'NotSet',
            'SuppressCompilerWarnings': 'true',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Itanium',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibFormat': 'NewFormat',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'Lib': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'DisplayLibrary': 'a string1',
            'ErrorReporting': 'PromptImmediately',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkTimeCodeGeneration': 'true',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'Name': 'a_file_name',
            'OutputFile': 'a_file_name',
            'RemoveObjects': 'file1;file2',
            'SubSystem': 'Console',
            'SuppressStartupBanner': 'true',
            'TargetMachine': 'MachineX86i',  # invalid: unrecognized enum value
            'TrackerLogDirectory': 'a_folder',
            'TreatLibWarningAsErrors': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Verbose': 'true'},
         'Manifest': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'EnableDPIAwareness': 'fal',  # invalid: not a boolean
            'GenerateCatalogFiles': 'truel',  # invalid: not a boolean
            'GenerateCategoryTags': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestFromManagedAssembly': 'a_file_name',
            'notgood3': 'bogus',  # invalid: unknown setting name
            'OutputManifestFile': 'a_file_name',
            'OutputResourceManifests': 'a string1',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressDependencyElement': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
         'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'true'},
         'ManifestResourceCompile': {
            'ResourceOutputFileName': 'a_file_name'},
         '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}},
        self.stderr)
    # One warning per malformed entry above, and no others.
    self._ExpectedWarnings([
        'Warning: unrecognized setting ClCompile/Enableprefast',
        'Warning: unrecognized setting ClCompile/ZZXYZ',
        'Warning: unrecognized setting Manifest/notgood3',
        'Warning: for Manifest/GenerateCatalogFiles, '
        "expected bool; got 'truel'",
        'Warning: for Lib/TargetMachine, unrecognized enumerated value '
        'MachineX86i',
        "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
  def testConvertToMSBuildSettings_full_synthetic(self):
    """Tests conversion of a full synthetic set of MSVS settings.

    Supplies one entry per setting ConvertToMSBuildSettings knows about for
    every MSVS tool and checks the complete converted MSBuild dictionary.
    No warnings are expected.
    """
    # MSVS (VS2008-style) input settings, one entry per supported setting.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '1',
            'BrowseInformation': '2',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '0',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': '0',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '1',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '0',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '2',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '0',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '0',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': '1',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': '1',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '0',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'ErrorReporting': '0',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2;file3',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '1',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '0',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '0',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '3',
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '1',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'false',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a_string'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': '0',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '2',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'EmbedManifest': 'true',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'ManifestResourceFile': 'my_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}}
    # The converted MSBuild (VS2010-style) settings we expect back.
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string /J',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'true',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': 'NotSet',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'AnySuitable',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'Create',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'Link': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': 'ForceIJWImage',
            'CLRThreadAttribute': 'STAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': 'Driver',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'NoErrorReport',
            'LinkTimeCodeGeneration': 'PGInstrument',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': '',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'true',
            'ShowProgress': 'NotSet',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Windows',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineARM',
            'TerminalServerAware': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'HighestAvailable',
            'UACUIAccess': 'true',
            'Version': 'a_string'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '0x03eb',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': 'Unsigned',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'All',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '4',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Win32',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'Lib': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
        'ManifestResourceCompile': {
            'ResourceOutputFileName': 'my_name'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'false'},
        '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}}
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_actual(self):
    """Tests the conversion of an actual project.

    A VS2008 project with most of the options defined was created through the
    VS2008 IDE. It was then converted to VS2010. The tool settings found in
    the .vcproj and .vcxproj files were converted to the two dictionaries
    msvs_settings and expected_msbuild_settings.

    Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure than inherited values are
    included. Since the Gyp projects we generate do not use inheritance,
    we removed these macros. They were:
        ClCompile:
            AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
            AdditionalOptions: ' %(AdditionalOptions)'
            AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
            DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
            ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
            ForcedUsingFiles: ';%(ForcedUsingFiles)',
            PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
            UndefinePreprocessorDefinitions:
                ';%(UndefinePreprocessorDefinitions)',
        Link:
            AdditionalDependencies: ';%(AdditionalDependencies)',
            AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
            AdditionalManifestDependencies:
                ';%(AdditionalManifestDependencies)',
            AdditionalOptions: ' %(AdditionalOptions)',
            AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
            AssemblyLinkResource: ';%(AssemblyLinkResource)',
            DelayLoadDLLs: ';%(DelayLoadDLLs)',
            EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
            ForceSymbolReferences: ';%(ForceSymbolReferences)',
            IgnoreSpecificDefaultLibraries:
                ';%(IgnoreSpecificDefaultLibraries)',
        ResourceCompile:
            AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
            AdditionalOptions: ' %(AdditionalOptions)',
            PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
        Manifest:
            AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
            AdditionalOptions: ' %(AdditionalOptions)',
            InputResourceManifests: ';%(InputResourceManifests)',
    """
    # Tool settings exactly as extracted from the VS2008 .vcproj file,
    # keyed by the MSVS (VS2008) tool names.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)\\a',
            'AssemblerOutput': '1',
            'BasicRuntimeChecks': '3',
            'BrowseInformation': '1',
            'BrowseInformationFile': '$(IntDir)\\e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': '1',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '2',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '2',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'GeneratePreprocessedFile': '2',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': '$(IntDir)\\b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
            'PrecompiledHeaderThrough': 'StdAfx.hd',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
            'RuntimeLibrary': '3',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'false',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '0',
            'UseUnicodeResponseFiles': 'false',
            'WarnAsError': 'true',
            'WarningLevel': '3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)\\c'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': '1',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': '3',
            'CLRThreadAttribute': '1',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': '1',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'ErrorReporting': '2',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'false',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'flob;flok',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': '2',
            'LinkIncremental': '0',
            'LinkLibraryDependencies': 'false',
            'LinkTimeCodeGeneration': '1',
            'ManifestFile':
                '$(IntDir)\\$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'OptimizeForWindows98': '2',
            'OptimizeReferences': '2',
            'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'false',
            'ShowProgress': '1',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': '1',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '1',
            'TerminalServerAware': '1',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'false',
            'Version': '333'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '3084',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
            'ShowProgress': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
            'EmbedManifest': 'false',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'ManifestResourceFile':
                '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'false',
            'VerboseOutput': 'true'}}
    # The same settings as they appear after conversion in the VS2010
    # .vcxproj file, keyed by the MSBuild (VS2010) tool names. A few
    # settings (e.g. VCManifestTool's ManifestResourceFile) also move to a
    # different tool section during conversion.
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/more /J',
            'AdditionalUsingDirectories': 'test',
            'AssemblerListingLocation': '$(IntDir)a',
            'AssemblerOutput': 'AssemblyCode',
            'BasicRuntimeChecks': 'EnableFastChecks',
            'BrowseInformation': 'true',
            'BrowseInformationFile': '$(IntDir)e',
            'BufferSecurityCheck': 'false',
            'CallingConvention': 'FastCall',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'abc',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Queue',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Size',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'false',
            'ForcedIncludeFiles': 'def',
            'ForcedUsingFiles': 'ge',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': '$(IntDir)b',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'NotUsing',  # Actual conversion gives ''
            'PrecompiledHeaderFile': 'StdAfx.hd',
            'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
            'PreprocessSuppressLineNumbers': 'true',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
            'RuntimeLibrary': 'MultiThreadedDebugDLL',
            'RuntimeTypeInfo': 'false',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '4Bytes',
            'SuppressStartupBanner': 'false',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'false',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'wer',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level3',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': '$(IntDir)c'},
        'Link': {
            'AdditionalDependencies': 'zx',
            'AdditionalLibraryDirectories': 'asd',
            'AdditionalManifestDependencies': 's2',
            'AdditionalOptions': '/mor2',
            'AddModuleNamesToAssembly': 'd1',
            'AllowIsolation': 'false',
            'AssemblyDebug': 'true',
            'AssemblyLinkResource': 'd5',
            'BaseAddress': '23423',
            'CLRImageType': 'ForceSafeILImage',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'd4',
            'DelaySign': 'true',
            'Driver': 'UpOnly',
            'EmbedManagedResourceFile': 'd2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'false',
            'EntryPointSymbol': 'f5',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'd3',
            'FunctionOrder': 'fssdfsd',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': '13',
            'HeapReserveSize': '12',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'flob;flok',
            'ImportLibrary': 'f4',
            'KeyContainer': 'f7',
            'KeyFile': 'f6',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'QueueForNextLogin',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
            'MapExports': 'true',
            'MapFileName': 'd5',
            'MergedIDLBaseFileName': 'f2',
            'MergeSections': 'f5',
            'MidlCommandFile': 'f1',
            'ModuleDefinitionFile': 'sdsd',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'true',
            'OutputFile': '$(OutDir)$(ProjectName)2.exe',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
            'ProgramDatabaseFile': 'Flob.pdb',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'false',
            'ShowProgress': 'LinkVerbose',
            'StackCommitSize': '15',
            'StackReserveSize': '14',
            'StripPrivateSymbols': 'd3',
            'SubSystem': 'Console',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'false',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'f3',
            'TypeLibraryResourceID': '12',
            'UACExecutionLevel': 'RequireAdministrator',
            'UACUIAccess': 'true',
            'Version': '333'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'f3',
            'AdditionalOptions': '/more3',
            'Culture': '0x0c0c',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': '_UNICODE;UNICODE2',
            'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
            'ShowProgress': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'sfsdfsd',
            'AdditionalOptions': 'afdsdafsd',
            'AssemblyIdentity': 'sddfdsadfsa',
            'ComponentFileName': 'fsdfds',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'asfsfdafs',
            'OutputManifestFile': '$(TargetPath).manifestdfs',
            'RegistrarScriptFile': 'sdfsfd',
            'ReplacementsFile': 'sdffsd',
            'SuppressStartupBanner': 'false',
            'TypeLibraryFile': 'sfsd',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'sfsd',
            'VerboseOutput': 'true'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'false',
            'UseLibraryDependencyInputs': 'true'},
        '': {
            'EmbedManifest': 'false',
            'GenerateManifest': 'false',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': ''
        },
        'ManifestResourceCompile': {
            'ResourceOutputFileName':
                '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
    }
    # Convert and verify; no conversion warnings are expected.
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| mit |
rhertzog/django | django/templatetags/static.py | 91 | 4391 | from django import template
from django.apps import apps
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
register = template.Library()
class PrefixNode(template.Node):
    """
    Template node that renders (or stores in the context) a URL prefix
    taken from a settings attribute such as ``STATIC_URL`` or ``MEDIA_URL``.
    """

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    def __init__(self, varname=None, name=None):
        """
        ``name`` is the settings attribute to read; ``varname``, if given,
        is the context variable to store the prefix in instead of rendering
        it inline.
        """
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    @classmethod
    def handle_token(cls, parser, token, name):
        """
        Class method to parse prefix node and return a Node.

        Accepts either ``{% tag %}`` or ``{% tag as varname %}``.
        """
        # token.split_contents() isn't useful here because tags using this method don't accept variable as arguments
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        if len(tokens) > 1:
            # Bug fix: previously, "{% tag as %}" (no variable name after
            # 'as') raised an IndexError here; report a proper template
            # syntax error instead.
            if len(tokens) < 3:
                raise template.TemplateSyntaxError(
                    "'%s' requires a variable name after 'as'" % tokens[0])
            varname = tokens[2]
        else:
            varname = None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        """Return the value of the given settings attribute as an IRI-safe
        string, or '' if settings cannot be imported."""
        try:
            from django.conf import settings
        except ImportError:
            prefix = ''
        else:
            prefix = iri_to_uri(getattr(settings, name, ''))
        return prefix

    def render(self, context):
        prefix = self.handle_simple(self.name)
        if self.varname is None:
            return prefix
        # Store in the context and render nothing.
        context[self.varname] = prefix
        return ''
@register.tag
def get_static_prefix(parser, token):
    """
    Make the static prefix (``settings.STATIC_URL``) available in a template.

    Usage::

        {% get_static_prefix [as varname] %}

    Without ``as varname`` the prefix is rendered inline; with it, the
    prefix is stored in the named context variable instead. For example::

        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}
    """
    node = PrefixNode.handle_token(parser, token, "STATIC_URL")
    return node
@register.tag
def get_media_prefix(parser, token):
    """
    Make the media prefix (``settings.MEDIA_URL``) available in a template.

    Usage::

        {% get_media_prefix [as varname] %}

    Without ``as varname`` the prefix is rendered inline; with it, the
    prefix is stored in the named context variable instead. For example::

        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}
    """
    node = PrefixNode.handle_token(parser, token, "MEDIA_URL")
    return node
class StaticNode(template.Node):
    """
    Template node that resolves a static-asset path to its full URL.

    If ``varname`` is set, the resolved URL is stored in the template
    context under that name instead of being rendered inline.
    """

    def __init__(self, varname=None, path=None):
        if path is None:
            raise template.TemplateSyntaxError(
                "Static template nodes must be given a path to return.")
        self.path = path
        self.varname = varname

    def url(self, context):
        """Resolve the (possibly variable) path against the context and
        return the corresponding static URL."""
        path = self.path.resolve(context)
        return self.handle_simple(path)

    def render(self, context):
        url = self.url(context)
        if self.varname is None:
            return url
        # Store in the context and render nothing.
        context[self.varname] = url
        return ''

    @classmethod
    def handle_simple(cls, path):
        # Prefer the staticfiles storage backend when the app is installed;
        # otherwise fall back to joining the path onto the raw STATIC_URL.
        if apps.is_installed('django.contrib.staticfiles'):
            from django.contrib.staticfiles.storage import staticfiles_storage
            return staticfiles_storage.url(path)
        else:
            return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)

    @classmethod
    def handle_token(cls, parser, token):
        """
        Class method to parse prefix node and return a Node.

        Accepts ``{% static path %}`` or ``{% static path as varname %}``.
        """
        bits = token.split_contents()

        if len(bits) < 2:
            raise template.TemplateSyntaxError(
                "'%s' takes at least one argument (path to file)" % bits[0])

        path = parser.compile_filter(bits[1])

        if len(bits) >= 2 and bits[-2] == 'as':
            # Bug fix: use the token *after* 'as' as the variable name.
            # The previous ``bits[3]`` selected the wrong token whenever
            # extra arguments appeared before 'as' (e.g.
            # "{% static a b as c %}" would bind varname to 'as').
            varname = bits[-1]
        else:
            varname = None

        return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
    """
    Join a path with the ``STATIC_URL`` setting and render the result.

    Usage::

        {% static path [as varname] %}

    The path may be a literal string or a template variable; with
    ``as varname`` the URL is stored in the context instead of rendered.
    For example::

        {% static "myapp/css/base.css" %}
        {% static variable_with_path %}
        {% static "myapp/css/base.css" as admin_base_css %}
        {% static variable_with_path as varname %}
    """
    node = StaticNode.handle_token(parser, token)
    return node
def static(path):
    """
    Resolve a relative static-asset path into its absolute URL.

    Delegates to ``StaticNode.handle_simple`` so the same resolution logic
    is shared with the ``{% static %}`` template tag.
    """
    url = StaticNode.handle_simple(path)
    return url
| bsd-3-clause |
jordanemedlock/psychtruths | temboo/Library/SendGrid/WebAPI/FilterCommands/ListAvailableApps.py | 5 | 3593 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListAvailableApps
# List all availalbe apps available through the SendGrid Web API.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListAvailableApps(Choreography):
    """Choreo wrapping SendGrid's Web API call that lists available apps."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the ListAvailableApps Choreo. A TembooSession
        object, containing a valid set of Temboo credentials, must be supplied.
        """
        super(ListAvailableApps, self).__init__(
            temboo_session,
            '/Library/SendGrid/WebAPI/FilterCommands/ListAvailableApps')

    def new_input_set(self):
        # Fresh, empty input set for this Choreo.
        return ListAvailableAppsInputSet()

    def _make_execution(self, session, exec_id, path):
        # Wrap an asynchronous execution handle.
        return ListAvailableAppsChoreographyExecution(session, exec_id, path)

    def _make_result_set(self, result, path):
        # Wrap the raw result in the Choreo-specific result set.
        return ListAvailableAppsResultSet(result, path)
class ListAvailableAppsInputSet(InputSet):
    """
    Inputs for the ListAvailableApps Choreo.

    Populate the inputs with the setter methods below before executing the
    Choreo.
    """

    def set_APIKey(self, value):
        """
        Set the APIKey input. ((required, string) The API Key obtained from SendGrid.)
        """
        super(ListAvailableAppsInputSet, self)._set_input('APIKey', value)

    def set_APIUser(self, value):
        """
        Set the APIUser input. ((required, string) The username registered with SendGrid.)
        """
        super(ListAvailableAppsInputSet, self)._set_input('APIUser', value)

    def set_ResponseFormat(self, value):
        """
        Set the ResponseFormat input. ((optional, string) The username registered with SendGrid.)
        """
        super(ListAvailableAppsInputSet, self)._set_input('ResponseFormat', value)
class ListAvailableAppsResultSet(ResultSet):
    """
    Results of a ListAvailableApps Choreo execution.

    Use the getter below to retrieve the outputs once the Choreo has run.
    """

    def getJSONFromString(self, str):
        # Parse a JSON string into Python objects.
        # NOTE(review): the parameter name shadows the builtin ``str``; it is
        # kept unchanged because the name is part of the published interface.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
        """
        return self._output.get('Response')
class ListAvailableAppsChoreographyExecution(ChoreographyExecution):
    """Handle for an asynchronous execution of the ListAvailableApps Choreo."""

    def _make_result_set(self, response, path):
        result_set = ListAvailableAppsResultSet(response, path)
        return result_set
| apache-2.0 |
ataylor32/django | django/utils/timesince.py | 409 | 2671 | from __future__ import unicode_literals
import calendar
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import ugettext, ungettext_lazy
# (seconds-per-unit, lazily-translated formatter) pairs, ordered from the
# largest unit to the smallest. A "month" is approximated as 30 days and a
# "year" as 365 days (leap days are compensated for in timesince()).
TIMESINCE_CHUNKS = (
    (60 * 60 * 24 * 365, ungettext_lazy('%d year', '%d years')),
    (60 * 60 * 24 * 30, ungettext_lazy('%d month', '%d months')),
    (60 * 60 * 24 * 7, ungettext_lazy('%d week', '%d weeks')),
    (60 * 60 * 24, ungettext_lazy('%d day', '%d days')),
    (60 * 60, ungettext_lazy('%d hour', '%d hours')),
    (60, ungettext_lazy('%d minute', '%d minutes'))
)
def timesince(d, now=None, reversed=False):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    If ``reversed`` is True the delta is measured from ``now`` to ``d``
    instead (used by ``timeuntil``). ``now`` defaults to the current time,
    timezone-aware iff ``d`` is aware.

    Adapted from
    http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    if not now:
        now = datetime.datetime.now(utc if is_aware(d) else None)

    delta = (d - now) if reversed else (now - d)

    # Deal with leapyears by subtracting the number of leapdays
    delta -= datetime.timedelta(calendar.leapdays(d.year, now.year))

    # ignore microseconds
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(ugettext('0 minutes'))
    # Find the largest unit with a non-zero count; ``i``, ``seconds``,
    # ``name`` and ``count`` keep their values from the loop after ``break``.
    for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
        count = since // seconds
        if count != 0:
            break
    result = avoid_wrapping(name % count)
    if i + 1 < len(TIMESINCE_CHUNKS):
        # Now get the second item
        seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            result += ugettext(', ') + avoid_wrapping(name2 % count2)
    return result
def timeuntil(d, now=None):
    """
    Return a string describing the time remaining until ``d``.

    The mirror image of ``timesince``: the delta is measured forward from
    ``now`` (or the current time) to ``d``, e.g. "4 days, 6 hours".
    """
    result = timesince(d, now, reversed=True)
    return result
| bsd-3-clause |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/bdb.py | 108 | 21084 | """Debugger basics"""
import fnmatch
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
    """Exception raised to abort the debugger and give up completely."""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self, skip=None):
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self._set_stopinfo(None, None)
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def is_skipped_module(self, module_name):
for pattern in self.skip:
if fnmatch.fnmatch(module_name, pattern):
return True
return False
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if self.skip and \
self.is_skipped_module(frame.f_globals.get('__name__')):
return False
if frame is self.stopframe:
if self.stoplineno == -1:
return False
return frame.f_lineno >= self.stoplineno
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return True
frame = frame.f_back
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not filename in self.breaks:
return False
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if not lineno in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, exc_info):
exc_type, exc_value, exc_traceback = exc_info
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
self.stopframe = stopframe
self.returnframe = returnframe
self.quitting = 0
# stoplineno >= 0 means: stop at line >= the stoplineno
# stoplineno -1 means: don't stop at all
self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_until(self, frame): #the name "until" is borrowed from gdb
"""Stop when the line with the line no greater than the current one is
reached or when returning from current frame"""
self._set_stopinfo(frame, frame, frame.f_lineno+1)
def set_step(self):
"""Stop after one line of code."""
self._set_stopinfo(None, None)
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self._set_stopinfo(frame, None)
def set_return(self, frame):
"""Stop when returning from the given frame."""
self._set_stopinfo(frame.f_back, frame)
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message is something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not filename in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def _prune_breaks(self, filename, lineno):
if (filename, lineno) not in Breakpoint.bplist:
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
#
    def format_stack_entry(self, frame_lineno, lprefix=': '):
        """Return a one-line description of a (frame, lineno) pair:
        'file(lineno)name(args)->retval: source-line'.
        """
        # NOTE: `repr` here is the Python 2 *module* (size-limited reprs),
        # not the builtin.
        import linecache, repr
        frame, lineno = frame_lineno
        filename = self.canonic(frame.f_code.co_filename)
        s = '%s(%r)' % (filename, lineno)
        if frame.f_code.co_name:
            s = s + frame.f_code.co_name
        else:
            s = s + "<lambda>"
        # __args__/__return__ are conventionally stashed in f_locals by
        # the dispatch machinery.
        if '__args__' in frame.f_locals:
            args = frame.f_locals['__args__']
        else:
            args = None
        if args:
            s = s + repr.repr(args)
        else:
            s = s + '()'
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            s = s + '->'
            s = s + repr.repr(rv)
        line = linecache.getline(filename, lineno, frame.f_globals)
        if line: s = s + lprefix + line.strip()
        return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
    def run(self, cmd, globals=None, locals=None):
        """Debug a statement, given as a string or code object, executing
        it under this debugger's trace function (Python 2 exec syntax).
        """
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        sys.settrace(self.trace_dispatch)
        if not isinstance(cmd, types.CodeType):
            # A trailing newline keeps single-line statements valid.
            cmd = cmd+'\n'
        try:
            exec cmd in globals, locals
        except BdbQuit:
            pass
        finally:
            # Always detach the trace function, even if the code raised.
            self.quitting = 1
            sys.settrace(None)
    def runeval(self, expr, globals=None, locals=None):
        """Debug an expression executed via eval() under this debugger.

        Returns the expression's value, or None if the debugger quit.
        """
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        sys.settrace(self.trace_dispatch)
        if not isinstance(expr, types.CodeType):
            expr = expr+'\n'
        try:
            return eval(expr, globals, locals)
        except BdbQuit:
            pass
        finally:
            # Runs even on the early return above: detach tracing.
            self.quitting = 1
            sys.settrace(None)
    def runctx(self, cmd, globals, locals):
        """Backward-compatible alias for run() with explicit namespaces."""
        # B/W compatibility
        self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
    def runcall(self, func, *args, **kwds):
        """Debug a single function call under this debugger.

        Returns the function's result, or None if the debugger quit.
        """
        self.reset()
        sys.settrace(self.trace_dispatch)
        res = None
        try:
            res = func(*args, **kwds)
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)
        return res
def set_trace():
    # Convenience entry point: start debugging in the caller's frame
    # using a throwaway Bdb instance.
    Bdb().set_trace()
class Breakpoint:
    """Breakpoint class

    Implements temporary breakpoints, ignore counts, disabling and
    (re)-enabling, and conditionals.

    Breakpoints are indexed by number through bpbynumber and by
    the file,line tuple using bplist. The former points to a
    single instance of class Breakpoint. The latter points to a
    list of such instances since there may be more than one
    breakpoint per line.
    """

    # XXX Keeping state in the class is a mistake -- this means
    # you cannot have more than one active Bdb instance.

    next = 1  # Next bp to be assigned
    bplist = {}  # indexed by (file, lineno) tuple
    bpbynumber = [None]  # Each entry is None or an instance of Bpt
    # index 0 is unused, except for marking an
    # effective break .... see effective()

    def __init__(self, file, line, temporary=0, cond=None, funcname=None):
        self.funcname = funcname
        # Needed if funcname is not None.
        self.func_first_executable_line = None
        self.file = file  # This better be in canonical form!
        self.line = line
        self.temporary = temporary
        self.cond = cond
        self.enabled = 1
        self.ignore = 0
        self.hits = 0
        self.number = Breakpoint.next
        Breakpoint.next = Breakpoint.next + 1
        # Build the two lists
        self.bpbynumber.append(self)
        if (file, line) in self.bplist:
            self.bplist[file, line].append(self)
        else:
            self.bplist[file, line] = [self]

    def deleteMe(self):
        # Unregister from both indexes; the bpbynumber slot is kept (as
        # None) so the numbering of other breakpoints stays stable.
        index = (self.file, self.line)
        self.bpbynumber[self.number] = None  # No longer in list
        self.bplist[index].remove(self)
        if not self.bplist[index]:
            # No more bp for this f:l combo
            del self.bplist[index]

    def enable(self):
        self.enabled = 1

    def disable(self):
        self.enabled = 0

    def bpprint(self, out=None):
        # Print a human-readable description of this breakpoint to `out`
        # (defaults to stdout; Python 2 `print >>` syntax).
        if out is None:
            out = sys.stdout
        if self.temporary:
            disp = 'del '
        else:
            disp = 'keep '
        if self.enabled:
            disp = disp + 'yes '
        else:
            disp = disp + 'no '
        print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
                                                     self.file, self.line)
        if self.cond:
            print >>out, '\tstop only if %s' % (self.cond,)
        if self.ignore:
            print >>out, '\tignore next %d hits' % (self.ignore)
        if (self.hits):
            if (self.hits > 1): ss = 's'
            else: ss = ''
            print >>out, ('\tbreakpoint already hit %d time%s' %
                          (self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
    """Return True if breakpoint `b` should fire in `frame`.

    Handles both line-number breakpoints and function-name breakpoints
    (which fire only on the function's first executable line).
    """
    if not b.funcname:
        # Line-number breakpoint: fire only on the exact line, so a
        # breakpoint sitting on a `def` statement doesn't trigger when
        # the defined function is merely *called*.
        return b.line == frame.f_lineno

    # Function-name breakpoint.
    if frame.f_code.co_name != b.funcname:
        # Not executing inside the named function at all.
        return False

    if not b.func_first_executable_line:
        # First entry into the function: record where its body starts.
        b.func_first_executable_line = frame.f_lineno

    # Fire only on that first executable line, not on every line.
    return b.func_first_executable_line == frame.f_lineno
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
    """Determine which breakpoint for this file:line is to be acted upon.

    Called only if we know there is a bpt at this
    location. Returns breakpoint that was triggered and a flag
    that indicates if it is ok to delete a temporary bp.
    """
    possibles = Breakpoint.bplist[file,line]
    for i in range(0, len(possibles)):
        b = possibles[i]
        if b.enabled == 0:
            continue
        if not checkfuncname(b, frame):
            continue
        # Count every hit when bp is enabled
        b.hits = b.hits + 1
        if not b.cond:
            # If unconditional, and ignoring,
            # go on to next, else break
            if b.ignore > 0:
                b.ignore = b.ignore -1
                continue
            else:
                # breakpoint and marker that's ok
                # to delete if temporary
                return (b,1)
        else:
            # Conditional bp.
            # Ignore count applies only to those bpt hits where the
            # condition evaluates to true.
            try:
                val = eval(b.cond, frame.f_globals,
                           frame.f_locals)
                if val:
                    if b.ignore > 0:
                        b.ignore = b.ignore -1
                        # continue
                    else:
                        return (b,1)
                # else:
                #   continue
            except:
                # if eval fails, most conservative
                # thing is to stop on breakpoint
                # regardless of ignore count.
                # Don't delete temporary,
                # as another hint to user.
                return (b,0)
    # No enabled, matching breakpoint fired.
    return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
    # Minimal tracing debugger used by the module self-test: it simply
    # prints each event it receives (Python 2 print statements).
    def user_call(self, frame, args):
        name = frame.f_code.co_name
        if not name: name = '???'
        print '+++ call', name, args
    def user_line(self, frame):
        import linecache
        name = frame.f_code.co_name
        if not name: name = '???'
        fn = self.canonic(frame.f_code.co_filename)
        line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
        print '+++', fn, frame.f_lineno, name, ':', line.strip()
    def user_return(self, frame, retval):
        print '+++ return', retval
    def user_exception(self, frame, exc_stuff):
        # Report the exception, then keep running without stopping.
        print '+++ exception', exc_stuff
        self.set_continue()
def foo(n):
    # Demo function traced by test(); calls bar() (Python 2 prints).
    print 'foo(', n, ')'
    x = bar(n*10)
    print 'bar returned', x
def bar(a):
    # Demo helper for the self-test.  NOTE: a/2 is Python 2 integer
    # division when `a` is an int.
    print 'bar(', a, ')'
    return a/2
def test():
    # Run the foo/bar demo under the tracing debugger.
    t = Tdb()
    t.run('import bdb; bdb.foo(10)')
# end
| mit |
fedora-infra/elections | tests/__init__.py | 1 | 6400 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Pierre-Yves Chibon
Author: Pierre-Yves Chibon <pingou@pingoured.fr>
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2, or (at your option) any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU
# General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of Red Hat, Inc.
fedora_elections test script
"""
import logging
import os
import sys
import unittest
from contextlib import contextmanager
from datetime import date
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
import fedora_elections # noqa: E402
import fedora_elections.admin # noqa:E402
import fedora_elections.elections # noqa:E402
import fedora_elections.forms # noqa:E402
from fedora_elections import models # noqa:E402
import six # noqa:E402
DB_PATH = "sqlite:///:memory:"
FAITOUT_URL = "http://faitout.fedorainfracloud.org/"

# Under CI (BUILD_ID set), try to obtain a throwaway PostgreSQL database
# from the faitout service; on any failure fall back to in-memory SQLite.
if os.environ.get("BUILD_ID"):
    try:
        import requests

        req = requests.get("%s/new" % FAITOUT_URL)
        if req.status_code == 200:
            DB_PATH = req.text
            print("Using faitout at: %s" % DB_PATH)
    except Exception:
        # Best-effort: requests missing or faitout unreachable.
        pass

TODAY = date.today()
@contextmanager
def user_set(APP, user, oidc_id_token=None):
    """ Set the provided user as fas_user in the provided application."""
    # Hack used to remove the before_request function set by
    # flask.ext.fas_openid.FAS which otherwise kills our effort to set a
    # flask.g.fas_user.
    from flask import appcontext_pushed, g
    APP.before_request_funcs[None] = []

    def handler(sender, **kwargs):
        # Runs when an app context is pushed: inject the fake identity
        # into flask.g for the duration of the `with` block.
        g.fas_user = user
        g.oidc_id_token = oidc_id_token

    with appcontext_pushed.connected_to(handler, APP):
        yield
class Modeltests(unittest.TestCase):
    """ Base test case: provides a fresh database session per test. """

    def __init__(self, method_name="runTest"):
        """ Constructor. """
        unittest.TestCase.__init__(self, method_name)
        self.session = None

    # pylint: disable=C0103
    def setUp(self):
        """ Set up the environment, run before every test. """
        self.session = models.create_tables(DB_PATH)

    # pylint: disable=C0103
    def tearDown(self):
        """ Close the session and drop the test database, if any. """
        self.session.close()
        # NOTE(review): for the default "sqlite:///:memory:" DB_PATH this
        # os.path.exists() check can never be true; it only matters when
        # DB_PATH was rewritten to a real path/URL at module load.
        if os.path.exists(DB_PATH):
            os.unlink(DB_PATH)
        if DB_PATH.startswith("postgres"):
            if "localhost" in DB_PATH:
                models.drop_tables(DB_PATH, self.session.bind)
            else:
                # NOTE(review): `requests` is only imported at module load
                # when BUILD_ID is set — presumably the same condition that
                # makes DB_PATH a remote postgres URL; confirm.
                db_name = DB_PATH.rsplit("/", 1)[1]
                requests.get("%s/clean/%s" % (FAITOUT_URL, db_name))
class ModelFlasktests(Modeltests):
    """ Model flask application tests. """

    def setup_db(self):
        """Add a calendar and some meetings so that we can play with
        something."""
        from tests.test_vote import Votetests

        votes = Votetests("test_init_vote")
        votes.session = self.session
        votes.test_init_vote()

    def setUp(self):
        """ Set up the environment, run before every test: configure the
        flask app for testing and share the DB session with all modules.
        """
        super(ModelFlasktests, self).setUp()
        fedora_elections.APP.config["TESTING"] = True
        fedora_elections.APP.config["FEDORA_ELECTIONS_ADMIN_GROUP"] = "elections"
        fedora_elections.APP.debug = True
        # Silence the app's logging during tests.
        fedora_elections.APP.logger.handlers = []
        fedora_elections.APP.logger.setLevel(logging.CRITICAL)
        fedora_elections.SESSION = self.session
        fedora_elections.admin.SESSION = self.session
        fedora_elections.elections.SESSION = self.session
        fedora_elections.forms.SESSION = self.session
        self.app = fedora_elections.APP.test_client()

    def get_wtforms_version(self):
        """Returns the wtforms version as a tuple (ints where possible)."""
        import wtforms

        wtforms_v = wtforms.__version__.split(".")
        for idx, val in enumerate(wtforms_v):
            try:
                val = int(val)
            except ValueError:
                # Non-numeric components (e.g. "dev") are kept as strings.
                pass
            wtforms_v[idx] = val
        return tuple(wtforms_v)

    def get_csrf(self, url="/admin/new", output=None):
        """Retrieve a CSRF token from given URL (or a prefetched response)."""
        if output is None:
            output = self.app.get(url)
            self.assertEqual(output.status_code, 200)
        # Scrape the hidden csrf_token field out of the rendered form.
        return (
            output.get_data(as_text=True)
            .split('name="csrf_token" type="hidden" value="')[1]
            .split('">')[0]
        )
class FakeGroup(object):
    """Stand-in for a FAS group object.

    Exposes only the attributes read by the code under test: `name`
    and a fixed `group_type` of "cla".
    """

    def __init__(self, name):
        """Create a fake group called `name`."""
        self.name, self.group_type = name, "cla"
# pylint: disable=R0903
class FakeUser(object):
    """ Fake user used to test the fedocallib library. """

    def __init__(self, groups=None, username="username", cla_done=True):
        """Constructor.

        :arg groups: a group name or a list of group names this fake
            user is supposed to be in.  Defaults to no groups.  (The
            previous mutable default ``groups=[]`` was shared across
            calls; ``None`` is now the sentinel and is normalised to a
            fresh empty list.)
        :arg username: the account name of the fake user.
        :arg cla_done: whether this fake user has signed the CLA.
        """
        if groups is None:
            groups = []
        if isinstance(groups, six.string_types):
            groups = [groups]
        self.groups = groups
        self.username = username
        self.name = username
        self.approved_memberships = [FakeGroup("packager"), FakeGroup("design-team")]
        self.dic = {}
        self.dic["timezone"] = "Europe/Paris"
        self.cla_done = cla_done
        self.email = "test@example.com"

    def __getitem__(self, key):
        """Dict-style access backed by self.dic (e.g. user["timezone"])."""
        return self.dic[key]
if __name__ == "__main__":
    # Running this module directly executes the base model test suite.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(Modeltests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 |
stackforge/cloudkitty | setup.py | 154 | 1030 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All real packaging metadata lives in setup.cfg; pbr reads it at build time.
setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)
| apache-2.0 |
sgiavasis/C-PAC | CPAC/cwas/hats.py | 4 | 2617 | import numpy as np
# TODO: make better exception class?
def check_rank(x):
    """Raise an Exception unless design matrix `x` has full column rank."""
    n_cols = x.shape[1]
    observed_rank = np.linalg.matrix_rank(x)
    if observed_rank < n_cols:
        raise Exception(
            "matrix is rank deficient (rank %i vs cols %i)"
            % (observed_rank, n_cols))
def add_intercept(x):
    """
    Prepend an all-ones intercept column to a design matrix.

    Parameters
    ----------
    x : ndarray
        N x K design matrix (without an intercept column).

    Returns
    -------
    ndarray
        N x (K+1) matrix whose first column is all ones.
    """
    intercept = np.ones((x.shape[0], 1))
    return np.hstack((intercept, x))
def hatify(x):
    """
    Distance-based hat (projection) matrix of a design matrix.

    Parameters
    ----------
    x : ndarray
        Design matrix (e.g. with 1st column as your intercept).

    Notes
    -----
    Assumes `x` is not rank-deficient; uses the reduced QR factorization,
    so H = Q Q^T projects onto the column space of `x`.

    Returns
    -------
    ndarray
        `x.shape[0]` by `x.shape[0]` symmetric idempotent matrix.
    """
    q_factor = np.linalg.qr(x)[0]
    return q_factor.dot(q_factor.T)
def permute_design(x, cols, indexperm):
    """Return a copy of `x` with the rows of columns `cols` reordered
    according to `indexperm`; the input matrix is left untouched.
    """
    permuted = x.copy()
    permuted[:, cols] = permuted[:, cols][indexperm]
    return permuted
# make sure this function doesn't overwrite
# the original x
def gen_h(x, cols=None, indexperm=None):
    """
    Hat matrix of `x`, optionally computed after permuting rows.

    Parameters
    ----------
    x : ndarray
        Design matrix (e.g. with 1st column as your intercept).
    cols : list (optional)
        Columns whose rows are permuted (only used with `indexperm`).
    indexperm : list (optional)
        Row re-ordering applied to `x[:, cols]` before projecting.
        The input `x` is never modified.

    Returns
    -------
    ndarray
        `x.shape[0]` by `x.shape[0]` projection matrix.
    """
    design = x if indexperm is None else permute_design(x, cols, indexperm)
    return hatify(design)
def gen_h2(x, cols, indexperm=None):
    """
    Regressor-specific (permuted) hat matrix.

    Parameters
    ----------
    x : ndarray
        Design matrix (e.g. with 1st column as your intercept).
    cols : list
        Columns of interest (permuted when `indexperm` is given).
    indexperm : list (optional)
        Row re-ordering applied before projecting.

    Returns
    -------
    ndarray
        `x.shape[0]` by `x.shape[0]` matrix: the full hat matrix minus
        the projection onto all columns *except* `cols`.
    """
    full_hat = gen_h(x, cols, indexperm)
    # Subtract the projection onto everything other than the columns of
    # interest, isolating their contribution.
    other_cols = [c for c in range(x.shape[1]) if c not in cols]
    reduced = x[:, other_cols]
    return full_hat - hatify(reduced)
| bsd-3-clause |
folpindo/exercises-in-programming-style | 11-letterbox/tf-11.py | 17 | 3147 | #!/usr/bin/env python
import sys, re, operator, string
class DataStorageManager():
    """Letterbox object that models the contents of the input file."""
    _data = ''

    def dispatch(self, message):
        """Route a message: ['init', path] loads a file, ['words'] lists
        the words currently in storage."""
        tag = message[0]
        if tag == 'init':
            return self._init(message[1])
        if tag == 'words':
            return self._words()
        raise Exception("Message not understood " + tag)

    def _init(self, path_to_file):
        with open(path_to_file) as f:
            self._data = f.read()
        # Normalize: every run of non-word characters (and underscores)
        # becomes a single space; lowercase everything.
        self._data = re.sub(r'[\W_]+', ' ', self._data).lower()

    def _words(self):
        """ Returns the list words in storage"""
        return ''.join(self._data).split()
class StopWordManager():
    """Letterbox object that models the stop-word filter."""
    _stop_words = []

    def dispatch(self, message):
        """Route a message: ['init'] loads the stop-word list,
        ['is_stop_word', w] tests membership."""
        tag = message[0]
        if tag == 'init':
            return self._init()
        if tag == 'is_stop_word':
            return self._is_stop_word(message[1])
        raise Exception("Message not understood " + tag)

    def _init(self):
        with open('../stop_words.txt') as f:
            self._stop_words = f.read().split(',')
        # Single letters are treated as stop words as well.
        self._stop_words.extend(list(string.ascii_lowercase))

    def _is_stop_word(self, word):
        return word in self._stop_words
class WordFrequencyManager():
    """Letterbox object that keeps the word frequency data."""
    _word_freqs = {}

    def dispatch(self, message):
        """Route a message: ['increment_count', word] or ['sorted']."""
        if message[0] == 'increment_count':
            return self._increment_count(message[1])
        elif message[0] == 'sorted':
            return self._sorted()
        else:
            raise Exception("Message not understood " + message[0])

    def _increment_count(self, word):
        # dict.get replaces the explicit membership test/branch.
        self._word_freqs[word] = self._word_freqs.get(word, 0) + 1

    def _sorted(self):
        # .items() instead of the Python-2-only .iteritems(): sorted()
        # consumes either, so the result is identical on both versions
        # and the code no longer breaks under Python 3.
        return sorted(self._word_freqs.items(),
                      key=operator.itemgetter(1), reverse=True)
class WordFrequencyController():
    # Orchestrates the three letterbox objects via dispatch() messages.
    def dispatch(self, message):
        # ['init', path] wires everything up; ['run'] does the counting.
        if message[0] == 'init':
            return self._init(message[1])
        elif message[0] == 'run':
            return self._run()
        else:
            raise Exception("Message not understood " + message[0])
    def _init(self, path_to_file):
        # Instantiate and initialize the collaborating managers.
        self._storage_manager = DataStorageManager()
        self._stop_word_manager = StopWordManager()
        self._word_freq_manager = WordFrequencyManager()
        self._storage_manager.dispatch(['init', path_to_file])
        self._stop_word_manager.dispatch(['init'])
    def _run(self):
        # Count every non-stop word, then print the top 25
        # (Python 2 print statement below).
        for w in self._storage_manager.dispatch(['words']):
            if not self._stop_word_manager.dispatch(['is_stop_word', w]):
                self._word_freq_manager.dispatch(['increment_count', w])
        word_freqs = self._word_freq_manager.dispatch(['sorted'])
        for (w, c) in word_freqs[0:25]:
            print w, ' - ', c
#
# The main function: usage is `python tf-11.py <input-file>`.
#
wfcontroller = WordFrequencyController()
wfcontroller.dispatch(['init', sys.argv[1]])
wfcontroller.dispatch(['run'])
| mit |
diego-d5000/MisValesMd | env/lib/sre_compile.py | 123 | 19817 | # -*- coding: utf-8 -*-
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import _sre, sys
import sre_parse
from sre_constants import *
assert _sre.MAGIC == MAGIC, "SRE module mismatch"

# Largest value representable in one opcode word; depends on whether the
# C engine was built with 2- or 4-byte codes (Python 2 long literal).
if _sre.CODESIZE == 2:
    MAXCODE = 65535
else:
    MAXCODE = 0xFFFFFFFFL

_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])

# Sets of lowercase characters which have the same uppercase.
_equivalences = (
    # LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
    (0x69, 0x131), # iı
    # LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
    (0x73, 0x17f), # sſ
    # MICRO SIGN, GREEK SMALL LETTER MU
    (0xb5, 0x3bc), # µμ
    # COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
    (0x345, 0x3b9, 0x1fbe), # \u0345ιι
    # GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
    (0x3b2, 0x3d0), # βϐ
    # GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
    (0x3b5, 0x3f5), # εϵ
    # GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
    (0x3b8, 0x3d1), # θϑ
    # GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
    (0x3ba, 0x3f0), # κϰ
    # GREEK SMALL LETTER PI, GREEK PI SYMBOL
    (0x3c0, 0x3d6), # πϖ
    # GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
    (0x3c1, 0x3f1), # ρϱ
    # GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
    (0x3c2, 0x3c3), # ςσ
    # GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
    (0x3c6, 0x3d5), # φϕ
    # LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
    (0x1e61, 0x1e9b), # ṡẛ
    )

# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
                     for t in _equivalences for i in t}
def _compile(code, pattern, flags):
    # internal: compile a (sub)pattern.  Appends opcode words to `code`
    # for each (op, av) node of the parsed `pattern`.
    emit = code.append
    _len = len
    # Bind module-level lookups to locals for speed in the hot loop.
    LITERAL_CODES = _LITERAL_CODES
    REPEATING_CODES = _REPEATING_CODES
    SUCCESS_CODES = _SUCCESS_CODES
    ASSERT_CODES = _ASSERT_CODES
    # Unicode case-insensitive matching may need extra case equivalences.
    if (flags & SRE_FLAG_IGNORECASE and
            not (flags & SRE_FLAG_LOCALE) and
            flags & SRE_FLAG_UNICODE):
        fixes = _ignorecase_fixes
    else:
        fixes = None
    for op, av in pattern:
        if op in LITERAL_CODES:
            if flags & SRE_FLAG_IGNORECASE:
                lo = _sre.getlower(av, flags)
                if fixes and lo in fixes:
                    # The lowered char has case-equivalent siblings:
                    # emit an IN_IGNORE set covering all of them.
                    emit(OPCODES[IN_IGNORE])
                    skip = _len(code); emit(0)
                    if op is NOT_LITERAL:
                        emit(OPCODES[NEGATE])
                    for k in (lo,) + fixes[lo]:
                        emit(OPCODES[LITERAL])
                        emit(k)
                    emit(OPCODES[FAILURE])
                    code[skip] = _len(code) - skip
                else:
                    emit(OPCODES[OP_IGNORE[op]])
                    emit(lo)
            else:
                emit(OPCODES[op])
                emit(av)
        elif op is IN:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
                def fixup(literal, flags=flags):
                    return _sre.getlower(literal, flags)
            else:
                emit(OPCODES[op])
                fixup = None
            skip = _len(code); emit(0)
            _compile_charset(av, flags, code, fixup, fixes)
            code[skip] = _len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(OPCODES[ANY_ALL])
            else:
                emit(OPCODES[ANY])
        elif op in REPEATING_CODES:
            if flags & SRE_FLAG_TEMPLATE:
                raise error, "internal: unsupported template operator"
                # (unreachable after the raise; retained from upstream)
                emit(OPCODES[REPEAT])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = _len(code) - skip
            elif _simple(av) and op is not REPEAT:
                # Fixed-width-one body: use the cheaper *_ONE opcodes.
                if op is MAX_REPEAT:
                    emit(OPCODES[REPEAT_ONE])
                else:
                    emit(OPCODES[MIN_REPEAT_ONE])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = _len(code) - skip
            else:
                # General repeat: body followed by MAX/MIN_UNTIL.
                emit(OPCODES[REPEAT])
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                code[skip] = _len(code) - skip
                if op is MAX_REPEAT:
                    emit(OPCODES[MAX_UNTIL])
                else:
                    emit(OPCODES[MIN_UNTIL])
        elif op is SUBPATTERN:
            # av[0] is the group number (0 for non-capturing groups).
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2)
            # _compile_info(code, av[1], flags)
            _compile(code, av[1], flags)
            if av[0]:
                emit(OPCODES[MARK])
                emit((av[0]-1)*2+1)
        elif op in SUCCESS_CODES:
            emit(OPCODES[op])
        elif op in ASSERT_CODES:
            emit(OPCODES[op])
            skip = _len(code); emit(0)
            if av[0] >= 0:
                emit(0) # look ahead
            else:
                lo, hi = av[1].getwidth()
                if lo != hi:
                    raise error, "look-behind requires fixed-width pattern"
                emit(lo) # look behind
            _compile(code, av[1], flags)
            emit(OPCODES[SUCCESS])
            code[skip] = _len(code) - skip
        elif op is CALL:
            emit(OPCODES[op])
            skip = _len(code); emit(0)
            _compile(code, av, flags)
            emit(OPCODES[SUCCESS])
            code[skip] = _len(code) - skip
        elif op is AT:
            emit(OPCODES[op])
            # Translate anchors per the active flags.
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(ATCODES[av])
        elif op is BRANCH:
            emit(OPCODES[op])
            tail = []
            tailappend = tail.append
            for av in av[1]:
                skip = _len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(OPCODES[JUMP])
                tailappend(_len(code)); emit(0)
                code[skip] = _len(code) - skip
            emit(0) # end of branch
            # Patch every alternative's JUMP to land past the branch.
            for tail in tail:
                code[tail] = _len(code) - tail
        elif op is CATEGORY:
            emit(OPCODES[op])
            if flags & SRE_FLAG_LOCALE:
                av = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                av = CH_UNICODE[av]
            emit(CHCODES[av])
        elif op is GROUPREF:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
            else:
                emit(OPCODES[op])
            emit(av-1)
        elif op is GROUPREF_EXISTS:
            emit(OPCODES[op])
            emit(av[0]-1)
            skipyes = _len(code); emit(0)
            _compile(code, av[1], flags)
            if av[2]:
                # Conditional with a "no" branch: yes-part jumps over it.
                emit(OPCODES[JUMP])
                skipno = _len(code); emit(0)
                code[skipyes] = _len(code) - skipyes + 1
                _compile(code, av[2], flags)
                code[skipno] = _len(code) - skipno
            else:
                code[skipyes] = _len(code) - skipyes + 1
        else:
            raise ValueError, ("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None, fixes=None):
    # compile charset subprogram: optimize the set first, then emit one
    # opcode sequence per remaining item, terminated by FAILURE.
    emit = code.append
    for op, av in _optimize_charset(charset, fixup, fixes,
                                    flags & SRE_FLAG_UNICODE):
        emit(OPCODES[op])
        if op is NEGATE:
            pass
        elif op is LITERAL:
            emit(av)
        elif op is RANGE:
            emit(av[0])
            emit(av[1])
        elif op is CHARSET:
            code.extend(av)
        elif op is BIGCHARSET:
            code.extend(av)
        elif op is CATEGORY:
            # Categories depend on the locale/unicode mode.
            if flags & SRE_FLAG_LOCALE:
                emit(CHCODES[CH_LOCALE[av]])
            elif flags & SRE_FLAG_UNICODE:
                emit(CHCODES[CH_UNICODE[av]])
            else:
                emit(CHCODES[av])
        else:
            raise error, "internal: unsupported set operator"
    emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup, fixes, isunicode):
    # internal: optimize character set.  Builds a byte map of member
    # characters, then picks the most compact representation:
    # literal/range pairs, a 256-bit CHARSET, or a BIGCHARSET.
    out = []
    tail = []
    charmap = bytearray(256)
    for op, av in charset:
        while True:
            try:
                if op is LITERAL:
                    if fixup:
                        i = fixup(av)
                        charmap[i] = 1
                        if fixes and i in fixes:
                            for k in fixes[i]:
                                charmap[k] = 1
                    else:
                        charmap[av] = 1
                elif op is RANGE:
                    r = range(av[0], av[1]+1)
                    if fixup:
                        r = map(fixup, r)
                    if fixup and fixes:
                        for i in r:
                            charmap[i] = 1
                            if i in fixes:
                                for k in fixes[i]:
                                    charmap[k] = 1
                    else:
                        for i in r:
                            charmap[i] = 1
                elif op is NEGATE:
                    out.append((op, av))
                else:
                    # Categories etc. can't go in the byte map.
                    tail.append((op, av))
            except IndexError:
                if len(charmap) == 256:
                    # character set contains non-UCS1 character codes
                    charmap += b'\0' * 0xff00
                    continue
                # character set contains non-BMP character codes
                if fixup and isunicode and op is RANGE:
                    lo, hi = av
                    ranges = [av]
                    # There are only two ranges of cased astral characters:
                    # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi).
                    _fixup_range(max(0x10000, lo), min(0x11fff, hi),
                                 ranges, fixup)
                    for lo, hi in ranges:
                        if lo == hi:
                            tail.append((LITERAL, hi))
                        else:
                            tail.append((RANGE, (lo, hi)))
                else:
                    tail.append((op, av))
            break

    # compress character map: look for at most two runs of set bits.
    runs = []
    q = 0
    while True:
        p = charmap.find(b'\1', q)
        if p < 0:
            break
        if len(runs) >= 2:
            runs = None
            break
        q = charmap.find(b'\0', p)
        if q < 0:
            runs.append((p, len(charmap)))
            break
        runs.append((p, q))
    if runs is not None:
        # use literal/range
        for p, q in runs:
            if q - p == 1:
                out.append((LITERAL, p))
            else:
                out.append((RANGE, (p, q - 1)))
        out += tail
        # if the case was changed or new representation is more compact
        if fixup or len(out) < len(charset):
            return out
        # else original character set is good enough
        return charset

    # use bitmap
    if len(charmap) == 256:
        data = _mk_bitmap(charmap)
        out.append((CHARSET, data))
        out += tail
        return out

    # To represent a big charset, first a bitmap of all characters in the
    # set is constructed. Then, this bitmap is sliced into chunks of 256
    # characters, duplicate chunks are eliminated, and each chunk is
    # given a number. In the compiled expression, the charset is
    # represented by a 32-bit word sequence, consisting of one word for
    # the number of different chunks, a sequence of 256 bytes (64 words)
    # of chunk numbers indexed by their original chunk position, and a
    # sequence of 256-bit chunks (8 words each).

    # Compression is normally good: in a typical charset, large ranges of
    # Unicode will be either completely excluded (e.g. if only cyrillic
    # letters are to be matched), or completely included (e.g. if large
    # subranges of Kanji match). These ranges will be represented by
    # chunks of all one-bits or all zero-bits.

    # Matching can be also done efficiently: the more significant byte of
    # the Unicode character is an index into the chunk number, and the
    # less significant byte is a bit index in the chunk (just like the
    # CHARSET matching).

    # In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
    # of the basic multilingual plane; an efficient representation
    # for all of Unicode has not yet been developed.

    charmap = bytes(charmap) # should be hashable
    comps = {}
    mapping = bytearray(256)
    block = 0
    data = bytearray()
    for i in range(0, 65536, 256):
        chunk = charmap[i: i + 256]
        if chunk in comps:
            mapping[i // 256] = comps[chunk]
        else:
            mapping[i // 256] = comps[chunk] = block
            block += 1
            data += chunk
    data = _mk_bitmap(data)
    data[0:0] = [block] + _bytes_to_codes(mapping)
    out.append((BIGCHARSET, data))
    out += tail
    return out
def _fixup_range(lo, hi, ranges, fixup):
for i in map(fixup, range(lo, hi+1)):
for k, (lo, hi) in enumerate(ranges):
if i < lo:
if l == lo - 1:
ranges[k] = (i, hi)
else:
ranges.insert(k, (i, i))
break
elif i > hi:
if i == hi + 1:
ranges[k] = (lo, i)
break
else:
break
else:
ranges.append((i, i))
_CODEBITS = _sre.CODESIZE * 8          # bits per engine code word
_BITS_TRANS = b'0' + b'1' * 255        # byte -> ASCII '0'/'1' table

def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
    # Pack a 0/1 byte map into a list of engine code words: translate
    # to an ASCII bit string, reverse it, and parse fixed-size slices
    # as base-2 integers (defaults bind lookups at def time for speed).
    s = bytes(bits).translate(_BITS_TRANS)[::-1]
    return [_int(s[i - _CODEBITS: i], 2)
            for i in range(len(s), 0, -_CODEBITS)]
def _bytes_to_codes(b):
    # Convert block indices to word array, reinterpreting the raw bytes
    # as unsigned integers of the engine's code-word size.
    import array
    if _sre.CODESIZE == 2:
        code = 'H'
    else:
        code = 'I'
    a = array.array(code, bytes(b))
    # Sanity-check the reinterpretation covered every byte exactly.
    assert a.itemsize == _sre.CODESIZE
    assert len(a) * a.itemsize == len(b)
    return a.tolist()
def _simple(av):
    """True when the repeat target `av` is "simple": it matches exactly
    one character and is not a capturing subpattern.
    """
    lo, hi = av[2].getwidth()
    if lo != 1 or hi != 1:
        return False
    return av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
    # internal: compile an info block. in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    lo, hi = pattern.getwidth()
    if lo == 0:
        return # not worth it
    # look for a literal prefix
    prefix = []
    prefixappend = prefix.append
    prefix_skip = 0
    charset = [] # not used
    charsetappend = charset.append
    if not (flags & SRE_FLAG_IGNORECASE):
        # look for literal prefix
        for op, av in pattern.data:
            if op is LITERAL:
                if len(prefix) == prefix_skip:
                    prefix_skip = prefix_skip + 1
                prefixappend(av)
            elif op is SUBPATTERN and len(av[1]) == 1:
                # A one-element group: its literal still extends the
                # prefix, but does not extend prefix_skip.
                op, av = av[1][0]
                if op is LITERAL:
                    prefixappend(av)
                else:
                    break
            else:
                break
        # if no prefix, look for charset prefix
        if not prefix and pattern.data:
            op, av = pattern.data[0]
            if op is SUBPATTERN and av[1]:
                op, av = av[1][0]
                if op is LITERAL:
                    charsetappend((op, av))
                elif op is BRANCH:
                    c = []
                    cappend = c.append
                    for p in av[1]:
                        if not p:
                            break
                        op, av = p[0]
                        if op is LITERAL:
                            cappend((op, av))
                        else:
                            break
                    else:
                        charset = c
            elif op is BRANCH:
                c = []
                cappend = c.append
                for p in av[1]:
                    if not p:
                        break
                    op, av = p[0]
                    if op is LITERAL:
                        cappend((op, av))
                    else:
                        break
                else:
                    charset = c
            elif op is IN:
                charset = av
##     if prefix:
##         print "*** PREFIX", prefix, prefix_skip
##     if charset:
##         print "*** CHARSET", charset
    # add an info block
    emit = code.append
    emit(OPCODES[INFO])
    skip = len(code); emit(0)
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if len(prefix) == prefix_skip == len(pattern.data):
            mask = mask + SRE_INFO_LITERAL
    elif charset:
        mask = mask + SRE_INFO_CHARSET
    emit(mask)
    # pattern length
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    if hi < MAXCODE:
        emit(hi)
    else:
        emit(0)
    # add literal prefix
    if prefix:
        emit(len(prefix)) # length
        emit(prefix_skip) # skip
        code.extend(prefix)
        # generate overlap table (KMP-style failure function;
        # Python 2 xrange)
        table = [-1] + ([0]*len(prefix))
        for i in xrange(len(prefix)):
            table[i+1] = table[i]+1
            while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
                table[i+1] = table[table[i+1]-1]+1
        code.extend(table[1:]) # don't store first entry
    elif charset:
        _compile_charset(charset, flags, code)
    code[skip] = len(code) - skip
# Determine which types count as "strings" on this interpreter: the
# name `unicode` only exists on Python 2, so probe for it.
try:
    unicode
except NameError:
    STRING_TYPES = (type(""),)
else:
    STRING_TYPES = (type(""), type(unicode("")))

def isstring(obj):
    """Return 1 if obj is a (byte or unicode) string, else 0."""
    for string_type in STRING_TYPES:
        if isinstance(obj, string_type):
            return 1
    return 0
def _code(p, flags):
    # Lower a parsed pattern `p` to the flat opcode list: info header
    # first, then the pattern body, terminated by SUCCESS.
    flags = p.pattern.flags | flags
    code = []
    # compile info block
    _compile_info(code, p, flags)
    # compile the pattern
    _compile(code, p.data, flags)
    code.append(OPCODES[SUCCESS])
    return code
def compile(p, flags=0):
    # internal: convert pattern list to internal format
    if isstring(p):
        # Raw pattern string: keep the original text and parse it.
        pattern = p
        p = sre_parse.parse(p, flags)
    else:
        # Already-parsed sre_parse.SubPattern; no source string available.
        pattern = None
    code = _code(p, flags)
    # print code
    # XXX: <fl> get rid of this limitation!
    if p.pattern.groups > 100:
        raise AssertionError(
            "sorry, but this version only supports 100 named groups"
            )
    # map in either direction: name -> group number, and group number -> name
    groupindex = p.pattern.groupdict
    indexgroup = [None] * p.pattern.groups
    for k, i in groupindex.items():
        indexgroup[i] = k
    # hand the compiled program to the C matching engine
    return _sre.compile(
        pattern, flags | p.pattern.flags, code,
        p.pattern.groups-1,
        groupindex, indexgroup
        )
| mit |
a10networks/a10-neutron-lbaas | a10_neutron_lbaas/db/migration/alembic_migrations/versions/bc5626a5af2a_nova_instance_id_not_required.py | 2 | 1173 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova instance ID not required
Revision ID: bc5626a5af2a
Revises: 3c7123f2aeba
Create Date: 2016-10-27 18:47:41.163902
"""
# revision identifiers, used by Alembic.
revision = 'bc5626a5af2a'
down_revision = '3c7123f2aeba'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Relax the NOT NULL constraint on nova_instance_id."""
    # op.alter_column() takes (table_name, column_name, **kw); the original
    # passed a sa.Column object as the column-name argument, which fails at
    # runtime.
    op.alter_column('a10_device_instances', 'nova_instance_id',
                    existing_type=sa.String(36), nullable=True)
def downgrade():
    """Restore the NOT NULL constraint on nova_instance_id."""
    # op.alter_column() takes (table_name, column_name, **kw); the original
    # passed a sa.Column object as the column-name argument, which fails at
    # runtime.
    op.alter_column('a10_device_instances', 'nova_instance_id',
                    existing_type=sa.String(36), nullable=False)
| apache-2.0 |
OpenUpgrade-dev/OpenUpgrade | addons/payment_transfer/models/payment_acquirer.py | 107 | 3763 | # -*- coding: utf-'8' "-*-"
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.osv import osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
import logging
import pprint
_logger = logging.getLogger(__name__)
class TransferPaymentAcquirer(osv.Model):
    """Payment acquirer for manual wire transfers.

    Extends payment.acquirer with the 'transfer' provider, whose feedback
    is collected on a local controller route rather than a remote gateway.
    """
    _inherit = 'payment.acquirer'

    def _get_providers(self, cr, uid, context=None):
        # Register 'transfer' as an additional provider choice.
        providers = super(TransferPaymentAcquirer, self)._get_providers(cr, uid, context=context)
        providers.append(['transfer', 'Wire Transfer'])
        return providers

    def transfer_get_form_action_url(self, cr, uid, id, context=None):
        # Local controller endpoint; no external gateway is involved.
        return '/payment/transfer/feedback'

    def _format_transfer_data(self, cr, uid, context=None):
        """Build the default HTML post-payment message listing the
        company's visible bank accounts."""
        bank_ids = [bank.id for bank in self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.bank_ids]
        # filter only bank accounts marked as visible
        bank_ids = self.pool['res.partner.bank'].search(cr, uid, [('id', 'in', bank_ids), ('footer', '=', True)], context=context)
        accounts = self.pool['res.partner.bank'].name_get(cr, uid, bank_ids, context=context)
        bank_title = _('Bank Accounts') if len(accounts) > 1 else _('Bank Account')
        bank_accounts = ''.join(['<ul>'] + ['<li>%s</li>' % name for id, name in accounts] + ['</ul>'])
        post_msg = '''<div>
<h3>Please use the following transfer details</h3>
<h4>%(bank_title)s</h4>
%(bank_accounts)s
<h4>Communication</h4>
<p>Please use the order name as communication reference.</p>
</div>''' % {
            'bank_title': bank_title,
            'bank_accounts': bank_accounts,
        }
        return post_msg

    def create(self, cr, uid, values, context=None):
        """ Hook in create to create a default post_msg. This is done in create
        to have access to the name and other creation values. If no post_msg
        or a void post_msg is given at creation, generate a default one. """
        if values.get('name') == 'transfer' and not values.get('post_msg'):
            values['post_msg'] = self._format_transfer_data(cr, uid, context=context)
        return super(TransferPaymentAcquirer, self).create(cr, uid, values, context=context)
class TransferPaymentTransaction(osv.Model):
    """Transaction hooks for the wire-transfer acquirer."""
    _inherit = 'payment.transaction'

    def _transfer_form_get_tx_from_data(self, cr, uid, data, context=None):
        """Find the unique transaction matching the posted reference.

        Raises ValidationError when zero or more than one transaction
        matches.
        """
        reference, amount, currency_name = data.get('reference'), data.get('amount'), data.get('currency_name')
        tx_ids = self.search(
            cr, uid, [
                ('reference', '=', reference),
            ], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'received data for reference %s' % (pprint.pformat(reference))
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        return self.browse(cr, uid, tx_ids[0], context=context)

    def _transfer_form_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (name, received, expected) tuples for posted
        values that disagree with the stored transaction."""
        invalid_parameters = []
        # Compare amounts at 2 decimal places to avoid float noise.
        if float_compare(float(data.get('amount', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
        # NOTE(review): reads data['currency'] while the lookup above reads
        # data['currency_name'] — confirm which key the controller posts.
        if data.get('currency') != tx.currency_id.name:
            invalid_parameters.append(('currency', data.get('currency'), tx.currency_id.name))
        return invalid_parameters

    def _transfer_form_validate(self, cr, uid, tx, data, context=None):
        # Wire transfers cannot be confirmed automatically: mark pending.
        _logger.info('Validated transfer payment for tx %s: set as pending' % (tx.reference))
        return tx.write({'state': 'pending'})
| agpl-3.0 |
hslatman/spiderfoot | modules/sfp_socialprofiles.py | 3 | 6933 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_socialprofiles
# Purpose: Obtains social media profiles of any identified human names.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 12/04/2014
# Copyright: (c) Steve Micallef 2014
# Licence: GPL
# -------------------------------------------------------------------------------
import random
import re
import time
import urllib
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
# Maps site name -> [search query template, profile-URL extraction regex].
# {0} in the template is replaced with the person's name; the regex is run
# over the raw search-result HTML.
sites = {
    # Search string to use, domain name the profile will sit on within
    # those search results.
    "Facebook": ['+intitle:%22{0}%22%20+site:facebook.com',
                 '"(https?://[a-z\.]*facebook.[a-z\.]+/[^\"<> ]+)"'],
    "Google+": ['+intitle:%22{0}%22%20+site:plus.google.com',
                '"(https?://plus.google.[a-z\.]+/\d+[^\"<>\/ ]+)"'],
    "LinkedIn": ['+intitle:%22{0}%22%20+site:linkedin.com',
                 '"(https?://[a-z\.]*linkedin.[a-z\.]+/[^\"<> ]+)"']
}
class sfp_socialprofiles(SpiderFootPlugin):
    """Social Media Profiles:Identify the social media profiles for human names identified."""
    # NOTE: the docstring above is parsed by SpiderFoot as
    # "<name>:<description>" module metadata -- do not reformat it.

    # Default options
    opts = {
        'pages': 1,
        'method': "yahoo",
        'tighten': True
    }

    # Option descriptions
    optdescs = {
        'pages': "Number of search engine pages of identified profiles to iterate through.",
        'tighten': "Tighten results by expecting to find the keyword of the target domain mentioned in the social media profile page results?",
        'method': "Search engine to use: google, yahoo or bing."
    }

    # Keywords derived from the target's domain names; filled lazily in
    # handleEvent.
    keywords = None
    # Event data already processed; prevents duplicate lookups.
    results = list()

    def setup(self, sfc, userOpts=dict()):
        """Initialise the plugin with the SpiderFoot context and user options."""
        # NOTE(review): mutable default argument; harmless here because
        # userOpts is only read, but worth cleaning up.
        self.sf = sfc
        self.results = list()

        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["HUMAN_NAME"]

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["SOCIAL_MEDIA", "SEARCH_ENGINE_WEB_CONTENT"]

    def yahooCleaner(self, string):
        """re.sub callback: decode the profile URL captured from a Yahoo
        redirect link and re-quote it."""
        ret = "\"" + urllib.unquote(string.group(1)) + "\""
        return ret

    # Handle events sent to this module
    def handleEvent(self, event):
        """Search each configured site for profiles matching the human name
        carried by *event*, emitting SOCIAL_MEDIA events for hits."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        self.currentEventSrc = event

        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)

        # Don't look up stuff twice
        if eventData in self.results:
            self.sf.debug("Skipping " + eventData + " as already mapped.")
            return None
        else:
            self.results.append(eventData)

        # Lazily derive the target-domain keywords used by the 'tighten'
        # relationship check below.
        if self.keywords is None:
            self.keywords = self.sf.domainKeywords(self.getTarget().getNames(),
                                                   self.opts['_internettlds'])

        for site in sites.keys():
            s = unicode(sites[site][0]).format(eventData)
            searchStr = s.replace(" ", "%20")
            searchDom = sites[site][1]

            if self.opts['method'].lower() == "google":
                results = self.sf.googleIterate(searchStr, dict(limit=self.opts['pages'],
                                                useragent=self.opts['_useragent'],
                                                timeout=self.opts['_fetchtimeout']))

            if self.opts['method'].lower() == "yahoo":
                results = self.sf.yahooIterate(searchStr, dict(limit=self.opts['pages'],
                                               useragent=self.opts['_useragent'],
                                               timeout=self.opts['_fetchtimeout']))

            if self.opts['method'].lower() == "bing":
                results = self.sf.bingIterate(searchStr, dict(limit=self.opts['pages'],
                                              useragent=self.opts['_useragent'],
                                              timeout=self.opts['_fetchtimeout']))

            if results is None:
                self.sf.info("No data returned from " + self.opts['method'] + ".")
                return None

            if self.checkForStop():
                return None

            # Random pause between engines to avoid being rate-limited.
            pauseSecs = random.randint(4, 15)
            self.sf.debug("Pausing for " + str(pauseSecs))
            time.sleep(pauseSecs)

            for key in results.keys():
                instances = list()
                # Yahoo requires some additional parsing
                if self.opts['method'].lower() == "yahoo":
                    res = re.sub("RU=(.[^\/]+)\/RK=", self.yahooCleaner,
                                 results[key], 0)
                else:
                    res = results[key]

                matches = re.findall(searchDom, res, re.IGNORECASE)
                if matches is not None:
                    for match in matches:
                        if match in instances:
                            continue
                        else:
                            instances.append(match)

                        if self.checkForStop():
                            return None

                        # Fetch the profile page if we are checking
                        # for a firm relationship.
                        if self.opts['tighten']:
                            pres = self.sf.fetchUrl(match, timeout=self.opts['_fetchtimeout'],
                                                    useragent=self.opts['_useragent'])

                            if pres['content'] is None:
                                continue
                            else:
                                found = False
                                # Require a target-domain keyword somewhere in
                                # the profile page before reporting it.
                                for kw in self.keywords:
                                    if re.search("[^a-zA-Z\-\_]" + kw + "[^a-zA-Z\-\_]", pres['content'], re.IGNORECASE):
                                        found = True
                                if not found:
                                    continue

                        self.sf.info("Social Media Profile found at " + site + ": " + match)
                        evt = SpiderFootEvent("SOCIAL_MEDIA", match,
                                              self.__name__, event)
                        self.notifyListeners(evt)

                # Submit the bing results for analysis
                evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", res,
                                      self.__name__, event)
                self.notifyListeners(evt)
# End of sfp_socialprofiles class
| gpl-2.0 |
untitaker/werkzeug | examples/couchy/views.py | 44 | 1999 | from werkzeug.utils import redirect
from werkzeug.exceptions import NotFound
from couchy.utils import render_template, expose, \
validate_url, url_for, Pagination
from couchy.models import URL
@expose('/')
def new(request):
    """Show the URL-shortening form and handle its submission."""
    error = url = ''
    if request.method == 'POST':
        url = request.form.get('url')
        alias = request.form.get('alias')
        if not validate_url(url):
            error = "I'm sorry but you cannot shorten this URL."
        elif alias:
            # Validate the user-chosen alias before using it as the id.
            if len(alias) > 140:
                error = 'Your alias is too long'
            elif '/' in alias:
                error = 'Your alias might not include a slash'
            elif URL.load(alias):
                error = 'The alias you have requested exists already'
        if not error:
            # 'private' checkbox absent -> the link is publicly listed.
            url = URL(target=url, public='private' not in request.form, shorty_id=alias if alias else None)
            url.store()
            uid = url.id
            return redirect(url_for('display', uid=uid))
    # Re-render the form, echoing any error and the submitted URL.
    return render_template('new.html', error=error, url=url)
@expose('/display/<uid>')
def display(request, uid):
    """Show the details page for a single shortened URL."""
    entry = URL.load(uid)
    if not entry:
        # Unknown id -> 404.
        raise NotFound()
    return render_template('display.html', url=entry)
@expose('/u/<uid>')
def link(request, uid):
    """Redirect to the target of the shortened URL."""
    entry = URL.load(uid)
    if not entry:
        # Unknown id -> 404.
        raise NotFound()
    # 301: shortened links are treated as permanent.
    return redirect(entry.target, 301)
@expose('/list/', defaults={'page': 1})
@expose('/list/<int:page>')
def list(request, page):
    """Paginated listing of all public shortened URLs."""
    def wrap(doc):
        # Re-attach the document id before wrapping the raw row in a URL.
        data = doc.value
        data['_id'] = doc.id
        return URL.wrap(data)
    # CouchDB temporary view selecting only documents flagged public.
    code = '''function(doc) { if (doc.public){ map([doc._id], doc); }}'''
    docResults = URL.query(code)
    results = [wrap(doc) for doc in docResults]
    pagination = Pagination(results, 1, page, 'list')
    # Pages past the end (other than page 1) are a 404.
    if pagination.page > 1 and not pagination.entries:
        raise NotFound()
    return render_template('list.html', pagination=pagination)
def not_found(request):
    """Render the generic 404 page."""
    return render_template('not_found.html')
| bsd-3-clause |
ASMlover/study | python/src/test/test_math.py | 1 | 1941 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
import my_math
class ProductTestCase(unittest.TestCase):
    """Verify my_math.product against Python's * operator."""

    def testIntegers(self):
        # product() must agree with * for every integer pair in [-10, 10).
        # assertTrue: failUnless is a deprecated alias, removed in 3.12.
        for x in xrange(-10, 10):
            for y in xrange(-10, 10):
                p = my_math.product(x, y)
                self.assertTrue(p == x * y, 'Integer multiplication failed')

    def testFloats(self):
        for x in xrange(-10, 10):
            for y in xrange(-10, 10):
                # Bind to fresh names: the original rebound x and y here, so
                # x was divided by 10 again on every inner iteration and most
                # float pairs were never actually tested.
                fx = x / 10.0
                fy = y / 10.0
                p = my_math.product(fx, fy)
                self.assertTrue(p == fx * fy, 'Float multiplication failed')


if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
Yoric/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/memorizingfile.py | 680 | 3709 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
    """MemorizingFile wraps a file and memorizes lines read by readline.

    Note that data read by other methods are not memorized. This behavior
    is good enough for memorizing lines SimpleHTTPServer reads before
    the control reaches WebSocketRequestHandler.
    """

    # NOTE(review): sys.maxint exists only on Python 2; a Python 3 port
    # would need sys.maxsize here.
    def __init__(self, file_, max_memorized_lines=sys.maxint):
        """Construct an instance.

        Args:
            file_: the file object to wrap.
            max_memorized_lines: the maximum number of lines to memorize.
                Only the first max_memorized_lines are memorized.
                Default: sys.maxint.
        """
        self._file = file_
        self._memorized_lines = []
        self._max_memorized_lines = max_memorized_lines
        # Push-back buffer: holds the tail of a line that was only partially
        # returned by a size-limited readline() call.
        self._buffered = False
        self._buffered_line = None

    def __getattribute__(self, name):
        # Serve the wrapper's own attributes; delegate everything else
        # (read, write, close, ...) straight to the wrapped file object.
        if name in ('_file', '_memorized_lines', '_max_memorized_lines',
                    '_buffered', '_buffered_line', 'readline',
                    'get_memorized_lines'):
            return object.__getattribute__(self, name)
        return self._file.__getattribute__(name)

    def readline(self, size=-1):
        """Override file.readline and memorize the line read.

        Note that even if size is specified and smaller than actual size,
        the whole line will be read out from underlying file object by
        subsequent readline calls.
        """
        if self._buffered:
            # Continue from the tail left over by a previous partial read.
            line = self._buffered_line
            self._buffered = False
        else:
            line = self._file.readline()
            # Memorize only complete lines freshly read from the file.
            if line and len(self._memorized_lines) < self._max_memorized_lines:
                self._memorized_lines.append(line)
        if size >= 0 and size < len(line):
            # Return only `size` characters and buffer the remainder.
            self._buffered = True
            self._buffered_line = line[size:]
            return line[:size]
        return line

    def get_memorized_lines(self):
        """Get lines memorized so far."""
        return self._memorized_lines
# vi:sts=4 sw=4 et
| mpl-2.0 |
DJMuggs/ansible-modules-extras | monitoring/zabbix_maintenance.py | 8 | 11903 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: zabbix_maintenance
short_description: Create Zabbix maintenance windows
description:
- This module will let you create Zabbix maintenance windows.
version_added: "1.8"
author: '"Alexander Bulimov (@abulimov)" <lazywolf0@gmail.com>'
requirements:
- "python >= 2.6"
- zabbix-api
options:
state:
description:
- Create or remove a maintenance window.
required: false
default: present
choices: [ "present", "absent" ]
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
default: null
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
host_names:
description:
- Hosts to manage maintenance window for.
Separate multiple hosts with commas.
C(host_name) is an alias for C(host_names).
B(Required) option when C(state) is I(present)
and no C(host_groups) specified.
required: false
default: null
aliases: [ "host_name" ]
host_groups:
description:
- Host groups to manage maintenance window for.
Separate multiple groups with commas.
C(host_group) is an alias for C(host_groups).
B(Required) option when C(state) is I(present)
and no C(host_names) specified.
required: false
default: null
aliases: [ "host_group" ]
minutes:
description:
- Length of maintenance window in minutes.
required: false
default: 10
name:
description:
- Unique name of maintenance window.
required: true
desc:
description:
- Short description of maintenance window.
required: true
default: Created by Ansible
collect_data:
description:
- Type of maintenance. With data collection, or without.
required: false
default: "true"
notes:
- Useful for setting hosts in maintenance mode before big update,
and removing maintenance window after update.
- Module creates maintenance window from now() to now() + minutes,
so if Zabbix server's time and host's time are not synchronized,
you will get strange results.
- Install required module with 'pip install zabbix-api' command.
- Checks existance only by maintenance name.
'''
EXAMPLES = '''
# Create maintenance window named "Update of www1"
# for host www1.example.com for 90 minutes
- zabbix_maintenance: name="Update of www1"
host_name=www1.example.com
state=present
minutes=90
server_url=https://monitoring.example.com
login_user=ansible
login_password=pAsSwOrD
# Create maintenance window named "Mass update"
# for host www1.example.com and host groups Office and Dev
- zabbix_maintenance: name="Update of www1"
host_name=www1.example.com
host_groups=Office,Dev
state=present
server_url=https://monitoring.example.com
login_user=ansible
login_password=pAsSwOrD
# Create maintenance window named "update"
# for hosts www1.example.com and db1.example.com and without data collection.
- zabbix_maintenance: name=update
host_names=www1.example.com,db1.example.com
state=present
collect_data=false
server_url=https://monitoring.example.com
login_user=ansible
login_password=pAsSwOrD
# Remove maintenance window named "Test1"
- zabbix_maintenance: name=Test1
state=absent
server_url=https://monitoring.example.com
login_user=ansible
login_password=pAsSwOrD
'''
import datetime
import time
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc):
    """Create a Zabbix maintenance window via the API.

    The window runs from *start_time* (epoch seconds) for *period* seconds.
    Returns (rc, None, error): rc is 0 on success, 1 on failure with the
    message in *error*.
    """
    end_time = start_time + period
    try:
        zbx.maintenance.create(
            {
                "groupids": group_ids,
                "hostids": host_ids,
                "name": name,
                "maintenance_type": maintenance_type,
                "active_since": str(start_time),
                "active_till": str(end_time),
                "description": desc,
                "timeperiods": [{
                    "timeperiod_type": "0",
                    "start_date": str(start_time),
                    "period": str(period),
                }]
            }
        )
    # Exception rather than BaseException so KeyboardInterrupt/SystemExit
    # still propagate instead of being reported as API errors.
    except Exception as e:
        return 1, None, str(e)
    return 0, None, None
def get_maintenance_id(zbx, name):
    """Look up the ids of all maintenance windows named *name*.

    Returns (rc, maintenance_ids, error).
    """
    try:
        result = zbx.maintenance.get(
            {
                "filter":
                {
                    "name": name,
                }
            }
        )
    # Narrowed from BaseException so interpreter-exit signals propagate.
    except Exception as e:
        return 1, None, str(e)
    # One name may match several maintenance entries; collect all ids.
    maintenance_ids = [res["maintenanceid"] for res in result]
    return 0, maintenance_ids, None
def delete_maintenance(zbx, maintenance_id):
    """Delete the given maintenance window id(s); returns (rc, None, error)."""
    try:
        zbx.maintenance.delete(maintenance_id)
    # Narrowed from BaseException so interpreter-exit signals propagate.
    except Exception as e:
        return 1, None, str(e)
    return 0, None, None
def check_maintenance(zbx, name):
    """Return (rc, exists, error): whether a maintenance window named
    *name* is already defined in Zabbix."""
    try:
        result = zbx.maintenance.exists(
            {
                "name": name
            }
        )
    # Narrowed from BaseException so interpreter-exit signals propagate.
    except Exception as e:
        return 1, None, str(e)
    return 0, result, None
def get_group_ids(zbx, host_groups):
    """Resolve host group names to Zabbix group ids.

    Returns (rc, group_ids, error); fails on the first unknown group.
    """
    group_ids = []
    for group in host_groups:
        try:
            result = zbx.hostgroup.get(
                {
                    "output": "extend",
                    "filter":
                    {
                        "name": group
                    }
                }
            )
        # Narrowed from BaseException so interpreter-exit signals propagate.
        except Exception as e:
            return 1, None, str(e)
        # An empty result means the group does not exist.
        if not result:
            return 1, None, "Group id for group %s not found" % group
        group_ids.append(result[0]["groupid"])
    return 0, group_ids, None
def get_host_ids(zbx, host_names):
    """Resolve host names to Zabbix host ids.

    Returns (rc, host_ids, error); fails on the first unknown host.
    """
    host_ids = []
    for host in host_names:
        try:
            result = zbx.host.get(
                {
                    "output": "extend",
                    "filter":
                    {
                        "name": host
                    }
                }
            )
        # Narrowed from BaseException so interpreter-exit signals propagate.
        except Exception as e:
            return 1, None, str(e)
        # An empty result means the host does not exist.
        if not result:
            return 1, None, "Host id for host %s not found" % host
        host_ids.append(result[0]["hostid"])
    return 0, host_ids, None
def main():
    """Ansible entry point: create or remove a Zabbix maintenance window."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            server_url=dict(required=True, default=None, aliases=['url']),
            host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
            minutes=dict(type='int', required=False, default=10),
            host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
            login_user=dict(required=True),
            login_password=dict(required=True, no_log=True),
            name=dict(required=True),
            desc=dict(required=False, default="Created by Ansible"),
            collect_data=dict(type='bool', required=False, default=True),
        ),
        supports_check_mode=True,
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")

    host_names = module.params['host_names']
    host_groups = module.params['host_groups']
    state = module.params['state']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    minutes = module.params['minutes']
    name = module.params['name']
    desc = module.params['desc']
    server_url = module.params['server_url']
    collect_data = module.params['collect_data']
    # Zabbix maintenance_type: 0 = with data collection, 1 = without.
    if collect_data:
        maintenance_type = 0
    else:
        maintenance_type = 1

    try:
        zbx = ZabbixAPI(server_url)
        zbx.login(login_user, login_password)
    except BaseException as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    changed = False

    if state == "present":
        # Window runs from "now" (module host's local clock) for `minutes`
        # minutes; see the module notes about clock synchronisation.
        now = datetime.datetime.now()
        start_time = time.mktime(now.timetuple())
        period = 60 * int(minutes)  # N * 60 seconds

        if host_groups:
            (rc, group_ids, error) = get_group_ids(zbx, host_groups)
            if rc != 0:
                module.fail_json(msg="Failed to get group_ids: %s" % error)
        else:
            group_ids = []

        if host_names:
            (rc, host_ids, error) = get_host_ids(zbx, host_names)
            if rc != 0:
                module.fail_json(msg="Failed to get host_ids: %s" % error)
        else:
            host_ids = []

        (rc, exists, error) = check_maintenance(zbx, name)
        if rc != 0:
            module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))

        # Existence is keyed on the window name only; an existing window is
        # left untouched, which makes repeated runs idempotent.
        if not exists:
            if not host_names and not host_groups:
                module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")

            if module.check_mode:
                changed = True
            else:
                (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Failed to create maintenance: %s" % error)

    if state == "absent":
        (rc, exists, error) = check_maintenance(zbx, name)
        if rc != 0:
            module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))

        if exists:
            (rc, maintenance, error) = get_maintenance_id(zbx, name)
            if rc != 0:
                module.fail_json(msg="Failed to get maintenance id: %s" % error)
            if maintenance:
                if module.check_mode:
                    changed = True
                else:
                    # delete accepts the full list of matching ids.
                    (rc, _, error) = delete_maintenance(zbx, maintenance)
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Failed to remove maintenance: %s" % error)

    module.exit_json(changed=changed)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
jonnor/FreeCAD | src/Mod/Arch/importOBJ.py | 8 | 5530 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD, DraftGeomUtils, Part, Draft
from DraftTools import translate
p = Draft.precision()
if open.__module__ == '__builtin__':
pythonopen = open
def findVert(aVertex, aList):
    """Find aVertex in aList by coordinates; return its index, or None.

    Coordinates are compared after rounding to the Draft precision `p`,
    so tiny floating-point differences do not prevent a match.
    """
    # Round the probe vertex once instead of on every loop iteration.
    target = (round(aVertex.X, p), round(aVertex.Y, p), round(aVertex.Z, p))
    for i, v in enumerate(aList):
        if (round(v.X, p), round(v.Y, p), round(v.Z, p)) == target:
            return i
    # Explicit: no vertex in aList matches.
    return None
def getIndices(shape,offset):
    """Return (vertices, edge indices, face indices) strings for *shape*.

    Each list holds pre-formatted OBJ fragments ("v"/"l"/"f" payloads);
    all indices are shifted by *offset* (OBJ indices are 1-based and
    global across objects).
    """
    vlist = []
    elist = []
    flist = []
    curves = None
    # If any edge is not a straight line, OBJ cannot represent it exactly:
    # fall back to a triangulated (tessellated) version of the whole shape.
    for e in shape.Edges:
        try:
            if not isinstance(e.Curve,Part.Line):
                if not curves:
                    curves = shape.tessellate(1)
                    FreeCAD.Console.PrintWarning(translate("Arch","Found a shape containing curves, triangulating\n"))
                    break
        except: # unimplemented curve type
            curves = shape.tessellate(1)
            FreeCAD.Console.PrintWarning(translate("Arch","Found a shape containing curves, triangulating\n"))
            break
    if curves:
        # Tessellation output: curves[0] = points, curves[1] = triangles.
        for v in curves[0]:
            vlist.append(" "+str(round(v.x,p))+" "+str(round(v.y,p))+" "+str(round(v.z,p)))
        for f in curves[1]:
            fi = ""
            for vi in f:
                fi += " " + str(vi + offset)
            flist.append(fi)
    else:
        for v in shape.Vertexes:
            vlist.append(" "+str(round(v.X,p))+" "+str(round(v.Y,p))+" "+str(round(v.Z,p)))
        # Wireframe-only shapes: export straight edges as OBJ lines.
        if not shape.Faces:
            for e in shape.Edges:
                if DraftGeomUtils.geomType(e) == "Line":
                    ei = " " + str(findVert(e.Vertexes[0],shape.Vertexes) + offset)
                    ei += " " + str(findVert(e.Vertexes[-1],shape.Vertexes) + offset)
                    elist.append(ei)
        for f in shape.Faces:
            if len(f.Wires) > 1:
                # if we have holes, we triangulate
                tris = f.tessellate(1)
                for fdata in tris[1]:
                    fi = ""
                    for vi in fdata:
                        vdata = Part.Vertex(tris[0][vi])
                        fi += " " + str(findVert(vdata,shape.Vertexes) + offset)
                    flist.append(fi)
            else:
                fi = ""
                # OCC vertices are unsorted. We need to sort in the right order...
                edges = Part.__sortEdges__(f.OuterWire.Edges)
                #print edges
                for e in edges:
                    #print e.Vertexes[0].Point,e.Vertexes[1].Point
                    v = e.Vertexes[0]
                    fi += " " + str(findVert(v,shape.Vertexes) + offset)
                flist.append(fi)
    return vlist,elist,flist
def export(exportList,filename):
    """called when freecad exports a file

    Writes every visible Part-based object in *exportList* to *filename*
    in Wavefront OBJ format.
    """
    outfile = pythonopen(filename,"wb")
    ver = FreeCAD.Version()
    outfile.write("# FreeCAD v" + ver[0] + "." + ver[1] + " build" + ver[2] + " Arch module\n")
    outfile.write("# http://www.freecadweb.org\n")
    # OBJ indices are 1-based and shared across all objects in the file.
    offset = 1
    for obj in exportList:
        if obj.isDerivedFrom("Part::Feature"):
            if obj.ViewObject.isVisible():
                vlist,elist,flist = getIndices(obj.Shape,offset)
                # Advance the global index past this object's vertices.
                offset += len(vlist)
                outfile.write("o " + obj.Name + "\n")
                for v in vlist:
                    outfile.write("v" + v + "\n")
                for e in elist:
                    outfile.write("l" + e + "\n")
                for f in flist:
                    outfile.write("f" + f + "\n")
    outfile.close()
    FreeCAD.Console.PrintMessage(translate("Arch","successfully written ")+filename+"\n")
| lgpl-2.1 |
bertugatt/textmt | textmt/tag/taggers.old.py | 1 | 4183 | def build_data(inputdir, outputdir, split=0.9):
data = []
for f in os.listdir(inputdir):
with open(os.path.join(inputdir, f), 'r', encoding='utf-8') as infile:
for l in infile.readlines():
l = l.strip()
if len(l) > 0:
data.append(l)
shuffle(data)
total = len(data)
print(total)
train = int(split * total)
os.mkdir(os.path.join(outputdir, 'train'))
os.mkdir(os.path.join(outputdir, 'test'))
with open(os.path.join(outputdir, 'train', 'train.txt'), 'w', encoding='utf-8') as training:
training.write('\n'.join(data[:train]))
with open(os.path.join(outputdir, 'test', 'test.txt'), 'w', encoding='utf-8') as testing:
testing.write('\n'.join(data[train:]))
def get_crf_tagger():
    """Return an MTTagger preloaded with the pre-trained CRF model."""
    tagger = MTTagger()
    tagger.load_model('models/tagger.100.crf')
    return tagger
def get_backoff_tagger():
    """Return an MTTagger preloaded with the pre-trained n-gram backoff model."""
    tagger = MTTagger()
    tagger.load_model('models/backoff.t3.100.pickle')
    return tagger
class MTTagger(object):
    """Part-of-speech tagger wrapper.

    Manages train/test corpora (via NLTK TaggedCorpusReader) and either an
    NLTK CRF model or an n-gram backoff model.
    """

    def __init__(self, train=None, test=None):
        # Private corpus readers; populated through the training/testing
        # property setters below.
        self.__train = None
        self.__test = None
        self.tokenizer = MTTokenizer()
        self.model = None
        if train is not None:
            self.training = train
        if test is not None:
            self.testing = test

    @property
    def training(self):
        """Training corpus reader (None until a directory is assigned)."""
        return self.__train

    @training.setter
    def training(self, corpusdir):
        # NOTE(review): the fileid pattern '.+.txt$' has an unescaped dot;
        # presumably '.+\.txt$' was intended — confirm before changing.
        self.__train = TaggedCorpusReader(corpusdir, '.+.txt$', sep='/')

    @property
    def testing(self):
        """Test corpus reader (None until a directory is assigned)."""
        return self.__test

    @testing.setter
    def testing(self, corpusdir):
        self.__test = TaggedCorpusReader(corpusdir, '.+.txt$', sep='/')

    @property
    def training_sents(self):
        """All tagged sentences of the training corpus, as a list."""
        return list(self.__train.tagged_sents())

    @property
    def test_sents(self):
        """All tagged sentences of the test corpus, as a list."""
        return list(self.__test.tagged_sents())

    def __untag(self, s):
        """Strip the tags from a tagged sentence, keeping only the words."""
        return [w for w, t in s]

    @property
    def num_train_sents(self):
        return len(self.training_sents)

    @property
    def num_test_sents(self):
        return len(self.test_sents)

    @property
    def num_train_words(self):
        # Fixed: previously read the non-existent attribute ``self.train``
        # (AttributeError at runtime); the reader lives on ``self.training``.
        return len(self.training.tagged_words())

    @property
    def num_test_words(self):
        # Fixed: previously read the non-existent attribute ``self.test``.
        return len(self.testing.tagged_words())

    def train_model(self, mode='nltk.backoff', save_file=None):
        """Train a model on the training corpus.

        mode is 'nltk.backoff' (trigram backoff chain, optionally pickled
        to save_file) or 'nltk.crf' (CRF, written to save_file, defaulting
        to 'models/tagger.crf').
        """
        if mode == 'nltk.backoff':
            self.__build_backoff_tagger()
            if save_file is not None:
                with open(save_file, 'wb') as f:
                    pickle.dump(self.model, f)
        elif mode == 'nltk.crf':
            if save_file is None:
                save_file = 'models/tagger.crf'
            self.__build_crf_tagger(save_file)

    def __build_crf_tagger(self, save_file):
        # CRF training writes its model file directly to save_file.
        self.model = nltk.CRFTagger()
        train_data = self.training_sents
        self.model.train(train_data, save_file)

    def __build_backoff_tagger(self, default_tag='NOUN'):
        # Trigram -> bigram -> unigram -> default-tag backoff chain.
        train_data = self.training_sents
        t0 = nltk.DefaultTagger(default_tag)
        t1 = nltk.UnigramTagger(train_data, backoff=t0)
        t2 = nltk.BigramTagger(train_data, backoff=t1)
        self.model = nltk.TrigramTagger(train_data,
                                        backoff=t2)

    def tag(self, sentence):
        """Tag a sentence; accepts raw text or a pre-tokenized token list."""
        if type(sentence) is str:
            tokens = self.tokenizer.tokenize(sentence)
        else:
            tokens = sentence
        return self.model.tag(tokens)

    def evaluate_model(self):
        """Return the current model's accuracy on the test sentences."""
        return self.model.evaluate(self.test_sents)

    def load_model(self, model_file):
        """Load a previously saved model (*.pickle backoff or *.crf)."""
        if model_file.endswith('pickle'):
            self.model = pickle.load(open(model_file, 'rb'))
        elif model_file.endswith('crf'):
            self.model = nltk.CRFTagger()
            self.model.set_model_file(model_file)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.