hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
643f7c2fdec8c6c16d3f0b194f9359447db96d42 | 180 | py | Python | conftest.py | berrondo/saec | e8063f7b75fbeec4ea4d514958c073ff97a08088 | [
"MIT"
] | null | null | null | conftest.py | berrondo/saec | e8063f7b75fbeec4ea4d514958c073ff97a08088 | [
"MIT"
] | null | null | null | conftest.py | berrondo/saec | e8063f7b75fbeec4ea4d514958c073ff97a08088 | [
"MIT"
] | null | null | null | import pytest
from rest_framework.test import APIClient
@pytest.fixture(autouse=True)
def enable_db_access(db):
    """Grant every test database access.

    ``autouse=True`` makes pytest apply this fixture to all tests, and simply
    depending on pytest-django's ``db`` fixture is what unlocks DB access --
    hence the intentionally empty body.
    """
    pass
@pytest.fixture
def api_client():
    """Return a DRF test client instance for exercising API endpoints.

    Bug fix: the fixture previously returned the ``APIClient`` *class*, not an
    instance, so tests doing ``api_client.get(...)`` would call an unbound
    method and fail. Instantiate the client so it is ready to use.
    """
    return APIClient()
| 13.846154 | 41 | 0.772222 |
b0e9862eb64ccb437af5d2d326b2f143ca46fffc | 8,137 | py | Python | tests/components/whois/test_sensor.py | WilliamTian0987/core | 814db01561393ead12c9252e0c5c0dd90be7f247 | [
"Apache-2.0"
] | 3 | 2022-01-27T17:00:51.000Z | 2022-03-09T03:49:03.000Z | tests/components/whois/test_sensor.py | WilliamTian0987/core | 814db01561393ead12c9252e0c5c0dd90be7f247 | [
"Apache-2.0"
] | 7 | 2022-03-01T06:32:03.000Z | 2022-03-31T07:19:10.000Z | tests/components/whois/test_sensor.py | WilliamTian0987/core | 814db01561393ead12c9252e0c5c0dd90be7f247 | [
"Apache-2.0"
] | null | null | null | """Tests for the sensors provided by the Whois integration."""
from unittest.mock import AsyncMock, MagicMock
import pytest
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.components.whois.const import DOMAIN, SCAN_INTERVAL
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity import EntityCategory
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.mark.freeze_time("2022-01-01 12:00:00", tz_offset=0)
async def test_whois_sensors(
    hass: HomeAssistant,
    enable_all_entities: AsyncMock,
    init_integration: MockConfigEntry,
) -> None:
    """Test the Whois sensors.

    Time is frozen at 2022-01-01 12:00:00 UTC so every date-derived value
    (created/expires/days-until-expiration) is deterministic. For each sensor
    we check the registry entry (unique_id, entity category) and the state
    machine entry (state, friendly name, icon / device class attributes).
    """
    entity_registry = er.async_get(hass)
    device_registry = dr.async_get(hass)

    # Admin contact sensor: plain string state, icon, no device class.
    state = hass.states.get("sensor.home_assistant_io_admin")
    entry = entity_registry.async_get("sensor.home_assistant_io_admin")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_admin"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "admin@example.com"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Admin"
    assert state.attributes.get(ATTR_ICON) == "mdi:account-star"
    assert ATTR_DEVICE_CLASS not in state.attributes

    # Creation-date sensor: ISO timestamp state, timestamp device class, no icon.
    state = hass.states.get("sensor.home_assistant_io_created")
    entry = entity_registry.async_get("sensor.home_assistant_io_created")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_creation_date"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "2019-01-01T00:00:00+00:00"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Created"
    assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP
    assert ATTR_ICON not in state.attributes

    # Days-until-expiration: the only sensor without a diagnostic category;
    # 364 days between frozen "now" (2022-01-01) and expiry (2023-01-01).
    state = hass.states.get("sensor.home_assistant_io_days_until_expiration")
    entry = entity_registry.async_get("sensor.home_assistant_io_days_until_expiration")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_days_until_expiration"
    assert entry.entity_category is None
    assert state.state == "364"
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME)
        == "home-assistant.io Days Until Expiration"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:calendar-clock"
    assert ATTR_DEVICE_CLASS not in state.attributes

    # Expiration-date sensor.
    state = hass.states.get("sensor.home_assistant_io_expires")
    entry = entity_registry.async_get("sensor.home_assistant_io_expires")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_expiration_date"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "2023-01-01T00:00:00+00:00"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Expires"
    assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP
    assert ATTR_ICON not in state.attributes

    # Last-updated sensor.
    state = hass.states.get("sensor.home_assistant_io_last_updated")
    entry = entity_registry.async_get("sensor.home_assistant_io_last_updated")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_last_updated"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "2021-12-31T23:00:00+00:00"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Last Updated"
    assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP
    assert ATTR_ICON not in state.attributes

    # Owner contact sensor.
    state = hass.states.get("sensor.home_assistant_io_owner")
    entry = entity_registry.async_get("sensor.home_assistant_io_owner")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_owner"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "owner@example.com"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Owner"
    assert state.attributes.get(ATTR_ICON) == "mdi:account"
    assert ATTR_DEVICE_CLASS not in state.attributes

    # Registrant contact sensor.
    state = hass.states.get("sensor.home_assistant_io_registrant")
    entry = entity_registry.async_get("sensor.home_assistant_io_registrant")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_registrant"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "registrant@example.com"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Registrant"
    assert state.attributes.get(ATTR_ICON) == "mdi:account-edit"
    assert ATTR_DEVICE_CLASS not in state.attributes

    # Registrar sensor.
    state = hass.states.get("sensor.home_assistant_io_registrar")
    entry = entity_registry.async_get("sensor.home_assistant_io_registrar")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_registrar"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "My Registrar"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Registrar"
    assert state.attributes.get(ATTR_ICON) == "mdi:store"
    assert ATTR_DEVICE_CLASS not in state.attributes

    # Reseller sensor.
    state = hass.states.get("sensor.home_assistant_io_reseller")
    entry = entity_registry.async_get("sensor.home_assistant_io_reseller")
    assert entry
    assert state
    assert entry.unique_id == "home-assistant.io_reseller"
    assert entry.entity_category == EntityCategory.DIAGNOSTIC
    assert state.state == "Top Domains, Low Prices"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "home-assistant.io Reseller"
    assert state.attributes.get(ATTR_ICON) == "mdi:store"
    assert ATTR_DEVICE_CLASS not in state.attributes

    # All sensors share one service-type device entry keyed by the domain name;
    # the device should carry no manufacturer/model/version metadata.
    assert entry.device_id
    device_entry = device_registry.async_get(entry.device_id)
    assert device_entry
    assert device_entry.configuration_url is None
    assert device_entry.entry_type == dr.DeviceEntryType.SERVICE
    assert device_entry.identifiers == {(DOMAIN, "home-assistant.io")}
    assert device_entry.manufacturer is None
    assert device_entry.model is None
    assert device_entry.name is None
    assert device_entry.sw_version is None
@pytest.mark.parametrize(
    "entity_id",
    [
        "sensor.home_assistant_io_admin",
        "sensor.home_assistant_io_owner",
        "sensor.home_assistant_io_registrant",
        "sensor.home_assistant_io_registrar",
        "sensor.home_assistant_io_reseller",
    ],
)
async def test_disabled_by_default_sensors(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    entity_id: str,
) -> None:
    """Verify that the given Whois sensor ships disabled by default."""
    entity_registry = er.async_get(hass)

    # A disabled entity is registered but never added to the state machine.
    assert hass.states.get(entity_id) is None

    registry_entry = entity_registry.async_get(entity_id)
    assert registry_entry
    assert registry_entry.disabled
    assert registry_entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION
@pytest.mark.parametrize(
    "entity_id",
    [
        "sensor.home_assistant_io_admin",
        "sensor.home_assistant_io_created",
        "sensor.home_assistant_io_days_until_expiration",
        "sensor.home_assistant_io_expires",
        "sensor.home_assistant_io_last_updated",
        "sensor.home_assistant_io_owner",
        "sensor.home_assistant_io_registrant",
        "sensor.home_assistant_io_registrar",
        "sensor.home_assistant_io_reseller",
    ],
)
async def test_no_data(
    hass: HomeAssistant,
    mock_whois: MagicMock,
    enable_all_entities: AsyncMock,
    init_integration: MockConfigEntry,
    entity_id: str,
) -> None:
    """Test whois sensors become unknown when there is no data provided."""
    # Simulate the whois library returning nothing on the next refresh.
    mock_whois.return_value = None

    # Advance time past the coordinator's scan interval to force an update.
    async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
    await hass.async_block_till_done()

    sensor_state = hass.states.get(entity_id)
    assert sensor_state
    assert sensor_state.state == STATE_UNKNOWN
| 40.083744 | 87 | 0.753349 |
957799b1a41944ce14f4ed67e0fdbdcdb65731b7 | 13,912 | py | Python | src/object_detection/builders/preprocessor_builder.py | Tobias-Fischer/ros_people_object_detection_tensorflow | 2a0af311b4eef55c053bd2349e1dff10abe1f32a | [
"Apache-2.0"
] | null | null | null | src/object_detection/builders/preprocessor_builder.py | Tobias-Fischer/ros_people_object_detection_tensorflow | 2a0af311b4eef55c053bd2349e1dff10abe1f32a | [
"Apache-2.0"
] | null | null | null | src/object_detection/builders/preprocessor_builder.py | Tobias-Fischer/ros_people_object_detection_tensorflow | 2a0af311b4eef55c053bd2349e1dff10abe1f32a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
def _get_step_config_from_proto(preprocessor_step_config, step_name):
"""Returns the value of a field named step_name from proto.
Args:
preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
step_name: Name of the field to get value from.
Returns:
result_dict: a sub proto message from preprocessor_step_config which will be
later converted to a dictionary.
Raises:
ValueError: If field does not exist in proto.
"""
for field, value in preprocessor_step_config.ListFields():
if field.name == step_name:
return value
raise ValueError('Could not get field %s from proto!', step_name)
def _get_dict_from_proto(config):
"""Helper function to put all proto fields into a dictionary.
For many preprocessing steps, there's an trivial 1-1 mapping from proto fields
to function arguments. This function automatically populates a dictionary with
the arguments from the proto.
Protos that CANNOT be trivially populated include:
* nested messages.
* steps that check if an optional field is set (ie. where None != 0).
* protos that don't map 1-1 to arguments (ie. list should be reshaped).
* fields requiring additional validation (ie. repeated field has n elements).
Args:
config: A protobuf object that does not violate the conditions above.
Returns:
result_dict: |config| converted into a python dictionary.
"""
result_dict = {}
for field, value in config.ListFields():
result_dict[field.name] = value
return result_dict
# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
# Steps listed here take the "fast path" in build(): their proto fields are
# forwarded 1-1 as keyword arguments; anything needing translation is handled
# by an explicit branch in build() instead.
PREPROCESSING_FUNCTION_MAP = {
    'normalize_image': preprocessor.normalize_image,
    'random_pixel_value_scale': preprocessor.random_pixel_value_scale,
    'random_image_scale': preprocessor.random_image_scale,
    'random_rgb_to_gray': preprocessor.random_rgb_to_gray,
    'random_adjust_brightness': preprocessor.random_adjust_brightness,
    'random_adjust_contrast': preprocessor.random_adjust_contrast,
    'random_adjust_hue': preprocessor.random_adjust_hue,
    'random_adjust_saturation': preprocessor.random_adjust_saturation,
    'random_distort_color': preprocessor.random_distort_color,
    'random_jitter_boxes': preprocessor.random_jitter_boxes,
    'random_crop_to_aspect_ratio': preprocessor.random_crop_to_aspect_ratio,
    'random_black_patches': preprocessor.random_black_patches,
    'rgb_to_gray': preprocessor.rgb_to_gray,
    'scale_boxes_to_pixel_coordinates': (
        preprocessor.scale_boxes_to_pixel_coordinates),
    'subtract_channel_mean': preprocessor.subtract_channel_mean,
}


# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
    preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
    preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
    preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
    preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
        tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
def build(preprocessor_step_config):
  """Builds preprocessing step based on the configuration.

  Args:
    preprocessor_step_config: PreprocessingStep configuration proto.

  Returns:
    function, argmap: A callable function and an argument map to call function
      with.

  Raises:
    ValueError: On invalid configuration.
  """
  # The name of the set oneof field identifies which preprocessing step
  # was configured.
  step_type = preprocessor_step_config.WhichOneof('preprocessing_step')

  # Fast path: steps whose proto fields map 1-1 onto function kwargs.
  if step_type in PREPROCESSING_FUNCTION_MAP:
    preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
    step_config = _get_step_config_from_proto(preprocessor_step_config,
                                              step_type)
    function_args = _get_dict_from_proto(step_config)
    return (preprocessing_function, function_args)

  # Every step below needs hand-written proto-to-kwargs translation.
  if step_type == 'random_horizontal_flip':
    config = preprocessor_step_config.random_horizontal_flip
    return (preprocessor.random_horizontal_flip,
            {
                'keypoint_flip_permutation': tuple(
                    config.keypoint_flip_permutation),
            })

  if step_type == 'random_vertical_flip':
    config = preprocessor_step_config.random_vertical_flip
    return (preprocessor.random_vertical_flip,
            {
                'keypoint_flip_permutation': tuple(
                    config.keypoint_flip_permutation),
            })

  if step_type == 'random_rotation90':
    # No configurable options for this step.
    return (preprocessor.random_rotation90, {})

  if step_type == 'random_crop_image':
    config = preprocessor_step_config.random_crop_image
    # Min/max proto fields are packed into (min, max) range tuples.
    return (preprocessor.random_crop_image,
            {
                'min_object_covered': config.min_object_covered,
                'aspect_ratio_range': (config.min_aspect_ratio,
                                       config.max_aspect_ratio),
                'area_range': (config.min_area, config.max_area),
                'overlap_thresh': config.overlap_thresh,
                'random_coef': config.random_coef,
            })

  if step_type == 'random_pad_image':
    config = preprocessor_step_config.random_pad_image
    # min/max image sizes must be given as complete (height, width) pairs
    # or omitted entirely; a half-set pair is a configuration error.
    min_image_size = None
    if (config.HasField('min_image_height') !=
        config.HasField('min_image_width')):
      raise ValueError('min_image_height and min_image_width should be either '
                       'both set or both unset.')
    if config.HasField('min_image_height'):
      min_image_size = (config.min_image_height, config.min_image_width)

    max_image_size = None
    if (config.HasField('max_image_height') !=
        config.HasField('max_image_width')):
      raise ValueError('max_image_height and max_image_width should be either '
                       'both set or both unset.')
    if config.HasField('max_image_height'):
      max_image_size = (config.max_image_height, config.max_image_width)

    # pad_color is a repeated field: either empty (treated as None) or
    # exactly three (RGB) values.
    pad_color = config.pad_color
    if pad_color and len(pad_color) != 3:
      raise ValueError('pad_color should have 3 elements (RGB) if set!')
    if not pad_color:
      pad_color = None
    return (preprocessor.random_pad_image,
            {
                'min_image_size': min_image_size,
                'max_image_size': max_image_size,
                'pad_color': pad_color,
            })

  if step_type == 'random_crop_pad_image':
    config = preprocessor_step_config.random_crop_pad_image
    # Validate repeated fields up front; optional kwargs are only added
    # when the corresponding field is non-empty so the function defaults
    # apply otherwise.
    min_padded_size_ratio = config.min_padded_size_ratio
    if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
      raise ValueError('min_padded_size_ratio should have 2 elements if set!')
    max_padded_size_ratio = config.max_padded_size_ratio
    if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
      raise ValueError('max_padded_size_ratio should have 2 elements if set!')
    pad_color = config.pad_color
    if pad_color and len(pad_color) != 3:
      raise ValueError('pad_color should have 3 elements if set!')
    kwargs = {
        'min_object_covered': config.min_object_covered,
        'aspect_ratio_range': (config.min_aspect_ratio,
                               config.max_aspect_ratio),
        'area_range': (config.min_area, config.max_area),
        'overlap_thresh': config.overlap_thresh,
        'random_coef': config.random_coef,
    }
    if min_padded_size_ratio:
      kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
    if max_padded_size_ratio:
      kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
    if pad_color:
      kwargs['pad_color'] = tuple(pad_color)
    return (preprocessor.random_crop_pad_image, kwargs)

  if step_type == 'random_resize_method':
    config = preprocessor_step_config.random_resize_method
    return (preprocessor.random_resize_method,
            {
                'target_size': [config.target_height, config.target_width],
            })

  if step_type == 'resize_image':
    config = preprocessor_step_config.resize_image
    # Translate the proto enum into the tf.image.ResizeMethod constant.
    method = RESIZE_METHOD_MAP[config.method]
    return (preprocessor.resize_image,
            {
                'new_height': config.new_height,
                'new_width': config.new_width,
                'method': method
            })

  # The ssd_random_crop* steps take *lists* of per-operation parameters;
  # with no operations configured, the preprocessor's defaults are used.
  if step_type == 'ssd_random_crop':
    config = preprocessor_step_config.ssd_random_crop
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
                            for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      return (preprocessor.ssd_random_crop,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio_range': aspect_ratio_range,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'random_coef': random_coef,
              })
    return (preprocessor.ssd_random_crop, {})

  if step_type == 'ssd_random_crop_pad':
    config = preprocessor_step_config.ssd_random_crop_pad
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
                            for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      min_padded_size_ratio = [tuple(op.min_padded_size_ratio)
                               for op in config.operations]
      max_padded_size_ratio = [tuple(op.max_padded_size_ratio)
                               for op in config.operations]
      pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
                   for op in config.operations]
      return (preprocessor.ssd_random_crop_pad,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio_range': aspect_ratio_range,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'random_coef': random_coef,
                  'min_padded_size_ratio': min_padded_size_ratio,
                  'max_padded_size_ratio': max_padded_size_ratio,
                  'pad_color': pad_color,
              })
    return (preprocessor.ssd_random_crop_pad, {})

  if step_type == 'ssd_random_crop_fixed_aspect_ratio':
    config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio': config.aspect_ratio,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'random_coef': random_coef,
              })
    return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})

  if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio':
    config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
    # Built incrementally: scalar/ratio kwargs are only set when present,
    # per-operation list kwargs only when operations are configured.
    kwargs = {}
    aspect_ratio = config.aspect_ratio
    if aspect_ratio:
      kwargs['aspect_ratio'] = aspect_ratio
    min_padded_size_ratio = config.min_padded_size_ratio
    if min_padded_size_ratio:
      if len(min_padded_size_ratio) != 2:
        raise ValueError('min_padded_size_ratio should have 2 elements if set!')
      kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
    max_padded_size_ratio = config.max_padded_size_ratio
    if max_padded_size_ratio:
      if len(max_padded_size_ratio) != 2:
        raise ValueError('max_padded_size_ratio should have 2 elements if set!')
      kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
    if config.operations:
      kwargs['min_object_covered'] = [op.min_object_covered
                                      for op in config.operations]
      kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio)
                                      for op in config.operations]
      kwargs['area_range'] = [(op.min_area, op.max_area)
                              for op in config.operations]
      kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations]
      kwargs['random_coef'] = [op.random_coef for op in config.operations]
    return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs)

  # Reached only if the proto declared a step this builder does not know.
  raise ValueError('Unknown preprocessing step.')
| 43.071207 | 80 | 0.693502 |
fbf4f2b808e934c891fbaaf317e58e81b3fea067 | 2,862 | py | Python | app/models.py | zhazhalaila/rfid-signin-python | 33c7bd4940b0896bc0f5dce1926db9e9d5adc1ed | [
"MIT"
] | 18 | 2018-10-24T08:33:46.000Z | 2021-11-30T15:55:24.000Z | app/models.py | zhazhalaila/rfid-signin-python | 33c7bd4940b0896bc0f5dce1926db9e9d5adc1ed | [
"MIT"
] | 2 | 2019-07-01T06:12:03.000Z | 2021-06-01T22:50:37.000Z | app/models.py | zhazhalaila/rfid-signin-python | 33c7bd4940b0896bc0f5dce1926db9e9d5adc1ed | [
"MIT"
] | 2 | 2019-08-04T05:40:10.000Z | 2020-02-14T02:55:23.000Z | from app import db, login
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from hashlib import md5
from datetime import datetime
# Association table for the many-to-many relationship between classes and
# students; it has no model class of its own and is referenced via
# Class.students / the 'classes' backref.
student_identifier = db.Table('student_identifier',
    db.Column('class_id', db.Integer, db.ForeignKey('classes.class_id')),
    db.Column('student_id', db.Integer, db.ForeignKey('students.student_id'))
)
class Student(db.Model):
    """A student identified by an RFID tag for sign-in tracking."""
    __tablename__ = 'students'
    student_id = db.Column(db.Integer, unique=True, primary_key=True)
    # ID read from the student's RFID card; unique per student.
    rfid_id = db.Column(db.String(64), index=True, unique=True)
    # School-assigned student number.
    student_number = db.Column(db.String(64), index=True, unique=True)
    student_name = db.Column(db.String(64), index=True, unique=True)
    student_nickname = db.Column(db.String(64), nullable=False, default='')
    # Whether the student is currently signed in / active.
    curr_active = db.Column(db.Boolean, nullable=False, default=False)
class Timetable(db.Model):
    """One sign-in record: which student tapped in for which class, and when."""
    __tablename__ = 'timetable'
    time_id = db.Column(db.Integer, unique=True, primary_key=True)
    time_class_name = db.Column(db.String(128), index=True)
    time_class_id = db.Column(db.Integer, index=True)
    time_student_id = db.Column(db.Integer, index=True)
    # Bug fix: ``default=datetime.now()`` was evaluated once at import time,
    # stamping every row with the process start time. Passing the callable
    # makes SQLAlchemy evaluate it per INSERT instead.
    time_time = db.Column(db.DateTime, default=datetime.now)
    active = db.Column(db.Boolean, nullable=False, default=False)
class Class(UserMixin, db.Model):
    """A class (course) that also acts as the Flask-Login user entity.

    Teachers authenticate as a Class via ``class_token`` + password; the
    many-to-many link to :class:`Student` goes through ``student_identifier``.
    """
    __tablename__ = 'classes'
    class_id = db.Column(db.Integer, primary_key=True)
    class_name = db.Column(db.String(128), index=True)
    # Unique login token; also used to derive the Gravatar hash below.
    class_token = db.Column(db.String(128), unique=True)
    class_teacher = db.Column(db.Integer, index=True)
    password_hash = db.Column(db.String(128))
    # Bug fix: ``default=datetime.now()`` was evaluated once at import time,
    # so every row shared the process start timestamp. Pass the callable so
    # SQLAlchemy evaluates it per INSERT.
    last_seen = db.Column(db.DateTime, default=datetime.now)
    students = db.relationship("Student",
        secondary=student_identifier,
        backref=db.backref('classes', lazy='dynamic'), lazy='dynamic')

    def get_id(self):
        """Return the session identifier for Flask-Login.

        Bug fix: Flask-Login requires this to be a *string*; load_user()
        already does int(id), so this stays backward-compatible.
        """
        return str(self.class_id)

    def set_password(self, password):
        """Hash and store the given plaintext password."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if the plaintext password matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def avastar(self, size):
        """Return a Gravatar identicon URL of the given pixel size.

        NOTE(review): the method name is a typo for "avatar" but is kept to
        avoid breaking existing templates/callers.
        """
        digest = md5(self.class_token.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)

    def insert(self, student):
        """Enroll the student in this class (idempotent)."""
        if not self.is_inserting(student):
            self.students.append(student)

    def uninsert(self, student):
        """Remove the student from this class (idempotent)."""
        if self.is_inserting(student):
            self.students.remove(student)

    def is_inserting(self, student):
        """Return True if the student is already enrolled in this class."""
        return self.students.filter(
            student_identifier.c.student_id == student.student_id).count() > 0

    def get_student(self):
        """Return a query for all students enrolled in this class."""
        return Student.query.join(
            student_identifier, student_identifier.c.student_id == Student.student_id).filter(
            student_identifier.c.class_id == self.class_id)
@login.user_loader
def load_user(id):
    """Flask-Login hook: resolve the session's stored id to a Class row."""
    class_id = int(id)
    return Class.query.get(class_id)
cb9f818ff3554de89a7486ccfd6fe098e7f47d2e | 2,230 | py | Python | bundle-workflow/src/build_workflow/builder.py | kavilla/opensearch-build | a0f2614140b8c3243609e80010f190baa1f5642b | [
"Apache-2.0"
] | null | null | null | bundle-workflow/src/build_workflow/builder.py | kavilla/opensearch-build | a0f2614140b8c3243609e80010f190baa1f5642b | [
"Apache-2.0"
] | null | null | null | bundle-workflow/src/build_workflow/builder.py | kavilla/opensearch-build | a0f2614140b8c3243609e80010f190baa1f5642b | [
"Apache-2.0"
] | null | null | null | # Copyright OpenSearch Contributors.
# SPDX-License-Identifier: Apache-2.0
import os
"""
This class is responsible for executing the build for a component and passing the results to a build recorder.
It will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.
Artifacts found in "<build root>/artifacts/<maven|plugins|libs|bundle>" will be recognized and recorded.
"""
class Builder:
    def __init__(self, component_name, git_repo, script_finder, build_recorder):
        """
        Construct a new Builder instance.
        :param component_name: The name of the component to build.
        :param git_repo: A GitRepository instance containing the checked-out code.
        :param script_finder: The ScriptFinder to use for finding build.sh scripts.
        :param build_recorder: The build recorder that will capture build information and artifacts.
        """
        self.component_name = component_name
        self.git_repo = git_repo
        self.script_finder = script_finder
        self.build_recorder = build_recorder
        # Build output is expected under <repo>/artifacts (see export_artifacts).
        self.output_path = "artifacts"

    def build(self, version, arch, snapshot):
        """
        Run the component's build script inside its git checkout and record the component.
        :param version: Version string passed to the build script via -v.
        :param arch: Target architecture passed via -a.
        :param snapshot: Boolean; passed via -s as lowercase "true"/"false".
        """
        build_script = self.script_finder.find_build_script(
            self.component_name, self.git_repo.dir
        )
        build_command = f"{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}"
        self.git_repo.execute(build_command)
        self.build_recorder.record_component(self.component_name, self.git_repo)

    def export_artifacts(self):
        """
        Walk the build output directory and record every artifact found under
        the known artifact-type subdirectories (paths are recorded relative to
        the artifacts root).
        """
        artifacts_dir = os.path.realpath(
            os.path.join(self.git_repo.dir, self.output_path)
        )
        for artifact_type in ["maven", "bundle", "plugins", "libs", "core-plugins"]:
            # NOTE: loop variable "dir" shadows the builtin; left unchanged here.
            for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
                for file_name in files:
                    absolute_path = os.path.join(dir, file_name)
                    relative_path = os.path.relpath(absolute_path, artifacts_dir)
                    self.build_recorder.record_artifact(
                        self.component_name, artifact_type, relative_path, absolute_path
                    )
| 45.510204 | 128 | 0.678027 |
9e10e31a4f1b020322174aa1052353d8450c411f | 7,293 | py | Python | Post_NLPResult_Database.py | Feiyi-Ding/2021A | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | [
"Apache-2.0"
] | null | null | null | Post_NLPResult_Database.py | Feiyi-Ding/2021A | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | [
"Apache-2.0"
] | null | null | null | Post_NLPResult_Database.py | Feiyi-Ding/2021A | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | [
"Apache-2.0"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_core_components as dcc
import requests
import json
import base64
from sshtunnel import SSHTunnelForwarder
import pymysql
from pymysql.converters import escape_string
from stanfordcorenlp import StanfordCoreNLP
import json
from json2html import *
from collections import defaultdict
from nltk.tree import ParentedTree
from nltk.treeprettyprinter import TreePrettyPrinter
import re
app = dash.Dash(__name__) #external_stylesheets=[dbc.themes.SUPERHERO]

# Card holding the free-text input area for the statement to analyze.
# NOTE(review): "inter" in the placeholder is a typo for "enter"; it is a
# user-facing string and intentionally left unchanged here.
graph_card = dbc.Card([
    dbc.CardBody([
        dbc.Textarea(id='textzone', value='',bs_size="lg",
            className="mb-3", placeholder="Please inter a statement in proper grammar with all implied clauses."),
    ])
])

# Page layout: title, contact-detail inputs, the statement card, a submit
# button (with confirmation provider), and a preformatted output area that
# the text_output callback fills in.
app.layout = html.Div([
    html.Br(),
    html.Br(),
    html.Br(),
    html.Br(),
    dbc.Row([html.H1('Machine Learning & Ontology Software')], justify="around"),
    html.Br(),
    html.Br(),
    html.Br(),
    html.Br(),
    html.Br(),
    html.Br(),
    dbc.Row(dcc.Input(id='name', placeholder='Enter Complete Name...', type='text'), style={"height": "auto", "margin-left": 314}),
    html.Br(),
    html.Div(dcc.Input(id='email', placeholder='Enter Email Address...', type='text'),style={"height": "auto", "margin-left": 314} ),
    html.Br(),
    html.Div(dcc.Input(id='phone', placeholder='Enter Phone Number...', type='text') ,style={"height": "auto", "margin-left": 314}),
    html.Br(),
    dbc.Row([dbc.Col(graph_card, width=8)], justify="around"),
    dcc.ConfirmDialogProvider( html.Button('Submit to sNLP', style={"height": "auto", "margin-left": 314}),id='submit_button'), #message='Danger danger! Are you sure you want to continue?'
    # dbc.Row([html.Div(id='textoutput', style={'whiteSpace': 'pre-line','margin-left': 320, 'width': '1000px', 'display' : "inline-block !important"})])
    html.Div(id='textoutput', style={'whiteSpace': 'pre', 'margin-left': 320, 'width': '1000px', "font-family": "monospace"})
])
@app.callback(
    Output('textoutput', 'children'),
    [Input('submit_button', 'submit_n_clicks'),
     Input('textzone', 'value'),
     Input("name", "value"),
     Input("email", "value"),
     Input("phone", "value"), ]
)
def text_output(submit_n_clicks, value, name, email, phone):
    """Dash callback: annotate the submitted text with Stanford CoreNLP,
    archive the JSON result in MySQL (reached through an SSH tunnel) and
    return the rendered parse tree for display.

    Args:
        submit_n_clicks: click counter of the ConfirmDialogProvider submit
            button; falsy until the user confirms, in which case nothing runs.
        value: free text entered in the 'textzone' input.
        name: submitter's name; stored in the 'teacher' column.
        email, phone: collected by the form but currently unused here.

    Returns:
        str: pretty-printed constituency parse of *value*, or '' before the
        first submit.
    """
    if not submit_n_clicks:
        return ''

    # ---- NLP annotation via a remote CoreNLP server ----------------------
    host = 'http://66.76.242.198'
    port = 9888
    nlp = StanfordCoreNLP(host, port=port, timeout=30000)
    props = {
        'annotators': 'tokenize,ssplit,pos,lemma,ner,parse,depparse,dcoref,relation',
        'pipelineLanguage': 'en',
        'outputFormat': 'json'}
    tokn = nlp.word_tokenize(value)
    Pos = nlp.pos_tag(value)
    Ner = nlp.ner(value)
    Parse = nlp.parse(value)
    Dep_parse = nlp.dependency_parse(value)
    Json_data = json.loads(nlp.annotate(value, properties=props))

    # ---- Credentials -----------------------------------------------------
    # Base64-encoded server/MySQL passwords are read from a local file; if
    # the file has several lines the last one wins (one line is expected).
    with open("EncodedPasswords.txt", "r") as filestream:
        for line in filestream:
            EncServerPass, EncMySQLPass = line.split(",")
    ServerPass = base64.b64decode(EncServerPass)
    MySQLPass = base64.b64decode(EncMySQLPass)

    ServerAdress = '66.76.242.194'
    ServerUserName = 'ambekarakhil'
    MySQLUserName = 'ambekarakhil'
    DatabaseName = 'ambekarakhil'

    # ---- Persist the annotation through an SSH tunnel --------------------
    server = SSHTunnelForwarder(
        ServerAdress,
        ssh_username=ServerUserName,
        ssh_password=ServerPass,
        remote_bind_address=('127.0.0.1', 3306)
    )
    server.start()
    try:
        cnx = pymysql.connect(
            host='127.0.0.1',
            port=server.local_bind_port,
            user=MySQLUserName,
            password=MySQLPass,
            db=DatabaseName
        )
        try:
            print("Connected to the following MySQL Server: " + cnx.get_server_info())
            # json.dumps re-serializes the annotation; parameterized INSERT
            # below avoids any quoting/injection issues.
            json_data_sql = json.dumps(Json_data)
            CountVal = 3
            TeacherVal = name
            AssertionVal = "dummyAssertion"
            LinkageVal = "dummyLinkage"
            with cnx.cursor() as cur:
                cur.execute('use ambekarakhil;')
                sql_command = """INSERT INTO verus0302c(count, teacher, assertion, nlp, linkages) VALUES (%s, %s, %s, %s, %s)"""
                cur.execute(sql_command, (CountVal, TeacherVal, AssertionVal, json_data_sql, LinkageVal))
            cnx.commit()
        finally:
            # BUG FIX: connection/tunnel were previously closed only on the
            # success path, leaking both whenever anything above raised.
            cnx.close()
    finally:
        server.stop()

    return output_nlp(Json_data, tokn, Pos, Ner, Parse, Dep_parse)
# # Convert the JSON result in a tabular format with HTML tags
# jsontohtml = json2html.convert(json = Json_data);
# #---------------Will expand this code to add more complex tasks --------------------
# # Display the HTML code in a new browser
# import webbrowser
# f = open('JSONResult.html','w')
# f.write(jsontohtml)
# f.close()
# webbrowser.open_new_tab('JSONResult.html')
# #---------------Will expand this code to add more complex tasks --------------------
# def output_nlp(Json_data, tokn, Pos, Ner, Parse, Dep_parse):
# parse_tree = ParentedTree.fromstring(Parse)
# parse_tree.pretty_print()
# return "NLP Parse: \n {}".format(Parse)
def output_nlp(Json_data, tokn, Pos, Ner, Parse, Dep_parse):
    """Render the constituency parse *Parse* as a vertically flipped ASCII tree.

    Only *Parse* is consumed; the other annotation arguments are accepted to
    keep the caller's signature unchanged. Underscores in the rendering are
    replaced with dashes.
    """
    rendered = TreePrettyPrinter(ParentedTree.fromstring(Parse)).text()
    flipped = rendered.split("\n")[::-1]
    body = re.sub("_", "-", '\n'.join(flipped))
    return "NLP Parse(sentence structure): \n{}".format(body)
if __name__ == "__main__":
app.run_server(debug=False)
| 33.454128 | 189 | 0.569724 |
3873bc7b26bdeb7fc50108ff031785d5b92112de | 799 | py | Python | contrib/bt/startup/gui/exportRibs.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T19:04:46.000Z | 2015-02-10T19:04:46.000Z | contrib/bt/startup/gui/exportRibs.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | contrib/bt/startup/gui/exportRibs.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | import Gaffer
import GafferScene
import GafferUI
def __exportRibs( menu ):
	"""Export one RIB per frame of the script's frame range.

	Requires the current selection to be a single RenderManRender node whose
	mode is 'generate'; otherwise a diagnostic is printed and nothing runs.

	Args:
		menu: the Gaffer menu this command was invoked from; used to locate
			the enclosing ScriptWindow and its script.
	"""
	scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
	script = scriptWindow.scriptNode()
	start = script['frameRange']['start'].getValue()
	end = script['frameRange']['end'].getValue()
	# Fetch the selection once instead of calling script.selection() twice.
	selection = script.selection()
	if not len( selection ):
		# print() with a single argument is valid in both Python 2 and 3,
		# unlike the bare print statements used previously.
		print( 'select a node!' )
		return
	sel = selection[0]
	if sel.typeName() != 'GafferRenderMan::RenderManRender':
		print( 'needs to be a renderman render node!' )
		return
	if sel['mode'].getValue() != 'generate':
		print( 'abandoning run as output is set to render!' )
		return
	for i in range( start, end + 1 ):
		script.context().setFrame( i )
		sel.execute( [script.context()] )
# Register the exporter as the "/ben/Export Ribs" item in the script window menu.
GafferUI.ScriptWindow.menuDefinition(application).append( "/ben/Export Ribs", { "command" : __exportRibs } )
| 29.592593 | 108 | 0.693367 |
f45df127fa0e54c501fe848fb39cdecf77d30e0b | 247 | py | Python | Aulasatualizada/execicio_curso em video/execicio_sequencia_fibonaci.py | swellington231/AulaPaython | 7b72ddec4d85f4660c0c395de07a133993aa2c70 | [
"MIT"
] | null | null | null | Aulasatualizada/execicio_curso em video/execicio_sequencia_fibonaci.py | swellington231/AulaPaython | 7b72ddec4d85f4660c0c395de07a133993aa2c70 | [
"MIT"
] | null | null | null | Aulasatualizada/execicio_curso em video/execicio_sequencia_fibonaci.py | swellington231/AulaPaython | 7b72ddec4d85f4660c0c395de07a133993aa2c70 | [
"MIT"
] | null | null | null | n = int(input('Quantos termos você quer mostrar? '))
t1 = 0
t2 = 1
cont = 3
print('{} -> {} ->'.format(t1, t2),end=" ")
while cont <= n:
t3 = t1 + t2
print('{} ->'.format(t3), end=" ")
cont += 1
t1 = t2
t2 = t3
print('FIM')
| 15.4375 | 52 | 0.481781 |
054df4c93ec109e835e60a06c5513a946d887a66 | 7,738 | py | Python | cvpods/evaluation/evaluator.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/evaluation/evaluator.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/evaluation/evaluator.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
import datetime
import time
from collections import OrderedDict
from contextlib import contextmanager
from loguru import logger
import torch
from cvpods.utils import comm, log_every_n_seconds
from .registry import EVALUATOR
# Registered in the EVALUATOR registry (see .registry) so it can be looked up
# by name alongside its subclasses.
@EVALUATOR.register()
class DatasetEvaluator:
    """
    Base class for a dataset evaluator.

    The function :func:`inference_on_dataset` runs the model over
    all samples in the dataset, and hands a DatasetEvaluator the inputs/outputs.

    This class will accumulate information of the inputs/outputs (by :meth:`process`),
    and produce evaluation results in the end (by :meth:`evaluate`).
    """

    def reset(self):
        """
        Preparation for a new round of evaluation.
        Should be called before starting a round of evaluation.
        """
        pass

    def process(self, input, output):
        """
        Process an input/output pair.

        Args:
            input: the input that's used to call the model.
            output: the return value of `model(input)`
        """
        pass

    def evaluate(self):
        """
        Evaluate/summarize the performance, after processing all input/output pairs.

        Returns:
            dict:
                A new evaluator class can return a dict of arbitrary format
                as long as the user can process the results.
                In our train_net.py, we expect the following format:

                * key: the name of the task (e.g., bbox)
                * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
        """
        pass
class DatasetEvaluators(DatasetEvaluator):
    """Composite evaluator that forwards every call to a list of child evaluators.

    An empty list is a valid no-op evaluator; `inference_on_dataset` and
    `inference_on_files` in this module construct ``DatasetEvaluators([])``
    for exactly that purpose.
    """

    def __init__(self, evaluators):
        """
        Args:
            evaluators (list[DatasetEvaluator]): child evaluators; may be empty.
        """
        # BUG FIX: the previous ``assert len(evaluators)`` rejected the empty
        # list that the no-op paths in this file deliberately pass in.
        super().__init__()
        self._evaluators = evaluators

    def reset(self):
        """Reset every child evaluator."""
        for evaluator in self._evaluators:
            evaluator.reset()

    def process(self, input, output):
        """Forward the input/output pair to every child evaluator."""
        for evaluator in self._evaluators:
            evaluator.process(input, output)

    def evaluate(self):
        """Merge the children's result dicts, asserting their keys are disjoint.

        Non-main processes and children returning None contribute nothing.
        """
        results = OrderedDict()
        for evaluator in self._evaluators:
            result = evaluator.evaluate()
            if comm.is_main_process() and result is not None:
                for k, v in result.items():
                    assert (
                        k not in results
                    ), "Different evaluators produce results with the same key {}".format(k)
                    results[k] = v
        return results
def inference_on_dataset(model, data_loader, evaluator):
    """
    Run model on the data_loader and evaluate the metrics with evaluator.
    The model will be used in eval mode.

    Args:
        model (nn.Module): a module which accepts an object from
            `data_loader` and returns some outputs. It will be temporarily set to `eval` mode.

            If you wish to evaluate a model in `training` mode instead, you can
            wrap the given model and override its behavior of `.eval()` and `.train()`.
        data_loader: an iterable object with a length.
            The elements it generates will be the inputs to the model.
        evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want
            to benchmark, but don't want to do any evaluation.

    Returns:
        The return value of `evaluator.evaluate()`
    """
    num_devices = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
    logger.info("Start inference on {} data samples".format(len(data_loader)))
    total = len(data_loader)  # inference data loader must have a fixed length
    if evaluator is None:
        # create a no-op evaluator
        evaluator = DatasetEvaluators([])
    evaluator.reset()

    # The first few iterations are treated as warmup and excluded from the
    # timing statistics (timers are reset once the warmup count is reached).
    num_warmup = min(5, total - 1)
    start_time = time.perf_counter()
    total_compute_time = 0
    with inference_context(model), torch.no_grad():
        for idx, inputs in enumerate(data_loader):
            if idx == num_warmup:
                start_time = time.perf_counter()
                total_compute_time = 0

            start_compute_time = time.perf_counter()
            outputs = model(inputs)
            if torch.cuda.is_available():
                # Wait for queued GPU work to finish so the compute timing
                # reflects the actual forward pass.
                torch.cuda.synchronize()
            total_compute_time += time.perf_counter() - start_compute_time
            evaluator.process(inputs, outputs)

            # Iterations counted since the warmup reset (warmup excluded).
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            seconds_per_img = total_compute_time / iters_after_start
            if idx >= num_warmup * 2 or seconds_per_img > 5:
                total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
                eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
                log_every_n_seconds(
                    "INFO",
                    "Inference done {}/{}. {:.4f} s / sample. ETA={}".format(
                        idx + 1, total, seconds_per_img, str(eta)
                    ),
                    n=5,
                )

    # Measure the time only for this worker (before the synchronization barrier)
    total_time = time.perf_counter() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    # NOTE this format is parsed by grep
    logger.info(
        "Total inference time: {} ({:.6f} s / sample per device, on {} devices)".format(
            total_time_str, total_time / (total - num_warmup), num_devices
        )
    )
    total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
    logger.info(
        "Total inference pure compute time: {} ({:.6f} s / sample per device, "
        "on {} devices)".format(
            total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
        )
    )

    results = evaluator.evaluate()
    # An evaluator may return None when not in main process.
    # Replace it by an empty dict instead to make it easier for downstream code to handle
    if results is None:
        results = {}
    return results
def inference_on_files(evaluator):
    """
    Evaluate the metrics with evaluator on the predicted files

    Args:
        evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want
            to benchmark, but don't want to do any evaluation.

    Returns:
        The return value of `evaluator.evaluate()`
    """
    logger.info("Start evaluate on dumped prediction")
    if evaluator is None:
        # Fall back to a no-op evaluator.
        evaluator = DatasetEvaluators([])
    evaluator.reset()

    tic = time.perf_counter()
    results = evaluator.evaluate_files()
    elapsed = time.perf_counter() - tic
    # NOTE this format is parsed by grep
    logger.info("Total inference time: {}".format(str(datetime.timedelta(seconds=elapsed))))

    # An evaluator may return None when not in main process; normalize that
    # to an empty dict so downstream code always receives a dict.
    return {} if results is None else results
@contextmanager
def inference_context(model):
    """Temporarily switch *model* to eval mode, restoring its previous
    training mode when the block exits normally.

    Args:
        model: a torch Module
    """
    was_training = model.training
    model.eval()
    yield
    model.train(was_training)
| 35.013575 | 99 | 0.634919 |
22dc3aaa57a0c1146184164955c347c0ad653030 | 7,358 | py | Python | utils/build_swift/tests/test_presets.py | 000james000/swift | a4fcc48fddd2d03e853728654e6ad692de75fad3 | [
"Apache-2.0"
] | null | null | null | utils/build_swift/tests/test_presets.py | 000james000/swift | a4fcc48fddd2d03e853728654e6ad692de75fad3 | [
"Apache-2.0"
] | null | null | null | utils/build_swift/tests/test_presets.py | 000james000/swift | a4fcc48fddd2d03e853728654e6ad692de75fad3 | [
"Apache-2.0"
] | null | null | null | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import unicode_literals
import os
from six.moves import configparser
from .utils import TestCase, UTILS_PATH, add_metaclass
from ..build_swift import presets
from ..build_swift.presets import Preset, PresetParser
PRESET_FILES = [
os.path.join(UTILS_PATH, 'build-presets.ini'),
]
PRESET_DEFAULTS = {
'darwin_toolchain_alias': 'Alias',
'darwin_toolchain_bundle_identifier': 'BundleIdentifier',
'darwin_toolchain_display_name': 'DisplayName',
'darwin_toolchain_display_name_short': 'DispalyNameShort',
'darwin_toolchain_version': '1.0',
'darwin_toolchain_xctoolchain_name': 'default',
'extra_swift_args': '',
'install_destdir': '/tmp/install',
'install_symroot': '/tmp/install/symroot',
'install_toolchain_dir': '/tmp/install/toolchain',
'installable_package': '/tmp/install/pkg',
'swift_install_destdir': '/tmp/install/swift',
'symbols_package': '/path/to/symbols/package',
'ndk_path': '/path/to/ndk',
'arm_dir': '/path/to/arm',
'toolchain_path': '/tmp/toolchain',
}
SAMPLE_PRESET = """
[preset: sample]
# This is a comment
ios
tvos
watchos
test
validation-test
lit-args=-v
compiler-vendor=apple
# The '--' argument is now unnecessary
dash-dash
verbose-build
build-ninja
# Default interpolation
install-symroot=%(install_symroot)s
"""
IGNORED_SECTION = """
[section_name]
random-options=1
"""
MIXIN_ORDER_PRESETS = """
[preset: test_mixin]
first-opt=0
second-opt=1
[preset: test]
first-opt=1
mixin-preset=test_mixin
second-opt=2
"""
INTERPOLATED_PRESET = """
[preset: test]
install-symroot=%(install_symroot)s
"""
DUPLICATE_PRESET_NAMES = """
[preset: test]
ios
[preset: test]
tvos
"""
DUPLICATE_PRESET_OPTIONS = """
[preset: test]
ios
ios
"""
# -----------------------------------------------------------------------------
class TestPreset(TestCase):
    """Unit tests for the Preset value object."""

    def test_format_args(self):
        # A None value renders as a bare flag; a string value as --flag=value.
        sample = Preset('sample', [('--ios', None), ('--test', '1')])
        self.assertEqual(sample.format_args(), ['--ios', '--test=1'])
# -----------------------------------------------------------------------------
class TestPresetParserMeta(type):
    """Metaclass used to dynamically generate test methods to validate all of
    the available presets.
    """

    def __new__(cls, name, bases, attrs):
        # Parse the real preset files once at class-creation time.
        preset_parser = PresetParser()
        preset_parser.read(PRESET_FILES)

        # Generate tests for each preset
        for preset_name in preset_parser.preset_names:
            test_name = 'test_get_preset_' + preset_name
            attrs[test_name] = cls.generate_get_preset_test(
                preset_parser, preset_name)

        return super(TestPresetParserMeta, cls).__new__(
            cls, name, bases, attrs)

    @classmethod
    def generate_get_preset_test(cls, preset_parser, preset_name):
        # Returns a bound-style test function closed over the parser and the
        # preset name; it passes as long as get_preset does not raise.
        def test(self):
            preset_parser.get_preset(preset_name, vars=PRESET_DEFAULTS)

        return test
# The metaclass adds one generated test per preset found in PRESET_FILES,
# in addition to the hand-written tests below.
@add_metaclass(TestPresetParserMeta)
class TestPresetParser(TestCase):
    """Behavioral tests for PresetParser: file/string reading, mixin
    expansion, interpolation, duplicate detection and error reporting.
    """

    def test_read(self):
        parser = PresetParser()
        parser.read(PRESET_FILES)

    def test_read_invalid_files(self):
        parser = PresetParser()
        with self.assertRaises(presets.UnparsedFilesError) as cm:
            parser.read(['nonsense-presets.ini'])

        e = cm.exception
        self.assertListEqual(e.filenames, ['nonsense-presets.ini'])

    def test_read_file(self):
        parser = PresetParser()
        parser.read_file(PRESET_FILES[0])

    def test_read_string(self):
        parser = PresetParser()
        parser.read_string(SAMPLE_PRESET)

        preset = parser.get_preset('sample', vars={'install_symroot': '/tmp'})
        self.assertIsNotNone(preset)

        self.assertEqual(preset.name, 'sample')
        self.assertListEqual(preset.args, [
            ('--ios', None),
            ('--tvos', None),
            ('--watchos', None),
            ('--test', None),
            ('--validation-test', None),
            ('--lit-args', '-v'),
            ('--compiler-vendor', 'apple'),
            ('--verbose-build', None),
            ('--build-ninja', None),
            ('--install-symroot', '/tmp')
        ])

    def test_parser_ignores_non_preset_sections(self):
        parser = PresetParser()
        parser.read_string(IGNORED_SECTION)

        self.assertEqual(len(parser._presets), 0)

    def test_mixin_expansion_preserves_argument_order(self):
        """Mixins should be expanded in-place.
        """
        parser = PresetParser()
        parser.read_string(MIXIN_ORDER_PRESETS)

        preset = parser.get_preset('test')
        self.assertListEqual(preset.format_args(), [
            '--first-opt=1',
            # Mixin arguments
            '--first-opt=0',
            '--second-opt=1',
            '--second-opt=2',
        ])

    def test_interpolation_error(self):
        parser = PresetParser()
        parser.read_string(INTERPOLATED_PRESET)

        with self.assertRaises(presets.InterpolationError) as cm:
            parser.get_preset('test')

        e = cm.exception
        self.assertEqual(e.preset_name, 'test')
        self.assertEqual(e.option, '--install-symroot')
        self.assertEqual(e.rawval, '%(install_symroot)s')
        self.assertEqual(e.reference, 'install_symroot')

    def test_duplicate_option_error(self):
        # Skip test if using the Python 2 ConfigParser module
        if not hasattr(configparser, 'DuplicateOptionError'):
            return

        parser = PresetParser()
        with self.assertRaises(presets.DuplicateOptionError) as cm:
            parser.read_string(DUPLICATE_PRESET_OPTIONS)

        e = cm.exception
        self.assertEqual(e.preset_name, 'test')
        self.assertEqual(e.option, 'ios')

    def test_duplicate_preset_error(self):
        # Skip test if using the Python 2 ConfigParser module
        if not hasattr(configparser, 'DuplicateOptionError'):
            return

        parser = PresetParser()
        with self.assertRaises(presets.DuplicatePresetError) as cm:
            parser.read_string(DUPLICATE_PRESET_NAMES)

        e = cm.exception
        self.assertEqual(e.preset_name, 'test')

    def test_get_preset_raw(self):
        parser = PresetParser()
        parser.read_string(INTERPOLATED_PRESET)

        # raw=True skips interpolation, so the placeholder survives verbatim.
        preset = parser.get_preset('test', raw=True)
        self.assertEqual(preset.args, [
            ('--install-symroot', '%(install_symroot)s')
        ])

    def test_get_missing_preset(self):
        parser = PresetParser()
        with self.assertRaises(presets.PresetNotFoundError) as cm:
            parser.get_preset('test')

        e = cm.exception
        self.assertEqual(e.preset_name, 'test')

    def test_preset_names(self):
        parser = PresetParser()
        parser.read_string('[preset: foo]')
        parser.read_string('[preset: bar]')
        parser.read_string('[preset: baz]')

        self.assertEqual(set(parser.preset_names),
                         set(['foo', 'bar', 'baz']))
| 26.467626 | 79 | 0.634547 |
3f6c9322410050f5bb93d5f0287106e17c2ec499 | 350 | py | Python | src/logs/log_writer.py | Akanni96/feng-hirst-rst-parser | 973dba0156a099ba4f1ad2dc3e18ea72530c64e0 | [
"BSD-2-Clause"
] | 1 | 2021-08-19T14:01:09.000Z | 2021-08-19T14:01:09.000Z | src/logs/log_writer.py | Akanni96/feng-hirst-rst-parser | 973dba0156a099ba4f1ad2dc3e18ea72530c64e0 | [
"BSD-2-Clause"
] | null | null | null | src/logs/log_writer.py | Akanni96/feng-hirst-rst-parser | 973dba0156a099ba4f1ad2dc3e18ea72530c64e0 | [
"BSD-2-Clause"
] | null | null | null | '''
Created on 2014-01-17
@author: Wei
'''
class LogWriter:
    """Thin wrapper around a file-like writer that appends a newline to each
    message.

    A ``None`` writer is accepted and turns every operation into a no-op.
    """

    def __init__(self, writer):
        """Store *writer* (any object with write/flush/close) or None."""
        self.writer = writer

    def write(self, text):
        """Write *text* plus a trailing newline, if a writer is attached."""
        # BUG FIX: was ``!= None`` (PEP 8 E711); identity check is correct.
        if self.writer is not None:
            self.writer.write(text + '\n')

    def close(self):
        """Flush and close the underlying writer, if any."""
        # BUG FIX: was a truthiness check, inconsistent with write(); a valid
        # but falsy writer object would have been silently skipped.
        if self.writer is not None:
            self.writer.flush()
            self.writer.close()
0a3daa97943e9b058d264aa1773450dfc6a35fa3 | 496 | py | Python | api/serialisers.py | TIJMacLean/FlyMyPlane | 3e3f069b37482456857146383e3c5d8bbcdfdb0e | [
"Apache-2.0"
] | null | null | null | api/serialisers.py | TIJMacLean/FlyMyPlane | 3e3f069b37482456857146383e3c5d8bbcdfdb0e | [
"Apache-2.0"
] | null | null | null | api/serialisers.py | TIJMacLean/FlyMyPlane | 3e3f069b37482456857146383e3c5d8bbcdfdb0e | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from .models import Aircraft, Owner
class OwnerSerialiser(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing an Owner's contact details."""

    class Meta:
        # Only these model fields are serialized.
        model = Owner
        fields = ('name', 'email_address', 'phone_number')
class AircraftSerialiser(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing an Aircraft's registration and hire details."""

    class Meta:
        # Only these model fields are serialized.
        model = Aircraft
        fields = ('registration', 'serial_number', 'aircraft_class', 'aircraft_type',
                  'number_of_seats', 'cost_per_hour', 'home_base', 'owner')
03776bcb8a00e503c1b8616656cf2a290bbe52fc | 611 | py | Python | setup.py | bnbalsamo/slackbotframework | dc37d1b466e7a76be3a3edac03c9980805fb3645 | [
"Apache-2.0"
] | null | null | null | setup.py | bnbalsamo/slackbotframework | dc37d1b466e7a76be3a3edac03c9980805fb3645 | [
"Apache-2.0"
] | null | null | null | setup.py | bnbalsamo/slackbotframework | dc37d1b466e7a76be3a3edac03c9980805fb3645 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md for use as the long description.

    The file is decoded explicitly as UTF-8 so the build does not depend on
    the machine's locale default encoding.
    """
    with open("README.md", 'r', encoding='utf-8') as f:
        return f.read()
# Package metadata consumed by pip / setuptools when building and installing.
setup(
    name="slackbotframework",
    description="A framework for building internal integration slack bots",
    version="0.0.1",
    long_description=readme(),
    author="Brian Balsamo",
    author_email="Brian@BrianBalsamo.com",
    # No packages are excluded; find_packages discovers everything.
    packages=find_packages(
        exclude=[
        ]
    ),
    include_package_data=True,
    url='https://github.com/bnbalsamo/slackbotframework',
    # No runtime dependencies are declared.
    install_requires=[
    ],
    tests_require=[
        'pytest'
    ],
    test_suite='tests'
)
| 21.068966 | 75 | 0.639935 |
b70fe50655f8e0742e043196b2e251e75724d983 | 9,187 | py | Python | pytrafikverket/trafikverket_ferry.py | AnderssonPeter/pytrafikverket | d7b6efc755f48805a1f67959b1745841f5b463a6 | [
"MIT"
] | 5 | 2017-05-15T15:00:33.000Z | 2018-03-17T09:51:31.000Z | pytrafikverket/trafikverket_ferry.py | AnderssonPeter/pytrafikverket | d7b6efc755f48805a1f67959b1745841f5b463a6 | [
"MIT"
] | 1 | 2018-08-20T18:56:15.000Z | 2018-08-22T17:16:33.000Z | pytrafikverket/trafikverket_ferry.py | AnderssonPeter/pytrafikverket | d7b6efc755f48805a1f67959b1745841f5b463a6 | [
"MIT"
] | 1 | 2018-08-18T14:56:07.000Z | 2018-08-18T14:56:07.000Z | """Enables retreival of ferry departure information from Trafikverket API."""
import typing
from datetime import datetime
from enum import Enum
from typing import List
import aiohttp
from pytrafikverket.trafikverket import (
FieldFilter,
FieldSort,
FilterOperation,
NodeHelper,
SortOrder,
Trafikverket,
)
class RouteInfo(object):
    """Holds the identifying attributes of a single ferry route."""

    # Fields requested from the Trafikverket API for this object type.
    _required_fields = ["Id", "Name", "Shortname", "Type.Name"]

    def __init__(self, id: str, name: str, short_name: str, route_type: str):
        """Store the route id, full name, short name and route type."""
        self.id = id
        self.name = name
        self.short_name = short_name
        self.route_type = route_type

    @classmethod
    def from_xml_node(cls, node):
        """Build a RouteInfo from an XML node returned by the API."""
        helper = NodeHelper(node)
        return cls(
            helper.get_text("Id"),
            helper.get_text("Name"),
            helper.get_text("Shortname"),
            helper.get_text("Type/Name"),
        )
class DeviationInfo(object):
    """Holds the details of a single traffic situation/deviation."""

    # Fields requested from the Trafikverket API for this object type.
    _required_fields = [
        "Deviation.Id",
        "Deviation.Header",
        "Deviation.EndTime",
        "Deviation.StartTime",
        "Deviation.Message",
        "Deviation.IconId",
        "Deviation.LocationDescriptor",
    ]

    def __init__(
        self,
        id: str,
        header: str,
        message: str,
        start_time: datetime,
        end_time: datetime,
        icon_id: str,
        location_desc: str,
    ):
        """Store the deviation's id, texts, time window, icon and location."""
        self.id = id
        self.header = header
        self.message = message
        self.start_time = start_time
        self.end_time = end_time
        self.icon_id = icon_id
        self.location_desc = location_desc

    @classmethod
    def from_xml_node(cls, node):
        """Build a DeviationInfo from an XML node returned by the API.

        NOTE(review): start/end times are read with get_text although the
        constructor annotates them as datetime — confirm intended.
        """
        helper = NodeHelper(node)
        return cls(
            helper.get_text("Deviation/Id"),
            helper.get_text("Deviation/Header"),
            helper.get_text("Deviation/Message"),
            helper.get_text("Deviation/StartTime"),
            helper.get_text("Deviation/EndTime"),
            helper.get_text("Deviation/IconId"),
            helper.get_text("Deviation/LocationDescriptor"),
        )
class FerryStopStatus(Enum):
    """Contain the different ferry stop statuses."""

    on_time = "scheduled to arrive on schedule"
    canceled = "canceled"


class FerryStop(object):
    """Contain information about a ferry departure."""

    # Fields requested from the Trafikverket API for this object type.
    _required_fields = [
        "Id",
        "Deleted",
        "DepartureTime",
        "Route.Name",
        "DeviationId",
        "ModifiedTime",
        "FromHarbor",
        "ToHarbor",
        "Info",
    ]

    def __init__(
        self,
        id,
        deleted: bool,
        departure_time: datetime,
        other_information: typing.List[str],
        deviation_id: str,
        modified_time: datetime,
        from_harbor_name: str,
        to_harbor_name: str,
    ):
        """Initialize FerryStop."""
        self.id = id
        self.deleted = deleted
        self.departure_time = departure_time
        self.other_information = other_information
        self.deviation_id = deviation_id
        self.modified_time = modified_time
        self.from_harbor_name = from_harbor_name
        self.to_harbor_name = to_harbor_name

    def get_state(self) -> FerryStopStatus:
        """Retrieve the state of the departure.

        Returns:
            FerryStopStatus.canceled for a deleted departure, otherwise
            FerryStopStatus.on_time.
        """
        # BUG FIX: previously returned ``FerryStopStatus.deleted``, which is
        # not a member of the enum and raised AttributeError for any deleted
        # departure. A deleted departure maps to the canceled status.
        if self.deleted:
            return FerryStopStatus.canceled
        return FerryStopStatus.on_time

    @classmethod
    def from_xml_node(cls, node):
        """Map the path in the return XML data."""
        node_helper = NodeHelper(node)
        id = node_helper.get_text("Id")
        deleted = node_helper.get_bool("Deleted")
        departure_time = node_helper.get_datetime("DepartureTime")
        other_information = node_helper.get_texts("Info")
        deviation_id = node_helper.get_texts("DeviationId")
        modified_time = node_helper.get_datetime_for_modified("ModifiedTime")
        from_harbor_name = node_helper.get_text("FromHarbor/Name")
        to_harbor_name = node_helper.get_text("ToHarbor/Name")
        return cls(
            id,
            deleted,
            departure_time,
            other_information,
            deviation_id,
            modified_time,
            from_harbor_name,
            to_harbor_name,
        )
class TrafikverketFerry(object):
    """Class used to communicate with trafikverket's ferry route api."""

    def __init__(self, client_session: aiohttp.ClientSession, api_key: str):
        """Initialize with an aiohttp session and the Trafikverket API key."""
        self._api = Trafikverket(client_session, api_key)

    async def async_get_ferry_route(self, route_name: str) -> RouteInfo:
        """Retrieve the single ferry route whose name equals *route_name*.

        Raises:
            ValueError: if no route or more than one route matches.
        """
        routes = await self._api.async_make_request(
            "FerryRoute",
            "1.2",
            RouteInfo._required_fields,
            [FieldFilter(FilterOperation.equal, "Name", route_name)],
        )
        if len(routes) == 0:
            raise ValueError("Could not find a route with the specified name")
        if len(routes) > 1:
            raise ValueError("Found multiple routes with the specified name")
        return RouteInfo.from_xml_node(routes[0])

    async def async_get_ferry_route_id(self, route_id: int) -> RouteInfo:
        """Retrieve the single ferry route with the given numeric id.

        Raises:
            ValueError: if no route or more than one route matches.
        """
        routes = await self._api.async_make_request(
            "FerryRoute",
            "1.2",
            RouteInfo._required_fields,
            [FieldFilter(FilterOperation.equal, "Id", str(route_id))],
        )
        # BUG FIX: the messages previously said "name" although this method
        # looks routes up by id.
        if len(routes) == 0:
            raise ValueError("Could not find a route with the specified id")
        if len(routes) > 1:
            raise ValueError("Found multiple routes with the specified id")
        return RouteInfo.from_xml_node(routes[0])

    async def async_search_ferry_routes(self, name: str) -> typing.List[RouteInfo]:
        """Search for ferry routes whose name matches *name* (LIKE match).

        Raises:
            ValueError: if nothing matches.
        """
        routes = await self._api.async_make_request(
            "FerryRoute",
            "1.2",
            RouteInfo._required_fields,
            [FieldFilter(FilterOperation.like, "Name", name)],
        )
        if len(routes) == 0:
            raise ValueError("Could not find a ferry route with the specified name")
        return [RouteInfo.from_xml_node(route) for route in routes]

    async def async_get_next_ferry_stops(
        self,
        from_harbor_name: str,
        to_harnbor_name: str = "",
        after_time: typing.Optional[datetime] = None,
        number_of_stops: int = 1,
    ) -> List[FerryStop]:
        """Retrieve the next *number_of_stops* departures from a harbor.

        Args:
            from_harbor_name: departure harbor (exact name).
            to_harnbor_name: optional destination harbor filter.
            after_time: only departures at or after this time; defaults to
                the current time at the moment of the call.
            number_of_stops: maximum number of departures to return.

        Raises:
            ValueError: if no matching announcement is found.
        """
        # BUG FIX: ``after_time=datetime.now()`` as a default was evaluated
        # once at import time; resolve "now" at call time instead.
        if after_time is None:
            after_time = datetime.now()
        date_as_text = after_time.strftime(Trafikverket.date_time_format)

        filters = [
            FieldFilter(FilterOperation.equal, "FromHarbor.Name", from_harbor_name),
            FieldFilter(
                FilterOperation.greater_than_equal, "DepartureTime", date_as_text
            ),
        ]
        if to_harnbor_name:
            filters.append(
                FieldFilter(FilterOperation.equal, "ToHarbor.Name", to_harnbor_name)
            )
        sorting = [FieldSort("DepartureTime", SortOrder.ascending)]

        ferry_announcements = await self._api.async_make_request(
            "FerryAnnouncement",
            "1.2",
            FerryStop._required_fields,
            filters,
            number_of_stops,
            sorting,
        )
        if len(ferry_announcements) == 0:
            raise ValueError("No FerryAnnouncement found")
        return [FerryStop.from_xml_node(a) for a in ferry_announcements]

    async def async_get_next_ferry_stop(
        self,
        from_harbor_name: str,
        to_harnbor_name: str = "",
        after_time: typing.Optional[datetime] = None,
    ) -> FerryStop:
        """Retrieve the single next departure from a harbor."""
        stops = await self.async_get_next_ferry_stops(
            from_harbor_name, to_harnbor_name, after_time, 1
        )
        return stops[0]

    async def async_get_deviation(self, id: str) -> DeviationInfo:
        """Retrieve deviation info for the given Deviation Id.

        Raises:
            ValueError: if no deviation or more than one deviation is found.
        """
        filters = [FieldFilter(FilterOperation.equal, "Deviation.Id", id)]
        deviations = await self._api.async_make_request(
            "Situation", "1.5", DeviationInfo._required_fields, filters
        )
        if len(deviations) == 0:
            raise ValueError("No Deviation found")
        if len(deviations) > 1:
            raise ValueError("Multiple Deviations found")
        return DeviationInfo.from_xml_node(deviations[0])
| 32.348592 | 84 | 0.620115 |
8c1b964476f1cc7dea393d374f81f40a03943017 | 127 | py | Python | netexec/__init__.py | dogoncouch/netexec | 3a0d675dc65058e83aa215536363054a8b9b4286 | [
"MIT"
] | null | null | null | netexec/__init__.py | dogoncouch/netexec | 3a0d675dc65058e83aa215536363054a8b9b4286 | [
"MIT"
] | null | null | null | netexec/__init__.py | dogoncouch/netexec | 3a0d675dc65058e83aa215536363054a8b9b4286 | [
"MIT"
] | null | null | null | __version__ = '0.1'
__author__ = 'Dan Persons <dpersonsdev@gmail.com>'
__license__ = 'MIT License'
import netexec.devicetypes
| 21.166667 | 50 | 0.76378 |
d461d9259cf33892898cf9399f3b37a102e05810 | 447 | py | Python | data/scripts/templates/object/draft_schematic/food/shared_dish_trimpian.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/food/shared_dish_trimpian.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/food/shared_dish_trimpian.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Intangible draft-schematic object for this template.

	Args:
		kernel: engine kernel supplied by the template loader (unused here).

	Returns:
		Intangible: the configured template result object.
	"""
	result = Intangible()
	result.template = "object/draft_schematic/food/shared_dish_trimpian.iff"
	# -1 means no attribute template is attached.
	result.attribute_template_id = -1
	result.stfName("string_id_table","")

	#### BEGIN MODIFICATIONS ####

	#### END MODIFICATIONS ####

	return result
8cf81cc73b9d1f32d986864dcaf315803d21a294 | 6,205 | py | Python | sdk/python/pulumi_azure_nextgen/relay/v20170401/get_namespace.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/relay/v20170401/get_namespace.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/relay/v20170401/get_namespace.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNamespaceResult',
'AwaitableGetNamespaceResult',
'get_namespace',
]
@pulumi.output_type
class GetNamespaceResult:
"""
Description of a namespace resource.
"""
def __init__(__self__, created_at=None, location=None, metric_id=None, name=None, provisioning_state=None, service_bus_endpoint=None, sku=None, tags=None, type=None, updated_at=None):
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if metric_id and not isinstance(metric_id, str):
raise TypeError("Expected argument 'metric_id' to be a str")
pulumi.set(__self__, "metric_id", metric_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if service_bus_endpoint and not isinstance(service_bus_endpoint, str):
raise TypeError("Expected argument 'service_bus_endpoint' to be a str")
pulumi.set(__self__, "service_bus_endpoint", service_bus_endpoint)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> str:
"""
The time the namespace was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="metricId")
def metric_id(self) -> str:
"""
Identifier for Azure Insights metrics.
"""
return pulumi.get(self, "metric_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="serviceBusEndpoint")
def service_bus_endpoint(self) -> str:
"""
Endpoint you can use to perform Service Bus operations.
"""
return pulumi.get(self, "service_bus_endpoint")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
SKU of the namespace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> str:
"""
The time the namespace was updated.
"""
return pulumi.get(self, "updated_at")
class AwaitableGetNamespaceResult(GetNamespaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNamespaceResult(
created_at=self.created_at,
location=self.location,
metric_id=self.metric_id,
name=self.name,
provisioning_state=self.provisioning_state,
service_bus_endpoint=self.service_bus_endpoint,
sku=self.sku,
tags=self.tags,
type=self.type,
updated_at=self.updated_at)
def get_namespace(namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
"""
Use this data source to access information about an existing resource.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:relay/v20170401:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value
return AwaitableGetNamespaceResult(
created_at=__ret__.created_at,
location=__ret__.location,
metric_id=__ret__.metric_id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
service_bus_endpoint=__ret__.service_bus_endpoint,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
updated_at=__ret__.updated_at)
| 34.472222 | 187 | 0.641902 |
fbdcc8f41dbe0bd90c6916d045e454e414492195 | 2,515 | py | Python | backends/core/database.py | jwstafford/crmint | 925c581551157a6f2e0795ab6fe997251af1d504 | [
"Apache-2.0"
] | null | null | null | backends/core/database.py | jwstafford/crmint | 925c581551157a6f2e0795ab6fe997251af1d504 | [
"Apache-2.0"
] | null | null | null | backends/core/database.py | jwstafford/crmint | 925c581551157a6f2e0795ab6fe997251af1d504 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy_mixins import AllFeaturesMixin, ReprMixin
from core.mixins import TimestampsMixin
engine = None
Base = declarative_base()
class BaseModel(Base, AllFeaturesMixin, TimestampsMixin):
"""Base class for models"""
__abstract__ = True
__repr__ = ReprMixin.__repr__
def init_engine(uri, **kwargs):
"""Initialization db engine"""
global engine
engine = create_engine(uri, **kwargs)
session = scoped_session(sessionmaker(bind=engine, autocommit=True))
BaseModel.set_session(session)
return engine
def init_db():
"""Create model tables.
NB: Import all modules here that might define models so that
they will be registered properly on the metadata. Otherwise
you will have to import them first before calling init_db().
"""
from core import models # NOQA
Base.metadata.create_all(bind=engine)
def load_fixtures(logger_func=None):
"""Load initial data into the database
:param: Logger function to display the loading state
"""
from core import models
# added two more rows to list, for google ads auth code and refresh token
general_settings = ['client_id', 'client_secret', 'emails_for_notifications', 'google_ads_authentication_code', 'google_ads_refresh_token', 'developer_token']
for setting in general_settings:
general_setting = models.GeneralSetting.where(name=setting).first()
if not general_setting:
general_setting = models.GeneralSetting()
general_setting.name = setting
general_setting.save()
if logger_func:
logger_func('Added setting %s' % setting)
def reset_jobs_and_pipelines_statuses_to_idle():
from core.models import Pipeline
for pipeline in Pipeline.all():
for job in pipeline.jobs:
job.update(status='idle')
pipeline.update(status='idle')
| 33.533333 | 160 | 0.757455 |
0f86289cff8afdf66f7af8846378d999d60f7fbe | 32,581 | py | Python | tests/migrations/test_commands.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | tests/migrations/test_commands.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | tests/migrations/test_commands.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | 1 | 2020-05-25T08:55:19.000Z | 2020-05-25T08:55:19.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import importlib
import os
import shutil
from django.apps import apps
from django.db import connection, models
from django.core.management import call_command, CommandError
from django.db.migrations import questioner
from django.test import ignore_warnings, mock, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from .models import UnicodeModel, UnserializableModel
from .test_base import MigrationTestBase
class MigrateTests(MigrationTestBase):
"""
Tests running the migrate command.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate(self):
"""
Tests basic usage of the migrate command.
"""
# Make sure no tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run migrations all the way
call_command("migrate", verbosity=0)
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_migrate_conflict_exit(self):
"""
Makes sure that migrate exits if it detects a conflict.
"""
with self.assertRaisesMessage(CommandError, "Conflicting migrations detected"):
call_command("migrate", "migrations")
@ignore_warnings(category=RemovedInDjango20Warning)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate_list(self):
"""
Tests --list output of migrate command
"""
out = six.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: True):
call_command("migrate", list=True, stdout=out, verbosity=0, no_color=False)
self.assertEqual(
'\x1b[1mmigrations\n\x1b[0m'
' [ ] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
call_command("migrate", "migrations", "0001", verbosity=0)
out = six.StringIO()
# Giving the explicit app_label tests for selective `show_migration_list` in the command
call_command("migrate", "migrations", list=True, stdout=out, verbosity=0, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_showmigrations_list(self):
"""
Tests --list output of showmigrations command
"""
out = six.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: True):
call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False)
self.assertEqual(
'\x1b[1mmigrations\n\x1b[0m'
' [ ] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
call_command("migrate", "migrations", "0001", verbosity=0)
out = six.StringIO()
# Giving the explicit app_label tests for selective `show_list` in the command
call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True)
self.assertEqual(
'migrations\n'
' [x] 0001_initial\n'
' [ ] 0002_second\n',
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_showmigrations_plan(self):
"""
Tests --plan output of showmigrations command
"""
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertIn(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third\n"
"[ ] migrations.0002_second",
out.getvalue().lower()
)
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertIn(
"[ ] migrations.0001_initial\n"
"[ ] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial)",
out.getvalue().lower()
)
call_command("migrate", "migrations", "0003", verbosity=0)
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertIn(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third\n"
"[ ] migrations.0002_second",
out.getvalue().lower()
)
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertIn(
"[x] migrations.0001_initial\n"
"[x] migrations.0003_third ... (migrations.0001_initial)\n"
"[ ] migrations.0002_second ... (migrations.0001_initial)",
out.getvalue().lower()
)
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"})
def test_showmigrations_plan_no_migrations(self):
"""
Tests --plan output of showmigrations command without migrations
"""
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual("", out.getvalue().lower())
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual("", out.getvalue().lower())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_showmigrations_plan_squashed(self):
"""
Tests --plan output of showmigrations command with squashed migrations.
"""
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto\n"
"[ ] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower()
)
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[ ] migrations.1_auto\n"
"[ ] migrations.2_auto ... (migrations.1_auto)\n"
"[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower()
)
call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto\n"
"[x] migrations.3_squashed_5\n"
"[ ] migrations.6_auto\n"
"[ ] migrations.7_auto\n",
out.getvalue().lower()
)
out = six.StringIO()
call_command("showmigrations", format='plan', stdout=out, verbosity=2)
self.assertEqual(
"[x] migrations.1_auto\n"
"[x] migrations.2_auto ... (migrations.1_auto)\n"
"[x] migrations.3_squashed_5 ... (migrations.2_auto)\n"
"[ ] migrations.6_auto ... (migrations.3_squashed_5)\n"
"[ ] migrations.7_auto ... (migrations.6_auto)\n",
out.getvalue().lower()
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_sqlmigrate(self):
"""
Makes sure that sqlmigrate does something.
"""
# Make sure the output is wrapped in a transaction
out = six.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
output = out.getvalue()
self.assertIn(connection.ops.start_transaction_sql(), output)
self.assertIn(connection.ops.end_transaction_sql(), output)
# Test forwards. All the databases agree on CREATE TABLE, at least.
out = six.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out)
self.assertIn("create table", out.getvalue().lower())
# Cannot generate the reverse SQL unless we've applied the migration.
call_command("migrate", "migrations", verbosity=0)
# And backwards is a DROP TABLE
out = six.StringIO()
call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
self.assertIn("drop table", out.getvalue().lower())
# Cleanup by unmigrating everything
call_command("migrate", "migrations", "zero", verbosity=0)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.migrated_unapplied_app",
"migrations.migrations_test_apps.unmigrated_app"])
def test_regression_22823_unmigrated_fk_to_migrated_model(self):
"""
https://code.djangoproject.com/ticket/22823
Assuming you have 3 apps, `A`, `B`, and `C`, such that:
* `A` has migrations
* `B` has a migration we want to apply
* `C` has no migrations, but has an FK to `A`
When we try to migrate "B", an exception occurs because the
"B" was not included in the ProjectState that is used to detect
soft-applied migrations.
"""
call_command("migrate", "migrated_unapplied_app", stdout=six.StringIO())
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
# Because the `import_module` performed in `MigrationLoader` will cache
# the migrations package, we can't reuse the same migration package
# between tests. This is only a problem for testing, since `makemigrations`
# is normally called in its own process.
creation_counter = 0
def setUp(self):
MakeMigrationsTests.creation_counter += 1
self.migration_dir = os.path.join(self.test_dir, 'migrations_%d' % self.creation_counter)
self.migration_pkg = "migrations.migrations_%d" % self.creation_counter
self._old_models = apps.app_configs['migrations'].models.copy()
def tearDown(self):
apps.app_configs['migrations'].models = self._old_models
apps.all_models['migrations'] = self._old_models
apps.clear_cache()
_cwd = os.getcwd()
os.chdir(self.test_dir)
try:
try:
self._rmrf(self.migration_dir)
except OSError:
pass
try:
self._rmrf(os.path.join(self.test_dir,
"test_migrations_path_doesnt_exist"))
except OSError:
pass
finally:
os.chdir(_cwd)
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model('migrations', UnicodeModel)
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "migrations", verbosity=0)
init_file = os.path.join(self.migration_dir, "__init__.py")
# Check for existing __init__.py file in migrations folder
self.assertTrue(os.path.exists(init_file))
with open(init_file, 'r') as fp:
content = force_text(fp.read())
self.assertEqual(content, '')
initial_file = os.path.join(self.migration_dir, "0001_initial.py")
# Check for existing 0001_initial.py file in migration folder
self.assertTrue(os.path.exists(initial_file))
with codecs.open(initial_file, 'r', encoding='utf-8') as fp:
content = fp.read()
self.assertIn('# -*- coding: utf-8 -*-', content)
self.assertIn('migrations.CreateModel', content)
if six.PY3:
self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name
self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural
self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name
self.assertIn('“Ðjáñgó”', content) # title.default
else:
self.assertIn('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8', content) # Meta.verbose_name
self.assertIn('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8\\xdf', content) # Meta.verbose_name_plural
self.assertIn('\\xda\\xd1\\xcd\\xa2\\xd3\\xd0\\xc9', content) # title.verbose_name
self.assertIn('\\u201c\\xd0j\\xe1\\xf1g\\xf3\\u201d', content) # title.default
def test_failing_migration(self):
#21280 - If a migration fails to serialize, it shouldn't generate an empty file.
apps.register_model('migrations', UnserializableModel)
with six.assertRaisesRegex(self, ValueError, r'Cannot serialize'):
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(self.migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_conflict_exit(self):
"""
Makes sure that makemigrations exits if it detects a conflict.
"""
with self.assertRaises(CommandError):
call_command("makemigrations")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_makemigrations_merge_no_conflict(self):
"""
Makes sure that makemigrations exits if in merge mode with no conflicts.
"""
out = six.StringIO()
try:
call_command("makemigrations", merge=True, stdout=out)
except CommandError:
self.fail("Makemigrations errored in merge mode with no conflicts")
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_no_app_sys_exit(self):
"""
Makes sure that makemigrations exits if a non-existent app is specified.
"""
err = six.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "this_app_does_not_exist", stderr=err)
self.assertIn("'this_app_does_not_exist' could not be found.", err.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
Makes sure that makemigrations exits if no app is specified with 'empty' mode.
"""
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
self.assertRaises(CommandError, call_command, "makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
Makes sure that makemigrations properly constructs an empty migration.
"""
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
try:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
except CommandError:
self.fail("Makemigrations errored in creating empty migration for a proper app.")
initial_file = os.path.join(self.migration_dir, "0001_initial.py")
# Check for existing 0001_initial.py file in migration folder
self.assertTrue(os.path.exists(initial_file))
with codecs.open(initial_file, 'r', encoding='utf-8') as fp:
content = fp.read()
self.assertIn('# -*- coding: utf-8 -*-', content)
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(' ', '')
self.assertIn('dependencies=[\n]', content)
self.assertIn('operations=[\n]', content)
def test_makemigrations_no_changes_no_apps(self):
"""
Makes sure that makemigrations exits when there are no changes and no apps are specified.
"""
out = six.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_changes"})
def test_makemigrations_no_changes(self):
"""
Makes sure that makemigrations exits when there are no changes to an app.
"""
out = six.StringIO()
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
Makes sure that makemigrations announces the migration at the default verbosity level.
"""
out = six.StringIO()
with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_ancestor"})
def test_makemigrations_no_common_ancestor(self):
"""
Makes sure that makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_interactive_reject(self):
"""
Makes sure that makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
old_input = questioner.input
questioner.input = lambda _: "N"
try:
call_command("makemigrations", "migrations", merge=True, interactive=True, verbosity=0)
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
finally:
questioner.input = old_input
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_interactive_accept(self):
"""
Makes sure that makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
old_input = questioner.input
questioner.input = lambda _: "y"
out = six.StringIO()
try:
call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
os.remove(merge_file)
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
finally:
questioner.input = old_input
self.assertIn("Created new merge migration", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_handle_merge(self):
"""
Makes sure that makemigrations properly merges the conflicting migrations with --noinput.
"""
out = six.StringIO()
call_command("makemigrations", "migrations", merge=True, interactive=False, stdout=out)
self.assertIn("Merging migrations", out.getvalue())
self.assertIn("Branch 0002_second", out.getvalue())
self.assertIn("Branch 0002_conflicting_second", out.getvalue())
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
os.remove(merge_file)
self.assertFalse(os.path.exists(merge_file))
self.assertIn("Created new merge migration", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_default"})
def test_makemigrations_dry_run(self):
"""
Ticket #22676 -- `makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
class Meta:
app_label = "migrations"
out = six.StringIO()
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_default"})
def test_makemigrations_dry_run_verbosity_3(self):
"""
Ticket #22675 -- Allow `makemigrations --dry-run` to output the
migrations file to stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = six.StringIO()
call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("# -*- coding: utf-8 -*-", out.getvalue())
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_path_doesnt_exist.foo.bar"})
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
Ticket #22682 -- Makemigrations fails when specifying custom location
for migration files (using MIGRATION_MODULES) if the custom path
doesn't already exist.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = six.StringIO()
call_command("makemigrations", "migrations", stdout=out)
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
# Migrations file is actually created in the expected path.
self.assertTrue(os.path.isfile(os.path.join(self.test_dir,
"test_migrations_path_doesnt_exist", "foo", "bar",
"0001_initial.py")))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
def test_makemigrations_interactive_by_default(self):
"""
Makes sure that the user is prompted to merge by default if there are
conflicts and merge is True. Answer negative to differentiate it from
behavior when --noinput is specified.
"""
# Monkeypatch interactive questioner to auto reject
old_input = questioner.input
questioner.input = lambda _: "N"
out = six.StringIO()
merge_file = os.path.join(self.test_dir, 'test_migrations_conflict', '0003_merge.py')
try:
call_command("makemigrations", "migrations", merge=True, stdout=out)
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
finally:
questioner.input = old_input
if os.path.exists(merge_file):
os.remove(merge_file)
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_changes"},
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
Makes sure that makemigrations does not raise a CommandError when an
unspecified app has conflicting migrations.
"""
try:
call_command("makemigrations", "migrations", merge=False, verbosity=0)
except CommandError:
self.fail("Makemigrations fails resolving conflicts in an unspecified app")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
Makes sure that makemigrations does not create a merge for an
unspecified app even if it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
old_input = questioner.input
questioner.input = lambda _: "y"
out = six.StringIO()
merge_file = os.path.join(self.test_dir,
'migrations_test_apps',
'unspecified_app_with_conflict',
'migrations',
'0003_merge.py')
try:
call_command("makemigrations", "migrated_app", merge=True, interactive=True, stdout=out)
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
except CommandError:
self.fail("Makemigrations fails resolving conflicts in an unspecified app")
finally:
questioner.input = old_input
if os.path.exists(merge_file):
os.remove(merge_file)
    def test_makemigrations_with_custom_name(self):
        """
        Makes sure that makemigrations generate a custom migration.
        """
        def cmd(migration_count, migration_name, *args):
            # Helper: run makemigrations with --name and return the generated
            # file's content with all spaces stripped (for easy substring
            # assertions regardless of formatting).
            with override_settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
                try:
                    call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args)
                except CommandError:
                    self.fail("Makemigrations errored in creating empty migration with custom name for a proper app.")
            migration_file = os.path.join(self.migration_dir, "%s_%s.py" % (migration_count, migration_name))
            # Check for existing migration file in migration folder
            self.assertTrue(os.path.exists(migration_file))
            with codecs.open(migration_file, "r", encoding="utf-8") as fp:
                content = fp.read()
                self.assertIn("# -*- coding: utf-8 -*-", content)
                content = content.replace(" ", "")
            return content
        # generate an initial migration
        migration_name_0001 = "my_initial_migration"
        content = cmd("0001", migration_name_0001)
        self.assertIn("dependencies=[\n]", content)
        # Python 3.3+ importlib caches os.listdir() on some platforms like
        # Mac OS X (#23850).
        if hasattr(importlib, 'invalidate_caches'):
            importlib.invalidate_caches()
        # generate an empty migration
        migration_name_0002 = "my_custom_migration"
        content = cmd("0002", migration_name_0002, "--empty")
        # The second migration must depend on the first (custom-named) one.
        self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content)
        self.assertIn("operations=[\n]", content)
    def test_makemigrations_exit(self):
        """
        makemigrations --exit should exit with sys.exit(1) when there are no
        changes to an app.
        """
        # With pending changes: --exit must NOT raise.
        with self.settings(MIGRATION_MODULES={"migrations": self.migration_pkg}):
            call_command("makemigrations", "--exit", "migrations", verbosity=0)
        # With no changes: --exit must terminate via sys.exit.
        with self.settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_no_changes"}):
            with self.assertRaises(SystemExit):
                call_command("makemigrations", "--exit", "migrations", verbosity=0)
class SquashMigrationsTest(MigrationTestBase):
    """
    Tests running the squashmigrations command.
    """
    # Relative path of the squashed migration, immediately rebound to its
    # absolute location under the test directory.
    path = "test_migrations/0001_squashed_0002_second.py"
    path = os.path.join(MigrationTestBase.test_dir, path)
    def tearDown(self):
        # Remove the squashed migration file produced by a test run so the
        # next test starts from a clean tree.
        if os.path.exists(self.path):
            os.remove(self.path)
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_squashmigrations_squashes(self):
        """
        Tests that squashmigrations squashes migrations.
        """
        call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
        self.assertTrue(os.path.exists(self.path))
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_squashmigrations_optimizes(self):
        """
        Tests that squashmigrations optimizes operations.
        """
        out = six.StringIO()
        call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out)
        self.assertIn("Optimized from 7 operations to 5 operations.", out.getvalue())
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_ticket_23799_squashmigrations_no_optimize(self):
        """
        Makes sure that squashmigrations --no-optimize really doesn't optimize operations.
        """
        out = six.StringIO()
        call_command("squashmigrations", "migrations", "0002",
                     interactive=False, verbosity=1, no_optimize=True, stdout=out)
        self.assertIn("Skipping optimization", out.getvalue())
| 43.325798 | 136 | 0.642798 |
1c83508f27a8e46d884ad975a58307b08a7140e7 | 6,082 | py | Python | Chapter06/02_dqn_pong.py | svenkilian/Deep-Reinforcement-Learning-Hands-On | b9ab995722bd692828ce3e6f50026fa87e5f924b | [
"MIT"
] | null | null | null | Chapter06/02_dqn_pong.py | svenkilian/Deep-Reinforcement-Learning-Hands-On | b9ab995722bd692828ce3e6f50026fa87e5f924b | [
"MIT"
] | null | null | null | Chapter06/02_dqn_pong.py | svenkilian/Deep-Reinforcement-Learning-Hands-On | b9ab995722bd692828ce3e6f50026fa87e5f924b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from lib import wrappers
from lib import dqn_model
import argparse
import time
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
# --- DQN hyperparameters ---
DEFAULT_ENV_NAME = "PongNoFrameskip-v4"
MEAN_REWARD_BOUND = 19.5  # mean reward over last 100 games that counts as "solved"
GAMMA = 0.99              # Bellman discount factor
BATCH_SIZE = 32           # transitions per SGD step
REPLAY_SIZE = 10000       # replay buffer capacity
LEARNING_RATE = 1e-4
SYNC_TARGET_FRAMES = 1000   # frames between target-network syncs
REPLAY_START_SIZE = 10000   # frames collected before training starts
EPSILON_DECAY_LAST_FRAME = 10**5  # frame at which epsilon reaches its floor
EPSILON_START = 1.0
EPSILON_FINAL = 0.02
# One environment transition stored in the replay buffer.
Experience = collections.namedtuple('Experience', field_names=[
    'state', 'action', 'reward', 'done', 'new_state'])
class ExperienceBuffer:
    """Fixed-capacity FIFO store of transitions with random mini-batch sampling."""

    def __init__(self, capacity):
        # deque(maxlen=...) silently evicts the oldest transition once full.
        self.buffer = collections.deque(maxlen=capacity)

    def __len__(self):
        return len(self.buffer)

    def append(self, experience):
        """Store one transition (state, action, reward, done, new_state)."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Draw ``batch_size`` distinct transitions, returned column-wise as arrays.

        Rewards come back as float32 and done flags as uint8; the other
        columns keep numpy's inferred dtype.
        """
        picks = np.random.choice(len(self.buffer), batch_size, replace=False)
        states, actions, rewards, dones, next_states = zip(
            *(self.buffer[i] for i in picks))
        return (np.array(states),
                np.array(actions),
                np.array(rewards, dtype=np.float32),
                np.array(dones, dtype=np.uint8),
                np.array(next_states))
class Agent:
    """Plays one environment and records every transition in a replay buffer.

    :param env: Gym-style environment (must provide reset/step/action_space).
    :param exp_buffer: object with an ``append`` method (e.g. ExperienceBuffer).
    """
    def __init__(self, env, exp_buffer):
        self.env = env
        self.exp_buffer = exp_buffer
        self._reset()

    def _reset(self):
        # Bug fix: use the wrapped instance environment (self.env) instead of
        # the module-level global ``env``, which only exists when the file is
        # run as a script and made the class unusable when imported.
        self.state = self.env.reset()
        self.total_reward = 0.0

    def play_step(self, net, epsilon=0.0, device="cpu"):
        """Take one epsilon-greedy step.

        :param net: Q-network mapping a state batch to action values.
        :param epsilon: probability of taking a random action.
        :param device: torch device string for the forward pass.
        :return: the finished episode's total reward, or None mid-episode.
        """
        done_reward = None
        if np.random.random() < epsilon:
            # Explore: sample a random action from the instance env
            # (was the global ``env`` — same fix as in _reset).
            action = self.env.action_space.sample()
        else:
            # Exploit: pick argmax-Q action for the current state.
            state_a = np.array([self.state], copy=False)
            state_v = torch.tensor(state_a).to(device)
            q_vals_v = net(state_v)
            _, act_v = torch.max(q_vals_v, dim=1)
            action = int(act_v.item())
        # do step in the environment
        new_state, reward, is_done, _ = self.env.step(action)
        self.total_reward += reward
        exp = Experience(self.state, action, reward, is_done, new_state)
        self.exp_buffer.append(exp)
        self.state = new_state
        if is_done:
            done_reward = self.total_reward
            self._reset()
        return done_reward
def calc_loss(batch, net, tgt_net, device="cpu"):
    """MSE loss between predicted Q(s,a) and the one-step Bellman target.

    ``batch`` is the 5-tuple of arrays produced by ExperienceBuffer.sample.
    ``net`` is the trained network, ``tgt_net`` the frozen target network
    used to evaluate the next-state values. Relies on the module-level
    constant GAMMA for discounting.
    """
    states, actions, rewards, dones, next_states = batch
    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.ByteTensor(dones).to(device)
    # Q-values of the actions actually taken: gather along the action axis.
    state_action_values = net(states_v).gather(
        1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Max Q over next-state actions from the target network.
    next_state_values = tgt_net(next_states_v).max(1)[0]
    # Terminal transitions have no future value.
    next_state_values[done_mask] = 0.0
    # detach() stops gradients flowing into the target network.
    next_state_values = next_state_values.detach()
    expected_state_action_values = next_state_values * GAMMA + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False,
                        action="store_true", help="Enable cuda")
    parser.add_argument("--env", default=DEFAULT_ENV_NAME,
                        help="Name of the environment, default=" + DEFAULT_ENV_NAME)
    parser.add_argument("--reward", type=float, default=MEAN_REWARD_BOUND,
                        help="Mean reward boundary for stop of training, default=%.2f" % MEAN_REWARD_BOUND)
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    # Training network and its periodically-synced target copy.
    env = wrappers.make_env(args.env)
    net = dqn_model.DQN(env.observation_space.shape,
                        env.action_space.n).to(device)
    tgt_net = dqn_model.DQN(env.observation_space.shape,
                            env.action_space.n).to(device)
    writer = SummaryWriter(comment="-" + args.env)
    print(net)
    buffer = ExperienceBuffer(REPLAY_SIZE)
    agent = Agent(env, buffer)
    epsilon = EPSILON_START
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    total_rewards = []
    frame_idx = 0
    # ts/ts_frame track wall-clock speed (frames per second) between episodes.
    ts_frame = 0
    ts = time.time()
    best_mean_reward = None
    while True:
        frame_idx += 1
        # Linear epsilon decay down to EPSILON_FINAL.
        epsilon = max(EPSILON_FINAL, EPSILON_START -
                      frame_idx / EPSILON_DECAY_LAST_FRAME)
        reward = agent.play_step(net, epsilon, device=device)
        if reward is not None:
            # An episode just finished: log stats and maybe checkpoint.
            total_rewards.append(reward)
            speed = (frame_idx - ts_frame) / (time.time() - ts)
            ts_frame = frame_idx
            ts = time.time()
            mean_reward = np.mean(total_rewards[-100:])
            print("%d: done %d games, mean reward %.3f, eps %.2f, speed %.2f f/s" % (
                frame_idx, len(total_rewards), mean_reward, epsilon,
                speed
            ))
            writer.add_scalar("epsilon", epsilon, frame_idx)
            writer.add_scalar("speed", speed, frame_idx)
            writer.add_scalar("reward_100", mean_reward, frame_idx)
            writer.add_scalar("reward", reward, frame_idx)
            # Save the model whenever the 100-episode mean improves.
            if best_mean_reward is None or best_mean_reward < mean_reward:
                torch.save(net.state_dict(), args.env + "-best.dat")
                if best_mean_reward is not None:
                    print("Best mean reward updated %.3f -> %.3f, model saved" %
                          (best_mean_reward, mean_reward))
                best_mean_reward = mean_reward
            if mean_reward > args.reward:
                print("Solved in %d frames!" % frame_idx)
                break
        # Don't train until the replay buffer has a minimum population.
        if len(buffer) < REPLAY_START_SIZE:
            continue
        # Periodically copy the online network into the target network.
        if frame_idx % SYNC_TARGET_FRAMES == 0:
            tgt_net.load_state_dict(net.state_dict())
        optimizer.zero_grad()
        batch = buffer.sample(BATCH_SIZE)
        loss_t = calc_loss(batch, net, tgt_net, device=device)
        loss_t.backward()
        optimizer.step()
    writer.close()
| 33.60221 | 107 | 0.628905 |
da2814472f2891ae25416718f47be0badad333ac | 6,402 | py | Python | nnlm/nnm/lstm.py | dengliangshi/pynnlms | 1a08b12767cf028626f0368b993933092390f28d | [
"MIT"
] | 11 | 2017-06-21T08:54:03.000Z | 2019-05-07T06:54:20.000Z | nnlm/nnm/lstm.py | dengliangshi/pynnlms | 1a08b12767cf028626f0368b993933092390f28d | [
"MIT"
] | null | null | null | nnlm/nnm/lstm.py | dengliangshi/pynnlms | 1a08b12767cf028626f0368b993933092390f28d | [
"MIT"
] | null | null | null | #encoding utf-8
# ---------------------------------------------------------Libraries--------------------------------------------------------
# Standard Library
# Third-party Libraries
import numpy as np
# User Define Module
from nn import NN
from nodes import Nodes
from acfun import AcFun
# --------------------------------------------------------Global Strings----------------------------------------------------
"""
Recurrent Neural Network built here can be represented as:
i(t) = G(Ui*x(t) + Wi*s(t-1) + Vi*c(t-1) + bi)
f(t) = G(Uf*x(t) + Wf*s(t-1) + Vf*c(t-1) + bf)
g(t) = F(U*x(t) + W*s(t-1) + b)
c(t) = f(t) * c(t-1) + i(t) * g(t)
o(t) = G(Uo*x(t) + Wo*s(t-1) + Vo*c(t) + b0)
h(t) = F(c(t))
s(t) = o(t) * h(t)
"""
# -------------------------------------------------------------Main---------------------------------------------------------
class LSTM(NN):
    """Long Short-Term Memory Recurrent Neural Network.

    Forward equations (see module docstring): gates i/f/o, candidate g,
    cell state c and hidden output s. ``update`` implements truncated
    back-propagation through time over the sequence cached by ``run``.
    """
    def init_model(self, input_size, hidden_size, **kwargs):
        """Initialize neural network model.
        :Param input_size: size of input layer
        :Param hidden_size: size of hidden layer
        """
        # activation function for gates (defaults to sigmoid)
        self.gatefun = AcFun(kwargs.get('GATE_FUN') or 'sigmoid')
        # parameters for input gate
        self.igate = Nodes(input_size, hidden_size, kwargs.get('EN_BIAS'))
        # parameters for forget gate
        self.fgate = Nodes(input_size, hidden_size, kwargs.get('EN_BIAS'))
        # parameters for output gate
        self.ogate = Nodes(input_size, hidden_size, kwargs.get('EN_BIAS'))
        super(LSTM, self).init_model(input_size, hidden_size, **kwargs)
    def update(self, dLds, alpha, beta):
        """Update neural network's parameters using stochastic gradient descent(SGD) method.
        :Param dLds: gradient of error in hidden layer (one row per time step).
        :Param alpha: learning rate (passed through to Nodes.update).
        :Param beta: regularization/momentum coefficient for Nodes.update.
        :Return: gradient of the loss w.r.t. the input sequence (T x input_size).

        NOTE(review): at t == 0 the writes to ``dLds[t-1]`` land on
        ``dLds[-1]`` (the last row), which has already been consumed at that
        point; presumably harmless, but worth confirming.
        """
        T = len(self.x)
        # Zero the accumulated gradients on all four parameter groups.
        self.nodes.reset_error()
        self.igate.reset_error()
        self.fgate.reset_error()
        self.ogate.reset_error()
        dLdx = np.zeros((T, self.input_size))
        dLdc = np.zeros(self.hidden_size)
        # Walk backwards through time (BPTT).
        for t in xrange(T-1, -1, -1):
            # Gradient at the output gate pre-activation.
            dLdpo = dLds[t] * self.h[t] * self.gatefun.derivate(self.o[t])
            # parameters for output gate
            self.ogate.dLdu += np.outer(dLdpo, self.x[t])
            self.ogate.dLdw += np.outer(dLdpo, self.s[t-1])
            self.ogate.dLdv += np.outer(dLdpo, self.c[t-1])
            dLds[t-1] += np.dot(self.ogate.w.T, dLdpo)
            dLdx[t] += np.dot(self.ogate.u.T, dLdpo)
            dLdc += np.dot(self.ogate.v.T, dLdpo)
            # Cell-state gradient picks up the path through h(t) = F(c(t)).
            dLdc += dLds[t] * self.o[t] * self.acfun.derivate(self.h[t])
            # Pre-activation gradients for input gate, forget gate, candidate.
            dLdpi = dLdc * self.g[t] * self.gatefun.derivate(self.i[t])
            dLdpf = dLdc * self.c[t-1] * self.gatefun.derivate(self.f[t])
            dLdpg = dLdc * self.i[t] * self.acfun.derivate(self.g[t])
            # Carry the cell gradient back through the forget gate.
            dLdc = dLdc * self.f[t]
            # parameters for nodes in hidden layer
            self.nodes.dLdu += np.outer(dLdpg, self.x[t])
            self.nodes.dLdw += np.outer(dLdpg, self.s[t-1])
            dLds[t-1] += np.dot(self.nodes.w.T, dLdpg)
            dLdx[t] += np.dot(self.nodes.u.T, dLdpg)
            # parameters for input gate
            self.igate.dLdu += np.outer(dLdpi, self.x[t])
            self.igate.dLdw += np.outer(dLdpi, self.s[t-1])
            self.igate.dLdv += np.outer(dLdpi, self.c[t-1])
            dLds[t-1] += np.dot(self.igate.w.T, dLdpi)
            dLdx[t] += np.dot(self.igate.u.T, dLdpi)
            dLdc += np.dot(self.igate.v.T, dLdpi)
            # parameters for forget gate
            self.fgate.dLdu += np.outer(dLdpf, self.x[t])
            self.fgate.dLdw += np.outer(dLdpf, self.s[t-1])
            self.fgate.dLdv += np.outer(dLdpf, self.c[t-1])
            dLds[t-1] += np.dot(self.fgate.w.T, dLdpf)
            dLdx[t] += np.dot(self.fgate.u.T, dLdpf)
            dLdc += np.dot(self.fgate.v.T, dLdpf)
            if self.en_bias:
                self.nodes.dLdb += dLdpg
                self.igate.dLdb += dLdpi
                self.fgate.dLdb += dLdpf
                self.ogate.dLdb += dLdpo
        # update weight matrix of current hidden node
        self.nodes.update(alpha, beta)
        self.igate.update(alpha, beta)
        self.fgate.update(alpha, beta)
        self.ogate.update(alpha, beta)
        return dLdx
    def run(self, x):
        """Forward propagation, calculate the network for given input.
        :Param x: input sequence (length-T sequence of input vectors)
        :Return: hidden states s(0..T-1); all intermediates are cached on
                 ``self`` for the subsequent ``update`` call.
        """
        T = len(x)
        self.x = x
        self.i = np.zeros((T, self.hidden_size))
        self.f = np.zeros((T, self.hidden_size))
        self.o = np.zeros((T, self.hidden_size))
        self.g = np.zeros((T, self.hidden_size))
        self.h = np.zeros((T, self.hidden_size))
        # c and s get one extra row so that index -1 yields a zero initial state.
        self.c = np.zeros((T+1, self.hidden_size))
        self.s = np.zeros((T+1, self.hidden_size))
        for t in xrange(T):
            # input gate
            self.i[t] = self.gatefun.compute(np.dot(self.igate.u, x[t])
                + np.dot(self.igate.w, self.s[t-1])
                + np.dot(self.igate.v, self.c[t-1]) + self.igate.b)
            # forget gate
            self.f[t] = self.gatefun.compute(np.dot(self.fgate.u, x[t])
                + np.dot(self.fgate.w, self.s[t-1])
                + np.dot(self.fgate.v, self.c[t-1]) + self.fgate.b)
            # current hidden node state (candidate)
            self.g[t] = self.acfun.compute(np.dot(self.nodes.u, x[t]) +
                np.dot(self.nodes.w, self.s[t-1]) + self.nodes.b)
            # internal memory
            self.c[t] = self.f[t] * self.c[t-1] + self.i[t] * self.g[t]
            # output gate (peeks at the *updated* cell state c(t))
            self.o[t] = self.gatefun.compute(np.dot(self.ogate.u, x[t])
                + np.dot(self.ogate.w, self.s[t-1])
                + np.dot(self.ogate.v, self.c[t]) + self.ogate.b)
            self.h[t] = self.acfun.compute(self.c[t])
            # Clip to keep the recurrent state numerically bounded.
            self.s[t] = np.clip(self.o[t] * self.h[t], -50, 50)
        return self.s[:-1]
    def store(self):
        """Backup models' parameters (gates plus the base-class nodes).
        """
        self.igate.store()
        self.fgate.store()
        self.ogate.store()
        super(LSTM, self).store()
    def restore(self):
        """Roll back to previous iteration.
        """
        self.igate.restore()
        self.fgate.restore()
        self.ogate.restore()
        super(LSTM, self).restore()
a3cd62b35e01e79fe23b52d5597fdca13fb953bf | 5,044 | py | Python | GetCryptoPortfolio.py | jeev20/CryptoAssetReport | 7d8b7449f4e7fcd2c46098aeb47b76bbd12e4309 | [
"MIT"
] | 1 | 2021-04-06T04:32:32.000Z | 2021-04-06T04:32:32.000Z | GetCryptoPortfolio.py | jeev20/CryptocurrencyAssetReporter | 7d8b7449f4e7fcd2c46098aeb47b76bbd12e4309 | [
"MIT"
] | null | null | null | GetCryptoPortfolio.py | jeev20/CryptocurrencyAssetReporter | 7d8b7449f4e7fcd2c46098aeb47b76bbd12e4309 | [
"MIT"
] | null | null | null | # +
import requests
import pandas as pd
import time
import json
def GetCryptoPortfolio(year, cryptoCurrencies, inFiatCurrency):
    """
    This function returns the value of the crypto currency
    on the first day of the year. This can help in reporting crypto portfolio in Norway.
    Thanks to Coingecko for an amazing free API.
    Parameters:
    year (string):The year in YYYY format.
    cryptoCurrencies (dictionary):Keys are Coingecko coin ids (e.g. "bitcoin")
                                  and values are positions for each cryptocurrency.
    inFiatCurrency (string):The fiat currency in which the value needs to be returned
                            input examples "usd", "nok", "sek" etc.
    Returns:
    df(dataframe):The dataframe containing the portfolio value;
                  also written to output/CryptoPortfolio.csv as a side effect.
    """
    # Coingecko's history endpoint expects dd-mm-YYYY.
    date = "01-01-"+year
    apiData = {"Date" : [],
               "Cryptocurrency": [],
               "Position":[],
               "Price in {0}".format(inFiatCurrency.upper()): []} # Creating a dictionary
    for i in cryptoCurrencies.keys():
        """
        This loop sends API calls and retrieves the values and updated the apiData dictionary
        """
        url ="https://api.coingecko.com/api/v3/coins/{0}/history?date={1}".format(i.lower(), date)
        x = requests.get(url)
        response = x.json()
        # NOTE(review): raises KeyError if the coin id is unknown or the API
        # returns no market data for that date — TODO confirm desired handling.
        priceinCurrency = round(response["market_data"]["current_price"][inFiatCurrency.lower()],2)
        apiData["Date"].append(date)
        apiData["Cryptocurrency"].append(i)
        apiData["Position"].append(cryptoCurrencies[i])
        apiData["Price in {0}".format(inFiatCurrency.upper())].append(priceinCurrency)
        time.sleep(5) #avoiding multiple requests to the api in a short time scale
    df = pd.DataFrame(apiData)
    # Per-coin value = price on the reporting date * position held.
    df["CryptoValue in {}".format(inFiatCurrency.upper())] = round((df["Price in {0}".format(inFiatCurrency.upper())] * df["Position"]), 2)
    df.to_csv(r"output/CryptoPortfolio.csv", index=False) # Save as a CSV
    return df # Return the resulting dataframe
# +
# Testing : GetCryptoPortfolio
#year = "2021"
#cryptos = {"Bitcoin": 2,"Ethereum": 4}
#fiat = "nok"
#df = GetCryptoPortfolio(year, cryptos, fiat)
#df
# -
def CreatePortfolioPieChart(fiatCurrency):
    """
    This function generates a pie chart of the portfolio in the input fiat currency.
    Reads output/CryptoPortfolio.csv (produced by GetCryptoPortfolio) and
    writes output/CryptoPortfolioPieChart.png.
    Parameters:
    inFiatCurrency (string):The fiat currency in which the value needs to be returned
                            input examples "usd", "nok", "sek" etc.
    Returns:
    none
    """
    import matplotlib.pyplot as plt
    import pandas as pd
    df = pd.read_csv("output/CryptoPortfolio.csv")
    plt.figure()
    plt.rcParams["figure.figsize"] = (10,3)
    plt.rcParams['font.size'] = 8
    # Column 4 is "CryptoValue in <FIAT>", column 1 the coin name.
    values = df.iloc[:, 4].to_list()
    labels = df.iloc[:, 1].to_list()
    # Explode every wedge slightly, one entry per row.
    explode = (0.08,) * df.iloc[:, 4].count()
    colors = ['#008DB8', '#00AAAA', '#00C69C', '#00E28E', '#00FF80', '#191970', '#001CF0', '#0038E2', '#0055D4', '#0071C6', ]
    colors = colors[0:df.iloc[:, 4].count()]
    def make_autopct(values):
        # Label each wedge with its absolute fiat value rather than a percent.
        def my_autopct(pct):
            total = sum(values)
            val = int(round(pct*total/100.0))
            return fiatCurrency.upper() +' {v:d}'.format(v=val)
        return my_autopct
    fig1, ax1= plt.subplots()
    ax1.pie(values, colors=colors, explode=explode,
            autopct=make_autopct(values),
            shadow=False, startangle=30)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    plt.title("Crypto Portfolio as of {0}".format(df.iat[0,0]))
    plt.legend(labels=labels, loc="best")
    plt.tight_layout()
    plt.savefig('output/CryptoPortfolioPieChart.png', dpi=200)
# +
# Testing : CreatePortfolioPieChart
#CreatePortfolioPieChart("nok")
# -
def CreatePortfolioTreeMapChart(fiatCurrency):
    """
    This function generates a TreeMap chart of the portfolio in the input fiat currency.
    Reads output/CryptoPortfolio.csv (produced by GetCryptoPortfolio) and
    writes output/CryptoPortfolioTreeMapChart.png.
    Parameters:
    inFiatCurrency (string):The fiat currency in which the value needs to be returned
                            input examples "usd", "nok", "sek" etc.
    Returns:
    none
    """
    import matplotlib.pyplot as plt
    import pandas as pd
    import squarify
    df = pd.read_csv("output/CryptoPortfolio.csv")
    plt.figure()
    plt.rcParams["figure.figsize"] = (10,3)
    plt.rcParams['font.size'] = 8
    labels =[]
    colors = ['#008DB8', '#00AAAA', '#00C69C', '#00E28E', '#00FF80', '#191970', '#001CF0', '#0038E2', '#0055D4', '#0071C6', ]
    colors = colors[0:df.iloc[:, 4].count()]
    # Tile labels: coin name plus its fiat value on a second line.
    for index, row in df.iterrows():
        labels.append(row["Cryptocurrency"] + "\n"+fiatCurrency.upper()+" "+str(row["CryptoValue in {}".format(fiatCurrency.upper())]))
    values = df.iloc[:, 4].to_list()
    squarify.plot(sizes=values, label=labels, color=colors, alpha=0.6)
    plt.title("Crypto Portfolio as of {0}".format(df.iat[0,0]))
    plt.axis('off')
    plt.tight_layout()
    plt.savefig('output/CryptoPortfolioTreeMapChart.png', dpi=200)
# +
# Testing : CreatePortfolioTreeMapChart
#CreatePortfolioTreeMapChart("nok")
# -
| 35.272727 | 140 | 0.63858 |
e268c285288c1665446d429cacd80e670edd2518 | 4,133 | py | Python | point_cloud/handcrafted_registrator.py | humanpose1/riedones3d | 53a393d7cead6daf13d477d6254db543de5b2e76 | [
"MIT"
] | 2 | 2022-02-09T16:55:43.000Z | 2022-03-26T09:44:07.000Z | point_cloud/handcrafted_registrator.py | humanpose1/riedones3d | 53a393d7cead6daf13d477d6254db543de5b2e76 | [
"MIT"
] | null | null | null | point_cloud/handcrafted_registrator.py | humanpose1/riedones3d | 53a393d7cead6daf13d477d6254db543de5b2e76 | [
"MIT"
] | null | null | null | iS_HANDCRAFT_DESC_IMPORT = True
# Optional native extension for SHOT descriptors; the flag (set to True on
# the preceding line) records whether the import succeeded so SHOTRegistrator
# can fail fast with a clear error when the library is unavailable.
try:
    import handcrafted_descriptor as hd
except ImportError:
    iS_HANDCRAFT_DESC_IMPORT = False
import open3d as o3d
import numpy as np
import torch
from point_cloud.base_registration import FeatureBasedRegistrator
from point_cloud.visu import torch2o3d
class FPFHRegistrator(FeatureBasedRegistrator):
    """Feature-based registrator using Open3D FPFH descriptors.

    ``radius``/``max_nn`` parameterize the hybrid KD-tree search used when
    computing FPFH features; all other arguments are forwarded to
    FeatureBasedRegistrator.
    """
    def __init__(self,
                 radius=0.5,
                 max_nn=30,
                 transform=None,
                 icp=None,
                 num_points=5000,
                 max_norm=0.5,
                 robust_estimator="teaser",
                 noise_bound_teaser=0.5,
                 num_iteration=80000,
                 max_num_matches=100,
                 min_num_matches=20,
                 verbose=True,
                 rot_thresh=5,
                 trans_thresh=0.3, **kwargs):
        FeatureBasedRegistrator.__init__(
            self,
            transform=transform,
            icp=icp,
            num_points=num_points,
            max_norm=max_norm,
            robust_estimator=robust_estimator,
            noise_bound_teaser=noise_bound_teaser,
            num_iteration=num_iteration,
            max_num_matches=max_num_matches,
            min_num_matches=min_num_matches,
            verbose=verbose,
            rot_thresh=rot_thresh,
            trans_thresh=trans_thresh, **kwargs)
        self.kd_tree = o3d.geometry.KDTreeSearchParamHybrid(radius=radius, max_nn=max_nn)
    def _compute_features(self, data_s, data_t, rand_s, rand_t):
        """Compute FPFH features for source/target clouds, sub-sampled by
        the index tensors ``rand_s``/``rand_t``.

        Open3D returns features as (33, N); the .T transpose yields one row
        per point before converting to a torch tensor on data.pos's device.
        """
        pcd_s = torch2o3d(data_s)
        fpfh_s = np.asarray(o3d.pipelines.registration.compute_fpfh_feature(pcd_s, self.kd_tree).data).T
        output_s = torch.from_numpy(fpfh_s).to(data_s.pos)
        pcd_t = torch2o3d(data_t)
        fpfh_t = np.asarray(o3d.pipelines.registration.compute_fpfh_feature(pcd_t, self.kd_tree).data).T
        output_t = torch.from_numpy(fpfh_t).to(data_t.pos)
        return output_s[rand_s], output_t[rand_t]
class SHOTRegistrator(FeatureBasedRegistrator):
    """Feature-based registrator using SHOT descriptors from the optional
    ``handcrafted_descriptor`` native extension.

    Raises ImportError at construction time if the extension failed to load.
    """
    def __init__(self,
                 radius=0.5,
                 transform=None,
                 icp=None,
                 num_points=5000,
                 max_norm=0.5,
                 robust_estimator="teaser",
                 noise_bound_teaser=0.5,
                 num_iteration=80000,
                 max_num_matches=100,
                 min_num_matches=20,
                 verbose=True,
                 rot_thresh=5,
                 trans_thresh=0.3, **kwargs):
        if not iS_HANDCRAFT_DESC_IMPORT:
            raise ImportError("Cannot import the lib to compute shot descriptor")
        FeatureBasedRegistrator.__init__(
            self,
            transform=transform,
            icp=icp,
            num_points=num_points,
            max_norm=max_norm,
            robust_estimator=robust_estimator,
            noise_bound_teaser=noise_bound_teaser,
            num_iteration=num_iteration,
            max_num_matches=max_num_matches,
            min_num_matches=min_num_matches,
            verbose=verbose,
            rot_thresh=rot_thresh,
            trans_thresh=trans_thresh, **kwargs)
        # Support radius for the SHOT descriptor computation.
        self.radius = radius
    def compute_shot_descriptor(self, data, rand):
        """Compute SHOT descriptors at the sub-sampled keypoints ``rand``,
        using the full cloud (positions + normals) as support.

        Returns a torch tensor on the same device/dtype as ``data.pos``.
        """
        assert getattr(data, "norm", None) is not None
        assert getattr(data, "pos", None) is not None
        pos = data.pos.detach().cpu().numpy().astype(float)
        norm = data.norm.detach().cpu().numpy().astype(float)
        # NOTE(review): this re-conversion of ``norm`` is redundant (already
        # float above); kept as-is to avoid touching behavior.
        norm = np.asarray(norm).astype(float)
        small_pos = data.pos[rand].detach().cpu().numpy().astype(float)
        small_pos = np.asarray(small_pos).astype(float)
        small_norm = data.norm[rand].detach().cpu().numpy().astype(float)
        small_norm = np.asarray(small_norm).astype(float)
        feat = hd.compute_shot(pos, norm, small_pos, small_norm, self.radius)
        return torch.from_numpy(feat).to(data.pos)
    def _compute_features(self, data_s, data_t, rand_s, rand_t):
        # Descriptors for source and target keypoints, respectively.
        feat_s = self.compute_shot_descriptor(data_s, rand_s)
        feat_t = self.compute_shot_descriptor(data_t, rand_t)
        return feat_s, feat_t
| 35.62931 | 104 | 0.61142 |
8cdfecb6d5f509982d646f9f7bcf8126a31b38e6 | 5,499 | py | Python | host/query-agent/pathdumpapi.py | PathDump/PathDump | d7fe23f303c4d7c2a41b2870c5034a52e3024985 | [
"Apache-2.0"
] | 8 | 2018-04-11T17:06:33.000Z | 2020-03-04T07:15:53.000Z | host/query-agent/pathdumpapi.py | PathDump/PathDump | d7fe23f303c4d7c2a41b2870c5034a52e3024985 | [
"Apache-2.0"
] | null | null | null | host/query-agent/pathdumpapi.py | PathDump/PathDump | d7fe23f303c4d7c2a41b2870c5034a52e3024985 | [
"Apache-2.0"
] | 4 | 2018-04-19T05:11:54.000Z | 2019-04-01T14:03:14.000Z | from pymongo import MongoClient
import pymongo
from datetime import datetime
import helperapi as helper
import confparser as cp
import tcpmon
# Module-level MongoDB handle: all query helpers below read from the
# PathDump database's TIB (Trajectory Information Base) collection.
client=MongoClient('localhost', 27017)
database=client['PathDump']
collection=database['TIB']
def getFlows (linkID, timeRange):
    """Return all flow records traversing ``linkID`` within ``timeRange``.

    Each result dict carries the 5-tuple flow id, the path, byte/packet
    counts and start/end as Unix epoch seconds. An empty combined filter
    means "match everything".
    """
    link_fltr = helper.buildLinkFilter (linkID)
    (stime_fltr, etime_fltr) = helper.buildTimeFilter (timeRange)
    data_fltr = helper.doAndFilters ([link_fltr, stime_fltr, etime_fltr])
    # Projection: only the fields copied into the result dicts below.
    proj_fltr = {'sip': 1, 'sport': 1, 'dip': 1, 'dport': 1, 'proto': 1,
                 'path': 1, 'bytes': 1, 'pkts': 1, 'start': 1, 'end': 1,
                 '_id': 0}
    if data_fltr == '':
        cursor = collection.find (None, proj_fltr)
    else:
        cursor = collection.find (data_fltr, proj_fltr)
    flows = []
    for f in cursor:
        # NOTE(review): leftover debug output (also pins this file to
        # Python 2 via the print statement).
        print "entry ",f
        flow = {'flowid': {}, 'path': None}
        flow['flowid']['sip'] = f['sip']
        flow['flowid']['sport'] = f['sport']
        flow['flowid']['dip'] = f['dip']
        flow['flowid']['dport'] = f['dport']
        flow['flowid']['proto'] = f['proto']
        flow['path'] = f['path']
        flow['bytes'] = f['bytes']
        flow['pkts'] = f['pkts']
        # Convert stored datetimes to Unix epoch seconds.
        flow['start'] = (f['start'] - datetime(1970,1,1)).total_seconds()
        flow['end'] = (f['end'] - datetime(1970,1,1)).total_seconds()
        flows.append (flow)
    return flows
def getPaths (flowID, linkID, timeRange):
    """Return the distinct paths taken by records matching the given
    flow id, link and time range (duplicates removed, first seen wins)."""
    (sip_fltr, sport_fltr, dip_fltr, dport_fltr, proto_fltr) = \
                                helper.buildFlowidFilter (flowID)
    link_fltr = helper.buildLinkFilter (linkID)
    (stime_fltr, etime_fltr) = helper.buildTimeFilter (timeRange)
    data_fltr = helper.doAndFilters ([sip_fltr, sport_fltr, dip_fltr,
                                      dport_fltr, proto_fltr,
                                      stime_fltr, etime_fltr])
    proj_fltr = {'path': 1, '_id': 0}
    # An empty combined filter means "match everything".
    if data_fltr == '':
        cursor = collection.find (None, proj_fltr)
    else:
        cursor = collection.find (data_fltr, proj_fltr)
    paths = []
    seen = {}
    for record in cursor:
        # Deduplicate on the concatenated hop list.
        key = ''.join (record['path'])
        if key not in seen:
            paths.append (record)
            seen[key] = True
    return paths
def getCount (Flow, timeRange):
    """Return (byte_count, packet_count) for ``Flow`` over ``timeRange``.

    If the Flow dict already carries counts, they are returned directly;
    otherwise the counts are summed from matching TIB records.
    """
    # NOTE(review): this shortcut treats a legitimate zero count as
    # "missing" (0 is falsy) and falls through to the DB query — confirm
    # that is intended.
    if Flow['bytes'] and Flow['pkts']:
        return (Flow['bytes'], Flow['pkts'])
    (sip_fltr, sport_fltr, dip_fltr, dport_fltr, proto_fltr) = \
                                helper.buildFlowidFilter (Flow['flowid'])
    path_fltr = helper.buildPathFilter (Flow['path'])
    (stime_fltr, etime_fltr) = helper.buildTimeFilter (timeRange)
    data_fltr = helper.doAndFilters ([sip_fltr, sport_fltr, dip_fltr,
                                      dport_fltr, proto_fltr, path_fltr,
                                      stime_fltr, etime_fltr])
    proj_fltr = {'bytes': 1, 'pkts': 1, '_id': 0}
    if data_fltr == '':
        cursor = collection.find (None, proj_fltr)
    else:
        cursor = collection.find (data_fltr, proj_fltr)
    bytec = 0 # byte count
    pktc = 0 # packet count
    for c in cursor:
        bytec += c['bytes']
        pktc += c['pkts']
    return (bytec, pktc)
def getDuration (Flow, timeRange):
    """Return the duration in seconds spanned by the records matching
    ``Flow`` within ``timeRange`` (latest end minus earliest start).

    Returns 0.0 when no record matches.
    """
    (sip_fltr, sport_fltr, dip_fltr, dport_fltr, proto_fltr) = \
                                helper.buildFlowidFilter (Flow['flowid'])
    path_fltr = helper.buildPathFilter (Flow['path'])
    (stime_fltr, etime_fltr) = helper.buildTimeFilter (timeRange)
    data_fltr = helper.doAndFilters ([sip_fltr, sport_fltr, dip_fltr,
                                      dport_fltr, proto_fltr, path_fltr,
                                      stime_fltr, etime_fltr])
    proj_fltr = {'start': 1, 'end': 1, '_id': 0}
    if data_fltr == '':
        cursor = collection.find (None, proj_fltr)
    else:
        cursor = collection.find (data_fltr, proj_fltr)
    # Bug fix: the previous -1 integer sentinel was compared against
    # datetime values (a TypeError on Python 3) and, with an empty cursor,
    # produced (-1 - -1).total_seconds() -> AttributeError. Use None
    # sentinels and report 0.0 for "no matching records" instead.
    start = None
    end = None
    for c in cursor:
        # Track the earliest start and the latest end across all records.
        if start is None or start > c['start']:
            start = c['start']
        if end is None or end < c['end']:
            end = c['end']
    if start is None or end is None:
        return 0.0
    delta = end - start
    return delta.total_seconds()
def postFlow (flowID, Reason, Paths):
    """Report a flow, its observed paths and a reason string to the
    PathDump controller over HTTP; returns the controller's response."""
    request = {
        'api': 'postFlow',
        'fid': flowID,
        'reason': Reason,
        'paths': Paths,
    }
    return helper.httpcmd (cp.options['controller'], request)
def getPoorTCPFlows (freq):
    """Return flow-id dicts (sip/sport/dip/dport/proto) for the TCP flows
    that tcpmon currently classifies as performing poorly.

    ``freq`` is forwarded to tcpmon.updatePoorFlows; flow ids that do not
    split into exactly five ':'-separated tokens are skipped.
    """
    tcpmon.init()
    poorFlows = tcpmon.updatePoorFlows (freq)
    flowIDs = []
    for fid in poorFlows:
        tokens = fid.split (':')
        # Bug fix: the guard previously tested the undefined name 'fields',
        # raising NameError instead of skipping malformed flow ids.
        if len (tokens) != 5:
            continue
        flowid = {}
        flowid['sip'] = tokens[0]
        flowid['sport'] = tokens[1]
        flowid['dip'] = tokens[2]
        flowid['dport'] = tokens[3]
        flowid['proto'] = tokens[4]
        flowIDs.append (flowid)
    return flowIDs
# linkID = ('*', '16')
# timeRange = ('*', datetime.datetime(2015, 11, 9, 19, 10, 32, 765000))
# linkID = ('7', '16')
# timeRange = (datetime.datetime(2015, 11, 9, 19, 10, 30, 765000), datetime.datetime(2015, 11, 9, 19, 10, 32, 765000))
# linkID = ('7', '16')
# timeRange = ('*', '*')
# flowID = {'sip': '10.4.2.3', 'sport': '9000', 'dip': '10.3.2.3',
# 'dport': '60217', 'proto': '6'}
# flowID = {'sip': '*', 'sport': '9000', 'dip': '10.3.2.3',
# 'dport': '*', 'proto': '6'}
# Path = ['8-15', '15-18', '18-13', '13-6']
# Flow = {'flowid': flowID, 'path': Path}
# linkID = ('*', '*')
# timeRange = (datetime.datetime(2015, 11, 9, 19, 10, 30, 765000), datetime.datetime(2015, 11, 9, 19, 10, 32, 765000))
# getDuration (Flow, timeRange)
| 33.944444 | 118 | 0.563557 |
3efff73c2435c6b52230eecf7fc4e4aadb242905 | 2,337 | py | Python | transifex/native/settings.py | transifex/transifex-python | d467e82bba7f0d620a021cf9e7e58c987ba2fbb5 | [
"Apache-2.0"
] | 14 | 2020-04-10T20:54:59.000Z | 2022-03-07T16:13:22.000Z | transifex/native/settings.py | transifex/transifex-python | d467e82bba7f0d620a021cf9e7e58c987ba2fbb5 | [
"Apache-2.0"
] | 60 | 2020-04-14T12:41:06.000Z | 2022-03-29T06:38:09.000Z | transifex/native/settings.py | transifex/transifex-python | d467e82bba7f0d620a021cf9e7e58c987ba2fbb5 | [
"Apache-2.0"
] | 6 | 2021-01-01T10:28:11.000Z | 2021-06-10T09:50:26.000Z | from transifex.common.utils import import_to_python
from transifex.native.cache import AbstractCache
from transifex.native.rendering import (AbstractErrorPolicy,
AbstractRenderingPolicy, ChainedPolicy)
def parse_setting_class(obj):
    """Build an instance from a class description.

    ``obj`` is either a bare import-path string or a ``(path, params)``
    tuple where ``params`` is a dict of constructor keyword arguments.
    """
    try:
        # Tuple form: (path, params)
        path, params = obj
    except ValueError:
        # String form: just the path, no constructor arguments
        path, params = obj, None
    cls = import_to_python(path)
    return cls(**params) if params else cls()
def parse_rendering_policy(policy):
    """Coerce *policy* into an ``AbstractRenderingPolicy``.

    Accepts an already-built policy (returned unchanged, ``None`` included),
    a ``(path, params)`` tuple, a bare class-path string, or a list of any
    of the above — a list is wrapped in a ``ChainedPolicy``.

    :return: an AbstractRenderingPolicy object (or None)
    :rtype: AbstractRenderingPolicy
    """
    if policy is None or isinstance(policy, AbstractRenderingPolicy):
        return policy
    if isinstance(policy, list):
        parsed = [parse_rendering_policy(item) for item in policy]
        return ChainedPolicy(*parsed)
    return parse_setting_class(policy)
def parse_error_policy(policy):
    """Coerce *policy* into an ``AbstractErrorPolicy``.

    Accepts an already-built policy (returned unchanged, ``None`` included),
    a ``(path, params)`` tuple, or a bare class-path string.

    :return: an AbstractErrorPolicy object (or None)
    :rtype: AbstractErrorPolicy
    """
    if policy is None or isinstance(policy, AbstractErrorPolicy):
        return policy
    return parse_setting_class(policy)
def parse_cache(cache):
    """Coerce *cache* into an ``AbstractCache``.

    Accepts an already-built cache (returned unchanged, ``None`` included),
    a ``(path, params)`` tuple, or a bare class-path string.

    :return: an AbstractCache object (or None)
    :rtype: AbstractCache
    """
    if cache is None or isinstance(cache, AbstractCache):
        return cache
    return parse_setting_class(cache)
| 30.75 | 79 | 0.685494 |
23b737e426710356e358ac34562dffa6a0bcb315 | 666 | py | Python | fixture/session.py | AdKrajan/python_training | 1315b58c07db4d02bc46ae65e5c358fdbf78dbc0 | [
"Apache-2.0"
] | null | null | null | fixture/session.py | AdKrajan/python_training | 1315b58c07db4d02bc46ae65e5c358fdbf78dbc0 | [
"Apache-2.0"
] | null | null | null | fixture/session.py | AdKrajan/python_training | 1315b58c07db4d02bc46ae65e5c358fdbf78dbc0 | [
"Apache-2.0"
] | null | null | null | class SessionHelper:
    def __init__(self, app):
        # Keep a reference to the application fixture; its ``wd`` attribute
        # is the Selenium WebDriver used by login/logout below.
        self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click() | 35.052632 | 76 | 0.641141 |
8fd24844733cb45fafa55db443368d908351334d | 203,695 | py | Python | tests/migrations/test_operations.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/migrations/test_operations.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/migrations/test_operations.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | from django.core.exceptions import FieldDoesNotExist
from django.db import (
IntegrityError,
connection,
migrations,
models,
transaction,
)
from django.db.migrations.migration import Migration
from django.db.migrations.operations.fields import FieldOperation
from django.db.migrations.state import ModelState, ProjectState
from django.db.models.functions import Abs
from django.db.transaction import atomic
from django.test import SimpleTestCase, override_settings, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import FoodManager, FoodQuerySet, UnicodeModel
from .test_base import OperationTestBase
class Mixin:
    # Plain empty class; used as a (duplicate) base in the CreateModel
    # bases-validation tests below.
    pass
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
Each test looks at their state changing, and then their database operation -
both forwards and backwards.
"""
    def test_create_model(self):
        """
        Tests the CreateModel operation.
        Most other tests use this operation as part of setup, so check failures here first.
        """
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=1)),
            ],
        )
        self.assertEqual(operation.describe(), "Create model Pony")
        self.assertEqual(operation.migration_name_fragment, "pony")
        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crmo", new_state)
        self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
        self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
        # Test the database alteration
        self.assertTableNotExists("test_crmo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmo", editor, project_state, new_state)
        self.assertTableExists("test_crmo_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crmo", editor, new_state, project_state)
        self.assertTableNotExists("test_crmo_pony")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "CreateModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["fields", "name"])
        # And default manager not in set: a plain default Manager is omitted
        # from the deconstructed kwargs.
        operation = migrations.CreateModel(
            "Foo", fields=[], managers=[("objects", models.Manager())]
        )
        definition = operation.deconstruct()
        self.assertNotIn("managers", definition[2])
    def test_create_model_with_duplicate_field_name(self):
        """CreateModel rejects two fields that share the same name."""
        with self.assertRaisesMessage(
            ValueError, "Found duplicate value pink in CreateModel fields argument."
        ):
            migrations.CreateModel(
                "Pony",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("pink", models.TextField()),
                    ("pink", models.IntegerField(default=1)),
                ],
            )
    def test_create_model_with_duplicate_base(self):
        """
        CreateModel rejects duplicate bases, whether they are given as
        strings (compared case-insensitively), as model classes, or as a
        mix of both forms referring to the same model.
        """
        message = "Found duplicate value test_crmo.pony in CreateModel bases argument."
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    "test_crmo.Pony",
                    "test_crmo.Pony",
                ),
            )
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    "test_crmo.Pony",
                    "test_crmo.pony",
                ),
            )
        message = "Found duplicate value migrations.unicodemodel in CreateModel bases argument."
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    UnicodeModel,
                    UnicodeModel,
                ),
            )
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    UnicodeModel,
                    "migrations.unicodemodel",
                ),
            )
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    UnicodeModel,
                    "migrations.UnicodeModel",
                ),
            )
        message = "Found duplicate value <class 'django.db.models.base.Model'> in CreateModel bases argument."
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    models.Model,
                    models.Model,
                ),
            )
        message = "Found duplicate value <class 'migrations.test_operations.Mixin'> in CreateModel bases argument."
        with self.assertRaisesMessage(ValueError, message):
            migrations.CreateModel(
                "Pony",
                fields=[],
                bases=(
                    Mixin,
                    Mixin,
                ),
            )
    def test_create_model_with_duplicate_manager_name(self):
        """CreateModel rejects two managers that share the same name."""
        with self.assertRaisesMessage(
            ValueError,
            "Found duplicate value objects in CreateModel managers argument.",
        ):
            migrations.CreateModel(
                "Pony",
                fields=[],
                managers=[
                    ("objects", models.Manager()),
                    ("objects", models.Manager()),
                ],
            )
    def test_create_model_with_unique_after(self):
        """
        Tests the CreateModel operation directly followed by an
        AlterUniqueTogether (bug #22844 - sqlite remake issues)
        """
        operation1 = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=1)),
            ],
        )
        operation2 = migrations.CreateModel(
            "Rider",
            [
                ("id", models.AutoField(primary_key=True)),
                ("number", models.IntegerField(default=1)),
                ("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
            ],
        )
        operation3 = migrations.AlterUniqueTogether(
            "Rider",
            [
                ("number", "pony"),
            ],
        )
        # Test the database alteration. All three operations are applied
        # inside a single schema editor on purpose (the scenario from the
        # bug report above).
        project_state = ProjectState()
        self.assertTableNotExists("test_crmoua_pony")
        self.assertTableNotExists("test_crmoua_rider")
        with connection.schema_editor() as editor:
            new_state = project_state.clone()
            operation1.state_forwards("test_crmoua", new_state)
            operation1.database_forwards(
                "test_crmoua", editor, project_state, new_state
            )
            project_state, new_state = new_state, new_state.clone()
            operation2.state_forwards("test_crmoua", new_state)
            operation2.database_forwards(
                "test_crmoua", editor, project_state, new_state
            )
            project_state, new_state = new_state, new_state.clone()
            operation3.state_forwards("test_crmoua", new_state)
            operation3.database_forwards(
                "test_crmoua", editor, project_state, new_state
            )
        self.assertTableExists("test_crmoua_pony")
        self.assertTableExists("test_crmoua_rider")
    def test_create_model_m2m(self):
        """
        Test the creation of a model with a ManyToMany field and the
        auto-created "through" model.
        """
        project_state = self.set_up_test_model("test_crmomm")
        operation = migrations.CreateModel(
            "Stable",
            [
                ("id", models.AutoField(primary_key=True)),
                ("ponies", models.ManyToManyField("Pony", related_name="stables")),
            ],
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards("test_crmomm", new_state)
        # Test the database alteration
        self.assertTableNotExists("test_crmomm_stable_ponies")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmomm", editor, project_state, new_state)
        self.assertTableExists("test_crmomm_stable")
        self.assertTableExists("test_crmomm_stable_ponies")
        # The M2M lives on the through table, not as a column on Stable.
        self.assertColumnNotExists("test_crmomm_stable", "ponies")
        # Make sure the M2M field actually works
        with atomic():
            Pony = new_state.apps.get_model("test_crmomm", "Pony")
            Stable = new_state.apps.get_model("test_crmomm", "Stable")
            stable = Stable.objects.create()
            p1 = Pony.objects.create(pink=False, weight=4.55)
            p2 = Pony.objects.create(pink=True, weight=5.43)
            stable.ponies.add(p1, p2)
            self.assertEqual(stable.ponies.count(), 2)
            stable.ponies.all().delete()
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_crmomm", editor, new_state, project_state
            )
        self.assertTableNotExists("test_crmomm_stable")
        self.assertTableNotExists("test_crmomm_stable_ponies")
    def test_create_model_inheritance(self):
        """
        Tests the CreateModel operation on a multi-table inheritance setup.
        """
        project_state = self.set_up_test_model("test_crmoih")
        # Test the state alteration
        operation = migrations.CreateModel(
            "ShetlandPony",
            [
                (
                    # The MTI parent link, spelled out explicitly
                    # (auto-created one-to-one primary key to Pony).
                    "pony_ptr",
                    models.OneToOneField(
                        "test_crmoih.Pony",
                        models.CASCADE,
                        auto_created=True,
                        primary_key=True,
                        to_field="id",
                        serialize=False,
                    ),
                ),
                ("cuteness", models.IntegerField(default=1)),
            ],
        )
        new_state = project_state.clone()
        operation.state_forwards("test_crmoih", new_state)
        self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
        # Test the database alteration
        self.assertTableNotExists("test_crmoih_shetlandpony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmoih", editor, project_state, new_state)
        self.assertTableExists("test_crmoih_shetlandpony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_crmoih", editor, new_state, project_state
            )
        self.assertTableNotExists("test_crmoih_shetlandpony")
    def test_create_proxy_model(self):
        """
        CreateModel ignores proxy models.
        """
        project_state = self.set_up_test_model("test_crprmo")
        # Test the state alteration
        operation = migrations.CreateModel(
            "ProxyPony",
            [],
            options={"proxy": True},
            bases=("test_crprmo.Pony",),
        )
        self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
        new_state = project_state.clone()
        operation.state_forwards("test_crprmo", new_state)
        self.assertIn(("test_crprmo", "proxypony"), new_state.models)
        # Test the database alteration: no table is created for the proxy,
        # and the concrete parent's table is untouched.
        self.assertTableNotExists("test_crprmo_proxypony")
        self.assertTableExists("test_crprmo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crprmo", editor, project_state, new_state)
        self.assertTableNotExists("test_crprmo_proxypony")
        self.assertTableExists("test_crprmo_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_crprmo", editor, new_state, project_state
            )
        self.assertTableNotExists("test_crprmo_proxypony")
        self.assertTableExists("test_crprmo_pony")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "CreateModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"])
    def test_create_unmanaged_model(self):
        """
        CreateModel ignores unmanaged models.
        """
        project_state = self.set_up_test_model("test_crummo")
        # Test the state alteration
        # NOTE(review): despite the test name, this operation declares
        # proxy=True rather than managed=False — confirm whether an
        # unmanaged model was intended here.
        operation = migrations.CreateModel(
            "UnmanagedPony",
            [],
            options={"proxy": True},
            bases=("test_crummo.Pony",),
        )
        self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
        new_state = project_state.clone()
        operation.state_forwards("test_crummo", new_state)
        self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
        # Test the database alteration
        self.assertTableNotExists("test_crummo_unmanagedpony")
        self.assertTableExists("test_crummo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crummo", editor, project_state, new_state)
        self.assertTableNotExists("test_crummo_unmanagedpony")
        self.assertTableExists("test_crummo_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_crummo", editor, new_state, project_state
            )
        self.assertTableNotExists("test_crummo_unmanagedpony")
        self.assertTableExists("test_crummo_pony")
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_create_model_with_constraint(self):
        """A CheckConstraint declared in CreateModel is created, enforced
        by the database, and dropped on reversal."""
        where = models.Q(pink__gt=2)
        check_constraint = models.CheckConstraint(
            check=where, name="test_constraint_pony_pink_gt_2"
        )
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=3)),
            ],
            options={"constraints": [check_constraint]},
        )

        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crmo", new_state)
        self.assertEqual(
            len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
        )

        # Test database alteration
        self.assertTableNotExists("test_crmo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmo", editor, project_state, new_state)
        self.assertTableExists("test_crmo_pony")
        with connection.cursor() as cursor:
            with self.assertRaises(IntegrityError):
                # pink=1 violates pink > 2.
                cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)")

        # Test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crmo", editor, new_state, project_state)
        self.assertTableNotExists("test_crmo_pony")
        # Test deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "CreateModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2]["options"]["constraints"], [check_constraint])
    def test_create_model_with_partial_unique_constraint(self):
        """A conditional UniqueConstraint declared in CreateModel is
        enforced only on backends that support partial indexes."""
        partial_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            condition=models.Q(weight__gt=5),
            name="test_constraint_pony_pink_for_weight_gt_5_uniq",
        )
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=3)),
                ("weight", models.FloatField()),
            ],
            options={"constraints": [partial_unique_constraint]},
        )
        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crmo", new_state)
        self.assertEqual(
            len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
        )
        # Test database alteration
        self.assertTableNotExists("test_crmo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmo", editor, project_state, new_state)
        self.assertTableExists("test_crmo_pony")
        # Test constraint works: duplicates are fine below the weight
        # threshold; above it, uniqueness of pink is enforced (where
        # supported).
        Pony = new_state.apps.get_model("test_crmo", "Pony")
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError):
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crmo", editor, new_state, project_state)
        self.assertTableNotExists("test_crmo_pony")
        # Test deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "CreateModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2]["options"]["constraints"], [partial_unique_constraint]
        )
    def test_create_model_with_deferred_unique_constraint(self):
        """A deferrable UniqueConstraint declared in CreateModel is checked
        at transaction end (or immediately via SET CONSTRAINTS) on backends
        that support deferrable unique constraints."""
        deferred_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="deferrable_pink_constraint",
            deferrable=models.Deferrable.DEFERRED,
        )
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=3)),
            ],
            options={"constraints": [deferred_unique_constraint]},
        )
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crmo", new_state)
        self.assertEqual(
            len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
        )
        self.assertTableNotExists("test_crmo_pony")
        # Create table.
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmo", editor, project_state, new_state)
        self.assertTableExists("test_crmo_pony")
        Pony = new_state.apps.get_model("test_crmo", "Pony")
        Pony.objects.create(pink=1)
        if connection.features.supports_deferrable_unique_constraints:
            # Unique constraint is deferred: the transient duplicate is
            # resolved before the transaction commits.
            with transaction.atomic():
                obj = Pony.objects.create(pink=1)
                obj.pink = 2
                obj.save()
            # Constraint behavior can be changed with SET CONSTRAINTS.
            with self.assertRaises(IntegrityError):
                with transaction.atomic(), connection.cursor() as cursor:
                    quoted_name = connection.ops.quote_name(
                        deferred_unique_constraint.name
                    )
                    cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
                    obj = Pony.objects.create(pink=1)
                    obj.pink = 3
                    obj.save()
        else:
            Pony.objects.create(pink=1)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crmo", editor, new_state, project_state)
        self.assertTableNotExists("test_crmo_pony")
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "CreateModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2]["options"]["constraints"],
            [deferred_unique_constraint],
        )
    @skipUnlessDBFeature("supports_covering_indexes")
    def test_create_model_with_covering_unique_constraint(self):
        """A UniqueConstraint with include= (covering index) declared in
        CreateModel is created and enforced."""
        covering_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            include=["weight"],
            name="test_constraint_pony_pink_covering_weight",
        )
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=3)),
                ("weight", models.FloatField()),
            ],
            options={"constraints": [covering_unique_constraint]},
        )
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crmo", new_state)
        self.assertEqual(
            len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
        )
        self.assertTableNotExists("test_crmo_pony")
        # Create table.
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crmo", editor, project_state, new_state)
        self.assertTableExists("test_crmo_pony")
        Pony = new_state.apps.get_model("test_crmo", "Pony")
        Pony.objects.create(pink=1, weight=4.0)
        with self.assertRaises(IntegrityError):
            # Uniqueness is on pink only; weight is merely included.
            Pony.objects.create(pink=1, weight=7.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards("test_crmo", editor, new_state, project_state)
        self.assertTableNotExists("test_crmo_pony")
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "CreateModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2]["options"]["constraints"],
            [covering_unique_constraint],
        )
    def test_create_model_managers(self):
        """
        The managers on a model are set.
        """
        project_state = self.set_up_test_model("test_cmoma")
        # Test the state alteration
        operation = migrations.CreateModel(
            "Food",
            fields=[
                ("id", models.AutoField(primary_key=True)),
            ],
            managers=[
                ("food_qs", FoodQuerySet.as_manager()),
                ("food_mgr", FoodManager("a", "b")),
                ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
            ],
        )
        self.assertEqual(operation.describe(), "Create model Food")
        new_state = project_state.clone()
        operation.state_forwards("test_cmoma", new_state)
        self.assertIn(("test_cmoma", "food"), new_state.models)
        # Managers are preserved in declaration order, with their
        # constructor arguments intact.
        managers = new_state.models["test_cmoma", "food"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
    def test_delete_model(self):
        """
        Tests the DeleteModel operation.
        """
        project_state = self.set_up_test_model("test_dlmo")
        # Test the state alteration
        operation = migrations.DeleteModel("Pony")
        self.assertEqual(operation.describe(), "Delete model Pony")
        self.assertEqual(operation.migration_name_fragment, "delete_pony")
        new_state = project_state.clone()
        operation.state_forwards("test_dlmo", new_state)
        self.assertNotIn(("test_dlmo", "pony"), new_state.models)
        # Test the database alteration
        self.assertTableExists("test_dlmo_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_dlmo", editor, project_state, new_state)
        self.assertTableNotExists("test_dlmo_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_dlmo", editor, new_state, project_state)
        self.assertTableExists("test_dlmo_pony")
        # And deconstruction: only the model name is recorded.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "DeleteModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(list(definition[2]), ["name"])
    def test_delete_proxy_model(self):
        """
        Tests the DeleteModel operation ignores proxy models.
        """
        project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
        # Test the state alteration
        operation = migrations.DeleteModel("ProxyPony")
        new_state = project_state.clone()
        operation.state_forwards("test_dlprmo", new_state)
        self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
        self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
        # Test the database alteration: the proxy has no table of its own,
        # so only the state changes; the concrete table stays put.
        self.assertTableExists("test_dlprmo_pony")
        self.assertTableNotExists("test_dlprmo_proxypony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_dlprmo", editor, project_state, new_state)
        self.assertTableExists("test_dlprmo_pony")
        self.assertTableNotExists("test_dlprmo_proxypony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_dlprmo", editor, new_state, project_state
            )
        self.assertTableExists("test_dlprmo_pony")
        self.assertTableNotExists("test_dlprmo_proxypony")
    def test_delete_mti_model(self):
        """DeleteModel drops a multi-table-inheritance child table and,
        on reversal, recreates it together with its parent-link column."""
        project_state = self.set_up_test_model("test_dlmtimo", mti_model=True)
        # Test the state alteration
        operation = migrations.DeleteModel("ShetlandPony")
        new_state = project_state.clone()
        operation.state_forwards("test_dlmtimo", new_state)
        self.assertIn(("test_dlmtimo", "shetlandpony"), project_state.models)
        self.assertNotIn(("test_dlmtimo", "shetlandpony"), new_state.models)
        # Test the database alteration
        self.assertTableExists("test_dlmtimo_pony")
        self.assertTableExists("test_dlmtimo_shetlandpony")
        self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_dlmtimo", editor, project_state, new_state
            )
        self.assertTableExists("test_dlmtimo_pony")
        self.assertTableNotExists("test_dlmtimo_shetlandpony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_dlmtimo", editor, new_state, project_state
            )
        self.assertTableExists("test_dlmtimo_pony")
        self.assertTableExists("test_dlmtimo_shetlandpony")
        self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
    def test_rename_model(self):
        """
        Tests the RenameModel operation.
        """
        project_state = self.set_up_test_model("test_rnmo", related_model=True)
        # Test the state alteration
        operation = migrations.RenameModel("Pony", "Horse")
        self.assertEqual(operation.describe(), "Rename model Pony to Horse")
        self.assertEqual(operation.migration_name_fragment, "rename_pony_horse")
        # Test initial state and database
        self.assertIn(("test_rnmo", "pony"), project_state.models)
        self.assertNotIn(("test_rnmo", "horse"), project_state.models)
        self.assertTableExists("test_rnmo_pony")
        self.assertTableNotExists("test_rnmo_horse")
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                "test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
            )
            self.assertFKNotExists(
                "test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
            )
        # Migrate forwards. Some backends can't rename a referenced table
        # atomically, so honour the feature flag.
        new_state = project_state.clone()
        atomic_rename = connection.features.supports_atomic_references_rename
        new_state = self.apply_operations(
            "test_rnmo", new_state, [operation], atomic=atomic_rename
        )
        # Test new state and database
        self.assertNotIn(("test_rnmo", "pony"), new_state.models)
        self.assertIn(("test_rnmo", "horse"), new_state.models)
        # RenameModel also repoints all incoming FKs and M2Ms
        self.assertEqual(
            new_state.models["test_rnmo", "rider"].fields["pony"].remote_field.model,
            "test_rnmo.Horse",
        )
        self.assertTableNotExists("test_rnmo_pony")
        self.assertTableExists("test_rnmo_horse")
        if connection.features.supports_foreign_keys:
            self.assertFKNotExists(
                "test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
            )
            self.assertFKExists(
                "test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
            )
        # Migrate backwards
        original_state = self.unapply_operations(
            "test_rnmo", project_state, [operation], atomic=atomic_rename
        )
        # Test original state and database
        self.assertIn(("test_rnmo", "pony"), original_state.models)
        self.assertNotIn(("test_rnmo", "horse"), original_state.models)
        self.assertEqual(
            original_state.models["test_rnmo", "rider"]
            .fields["pony"]
            .remote_field.model,
            "Pony",
        )
        self.assertTableExists("test_rnmo_pony")
        self.assertTableNotExists("test_rnmo_horse")
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                "test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
            )
            self.assertFKNotExists(
                "test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
            )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RenameModel")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"old_name": "Pony", "new_name": "Horse"})
    def test_rename_model_state_forwards(self):
        """
        RenameModel operations shouldn't trigger the caching of rendered apps
        on state without prior apps.
        """
        state = ProjectState()
        state.add_model(ModelState("migrations", "Foo", []))
        operation = migrations.RenameModel("Foo", "Bar")
        operation.state_forwards("migrations", state)
        self.assertNotIn("apps", state.__dict__)
        self.assertNotIn(("migrations", "foo"), state.models)
        self.assertIn(("migrations", "bar"), state.models)
        # Now with apps cached: force the cache, then check the rename
        # reuses the same rendered apps object.
        apps = state.apps
        operation = migrations.RenameModel("Bar", "Foo")
        operation.state_forwards("migrations", state)
        self.assertIs(state.apps, apps)
        self.assertNotIn(("migrations", "bar"), state.models)
        self.assertIn(("migrations", "foo"), state.models)
    def test_rename_model_with_self_referential_fk(self):
        """
        Tests the RenameModel operation on model with self referential FK.
        """
        project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
        # Test the state alteration
        operation = migrations.RenameModel("Rider", "HorseRider")
        self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
        new_state = project_state.clone()
        operation.state_forwards("test_rmwsrf", new_state)
        self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
        self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
        # Remember, RenameModel also repoints all incoming FKs and M2Ms
        self.assertEqual(
            "self",
            new_state.models["test_rmwsrf", "horserider"]
            .fields["friend"]
            .remote_field.model,
        )
        HorseRider = new_state.apps.get_model("test_rmwsrf", "horserider")
        self.assertIs(
            HorseRider._meta.get_field("horserider").remote_field.model, HorseRider
        )
        # Test the database alteration
        self.assertTableExists("test_rmwsrf_rider")
        self.assertTableNotExists("test_rmwsrf_horserider")
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
            )
            self.assertFKNotExists(
                "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
            )
        atomic_rename = connection.features.supports_atomic_references_rename
        with connection.schema_editor(atomic=atomic_rename) as editor:
            operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
        self.assertTableNotExists("test_rmwsrf_rider")
        self.assertTableExists("test_rmwsrf_horserider")
        if connection.features.supports_foreign_keys:
            # The self-referential FK must now point at the renamed table.
            self.assertFKNotExists(
                "test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")
            )
            self.assertFKExists(
                "test_rmwsrf_horserider",
                ["friend_id"],
                ("test_rmwsrf_horserider", "id"),
            )
        # And test reversal
        with connection.schema_editor(atomic=atomic_rename) as editor:
            operation.database_backwards(
                "test_rmwsrf", editor, new_state, project_state
            )
        self.assertTableExists("test_rmwsrf_rider")
        self.assertTableNotExists("test_rmwsrf_horserider")
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
            )
            self.assertFKNotExists(
                "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
            )
    def test_rename_model_with_superclass_fk(self):
        """
        Tests the RenameModel operation on a model which has a superclass that
        has a foreign key. The FK declared against the superclass must keep
        pointing at the parent table.
        """
        project_state = self.set_up_test_model(
            "test_rmwsc", related_model=True, mti_model=True
        )
        # Test the state alteration
        operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
        self.assertEqual(
            operation.describe(), "Rename model ShetlandPony to LittleHorse"
        )
        new_state = project_state.clone()
        operation.state_forwards("test_rmwsc", new_state)
        self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
        self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
        # RenameModel shouldn't repoint the superclass's relations, only local ones
        self.assertEqual(
            project_state.models["test_rmwsc", "rider"]
            .fields["pony"]
            .remote_field.model,
            new_state.models["test_rmwsc", "rider"].fields["pony"].remote_field.model,
        )
        # Before running the migration we have a table for Shetland Pony, not Little Horse
        self.assertTableExists("test_rmwsc_shetlandpony")
        self.assertTableNotExists("test_rmwsc_littlehorse")
        if connection.features.supports_foreign_keys:
            # and the foreign key on rider points to pony, not shetland pony
            self.assertFKExists(
                "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
            )
            self.assertFKNotExists(
                "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")
            )
        with connection.schema_editor(
            atomic=connection.features.supports_atomic_references_rename
        ) as editor:
            operation.database_forwards("test_rmwsc", editor, project_state, new_state)
        # Now we have a little horse table, not shetland pony
        self.assertTableNotExists("test_rmwsc_shetlandpony")
        self.assertTableExists("test_rmwsc_littlehorse")
        if connection.features.supports_foreign_keys:
            # but the Foreign keys still point at pony, not little horse
            self.assertFKExists(
                "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
            )
            self.assertFKNotExists(
                "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")
            )
    def test_rename_model_with_self_referential_m2m(self):
        """RenameModel keeps a self-referential ManyToManyField usable
        after the rename."""
        app_label = "test_rename_model_with_self_referential_m2m"

        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "ReflexivePony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("ponies", models.ManyToManyField("self")),
                    ],
                ),
            ],
        )
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
            ],
            atomic=connection.features.supports_atomic_references_rename,
        )
        # The renamed model's M2M still round-trips through the database.
        Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
        pony = Pony.objects.create()
        pony.ponies.add(pony)
    def test_rename_model_with_m2m(self):
        """RenameModel on a model that declares a ManyToManyField keeps
        existing through-table rows and stays writable afterwards."""
        app_label = "test_rename_model_with_m2m"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("riders", models.ManyToManyField("Rider")),
                    ],
                ),
            ],
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)

        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameModel("Pony", "Pony2"),
            ],
            atomic=connection.features.supports_atomic_references_rename,
        )
        Pony = project_state.apps.get_model(app_label, "Pony2")
        Rider = project_state.apps.get_model(app_label, "Rider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)
        # Both the pre-rename and post-rename relations survive.
        self.assertEqual(Pony.objects.count(), 2)
        self.assertEqual(Rider.objects.count(), 2)
        self.assertEqual(
            Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
        )
    def test_rename_m2m_target_model(self):
        """RenameModel on the *target* of a ManyToManyField keeps existing
        through-table rows and stays writable afterwards."""
        app_label = "test_rename_m2m_target_model"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("riders", models.ManyToManyField("Rider")),
                    ],
                ),
            ],
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)

        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameModel("Rider", "Rider2"),
            ],
            atomic=connection.features.supports_atomic_references_rename,
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider2")
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        pony.riders.add(rider)
        # Both the pre-rename and post-rename relations survive.
        self.assertEqual(Pony.objects.count(), 2)
        self.assertEqual(Rider.objects.count(), 2)
        self.assertEqual(
            Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
        )
def test_rename_m2m_through_model(self):
    """
    RenameModel on an explicit M2M *through* model renames its table while
    preserving existing rows and the relation's behavior.
    """
    app_label = "test_rename_through"
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            # Explicit intermediate model with FKs to both sides.
            migrations.CreateModel(
                "PonyRider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "rider",
                        models.ForeignKey(
                            "test_rename_through.Rider", models.CASCADE
                        ),
                    ),
                    (
                        "pony",
                        models.ForeignKey(
                            "test_rename_through.Pony", models.CASCADE
                        ),
                    ),
                ],
            ),
            migrations.AddField(
                "Pony",
                "riders",
                models.ManyToManyField(
                    "test_rename_through.Rider",
                    through="test_rename_through.PonyRider",
                ),
            ),
        ],
    )
    Pony = project_state.apps.get_model(app_label, "Pony")
    Rider = project_state.apps.get_model(app_label, "Rider")
    PonyRider = project_state.apps.get_model(app_label, "PonyRider")
    # Seed one through-row before renaming the through model.
    pony = Pony.objects.create()
    rider = Rider.objects.create()
    PonyRider.objects.create(pony=pony, rider=rider)
    project_state = self.apply_operations(
        app_label,
        project_state,
        operations=[
            migrations.RenameModel("PonyRider", "PonyRider2"),
        ],
    )
    Pony = project_state.apps.get_model(app_label, "Pony")
    Rider = project_state.apps.get_model(app_label, "Rider")
    PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
    pony = Pony.objects.first()
    rider = Rider.objects.create()
    PonyRider.objects.create(pony=pony, rider=rider)
    self.assertEqual(Pony.objects.count(), 1)
    self.assertEqual(Rider.objects.count(), 2)
    # Pre-rename and post-rename through rows both exist and are reachable
    # via the M2M accessor on Pony.
    self.assertEqual(PonyRider.objects.count(), 2)
    self.assertEqual(pony.riders.count(), 2)
def test_rename_m2m_model_after_rename_field(self):
    """RenameModel renames a many-to-many column after a RenameField."""
    app_label = "test_rename_multiple"
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("name", models.CharField(max_length=20)),
                ],
            ),
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "pony",
                        models.ForeignKey(
                            "test_rename_multiple.Pony", models.CASCADE
                        ),
                    ),
                ],
            ),
            migrations.CreateModel(
                "PonyRider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("riders", models.ManyToManyField("Rider")),
                ],
            ),
            # RenameField followed by RenameModel in the same migration run
            # is the combination that previously broke M2M column renames.
            migrations.RenameField(
                model_name="pony", old_name="name", new_name="fancy_name"
            ),
            migrations.RenameModel(old_name="Rider", new_name="Jockey"),
        ],
        atomic=connection.features.supports_atomic_references_rename,
    )
    Pony = project_state.apps.get_model(app_label, "Pony")
    Jockey = project_state.apps.get_model(app_label, "Jockey")
    PonyRider = project_state.apps.get_model(app_label, "PonyRider")
    # No "no such column" error means the column was renamed correctly.
    pony = Pony.objects.create(fancy_name="a good name")
    jockey = Jockey.objects.create(pony=pony)
    ponyrider = PonyRider.objects.create()
    ponyrider.riders.add(jockey)
def test_add_field(self):
    """
    Tests the AddField operation: state change, schema change in both
    directions, and deconstruction.
    """
    # Test the state alteration
    operation = migrations.AddField(
        "Pony",
        "height",
        models.FloatField(null=True, default=5),
    )
    self.assertEqual(operation.describe(), "Add field height to Pony")
    self.assertEqual(operation.migration_name_fragment, "pony_height")
    project_state, new_state = self.make_test_state("test_adfl", operation)
    # Pony starts with 3 fields (id, pink, weight); height makes 4.
    self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
    field = new_state.models["test_adfl", "pony"].fields["height"]
    self.assertEqual(field.default, 5)
    # Test the database alteration
    self.assertColumnNotExists("test_adfl_pony", "height")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_adfl", editor, project_state, new_state)
    self.assertColumnExists("test_adfl_pony", "height")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards("test_adfl", editor, new_state, project_state)
    self.assertColumnNotExists("test_adfl_pony", "height")
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "AddField")
    self.assertEqual(definition[1], [])
    self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_add_charfield(self):
    """
    Tests the AddField operation on CharField, in particular that string
    defaults (including digits and quote characters) are quoted correctly.
    """
    project_state = self.set_up_test_model("test_adchfl")
    Pony = project_state.apps.get_model("test_adchfl", "Pony")
    pony = Pony.objects.create(weight=42)
    new_state = self.apply_operations(
        "test_adchfl",
        project_state,
        [
            migrations.AddField(
                "Pony",
                "text",
                models.CharField(max_length=10, default="some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.CharField(max_length=10, default=""),
            ),
            # If not properly quoted digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.CharField(max_length=10, default="42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.CharField(max_length=10, default='"\'"'),
            ),
        ],
    )
    Pony = new_state.apps.get_model("test_adchfl", "Pony")
    # Re-fetch the pre-existing row: defaults must have been backfilled.
    pony = Pony.objects.get(pk=pony.pk)
    self.assertEqual(pony.text, "some text")
    self.assertEqual(pony.empty, "")
    self.assertEqual(pony.digits, "42")
    self.assertEqual(pony.quotes, '"\'"')
def test_add_textfield(self):
    """
    Tests the AddField operation on TextField, in particular that string
    defaults (including digits and quote characters) are quoted correctly.
    """
    project_state = self.set_up_test_model("test_adtxtfl")
    Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
    pony = Pony.objects.create(weight=42)
    new_state = self.apply_operations(
        "test_adtxtfl",
        project_state,
        [
            migrations.AddField(
                "Pony",
                "text",
                models.TextField(default="some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.TextField(default=""),
            ),
            # If not properly quoted digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.TextField(default="42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.TextField(default='"\'"'),
            ),
        ],
    )
    Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
    # Re-fetch the pre-existing row: defaults must have been backfilled.
    pony = Pony.objects.get(pk=pony.pk)
    self.assertEqual(pony.text, "some text")
    self.assertEqual(pony.empty, "")
    self.assertEqual(pony.digits, "42")
    self.assertEqual(pony.quotes, '"\'"')
def test_add_binaryfield(self):
    """
    Tests the AddField operation on BinaryField, in particular that bytes
    defaults (including digits and quote characters) are quoted correctly.
    """
    project_state = self.set_up_test_model("test_adbinfl")
    Pony = project_state.apps.get_model("test_adbinfl", "Pony")
    pony = Pony.objects.create(weight=42)
    new_state = self.apply_operations(
        "test_adbinfl",
        project_state,
        [
            migrations.AddField(
                "Pony",
                "blob",
                models.BinaryField(default=b"some text"),
            ),
            migrations.AddField(
                "Pony",
                "empty",
                models.BinaryField(default=b""),
            ),
            # If not properly quoted digits would be interpreted as an int.
            migrations.AddField(
                "Pony",
                "digits",
                models.BinaryField(default=b"42"),
            ),
            # Manual quoting is fragile and could trip on quotes. Refs #xyz.
            migrations.AddField(
                "Pony",
                "quotes",
                models.BinaryField(default=b'"\'"'),
            ),
        ],
    )
    Pony = new_state.apps.get_model("test_adbinfl", "Pony")
    pony = Pony.objects.get(pk=pony.pk)
    # SQLite returns buffer/memoryview, cast to bytes for checking.
    self.assertEqual(bytes(pony.blob), b"some text")
    self.assertEqual(bytes(pony.empty), b"")
    self.assertEqual(bytes(pony.digits), b"42")
    self.assertEqual(bytes(pony.quotes), b'"\'"')
def test_column_name_quoting(self):
    """
    Column names that are SQL keywords shouldn't cause problems when used
    in migrations (#22168).
    """
    project_state = self.set_up_test_model("test_regr22168")
    # "order" is a reserved word (ORDER BY) on every backend.
    operation = migrations.AddField(
        "Pony",
        "order",
        models.IntegerField(default=0),
    )
    new_state = project_state.clone()
    operation.state_forwards("test_regr22168", new_state)
    with connection.schema_editor() as editor:
        operation.database_forwards(
            "test_regr22168", editor, project_state, new_state
        )
    self.assertColumnExists("test_regr22168_pony", "order")
def test_add_field_preserve_default(self):
    """
    Tests the AddField operation's state alteration
    when preserve_default = False.
    """
    project_state = self.set_up_test_model("test_adflpd")
    # Test the state alteration
    operation = migrations.AddField(
        "Pony",
        "height",
        models.FloatField(null=True, default=4),
        preserve_default=False,
    )
    new_state = project_state.clone()
    operation.state_forwards("test_adflpd", new_state)
    self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
    field = new_state.models["test_adflpd", "pony"].fields["height"]
    # preserve_default=False: the default is used only for backfilling
    # existing rows and is dropped from the resulting model state.
    self.assertEqual(field.default, models.NOT_PROVIDED)
    # Test the database alteration
    project_state.apps.get_model("test_adflpd", "pony").objects.create(
        weight=4,
    )
    self.assertColumnNotExists("test_adflpd_pony", "height")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_adflpd", editor, project_state, new_state)
    self.assertColumnExists("test_adflpd_pony", "height")
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "AddField")
    self.assertEqual(definition[1], [])
    self.assertEqual(
        sorted(definition[2]), ["field", "model_name", "name", "preserve_default"]
    )
def test_add_field_m2m(self):
    """
    Tests the AddField operation with a ManyToManyField: creates the
    through table (not a column) and removes it on reversal.
    """
    project_state = self.set_up_test_model("test_adflmm", second_model=True)
    # Test the state alteration
    operation = migrations.AddField(
        "Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")
    )
    new_state = project_state.clone()
    operation.state_forwards("test_adflmm", new_state)
    self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
    # Test the database alteration
    self.assertTableNotExists("test_adflmm_pony_stables")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_adflmm", editor, project_state, new_state)
    # An M2M adds a through table, never a column on the model's table.
    self.assertTableExists("test_adflmm_pony_stables")
    self.assertColumnNotExists("test_adflmm_pony", "stables")
    # Make sure the M2M field actually works
    with atomic():
        Pony = new_state.apps.get_model("test_adflmm", "Pony")
        p = Pony.objects.create(pink=False, weight=4.55)
        p.stables.create()
        self.assertEqual(p.stables.count(), 1)
        p.stables.all().delete()
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_adflmm", editor, new_state, project_state
        )
    self.assertTableNotExists("test_adflmm_pony_stables")
def test_alter_field_m2m(self):
    """
    AlterField on a ManyToManyField applies non-schema option changes
    (here: blank=True) to the rendered model.
    """
    project_state = self.set_up_test_model("test_alflmm", second_model=True)
    project_state = self.apply_operations(
        "test_alflmm",
        project_state,
        operations=[
            migrations.AddField(
                "Pony",
                "stables",
                models.ManyToManyField("Stable", related_name="ponies"),
            )
        ],
    )
    Pony = project_state.apps.get_model("test_alflmm", "Pony")
    self.assertFalse(Pony._meta.get_field("stables").blank)
    project_state = self.apply_operations(
        "test_alflmm",
        project_state,
        operations=[
            migrations.AlterField(
                "Pony",
                "stables",
                models.ManyToManyField(
                    to="Stable", related_name="ponies", blank=True
                ),
            )
        ],
    )
    # The altered option is visible on the re-rendered model.
    Pony = project_state.apps.get_model("test_alflmm", "Pony")
    self.assertTrue(Pony._meta.get_field("stables").blank)
def test_repoint_field_m2m(self):
    """
    AlterField can repoint a ManyToManyField at a different target model
    (Stable -> Van) and the relation remains usable afterwards.
    """
    project_state = self.set_up_test_model(
        "test_alflmm", second_model=True, third_model=True
    )
    project_state = self.apply_operations(
        "test_alflmm",
        project_state,
        operations=[
            migrations.AddField(
                "Pony",
                "places",
                models.ManyToManyField("Stable", related_name="ponies"),
            )
        ],
    )
    Pony = project_state.apps.get_model("test_alflmm", "Pony")
    # Repoint the M2M from Stable to Van.
    project_state = self.apply_operations(
        "test_alflmm",
        project_state,
        operations=[
            migrations.AlterField(
                "Pony",
                "places",
                models.ManyToManyField(to="Van", related_name="ponies"),
            )
        ],
    )
    # Ensure the new field actually works
    Pony = project_state.apps.get_model("test_alflmm", "Pony")
    p = Pony.objects.create(pink=False, weight=4.55)
    p.places.create()
    self.assertEqual(p.places.count(), 1)
    p.places.all().delete()
def test_remove_field_m2m(self):
    """
    RemoveField on a ManyToManyField drops the auto-created through table
    and recreates it when the operation is unapplied.
    """
    project_state = self.set_up_test_model("test_rmflmm", second_model=True)
    project_state = self.apply_operations(
        "test_rmflmm",
        project_state,
        operations=[
            migrations.AddField(
                "Pony",
                "stables",
                models.ManyToManyField("Stable", related_name="ponies"),
            )
        ],
    )
    self.assertTableExists("test_rmflmm_pony_stables")
    # Keep a copy of the state *with* the field for testing reversal.
    with_field_state = project_state.clone()
    operations = [migrations.RemoveField("Pony", "stables")]
    project_state = self.apply_operations(
        "test_rmflmm", project_state, operations=operations
    )
    self.assertTableNotExists("test_rmflmm_pony_stables")
    # And test reversal
    self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
    self.assertTableExists("test_rmflmm_pony_stables")
def test_remove_field_m2m_with_through(self):
    """
    RemoveField on an M2M with an explicit through model must not drop the
    through table itself — that's DeleteModel's job.
    """
    project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
    self.assertTableNotExists("test_rmflmmwt_ponystables")
    project_state = self.apply_operations(
        "test_rmflmmwt",
        project_state,
        operations=[
            migrations.CreateModel(
                "PonyStables",
                fields=[
                    (
                        "pony",
                        models.ForeignKey("test_rmflmmwt.Pony", models.CASCADE),
                    ),
                    (
                        "stable",
                        models.ForeignKey("test_rmflmmwt.Stable", models.CASCADE),
                    ),
                ],
            ),
            migrations.AddField(
                "Pony",
                "stables",
                models.ManyToManyField(
                    "Stable",
                    related_name="ponies",
                    through="test_rmflmmwt.PonyStables",
                ),
            ),
        ],
    )
    self.assertTableExists("test_rmflmmwt_ponystables")
    # Removing the field then deleting the through model must both apply
    # without errors (the RemoveField itself is a schema noop here).
    operations = [
        migrations.RemoveField("Pony", "stables"),
        migrations.DeleteModel("PonyStables"),
    ]
    self.apply_operations("test_rmflmmwt", project_state, operations=operations)
def test_remove_field(self):
    """
    Tests the RemoveField operation: state change, schema change in both
    directions, and deconstruction.
    """
    project_state = self.set_up_test_model("test_rmfl")
    # Test the state alteration
    operation = migrations.RemoveField("Pony", "pink")
    self.assertEqual(operation.describe(), "Remove field pink from Pony")
    self.assertEqual(operation.migration_name_fragment, "remove_pony_pink")
    new_state = project_state.clone()
    operation.state_forwards("test_rmfl", new_state)
    self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
    # Test the database alteration
    self.assertColumnExists("test_rmfl_pony", "pink")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_rmfl", editor, project_state, new_state)
    self.assertColumnNotExists("test_rmfl_pony", "pink")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards("test_rmfl", editor, new_state, project_state)
    self.assertColumnExists("test_rmfl_pony", "pink")
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "RemoveField")
    self.assertEqual(definition[1], [])
    self.assertEqual(definition[2], {"model_name": "Pony", "name": "pink"})
def test_remove_fk(self):
    """
    Tests the RemoveField operation on a foreign key: the FK column is
    dropped forwards and restored backwards.
    """
    project_state = self.set_up_test_model("test_rfk", related_model=True)
    self.assertColumnExists("test_rfk_rider", "pony_id")
    operation = migrations.RemoveField("Rider", "pony")
    new_state = project_state.clone()
    operation.state_forwards("test_rfk", new_state)
    with connection.schema_editor() as editor:
        operation.database_forwards("test_rfk", editor, project_state, new_state)
    self.assertColumnNotExists("test_rfk_rider", "pony_id")
    with connection.schema_editor() as editor:
        operation.database_backwards("test_rfk", editor, new_state, project_state)
    self.assertColumnExists("test_rfk_rider", "pony_id")
def test_alter_model_table(self):
    """
    Tests the AlterModelTable operation: state change, table rename in
    both directions, and deconstruction.
    """
    project_state = self.set_up_test_model("test_almota")
    # Test the state alteration
    operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
    self.assertEqual(
        operation.describe(), "Rename table for Pony to test_almota_pony_2"
    )
    self.assertEqual(operation.migration_name_fragment, "alter_pony_table")
    new_state = project_state.clone()
    operation.state_forwards("test_almota", new_state)
    self.assertEqual(
        new_state.models["test_almota", "pony"].options["db_table"],
        "test_almota_pony_2",
    )
    # Test the database alteration
    self.assertTableExists("test_almota_pony")
    self.assertTableNotExists("test_almota_pony_2")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_almota", editor, project_state, new_state)
    self.assertTableNotExists("test_almota_pony")
    self.assertTableExists("test_almota_pony_2")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_almota", editor, new_state, project_state
        )
    self.assertTableExists("test_almota_pony")
    self.assertTableNotExists("test_almota_pony_2")
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "AlterModelTable")
    self.assertEqual(definition[1], [])
    self.assertEqual(definition[2], {"name": "Pony", "table": "test_almota_pony_2"})
def test_alter_model_table_none(self):
    """
    AlterModelTable with table=None is described as a reset to the
    default (auto-generated) table name.
    """
    reset_table_op = migrations.AlterModelTable("Pony", None)
    description = reset_table_op.describe()
    self.assertEqual(description, "Rename table for Pony to (default)")
def test_alter_model_table_noop(self):
    """
    Tests the AlterModelTable operation if the table name is not changed.
    """
    project_state = self.set_up_test_model("test_almota")
    # Test the state alteration
    operation = migrations.AlterModelTable("Pony", "test_almota_pony")
    new_state = project_state.clone()
    operation.state_forwards("test_almota", new_state)
    self.assertEqual(
        new_state.models["test_almota", "pony"].options["db_table"],
        "test_almota_pony",
    )
    # Test the database alteration: same name, so the table must survive
    # both directions untouched.
    self.assertTableExists("test_almota_pony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_almota", editor, project_state, new_state)
    self.assertTableExists("test_almota_pony")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_almota", editor, new_state, project_state
        )
    self.assertTableExists("test_almota_pony")
def test_alter_model_table_m2m(self):
    """
    AlterModelTable should rename auto-generated M2M tables.
    """
    app_label = "test_talflmltlm2m"
    pony_db_table = "pony_foo"
    project_state = self.set_up_test_model(
        app_label, second_model=True, db_table=pony_db_table
    )
    # Add the M2M field
    first_state = project_state.clone()
    operation = migrations.AddField(
        "Pony", "stables", models.ManyToManyField("Stable")
    )
    operation.state_forwards(app_label, first_state)
    with connection.schema_editor() as editor:
        operation.database_forwards(app_label, editor, project_state, first_state)
    # The M2M table name is derived from the model's db_table, so it must
    # change when the model's table does.
    original_m2m_table = "%s_%s" % (pony_db_table, "stables")
    new_m2m_table = "%s_%s" % (app_label, "pony_stables")
    self.assertTableExists(original_m2m_table)
    self.assertTableNotExists(new_m2m_table)
    # Rename the Pony db_table which should also rename the m2m table.
    second_state = first_state.clone()
    operation = migrations.AlterModelTable(name="pony", table=None)
    operation.state_forwards(app_label, second_state)
    atomic_rename = connection.features.supports_atomic_references_rename
    with connection.schema_editor(atomic=atomic_rename) as editor:
        operation.database_forwards(app_label, editor, first_state, second_state)
    self.assertTableExists(new_m2m_table)
    self.assertTableNotExists(original_m2m_table)
    # And test reversal
    with connection.schema_editor(atomic=atomic_rename) as editor:
        operation.database_backwards(app_label, editor, second_state, first_state)
    self.assertTableExists(original_m2m_table)
    self.assertTableNotExists(new_m2m_table)
def test_alter_field(self):
    """
    Tests the AlterField operation: state change, NULL-ness change in both
    directions, and deconstruction.
    """
    project_state = self.set_up_test_model("test_alfl")
    # Test the state alteration
    operation = migrations.AlterField(
        "Pony", "pink", models.IntegerField(null=True)
    )
    self.assertEqual(operation.describe(), "Alter field pink on Pony")
    self.assertEqual(operation.migration_name_fragment, "alter_pony_pink")
    new_state = project_state.clone()
    operation.state_forwards("test_alfl", new_state)
    self.assertIs(
        project_state.models["test_alfl", "pony"].fields["pink"].null, False
    )
    self.assertIs(new_state.models["test_alfl", "pony"].fields["pink"].null, True)
    # Test the database alteration
    self.assertColumnNotNull("test_alfl_pony", "pink")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_alfl", editor, project_state, new_state)
    self.assertColumnNull("test_alfl_pony", "pink")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards("test_alfl", editor, new_state, project_state)
    self.assertColumnNotNull("test_alfl_pony", "pink")
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "AlterField")
    self.assertEqual(definition[1], [])
    self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
def test_alter_field_add_db_column_noop(self):
    """
    AlterField operation is a noop when adding only a db_column and the
    column name is not changed.

    Exercises a plain field (Pony.weight) and an FK (Rider.pony), checking
    via assertNumQueries(0) that no SQL runs in either direction.
    """
    app_label = "test_afadbn"
    project_state = self.set_up_test_model(app_label, related_model=True)
    pony_table = "%s_pony" % app_label
    new_state = project_state.clone()
    # Explicit db_column equal to the implicit column name.
    operation = migrations.AlterField(
        "Pony", "weight", models.FloatField(db_column="weight")
    )
    operation.state_forwards(app_label, new_state)
    self.assertIsNone(
        project_state.models[app_label, "pony"].fields["weight"].db_column,
    )
    self.assertEqual(
        new_state.models[app_label, "pony"].fields["weight"].db_column,
        "weight",
    )
    self.assertColumnExists(pony_table, "weight")
    with connection.schema_editor() as editor:
        with self.assertNumQueries(0):
            operation.database_forwards(app_label, editor, project_state, new_state)
    self.assertColumnExists(pony_table, "weight")
    with connection.schema_editor() as editor:
        with self.assertNumQueries(0):
            operation.database_backwards(
                app_label, editor, new_state, project_state
            )
    self.assertColumnExists(pony_table, "weight")
    # Same check for a foreign key: db_column matching the implicit
    # "<name>_id" column must also be a noop.
    rider_table = "%s_rider" % app_label
    new_state = project_state.clone()
    operation = migrations.AlterField(
        "Rider",
        "pony",
        models.ForeignKey("Pony", models.CASCADE, db_column="pony_id"),
    )
    operation.state_forwards(app_label, new_state)
    self.assertIsNone(
        project_state.models[app_label, "rider"].fields["pony"].db_column,
    )
    # assertEqual, not assertIs: equal strings are only *sometimes* the
    # same object (interning), so an identity check is fragile.
    self.assertEqual(
        new_state.models[app_label, "rider"].fields["pony"].db_column,
        "pony_id",
    )
    self.assertColumnExists(rider_table, "pony_id")
    with connection.schema_editor() as editor:
        with self.assertNumQueries(0):
            operation.database_forwards(app_label, editor, project_state, new_state)
    self.assertColumnExists(rider_table, "pony_id")
    # Reversal must also be a noop (the original erroneously called
    # database_forwards again with swapped states here).
    with connection.schema_editor() as editor:
        with self.assertNumQueries(0):
            operation.database_backwards(app_label, editor, new_state, project_state)
    self.assertColumnExists(rider_table, "pony_id")
def test_alter_field_pk(self):
    """
    Tests the AlterField operation on primary keys (for things like PostgreSQL's SERIAL weirdness)
    """
    project_state = self.set_up_test_model("test_alflpk")
    # Test the state alteration: AutoField pk -> plain IntegerField pk.
    operation = migrations.AlterField(
        "Pony", "id", models.IntegerField(primary_key=True)
    )
    new_state = project_state.clone()
    operation.state_forwards("test_alflpk", new_state)
    self.assertIsInstance(
        project_state.models["test_alflpk", "pony"].fields["id"],
        models.AutoField,
    )
    self.assertIsInstance(
        new_state.models["test_alflpk", "pony"].fields["id"],
        models.IntegerField,
    )
    # Test the database alteration: must apply without raising.
    with connection.schema_editor() as editor:
        operation.database_forwards("test_alflpk", editor, project_state, new_state)
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_alflpk", editor, new_state, project_state
        )
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_pk_fk(self):
    """
    Tests the AlterField operation on primary keys changes any FKs pointing to it.
    """
    project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
    # Add M2M relations in both directions so their hidden FK columns must
    # follow the pk's type change too.
    project_state = self.apply_operations(
        "test_alflpkfk",
        project_state,
        [
            migrations.CreateModel(
                "Stable",
                fields=[
                    ("ponies", models.ManyToManyField("Pony")),
                ],
            ),
            migrations.AddField(
                "Pony",
                "stables",
                models.ManyToManyField("Stable"),
            ),
        ],
    )
    # Test the state alteration
    operation = migrations.AlterField(
        "Pony", "id", models.FloatField(primary_key=True)
    )
    new_state = project_state.clone()
    operation.state_forwards("test_alflpkfk", new_state)
    self.assertIsInstance(
        project_state.models["test_alflpkfk", "pony"].fields["id"],
        models.AutoField,
    )
    self.assertIsInstance(
        new_state.models["test_alflpkfk", "pony"].fields["id"],
        models.FloatField,
    )

    def assertIdTypeEqualsFkType():
        # Introspect the live schema and check that every FK column that
        # references Pony.id has the same type and NULL-ness as the pk.
        with connection.cursor() as cursor:
            id_type, id_null = [
                (c.type_code, c.null_ok)
                for c in connection.introspection.get_table_description(
                    cursor, "test_alflpkfk_pony"
                )
                if c.name == "id"
            ][0]
            fk_type, fk_null = [
                (c.type_code, c.null_ok)
                for c in connection.introspection.get_table_description(
                    cursor, "test_alflpkfk_rider"
                )
                if c.name == "pony_id"
            ][0]
            m2m_fk_type, m2m_fk_null = [
                (c.type_code, c.null_ok)
                for c in connection.introspection.get_table_description(
                    cursor,
                    "test_alflpkfk_pony_stables",
                )
                if c.name == "pony_id"
            ][0]
            remote_m2m_fk_type, remote_m2m_fk_null = [
                (c.type_code, c.null_ok)
                for c in connection.introspection.get_table_description(
                    cursor,
                    "test_alflpkfk_stable_ponies",
                )
                if c.name == "pony_id"
            ][0]
            self.assertEqual(id_type, fk_type)
            self.assertEqual(id_type, m2m_fk_type)
            self.assertEqual(id_type, remote_m2m_fk_type)
            self.assertEqual(id_null, fk_null)
            self.assertEqual(id_null, m2m_fk_null)
            self.assertEqual(id_null, remote_m2m_fk_null)

    assertIdTypeEqualsFkType()
    # Test the database alteration
    with connection.schema_editor() as editor:
        operation.database_forwards(
            "test_alflpkfk", editor, project_state, new_state
        )
    assertIdTypeEqualsFkType()
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_alflpkfk", editor, new_state, project_state
        )
    assertIdTypeEqualsFkType()
def test_alter_field_pk_mti_fk(self):
    """
    Altering a pk's type propagates through multi-table inheritance: the
    child's parent-link column and FKs to the child must follow.
    """
    app_label = "test_alflpkmtifk"
    project_state = self.set_up_test_model(app_label, mti_model=True)
    # FK pointing at the MTI child (ShetlandPony), whose pk is the
    # parent-link column pony_ptr_id.
    project_state = self.apply_operations(
        app_label,
        project_state,
        [
            migrations.CreateModel(
                "ShetlandRider",
                fields=[
                    (
                        "pony",
                        models.ForeignKey(
                            f"{app_label}.ShetlandPony", models.CASCADE
                        ),
                    ),
                ],
            ),
        ],
    )
    operation = migrations.AlterField(
        "Pony",
        "id",
        models.BigAutoField(primary_key=True),
    )
    new_state = project_state.clone()
    operation.state_forwards(app_label, new_state)
    self.assertIsInstance(
        new_state.models[app_label, "pony"].fields["id"],
        models.BigAutoField,
    )

    def _get_column_id_type(cursor, table, column):
        # Introspected type_code of `column` on "<app_label>_<table>".
        return [
            c.type_code
            for c in connection.introspection.get_table_description(
                cursor,
                f"{app_label}_{table}",
            )
            if c.name == column
        ][0]

    def assertIdTypeEqualsMTIFkType():
        # Parent pk, child parent-link, and FK-to-child all share a type.
        with connection.cursor() as cursor:
            parent_id_type = _get_column_id_type(cursor, "pony", "id")
            child_id_type = _get_column_id_type(
                cursor, "shetlandpony", "pony_ptr_id"
            )
            mti_id_type = _get_column_id_type(cursor, "shetlandrider", "pony_id")
            self.assertEqual(parent_id_type, child_id_type)
            self.assertEqual(parent_id_type, mti_id_type)

    assertIdTypeEqualsMTIFkType()
    # Alter primary key.
    with connection.schema_editor() as editor:
        operation.database_forwards(app_label, editor, project_state, new_state)
    assertIdTypeEqualsMTIFkType()
    if connection.features.supports_foreign_keys:
        # FK constraints must still exist (not just columns of equal type).
        self.assertFKExists(
            f"{app_label}_shetlandpony",
            ["pony_ptr_id"],
            (f"{app_label}_pony", "id"),
        )
        self.assertFKExists(
            f"{app_label}_shetlandrider",
            ["pony_id"],
            (f"{app_label}_shetlandpony", "pony_ptr_id"),
        )
    # Reversal.
    with connection.schema_editor() as editor:
        operation.database_backwards(app_label, editor, new_state, project_state)
    assertIdTypeEqualsMTIFkType()
    if connection.features.supports_foreign_keys:
        self.assertFKExists(
            f"{app_label}_shetlandpony",
            ["pony_ptr_id"],
            (f"{app_label}_pony", "id"),
        )
        self.assertFKExists(
            f"{app_label}_shetlandrider",
            ["pony_id"],
            (f"{app_label}_shetlandpony", "pony_ptr_id"),
        )
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_on_fk_with_to_field_target_type_change(self):
    """
    Changing the type of a to_field target (Rider.code int -> char) must
    also retype the FK column (Pony.rider_id) that references it.
    """
    app_label = "test_alflrsfkwtflttc"
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("code", models.IntegerField(unique=True)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "rider",
                        models.ForeignKey(
                            "%s.Rider" % app_label, models.CASCADE, to_field="code"
                        ),
                    ),
                ],
            ),
        ],
    )
    operation = migrations.AlterField(
        "Rider",
        "code",
        models.CharField(max_length=100, unique=True),
    )
    self.apply_operations(app_label, project_state, operations=[operation])
    # Introspect both columns; the FK must have followed the retype.
    id_type, id_null = [
        (c.type_code, c.null_ok)
        for c in self.get_table_description("%s_rider" % app_label)
        if c.name == "code"
    ][0]
    fk_type, fk_null = [
        (c.type_code, c.null_ok)
        for c in self.get_table_description("%s_pony" % app_label)
        if c.name == "rider_id"
    ][0]
    self.assertEqual(id_type, fk_type)
    self.assertEqual(id_null, fk_null)
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_on_fk_with_to_field_related_name_target_type_change(
    self,
):
    """
    Same scenario as the to_field target-type test, but with
    related_name="+" (no reverse accessor): applying the AlterField must
    not crash.
    """
    app_label = "test_alflrsfkwtflrnttc"
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("code", models.PositiveIntegerField(unique=True)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "rider",
                        models.ForeignKey(
                            "%s.Rider" % app_label,
                            models.CASCADE,
                            to_field="code",
                            related_name="+",
                        ),
                    ),
                ],
            ),
        ],
    )
    operation = migrations.AlterField(
        "Rider",
        "code",
        models.CharField(max_length=100, unique=True),
    )
    # Applying without an exception is the assertion here.
    self.apply_operations(app_label, project_state, operations=[operation])
def test_alter_field_reloads_state_on_fk_target_changes(self):
    """
    If AlterField doesn't reload state appropriately, the second AlterField
    crashes on MySQL due to not dropping the PonyRider.pony foreign key
    constraint before modifying the column.
    """
    app_label = "alter_alter_field_reloads_state_on_fk_target_changes"
    # Chain of FKs: PonyRider -> Pony -> Rider, all with CharField pks.
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.CharField(primary_key=True, max_length=100)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.CharField(primary_key=True, max_length=100)),
                    (
                        "rider",
                        models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
                    ),
                ],
            ),
            migrations.CreateModel(
                "PonyRider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "pony",
                        models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
                    ),
                ],
            ),
        ],
    )
    # Two consecutive pk retypes; the second one is the regression check.
    project_state = self.apply_operations(
        app_label,
        project_state,
        operations=[
            migrations.AlterField(
                "Rider", "id", models.CharField(primary_key=True, max_length=99)
            ),
            migrations.AlterField(
                "Pony", "id", models.CharField(primary_key=True, max_length=99)
            ),
        ],
    )
def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self):
    """
    If AlterField doesn't reload state appropriately, the second AlterField
    crashes on MySQL due to not dropping the PonyRider.pony foreign key
    constraint before modifying the column.
    """
    app_label = "alter_alter_field_reloads_state_on_fk_with_to_field_target_changes"
    # Same chain as the plain variant, but the FKs use to_field="slug"
    # instead of the pk.
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.CharField(primary_key=True, max_length=100)),
                    ("slug", models.CharField(unique=True, max_length=100)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.CharField(primary_key=True, max_length=100)),
                    (
                        "rider",
                        models.ForeignKey(
                            "%s.Rider" % app_label, models.CASCADE, to_field="slug"
                        ),
                    ),
                    ("slug", models.CharField(unique=True, max_length=100)),
                ],
            ),
            migrations.CreateModel(
                "PonyRider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "pony",
                        models.ForeignKey(
                            "%s.Pony" % app_label, models.CASCADE, to_field="slug"
                        ),
                    ),
                ],
            ),
        ],
    )
    # Retype both to_field targets; the second must not crash.
    project_state = self.apply_operations(
        app_label,
        project_state,
        operations=[
            migrations.AlterField(
                "Rider", "slug", models.CharField(unique=True, max_length=99)
            ),
            migrations.AlterField(
                "Pony", "slug", models.CharField(unique=True, max_length=99)
            ),
        ],
    )
    def test_rename_field_reloads_state_on_fk_target_changes(self):
        """
        If RenameField doesn't reload state appropriately, the AlterField
        crashes on MySQL due to not dropping the PonyRider.pony foreign key
        constraint before modifying the column.
        """
        app_label = "alter_rename_field_reloads_state_on_fk_target_changes"
        # FK chain: PonyRider.pony -> Pony, Pony.rider -> Rider.
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                        (
                            "rider",
                            models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
                        ),
                    ],
                ),
                migrations.CreateModel(
                    "PonyRider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "pony",
                            models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
                        ),
                    ],
                ),
            ],
        )
        # The AlterField of Pony.id relies on the state reloaded by the
        # preceding RenameField of its FK target's primary key.
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameField("Rider", "id", "id2"),
                migrations.AlterField(
                    "Pony", "id", models.CharField(primary_key=True, max_length=99)
                ),
            ],
            # Only wrap in a transaction when the backend supports renaming
            # referenced columns atomically.
            atomic=connection.features.supports_atomic_references_rename,
        )
    def test_rename_field(self):
        """
        Tests the RenameField operation.
        """
        # unique_together / index_together both involve "pink", so the rename
        # must propagate into those options and their DB constraints.
        project_state = self.set_up_test_model(
            "test_rnfl", unique_together=True, index_together=True
        )
        # Test the state alteration
        operation = migrations.RenameField("Pony", "pink", "blue")
        self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
        self.assertEqual(operation.migration_name_fragment, "rename_pink_pony_blue")
        new_state = project_state.clone()
        operation.state_forwards("test_rnfl", new_state)
        self.assertIn("blue", new_state.models["test_rnfl", "pony"].fields)
        self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].fields)
        # Make sure the unique_together has the renamed column too
        self.assertIn(
            "blue", new_state.models["test_rnfl", "pony"].options["unique_together"][0]
        )
        self.assertNotIn(
            "pink", new_state.models["test_rnfl", "pony"].options["unique_together"][0]
        )
        # Make sure the index_together has the renamed column too
        self.assertIn(
            "blue", new_state.models["test_rnfl", "pony"].options["index_together"][0]
        )
        self.assertNotIn(
            "pink", new_state.models["test_rnfl", "pony"].options["index_together"][0]
        )
        # Test the database alteration
        self.assertColumnExists("test_rnfl_pony", "pink")
        self.assertColumnNotExists("test_rnfl_pony", "blue")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_rnfl", editor, project_state, new_state)
        self.assertColumnExists("test_rnfl_pony", "blue")
        self.assertColumnNotExists("test_rnfl_pony", "pink")
        # Ensure the unique constraint has been ported over
        with connection.cursor() as cursor:
            cursor.execute("INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)")
            # A duplicate row must still violate the (renamed) unique
            # constraint.
            with self.assertRaises(IntegrityError):
                with atomic():
                    cursor.execute(
                        "INSERT INTO test_rnfl_pony (blue, weight) VALUES (1, 1)"
                    )
            cursor.execute("DELETE FROM test_rnfl_pony")
        # Ensure the index constraint has been ported over
        self.assertIndexExists("test_rnfl_pony", ["weight", "blue"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_rnfl", editor, new_state, project_state)
        self.assertColumnExists("test_rnfl_pony", "pink")
        self.assertColumnNotExists("test_rnfl_pony", "blue")
        # Ensure the index constraint has been reset
        self.assertIndexExists("test_rnfl_pony", ["weight", "pink"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RenameField")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "old_name": "pink", "new_name": "blue"},
        )
    def test_rename_field_with_db_column(self):
        """
        RenameField requires no database changes (zero queries) when the field
        has an explicit db_column — for plain fields and FKs alike, the column
        name stays the same in both directions.
        """
        project_state = self.apply_operations(
            "test_rfwdbc",
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("field", models.IntegerField(db_column="db_field")),
                        (
                            "fk_field",
                            models.ForeignKey(
                                "Pony",
                                models.CASCADE,
                                db_column="db_fk_field",
                            ),
                        ),
                    ],
                ),
            ],
        )
        # Plain field: state renames, database untouched.
        new_state = project_state.clone()
        operation = migrations.RenameField("Pony", "field", "renamed_field")
        operation.state_forwards("test_rfwdbc", new_state)
        self.assertIn("renamed_field", new_state.models["test_rfwdbc", "pony"].fields)
        self.assertNotIn("field", new_state.models["test_rfwdbc", "pony"].fields)
        self.assertColumnExists("test_rfwdbc_pony", "db_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_forwards(
                    "test_rfwdbc", editor, project_state, new_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_backwards(
                    "test_rfwdbc", editor, new_state, project_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_field")
        # FK field: same expectation for the foreign key column.
        new_state = project_state.clone()
        operation = migrations.RenameField("Pony", "fk_field", "renamed_fk_field")
        operation.state_forwards("test_rfwdbc", new_state)
        self.assertIn(
            "renamed_fk_field", new_state.models["test_rfwdbc", "pony"].fields
        )
        self.assertNotIn("fk_field", new_state.models["test_rfwdbc", "pony"].fields)
        self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_forwards(
                    "test_rfwdbc", editor, project_state, new_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_backwards(
                    "test_rfwdbc", editor, new_state, project_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
    def test_rename_field_case(self):
        """
        Renaming a field only by letter case still renames the database
        column (checked through the backend's identifier converter).
        """
        project_state = self.apply_operations(
            "test_rfmx",
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("field", models.IntegerField()),
                    ],
                ),
            ],
        )
        new_state = project_state.clone()
        operation = migrations.RenameField("Pony", "field", "FiElD")
        operation.state_forwards("test_rfmx", new_state)
        self.assertIn("FiElD", new_state.models["test_rfmx", "pony"].fields)
        self.assertColumnExists("test_rfmx_pony", "field")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_rfmx", editor, project_state, new_state)
        # The backend may normalize identifier case, hence the converter.
        self.assertColumnExists(
            "test_rfmx_pony",
            connection.introspection.identifier_converter("FiElD"),
        )
        with connection.schema_editor() as editor:
            operation.database_backwards("test_rfmx", editor, new_state, project_state)
        self.assertColumnExists("test_rfmx_pony", "field")
def test_rename_missing_field(self):
state = ProjectState()
state.add_model(ModelState("app", "model", []))
with self.assertRaisesMessage(
FieldDoesNotExist, "app.model has no field named 'field'"
):
migrations.RenameField("model", "field", "new_field").state_forwards(
"app", state
)
    def test_rename_referenced_field_state_forward(self):
        """
        Renaming a field updates references to it in the project state: the
        remote_field.field_name, from_fields, and to_fields of ForeignKey /
        ForeignObject fields that point at it, and renaming the FK itself
        updates the ForeignObject's from_fields.
        """
        state = ProjectState()
        state.add_model(
            ModelState(
                "app",
                "Model",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field", models.IntegerField(unique=True)),
                ],
            )
        )
        # OtherModel references Model.field both through a ForeignKey
        # (to_field) and a ForeignObject (from_fields/to_fields).
        state.add_model(
            ModelState(
                "app",
                "OtherModel",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "fk",
                        models.ForeignKey("Model", models.CASCADE, to_field="field"),
                    ),
                    (
                        "fo",
                        models.ForeignObject(
                            "Model",
                            models.CASCADE,
                            from_fields=("fk",),
                            to_fields=("field",),
                        ),
                    ),
                ],
            )
        )
        # Rename the referenced field on the target model.
        operation = migrations.RenameField("Model", "field", "renamed")
        new_state = state.clone()
        operation.state_forwards("app", new_state)
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fk"].remote_field.field_name,
            "renamed",
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fk"].from_fields, ["self"]
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fk"].to_fields, ("renamed",)
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].from_fields, ("fk",)
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
        )
        # Rename the FK on the referencing model.
        operation = migrations.RenameField("OtherModel", "fk", "renamed_fk")
        new_state = state.clone()
        operation.state_forwards("app", new_state)
        self.assertEqual(
            new_state.models["app", "othermodel"]
            .fields["renamed_fk"]
            .remote_field.field_name,
            "renamed",
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["renamed_fk"].from_fields,
            ("self",),
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["renamed_fk"].to_fields,
            ("renamed",),
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].from_fields,
            ("renamed_fk",),
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
        )
    def test_alter_unique_together(self):
        """
        Tests the AlterUniqueTogether operation.
        """
        project_state = self.set_up_test_model("test_alunto")
        # Test the state alteration
        operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
        self.assertEqual(
            operation.describe(), "Alter unique_together for Pony (1 constraint(s))"
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "alter_pony_unique_together",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alunto", new_state)
        # The old state is untouched; only the clone gains the option.
        self.assertEqual(
            len(
                project_state.models["test_alunto", "pony"].options.get(
                    "unique_together", set()
                )
            ),
            0,
        )
        self.assertEqual(
            len(
                new_state.models["test_alunto", "pony"].options.get(
                    "unique_together", set()
                )
            ),
            1,
        )
        # Make sure we can insert duplicate rows
        with connection.cursor() as cursor:
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("DELETE FROM test_alunto_pony")
            # Test the database alteration
            with connection.schema_editor() as editor:
                operation.database_forwards(
                    "test_alunto", editor, project_state, new_state
                )
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            # A duplicate now violates the freshly created constraint.
            with self.assertRaises(IntegrityError):
                with atomic():
                    cursor.execute(
                        "INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)"
                    )
            cursor.execute("DELETE FROM test_alunto_pony")
            # And test reversal
            with connection.schema_editor() as editor:
                operation.database_backwards(
                    "test_alunto", editor, new_state, project_state
                )
            # Duplicates are allowed again once the constraint is reversed.
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("DELETE FROM test_alunto_pony")
        # Test flat unique_together
        operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
        operation.state_forwards("test_alunto", new_state)
        self.assertEqual(
            len(
                new_state.models["test_alunto", "pony"].options.get(
                    "unique_together", set()
                )
            ),
            1,
        )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterUniqueTogether")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"name": "Pony", "unique_together": {("pink", "weight")}}
        )
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (0 constraint(s))"
)
    def test_add_index(self):
        """
        Test the AddIndex operation.
        """
        project_state = self.set_up_test_model("test_adin")
        # Unnamed indexes are rejected up front.
        msg = (
            "Indexes passed to AddIndex operations require a name argument. "
            "<Index: fields=['pink']> doesn't have one."
        )
        with self.assertRaisesMessage(ValueError, msg):
            migrations.AddIndex("Pony", models.Index(fields=["pink"]))
        index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx")
        operation = migrations.AddIndex("Pony", index)
        self.assertEqual(
            operation.describe(),
            "Create index test_adin_pony_pink_idx on field(s) pink of model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "pony_test_adin_pony_pink_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_adin", new_state)
        # Test the database alteration
        self.assertEqual(
            len(new_state.models["test_adin", "pony"].options["indexes"]), 1
        )
        self.assertIndexNotExists("test_adin_pony", ["pink"])
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adin", editor, project_state, new_state)
        self.assertIndexExists("test_adin_pony", ["pink"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_adin", editor, new_state, project_state)
        self.assertIndexNotExists("test_adin_pony", ["pink"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
    def test_remove_index(self):
        """
        Test the RemoveIndex operation.
        """
        # multicol_index=True provides the "pony_test_idx" index on
        # (pink, weight) that this test removes.
        project_state = self.set_up_test_model("test_rmin", multicol_index=True)
        self.assertTableExists("test_rmin_pony")
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        operation = migrations.RemoveIndex("Pony", "pony_test_idx")
        self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony")
        self.assertEqual(
            operation.migration_name_fragment,
            "remove_pony_pony_test_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_rmin", new_state)
        # Test the state alteration
        self.assertEqual(
            len(new_state.models["test_rmin", "pony"].options["indexes"]), 0
        )
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_rmin", editor, project_state, new_state)
        self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_rmin", editor, new_state, project_state)
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": "pony_test_idx"})
        # Also test a field dropped with index - sqlite remake issue
        operations = [
            migrations.RemoveIndex("Pony", "pony_test_idx"),
            migrations.RemoveField("Pony", "pink"),
        ]
        self.assertColumnExists("test_rmin_pony", "pink")
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        # Test database alteration
        new_state = project_state.clone()
        self.apply_operations("test_rmin", new_state, operations=operations)
        self.assertColumnNotExists("test_rmin_pony", "pink")
        self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
        # And test reversal
        self.unapply_operations("test_rmin", project_state, operations=operations)
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
def test_add_index_state_forwards(self):
project_state = self.set_up_test_model("test_adinsf")
index = models.Index(fields=["pink"], name="test_adinsf_pony_pink_idx")
old_model = project_state.apps.get_model("test_adinsf", "Pony")
new_state = project_state.clone()
operation = migrations.AddIndex("Pony", index)
operation.state_forwards("test_adinsf", new_state)
new_model = new_state.apps.get_model("test_adinsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_remove_index_state_forwards(self):
project_state = self.set_up_test_model("test_rminsf")
index = models.Index(fields=["pink"], name="test_rminsf_pony_pink_idx")
migrations.AddIndex("Pony", index).state_forwards("test_rminsf", project_state)
old_model = project_state.apps.get_model("test_rminsf", "Pony")
new_state = project_state.clone()
operation = migrations.RemoveIndex("Pony", "test_rminsf_pony_pink_idx")
operation.state_forwards("test_rminsf", new_state)
new_model = new_state.apps.get_model("test_rminsf", "Pony")
self.assertIsNot(old_model, new_model)
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_add_func_index(self):
        """AddIndex supports functional indexes built from expressions."""
        app_label = "test_addfuncin"
        index_name = f"{app_label}_pony_abs_idx"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        index = models.Index(Abs("weight"), name=index_name)
        operation = migrations.AddIndex("Pony", index)
        self.assertEqual(
            operation.describe(),
            "Create index test_addfuncin_pony_abs_idx on Abs(F(weight)) on model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "pony_test_addfuncin_pony_abs_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 1)
        self.assertIndexNameNotExists(table_name, index_name)
        # Add index.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameExists(table_name, index_name)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameNotExists(table_name, index_name)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_remove_func_index(self):
        """RemoveIndex drops a functional index; reversal re-creates it."""
        app_label = "test_rmfuncin"
        index_name = f"{app_label}_pony_abs_idx"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(
            app_label,
            indexes=[
                models.Index(Abs("weight"), name=index_name),
            ],
        )
        self.assertTableExists(table_name)
        self.assertIndexNameExists(table_name, index_name)
        operation = migrations.RemoveIndex("Pony", index_name)
        self.assertEqual(
            operation.describe(),
            "Remove index test_rmfuncin_pony_abs_idx from Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "remove_pony_test_rmfuncin_pony_abs_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 0)
        # Remove index.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameNotExists(table_name, index_name)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameExists(table_name, index_name)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": index_name})
    def test_alter_field_with_index(self):
        """
        Test AlterField operation with an index to ensure indexes created via
        Meta.indexes don't get dropped with sqlite3 remake.
        """
        # index=True sets up an index on "pink" via Meta.indexes.
        project_state = self.set_up_test_model("test_alflin", index=True)
        operation = migrations.AlterField(
            "Pony", "pink", models.IntegerField(null=True)
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alflin", new_state)
        # Test the database alteration
        self.assertColumnNotNull("test_alflin_pony", "pink")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alflin", editor, project_state, new_state)
        # Index hasn't been dropped
        self.assertIndexExists("test_alflin_pony", ["pink"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alflin", editor, new_state, project_state
            )
        # Ensure the index is still there
        self.assertIndexExists("test_alflin_pony", ["pink"])
    def test_alter_index_together(self):
        """
        Tests the AlterIndexTogether operation: the option is set in state and
        the composite index is created/removed in the database.
        """
        project_state = self.set_up_test_model("test_alinto")
        # Test the state alteration
        operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
        self.assertEqual(
            operation.describe(), "Alter index_together for Pony (1 constraint(s))"
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "alter_pony_index_together",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alinto", new_state)
        # Only the cloned state gains the option; the old one stays empty.
        self.assertEqual(
            len(
                project_state.models["test_alinto", "pony"].options.get(
                    "index_together", set()
                )
            ),
            0,
        )
        self.assertEqual(
            len(
                new_state.models["test_alinto", "pony"].options.get(
                    "index_together", set()
                )
            ),
            1,
        )
        # Make sure there's no matching index
        self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alinto", editor, project_state, new_state)
        self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alinto", editor, new_state, project_state
            )
        self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterIndexTogether")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"name": "Pony", "index_together": {("pink", "weight")}}
        )
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter index_together for Pony (0 constraint(s))"
)
    @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
    def test_alter_index_together_remove_with_unique_together(self):
        """
        Removing index_together mustn't drop the unique constraint that
        unique_together created on the same columns.
        """
        app_label = "test_alintoremove_wunto"
        table_name = "%s_pony" % app_label
        project_state = self.set_up_test_model(app_label, unique_together=True)
        self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
        # Add index together.
        new_state = project_state.clone()
        operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexExists(table_name, ["pink", "weight"])
        # Remove index together.
        project_state = new_state
        new_state = project_state.clone()
        operation = migrations.AlterIndexTogether("Pony", set())
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # The index is gone, but the unique constraint must survive.
        self.assertIndexNotExists(table_name, ["pink", "weight"])
        self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_constraint(self):
        """
        AddConstraint with CheckConstraints updates the state, enforces the
        checks at the database level, and is reversible.
        """
        project_state = self.set_up_test_model("test_addconstraint")
        gt_check = models.Q(pink__gt=2)
        gt_constraint = models.CheckConstraint(
            check=gt_check, name="test_add_constraint_pony_pink_gt_2"
        )
        gt_operation = migrations.AddConstraint("Pony", gt_constraint)
        self.assertEqual(
            gt_operation.describe(),
            "Create constraint test_add_constraint_pony_pink_gt_2 on model Pony",
        )
        self.assertEqual(
            gt_operation.migration_name_fragment,
            "pony_test_add_constraint_pony_pink_gt_2",
        )
        # Test the state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(
            len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
            1,
        )
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards(
                "test_addconstraint", editor, project_state, new_state
            )
        # pink=1 violates pink__gt=2.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=1, weight=1.0)
        # Add another one.
        lt_check = models.Q(pink__lt=100)
        lt_constraint = models.CheckConstraint(
            check=lt_check, name="test_add_constraint_pony_pink_lt_100"
        )
        lt_operation = migrations.AddConstraint("Pony", lt_constraint)
        lt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(
            len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
            2,
        )
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 2)
        with connection.schema_editor() as editor:
            lt_operation.database_forwards(
                "test_addconstraint", editor, project_state, new_state
            )
        # pink=100 violates pink__lt=100.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=100, weight=1.0)
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards(
                "test_addconstraint", editor, new_state, project_state
            )
        Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"model_name": "Pony", "constraint": gt_constraint}
        )
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_constraint_percent_escaping(self):
        """
        "%" characters in CheckConstraint SQL — whether generated by lookups
        (startswith/endswith), present as literals, or baked into the RHS of
        an F() comparison — must be escaped correctly.
        """
        app_label = "add_constraint_string_quoting"
        operations = [
            migrations.CreateModel(
                "Author",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("name", models.CharField(max_length=100)),
                    ("surname", models.CharField(max_length=100, default="")),
                    ("rebate", models.CharField(max_length=100)),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        # "%" generated in startswith lookup should be escaped in a way that is
        # considered a leading wildcard.
        check = models.Q(name__startswith="Albert")
        constraint = models.CheckConstraint(check=check, name="name_constraint")
        operation = migrations.AddConstraint("Author", constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, "Author")
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name="Artur")
        # Literal "%" should be escaped in a way that is not a considered a
        # wildcard.
        check = models.Q(rebate__endswith="%")
        constraint = models.CheckConstraint(check=check, name="rebate_constraint")
        operation = migrations.AddConstraint("Author", constraint)
        from_state = to_state
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        Author = to_state.apps.get_model(app_label, "Author")
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, "Author")
        # "10$" must fail the constraint; "10%" must pass.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name="Albert", rebate="10$")
        author = Author.objects.create(name="Albert", rebate="10%")
        self.assertEqual(Author.objects.get(), author)
        # Right-hand-side baked "%" literals should not be used for parameters
        # interpolation.
        check = ~models.Q(surname__startswith=models.F("name"))
        constraint = models.CheckConstraint(check=check, name="name_constraint_rhs")
        operation = migrations.AddConstraint("Author", constraint)
        from_state = to_state
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Author = to_state.apps.get_model(app_label, "Author")
        with self.assertRaises(IntegrityError), transaction.atomic():
            Author.objects.create(name="Albert", surname="Alberto")
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_or_constraint(self):
        """
        A CheckConstraint built from Q objects combined with OR is enforced:
        rows must satisfy at least one branch.
        """
        app_label = "test_addorconstraint"
        constraint_name = "add_constraint_or"
        from_state = self.set_up_test_model(app_label)
        check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
        constraint = models.CheckConstraint(check=check, name=constraint_name)
        operation = migrations.AddConstraint("Pony", constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Pony = to_state.apps.get_model(app_label, "Pony")
        # Fails both branches: pink not > 2.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=2, weight=3.0)
        # Fails both branches: weight neither > 2 nor < 0.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=3, weight=1.0)
        # Each of these satisfies at least one branch.
        Pony.objects.bulk_create(
            [
                Pony(pink=3, weight=-1.0),
                Pony(pink=1, weight=-1.0),
                Pony(pink=3, weight=3.0),
            ]
        )
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_constraint_combinable(self):
        """
        A CheckConstraint whose check uses a combined expression
        (read == 100 - F("unread")) is enforced at the database level.
        """
        app_label = "test_addconstraint_combinable"
        operations = [
            migrations.CreateModel(
                "Book",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("read", models.PositiveIntegerField()),
                    ("unread", models.PositiveIntegerField()),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        constraint = models.CheckConstraint(
            check=models.Q(read=(100 - models.F("unread"))),
            name="test_addconstraint_combinable_sum_100",
        )
        operation = migrations.AddConstraint("Book", constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Book = to_state.apps.get_model(app_label, "Book")
        # 70 + 10 != 100 -> rejected; 70 + 30 == 100 -> accepted.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Book.objects.create(read=70, unread=10)
        Book.objects.create(read=70, unread=30)
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_remove_constraint(self):
        """
        RemoveConstraint drops check constraints from state and database;
        reversal restores enforcement.
        """
        project_state = self.set_up_test_model(
            "test_removeconstraint",
            constraints=[
                models.CheckConstraint(
                    check=models.Q(pink__gt=2),
                    name="test_remove_constraint_pony_pink_gt_2",
                ),
                models.CheckConstraint(
                    check=models.Q(pink__lt=100),
                    name="test_remove_constraint_pony_pink_lt_100",
                ),
            ],
        )
        gt_operation = migrations.RemoveConstraint(
            "Pony", "test_remove_constraint_pony_pink_gt_2"
        )
        self.assertEqual(
            gt_operation.describe(),
            "Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony",
        )
        self.assertEqual(
            gt_operation.migration_name_fragment,
            "remove_pony_test_remove_constraint_pony_pink_gt_2",
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_removeconstraint", "pony"].options["constraints"]
            ),
            1,
        )
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards(
                "test_removeconstraint", editor, project_state, new_state
            )
        # pink=1 is allowed now, but pink=100 still trips the lt constraint.
        Pony.objects.create(pink=1, weight=1.0).delete()
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=100, weight=1.0)
        # Remove the other one.
        lt_operation = migrations.RemoveConstraint(
            "Pony", "test_remove_constraint_pony_pink_lt_100"
        )
        lt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_removeconstraint", "pony"].options["constraints"]
            ),
            0,
        )
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with connection.schema_editor() as editor:
            lt_operation.database_forwards(
                "test_removeconstraint", editor, project_state, new_state
            )
        Pony.objects.create(pink=100, weight=1.0).delete()
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards(
                "test_removeconstraint", editor, new_state, project_state
            )
        # The gt constraint is enforced again after reversal.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "name": "test_remove_constraint_pony_pink_gt_2"},
        )
    def test_add_partial_unique_constraint(self):
        """
        AddConstraint with a conditional (partial) UniqueConstraint updates
        project state, enforces uniqueness only for rows matching the
        condition on backends that support partial indexes, and is
        reversible.
        """
        project_state = self.set_up_test_model("test_addpartialuniqueconstraint")
        partial_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            condition=models.Q(weight__gt=5),
            name="test_constraint_pony_pink_for_weight_gt_5_uniq",
        )
        operation = migrations.AddConstraint("Pony", partial_unique_constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq "
            "on model Pony",
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards("test_addpartialuniqueconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_addpartialuniqueconstraint", "pony"].options[
                    "constraints"
                ]
            ),
            1,
        )
        Pony = new_state.apps.get_model("test_addpartialuniqueconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_addpartialuniqueconstraint", editor, project_state, new_state
            )
        # Test constraint works
        # Rows below the weight threshold are unconstrained; duplicates allowed.
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_addpartialuniqueconstraint", editor, new_state, project_state
            )
        # Test constraint doesn't work
        Pony.objects.create(pink=1, weight=7.0)
        # Test deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": partial_unique_constraint},
        )
    def test_remove_partial_unique_constraint(self):
        """
        RemoveConstraint drops a conditional (partial) UniqueConstraint from
        both state and database, and re-adds it (restoring enforcement on
        supporting backends) when reversed.
        """
        project_state = self.set_up_test_model(
            "test_removepartialuniqueconstraint",
            constraints=[
                models.UniqueConstraint(
                    fields=["pink"],
                    condition=models.Q(weight__gt=5),
                    name="test_constraint_pony_pink_for_weight_gt_5_uniq",
                ),
            ],
        )
        gt_operation = migrations.RemoveConstraint(
            "Pony", "test_constraint_pony_pink_for_weight_gt_5_uniq"
        )
        self.assertEqual(
            gt_operation.describe(),
            "Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from model Pony",
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_removepartialuniqueconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_removepartialuniqueconstraint", "pony"].options[
                    "constraints"
                ]
            ),
            0,
        )
        Pony = new_state.apps.get_model("test_removepartialuniqueconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards(
                "test_removepartialuniqueconstraint", editor, project_state, new_state
            )
        # Test constraint doesn't work
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        # Delete the last row so reversal starts from a non-violating table.
        Pony.objects.create(pink=1, weight=7.0).delete()
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards(
                "test_removepartialuniqueconstraint", editor, new_state, project_state
            )
        # Test constraint works
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "test_constraint_pony_pink_for_weight_gt_5_uniq",
            },
        )
    def test_add_deferred_unique_constraint(self):
        """
        AddConstraint with a DEFERRED deferrable UniqueConstraint allows a
        transient duplicate inside a transaction (checked at commit), can be
        switched to immediate checking with SET CONSTRAINTS, and is a no-op
        on backends without deferrable-constraint support.
        """
        app_label = "test_adddeferred_uc"
        project_state = self.set_up_test_model(app_label)
        deferred_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="deferred_pink_constraint_add",
            deferrable=models.Deferrable.DEFERRED,
        )
        operation = migrations.AddConstraint("Pony", deferred_unique_constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint deferred_pink_constraint_add on model Pony",
        )
        # Add constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 1
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        with connection.schema_editor() as editor, CaptureQueriesContext(
            connection
        ) as ctx:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony.objects.create(pink=1, weight=4.0)
        if connection.features.supports_deferrable_unique_constraints:
            # Unique constraint is deferred.
            # The duplicate is resolved before commit, so no error is raised.
            with transaction.atomic():
                obj = Pony.objects.create(pink=1, weight=4.0)
                obj.pink = 2
                obj.save()
            # Constraint behavior can be changed with SET CONSTRAINTS.
            with self.assertRaises(IntegrityError):
                with transaction.atomic(), connection.cursor() as cursor:
                    quoted_name = connection.ops.quote_name(
                        deferred_unique_constraint.name
                    )
                    cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
                    obj = Pony.objects.create(pink=1, weight=4.0)
                    obj.pink = 3
                    obj.save()
        else:
            # Unsupported backends must emit no SQL for the operation.
            self.assertEqual(len(ctx), 0)
            Pony.objects.create(pink=1, weight=4.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": deferred_unique_constraint},
        )
    def test_remove_deferred_unique_constraint(self):
        """
        RemoveConstraint drops a DEFERRED deferrable UniqueConstraint (no SQL
        on unsupported backends) and restores its deferred semantics when
        reversed.
        """
        app_label = "test_removedeferred_uc"
        deferred_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="deferred_pink_constraint_rm",
            deferrable=models.Deferrable.DEFERRED,
        )
        project_state = self.set_up_test_model(
            app_label, constraints=[deferred_unique_constraint]
        )
        operation = migrations.RemoveConstraint("Pony", deferred_unique_constraint.name)
        self.assertEqual(
            operation.describe(),
            "Remove constraint deferred_pink_constraint_rm from model Pony",
        )
        # Remove constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 0
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with connection.schema_editor() as editor, CaptureQueriesContext(
            connection
        ) as ctx:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0).delete()
        if not connection.features.supports_deferrable_unique_constraints:
            # Unsupported backends must emit no SQL for the operation.
            self.assertEqual(len(ctx), 0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_deferrable_unique_constraints:
            # Unique constraint is deferred.
            with transaction.atomic():
                obj = Pony.objects.create(pink=1, weight=4.0)
                obj.pink = 2
                obj.save()
            # Constraint behavior can be changed with SET CONSTRAINTS.
            with self.assertRaises(IntegrityError):
                with transaction.atomic(), connection.cursor() as cursor:
                    quoted_name = connection.ops.quote_name(
                        deferred_unique_constraint.name
                    )
                    cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
                    obj = Pony.objects.create(pink=1, weight=4.0)
                    obj.pink = 3
                    obj.save()
        else:
            Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "deferred_pink_constraint_rm",
            },
        )
    def test_add_covering_unique_constraint(self):
        """
        AddConstraint with a covering UniqueConstraint (include=) enforces
        uniqueness on backends that support covering indexes, emits no SQL
        elsewhere, and is reversible.
        """
        app_label = "test_addcovering_uc"
        project_state = self.set_up_test_model(app_label)
        covering_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="covering_pink_constraint_add",
            include=["weight"],
        )
        operation = migrations.AddConstraint("Pony", covering_unique_constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint covering_pink_constraint_add on model Pony",
        )
        # Add constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 1
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        with connection.schema_editor() as editor, CaptureQueriesContext(
            connection
        ) as ctx:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony.objects.create(pink=1, weight=4.0)
        if connection.features.supports_covering_indexes:
            with self.assertRaises(IntegrityError):
                Pony.objects.create(pink=1, weight=4.0)
        else:
            # Unsupported backends must emit no SQL for the operation.
            self.assertEqual(len(ctx), 0)
            Pony.objects.create(pink=1, weight=4.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": covering_unique_constraint},
        )
    def test_remove_covering_unique_constraint(self):
        """
        RemoveConstraint drops a covering UniqueConstraint (include=) from
        state and database (no SQL on unsupported backends) and restores
        enforcement when reversed.
        """
        app_label = "test_removecovering_uc"
        covering_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="covering_pink_constraint_rm",
            include=["weight"],
        )
        project_state = self.set_up_test_model(
            app_label, constraints=[covering_unique_constraint]
        )
        operation = migrations.RemoveConstraint("Pony", covering_unique_constraint.name)
        self.assertEqual(
            operation.describe(),
            "Remove constraint covering_pink_constraint_rm from model Pony",
        )
        # Remove constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 0
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with connection.schema_editor() as editor, CaptureQueriesContext(
            connection
        ) as ctx:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Delete the duplicate so reversal starts from a non-violating table.
        Pony.objects.create(pink=1, weight=4.0).delete()
        if not connection.features.supports_covering_indexes:
            # Unsupported backends must emit no SQL for the operation.
            self.assertEqual(len(ctx), 0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_covering_indexes:
            with self.assertRaises(IntegrityError):
                Pony.objects.create(pink=1, weight=4.0)
        else:
            Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "covering_pink_constraint_rm",
            },
        )
    def test_add_func_unique_constraint(self):
        """
        AddConstraint with an expression-based UniqueConstraint (Abs(weight))
        creates the backing index and enforces uniqueness on backends with
        expression-index support, does nothing elsewhere, and is reversible.
        """
        app_label = "test_adfuncuc"
        constraint_name = f"{app_label}_pony_abs_uq"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        constraint = models.UniqueConstraint(Abs("weight"), name=constraint_name)
        operation = migrations.AddConstraint("Pony", constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint test_adfuncuc_pony_abs_uq on model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "pony_test_adfuncuc_pony_abs_uq",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 1
        )
        self.assertIndexNameNotExists(table_name, constraint_name)
        # Add constraint.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = new_state.apps.get_model(app_label, "Pony")
        Pony.objects.create(weight=4.0)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
            # -4.0 collides with 4.0 under Abs().
            with self.assertRaises(IntegrityError):
                Pony.objects.create(weight=-4.0)
        else:
            self.assertIndexNameNotExists(table_name, constraint_name)
            Pony.objects.create(weight=-4.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameNotExists(table_name, constraint_name)
        # Constraint doesn't work.
        Pony.objects.create(weight=-4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": constraint},
        )
    def test_remove_func_unique_constraint(self):
        """
        RemoveConstraint drops an expression-based UniqueConstraint
        (Abs(weight)) together with its backing index, and restores both
        (re-enabling enforcement) when reversed.
        """
        app_label = "test_rmfuncuc"
        constraint_name = f"{app_label}_pony_abs_uq"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(
            app_label,
            constraints=[
                models.UniqueConstraint(Abs("weight"), name=constraint_name),
            ],
        )
        self.assertTableExists(table_name)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
        operation = migrations.RemoveConstraint("Pony", constraint_name)
        self.assertEqual(
            operation.describe(),
            "Remove constraint test_rmfuncuc_pony_abs_uq from model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "remove_pony_test_rmfuncuc_pony_abs_uq",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 0
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        # Remove constraint.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameNotExists(table_name, constraint_name)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Delete the colliding row so reversal starts from a valid table.
        Pony.objects.create(pink=1, weight=-4.0).delete()
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
            with self.assertRaises(IntegrityError):
                Pony.objects.create(weight=-4.0)
        else:
            self.assertIndexNameNotExists(table_name, constraint_name)
            Pony.objects.create(weight=-4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": constraint_name})
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions(
"Pony", {"permissions": [("can_groom", "Can groom")]}
)
self.assertEqual(operation.describe(), "Change Meta options on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_options")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
0,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
1,
)
self.assertEqual(
new_state.models["test_almoop", "pony"].options["permissions"][0][0],
"can_groom",
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"name": "Pony", "options": {"permissions": [("can_groom", "Can groom")]}},
)
def test_alter_model_options_emptying(self):
"""
The AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
1,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
0,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "options": {}})
    def test_alter_order_with_respect_to(self):
        """
        AlterOrderWithRespectTo sets order_with_respect_to in state, adds the
        backing _order column (backfilled for pre-existing rows), and drops
        the column again when reversed.
        """
        project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
        # Test the state alteration
        operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
        self.assertEqual(
            operation.describe(), "Set order_with_respect_to on Rider to pony"
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "alter_rider_order_with_respect_to",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alorwrtto", new_state)
        self.assertIsNone(
            project_state.models["test_alorwrtto", "rider"].options.get(
                "order_with_respect_to", None
            )
        )
        self.assertEqual(
            new_state.models["test_alorwrtto", "rider"].options.get(
                "order_with_respect_to", None
            ),
            "pony",
        )
        # Make sure there's no matching index
        self.assertColumnNotExists("test_alorwrtto_rider", "_order")
        # Create some rows before alteration
        rendered_state = project_state.apps
        pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(
            weight=50
        )
        rider1 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
            pony=pony
        )
        rider1.friend = rider1
        rider1.save()
        rider2 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
            pony=pony
        )
        rider2.friend = rider2
        rider2.save()
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_alorwrtto", editor, project_state, new_state
            )
        self.assertColumnExists("test_alorwrtto_rider", "_order")
        # Check for correct value in rows
        # Pre-existing rows are backfilled with the column default of 0.
        updated_riders = new_state.apps.get_model(
            "test_alorwrtto", "Rider"
        ).objects.all()
        self.assertEqual(updated_riders[0]._order, 0)
        self.assertEqual(updated_riders[1]._order, 0)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alorwrtto", editor, new_state, project_state
            )
        self.assertColumnNotExists("test_alorwrtto_rider", "_order")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterOrderWithRespectTo")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"name": "Rider", "order_with_respect_to": "pony"}
        )
    def test_alter_model_managers(self):
        """
        AlterModelManagers installs the given managers on the model state,
        preserving order and constructor arguments, and exposes them as
        attributes on the rendered model class.
        """
        project_state = self.set_up_test_model("test_almoma")
        # Test the state alteration
        operation = migrations.AlterModelManagers(
            "Pony",
            managers=[
                ("food_qs", FoodQuerySet.as_manager()),
                ("food_mgr", FoodManager("a", "b")),
                ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
            ],
        )
        self.assertEqual(operation.describe(), "Change managers on Pony")
        self.assertEqual(operation.migration_name_fragment, "alter_pony_managers")
        managers = project_state.models["test_almoma", "pony"].managers
        self.assertEqual(managers, [])
        new_state = project_state.clone()
        operation.state_forwards("test_almoma", new_state)
        self.assertIn(("test_almoma", "pony"), new_state.models)
        managers = new_state.models["test_almoma", "pony"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        # FoodManager fills in defaults (1, 2) for its trailing args.
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
        rendered_state = new_state.apps
        model = rendered_state.get_model("test_almoma", "pony")
        self.assertIsInstance(model.food_qs, models.Manager)
        self.assertIsInstance(model.food_mgr, FoodManager)
        self.assertIsInstance(model.food_mgr_kwargs, FoodManager)
    def test_alter_model_managers_emptying(self):
        """
        AlterModelManagers with an empty managers list removes all previously
        declared managers from the model state.
        """
        project_state = self.set_up_test_model("test_almomae", manager_model=True)
        # Test the state alteration
        operation = migrations.AlterModelManagers("Food", managers=[])
        self.assertEqual(operation.describe(), "Change managers on Food")
        self.assertIn(("test_almomae", "food"), project_state.models)
        managers = project_state.models["test_almomae", "food"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
        new_state = project_state.clone()
        operation.state_forwards("test_almomae", new_state)
        managers = new_state.models["test_almomae", "food"].managers
        self.assertEqual(managers, [])
    def test_alter_fk(self):
        """
        Creating and then altering an FK works correctly
        and deals with the pending SQL (#23091).

        Both operations run inside a single schema_editor so that deferred
        (pending) SQL from the CreateModel is still queued when the
        AlterField executes.
        """
        project_state = self.set_up_test_model("test_alfk")
        # Test adding and then altering the FK in one go
        create_operation = migrations.CreateModel(
            name="Rider",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("pony", models.ForeignKey("Pony", models.CASCADE)),
            ],
        )
        create_state = project_state.clone()
        create_operation.state_forwards("test_alfk", create_state)
        alter_operation = migrations.AlterField(
            model_name="Rider",
            name="pony",
            field=models.ForeignKey("Pony", models.CASCADE, editable=False),
        )
        alter_state = create_state.clone()
        alter_operation.state_forwards("test_alfk", alter_state)
        with connection.schema_editor() as editor:
            create_operation.database_forwards(
                "test_alfk", editor, project_state, create_state
            )
            alter_operation.database_forwards(
                "test_alfk", editor, create_state, alter_state
            )
    def test_alter_fk_non_fk(self):
        """
        Altering an FK to a non-FK works (#23244).

        The column is renamed from pony_id (FK convention) to pony (plain
        field), and the rename is undone on reversal.
        """
        # Test the state alteration
        operation = migrations.AlterField(
            model_name="Rider",
            name="pony",
            field=models.FloatField(),
        )
        project_state, new_state = self.make_test_state(
            "test_afknfk", operation, related_model=True
        )
        # Test the database alteration
        self.assertColumnExists("test_afknfk_rider", "pony_id")
        self.assertColumnNotExists("test_afknfk_rider", "pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_afknfk", editor, project_state, new_state)
        self.assertColumnExists("test_afknfk_rider", "pony")
        self.assertColumnNotExists("test_afknfk_rider", "pony_id")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_afknfk", editor, new_state, project_state
            )
        self.assertColumnExists("test_afknfk_rider", "pony_id")
        self.assertColumnNotExists("test_afknfk_rider", "pony")
    def test_run_sql(self):
        """
        Tests the RunSQL operation: statement splitting of multi-line SQL
        with comments, %% escaping during SQL collection, state_operations
        application, real database execution, reversal, deconstruction, and
        elidable reduction.
        """
        project_state = self.set_up_test_model("test_runsql")
        # Create the operation
        operation = migrations.RunSQL(
            # Use a multi-line string with a comment to test splitting on SQLite and MySQL respectively
            "CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
            "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'i love ponies'); -- this is magic!\n"
            "INSERT INTO i_love_ponies (id, special_thing) VALUES (2, 'i love django');\n"
            "UPDATE i_love_ponies SET special_thing = 'Ponies' WHERE special_thing LIKE '%%ponies';"
            "UPDATE i_love_ponies SET special_thing = 'Django' WHERE special_thing LIKE '%django';",
            # Run delete queries to test for parameter substitution failure
            # reported in #23426
            "DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
            "DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
            "DROP TABLE i_love_ponies",
            state_operations=[
                migrations.CreateModel(
                    "SomethingElse", [("id", models.AutoField(primary_key=True))]
                )
            ],
        )
        self.assertEqual(operation.describe(), "Raw SQL operation")
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards("test_runsql", new_state)
        self.assertEqual(
            len(new_state.models["test_runsql", "somethingelse"].fields), 1
        )
        # Make sure there's no table
        self.assertTableNotExists("i_love_ponies")
        # Test SQL collection
        # collect_sql mode records SQL without executing it, so the literal
        # %% sequences must survive in the collected output.
        with connection.schema_editor(collect_sql=True) as editor:
            operation.database_forwards("test_runsql", editor, project_state, new_state)
            self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
            operation.database_backwards(
                "test_runsql", editor, project_state, new_state
            )
            self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_runsql", editor, project_state, new_state)
        self.assertTableExists("i_love_ponies")
        # Make sure all the SQL was processed
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
            self.assertEqual(cursor.fetchall()[0][0], 2)
            cursor.execute(
                "SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'"
            )
            self.assertEqual(cursor.fetchall()[0][0], 1)
            cursor.execute(
                "SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'"
            )
            self.assertEqual(cursor.fetchall()[0][0], 1)
        # And test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_runsql", editor, new_state, project_state
            )
        self.assertTableNotExists("i_love_ponies")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RunSQL")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]
        )
        # And elidable reduction
        # A non-elidable RunSQL never reduces; an elidable one collapses away.
        self.assertIs(False, operation.reduce(operation, []))
        elidable_operation = migrations.RunSQL("SELECT 1 FROM void;", elidable=True)
        self.assertEqual(elidable_operation.reduce(operation, []), [operation])
    def test_run_sql_params(self):
        """
        #23426 - RunSQL should accept parameters.

        Statements may be plain strings, or (sql, params) 2-tuples/lists
        where params is a sequence or None; all forms are exercised in both
        directions.
        """
        project_state = self.set_up_test_model("test_runsql")
        # Create the operation
        operation = migrations.RunSQL(
            ["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
            ["DROP TABLE i_love_ponies"],
        )
        param_operation = migrations.RunSQL(
            # forwards
            (
                "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
                [
                    "INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);",
                    ["Ponies"],
                ],
                (
                    "INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);",
                    (
                        3,
                        "Python",
                    ),
                ),
            ),
            # backwards
            [
                "DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
                ["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
                (
                    "DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;",
                    [3, "Python"],
                ),
            ],
        )
        # Make sure there's no table
        self.assertTableNotExists("i_love_ponies")
        new_state = project_state.clone()
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_runsql", editor, project_state, new_state)
        # Test parameter passing
        with connection.schema_editor() as editor:
            param_operation.database_forwards(
                "test_runsql", editor, project_state, new_state
            )
        # Make sure all the SQL was processed
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
            self.assertEqual(cursor.fetchall()[0][0], 3)
        with connection.schema_editor() as editor:
            param_operation.database_backwards(
                "test_runsql", editor, new_state, project_state
            )
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
            self.assertEqual(cursor.fetchall()[0][0], 0)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_runsql", editor, new_state, project_state
            )
        self.assertTableNotExists("i_love_ponies")
    def test_run_sql_params_invalid(self):
        """
        #23426 - RunSQL should fail when a list of statements with an incorrect
        number of tuples is given.

        Each parametrized statement must be exactly a (sql, params) 2-tuple;
        1- and 3-element sequences raise ValueError at execution time.
        """
        project_state = self.set_up_test_model("test_runsql")
        new_state = project_state.clone()
        operation = migrations.RunSQL(
            # forwards
            [["INSERT INTO foo (bar) VALUES ('buz');"]],
            # backwards
            (("DELETE FROM foo WHERE bar = 'buz';", "invalid", "parameter count"),),
        )
        with connection.schema_editor() as editor:
            with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"):
                operation.database_forwards(
                    "test_runsql", editor, project_state, new_state
                )
        with connection.schema_editor() as editor:
            with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"):
                operation.database_backwards(
                    "test_runsql", editor, new_state, project_state
                )
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_sql_add_missing_semicolon_on_collect_sql(self):
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
tests = [
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1);\n",
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1)\n",
]
for sql in tests:
with self.subTest(sql=sql):
operation = migrations.RunSQL(sql, migrations.RunPython.noop)
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
collected_sql = "\n".join(editor.collected_sql)
self.assertEqual(collected_sql.count(";"), 1)
    def test_run_python(self):
        """
        Tests the RunPython operation: forward/reverse execution, state
        no-op, rejection of non-callables, deconstruction with and without
        reverse_code, irreversibility without reverse_code, MTI model
        creation inside migrations, and elidable reduction.
        """
        project_state = self.set_up_test_model("test_runpython", mti_model=True)
        # Create the operation
        def inner_method(models, schema_editor):
            # Forward code: create two ponies via the historical model.
            Pony = models.get_model("test_runpython", "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            Pony.objects.create(weight=5)
        def inner_method_reverse(models, schema_editor):
            # Reverse code: delete exactly what inner_method created.
            Pony = models.get_model("test_runpython", "Pony")
            Pony.objects.filter(pink=1, weight=3.55).delete()
            Pony.objects.filter(weight=5).delete()
        operation = migrations.RunPython(
            inner_method, reverse_code=inner_method_reverse
        )
        self.assertEqual(operation.describe(), "Raw Python operation")
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards("test_runpython", new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
        )
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
        )
        # Now test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
        )
        # Now test we can't use a string
        with self.assertRaisesMessage(
            ValueError, "RunPython must be supplied with a callable"
        ):
            migrations.RunPython("print 'ahahaha'")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
        # Also test reversal fails, with an operation identical to above but without reverse_code set
        no_reverse_operation = migrations.RunPython(inner_method)
        self.assertFalse(no_reverse_operation.reversible)
        with connection.schema_editor() as editor:
            no_reverse_operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
            with self.assertRaises(NotImplementedError):
                no_reverse_operation.database_backwards(
                    "test_runpython", editor, new_state, project_state
                )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
        )
        def create_ponies(models, schema_editor):
            # Assertions inside the migration code run while it executes.
            Pony = models.get_model("test_runpython", "Pony")
            pony1 = Pony.objects.create(pink=1, weight=3.55)
            self.assertIsNot(pony1.pk, None)
            pony2 = Pony.objects.create(weight=5)
            self.assertIsNot(pony2.pk, None)
            self.assertNotEqual(pony1.pk, pony2.pk)
        operation = migrations.RunPython(create_ponies)
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4
        )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["code"])
        def create_shetlandponies(models, schema_editor):
            # MTI child model creation also works through historical models.
            ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
            pony1 = ShetlandPony.objects.create(weight=4.0)
            self.assertIsNot(pony1.pk, None)
            pony2 = ShetlandPony.objects.create(weight=5.0)
            self.assertIsNot(pony2.pk, None)
            self.assertNotEqual(pony1.pk, pony2.pk)
        operation = migrations.RunPython(create_shetlandponies)
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6
        )
        self.assertEqual(
            project_state.apps.get_model(
                "test_runpython", "ShetlandPony"
            ).objects.count(),
            2,
        )
        # And elidable reduction
        # A non-elidable RunPython never reduces; an elidable one collapses away.
        self.assertIs(False, operation.reduce(operation, []))
        elidable_operation = migrations.RunPython(inner_method, elidable=True)
        self.assertEqual(elidable_operation.reduce(operation, []), [operation])
    def test_run_python_atomic(self):
        """
        Tests the RunPython operation correctly handles the "atomic" keyword
        """
        project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)

        def inner_method(models, schema_editor):
            # Insert one row and then fail: whether the insert is rolled back
            # depends on the operation's atomicity and the backend's ability
            # to roll back DDL/DML inside a migration.
            Pony = models.get_model("test_runpythonatomic", "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            raise ValueError("Adrian hates ponies.")

        # Verify atomicity when applying.
        atomic_migration = Migration("test", "test_runpythonatomic")
        atomic_migration.operations = [
            migrations.RunPython(inner_method, reverse_code=inner_method)
        ]
        non_atomic_migration = Migration("test", "test_runpythonatomic")
        non_atomic_migration.operations = [
            migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)
        ]
        # If we're a fully-transactional database, both versions should rollback
        if connection.features.can_rollback_ddl:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.apply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.apply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
        # Otherwise, the non-atomic operation should leave a row there
        else:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.apply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.apply(project_state, editor)
            # The non-atomic version committed the insert before raising.
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                1,
            )
        # Reset object count to zero and verify atomicity when unapplying.
        project_state.apps.get_model(
            "test_runpythonatomic", "Pony"
        ).objects.all().delete()
        # On a fully-transactional database, both versions rollback.
        if connection.features.can_rollback_ddl:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
        # Otherwise, the non-atomic operation leaves a row there.
        else:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                1,
            )
        # Verify deconstruction.
        definition = non_atomic_migration.operations[0].deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])
    def test_run_python_related_assignment(self):
        """
        #24282 - Model changes to a FK reverse side update the model
        on the FK side as well.
        """

        def inner_method(models, schema_editor):
            # Creating a Book with a FK to Author only works if the Author
            # model rendered for this state already reflects the AddField
            # applied on the reverse (Author) side.
            Author = models.get_model("test_authors", "Author")
            Book = models.get_model("test_books", "Book")
            author = Author.objects.create(name="Hemingway")
            Book.objects.create(title="Old Man and The Sea", author=author)

        create_author = migrations.CreateModel(
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_book = migrations.CreateModel(
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("title", models.CharField(max_length=100)),
                ("author", models.ForeignKey("test_authors.Author", models.CASCADE)),
            ],
            options={},
        )
        add_hometown = migrations.AddField(
            "Author",
            "hometown",
            models.CharField(max_length=100),
        )
        create_old_man = migrations.RunPython(inner_method, inner_method)
        # Apply the four operations in sequence, advancing the project state
        # after each one; the RunPython at the end must not raise.
        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_author.state_forwards("test_authors", new_state)
            create_author.database_forwards(
                "test_authors", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_book.state_forwards("test_books", new_state)
            create_book.database_forwards(
                "test_books", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            add_hometown.state_forwards("test_authors", new_state)
            add_hometown.database_forwards(
                "test_authors", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_old_man.state_forwards("test_books", new_state)
            create_old_man.database_forwards(
                "test_books", editor, project_state, new_state
            )
    def test_model_with_bigautofield(self):
        """
        A model with BigAutoField can be created.
        """

        def create_data(models, schema_editor):
            # Use explicit PKs above 2**32 to prove the columns really are
            # 64-bit wide on the database side.
            Author = models.get_model("test_author", "Author")
            Book = models.get_model("test_book", "Book")
            author1 = Author.objects.create(name="Hemingway")
            Book.objects.create(title="Old Man and The Sea", author=author1)
            Book.objects.create(id=2 ** 33, title="A farewell to arms", author=author1)
            author2 = Author.objects.create(id=2 ** 33, name="Remarque")
            Book.objects.create(title="All quiet on the western front", author=author2)
            Book.objects.create(title="Arc de Triomphe", author=author2)

        create_author = migrations.CreateModel(
            "Author",
            [
                ("id", models.BigAutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_book = migrations.CreateModel(
            "Book",
            [
                ("id", models.BigAutoField(primary_key=True)),
                ("title", models.CharField(max_length=100)),
                (
                    "author",
                    models.ForeignKey(
                        to="test_author.Author", on_delete=models.CASCADE
                    ),
                ),
            ],
            options={},
        )
        fill_data = migrations.RunPython(create_data)
        # Apply each operation in turn, threading the state forward.
        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_author.state_forwards("test_author", new_state)
            create_author.database_forwards(
                "test_author", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_book.state_forwards("test_book", new_state)
            create_book.database_forwards("test_book", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_data.state_forwards("fill_data", new_state)
            fill_data.database_forwards("fill_data", editor, project_state, new_state)
    def _test_autofield_foreignfield_growth(
        self, source_field, target_field, target_value
    ):
        """
        A field may be migrated in the following ways:

        - AutoField to BigAutoField
        - SmallAutoField to AutoField
        - SmallAutoField to BigAutoField

        ``source_field``/``target_field`` are the auto-field classes before and
        after the AlterField; ``target_value`` is a PK that only fits in the
        *target* field, proving the widened column after migration.
        """

        def create_initial_data(models, schema_editor):
            # Small PKs that fit in the source field.
            Article = models.get_model("test_article", "Article")
            Blog = models.get_model("test_blog", "Blog")
            blog = Blog.objects.create(name="web development done right")
            Article.objects.create(name="Frameworks", blog=blog)
            Article.objects.create(name="Programming Languages", blog=blog)

        def create_big_data(models, schema_editor):
            # Explicit PKs at target_value only work once both the PK column
            # and the FK column pointing at it have been widened.
            Article = models.get_model("test_article", "Article")
            Blog = models.get_model("test_blog", "Blog")
            blog2 = Blog.objects.create(name="Frameworks", id=target_value)
            Article.objects.create(name="Django", blog=blog2)
            Article.objects.create(id=target_value, name="Django2", blog=blog2)

        create_blog = migrations.CreateModel(
            "Blog",
            [
                ("id", source_field(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_article = migrations.CreateModel(
            "Article",
            [
                ("id", source_field(primary_key=True)),
                (
                    "blog",
                    models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE),
                ),
                ("name", models.CharField(max_length=100)),
                ("data", models.TextField(default="")),
            ],
            options={},
        )
        fill_initial_data = migrations.RunPython(
            create_initial_data, create_initial_data
        )
        fill_big_data = migrations.RunPython(create_big_data, create_big_data)
        grow_article_id = migrations.AlterField(
            "Article", "id", target_field(primary_key=True)
        )
        grow_blog_id = migrations.AlterField(
            "Blog", "id", target_field(primary_key=True)
        )
        # Create both tables, seed small data, widen both PKs, then insert
        # out-of-range-for-source data; state is threaded forward throughout.
        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_blog.state_forwards("test_blog", new_state)
            create_blog.database_forwards("test_blog", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_article.state_forwards("test_article", new_state)
            create_article.database_forwards(
                "test_article", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_initial_data.state_forwards("fill_initial_data", new_state)
            fill_initial_data.database_forwards(
                "fill_initial_data", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            grow_article_id.state_forwards("test_article", new_state)
            grow_article_id.database_forwards(
                "test_article", editor, project_state, new_state
            )
        state = new_state.clone()
        article = state.apps.get_model("test_article.Article")
        self.assertIsInstance(article._meta.pk, target_field)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            grow_blog_id.state_forwards("test_blog", new_state)
            grow_blog_id.database_forwards(
                "test_blog", editor, project_state, new_state
            )
        state = new_state.clone()
        blog = state.apps.get_model("test_blog.Blog")
        self.assertIsInstance(blog._meta.pk, target_field)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_big_data.state_forwards("fill_big_data", new_state)
            fill_big_data.database_forwards(
                "fill_big_data", editor, project_state, new_state
            )
def test_autofield__bigautofield_foreignfield_growth(self):
"""A field may be migrated from AutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.AutoField,
models.BigAutoField,
2 ** 33,
)
def test_smallfield_autofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to AutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.AutoField,
2 ** 22,
)
def test_smallfield_bigautofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.BigAutoField,
2 ** 33,
)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(
migrations.RunPython.noop, migrations.RunPython.noop
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
    def test_separate_database_and_state(self):
        """
        Tests the SeparateDatabaseAndState operation.
        """
        project_state = self.set_up_test_model("test_separatedatabaseandstate")
        # Create the operation: the database side runs raw SQL while the
        # state side registers an unrelated model, so each effect can be
        # checked independently.
        database_operation = migrations.RunSQL(
            "CREATE TABLE i_love_ponies (id int, special_thing int);",
            "DROP TABLE i_love_ponies;",
        )
        state_operation = migrations.CreateModel(
            "SomethingElse", [("id", models.AutoField(primary_key=True))]
        )
        operation = migrations.SeparateDatabaseAndState(
            state_operations=[state_operation], database_operations=[database_operation]
        )
        self.assertEqual(
            operation.describe(), "Custom state/database change combination"
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards("test_separatedatabaseandstate", new_state)
        self.assertEqual(
            len(
                new_state.models[
                    "test_separatedatabaseandstate", "somethingelse"
                ].fields
            ),
            1,
        )
        # Make sure there's no table
        self.assertTableNotExists("i_love_ponies")
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_separatedatabaseandstate", editor, project_state, new_state
            )
        self.assertTableExists("i_love_ponies")
        # And test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_separatedatabaseandstate", editor, new_state, project_state
            )
        self.assertTableNotExists("i_love_ponies")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "SeparateDatabaseAndState")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            sorted(definition[2]), ["database_operations", "state_operations"]
        )
    def test_separate_database_and_state2(self):
        """
        A complex SeparateDatabaseAndState operation: Multiple operations both
        for state and database. Verify the state dependencies within each list
        and that state ops don't affect the database.
        """
        app_label = "test_separatedatabaseandstate2"
        project_state = self.set_up_test_model(app_label)
        # Create the operation
        database_operations = [
            migrations.CreateModel(
                "ILovePonies",
                [("id", models.AutoField(primary_key=True))],
                options={"db_table": "iloveponies"},
            ),
            migrations.CreateModel(
                "ILoveMorePonies",
                # We use IntegerField and not AutoField because
                # the model is going to be deleted immediately
                # and with an AutoField this fails on Oracle
                [("id", models.IntegerField(primary_key=True))],
                options={"db_table": "ilovemoreponies"},
            ),
            migrations.DeleteModel("ILoveMorePonies"),
            migrations.CreateModel(
                "ILoveEvenMorePonies",
                [("id", models.AutoField(primary_key=True))],
                options={"db_table": "iloveevenmoreponies"},
            ),
        ]
        state_operations = [
            migrations.CreateModel(
                "SomethingElse",
                [("id", models.AutoField(primary_key=True))],
                options={"db_table": "somethingelse"},
            ),
            migrations.DeleteModel("SomethingElse"),
            migrations.CreateModel(
                "SomethingCompletelyDifferent",
                [("id", models.AutoField(primary_key=True))],
                options={"db_table": "somethingcompletelydifferent"},
            ),
        ]
        operation = migrations.SeparateDatabaseAndState(
            state_operations=state_operations,
            database_operations=database_operations,
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)

        def assertModelsAndTables(after_db):
            # Shared invariant check: state-side models never get tables,
            # database-side tables never enter the state; only the two
            # surviving database tables flip with after_db.
            # Tables and models exist, or don't, as they should:
            self.assertNotIn((app_label, "somethingelse"), new_state.models)
            self.assertEqual(
                len(new_state.models[app_label, "somethingcompletelydifferent"].fields),
                1,
            )
            self.assertNotIn((app_label, "iloveponiesonies"), new_state.models)
            self.assertNotIn((app_label, "ilovemoreponies"), new_state.models)
            self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models)
            self.assertTableNotExists("somethingelse")
            self.assertTableNotExists("somethingcompletelydifferent")
            self.assertTableNotExists("ilovemoreponies")
            if after_db:
                self.assertTableExists("iloveponies")
                self.assertTableExists("iloveevenmoreponies")
            else:
                self.assertTableNotExists("iloveponies")
                self.assertTableNotExists("iloveevenmoreponies")

        assertModelsAndTables(after_db=False)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        assertModelsAndTables(after_db=True)
        # And test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        assertModelsAndTables(after_db=False)
class SwappableOperationTests(OperationTestBase):
    """
    Key operations ignore swappable models
    (we don't want to replicate all of them here, as the functionality
    is in a common base class anyway)
    """

    available_apps = ["migrations"]

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_create_ignore_swapped(self):
        """
        The CreateTable operation ignores swapped models.
        """
        operation = migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=1)),
            ],
            options={
                "swappable": "TEST_SWAP_MODEL",
            },
        )
        # Test the state alteration (it should still be there!)
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards("test_crigsw", new_state)
        self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
        self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
        # Test the database alteration: no table is created because the
        # model is swapped out.
        self.assertTableNotExists("test_crigsw_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_crigsw", editor, project_state, new_state)
        self.assertTableNotExists("test_crigsw_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_crigsw", editor, new_state, project_state
            )
        self.assertTableNotExists("test_crigsw_pony")

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_delete_ignore_swapped(self):
        """
        Tests the DeleteModel operation ignores swapped models.
        """
        operation = migrations.DeleteModel("Pony")
        project_state, new_state = self.make_test_state("test_dligsw", operation)
        # Test the database alteration: forwards/backwards are both no-ops.
        self.assertTableNotExists("test_dligsw_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_dligsw", editor, project_state, new_state)
        self.assertTableNotExists("test_dligsw_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_dligsw", editor, new_state, project_state
            )
        self.assertTableNotExists("test_dligsw_pony")

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_add_field_ignore_swapped(self):
        """
        Tests the AddField operation.
        """
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=5),
        )
        project_state, new_state = self.make_test_state("test_adfligsw", operation)
        # Test the database alteration: the swapped model's table is never
        # created, so adding the field touches nothing.
        self.assertTableNotExists("test_adfligsw_pony")
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_adfligsw", editor, project_state, new_state
            )
        self.assertTableNotExists("test_adfligsw_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_adfligsw", editor, new_state, project_state
            )
        self.assertTableNotExists("test_adfligsw_pony")

    @override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
    def test_indexes_ignore_swapped(self):
        """
        Add/RemoveIndex operations ignore swapped models.
        """
        operation = migrations.AddIndex(
            "Pony", models.Index(fields=["pink"], name="my_name_idx")
        )
        project_state, new_state = self.make_test_state("test_adinigsw", operation)
        with connection.schema_editor() as editor:
            # No database queries should be run for swapped models
            operation.database_forwards(
                "test_adinigsw", editor, project_state, new_state
            )
            operation.database_backwards(
                "test_adinigsw", editor, new_state, project_state
            )
        operation = migrations.RemoveIndex(
            "Pony", models.Index(fields=["pink"], name="my_name_idx")
        )
        project_state, new_state = self.make_test_state("test_rminigsw", operation)
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_rminigsw", editor, project_state, new_state
            )
            operation.database_backwards(
                "test_rminigsw", editor, new_state, project_state
            )
class TestCreateModel(SimpleTestCase):
    def test_references_model_mixin(self):
        # A non-model mixin in ``bases`` must not break reference detection.
        operation = migrations.CreateModel(
            "name",
            fields=[],
            bases=(Mixin, models.Model),
        )
        operation.references_model("other_model", "migrations")
class FieldOperationTests(SimpleTestCase):
    def test_references_model(self):
        operation = FieldOperation(
            "MoDel", "field", models.ForeignKey("Other", models.CASCADE)
        )
        # Model name match (case-insensitive).
        self.assertIs(operation.references_model("mOdEl", "migrations"), True)
        # Referenced field.
        self.assertIs(operation.references_model("oTher", "migrations"), True)
        # Doesn't reference.
        self.assertIs(operation.references_model("Whatever", "migrations"), False)

    def test_references_field_by_name(self):
        operation = FieldOperation("MoDel", "field", models.BooleanField(default=False))
        self.assertIs(operation.references_field("model", "field", "migrations"), True)

    def test_references_field_by_remote_field_model(self):
        operation = FieldOperation(
            "Model", "field", models.ForeignKey("Other", models.CASCADE)
        )
        # Any field of the FK target model counts as referenced.
        self.assertIs(
            operation.references_field("Other", "whatever", "migrations"), True
        )
        self.assertIs(
            operation.references_field("Missing", "whatever", "migrations"), False
        )

    def test_references_field_by_from_fields(self):
        operation = FieldOperation(
            "Model",
            "field",
            models.fields.related.ForeignObject(
                "Other", models.CASCADE, ["from"], ["to"]
            ),
        )
        # from_fields live on the local model; to_fields on the remote one.
        self.assertIs(operation.references_field("Model", "from", "migrations"), True)
        self.assertIs(operation.references_field("Model", "to", "migrations"), False)
        self.assertIs(operation.references_field("Other", "from", "migrations"), False)
        # NOTE(review): duplicate of the assertion two lines up — possibly
        # meant ("Other", "to", ...); confirm intent before changing.
        self.assertIs(operation.references_field("Model", "to", "migrations"), False)

    def test_references_field_by_to_fields(self):
        operation = FieldOperation(
            "Model",
            "field",
            models.ForeignKey("Other", models.CASCADE, to_field="field"),
        )
        # With an explicit to_field, only that remote field is referenced.
        self.assertIs(operation.references_field("Other", "field", "migrations"), True)
        self.assertIs(
            operation.references_field("Other", "whatever", "migrations"), False
        )
        self.assertIs(
            operation.references_field("Missing", "whatever", "migrations"), False
        )

    def test_references_field_by_through(self):
        operation = FieldOperation(
            "Model", "field", models.ManyToManyField("Other", through="Through")
        )
        # Without through_fields, all fields of both related models count.
        self.assertIs(
            operation.references_field("Other", "whatever", "migrations"), True
        )
        self.assertIs(
            operation.references_field("Through", "whatever", "migrations"), True
        )
        self.assertIs(
            operation.references_field("Missing", "whatever", "migrations"), False
        )

    def test_reference_field_by_through_fields(self):
        operation = FieldOperation(
            "Model",
            "field",
            models.ManyToManyField(
                "Other", through="Through", through_fields=("first", "second")
            ),
        )
        # With explicit through_fields, only those two fields of the through
        # model are referenced (plus anything on the target model).
        self.assertIs(
            operation.references_field("Other", "whatever", "migrations"), True
        )
        self.assertIs(
            operation.references_field("Through", "whatever", "migrations"), False
        )
        self.assertIs(
            operation.references_field("Through", "first", "migrations"), True
        )
        self.assertIs(
            operation.references_field("Through", "second", "migrations"), True
        )
| 42.06836 | 115 | 0.590093 |
40a3843fabb075a0af899ff3fcb45fa00017c9ee | 1,150 | py | Python | algorithm/implementation/forming_a_magic_square/forming_a_magic_square.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | 1 | 2017-07-02T01:35:39.000Z | 2017-07-02T01:35:39.000Z | algorithm/implementation/forming_a_magic_square/forming_a_magic_square.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | null | null | null | algorithm/implementation/forming_a_magic_square/forming_a_magic_square.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | 1 | 2018-04-03T15:11:56.000Z | 2018-04-03T15:11:56.000Z | #!/usr/bin/env python3
"""
Forming a Magic Square
:author: Dela Anthonio
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/challenges/magic-square-forming
"""
from typing import List
# All eight 3x3 magic squares (rotations and reflections of the Lo Shu square).
magic_squares = [
    [[8, 1, 6], [3, 5, 7], [4, 9, 2]],
    [[6, 1, 8], [7, 5, 3], [2, 9, 4]],
    [[4, 9, 2], [3, 5, 7], [8, 1, 6]],
    [[2, 9, 4], [7, 5, 3], [6, 1, 8]],
    [[8, 3, 4], [1, 5, 9], [6, 7, 2]],
    [[4, 3, 8], [9, 5, 1], [2, 7, 6]],
    [[6, 7, 2], [1, 5, 9], [8, 3, 4]],
    [[2, 7, 6], [9, 5, 1], [4, 3, 8]],
]


def magic_square(square: List[List[int]]) -> int:
    """Return the minimal total cost of turning ``square`` into a magic square.

    The cost of changing a cell is the absolute difference between its value
    and the target value; the answer is the cheapest conversion over all
    eight 3x3 magic squares.
    """
    def cost_to(target: List[List[int]]) -> int:
        # Sum of per-cell absolute differences between square and target.
        return sum(
            abs(have - want)
            for have_row, want_row in zip(square, target)
            for have, want in zip(have_row, want_row)
        )

    return min(cost_to(candidate) for candidate in magic_squares)
def main():
    """Read a 3x3 grid from stdin and print its minimal conversion cost."""
    grid = [[int(cell) for cell in input().split()] for _ in range(3)]
    print(magic_square(grid))


if __name__ == '__main__':
    main()
aa42ff50bd29a1e7d7257a133b8d0b8f96f903f3 | 2,084 | py | Python | swig-2.0.4/Examples/test-suite/python/reference_global_vars_runme.py | vidkidz/crossbridge | ba0bf94aee0ce6cf7eb5be882382e52bc57ba396 | [
"MIT"
] | 1 | 2016-04-09T02:58:13.000Z | 2016-04-09T02:58:13.000Z | swig-2.0.4/Examples/test-suite/python/reference_global_vars_runme.py | vidkidz/crossbridge | ba0bf94aee0ce6cf7eb5be882382e52bc57ba396 | [
"MIT"
] | null | null | null | swig-2.0.4/Examples/test-suite/python/reference_global_vars_runme.py | vidkidz/crossbridge | ba0bf94aee0ce6cf7eb5be882382e52bc57ba396 | [
"MIT"
] | null | null | null | from reference_global_vars import *
# const class reference variable
if getconstTC().num != 33:
raise RuntimeError
# primitive reference variables
cvar.var_bool = createref_bool(0)
if value_bool(cvar.var_bool) != 0:
raise RuntimeError
cvar.var_bool = createref_bool(1)
if value_bool(cvar.var_bool) != 1:
raise RuntimeError
cvar.var_char = createref_char('w')
if value_char(cvar.var_char) != 'w':
raise RuntimeError
cvar.var_unsigned_char = createref_unsigned_char(10)
if value_unsigned_char(cvar.var_unsigned_char) != 10:
raise RuntimeError
cvar.var_signed_char = createref_signed_char(10)
if value_signed_char(cvar.var_signed_char) != 10:
raise RuntimeError
cvar.var_short = createref_short(10)
if value_short(cvar.var_short) != 10:
raise RuntimeError
cvar.var_unsigned_short = createref_unsigned_short(10)
if value_unsigned_short(cvar.var_unsigned_short) != 10:
raise RuntimeError
cvar.var_int = createref_int(10)
if value_int(cvar.var_int) != 10:
raise RuntimeError
cvar.var_unsigned_int = createref_unsigned_int(10)
if value_unsigned_int(cvar.var_unsigned_int) != 10:
raise RuntimeError
cvar.var_long = createref_long(10)
if value_long(cvar.var_long) != 10:
raise RuntimeError
cvar.var_unsigned_long = createref_unsigned_long(10)
if value_unsigned_long(cvar.var_unsigned_long) != 10:
raise RuntimeError
cvar.var_long_long = createref_long_long(0x6FFFFFFFFFFFFFF8)
if value_long_long(cvar.var_long_long) != 0x6FFFFFFFFFFFFFF8:
raise RuntimeError
#ull = abs(0xFFFFFFF2FFFFFFF0)
ull = 55834574864
cvar.var_unsigned_long_long = createref_unsigned_long_long(ull)
if value_unsigned_long_long(cvar.var_unsigned_long_long) != ull:
raise RuntimeError
cvar.var_float = createref_float(10.5)
if value_float(cvar.var_float) != 10.5:
raise RuntimeError
cvar.var_double = createref_double(10.5)
if value_double(cvar.var_double) != 10.5:
raise RuntimeError
# class reference variable
cvar.var_TestClass = createref_TestClass(TestClass(20))
if value_TestClass(cvar.var_TestClass).num != 20:
raise RuntimeError
| 27.786667 | 64 | 0.786468 |
72aad9e32d91f3df966c6172f58663ca65f604db | 67,907 | py | Python | bridge/marks/test.py | lutovna/klever | 29c0e4fa60def241032a2ea2b81103d817994eef | [
"Apache-2.0"
] | 1 | 2021-01-09T08:44:37.000Z | 2021-01-09T08:44:37.000Z | bridge/marks/test.py | lutovna/klever | 29c0e4fa60def241032a2ea2b81103d817994eef | [
"Apache-2.0"
] | 3 | 2021-03-19T09:15:16.000Z | 2021-09-22T19:24:40.000Z | bridge/marks/test.py | lutovna/klever | 29c0e4fa60def241032a2ea2b81103d817994eef | [
"Apache-2.0"
] | 1 | 2020-05-22T15:53:39.000Z | 2020-05-22T15:53:39.000Z | #
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from bridge.utils import KleverTestCase, ArchiveFileContent
from bridge.vars import (
SAFE_VERDICTS, UNSAFE_VERDICTS, MARK_SAFE, MARK_UNSAFE, MARK_STATUS, PROBLEM_DESC_FILE, ASSOCIATION_TYPE
)
from users.models import User
from jobs.models import Job
from reports.models import ReportSafe, ReportUnsafe, ReportUnknown, ReportComponent
from marks.models import (
MarkSafe, MarkUnsafe, MarkUnknown, MarkSafeHistory, MarkUnsafeHistory, MarkUnknownHistory,
SafeTag, UnsafeTag, MarkSafeTag, MarkUnsafeTag, MarkSafeReport, MarkUnsafeReport, MarkUnknownReport,
SafeAssociationLike, UnsafeAssociationLike, UnknownAssociationLike
)
from reports.test import DecideJobs, SJC_1
REPORT_ARCHIVES = os.path.join(settings.BASE_DIR, 'reports', 'test_files')
class TestMarks(KleverTestCase):
    def setUp(self):
        """Populate users/jobs, start a decision, and stash fixture names.

        Runs one full job decision synchronously so every test method sees
        finished reports to attach marks to.
        """
        super(TestMarks, self).setUp()
        User.objects.create_superuser('superuser', '', 'top_secret')
        # NOTE(review): populate_users is not among the imports visible in
        # this excerpt — presumably imported elsewhere in the module; verify.
        populate_users(
            manager={'username': 'manager', 'password': 'manager'},
            service={'username': 'service', 'password': 'service'}
        )
        self.client.post(reverse('users:login'), {'username': 'manager', 'password': 'manager'})
        self.client.post(reverse('population'))
        self.job = Job.objects.all().first()
        self.assertIsNotNone(self.job)
        # Kick off a decision for the job and process it with the fake
        # scheduler credentials.
        self.client.post('/jobs/run_decision/%s/' % self.job.pk, {'mode': 'default', 'conf_name': 'development'})
        DecideJobs('service', 'service', SJC_1)
        # Archive/file names used by the individual test methods.
        self.safe_archive = 'test_safemark.zip'
        self.unsafe_archive = 'test_unsafemark.zip'
        self.unknown_archive = 'test_unknownmark.zip'
        self.test_tagsfile = 'test_tags.json'
        self.all_marks_arch = 'All-marks.zip'
def test_safe(self):
self.assertEqual(Job.objects.get(pk=self.job.pk).status, JOB_STATUS[3][0])
# Delete populated marks
response = self.client.post('/marks/delete/', {
'type': 'safe', 'ids': json.dumps(list(MarkSafe.objects.values_list('id', flat=True)))
})
self.assertEqual(response.status_code, 200)
response = self.client.post('/marks/delete/', {
'type': 'unsafe', 'ids': json.dumps(list(MarkUnsafe.objects.values_list('id', flat=True)))
})
self.assertEqual(response.status_code, 200)
response = self.client.post('/marks/delete/', {
'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True)))
})
self.assertEqual(response.status_code, 200)
# Create 5 safe tags
created_tags = []
response = self.client.post('/marks/tags/save_tag/', {
'action': 'create', 'tag_type': 'safe', 'parent_id': '0', 'name': 'test:safe:tag:1',
'description': 'Test safe tag description'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
try:
created_tags.append(SafeTag.objects.get(tag='test:safe:tag:1'))
except ObjectDoesNotExist:
self.fail('Safe tag was not created')
self.assertEqual(created_tags[0].description, 'Test safe tag description')
self.assertEqual(created_tags[0].parent, None)
for i in range(2, 6):
self.client.post('/marks/tags/save_tag/', {
'action': 'create', 'tag_type': 'safe',
'parent_id': created_tags[i - 2].pk, 'name': 'test:safe:tag:%s' % i, 'description': ''
})
created_tags.append(SafeTag.objects.get(tag='test:safe:tag:%s' % i))
self.assertEqual(created_tags[i - 1].parent, created_tags[i - 2])
# Get tag parents for editing tag 'test:safe:tag:3'
response = self.client.post('/marks/tags/safe/get_tag_data/', {'tag_id': created_tags[2].pk})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Get tag parents for creating new tag
response = self.client.post('/marks/tags/safe/get_tag_data/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Edit 5th tag
response = self.client.post('/marks/tags/save_tag/', {
'action': 'edit', 'tag_type': 'safe', 'parent_id': created_tags[2].pk,
'name': 'test:safe:tag:5', 'tag_id': created_tags[4].pk,
'description': 'Test safe tag 5 description'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
try:
created_tags[4] = SafeTag.objects.get(tag='test:safe:tag:5')
except ObjectDoesNotExist:
self.fail('Tag 5 was not found after editing')
self.assertEqual(created_tags[4].parent, created_tags[2])
self.assertEqual(created_tags[4].description, 'Test safe tag 5 description')
# Remove 3d tag and check that its children (tag4 and tag5) are also removed
response = self.client.post('/marks/tags/safe/delete/%s/' % created_tags[2].pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(
SafeTag.objects.filter(tag__in=['test:safe:tag:3', 'test:safe:tag:4', 'test:safe:tag:5']).count(), 0
)
del created_tags[2:]
# Get tags data (for edit/create mark page). Just check that there is no error in response.
response = self.client.post('/marks/safe/tags_data/', {'selected_tags': json.dumps([created_tags[1].pk])})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Download tags
response = self.client.get(reverse('marks:download_tags', args=['safe']))
self.assertEqual(response.status_code, 200)
with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='wb') as fp:
for chunk in response.streaming_content:
fp.write(chunk)
SafeTag.objects.all().delete()
# Upload tags
with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='rb') as fp:
response = self.client.post('/marks/tags/safe/upload/', {'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
for i in range(0, len(created_tags)):
try:
created_tags[i] = SafeTag.objects.get(tag=created_tags[i].tag)
except ObjectDoesNotExist:
self.fail("Tags weren't uploaded")
# Tags tree page
response = self.client.get(reverse('marks:tags', args=['safe']))
self.assertEqual(response.status_code, 200)
# Get report
safe = ReportSafe.objects.filter(root__job_id=self.job.pk).first()
self.assertIsNotNone(safe)
# Inline mark form
response = self.client.get('/marks/safe/%s/create/inline/' % safe.id)
self.assertEqual(response.status_code, 200)
# Create mark page
response = self.client.get(reverse('marks:mark_form', args=['safe', safe.pk, 'create']))
self.assertEqual(response.status_code, 200)
# Save mark
compare_attrs = list({'is_compare': associate, 'attr': a_name}
for a_name, associate in safe.attrs.values_list('attr__name__name', 'associate'))
response = self.client.post(reverse('marks:mark_form', args=['safe', safe.pk, 'create']), {
'data': json.dumps({
'description': 'Mark description',
'is_modifiable': True,
'verdict': MARK_SAFE[1][0],
'status': MARK_STATUS[2][0],
'tags': [created_tags[1].pk],
'attrs': compare_attrs
})
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIsNone(res.get('error'))
self.assertIn('cache_id', res)
cache_id = res['cache_id']
# Check mark's tables
try:
mark = MarkSafe.objects.get(job=self.job, author__username='manager')
except ObjectDoesNotExist:
self.fail('Mark was not created')
self.assertEqual(mark.type, MARK_TYPE[0][0])
self.assertEqual(mark.verdict, MARK_SAFE[1][0])
self.assertEqual(mark.status, MARK_STATUS[2][0])
self.assertEqual(mark.version, 1)
self.assertEqual(mark.description, 'Mark description')
self.assertEqual(mark.is_modifiable, True)
self.assertEqual(len(mark.versions.all()), 1)
mark_version = MarkSafeHistory.objects.get(mark=mark)
self.assertEqual(mark_version.verdict, mark.verdict)
self.assertEqual(mark_version.version, 1)
self.assertEqual(mark_version.author.username, 'manager')
self.assertEqual(mark_version.status, mark.status)
self.assertEqual(mark_version.description, mark.description)
for mark_attr in mark_version.attrs.all():
self.assertIn({'is_compare': mark_attr.is_compare, 'attr': mark_attr.attr.name.name}, compare_attrs)
self.assertEqual(ReportSafe.objects.get(pk=safe.pk).verdict, SAFE_VERDICTS[1][0])
self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[1][0]).count(), 1)
self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1)
self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[1])), 1)
try:
rst = ReportSafeTag.objects.get(report__root__job=self.job, report__parent=None, tag=created_tags[0])
self.assertEqual(rst.number, 1)
rst = ReportSafeTag.objects.get(report__root__job=self.job, report__parent=None, tag=created_tags[1])
self.assertEqual(rst.number, 1)
rst = ReportSafeTag.objects.get(report__root__job=self.job, report_id=safe.parent_id, tag=created_tags[0])
self.assertEqual(rst.number, 1)
rst = ReportSafeTag.objects.get(report__root__job=self.job, report__id=safe.parent_id, tag=created_tags[1])
self.assertEqual(rst.number, 1)
srt = SafeReportTag.objects.get(report=safe, tag=created_tags[0])
self.assertEqual(srt.number, 1)
srt = SafeReportTag.objects.get(report=safe, tag=created_tags[1])
self.assertEqual(srt.number, 1)
except ObjectDoesNotExist:
self.fail('Reports tags cache was not filled')
# Associations changes
response = self.client.get('/marks/safe/association_changes/%s/' % cache_id)
self.assertEqual(response.status_code, 200)
# Edit mark page
response = self.client.get(reverse('marks:mark_form', args=['safe', mark.pk, 'edit']))
self.assertEqual(response.status_code, 200)
# Edit mark
response = self.client.post(reverse('marks:mark_form', args=['safe', mark.pk, 'edit']), {
'data': json.dumps({
'description': 'New mark description',
'is_modifiable': True,
'verdict': MARK_SAFE[2][0],
'status': MARK_STATUS[2][0],
'tags': [created_tags[0].pk],
'attrs': compare_attrs,
'comment': 'Change 1'
})
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIsNone(res.get('error'))
self.assertIn('cache_id', res)
cache_id = res['cache_id']
# Check mark's tables
try:
mark = MarkSafe.objects.get(job=self.job, author__username='manager')
except ObjectDoesNotExist:
self.fail('Mark was not created')
self.assertEqual(mark.verdict, MARK_SAFE[2][0])
self.assertEqual(mark.version, 2)
self.assertEqual(mark.description, 'New mark description')
self.assertEqual(mark.is_modifiable, True)
self.assertEqual(len(mark.versions.all()), 2)
mark_version = MarkSafeHistory.objects.filter(mark=mark).order_by('-version').first()
self.assertEqual(mark_version.version, 2)
self.assertEqual(mark_version.verdict, mark.verdict)
self.assertEqual(mark_version.author.username, 'manager')
self.assertEqual(mark_version.description, mark.description)
self.assertEqual(mark_version.comment, 'Change 1')
self.assertEqual(ReportSafe.objects.get(pk=safe.pk).verdict, SAFE_VERDICTS[2][0])
self.assertEqual(len(MarkSafeReport.objects.filter(mark=mark, report=safe)), 1)
self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1)
self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[1])), 0)
self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 1)
self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__id=safe.parent_id)), 1)
try:
srt = SafeReportTag.objects.get(report=safe, tag=created_tags[0])
self.assertEqual(srt.number, 1)
except ObjectDoesNotExist:
self.fail('Reports tags cache was not filled')
self.assertEqual(len(SafeReportTag.objects.filter(report=safe, tag=created_tags[1])), 0)
# Associations changes
response = self.client.get('/marks/safe/association_changes/%s/' % cache_id)
self.assertEqual(response.status_code, 200)
# Safe marks list page
response = self.client.get(reverse('marks:list', args=['safe']))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('marks:mark', args=['safe', mark.id]))
self.assertEqual(response.status_code, 200)
# Inline mark form
response = self.client.get('/marks/safe/%s/edit/inline/' % mark.id)
self.assertEqual(response.status_code, 200)
# Confirm/unconfirm association
# Mark is automatically associated after its changes
self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[0][0]).count(), 1)
response = self.client.post('/marks/association/safe/%s/%s/unconfirm/' % (safe.pk, mark.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[2][0]).count(), 1)
response = self.client.post('/marks/association/safe/%s/%s/confirm/' % (safe.pk, mark.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[1][0]).count(), 1)
# Like/dislike association
response = self.client.post('/marks/association/safe/%s/%s/like/' % (safe.id, mark.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(SafeAssociationLike.objects.filter(
association__report=safe, association__mark=mark, dislike=False
).count(), 1)
response = self.client.post('/marks/association/safe/%s/%s/dislike/' % (safe.id, mark.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(SafeAssociationLike.objects.filter(
association__report=safe, association__mark=mark, dislike=True
).count(), 1)
self.assertEqual(SafeAssociationLike.objects.filter(
association__report=safe, association__mark=mark, dislike=False
).count(), 0)
# Download mark
response = self.client.get(reverse('marks:safe-download', args=[mark.pk]))
self.assertEqual(response.status_code, 200)
self.assertIn(response['Content-Type'], {'application/x-zip-compressed', 'application/zip'})
with open(os.path.join(settings.MEDIA_ROOT, self.safe_archive), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
# Download mark in preset format
response = self.client.get(reverse('marks:safe-download-preset', args=[mark.pk]))
self.assertEqual(response.status_code, 200)
# Delete mark
response = self.client.post('/marks/delete/', {'type': 'safe', 'ids': json.dumps([mark.id])})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(len(MarkSafe.objects.all()), 0)
self.assertEqual(len(MarkSafeReport.objects.all()), 0)
self.assertEqual(ReportSafe.objects.all().first().verdict, SAFE_VERDICTS[4][0])
# Upload mark
with open(os.path.join(settings.MEDIA_ROOT, self.safe_archive), mode='rb') as fp:
response = self.client.post('/marks/upload/', {'file': fp})
fp.close()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIn('id', res)
self.assertEqual(res.get('type'), 'safe')
self.assertEqual(len(MarkSafe.objects.all()), 1)
try:
newmark = MarkSafe.objects.get(pk=res['id'])
except ObjectDoesNotExist:
self.fail('Mark was not uploaded')
self.assertEqual(newmark.type, MARK_TYPE[2][0])
self.assertEqual(newmark.verdict, MARK_SAFE[2][0])
self.assertEqual(newmark.version, 2)
self.assertEqual(newmark.description, 'New mark description')
self.assertEqual(newmark.is_modifiable, True)
self.assertEqual(len(newmark.versions.all()), 2)
newmark_version = MarkSafeHistory.objects.filter(mark=newmark).order_by('-version').first()
self.assertEqual(newmark_version.version, 2)
self.assertEqual(newmark_version.verdict, mark.verdict)
self.assertEqual(newmark_version.author.username, 'manager')
self.assertEqual(newmark_version.description, mark.description)
self.assertEqual(newmark_version.comment, 'Change 1')
self.assertEqual(ReportSafe.objects.get(pk=safe.pk).verdict, SAFE_VERDICTS[2][0])
self.assertEqual(len(MarkSafeReport.objects.filter(mark=newmark, report=safe)), 1)
self.assertEqual(len(MarkSafeReport.objects.filter(report=safe)), 1)
self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[0])), 1)
self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[1])), 0)
self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 1)
self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__id=safe.parent_id)), 1)
# Some more mark changes
for i in range(3, 6):
response = self.client.post(reverse('marks:mark_form', args=['safe', newmark.pk, 'edit']), {
'data': json.dumps({
'description': 'New mark description',
'is_modifiable': True,
'verdict': MARK_SAFE[2][0],
'status': MARK_STATUS[2][0],
'tags': [created_tags[0].pk],
'attrs': compare_attrs,
'comment': 'Change %s' % i
})
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(len(MarkSafeHistory.objects.filter(mark=newmark)), 5)
# Get 3d version data
response = self.client.get(reverse('marks:mark_form', args=['safe', newmark.pk, 'edit']),
params={'version': 3})
self.assertEqual(response.status_code, 200)
# Compare 1st and 4th versions
response = self.client.post('/marks/safe/%s/compare_versions/' % newmark.pk, {'v1': 1, 'v2': 4})
self.assertEqual(response.status_code, 200)
# Remove 2nd and 4th versions
response = self.client.post('/marks/safe/%s/remove_versions/' % newmark.pk, {'versions': json.dumps([2, 4])})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('success', res)
self.assertEqual(len(MarkSafeHistory.objects.filter(mark=newmark)), 3)
# Reports' lists pages
root_comp = ReportComponent.objects.get(root__job_id=self.job.pk, parent=None)
response = self.client.get('%s?tag=%s' % (reverse('reports:safes', args=[root_comp.pk]), created_tags[0].pk))
self.assertIn(response.status_code, {200, 302})
response = self.client.get('%s?tag=%s' % (reverse('reports:safes', args=[root_comp.pk]), created_tags[1].pk))
self.assertIn(response.status_code, {200, 302})
response = self.client.get(
'%s?verdict=%s' % (reverse('reports:safes', args=[root_comp.pk]), SAFE_VERDICTS[0][0])
)
self.assertIn(response.status_code, {200, 302})
response = self.client.get(
'%s?verdict=%s' % (reverse('reports:safes', args=[root_comp.pk]), SAFE_VERDICTS[2][0])
)
self.assertIn(response.status_code, {200, 302})
# Download all marks
response = self.client.get('/marks/api/download-all/')
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response['Content-Type'], 'application/json')
with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
# Delete all safe marks
self.client.post('/marks/delete/', {
'type': 'safe', 'ids': json.dumps(list(MarkSafe.objects.values_list('id', flat=True)))
})
self.assertEqual(MarkSafe.objects.count(), 0)
# All verdicts must be "safe unmarked"
self.assertEqual(
len(ReportSafe.objects.filter(verdict=SAFE_VERDICTS[4][0])),
len(ReportSafe.objects.all())
)
self.assertEqual(len(MarkSafeReport.objects.all()), 0)
# Upload all marks
with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='rb') as fp:
response = self.client.post('/marks/upload-all/', {'delete': 1, 'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['fail']), 0)
self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['safe']), 1)
def test_unsafe(self):
self.assertEqual(Job.objects.get(pk=self.job.pk).status, JOB_STATUS[3][0])
# Delete populated marks
response = self.client.post('/marks/delete/', {
'type': 'safe', 'ids': json.dumps(list(MarkSafe.objects.values_list('id', flat=True)))
})
self.assertEqual(response.status_code, 200)
response = self.client.post('/marks/delete/', {
'type': 'unsafe', 'ids': json.dumps(list(MarkUnsafe.objects.values_list('id', flat=True)))
})
self.assertEqual(response.status_code, 200)
response = self.client.post('/marks/delete/', {
'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True)))
})
self.assertEqual(response.status_code, 200)
# Create 5 unsafe tags
created_tags = []
response = self.client.post('/marks/tags/save_tag/', {
'action': 'create', 'tag_type': 'unsafe', 'parent_id': '0', 'name': 'test:unsafe:tag:1',
'description': 'Test unsafe tag description'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
try:
created_tags.append(UnsafeTag.objects.get(tag='test:unsafe:tag:1'))
except ObjectDoesNotExist:
self.fail('Unsafe tag was not created')
self.assertEqual(created_tags[0].description, 'Test unsafe tag description')
self.assertEqual(created_tags[0].parent, None)
for i in range(2, 6):
self.client.post('/marks/tags/save_tag/', {
'action': 'create', 'tag_type': 'unsafe',
'parent_id': created_tags[i - 2].pk, 'name': 'test:unsafe:tag:%s' % i, 'description': ''
})
created_tags.append(UnsafeTag.objects.get(tag='test:unsafe:tag:%s' % i))
self.assertEqual(created_tags[i - 1].parent, created_tags[i - 2])
# Get tag parents for editing tag 'test:unsafe:tag:3'
response = self.client.post('/marks/tags/unsafe/get_tag_data/', {'tag_id': created_tags[2].pk})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Get tag parents for creating new tag
response = self.client.post('/marks/tags/unsafe/get_tag_data/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Edit 5th tag
response = self.client.post('/marks/tags/save_tag/', {
'action': 'edit', 'tag_type': 'unsafe', 'parent_id': created_tags[2].pk,
'name': 'test:unsafe:tag:5', 'tag_id': created_tags[4].pk,
'description': 'Test unsafe tag 5 description'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
try:
created_tags[4] = UnsafeTag.objects.get(tag='test:unsafe:tag:5')
except ObjectDoesNotExist:
self.fail('Tag 5 was not found after editing')
self.assertEqual(created_tags[4].parent, created_tags[2])
self.assertEqual(created_tags[4].description, 'Test unsafe tag 5 description')
# Remove 3d tag and check that its children (tag4 and tag5) are also removed
response = self.client.post('/marks/tags/unsafe/delete/%s/' % created_tags[2].pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(
len(UnsafeTag.objects.filter(tag__in=['test:unsafe:tag:3', 'test:unsafe:tag:4', 'test:unsafe:tag:5'])), 0
)
del created_tags[2:]
# Get tags data (for edit/create mark page). Just check that there is no error in response.
response = self.client.post('/marks/unsafe/tags_data/', {'selected_tags': json.dumps([created_tags[1].pk])})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Download tags
response = self.client.get(reverse('marks:download_tags', args=['unsafe']))
self.assertEqual(response.status_code, 200)
with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='wb') as fp:
for chunk in response.streaming_content:
fp.write(chunk)
UnsafeTag.objects.all().delete()
# Upload tags
with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='rb') as fp:
response = self.client.post('/marks/tags/unsafe/upload/', {'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
for i in range(0, len(created_tags)):
try:
created_tags[i] = UnsafeTag.objects.get(tag=created_tags[i].tag)
except ObjectDoesNotExist:
self.fail("Tags weren't uploaded")
# Tags tree page
response = self.client.get(reverse('marks:tags', args=['unsafe']))
self.assertEqual(response.status_code, 200)
# Get report
unsafe = ReportUnsafe.objects.filter(root__job_id=self.job.pk).first()
self.assertIsNotNone(unsafe)
# Inline mark form
response = self.client.get('/marks/unsafe/%s/create/inline/' % unsafe.id)
self.assertEqual(response.status_code, 200)
# Create mark page
response = self.client.get(reverse('marks:mark_form', args=['unsafe', unsafe.pk, 'create']))
self.assertEqual(response.status_code, 200)
# Error trace compare function description
try:
compare_f = MarkUnsafeCompare.objects.get(name=DEFAULT_COMPARE)
except ObjectDoesNotExist:
self.fail("Population hasn't created compare error trace functions")
response = self.client.post('/marks/get_func_description/%s/' % compare_f.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Save mark
compare_attrs = list({'is_compare': associate, 'attr': a_name}
for a_name, associate in unsafe.attrs.values_list('attr__name__name', 'associate'))
response = self.client.post(reverse('marks:mark_form', args=['unsafe', unsafe.pk, 'create']), {
'data': json.dumps({
'compare_id': compare_f.pk,
'description': 'Mark description',
'is_modifiable': True,
'verdict': MARK_UNSAFE[1][0],
'status': MARK_STATUS[2][0],
'tags': [created_tags[0].pk],
'attrs': compare_attrs
})
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('cache_id', res)
cache_id = res['cache_id']
# Check mark's tables
try:
mark = MarkUnsafe.objects.get(job=self.job, author__username='manager')
except ObjectDoesNotExist:
self.fail('Mark was not created')
self.assertEqual(mark.type, MARK_TYPE[0][0])
self.assertEqual(mark.verdict, MARK_UNSAFE[1][0])
self.assertEqual(mark.status, MARK_STATUS[2][0])
self.assertEqual(mark.version, 1)
self.assertEqual(mark.description, 'Mark description')
self.assertEqual(mark.function.name, DEFAULT_COMPARE)
self.assertEqual(mark.is_modifiable, True)
self.assertEqual(len(mark.versions.all()), 1)
mark_version = MarkUnsafeHistory.objects.get(mark=mark)
self.assertEqual(mark_version.verdict, mark.verdict)
self.assertEqual(mark_version.version, 1)
self.assertEqual(mark_version.author.username, 'manager')
self.assertEqual(mark_version.status, mark.status)
self.assertEqual(mark_version.description, mark.description)
for mark_attr in mark_version.attrs.all().select_related('attr__name'):
self.assertIn({'is_compare': mark_attr.is_compare, 'attr': mark_attr.attr.name.name}, compare_attrs)
self.assertEqual(ReportUnsafe.objects.get(pk=unsafe.pk).verdict, UNSAFE_VERDICTS[1][0])
self.assertEqual(len(MarkUnsafeReport.objects.filter(mark=mark, report=unsafe, type=ASSOCIATION_TYPE[1][0])), 1)
self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1)
try:
rst = ReportUnsafeTag.objects.get(report__root__job=self.job, report__parent=None, tag=created_tags[0])
# The number of unsafes for root report with specified tag equals the number of marked unsafes
self.assertEqual(rst.number, len(ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[1][0])))
rst = ReportUnsafeTag.objects.get(
report__root__job=self.job, report_id=unsafe.parent_id, tag=created_tags[0]
)
# The number of unsafes for parent report (for unsafe) with specified tag
# equals 1 due to only one unsafe is child for report
self.assertEqual(rst.number, 1)
srt = UnsafeReportTag.objects.get(report=unsafe, tag=created_tags[0])
self.assertEqual(srt.number, 1)
except ObjectDoesNotExist:
self.fail('Reports tags cache was not filled')
# Associations changes
response = self.client.get('/marks/unsafe/association_changes/%s/' % cache_id)
self.assertEqual(response.status_code, 200)
# Edit mark page
response = self.client.get(reverse('marks:mark_form', args=['unsafe', mark.pk, 'edit']))
self.assertEqual(response.status_code, 200)
# Edit mark
with mark_version.error_trace.file as fp:
error_trace = fp.read().decode('utf8')
response = self.client.post(reverse('marks:mark_form', args=['unsafe', mark.pk, 'edit']), {
'data': json.dumps({
'compare_id': compare_f.pk,
'description': 'New mark description',
'is_modifiable': True,
'verdict': MARK_UNSAFE[2][0],
'status': MARK_STATUS[2][0],
'tags': [created_tags[1].pk],
'attrs': compare_attrs,
'comment': 'Change 1',
'error_trace': error_trace
})
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('cache_id', res)
cache_id = res['cache_id']
# Check mark's tables
try:
mark = MarkUnsafe.objects.get(job=self.job, author__username='manager')
except ObjectDoesNotExist:
self.fail('Mark was not created')
self.assertEqual(mark.verdict, MARK_UNSAFE[2][0])
self.assertEqual(mark.version, 2)
self.assertEqual(mark.description, 'New mark description')
self.assertEqual(mark.is_modifiable, True)
self.assertEqual(len(mark.versions.all()), 2)
mark_version = MarkUnsafeHistory.objects.filter(mark=mark).order_by('-version').first()
self.assertEqual(mark_version.version, 2)
self.assertEqual(mark_version.verdict, mark.verdict)
self.assertEqual(mark_version.author.username, 'manager')
self.assertEqual(mark_version.description, mark.description)
self.assertEqual(mark_version.comment, 'Change 1')
self.assertEqual(ReportUnsafe.objects.get(pk=unsafe.pk).verdict, SAFE_VERDICTS[2][0])
self.assertEqual(len(MarkUnsafeReport.objects.filter(mark=mark, report=unsafe)), 1)
self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1)
self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=mark_version, tag=created_tags[1])), 1)
self.assertEqual(len(ReportUnsafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 2)
self.assertEqual(len(
ReportUnsafeTag.objects.filter(report__root__job=self.job, report__id=unsafe.parent_id)
), 2)
try:
urt = UnsafeReportTag.objects.get(report=unsafe, tag=created_tags[0])
self.assertEqual(urt.number, 1)
urt = UnsafeReportTag.objects.get(report=unsafe, tag=created_tags[1])
self.assertEqual(urt.number, 1)
except ObjectDoesNotExist:
self.fail('Reports tags cache was not filled')
# Associations changes
response = self.client.get('/marks/unsafe/association_changes/%s/' % cache_id)
self.assertEqual(response.status_code, 200)
# Unsafe marks list page
response = self.client.get(reverse('marks:list', args=['unsafe']))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('marks:mark', args=['unsafe', mark.id]))
self.assertEqual(response.status_code, 200)
# Inline mark form
response = self.client.get('/marks/unsafe/%s/edit/inline/' % mark.id)
self.assertEqual(response.status_code, 200)
# Confirm/unconfirm association
# Mark is automatically associated after its changes
self.assertEqual(
MarkUnsafeReport.objects.filter(mark=mark, report=unsafe, type=ASSOCIATION_TYPE[0][0]).count(), 1
)
response = self.client.post('/marks/association/unsafe/%s/%s/unconfirm/' % (unsafe.pk, mark.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(MarkUnsafeReport.objects.filter(
mark=mark, report=unsafe, type=ASSOCIATION_TYPE[2][0]).count(), 1)
response = self.client.post('/marks/association/unsafe/%s/%s/confirm/' % (unsafe.pk, mark.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(MarkUnsafeReport.objects.filter(
mark=mark, report=unsafe, type=ASSOCIATION_TYPE[1][0]).count(), 1)
# Like/dislike association
response = self.client.post('/marks/association/unsafe/%s/%s/like/' % (unsafe.id, mark.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(UnsafeAssociationLike.objects.filter(
association__report=unsafe, association__mark=mark, dislike=False
).count(), 1)
response = self.client.post('/marks/association/unsafe/%s/%s/dislike/' % (unsafe.id, mark.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(UnsafeAssociationLike.objects.filter(
association__report=unsafe, association__mark=mark, dislike=True
).count(), 1)
self.assertEqual(UnsafeAssociationLike.objects.filter(
association__report=unsafe, association__mark=mark, dislike=False
).count(), 0)
# Download mark
response = self.client.get(reverse('marks:unsafe-download', args=[mark.pk]))
self.assertEqual(response.status_code, 200)
self.assertIn(response['Content-Type'], {'application/x-zip-compressed', 'application/zip'})
with open(os.path.join(settings.MEDIA_ROOT, self.unsafe_archive), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
# Download mark in preset format
response = self.client.get(reverse('marks:unsafe-download-preset', args=[mark.pk]))
self.assertEqual(response.status_code, 200)
# Delete mark
response = self.client.post('/marks/delete/', {'type': 'unsafe', 'ids': json.dumps([mark.id])})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertEqual(len(MarkUnsafe.objects.all()), 0)
self.assertEqual(len(MarkUnsafeReport.objects.all()), 0)
self.assertEqual(ReportUnsafe.objects.all().first().verdict, UNSAFE_VERDICTS[5][0])
# Upload mark
with open(os.path.join(settings.MEDIA_ROOT, self.unsafe_archive), mode='rb') as fp:
response = self.client.post('/marks/upload/', {'file': fp})
fp.close()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIn('id', res)
self.assertEqual(res.get('type'), 'unsafe')
self.assertEqual(len(MarkUnsafe.objects.all()), 1)
try:
newmark = MarkUnsafe.objects.get(pk=res['id'])
except ObjectDoesNotExist:
self.fail('Mark was not uploaded')
self.assertEqual(newmark.type, MARK_TYPE[2][0])
self.assertEqual(newmark.verdict, MARK_UNSAFE[2][0])
self.assertEqual(newmark.version, 2)
self.assertEqual(newmark.description, 'New mark description')
self.assertEqual(newmark.is_modifiable, True)
self.assertEqual(len(newmark.versions.all()), 2)
newmark_version = MarkUnsafeHistory.objects.filter(mark=newmark).order_by('-version').first()
self.assertEqual(newmark_version.version, 2)
self.assertEqual(newmark_version.verdict, mark.verdict)
self.assertEqual(newmark_version.author.username, 'manager')
self.assertEqual(newmark_version.description, mark.description)
self.assertEqual(newmark_version.comment, 'Change 1')
self.assertEqual(ReportUnsafe.objects.get(pk=unsafe.pk).verdict, UNSAFE_VERDICTS[2][0])
self.assertEqual(len(MarkUnsafeReport.objects.filter(mark=newmark, report=unsafe)), 1)
self.assertEqual(len(MarkUnsafeReport.objects.filter(report=unsafe)), 1)
self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[0])), 1)
self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[1])), 1)
# The tag has parent which is also added to mark
self.assertEqual(
len(ReportUnsafeTag.objects.filter(report__root__job=self.job, report__parent=None)),
len(ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[2][0])) * 2
)
self.assertEqual(len(ReportUnsafeTag.objects.filter(
report__root__job=self.job, report__id=unsafe.parent_id
)), 2)
# Some more mark changes
for i in range(3, 6):
response = self.client.post(reverse('marks:mark_form', args=['unsafe', newmark.pk, 'edit']), {
'data': json.dumps({
'compare_id': compare_f.pk,
'description': 'New mark description',
'is_modifiable': True,
'verdict': MARK_UNSAFE[2][0],
'status': MARK_STATUS[2][0],
'tags': [created_tags[0].pk],
'attrs': compare_attrs,
'comment': 'Change %s' % i,
'error_trace': error_trace
})
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(
len(ReportUnsafeTag.objects.filter(report__root__job=self.job, report__parent=None)),
len(ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[2][0]))
)
self.assertEqual(len(MarkUnsafeHistory.objects.filter(mark=newmark)), 5)
# Get 3d version data
response = self.client.get(reverse('marks:mark_form', args=['unsafe', newmark.pk, 'edit']),
params={'version': 3})
self.assertEqual(response.status_code, 200)
# Compare 1st and 4th versions
response = self.client.post('/marks/unsafe/%s/compare_versions/' % newmark.pk, {'v1': 1, 'v2': 4})
self.assertEqual(response.status_code, 200)
# Remove 2nd and 4th versions
response = self.client.post('/marks/unsafe/%s/remove_versions/' % newmark.pk, {'versions': json.dumps([2, 4])})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('success', res)
self.assertEqual(len(MarkUnsafeHistory.objects.filter(mark=newmark)), 3)
# Reports' lists pages
root_comp = ReportComponent.objects.get(root__job_id=self.job.pk, parent=None)
response = self.client.get('%s?tag=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), created_tags[0].pk))
self.assertIn(response.status_code, {200, 302})
response = self.client.get('%s?tag=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), created_tags[1].pk))
self.assertIn(response.status_code, {200, 302})
response = self.client.get(
'%s?verdict=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), UNSAFE_VERDICTS[0][0])
)
self.assertIn(response.status_code, {200, 302})
response = self.client.get(
'%s?verdict=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), UNSAFE_VERDICTS[2][0])
)
self.assertIn(response.status_code, {200, 302})
# Download all marks
response = self.client.get('/marks/api/download-all/')
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response['Content-Type'], 'application/json')
with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
# Delete all unsafe marks
self.client.post('/marks/delete/', {
'type': 'unsafe', 'ids': json.dumps(list(MarkUnsafe.objects.values_list('id', flat=True)))
})
self.assertEqual(MarkUnsafe.objects.count(), 0)
# All verdicts must be "unsafe unmarked"
self.assertEqual(
ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[5][0]).count(), ReportUnsafe.objects.all().count()
)
self.assertEqual(MarkUnsafeReport.objects.count(), 0)
# Upload all marks
with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='rb') as fp:
response = self.client.post('/marks/upload-all/', {'delete': 1, 'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['fail']), 0)
self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['unsafe']), 1)
def test_unknown(self):
    """Exercise the full unknown-mark life cycle against the decided test job.

    Covers: mark creation (regexp and plain pattern functions), editing,
    association confirm/unconfirm and like/dislike, version compare and
    removal, report-list filtering, and single/bulk mark download + upload.
    """
    self.assertEqual(Job.objects.get(pk=self.job.pk).status, JOB_STATUS[3][0])
    # Do not remove populated safe/unsafe marks as there are no problems with uploading populated marks
    response = self.client.post('/marks/delete/', {
        'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True)))
    })
    self.assertEqual(response.status_code, 200)

    # Find the unknown report whose problem description matches the expected text
    unknown = None
    for u in ReportUnknown.objects.filter(root__job_id=self.job.pk):
        afc = ArchiveFileContent(u, 'problem_description', PROBLEM_DESC_FILE)
        if afc.content == b'KeyError: \'attr\' was not found.':
            unknown = u
            break
    if unknown is None:
        self.fail("Unknown with needed problem description was not found in test job decision")
    parent = ReportComponent.objects.get(pk=unknown.parent_id)

    # Inline mark form
    response = self.client.get('/marks/unknown/%s/create/inline/' % unknown.id)
    self.assertEqual(response.status_code, 200)

    # Create mark page
    response = self.client.get(reverse('marks:mark_form', args=['unknown', unknown.pk, 'create']))
    self.assertEqual(response.status_code, 200)

    # Check regexp function.
    # NOTE: raw strings keep the regex backslashes literal and avoid
    # "invalid escape sequence" warnings; the string values are unchanged.
    response = self.client.post('/marks/check-unknown-mark/%s/' % unknown.pk, {
        'function': r"KeyError:\s'(\S*)'\swas\snot\sfound\.",
        'pattern': 'KeyE: {0}',
        'is_regex': 'true'
    })
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))

    # Save mark
    response = self.client.post(reverse('marks:mark_form', args=['unknown', unknown.pk, 'create']), {
        'data': json.dumps({
            'description': 'Mark description',
            'is_modifiable': True,
            'status': MARK_STATUS[2][0],
            'function': r"KeyError:\s'(\S*)'\swas\snot\sfound\.",
            'problem': 'KeyE: {0}',
            'link': 'http://mysite.com/',
            'is_regexp': True
        })
    })
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    res = json.loads(str(response.content, encoding='utf8'))
    self.assertNotIn('error', res)
    self.assertIn('cache_id', res)
    cache_id = res['cache_id']

    # Check mark's tables
    try:
        mark = MarkUnknown.objects.get(job=self.job, author__username='manager')
    except ObjectDoesNotExist:
        self.fail('Mark was not created')
    self.assertEqual(mark.type, MARK_TYPE[0][0])
    self.assertEqual(mark.status, MARK_STATUS[2][0])
    self.assertEqual(mark.version, 1)
    self.assertEqual(mark.description, 'Mark description')
    self.assertEqual(mark.link, 'http://mysite.com/')
    self.assertEqual(mark.problem_pattern, 'KeyE: {0}')
    self.assertEqual(mark.function, r"KeyError:\s'(\S*)'\swas\snot\sfound\.")
    self.assertEqual(mark.is_modifiable, True)
    self.assertEqual(len(mark.versions.all()), 1)
    mark_version = MarkUnknownHistory.objects.get(mark=mark)
    self.assertEqual(mark_version.version, 1)
    self.assertEqual(mark_version.author.username, 'manager')
    self.assertEqual(mark_version.status, mark.status)
    self.assertEqual(mark_version.description, mark.description)
    self.assertEqual(mark_version.link, mark.link)
    self.assertEqual(mark_version.problem_pattern, mark.problem_pattern)
    self.assertEqual(mark_version.function, mark.function)
    self.assertEqual(len(UnknownProblem.objects.filter(name='KeyE: attr')), 1)
    self.assertEqual(len(MarkUnknownReport.objects.filter(mark=mark, report=unknown)), 1)

    # Associations changes
    response = self.client.get('/marks/unknown/association_changes/%s/' % cache_id)
    self.assertEqual(response.status_code, 200)

    # Edit mark page
    response = self.client.get(reverse('marks:mark_form', args=['unknown', mark.pk, 'edit']))
    self.assertEqual(response.status_code, 200)

    # Edit mark
    response = self.client.post(reverse('marks:mark_form', args=['unknown', mark.pk, 'edit']), {
        'data': json.dumps({
            'description': 'New mark description',
            'is_modifiable': True,
            'status': MARK_STATUS[1][0],
            'function': r"KeyError:\s'(\S*)'.*",
            'problem': 'KeyE: {0}',
            'link': 'http://mysite.com/',
            'is_regexp': True,
            'comment': 'Change 1'
        })
    })
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    res = json.loads(str(response.content, encoding='utf8'))
    self.assertNotIn('error', res)
    self.assertIn('cache_id', res)
    cache_id = res['cache_id']

    # Check mark's tables after the edit
    try:
        mark = MarkUnknown.objects.get(job=self.job, author__username='manager')
    except ObjectDoesNotExist:
        self.fail('Mark was not created')
    self.assertEqual(mark.version, 2)
    self.assertEqual(mark.description, 'New mark description')
    self.assertEqual(mark.is_modifiable, True)
    self.assertEqual(len(mark.versions.all()), 2)
    mark_version = MarkUnknownHistory.objects.filter(mark=mark).order_by('-version').first()
    self.assertEqual(mark_version.version, 2)
    self.assertEqual(mark_version.author.username, 'manager')
    self.assertEqual(mark_version.description, mark.description)
    self.assertEqual(mark_version.comment, 'Change 1')
    self.assertEqual(mark_version.link, mark.link)
    self.assertEqual(mark_version.problem_pattern, mark.problem_pattern)
    self.assertEqual(mark_version.function, mark.function)
    self.assertEqual(len(UnknownProblem.objects.filter(name='KeyE: attr')), 1)
    self.assertEqual(len(MarkUnknownReport.objects.filter(mark=mark, report=unknown)), 1)

    # Associations changes
    response = self.client.get('/marks/unknown/association_changes/%s/' % cache_id)
    self.assertEqual(response.status_code, 200)

    # Unknown marks list page
    response = self.client.get(reverse('marks:list', args=['unknown']))
    self.assertEqual(response.status_code, 200)
    response = self.client.get(reverse('marks:mark', args=['unknown', mark.id]))
    self.assertEqual(response.status_code, 200)

    # Inline mark edit form
    response = self.client.get('/marks/unknown/%s/edit/inline/' % mark.id)
    self.assertEqual(response.status_code, 200)

    # Confirm/unconfirm association
    # Mark is automatically associated after its changes
    self.assertEqual(
        MarkUnknownReport.objects.filter(mark=mark, report=unknown, type=ASSOCIATION_TYPE[0][0]).count(), 1
    )
    response = self.client.post('/marks/association/unknown/%s/%s/unconfirm/' % (unknown.pk, mark.pk))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
    self.assertEqual(MarkUnknownReport.objects.filter(
        mark=mark, report=unknown, type=ASSOCIATION_TYPE[2][0]).count(), 1)
    response = self.client.post('/marks/association/unknown/%s/%s/confirm/' % (unknown.pk, mark.pk))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
    self.assertEqual(MarkUnknownReport.objects.filter(
        mark=mark, report=unknown, type=ASSOCIATION_TYPE[1][0]).count(), 1)

    # Like/dislike association
    response = self.client.post('/marks/association/unknown/%s/%s/like/' % (unknown.id, mark.id))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
    self.assertEqual(UnknownAssociationLike.objects.filter(
        association__report=unknown, association__mark=mark, dislike=False
    ).count(), 1)
    response = self.client.post('/marks/association/unknown/%s/%s/dislike/' % (unknown.id, mark.id))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
    self.assertEqual(UnknownAssociationLike.objects.filter(
        association__report=unknown, association__mark=mark, dislike=True
    ).count(), 1)
    self.assertEqual(UnknownAssociationLike.objects.filter(
        association__report=unknown, association__mark=mark, dislike=False
    ).count(), 0)

    # Download mark
    response = self.client.get(reverse('marks:unknown-download', args=[mark.pk]))
    self.assertEqual(response.status_code, 200)
    self.assertIn(response['Content-Type'], {'application/x-zip-compressed', 'application/zip'})
    with open(os.path.join(settings.MEDIA_ROOT, self.unknown_archive), mode='wb') as fp:
        for content in response.streaming_content:
            fp.write(content)

    # Download mark in preset format
    response = self.client.get(reverse('marks:unknown-download-preset', args=[mark.pk]))
    self.assertEqual(response.status_code, 200)

    # Delete mark
    response = self.client.post('/marks/delete/', {'type': 'unknown', 'ids': json.dumps([mark.id])})
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    res = json.loads(str(response.content, encoding='utf8'))
    self.assertNotIn('error', res)
    self.assertEqual(len(MarkUnknown.objects.all()), 0)
    self.assertEqual(len(MarkUnknownReport.objects.all()), 0)

    # Upload mark (the 'with' block closes the file; no explicit close needed)
    with open(os.path.join(settings.MEDIA_ROOT, self.unknown_archive), mode='rb') as fp:
        response = self.client.post('/marks/upload/', {'file': fp})
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    res = json.loads(str(response.content, encoding='utf8'))
    self.assertIn('id', res)
    self.assertEqual(res.get('type'), 'unknown')
    try:
        newmark = MarkUnknown.objects.get(pk=res['id'])
    except ObjectDoesNotExist:
        self.fail('Mark was not uploaded')
    self.assertEqual(newmark.version, 2)
    self.assertEqual(newmark.description, 'New mark description')
    self.assertEqual(newmark.is_modifiable, True)
    self.assertEqual(len(newmark.versions.all()), 2)
    newmark_version = MarkUnknownHistory.objects.filter(mark=newmark).order_by('-version').first()
    self.assertEqual(newmark_version.version, 2)
    self.assertEqual(newmark_version.author.username, 'manager')
    self.assertEqual(newmark_version.comment, 'Change 1')
    self.assertEqual(len(MarkUnknownReport.objects.filter(mark=newmark, report=unknown)), 1)
    self.assertEqual(len(MarkUnknownReport.objects.filter(report=unknown)), 1)
    self.assertEqual(len(UnknownProblem.objects.filter(name='KeyE: attr')), 1)

    # Check non-regexp function
    response = self.client.post('/marks/check-unknown-mark/%s/' % unknown.pk, {
        'function': "KeyError: 'attr' was not found.",
        'pattern': 'KeyE: attr',
        'is_regex': 'false'
    })
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))

    # Non-regexp function change
    response = self.client.post(reverse('marks:mark_form', args=['unknown', newmark.pk, 'edit']), {
        'data': json.dumps({
            'description': 'New mark description',
            'is_modifiable': True,
            'status': MARK_STATUS[2][0],
            'function': "KeyError: 'attr' was not found.",
            'problem': 'KeyE: attr',
            'link': 'http://mysite.com/',
            'is_regexp': False,
            'comment': 'Change 3'
        })
    })
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))

    # Some more mark changes (versions 4 and 5)
    for i in range(4, 6):
        response = self.client.post(reverse('marks:mark_form', args=['unknown', newmark.pk, 'edit']), {
            'data': json.dumps({
                'description': 'No regexp',
                'is_modifiable': True,
                'status': MARK_STATUS[2][0],
                'function': r"KeyError:.*'(\S*)'",
                'problem': 'KeyE: {0}',
                'link': 'http://mysite.com/',
                'is_regexp': True,
                'comment': 'Change %s' % i
            })
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
    self.assertEqual(len(MarkUnknownHistory.objects.filter(mark=newmark)), 5)

    # Get 3rd version data.
    # NOTE: Django's test client takes the query dict as the 'data' argument;
    # the previous 'params=' keyword was silently ignored, so the version
    # number never reached the view.
    response = self.client.get(reverse('marks:mark_form', args=['unknown', newmark.pk, 'edit']),
                               {'version': 3})
    self.assertEqual(response.status_code, 200)

    # Compare 1st and 4th versions
    response = self.client.post('/marks/unknown/%s/compare_versions/' % newmark.pk, {'v1': 1, 'v2': 4})
    self.assertEqual(response.status_code, 200)

    # Remove 2nd and 4th versions
    response = self.client.post('/marks/unknown/%s/remove_versions/' % newmark.pk, {'versions': json.dumps([2, 4])})
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    res = json.loads(str(response.content, encoding='utf8'))
    self.assertNotIn('error', res)
    self.assertIn('success', res)
    self.assertEqual(len(MarkUnknownHistory.objects.filter(mark=newmark)), 3)

    # Reports' lists pages
    root_comp = ReportComponent.objects.get(root__job_id=self.job.pk, parent=None)
    response = self.client.get(
        '%s?component=%s' % (reverse('reports:unknowns', args=[root_comp.pk]), parent.component_id)
    )
    self.assertIn(response.status_code, {200, 302})
    try:
        problem_id = UnknownProblem.objects.get(name='KeyE: attr').pk
    except ObjectDoesNotExist:
        self.fail("Can't find unknown problem")
    response = self.client.get('%s?component=%s&problem=%s' % (
        reverse('reports:unknowns', args=[root_comp.pk]), parent.component_id, problem_id
    ))
    self.assertIn(response.status_code, {200, 302})

    # Download all marks
    response = self.client.get('/marks/api/download-all/')
    self.assertEqual(response.status_code, 200)
    self.assertNotEqual(response['Content-Type'], 'application/json')
    with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='wb') as fp:
        for content in response.streaming_content:
            fp.write(content)

    # Delete all marks
    self.client.post('/marks/delete/', {
        'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True)))
    })
    self.assertEqual(MarkUnknown.objects.count(), 0)
    # All verdicts must be "unknown unmarked"
    self.assertEqual(MarkUnknownReport.objects.all().count(), 0)

    # Upload all marks
    with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='rb') as fp:
        response = self.client.post('/marks/upload-all/', {'delete': 1, 'file': fp})
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response['Content-Type'], 'application/json')
    # Parse the upload summary once instead of decoding the response repeatedly
    res = json.loads(str(response.content, encoding='utf8'))
    self.assertNotIn('error', res)
    self.assertEqual(int(res['fail']), 0)
    self.assertEqual(int(res['unknown']), 1)
def tearDown(self):
    """Remove the temporary archives/files the tests may have written to
    MEDIA_ROOT, then run the regular test-case cleanup."""
    # Loop over the known artifact names instead of repeating the
    # exists/remove pair five times.
    for name in (self.safe_archive, self.unsafe_archive, self.unknown_archive,
                 self.test_tagsfile, self.all_marks_arch):
        path = os.path.join(settings.MEDIA_ROOT, name)
        if os.path.exists(path):
            os.remove(path)
    super(TestMarks, self).tearDown()
| 52.845914 | 120 | 0.642555 |
c30ea2dfd401c1916572054375002b08df4e6142 | 40,848 | py | Python | test/functional/wallet_importmulti.py | acejr1337/TerryDavisCoin | 72a5c737f68042fa8f73d2879c13b0c120deb4aa | [
"MIT"
] | null | null | null | test/functional/wallet_importmulti.py | acejr1337/TerryDavisCoin | 72a5c737f68042fa8f73d2879c13b0c120deb4aa | [
"MIT"
] | 3 | 2021-09-25T19:03:51.000Z | 2021-09-27T20:27:25.000Z | test/functional/wallet_importmulti.py | acejr1337/TerryDavisCoin | 72a5c737f68042fa8f73d2879c13b0c120deb4aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.script import (
CScript,
OP_NOP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
bytes_to_hex_str,
)
from test_framework.wallet_util import (
get_key,
get_multisig,
test_address,
)
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
    """Run two nodes on a fresh regtest chain, both using legacy addresses."""
    self.setup_clean_chain = True
    self.num_nodes = 2
    # One independent argument list per node.
    self.extra_args = [["-addresstype=legacy"] for _ in range(2)]
def skip_test_if_missing_module(self):
    """Skip the whole test when the build has no wallet support."""
    self.skip_if_no_wallet()
def setup_network(self):
    """Start the nodes only; this override makes no peer connections here."""
    self.setup_nodes()
def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=None):
    """Run importmulti on node1 with a single request and assert the result.

    Args:
        req: request dict passed to the importmulti RPC.
        success: expected value of the result's 'success' field.
        error_code: expected RPC error code, or None if no error is expected.
        error_message: expected RPC error message (checked with error_code).
        warnings: expected warnings (order-insensitive); None means none.
    """
    # Avoid a mutable default argument; treat None as "no warnings expected".
    if warnings is None:
        warnings = []
    result = self.nodes[1].importmulti([req])
    observed_warnings = result[0].get('warnings', [])
    assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
    assert_equal(result[0]['success'], success)
    if error_code is not None:
        assert_equal(result[0]['error']['code'], error_code)
        assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info("Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Example label"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + bytes_to_hex_str(CScript([OP_NOP]))
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info("Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info("Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Import P2WPKH address as watch only
self.log.info("Should import a P2WPKH address as watch only")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=True,
solvable=False)
# Import P2WPKH address with public key but no private key
self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2wpkh_addr,
ismine=False,
solvable=True)
# Import P2WPKH address with key and check it is spendable
self.log.info("Should import a P2WPKH address with key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=False,
ismine=True)
# P2WSH multisig address without scripts or keys
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=False)
# Same P2WSH multisig address as above, but now with witnessscript + private keys
self.log.info("Should import a p2wsh with respective witness script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now",
"witnessscript": multisig.redeem_script,
"keys": multisig.privkeys},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=True,
ismine=True,
sigsrequired=2)
# P2SH-P2WPKH address with no redeemscript or public or private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=False,
ismine=False)
# P2SH-P2WPKH address + redeemscript + public key with no private key
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=False)
# P2SH-P2WPKH address + redeemscript + private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True)
# P2SH-P2WSH multisig + redeemscript with no private key
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
"timestamp": "now",
"redeemscript": multisig.p2wsh_script,
"witnessscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_p2wsh_addr,
solvable=True,
ismine=False)
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now",
"label": "Descriptor import test",
"keys": [key.privkey]},
success=False,
error_code=-5,
error_message="Descriptor is invalid")
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"label": "Descriptor import test",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True,
label="Descriptor import test")
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info("Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor without keys
self.log.info("Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
for address in addresses:
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
label="Descriptor import test")
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info("Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info("Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress()
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey wallet
self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress()
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info('Imported scripts with pubkeys shoud not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc("")
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'rtdc1qtmp74ayg7p24uslctssvjm06q5phz4yrc3zpyj', # m/0'/0'/0
'rtdc1q8vprchan07gzagd5e6v9wd7azyucksq2c4ynpe', # m/0'/0'/1
'rtdc1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjlg94gz', # m/0'/0'/2
'rtdc1qau64272ymawq26t90md6an0ps99qkrse22pnz3', # m/0'/0'/3
'rtdc1qsg97266hrh6cpmutqen8s4s962aryy77vv4ql3', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range' : [0, 4],
}]
)
for i in range(0, 5):
addr = wrpc.getnewaddress('', 'bech32')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
    # Script entry point: run the importmulti RPC functional test framework.
    ImportMultiTest().main()
| 49.814634 | 316 | 0.551581 |
2a64fe0b181389951bcbf5bfb7b835401ef16e55 | 2,219 | py | Python | optimize.py | olaviinha/HiresNeuralStyleTransfer | ff2f742f306819796cf1ec88f6efec0a619ae2f1 | [
"Apache-2.0"
] | 15 | 2018-02-11T16:56:19.000Z | 2021-12-29T21:34:33.000Z | optimize.py | olaviinha/HiresNeuralStyleTransfer | ff2f742f306819796cf1ec88f6efec0a619ae2f1 | [
"Apache-2.0"
] | 4 | 2018-06-08T00:33:51.000Z | 2021-01-01T12:39:06.000Z | optimize.py | olaviinha/HiresNeuralStyleTransfer | ff2f742f306819796cf1ec88f6efec0a619ae2f1 | [
"Apache-2.0"
] | 3 | 2019-11-08T09:37:06.000Z | 2020-08-05T09:36:53.000Z | from lasagne.utils import floatX
import time
import scipy
import numpy as np
# this LossAndGradSimultaneousEvaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class LossAndGradSimultaneousEvaluator(object):
    """Compute loss and gradients in one pass while exposing them separately.

    scipy.optimize.fmin_l_bfgs_b requires two callables (``func`` and
    ``fprime``), but evaluating them independently would run the expensive
    forward pass twice per L-BFGS step.  ``loss`` therefore computes both
    values at once and caches the gradient, which the subsequent ``grads``
    call returns and clears.  ``loss`` and ``grads`` must be called strictly
    alternately, starting with ``loss``.
    """

    def __init__(self, height, width, generated_image, f_outputs):
        # Shared variable holding the image being optimized (set via set_value).
        self.generated_image = generated_image
        # Compiled function returning (loss, gradient) for the current image.
        self.f_outputs = f_outputs
        # Cached results from the most recent loss() call; None between pairs.
        self.loss_value = None
        self.grad_values = None
        self.height = height
        self.width = width

    def eval_loss_and_grads(self, x):
        """Evaluate the model on flat image ``x``; return (loss, flat float64 grads)."""
        x = x.reshape((1, 3, self.height, self.width))
        self.generated_image.set_value(floatX(x))
        outs = self.f_outputs()
        loss_value = outs[0]
        # fmin_l_bfgs_b requires a flat float64 gradient vector.
        grad_values = outs[1].flatten().astype('float64')
        return loss_value, grad_values

    def loss(self, x):
        """Compute and return the loss at ``x``, caching the gradient for grads()."""
        # Raise instead of assert: the call-order contract must hold even when
        # Python runs with -O, which strips assert statements.
        if self.loss_value is not None:
            raise RuntimeError("loss() called twice without an intervening grads() call")
        loss_value, grad_values = self.eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradient cached by the preceding loss() call and reset state."""
        if self.loss_value is None:
            raise RuntimeError("grads() called before loss()")
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
def optimize(x0, generated_image, f_outputs, num_iterations, checkpoint_iterations):
    """Optimize the generated image with L-BFGS-B, yielding periodic checkpoints.

    Runs up to ``num_iterations`` function evaluations in segments of
    ``checkpoint_iterations``.  After each segment, yields a tuple of
    (current image as a float64 array, cumulative iteration count, loss at
    the checkpoint — or None if no segment has run yet, wall-clock seconds
    spent in the segment).
    """
    # Import the submodule explicitly: the bare ``import scipy`` at the top of
    # this file does not guarantee that ``scipy.optimize`` is loaded.
    from scipy.optimize import fmin_l_bfgs_b

    evaluator = LossAndGradSimultaneousEvaluator(
        x0.shape[2], x0.shape[3], generated_image, f_outputs
    )
    # Initialize so the yield below cannot hit an unbound name when
    # num_iterations == 0 (the loop body then skips the optimizer call).
    min_val = None
    for i in range(num_iterations // checkpoint_iterations + 1):
        start_time = time.time()
        remaining_iterations = num_iterations - (checkpoint_iterations * i)
        if remaining_iterations > 0:
            # Only the minimum loss is used; the optimized image itself is
            # read back from the shared ``generated_image`` variable below.
            _x, min_val, _info = fmin_l_bfgs_b(
                evaluator.loss,
                x0.flatten(),
                fprime=evaluator.grads,
                maxfun=min(remaining_iterations, checkpoint_iterations),
            )
        x0 = generated_image.get_value().astype('float64')
        end_time = time.time()
        yield (x0, (i + 1) * checkpoint_iterations, min_val, end_time - start_time)
| 40.345455 | 170 | 0.697161 |
cc272f56ed49ea1e4b3a76d26ade7b06c3153fb2 | 11,670 | py | Python | src/benchmarks/gc/src/analysis/gui_server.py | sandreenko/performance | 11b77ba778e8d952caee4c407c90e7c4eabfb5bc | [
"MIT"
] | 547 | 2018-11-06T21:14:57.000Z | 2022-03-31T21:14:57.000Z | src/benchmarks/gc/src/analysis/gui_server.py | sandreenko/performance | 11b77ba778e8d952caee4c407c90e7c4eabfb5bc | [
"MIT"
] | 1,572 | 2018-11-06T21:30:31.000Z | 2022-03-31T23:31:25.000Z | src/benchmarks/gc/src/analysis/gui_server.py | sandreenko/performance | 11b77ba778e8d952caee4c407c90e7c4eabfb5bc | [
"MIT"
] | 196 | 2018-11-06T20:58:21.000Z | 2022-03-29T21:04:21.000Z | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
"""
WARN: The server code is still here but the JS gui code is not in this repo
because we need to know if it's OK to include vendored requirements.
(Or, we could update it to get them from NPM).
"""
from dataclasses import dataclass
from math import inf
from pathlib import Path
import socket
from threading import Lock
from typing import Any, Callable, cast, Dict, Optional, Sequence, Tuple
from flask import Flask, jsonify, request, Request, Response, send_from_directory
from ..commonlib.collection_util import find_only_matching
from ..commonlib.config import CWD, GC_PATH
from ..commonlib.parse_and_serialize import to_serializable
from ..commonlib.result_utils import match
from ..commonlib.type_utils import with_slots
from .clr import get_clr
from .clr_types import AbstractTraceGC, AbstractTraceLog, AbstractTraceProcess
from .core_analysis import (
find_process,
get_cpu_stack_frame_summary,
get_etl_trace,
get_gcs_from_process,
get_trace_log,
get_traced_processes,
load_module_symbols_for_events,
ProcessPredicate,
)
from .gui_join_analysis import (
GC_JOIN_STAGES_BY_GC_PHASE,
GcJoinStatistics,
get_gc_join_duration_statistics,
get_gc_join_timeframes,
get_gc_thread_state_timeline,
StatsOverAllJoins,
)
from .gui_stolen_cpu_analysis import get_gc_stolen_cpu_instances, get_gc_stolen_cpu_times
def _j(value: object) -> Response:
    """Convert *value* to a JSON-serializable form and wrap it in a Flask response."""
    serializable = to_serializable(value)
    return jsonify(cast(Any, serializable))
@with_slots
@dataclass(frozen=True)
class App:
    # The fully-configured Flask application with all GUI routes registered.
    app: Flask
    # Can't make this directly be a Callable: https://github.com/python/mypy/issues/708
    # 1-tuple wrapping a function that loads the given ETL trace and returns the
    # ProcessID of the process matching the given predicate.
    load_etl_and_get_process_id: Tuple[Callable[[Path, ProcessPredicate], int],]
# Directory holding the static GUI assets served under /gui/<path>.
_GUI_DIR = GC_PATH / "gui"
def get_app() -> App:
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
symbol_log_path = CWD / "sym_loader.txt"
clr = get_clr()
class InvalidUsage(Exception):
def __init__(self, message: str, status_code: int = 400, payload: Any = None):
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.payload = payload
def to_dict(self) -> Dict[str, Any]:
rv = dict(self.payload or ())
rv["message"] = self.message
return rv
def handle_invalid_usage(error: InvalidUsage) -> Response:
response = _j(error.to_dict())
response.status_code = error.status_code
return response
app.errorhandler(InvalidUsage)(handle_invalid_usage)
def all_exception_handler(error: Exception) -> Response:
raise error
app.errorhandler(Exception)(all_exception_handler)
def route(path: str, fn: Callable[..., Response]) -> None:
assert not path.endswith(
"/"
) # flasks seems to automatically add '/' to all requests. TODO: or is this jquery?
app.route(path)(fn)
app.route(path + "/")(fn)
def send_file(path: str) -> Response:
return send_from_directory(str(_GUI_DIR), path)
app.route("/gui/<path:path>")(send_file)
def find_proc_by_id(proc_id: int) -> AbstractTraceProcess:
return find_only_matching(lambda p: p.ProcessID, proc_id, all_processes)
proc_id_and_gcs: Optional[Tuple[int, Sequence[AbstractTraceGC]]] = None
get_gcs_lock = Lock()
def get_gcs(proc_id: int) -> Sequence[AbstractTraceGC]:
nonlocal proc_id_and_gcs
with get_gcs_lock:
if proc_id_and_gcs is None:
proc_id_and_gcs = (proc_id, get_gcs_from_process(clr, find_proc_by_id(proc_id)))
assert proc_id_and_gcs[0] == proc_id
return proc_id_and_gcs[1]
def get_gc(proc_id: int, gc_id: int) -> AbstractTraceGC:
return get_gcs(proc_id)[gc_id]
def get_gc_basic_info(proc_id: int, gc_id: int) -> Response:
gc = get_gc(proc_id, gc_id)
return _j(
{
"Pause Start Time": gc.PauseStartRelativeMSec,
"GC Start Time": gc.StartRelativeMSec,
"GC End Time": gc.StartRelativeMSec + gc.DurationMSec,
"Pause End Time": gc.PauseStartRelativeMSec + gc.PauseDurationMSec,
"Generation": gc.Generation,
"Type": gc.Type,
"Reason": gc.Reason,
}
)
route("/stats/proc/<int:proc_id>/gc/<int:gc_id>", get_gc_basic_info)
@with_slots
@dataclass(frozen=True)
class GcPauseTime:
DurationMSec: float
PauseDurationMSec: float
gc_number: int
def get_gc_pause_durations() -> Response:
proc_id = _must_fetch_int(request, "proc_id")
gcs = get_gcs(proc_id)
gc_durations = [
GcPauseTime(gc.DurationMSec, gc.PauseDurationMSec, idx) for idx, gc in enumerate(gcs)
]
s = sorted(gc_durations, key=lambda gc: gc.DurationMSec)
return _j(s)
route("/stats/gc/pause_time", get_gc_pause_durations)
def get_gc_stolen_cpu_time() -> Response:
proc_id = _must_fetch_int(request, "proc_id")
# TODO: it used to be a list in case pid wasn't provided, but it always is...
return _j([get_gc_stolen_cpu_times(clr, proc_id, "proc_name", get_gcs(proc_id))])
route("/stats/gc/stolen_cpu/per_phase", get_gc_stolen_cpu_time)
def get_gc_join_imbalance_stats() -> Response:
proc_id = _must_fetch_int(request, "proc_id")
gc_id = _must_fetch_int(request, "gc_id")
def get_gc_join_duration_statistics_not_none(gc: AbstractTraceGC) -> GcJoinStatistics:
return match(
get_gc_join_duration_statistics(gc),
cb_ok=lambda x: x,
# TODO: gui should handle this
cb_err=lambda _: GcJoinStatistics(
statistics_over_all_joins=StatsOverAllJoins(0, 0, 0),
statistics_over_individual_joins={},
statistics_over_individual_gc_phases={},
),
)
return _j(get_gc_join_duration_statistics_not_none(get_gc(proc_id, gc_id)))
route("/stats/gc/join_imbalance", get_gc_join_imbalance_stats)
def get_stolen_cpu_per_phase() -> Response:
proc_id = _must_fetch_int(request, "proc_id")
gc_id = _must_fetch_int(request, "gc_id")
return _j(get_gc_stolen_cpu_instances(clr, get_gc(proc_id, gc_id), trace_log.Events))
route("/chrono/gc/stolen_cpu/per_phase", get_stolen_cpu_per_phase)
def get_gc_heap_join_states() -> Response:
proc_id = _try_fetch_int(request, "proc_id")
gc_id = _try_fetch_int(request, "gc_id")
heap_id = _try_fetch_int(request, "heap_id")
if proc_id is None or gc_id is None:
raise InvalidUsage(
"USAGE: GET\t/chrono/gc/join/per_thread_state\tARGS\tproc_id: "
"Process ID\tgc_id: GC number for this process",
status_code=422,
)
return _j(get_gc_thread_state_timeline(clr, get_gc(proc_id, gc_id), heap_id))
route("/chrono/gc/join/per_thread_state", get_gc_heap_join_states)
def get_gc_join_time_ranges() -> Response:
proc_id = _must_fetch_int(request, "proc_id")
gc_id = _must_fetch_int(request, "gc_id")
if None in [proc_id, gc_id]:
raise InvalidUsage(
"USAGE: GET\t/chrono/gc/join/stages\tARGS\tproc_id: "
"Process ID\tgc_id: GC number for this process",
status_code=422,
)
stages_phases_known, phase_times, stage_times, join_index_times = get_gc_join_timeframes(
clr, get_gc(proc_id, gc_id)
)
for stage in stage_times:
st = stage_times[stage]
for st_time in ("start", "end"):
if getattr(st, st_time) in [inf, -inf]:
setattr(st, st_time, str(getattr(st, st_time)))
return _j(
{
"Join Stages Known": stages_phases_known,
"Join Phases Known": stages_phases_known,
"Timeframes By GC Phase": phase_times,
"Timeframes By Join Stage": stage_times,
"Timeframes By Join Index": join_index_times,
}
)
route("/chrono/gc/join/stages", get_gc_join_time_ranges)
def get_stack_frame_summary_for_cpu(cpu_id: int) -> Response:
proc_ids = [int(arg) for arg in request.args.getlist("proc_id")]
start_time = _must_fetch_float(request, "start_time")
end_time = _must_fetch_float(request, "end_time")
return _j(
get_cpu_stack_frame_summary(clr, trace_log, cpu_id, proc_ids, start_time, end_time)
)
route("/chrono/cpu/<int:cpu_id>/stack_frames/summary", get_stack_frame_summary_for_cpu)
etl_path: Path
trace_log: AbstractTraceLog
all_processes: Sequence[AbstractTraceProcess]
def load_etl_trace() -> Response:
assert "etl_path" in request.form # if "etl_path" in request.form:
etl_path: str = request.form["etl_path"]
do_load_etl(Path(etl_path))
return _j(etl_path)
# Note: I do not allow POST requests to the route without a trailing slash because Flask will
# re-route POST /trace to GET /trace/, instead of POST /trace/ as expected.
# Instead of introducing potential debugging confusion to users of this API,
# it seems better to allow a small amount of inconsistency.
app.route("/trace/", methods=["POST"])(load_etl_trace)
def do_load_etl(the_etl_path: Path) -> None:
    """Load the trace at ``the_etl_path`` and refresh the closure state.

    Side effects: rebinds ``etl_path``, ``all_processes`` and ``trace_log``
    (declared above), then loads module symbols for the new trace log.
    """
    nonlocal all_processes, etl_path, trace_log
    etl_path = the_etl_path
    # NOTE(review): the ordering of these loader calls may matter (each one
    # re-reads the trace file) — confirm before reordering.
    sym_path = get_etl_trace(
        clr, etl_path
    ).SymPath  # TODO: this is only used for .SymPath, seems inefficient
    all_processes = tuple(get_traced_processes(clr, etl_path).processes)
    trace_log = get_trace_log(clr, etl_path)
    load_module_symbols_for_events(clr, sym_path, trace_log, symbol_log_path)
def list_processes() -> Response:
    """GET /processes: one summary row per process in the loaded trace."""
    rows = [
        {"Process Name": p.Name, "PID": p.ProcessID, "Parent PID": p.ParentID}
        for p in all_processes
    ]
    return _j(rows)


route("/processes", list_processes)
def get_join_stages_per_phase() -> Response:
    """Expose the static mapping of GC phase -> known join stages."""
    payload = {}
    for gc_phase, join_stages in GC_JOIN_STAGES_BY_GC_PHASE.items():
        payload[gc_phase] = [
            {"name": stage.name, "id": stage.value} for stage in join_stages
        ]
    return _j(payload)


route("/info/gc/phases/join_stages_per_phase", get_join_stages_per_phase)
def load_etl_and_get_process_id(etl_path: Path, process_predicate: ProcessPredicate) -> int:
    """Load the trace at ``etl_path`` and return the PID of the first process
    matching ``process_predicate``."""
    do_load_etl(etl_path)
    return find_process(clr, all_processes, process_predicate).ProcessID

# Hand the configured Flask app plus the loader helper back to the caller.
return App(app, (load_etl_and_get_process_id,))
def is_port_used(port: int) -> bool:
    """Return True when 127.0.0.1:``port`` cannot be bound (already in use).

    Fix: the probe socket was never closed, leaking the descriptor and
    keeping the port bound until garbage collection; a context manager
    closes it deterministically on every path.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        try:
            probe.bind(("127.0.0.1", port))
        except socket.error:
            return True
    return False
def _must_fetch_int(rq: Request, arg_name: str) -> int:
return int(rq.args[arg_name])
def _must_fetch_float(rq: Request, arg_name: str) -> float:
return float(rq.args[arg_name])
def _try_fetch_str(rq: Request, arg_name: str) -> Optional[str]:
return rq.args.get(arg_name, None)
def _try_fetch_int(rq: Request, arg_name: str) -> Optional[int]:
s = _try_fetch_str(rq, arg_name)
return None if s is None else int(s)
| 35.363636 | 97 | 0.65964 |
c9c4026d7a9b3eedc1f459eab7418092a6bf4adf | 300 | py | Python | apps/relation/views.py | Palameng/Demo | f216982851ca7e024a9afebcb47c575765f51622 | [
"Apache-2.0"
] | null | null | null | apps/relation/views.py | Palameng/Demo | f216982851ca7e024a9afebcb47c575765f51622 | [
"Apache-2.0"
] | null | null | null | apps/relation/views.py | Palameng/Demo | f216982851ca7e024a9afebcb47c575765f51622 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.views import View
from .models import Departments, Staffs, Projects, Gloup, Person, Membership
class OpertionManytoManyField(View):
def post(self, request):
return render(request, 'success.html', {
})
| 21.428571 | 76 | 0.723333 |
b42ee812e233c4c53c04d4531322cbed20a74991 | 784 | py | Python | server/globalImports.py | eternalamit5/Highly-Accelerated-Life-Test-HALT-Monitoring-Tool | e04c0f3b5f446acd56de6329fbf1695792504efd | [
"MIT"
] | null | null | null | server/globalImports.py | eternalamit5/Highly-Accelerated-Life-Test-HALT-Monitoring-Tool | e04c0f3b5f446acd56de6329fbf1695792504efd | [
"MIT"
] | null | null | null | server/globalImports.py | eternalamit5/Highly-Accelerated-Life-Test-HALT-Monitoring-Tool | e04c0f3b5f446acd56de6329fbf1695792504efd | [
"MIT"
] | null | null | null | # Flask
from flask import Flask, request, session, send_file
from flask_cors import CORS
# mongo
from flask_pymongo import PyMongo
# mqtt
from flask_mqtt import Mqtt
# web sockets
from flask_sockets import Sockets
# influx
from influxdb import InfluxDBClient
# numpy
import numpy as np
import pandas as pd
# time, os
import datetime
import time, os
# json
import json
from bson.json_util import dumps
# password and uuid
from werkzeug.security import generate_password_hash, check_password_hash
import uuid
# math, FFT and spectrogram
from scipy import fftpack, signal
from skimage import util
import matplotlib.pyplot as plt
# configuration
import conf
from flask_influxdb_client import InfluxDB
from line_protocol_parser import parse_line
from bson.json_util import dumps | 18.232558 | 73 | 0.818878 |
15d15e6e3ed93b34c570b5dbd756a39949b1debc | 4,673 | py | Python | saas/backend/apps/action/tasks.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 7 | 2021-08-13T03:48:16.000Z | 2021-12-20T15:31:38.000Z | saas/backend/apps/action/tasks.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 456 | 2021-08-16T02:13:57.000Z | 2022-03-30T10:02:49.000Z | saas/backend/apps/action/tasks.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 17 | 2021-08-10T04:08:46.000Z | 2022-03-14T14:24:36.000Z | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from collections import defaultdict
from typing import Optional
from celery import task
from backend.apps.action.models import AggregateAction
from backend.biz.system import SystemBiz
from backend.service.action import ActionService
from backend.service.models.action import Action
from backend.service.models.instance_selection import ChainNode
@task(ignore_result=True)
def generate_action_aggregate():
    # Rebuild the aggregate-action configuration for every registered system.
    systems = SystemBiz().list()
    action_svc = ActionService()
    # Process each system independently.
    for system in systems:
        system_id = system.id
        actions = action_svc.list(system_id)
        # Group aggregatable action ids by the first node of their instance
        # selections.
        resource_type_actions = aggregate_actions_group_by_selection_node(actions)
        # Fetch the aggregate actions already stored for this system.
        exists_agg_actions = query_exists_aggregate_actions(system_id)
        # Stored aggregates whose resource type no longer has any grouped
        # actions must be deleted.
        delete_agg_actions = [
            agg_action
            for resource_type, agg_action in exists_agg_actions.items()
            if resource_type not in resource_type_actions
        ]
        # Split the remaining groups into ones to create and ones to update.
        create_agg_actions, update_agg_actions = [], []
        for resource_type, action_ids in resource_type_actions.items():
            if resource_type in exists_agg_actions:
                agg_action = exists_agg_actions[resource_type]
                if len(action_ids) == 1:
                    # A group of one action is no longer worth aggregating.
                    delete_agg_actions.append(agg_action)
                    continue
                if set(agg_action.action_ids) != set(action_ids):
                    agg_action.action_ids = action_ids
                    update_agg_actions.append(agg_action)
                continue
            if len(action_ids) == 1:
                # Never create an aggregate for a single action.
                continue
            agg_action = AggregateAction(system_id=system_id)
            agg_action.action_ids = action_ids
            agg_action.aggregate_resource_type = {"system_id": resource_type[0], "id": resource_type[1]}
            create_agg_actions.append(agg_action)
        # Apply the pending create / update / delete operations.
        if create_agg_actions:
            AggregateAction.objects.bulk_create(create_agg_actions)
        if update_agg_actions:
            # "_action_ids" is presumably the storage column behind the
            # action_ids property -- verify against the model definition.
            AggregateAction.objects.bulk_update(update_agg_actions, ["_action_ids"])
        if delete_agg_actions:
            AggregateAction.objects.filter(id__in=[agg_action.id for agg_action in delete_agg_actions]).delete()
def query_exists_aggregate_actions(system_id):
    """Fetch the stored aggregate actions of one system.

    Returns a dict keyed by (resource system_id, resource type id).
    """
    existing = {}
    for agg in AggregateAction.objects.filter(system_id=system_id):
        resource_type = agg.aggregate_resource_type
        existing[(resource_type["system_id"], resource_type["id"])] = agg
    return existing
def aggregate_actions_group_by_selection_node(actions):
    """Group aggregatable action ids by the first chain node of their
    instance selections.

    Actions related to zero or several resource types, and actions whose
    instance selections disagree on their first node, are skipped.
    """
    grouped = defaultdict(list)
    for act in actions:
        # Only actions tied to exactly one resource type can be aggregated.
        if len(act.related_resource_types) != 1:
            continue
        node = get_action_selection_first_node(act)
        if node is not None:
            grouped[(node.system_id, node.id)].append(act.id)
    return grouped


def get_action_selection_first_node(action: Action) -> Optional[ChainNode]:
    """Return the first chain node shared by all instance selections, or None.

    Only valid for actions related to exactly one resource type.  None is
    returned when there are no instance selections, or when the selections
    disagree about their first node.
    """
    selections = action.related_resource_types[0].instance_selections
    if not selections:
        return None
    head = selections[0].resource_type_chain[0]
    # Every remaining selection must start from the very same node.
    for selection in selections[1:]:
        other = selection.resource_type_chain[0]
        if (other.system_id, other.id) != (head.system_id, head.id):
            return None
    return head
7616964ef7a2e5e8721685c2d72319df48fcd197 | 19,125 | py | Python | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 64 | 2016-10-20T15:47:18.000Z | 2021-11-11T11:57:32.000Z | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 126 | 2016-10-05T10:36:14.000Z | 2019-05-15T08:43:23.000Z | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 63 | 2016-11-07T15:23:08.000Z | 2021-09-22T14:41:16.000Z | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/interface-ref/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configured reference to interface / subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"interface-ref",
"config",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is requuired, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is requuired, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
interface = __builtin__.property(_get_interface, _set_interface)
subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/interface-ref/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configured reference to interface / subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"interface-ref",
"config",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is requuired, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/interface_ref/config/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is requuired, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
interface = __builtin__.property(_get_interface, _set_interface)
subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
| 39.926931 | 352 | 0.617098 |
df1585c121fb6d02fa578d389f1f702a63360267 | 3,435 | py | Python | Python-Data-Cleaning-Cookbook-master/Visualization/violin_plots.py | Dongfang1021/Python_data_analysis_notebook | 210c8bbe1b17736e639bbdbcae19df795fb702d5 | [
"MIT"
] | 1 | 2021-05-31T03:17:00.000Z | 2021-05-31T03:17:00.000Z | Python-Data-Cleaning-Cookbook-master/Visualization/violin_plots.py | Dongfang1021/Python_data_analysis_notebook | 210c8bbe1b17736e639bbdbcae19df795fb702d5 | [
"MIT"
] | null | null | null | Python-Data-Cleaning-Cookbook-master/Visualization/violin_plots.py | Dongfang1021/Python_data_analysis_notebook | 210c8bbe1b17736e639bbdbcae19df795fb702d5 | [
"MIT"
] | null | null | null | # import pandas, numpy, and matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Widen pandas console output and render floats as whole numbers with
# thousands separators.
pd.set_option('display.width', 80)
pd.set_option('display.max_columns', 7)
pd.set_option('display.max_rows', 200)
pd.options.display.float_format = '{:,.0f}'.format
# Load the prepared COVID totals and NLS97 survey extracts from pickles.
covidtotals = pd.read_pickle("data/covidtotals720.pkl")
nls97 = pd.read_pickle("data/nls97.pkl")
# do a violin plot of sat verbal scores, annotated with quartile markers
sns.violinplot(nls97.satverbal, color="wheat", orient="v")
plt.title("Violin Plot of SAT Verbal Score")
plt.ylabel("SAT Verbal")
plt.text(0.08, 780, "outlier threshold", horizontalalignment='center', size='x-small')
# Fix: a stray mid-line "\" continuation here was a SyntaxError; the call is
# now a single logical line.
plt.text(0.065, nls97.satverbal.quantile(0.75), "3rd quartile", horizontalalignment='center', size='x-small')
plt.text(0.05, nls97.satverbal.median(), "Median", horizontalalignment='center', size='x-small')
plt.text(0.065, nls97.satverbal.quantile(0.25), "1st quartile", horizontalalignment='center', size='x-small')
plt.text(0.08, 210, "outlier threshold", horizontalalignment='center', size='x-small')
plt.text(-0.4, 500, "frequency", horizontalalignment='center', size='x-small')
plt.show()
# get some descriptives
# NOTE(review): these two results are neither printed nor assigned, so in a
# script (as opposed to a notebook) they have no visible effect.
nls97.loc[:, ['weeksworked16','weeksworked17','wageincome']].describe()
nls97.wageincome.quantile([0.98,0.99])
# show weeks worked for 2016 and 2017
myplt = sns.violinplot(data=nls97.loc[:, ['weeksworked16','weeksworked17']])
myplt.set_title("Violin Plots of Weeks Worked")
myplt.set_xticklabels(["Weeks Worked 2016","Weeks Worked 2017"])
plt.show()
# do a violin plot of wage income by gender, collapsing marital status into
# three categories first
nls97["maritalstatuscollapsed"] = nls97.maritalstatus.\
replace(['Married','Never-married','Divorced','Separated','Widowed'],\
['Married','Never Married','Not Married','Not Married','Not Married'])
# scale="count" sizes each violin by its number of observations
sns.violinplot(nls97.gender, nls97.wageincome, hue=nls97.maritalstatuscollapsed, scale="count")
plt.title("Violin Plots of Wage Income by Gender and Marital Status")
plt.xlabel('Gender')
plt.ylabel('Wage Income 2017')
plt.legend(title="", loc="upper center", framealpha=0, fontsize=8)
plt.tight_layout()
plt.show()
# do a violin plot of weeks worked by degree attainment
# NOTE(review): rotation=40 is not a seaborn violinplot parameter; the tick
# rotation below (rotation=60) is what actually rotates the labels -- confirm
# and drop the redundant argument.
myplt = sns.violinplot('highestdegree','weeksworked17', data=nls97, rotation=40)
myplt.set_xticklabels(myplt.get_xticklabels(), rotation=60, horizontalalignment='right')
myplt.set_title("Violin Plots of Weeks Worked by Highest Degree")
myplt.set_xlabel('Highest Degree Attained')
myplt.set_ylabel('Weeks Worked 2017')
plt.tight_layout()
plt.show()
# do a violin plot of covid cases per million for selected regions
showregions = ['Oceania / Aus','East Asia','Africa (other)','Western Europe']
covidselect = covidtotals.loc[covidtotals.region.isin(showregions)]
sns.violinplot('region', 'total_cases_pm', order=showregions, data=covidselect)
# Overlay the individual countries as a swarm on top of the violins.
sns.swarmplot(x="region", y="total_cases_pm", order=showregions,\
data=covidselect, size=3, color=".3", linewidth=0)
plt.title("Violin Plot of Total Cases Per Million by Region")
# Fix: the axis labels were swapped -- x holds the region categories and y
# holds the per-million rate.
plt.xlabel("Region")
plt.ylabel("Cases Per Million")
plt.tight_layout()
plt.show()
# use matplotlib for violin plot (same data as the first seaborn plot, with
# explicit quantile markers and a smoothing bandwidth)
plt.violinplot(nls97.satverbal.dropna(), showmedians=True, quantiles=[0.05, 0.1, 0.8, 0.9], bw_method=0.5)
plt.title("Violin Plot of SAT Verbal Score")
plt.show()
# NOTE(review): the three lines below duplicate the weeks-worked plot drawn
# earlier in this script -- confirm whether the repetition is intentional.
myplt = sns.violinplot(data=nls97.loc[:, ['weeksworked16','weeksworked17']])
myplt.set_title("Violin Plots of Weeks Worked")
myplt.set_xticklabels(["Weeks Worked 2016","Weeks Worked 2017"])
plt.show()
ec354932ade8e2ec4ac7211d40027615ba515fa0 | 6,037 | py | Python | tensorflow_addons/image/tests/connected_components_test.py | leandro-gracia-gil/addons | d981b0f1d1bc23f697d159eb1510c24b3c476d28 | [
"Apache-2.0"
] | 2 | 2021-02-22T12:15:33.000Z | 2021-05-02T15:22:13.000Z | tensorflow_addons/image/tests/connected_components_test.py | Ankur3107/addons | af6866a2e6d9ddbc79d612d7cb04a8a5befe4a47 | [
"Apache-2.0"
] | null | null | null | tensorflow_addons/image/tests/connected_components_test.py | Ankur3107/addons | af6866a2e6d9ddbc79d612d7cb04a8a5befe4a47 | [
"Apache-2.0"
] | 1 | 2020-07-23T01:10:18.000Z | 2020-07-23T01:10:18.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for connected component analysis."""
import pytest
import logging
import tensorflow as tf
import numpy as np
from tensorflow_addons.image.connected_components import connected_components
# Image for testing connected_components, with a single, winding component.
# Its two endpoints are at (1, 1) and (6, 3); test_snake_disconnected relies
# on that when it removes interior pixels.
SNAKE = np.asarray(
    [
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 1, 1, 1, 1, 1, 1, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 1, 1, 1, 1, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 1, 0],
        [0, 1, 1, 1, 1, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
    ]
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_disconnected():
    # Every isolated pixel becomes its own component; ids are assigned in
    # row-major scan order starting at 1, with 0 kept as background.
    arr = tf.cast(
        [
            [1, 0, 0, 1, 0, 0, 0, 0, 1],
            [0, 1, 0, 0, 0, 1, 0, 1, 0],
            [1, 0, 1, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0],
        ],
        tf.bool,
    )
    expected = [
        [1, 0, 0, 2, 0, 0, 0, 0, 3],
        [0, 4, 0, 0, 0, 5, 0, 6, 0],
        [7, 0, 8, 0, 0, 0, 9, 0, 0],
        [0, 0, 0, 0, 10, 0, 0, 0, 0],
        [0, 0, 11, 0, 0, 0, 0, 0, 0],
    ]
    np.testing.assert_equal(connected_components(arr).numpy(), expected)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_simple():
    """A plus-shaped mask forms one component labeled 1 everywhere it is set."""
    mask = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
    labeled = connected_components(tf.cast(mask, tf.bool)).numpy()
    # With a single component the labeling equals the 0/1 input mask itself.
    np.testing.assert_equal(labeled, mask)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_snake():
    # The whole snake is one connected component (id 1), so the labeling
    # equals the 0/1 input mask itself.
    np.testing.assert_equal(
        connected_components(tf.cast(SNAKE, tf.bool)).numpy(), SNAKE
    )


@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_snake_disconnected():
    # Removing any interior pixel splits the snake into exactly two
    # components; the two endpoints (1, 1) and (6, 3) are excluded because
    # removing them leaves the snake whole.
    for i in range(SNAKE.shape[0]):
        for j in range(SNAKE.shape[1]):
            # If we disconnect any part of the snake except for the endpoints,
            # there will be 2 components.
            if SNAKE[i, j] and (i, j) not in [(1, 1), (6, 3)]:
                disconnected_snake = SNAKE.copy()
                disconnected_snake[i, j] = 0
                components = connected_components(tf.cast(disconnected_snake, tf.bool))
                assert np.max(components) == 2
                bins = np.bincount(components.numpy().ravel())
                # Nonzero number of pixels labeled 0, 1, or 2.
                assert bins[0] > 0
                assert bins[1] > 0
                assert bins[2] > 0
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_multiple_images():
    # Component ids are unique across the whole batch, not per image.  The
    # expected labels also show 4-connectivity: the lone pixel at (2, 0) of
    # the last image gets its own id (8) despite touching id 6 diagonally.
    images = tf.cast(
        [
            [[1, 1, 1, 1], [1, 0, 0, 1], [1, 0, 0, 1], [1, 1, 1, 1]],
            [[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]],
            [[1, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]],
        ],
        tf.bool,
    )
    expected = [
        [[1, 1, 1, 1], [1, 0, 0, 1], [1, 0, 0, 1], [1, 1, 1, 1]],
        [[2, 0, 0, 3], [0, 0, 0, 0], [0, 0, 0, 0], [4, 0, 0, 5]],
        [[6, 6, 0, 7], [0, 6, 6, 0], [8, 0, 6, 0], [0, 0, 6, 6]],
    ]
    np.testing.assert_equal(connected_components(images).numpy(), expected)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_zeros():
    """An all-false batch has no components, so every label is zero."""
    all_false = tf.zeros((100, 20, 50), tf.bool)
    np.testing.assert_equal(
        connected_components(all_false), np.zeros((100, 20, 50)),
    )
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_ones():
    """An all-true batch has one component per image, with batch-unique ids."""
    all_true = tf.ones((100, 20, 50), tf.bool)
    # Image i is entirely labeled i + 1.
    per_image_ids = np.arange(1, 101)[:, None, None]
    np.testing.assert_equal(
        connected_components(all_true),
        np.tile(per_image_ids, [1, 20, 50]),
    )
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_ones_small():
    """A single all-true image is one component labeled 1 everywhere."""
    ones_image = tf.ones((3, 5), tf.bool)
    np.testing.assert_equal(
        connected_components(ones_image).numpy(), np.ones((3, 5)),
    )
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_random_scipy():
    """Compare the op against the scipy reference on random binary images."""
    np.random.seed(42)
    # ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``bool`` is the documented replacement and behaves identically.
    images = np.random.randint(0, 2, size=(10, 100, 200)).astype(bool)
    expected = connected_components_reference_implementation(images)
    if expected is None:
        # scipy is not installed; the reference implementation already logged it.
        return

    np.testing.assert_equal(connected_components(images).numpy(), expected)
def connected_components_reference_implementation(images):
    """Label connected components with SciPy, as a reference for the TFA op.

    Args:
        images: a single 2-D binary image, or a 3-D batch of them
            (batch, height, width). Anything ``np.asarray`` accepts works.

    Returns:
        An integer array of the same spatial shape where each component gets
        a distinct positive id. Ids are unique across the whole batch: the
        ids of image ``i`` start after the last id used by image ``i - 1``.
        Returns ``None`` when scipy is unavailable.

    Raises:
        ValueError: if ``images`` is neither 2-D nor 3-D (previously this
            crashed with an unhelpful ``NameError``).
    """
    try:
        # ``scipy.ndimage.label`` is the public API; the old
        # ``scipy.ndimage.measurements`` submodule is deprecated and has been
        # removed from recent SciPy releases.
        from scipy.ndimage import label
    except ImportError:
        logging.exception("Skipping test method because scipy could not be loaded")
        return
    image_or_images = np.asarray(images)
    if len(image_or_images.shape) == 2:
        images = image_or_images[None, :, :]
    elif len(image_or_images.shape) == 3:
        images = image_or_images
    else:
        raise ValueError(
            "expected a 2-D image or a 3-D batch of images, got shape %s"
            % (image_or_images.shape,)
        )
    components = np.asarray([label(image)[0] for image in images])
    # Get the count of nonzero ids for each image, and offset each image's
    # nonzero ids using the cumulative sum so ids are unique batch-wide.
    num_ids_per_image = components.reshape(
        [-1, components.shape[1] * components.shape[2]]
    ).max(axis=-1)
    positive_id_start_per_image = np.cumsum(num_ids_per_image)
    for i in range(components.shape[0]):
        new_id_start = positive_id_start_per_image[i - 1] if i > 0 else 0
        components[i, components[i] > 0] += new_id_start
    if len(image_or_images.shape) == 2:
        return components[0, :, :]
    else:
        return components
| 34.107345 | 88 | 0.578433 |
bd6f316bc644d0a52b0a278b7794b5ab9ef3de67 | 1,717 | py | Python | fbotics/models/request.py | pasmod/fbotics | 6ddd5d1f2396f43a90a1fb529f896d15e47f72b3 | [
"BSD-3-Clause"
] | 3 | 2018-11-16T13:20:08.000Z | 2019-04-16T16:43:25.000Z | fbotics/models/request.py | pasmod/fbotics | 6ddd5d1f2396f43a90a1fb529f896d15e47f72b3 | [
"BSD-3-Clause"
] | 1 | 2018-12-16T16:58:58.000Z | 2018-12-16T16:58:58.000Z | fbotics/models/request.py | pasmod/fbotics | 6ddd5d1f2396f43a90a1fb529f896d15e47f72b3 | [
"BSD-3-Clause"
] | 2 | 2018-12-16T16:30:18.000Z | 2021-10-03T12:43:54.000Z | from fbotics.models.message import Message
from fbotics.models.recipient import Recipient
from schematics.exceptions import ValidationError
from schematics.models import Model
from schematics.types import StringType
from schematics.types.compound import ModelType
class Request(Model):
    """Model for a request to be sent by the client.

    Wraps a Send API payload: the recipient, the message itself, how the
    message is classified (``messaging_type``) and, for tagged sends, the
    message tag. ``validate_messaging_type`` enforces that a tag is only
    used together with the MESSAGE_TAG messaging type.

    # Arguments
        messaging_type: The messaging type of the message being sent.
        recipient: recipient object.
        message: message object. Cannot be sent with sender_action.
        tag: Optional. The message tag string.
    """

    # Defaults to RESPONSE; MESSAGE_TAG additionally requires ``tag`` below.
    messaging_type = StringType(
        required=True, choices=["RESPONSE", "MESSAGE_TAG", "UPDATE"], default="RESPONSE"
    )
    # Allowed message-tag values; only meaningful when messaging_type is
    # MESSAGE_TAG (see validate_messaging_type).
    tag = StringType(
        required=False,
        choices=[
            "BUSINESS_PRODUCTIVITY",
            "COMMUNITY_ALERT",
            "CONFIRMED_EVENT_REMINDER",
            "NON_PROMOTIONAL_SUBSCRIPTION",
            "PAIRING_UPDATE",
            "APPLICATION_UPDATE",
            "ACCOUNT_UPDATE",
            "PAYMENT_UPDATE",
            "PERSONAL_FINANCE_UPDATE",
            "SHIPPING_UPDATE",
            "RESERVATION_UPDATE",
            "ISSUE_RESOLUTION",
            "APPOINTMENT_UPDATE",
            "GAME_EVENT",
            "TRANSPORTATION_UPDATE",
            "FEATURE_FUNCTIONALITY_UPDATE",
            "TICKET_UPDATE",
        ],
    )
    recipient = ModelType(Recipient)
    message = ModelType(Message)

    def validate_messaging_type(self, data, value):
        # schematics validation hook: reject a tag on any non-MESSAGE_TAG send.
        if data["tag"] and data["messaging_type"] != "MESSAGE_TAG":
            raise ValidationError(
                "Messaging type should be MESSAGE_TAG for tagged messages"
            )
        return value
| 31.796296 | 88 | 0.630169 |
69f96fa2bee6b48dbc20fdfabe7eb927ce6dde88 | 6,002 | py | Python | corehq/apps/userreports/indicators/factory.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/apps/userreports/indicators/factory.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/apps/userreports/indicators/factory.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from jsonobject.exceptions import BadValueError
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.apps.userreports.expressions import ExpressionFactory
from corehq.apps.userreports.filters import SinglePropertyValueFilter, CustomFilter
from corehq.apps.userreports.filters.factory import FilterFactory
from corehq.apps.userreports.indicators import (
BooleanIndicator,
Column,
CompoundIndicator,
DueListDateIndicator,
LedgerBalancesIndicator,
RawIndicator,
SmallBooleanIndicator,
)
from corehq.apps.userreports.indicators.specs import (
BooleanIndicatorSpec,
ChoiceListIndicatorSpec,
DueListDateIndicatorSpec,
ExpressionIndicatorSpec,
IndicatorSpecBase,
LedgerBalancesIndicatorSpec,
RawIndicatorSpec,
SmallBooleanIndicatorSpec,
)
def _build_count_indicator(spec, context):
    """Build an indicator that matches every document (its filter always passes)."""
    count_spec = IndicatorSpecBase.wrap(spec)
    always_true = CustomFilter(lambda item, context=None: True)
    return BooleanIndicator(
        count_spec.display_name,
        count_spec.column_id,
        always_true,
        count_spec,
    )
def _build_raw_indicator(spec, context):
    """Build an indicator that reads a value directly via the spec's getter."""
    raw_spec = RawIndicatorSpec.wrap(spec)
    target_column = Column(
        id=raw_spec.column_id,
        datatype=raw_spec.datatype,
        is_nullable=raw_spec.is_nullable,
        is_primary_key=raw_spec.is_primary_key,
        create_index=raw_spec.create_index,
    )
    return RawIndicator(
        raw_spec.display_name,
        target_column,
        getter=raw_spec.getter,
        wrapped_spec=raw_spec,
    )
def _build_expression_indicator(spec, context):
    """Build an indicator whose value comes from the spec's parsed expression."""
    expression_spec = ExpressionIndicatorSpec.wrap(spec)
    target_column = Column(
        id=expression_spec.column_id,
        datatype=expression_spec.datatype,
        is_nullable=expression_spec.is_nullable,
        is_primary_key=expression_spec.is_primary_key,
        create_index=expression_spec.create_index,
    )
    return RawIndicator(
        expression_spec.display_name,
        target_column,
        getter=expression_spec.parsed_expression(context),
        wrapped_spec=expression_spec,
    )
def _build_small_boolean_indicator(spec, context):
    """Build a SmallBooleanIndicator from its spec's filter definition."""
    bool_spec = SmallBooleanIndicatorSpec.wrap(spec)
    doc_filter = FilterFactory.from_spec(bool_spec.filter, context)
    return SmallBooleanIndicator(
        bool_spec.display_name,
        bool_spec.column_id,
        doc_filter,
        wrapped_spec=bool_spec,
    )
def _build_boolean_indicator(spec, context):
    """Build a BooleanIndicator from its spec's filter definition."""
    bool_spec = BooleanIndicatorSpec.wrap(spec)
    doc_filter = FilterFactory.from_spec(bool_spec.filter, context)
    return BooleanIndicator(
        bool_spec.display_name,
        bool_spec.column_id,
        doc_filter,
        wrapped_spec=bool_spec,
    )
def _build_choice_list_indicator(spec, context):
    """Build a compound indicator with one boolean sub-indicator per choice."""
    choice_spec = ChoiceListIndicatorSpec.wrap(spec)
    base_display_name = choice_spec.display_name

    choice_indicators = []
    for choice in spec['choices']:
        # Each choice gets its own column ("<column_id>_<choice>") that is
        # true when the property's value equals that choice.
        choice_indicators.append(BooleanIndicator(
            display_name='{base} ({choice})'.format(
                base=base_display_name, choice=choice),
            column_id='{col}_{choice}'.format(
                col=spec['column_id'], choice=choice),
            filter=SinglePropertyValueFilter(
                expression=choice_spec.getter,
                operator=choice_spec.get_operator(),
                reference_expression=ExpressionFactory.from_spec(choice),
            ),
            wrapped_spec=None,
        ))
    return CompoundIndicator(base_display_name, choice_indicators, choice_spec)
def _build_ledger_balances_indicator(spec, context):
    """Build a LedgerBalancesIndicator from its wrapped spec."""
    return LedgerBalancesIndicator(LedgerBalancesIndicatorSpec.wrap(spec))
def _build_due_list_date_indicator(spec, context):
    """Build a DueListDateIndicator from its wrapped spec."""
    return DueListDateIndicator(DueListDateIndicatorSpec.wrap(spec))
def _build_repeat_iteration_indicator(spec, context):
    """Build the implicit indicator exposing the evaluation context's iteration."""
    iteration_column = Column(
        id="repeat_iteration",
        datatype="integer",
        is_nullable=False,
        is_primary_key=True,
    )
    return RawIndicator(
        "base document iteration",
        iteration_column,
        getter=lambda doc, ctx: ctx.iteration,
        wrapped_spec=None,
    )
def _build_inserted_at(spec, context):
    """Build the implicit indicator exposing the context's inserted_timestamp."""
    inserted_at_column = Column(
        id="inserted_at",
        datatype="datetime",
        is_nullable=False,
        is_primary_key=False,
    )
    return RawIndicator(
        "inserted at",
        inserted_at_column,
        getter=lambda doc, ctx: ctx.inserted_timestamp,
        wrapped_spec=None,
    )
class IndicatorFactory(object):
    """Construct indicator objects from their JSON spec dictionaries.

    Dispatches on the spec's root-level ``type`` key via ``constructor_map``.
    """

    constructor_map = {
        'small_boolean': _build_small_boolean_indicator,
        'boolean': _build_boolean_indicator,
        'choice_list': _build_choice_list_indicator,
        'due_list_date': _build_due_list_date_indicator,
        'count': _build_count_indicator,
        'expression': _build_expression_indicator,
        'inserted_at': _build_inserted_at,
        'ledger_balances': _build_ledger_balances_indicator,
        'raw': _build_raw_indicator,
        'repeat_iteration': _build_repeat_iteration_indicator,
    }

    @classmethod
    def from_spec(cls, spec, context=None):
        """Validate ``spec`` and build the matching indicator.

        Raises ``BadSpecError`` for missing/unknown types or when the spec
        fails jsonobject validation.
        """
        cls.validate_spec(spec)
        try:
            return cls.constructor_map[spec['type']](spec, context)
        except BadValueError as e:
            # for now reraise jsonobject exceptions as BadSpecErrors
            raise BadSpecError(str(e))

    @classmethod
    def validate_spec(cls, spec):
        """Check that ``spec`` carries a supported root-level ``type``."""
        # Fix: a classmethod's first parameter is the class; it was
        # misleadingly named ``self`` before.
        if 'type' not in spec:
            raise BadSpecError(_('Indicator specification must include a root level type field.'))
        elif spec['type'] not in cls.constructor_map:
            raise BadSpecError(
                _('Illegal indicator type: "{0}", must be one of the following choice: ({1})'.format(
                    spec['type'],
                    ', '.join(cls.constructor_map)
                ))
            )
9cf78f4e10591400760d3d1d81f5c3cde7969459 | 3,891 | py | Python | test/common/json/config_schemas_test_data/test_access_log_schema.py | rishabhkumar296/envoy | 1b040ff0e029059c7aaa6816fccb2419c02675b1 | [
"Apache-2.0"
] | 27 | 2017-10-27T03:18:58.000Z | 2019-02-07T21:22:20.000Z | test/common/json/config_schemas_test_data/test_access_log_schema.py | rishabhkumar296/envoy | 1b040ff0e029059c7aaa6816fccb2419c02675b1 | [
"Apache-2.0"
] | 14 | 2018-02-16T20:47:38.000Z | 2019-01-19T23:03:01.000Z | test/common/json/config_schemas_test_data/test_access_log_schema.py | rishabhkumar296/envoy | 1b040ff0e029059c7aaa6816fccb2419c02675b1 | [
"Apache-2.0"
] | 7 | 2017-11-26T06:26:49.000Z | 2019-03-26T03:09:00.000Z | from util import get_blob
from util import true, false
ACCESS_LOG_BLOB = {
"access_log": [{
"filter": {
"type":
"logical_and",
"filters": [{
"type": "not_healthcheck"
}, {
"type": "runtime",
"key": "access_log.front_access_log"
}]
},
"path": "/var/log/envoy/access.log"
},
{
"filter": {
"type":
"logical_or",
"filters": [{
"runtime_key": "access_log.access_error.status",
"type": "status_code",
"value": 500,
"op": ">="
}, {
"type": "status_code",
"value": 429,
"op": "="
},
{
"runtime_key": "access_log.access_error.duration",
"type": "duration",
"value": 1000,
"op": ">="
}, {
"type": "traceable_request"
}]
},
"path": "/var/log/envoy/access_error.log"
}]
}
def test(writer):
for idx, item in enumerate(ACCESS_LOG_BLOB["access_log"]):
writer.write_test_file(
'Valid_idx_' + str(idx),
schema='ACCESS_LOG_SCHEMA',
data=get_blob(item),
throws=False,
)
blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1]
blob['filter']['filters'][0]['op'] = '<'
writer.write_test_file(
'FilterOperatorIsNotSupportedLessThan',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1]
blob['filter']['filters'][0]['op'] = '<='
writer.write_test_file(
'FilterOperatorIsNotSupportedLessThanEqual',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = get_blob(ACCESS_LOG_BLOB)['access_log'][1]
blob['filter']['filters'][0]['op'] = '>'
writer.write_test_file(
'FilterOperatorIsNotSupportedGreaterThan',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = {"path": "/dev/null", "filter": {"type": "unknown"}}
writer.write_test_file(
'FilterTypeIsNotSupported',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = {"path": "/dev/null", "filter": {"type": "logical_or", "filters": []}}
writer.write_test_file(
'LessThanTwoFiltersInListNoneLogicalOrThrows',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = {"path": "/dev/null", "filter": {"type": "logical_and", "filters": []}}
writer.write_test_file(
'LessThanTwoFiltersInListNoneLogicalAndThrows',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = {
"path": "/dev/null",
"filter": {
"type": "logical_or",
"filters": [{
"type": "not_healthcheck"
}]
}
}
writer.write_test_file(
'LessThanTwoFiltersInListOneLogicalOrThrows',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
blob = {
"path": "/dev/null",
"filter": {
"type": "logical_and",
"filters": [{
"type": "not_healthcheck"
}]
}
}
writer.write_test_file(
'LessThanTwoFiltersInListOneLogicalAndThrows',
schema='ACCESS_LOG_SCHEMA',
data=blob,
throws=True,
)
| 28.40146 | 93 | 0.457466 |
22d1f65fa4f52b866e26e4b5a5dfdef6512b255f | 11,797 | py | Python | tests/test_observers/test_mongo_observer.py | godmethium/sacred | f669382a8e5b33b674538d6c21253bdba5f52f20 | [
"MIT"
] | 16 | 2021-07-18T12:54:40.000Z | 2022-03-01T02:04:53.000Z | tests/test_observers/test_mongo_observer.py | godmethium/sacred | f669382a8e5b33b674538d6c21253bdba5f52f20 | [
"MIT"
] | 1 | 2021-08-11T09:25:13.000Z | 2021-08-23T04:38:29.000Z | tests/test_observers/test_mongo_observer.py | godmethium/sacred | f669382a8e5b33b674538d6c21253bdba5f52f20 | [
"MIT"
] | 8 | 2019-10-08T21:37:31.000Z | 2021-11-19T13:17:23.000Z | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import datetime
import mock
import pytest
from sacred.metrics_logger import ScalarMetricLogEntry, linearize_metrics
pymongo = pytest.importorskip("pymongo")
mongomock = pytest.importorskip("mongomock")
from sacred.dependencies import get_digest
from sacred.observers.mongo import (MongoObserver, force_bson_encodeable)
# Fixed timestamps reused across the tests below: T1 as a start/beat time,
# T2 as a later heartbeat/stop time.
T1 = datetime.datetime(1999, 5, 4, 3, 2, 1, 0)
T2 = datetime.datetime(1999, 5, 5, 5, 5, 5, 5)
@pytest.fixture
def mongo_obs():
    """Return a MongoObserver backed by mongomock collections and a mocked gridfs."""
    database = mongomock.MongoClient().db
    grid_fs = mock.MagicMock()
    return MongoObserver(database.runs, grid_fs,
                         metrics_collection=database.metrics)
@pytest.fixture()
def sample_run():
    """Return the keyword arguments for a typical ``started_event`` call."""
    return {
        '_id': 'FEDCBA9876543210',
        'ex_info': {'name': 'test_exp', 'sources': [], 'doc': '',
                    'base_dir': '/tmp'},
        'command': 'run',
        'host_info': {'hostname': 'test_host', 'cpu_count': 1,
                      'python_version': '3.4'},
        'start_time': T1,
        'config': {'config': 'True', 'foo': 'bar', 'answer': 42},
        'meta_info': {'comment': 'test run'},
    }
def test_mongo_observer_started_event_creates_run(mongo_obs, sample_run):
    """With _id=None, started_event must create one fully initialised run."""
    sample_run['_id'] = None
    inserted_id = mongo_obs.started_event(**sample_run)
    assert inserted_id is not None
    assert mongo_obs.runs.count() == 1
    expected_doc = {
        '_id': inserted_id,
        'experiment': sample_run['ex_info'],
        'format': mongo_obs.VERSION,
        'command': sample_run['command'],
        'host': sample_run['host_info'],
        'start_time': sample_run['start_time'],
        'heartbeat': None,
        'info': {},
        'captured_out': '',
        'artifacts': [],
        'config': sample_run['config'],
        'meta': sample_run['meta_info'],
        'status': 'RUNNING',
        'resources': []
    }
    assert mongo_obs.runs.find_one() == expected_doc
def test_mongo_observer_started_event_uses_given_id(mongo_obs, sample_run):
    """When a run id is supplied, started_event stores and returns it unchanged."""
    returned_id = mongo_obs.started_event(**sample_run)
    assert returned_id == sample_run['_id']
    assert mongo_obs.runs.count() == 1
    assert mongo_obs.runs.find_one()['_id'] == sample_run['_id']
def test_mongo_observer_equality(mongo_obs):
    """Observers over the same runs collection compare equal; other values do not."""
    other = MongoObserver(mongo_obs.runs, mock.MagicMock())
    assert mongo_obs == other
    assert not mongo_obs != other

    assert not mongo_obs == 'foo'
    assert mongo_obs != 'foo'
def test_mongo_observer_heartbeat_event_updates_run(mongo_obs, sample_run):
    """heartbeat_event must refresh info, captured output, result and beat time."""
    mongo_obs.started_event(**sample_run)

    new_info = {'my_info': [1, 2, 3], 'nr': 7}
    captured = 'some output'
    mongo_obs.heartbeat_event(info=new_info, captured_out=captured,
                              beat_time=T2, result=1337)

    assert mongo_obs.runs.count() == 1
    run_doc = mongo_obs.runs.find_one()
    assert run_doc['heartbeat'] == T2
    assert run_doc['result'] == 1337
    assert run_doc['info'] == new_info
    assert run_doc['captured_out'] == captured
def test_mongo_observer_completed_event_updates_run(mongo_obs, sample_run):
    """completed_event must record stop time, result and COMPLETED status."""
    mongo_obs.started_event(**sample_run)
    mongo_obs.completed_event(stop_time=T2, result=42)

    assert mongo_obs.runs.count() == 1
    run_doc = mongo_obs.runs.find_one()
    assert run_doc['stop_time'] == T2
    assert run_doc['result'] == 42
    assert run_doc['status'] == 'COMPLETED'
def test_mongo_observer_interrupted_event_updates_run(mongo_obs, sample_run):
    """interrupted_event must record stop time and the INTERRUPTED status."""
    mongo_obs.started_event(**sample_run)
    mongo_obs.interrupted_event(interrupt_time=T2, status='INTERRUPTED')

    assert mongo_obs.runs.count() == 1
    run_doc = mongo_obs.runs.find_one()
    assert run_doc['stop_time'] == T2
    assert run_doc['status'] == 'INTERRUPTED'
def test_mongo_observer_failed_event_updates_run(mongo_obs, sample_run):
    """failed_event must record stop time, FAILED status and the traceback text."""
    mongo_obs.started_event(**sample_run)

    traceback_text = "lots of errors and\nso\non..."
    mongo_obs.failed_event(fail_time=T2, fail_trace=traceback_text)

    assert mongo_obs.runs.count() == 1
    run_doc = mongo_obs.runs.find_one()
    assert run_doc['stop_time'] == T2
    assert run_doc['status'] == 'FAILED'
    assert run_doc['fail_trace'] == traceback_text
def test_mongo_observer_artifact_event(mongo_obs, sample_run):
    """artifact_event must upload the file to gridfs and register it on the run."""
    mongo_obs.started_event(**sample_run)

    mongo_obs.artifact_event('mysetup', "setup.py")

    assert mongo_obs.fs.put.called
    # The gridfs filename ends with the artifact name, not the source path.
    assert mongo_obs.fs.put.call_args[1]['filename'].endswith('mysetup')
    assert mongo_obs.runs.find_one()['artifacts']
def test_mongo_observer_resource_event(mongo_obs, sample_run):
    """resource_event must hash the file and record (path, md5) on the run."""
    mongo_obs.started_event(**sample_run)

    resource_path = "setup.py"
    expected_digest = get_digest(resource_path)
    mongo_obs.resource_event(resource_path)

    assert mongo_obs.fs.exists.called
    mongo_obs.fs.exists.assert_any_call(filename=resource_path)
    assert mongo_obs.runs.find_one()['resources'] == [(resource_path,
                                                       expected_digest)]
def test_force_bson_encodable_doesnt_change_valid_document():
    """A document with only BSON-friendly values must pass through unchanged."""
    d = {'int': 1, 'string': 'foo', 'float': 23.87, 'list': ['a', 1, True],
         'bool': True, 'cr4zy: _but_ [legal) Key!': '$illegal.key.as.value',
         'datetime': datetime.datetime.utcnow(), 'tuple': (1, 2.0, 'three'),
         'none': None}
    assert force_bson_encodeable(d) == d
def test_force_bson_encodable_substitutes_illegal_value_with_strings():
    """Modules become str(); '$'/'.' in keys and non-string keys are rewritten."""
    d = {
        'a_module': datetime,
        'some_legal_stuff': {'foo': 'bar', 'baz': [1, 23, 4]},
        'nested': {
            'dict': {
                'with': {
                    'illegal_module': mock
                }
            }
        },
        '$illegal': 'because it starts with a $',
        'il.legal': 'because it contains a .',
        12.7: 'illegal because it is not a string key'
    }
    # Expected rewrites: leading '$' -> '@', '.' -> ',', non-string keys are
    # stringified (with '.' also replaced by ',').
    expected = {
        'a_module': str(datetime),
        'some_legal_stuff': {'foo': 'bar', 'baz': [1, 23, 4]},
        'nested': {
            'dict': {
                'with': {
                    'illegal_module': str(mock)
                }
            }
        },
        '@illegal': 'because it starts with a $',
        'il,legal': 'because it contains a .',
        '12,7': 'illegal because it is not a string key'
    }
    assert force_bson_encodeable(d) == expected
@pytest.fixture
def logged_metrics():
    """Return scalar metric entries for two series, loss logged in two batches."""
    measurements = [
        ("training.loss", 10, 1),
        ("training.loss", 20, 2),
        ("training.loss", 30, 3),
        ("training.accuracy", 10, 100),
        ("training.accuracy", 20, 200),
        ("training.accuracy", 30, 300),
        ("training.loss", 40, 10),
        ("training.loss", 50, 20),
        ("training.loss", 60, 30),
    ]
    return [
        ScalarMetricLogEntry(name, step, datetime.datetime.utcnow(), value)
        for name, step, value in measurements
    ]
def test_log_metrics(mongo_obs, sample_run, logged_metrics):
    """
    Test storing scalar measurements

    Test whether measurements logged using _run.metrics.log_scalar_metric
    are being stored in the 'metrics' collection
    and that the experiment 'info' dictionary contains a valid reference
    to the metrics collection for each of the metric.

    Metrics are identified by name (e.g.: 'training.loss') and by the
    experiment run that produced them. Each metric contains a list of x values
    (e.g. iteration step), y values (measured values) and timestamps of when
    each of the measurements was taken.
    """
    # Start the experiment
    mongo_obs.started_event(**sample_run)

    # Initialize the info dictionary and standard output with arbitrary values
    info = {'my_info': [1, 2, 3], 'nr': 7}
    outp = 'some output'

    # Take first 6 measured events, group them by metric name
    # and store the measured series to the 'metrics' collection
    # and reference the newly created records in the 'info' dictionary.
    mongo_obs.log_metrics(linearize_metrics(logged_metrics[:6]), info)
    # Call standard heartbeat event (store the info dictionary to the database)
    mongo_obs.heartbeat_event(info=info, captured_out=outp, beat_time=T1,
                              result=0)

    # There should be only one run stored
    assert mongo_obs.runs.count() == 1
    db_run = mongo_obs.runs.find_one()
    # ... and the info dictionary should contain a list of created metrics
    assert "metrics" in db_run['info']
    assert type(db_run['info']["metrics"]) == list

    # The metrics, stored in the metrics collection,
    # should be two (training.loss and training.accuracy)
    assert mongo_obs.metrics.count() == 2
    # Read the training.loss metric and make sure it references the correct run
    # and that the run (in the info dictionary) references the correct metric record.
    loss = mongo_obs.metrics.find_one({"name": "training.loss", "run_id": db_run['_id']})
    assert {"name": "training.loss", "id": str(loss["_id"])} in db_run['info']["metrics"]
    assert loss["steps"] == [10, 20, 30]
    assert loss["values"] == [1, 2, 3]
    # Timestamps within a metric must be non-decreasing.
    for i in range(len(loss["timestamps"]) - 1):
        assert loss["timestamps"][i] <= loss["timestamps"][i + 1]

    # Read the training.accuracy metric and check the references as with the training.loss above
    accuracy = mongo_obs.metrics.find_one({"name": "training.accuracy", "run_id": db_run['_id']})
    assert {"name": "training.accuracy", "id": str(accuracy["_id"])} in db_run['info']["metrics"]
    assert accuracy["steps"] == [10, 20, 30]
    assert accuracy["values"] == [100, 200, 300]

    # Now, process the remaining events
    # The metrics shouldn't be overwritten, but appended instead.
    mongo_obs.log_metrics(linearize_metrics(logged_metrics[6:]), info)
    mongo_obs.heartbeat_event(info=info, captured_out=outp, beat_time=T2,
                              result=0)

    assert mongo_obs.runs.count() == 1
    db_run = mongo_obs.runs.find_one()
    assert "metrics" in db_run['info']
    # The newly added metrics belong to the same run and have the same names, so the total number
    # of metrics should not change.
    assert mongo_obs.metrics.count() == 2

    loss = mongo_obs.metrics.find_one({"name": "training.loss", "run_id": db_run['_id']})
    assert {"name": "training.loss", "id": str(loss["_id"])} in db_run['info']["metrics"]
    # ... but the values should be appended to the original list
    assert loss["steps"] == [10, 20, 30, 40, 50, 60]
    assert loss["values"] == [1, 2, 3, 10, 20, 30]
    for i in range(len(loss["timestamps"]) - 1):
        assert loss["timestamps"][i] <= loss["timestamps"][i + 1]

    # The accuracy series received no new entries and must be unchanged.
    accuracy = mongo_obs.metrics.find_one({"name": "training.accuracy", "run_id": db_run['_id']})
    assert {"name": "training.accuracy", "id": str(accuracy["_id"])} in db_run['info']["metrics"]
    assert accuracy["steps"] == [10, 20, 30]
    assert accuracy["values"] == [100, 200, 300]

    # Make sure that when starting a new experiment, new records in metrics are created
    # instead of appending to the old ones.
    sample_run["_id"] = "NEWID"
    # Start the experiment
    mongo_obs.started_event(**sample_run)
    mongo_obs.log_metrics(linearize_metrics(logged_metrics[:4]), info)
    mongo_obs.heartbeat_event(info=info, captured_out=outp, beat_time=T1,
                              result=0)
    # A new run has been created
    assert mongo_obs.runs.count() == 2
    # Another 2 metrics have been created
    assert mongo_obs.metrics.count() == 4
956666c69f540552e582bb7e8b4c2490dcb1a7a0 | 6,397 | py | Python | api/operators.py | DavidONeill75101/open-transport-operator-api | 9f5a0687093b18fafda8e824f8971b506ffa6f09 | [
"Apache-2.0"
] | null | null | null | api/operators.py | DavidONeill75101/open-transport-operator-api | 9f5a0687093b18fafda8e824f8971b506ffa6f09 | [
"Apache-2.0"
] | null | null | null | api/operators.py | DavidONeill75101/open-transport-operator-api | 9f5a0687093b18fafda8e824f8971b506ffa6f09 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2021 David O'Neill
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
class Operators(object):
def __init__(self):
"""Get data from google spreadsheets and store in instance variables
"""
operator_sheet_id = "1RTFY75b8yv6fQjbe9hSzBlzBhzuZ7lGZN9IC1UxzPzY"
operator_sheet_name = "operator-info-live"
operator_URL = "https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}".format(operator_sheet_id, operator_sheet_name)
self.operator_df = pd.read_csv(operator_URL, dtype={"Operator id":str, "Mode":str})
mode_sheet_id = "10XNEu-CuM8e4GDWwN6-hP6Ep-KzVBNOYljn1tz8bvJ4"
mode_sheet_name = "mode-info-live"
mode_URL = "https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}".format(mode_sheet_id, mode_sheet_name)
self.mode_df = pd.read_csv(mode_URL, dtype={"id":str})
def get_modes(self):
"""Return json string detailing modes of transport available in line with API specification
"""
return self.mode_df.to_dict("records")
def populate_json_template(self, operator):
"""Generate data structure to represent operator in line with API specification
Args:
operator (dict): dictionary representing operator, generated from Pandas dataframe
Returns:
dict: dictionary representing operator in PAS 212 format
"""
template = {
"href": "",
"item-metadata": [
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": ""
},
{
"rel": "urn:X-hypercat:rels:hasHomepage",
"val": ""
},
{
"rel": "urn:X-opentransport:rels:hasId",
"val": ""
},
{
"rel": "urn:X-opentransport:rels:hasEmail",
"val": ""
},
{
"rel": "urn:X-opentransport:rels:hasPhone",
"val": ""
},
{
"rel": "urn:X-opentransport:rels:hasDefaultLanguage",
"val": ""
},
{
"rel": "urn:X-opentransport:rels:hasNumberModes",
"val": ""
},
{
"rel": "urn:X-opentransport:rels:hasMIPTAURL",
"val": ""
}
]
}
template["href"] = operator["Open Transport Account API URL"]
template["item-metadata"][0]["val"] = operator["Operator Description"]
template["item-metadata"][1]["val"] = operator["Operator URL (homepage)"]
template["item-metadata"][2]["val"] = operator["Operator id"]
template["item-metadata"][3]["val"] = operator["Customer Services Contact email"]
template["item-metadata"][4]["val"] = operator["Customer Services Contact Phone"]
template["item-metadata"][5]["val"] = operator["Default Language"]
modes = operator["Mode"].split(",")
template["item-metadata"][6]["val"] = len(modes)
for i, mode in enumerate(modes):
mode_details = [{
"rel": "urn:X-opentransport:rels:hasMode"+str(i+1)+"#Code",
"val": mode
},
{
"rel": "urn:X-opentransport:rels:hasMode"+str(i+1)+"#Description",
"val": self.mode_df.loc[self.mode_df["id"]==mode]["short-desc"].iloc[0]
}]
template["item-metadata"][-1:-1]=mode_details
template["item-metadata"][-1]["val"] = operator["Operator MIPTA URL"]
return template
def get_operator_by_id(self, operator_id):
"""Operator lookup by ID
Args:
operator_id (str):passed in through query parameter to represent the id of the operator being fetched - optional
Returns:
str: json string representing operator details in PAS212 format
"""
json_result = [
{
"catalogue-metadata": [
{
"rel": "urn:X-hypercat:rels:isContentType",
"val": "application/vnd.hypercat.catalogue+json"
},
{
"rel": "urn:X-hypercat:rels:hasDescription:en",
"val": "OpenTransport Operator Catalogue"
},
{
"rel": "urn:X-hypercat:rels:supportsSearch",
"val": "urn:X-hypercat:search:simple"
}
],
"items": [
]
}
]
operator = self.operator_df.loc[self.operator_df["Operator id"]==operator_id]
if not operator.empty:
operator = operator.to_dict("records")[0]
operator_info = self.populate_json_template(operator)
json_result[0]["items"].append(operator_info)
elif operator.empty and operator_id is None:
operators = self.operator_df.to_dict("records")
for operator in operators:
operator_info = self.populate_json_template(operator)
json_result[0]["items"].append(operator_info)
else:
return None
return json_result
| 38.305389 | 144 | 0.505706 |
18d8d746db7ce25a4aa966145783929d54b50ff1 | 40,494 | py | Python | virt/ansible-latest/lib/python2.7/site-packages/ansible/module_utils/docker/common.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | null | null | null | virt/ansible-latest/lib/python2.7/site-packages/ansible/module_utils/docker/common.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | null | null | null | virt/ansible-latest/lib/python2.7/site-packages/ansible/module_utils/docker/common.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import re
import sys
from datetime import timedelta
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
# Flags describing which Docker SDK for Python flavour (if any) is installed.
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None

try:
    from requests.exceptions import SSLError
    from docker import __version__ as docker_version
    from docker.errors import APIError, NotFound, TLSParameterError
    from docker.tls import TLSConfig
    from docker import auth

    # The SDK renamed ``Client`` to ``APIClient`` in 2.0; import whichever
    # name exists and remember the major version for feature checks.
    if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
        HAS_DOCKER_PY_3 = True
        from docker import APIClient as Client
    elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
        HAS_DOCKER_PY_2 = True
        from docker import APIClient as Client
    else:
        from docker import Client

except ImportError as exc:
    # Remember why the import failed so modules can report it via fail_json.
    HAS_DOCKER_ERROR = str(exc)
    HAS_DOCKER_PY = False


# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
    # docker (Docker SDK for Python >= 2.0.0)
    import docker.models  # noqa: F401
    HAS_DOCKER_MODELS = True
except ImportError:
    HAS_DOCKER_MODELS = False

try:
    # docker-py (Docker SDK for Python < 2.0.0)
    import docker.ssladapter  # noqa: F401
    HAS_DOCKER_SSLADAPTER = True
except ImportError:
    HAS_DOCKER_SSLADAPTER = False


try:
    from requests.exceptions import RequestException
except ImportError:
    # Either docker-py is no longer using requests, or docker-py isn't around either,
    # or docker-py's dependency requests is missing. In any case, define an exception
    # class RequestException so that our code doesn't break.
    class RequestException(Exception):
        pass
# Connection defaults; these mirror the docker CLI's own defaults.
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
# Connection options shared by every docker_* module.  Each option falls back
# to the corresponding DOCKER_* environment variable when not set explicitly.
DOCKER_COMMON_ARGS = dict(
    docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
    tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
    api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
    timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
    ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
    client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
    client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
    ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
    tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
    validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
    debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = []
# A client certificate is useless without its key and vice versa.
DOCKER_REQUIRED_TOGETHER = [
    ['client_cert', 'client_key']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
    docker_version = None
    # No Docker SDK for Python. Create a place holder client to allow
    # instantiation of AnsibleModule and proper error handling
    class Client(object):  # noqa: F811
        def __init__(self, **kwargs):
            pass
    # Placeholder exception types so except-clauses below still compile.
    class APIError(Exception):  # noqa: F811
        pass
    class NotFound(Exception):  # noqa: F811
        pass
def is_image_name_id(name):
    """Check whether the given image name is in fact an image ID (hash)."""
    return bool(re.match('^sha256:[0-9a-fA-F]{64}$', name))
def is_valid_tag(tag, allow_empty=False):
    """Check whether the given string is a valid docker tag name.

    An empty/None tag is only accepted when ``allow_empty`` is true.
    """
    # See here ("Extended description") for a definition what tags can be:
    # https://docs.docker.com/engine/reference/commandline/tag/
    if tag:
        return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
    return allow_empty
def sanitize_result(data):
    """Sanitize data object for return to Ansible.

    When the data object contains types such as docker.types.containers.HostConfig,
    Ansible will fail when these are returned via exit_json or fail_json.
    HostConfig is derived from dict, but its constructor requires additional
    arguments. This function sanitizes data structures by recursively converting
    everything derived from dict to dict and everything derived from list (and tuple)
    to a list.
    """
    if isinstance(data, dict):
        return {key: sanitize_result(value) for key, value in data.items()}
    if isinstance(data, (list, tuple)):
        return [sanitize_result(item) for item in data]
    return data
class DockerBaseClass(object):
    """Minimal base class providing a debug flag and a no-op logging hook."""

    def __init__(self):
        self.debug = False

    def log(self, msg, pretty_print=False):
        """Debug logging hook; intentionally a no-op."""
        pass
def update_tls_hostname(result):
    """Fill in result['tls_hostname'] from result['docker_host'] if unset.

    The hostname is derived from the URL's network location, with any
    trailing ``:port`` stripped.
    """
    if result['tls_hostname'] is None:
        # get default machine name from the url
        parsed_url = urlparse(result['docker_host'])
        if ':' in parsed_url.netloc:
            result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
        else:
            # Bug fix: previously the whole ParseResult object was assigned
            # here instead of the host string, yielding a non-string hostname.
            result['tls_hostname'] = parsed_url.netloc
def _get_tls_config(fail_function, **kwargs):
    """Build a docker ``TLSConfig``, reporting bad parameters via ``fail_function``."""
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function("TLS config error: %s" % exc)
def get_connect_params(auth, fail_function):
    """Translate resolved auth settings into kwargs for the docker Client.

    Returns a dict with ``base_url``, ``version``, ``timeout`` and, when TLS
    is requested, a ``tls`` entry holding a ``TLSConfig`` object.
    """
    if auth['tls'] or auth['tls_verify']:
        # The SDK expects an https:// URL when TLS is in play.
        auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')

    result = dict(base_url=auth['docker_host'],
                  version=auth['api_version'],
                  timeout=auth['timeout'])

    # Collect the TLSConfig keyword arguments for the requested mode.
    tls_kwargs = None
    if auth['tls_verify']:
        # TLS with host verification; optionally with client certs and/or CA cert.
        tls_kwargs = dict(verify=True,
                          assert_hostname=auth['tls_hostname'],
                          ssl_version=auth['ssl_version'])
        if auth['cert_path'] and auth['key_path']:
            tls_kwargs['client_cert'] = (auth['cert_path'], auth['key_path'])
            if auth['cacert_path']:
                tls_kwargs['ca_cert'] = auth['cacert_path']
        elif auth['cacert_path']:
            tls_kwargs['ca_cert'] = auth['cacert_path']
    elif auth['tls']:
        # TLS without host verification; optionally with client certs.
        tls_kwargs = dict(verify=False,
                          ssl_version=auth['ssl_version'])
        if auth['cert_path'] and auth['key_path']:
            tls_kwargs['client_cert'] = (auth['cert_path'], auth['key_path'])

    if tls_kwargs is not None:
        result['tls'] = _get_tls_config(fail_function=fail_function, **tls_kwargs)
    return result
# Canned upgrade hints appended to SDK version-mismatch error messages.
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
                                    "Hint: if you do not need Python 2.6 support, try "
                                    "`pip uninstall docker-py` instead, followed by `pip install docker`.")
class AnsibleDockerClient(Client):
    """Docker API client wired into Ansible module plumbing.

    Merges the shared docker connection options into the module's argument
    spec, validates the installed Docker SDK for Python and the daemon's API
    version, and provides convenience lookups for containers, networks and
    images.  All failures are reported through the module's ``fail_json()``.

    Fixes applied in this revision:
    - ``auth_params`` passed the misspelled param_name ``'tls_verfy'`` to
      ``_get_value`` (harmless, since only cert/key names are special-cased,
      but wrong); corrected to ``'tls_verify'``.
    - The docker-py < 2.0 check compared the raw version string against a
      ``LooseVersion``; it now compares ``self.docker_py_version`` instead,
      avoiding reliance on reflected comparison operators.
    """

    def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
                 required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
                 min_docker_api_version=None, option_minimal_versions=None,
                 option_minimal_versions_ignore_params=None, fail_results=None):

        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        # Combine the common connection options with the module-specific spec.
        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if)

        NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))

        # docker_version is None when no SDK is installed; the failure paths
        # below exit via fail_json() before the version is ever compared.
        self.docker_py_version = LooseVersion(docker_version)

        if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
            self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
                      "SDK for Python) installed together as they use the same namespace and cause a corrupt "
                      "installation. Please uninstall both packages, and re-install only the docker-py or docker "
                      "python module (for %s's Python %s). It is recommended to install the docker module if no "
                      "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
                      "can leave the other module in a broken state." % (platform.node(), sys.executable))

        if not HAS_DOCKER_PY:
            if NEEDS_DOCKER_PY2:
                msg = missing_required_lib("Docker SDK for Python: docker")
                msg = msg + ", for example via `pip install docker`. The error was: %s"
            else:
                msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
                msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
            self.fail(msg % HAS_DOCKER_ERROR)

        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
            if not NEEDS_DOCKER_PY2:
                # The minimal required version is < 2.0 (and the current version as well).
                # Advertise docker (instead of docker-py) for non-Python-2.6 users.
                msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
            elif self.docker_py_version < LooseVersion('2.0'):
                msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
            else:
                msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
            self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))

        self.debug = self.module.params.get('debug')
        self.check_mode = self.module.check_mode
        self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)

        try:
            super(AnsibleDockerClient, self).__init__(**self._connect_params)
            self.docker_api_version_str = self.version()['ApiVersion']
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        if min_docker_api_version is not None:
            if self.docker_api_version < LooseVersion(min_docker_api_version):
                self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))

        if option_minimal_versions is not None:
            self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)

    def log(self, msg, pretty_print=False):
        """Debug logging hook; intentionally a no-op in production builds."""
        pass

    def fail(self, msg, **kwargs):
        """Abort the module run, merging any preset fail_results into the output."""
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    @staticmethod
    def _get_value(param_name, param_value, env_variable, default_value):
        """Resolve one connection option.

        Precedence: module parameter > environment variable > default.
        Boolean-looking strings are normalized to real booleans.
        """
        if param_value is not None:
            # take module parameter value
            if param_value in BOOLEANS_TRUE:
                return True
            if param_value in BOOLEANS_FALSE:
                return False
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                # DOCKER_CERT_PATH points to a directory; derive the file paths.
                if param_name == 'cert_path':
                    return os.path.join(env_value, 'cert.pem')
                if param_name == 'cacert_path':
                    return os.path.join(env_value, 'ca.pem')
                if param_name == 'key_path':
                    return os.path.join(env_value, 'key.pem')
                if env_value in BOOLEANS_TRUE:
                    return True
                if env_value in BOOLEANS_FALSE:
                    return False
                return env_value

        # take the default
        return default_value

    @property
    def auth_params(self):
        """Connection settings resolved from params, environment and defaults."""
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
        self.log('Getting credentials')

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = self.module.params.get(key)

        if self.module.params.get('use_tls'):
            # support use_tls option in docker_image.py. This will be deprecated.
            use_tls = self.module.params.get('use_tls')
            if use_tls == 'encrypt':
                params['tls'] = True
            if use_tls == 'verify':
                params['validate_certs'] = True

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
                                        DEFAULT_DOCKER_HOST),
            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
                                        'auto'),
            cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
            cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
            key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
            ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY),
            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
                                    DEFAULT_TIMEOUT_SECONDS),
        )

        update_tls_hostname(result)

        return result

    def _handle_ssl_error(self, error):
        """Translate a hostname-mismatch SSLError into an actionable message."""
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
                      "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
                      "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
                      "setting the `tls` parameter to true."
                      % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
        self.fail("SSL Exception: %s" % (error))

    def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
        """Check per-option minimal SDK/API versions; fail if an unsupported option is used."""
        self.option_minimal_versions = dict()
        for option in self.module.argument_spec:
            if ignore_params is not None:
                if option in ignore_params:
                    continue
            self.option_minimal_versions[option] = dict()
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_py = True
            support_docker_api = True
            if 'docker_py_version' in data:
                support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
            if 'docker_api_version' in data:
                support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
            data['supported'] = support_docker_py and support_docker_api
            # Fail if option is not supported but used
            if not data['supported']:
                # Test whether option is specified
                if 'detect_usage' in data:
                    used = data['detect_usage'](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and 'default' in self.module.argument_spec[option]:
                        used = self.module.params[option] != self.module.argument_spec[option]['default']
                if used:
                    # If the option is used, compose error message.
                    if 'usage_msg' in data:
                        usg = data['usage_msg']
                    else:
                        usg = 'set %s option' % (option, )
                    if not support_docker_api:
                        msg = 'Docker API version is %s. Minimum version required is %s to %s.'
                        msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
                    elif not support_docker_py:
                        msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
                        if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
                            msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
                        elif self.docker_py_version < LooseVersion('2.0.0'):
                            msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
                        else:
                            msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
                        msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
                    else:
                        # should not happen
                        msg = 'Cannot %s with your configuration.' % (usg, )
                    self.fail(msg)

    def get_container(self, name=None):
        '''
        Lookup a container (by name or ID prefix) and return the inspection results.
        '''
        if name is None:
            return None

        # Container names returned by the API are prefixed with a slash.
        search_name = name
        if not name.startswith('/'):
            search_name = '/' + name

        result = None
        try:
            for container in self.containers(all=True):
                self.log("testing container: %s" % (container['Names']))
                if isinstance(container['Names'], list) and search_name in container['Names']:
                    result = container
                    break
                if container['Id'].startswith(name):
                    result = container
                    break
                if container['Id'] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is not None:
            try:
                self.log("Inspecting container Id %s" % result['Id'])
                result = self.inspect_container(container=result['Id'])
                self.log("Completed container inspection")
            except NotFound as dummy:
                # The container disappeared between listing and inspection.
                return None
            except Exception as exc:
                self.fail("Error inspecting container: %s" % exc)

        return result

    def get_network(self, name=None, id=None):
        '''
        Lookup a network (by name, ID prefix or ID) and return the inspection results.
        '''
        if name is None and id is None:
            return None

        result = None
        if id is None:
            try:
                for network in self.networks():
                    self.log("testing network: %s" % (network['Name']))
                    if name == network['Name']:
                        result = network
                        break
                    if network['Id'].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:
                self.fail("Error retrieving network list: %s" % exc)

        if result is not None:
            id = result['Id']

        if id is not None:
            try:
                self.log("Inspecting network Id %s" % id)
                result = self.inspect_network(id)
                self.log("Completed network inspection")
            except NotFound as dummy:
                # The network disappeared between listing and inspection.
                return None
            except Exception as exc:
                self.fail("Error inspecting network: %s" % exc)

        return result

    def find_image(self, name, tag):
        '''
        Lookup an image (by name and tag) and return the inspection results.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if len(images) == 0:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == 'docker.io':
                # If docker.io is explicitly there in name, the image
                # isn't found in some cases (#41509)
                self.log("Check for docker.io image: %s" % repo_name)
                images = self._image_lookup(repo_name, tag)
                if len(images) == 0 and repo_name.startswith('library/'):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len('library/'):]
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if len(images) == 0:
                    # Last case: if docker.io wasn't there, it can be that
                    # the image wasn't found either (#15586)
                    lookup = "%s/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Registry returned more than one result for %s:%s" % (name, tag))

        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]['Id'])
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
            return inspection

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def find_image_by_id(self, id):
        '''
        Lookup an image (by ID) and return the inspection results.
        '''
        if not id:
            return None

        self.log("Find image %s (by ID)" % id)
        try:
            inspection = self.inspect_image(id)
        except Exception as exc:
            self.fail("Error inspecting image ID %s - %s" % (id, str(exc)))
        return inspection

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        '''
        try:
            response = self.images(name=name)
        except Exception as exc:
            self.fail("Error searching for image %s - %s" % (name, str(exc)))
        images = response
        if tag:
            lookup = "%s:%s" % (name, tag)
            images = []
            for image in response:
                tags = image.get('RepoTags')
                if tags and lookup in tags:
                    images = [image]
                    break
        return images

    def pull_image(self, name, tag="latest"):
        '''
        Pull an image.  Returns the inspection of the pulled image and a flag
        telling whether the image was already up to date before the pull.
        '''
        self.log("Pulling image %s:%s" % (name, tag))
        old_tag = self.find_image(name, tag)
        try:
            for line in self.pull(name, tag=tag, stream=True, decode=True):
                self.log(line, pretty_print=True)
                if line.get('error'):
                    if line.get('errorDetail'):
                        error_detail = line.get('errorDetail')
                        self.fail("Error pulling %s - code: %s message: %s" % (name,
                                                                              error_detail.get('code'),
                                                                              error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" % (name, line.get('error')))
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        new_tag = self.find_image(name, tag)

        return new_tag, old_tag == new_tag

    def report_warnings(self, result, warnings_key=None):
        '''
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        '''
        if warnings_key is None:
            warnings_key = ['Warnings']
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        if isinstance(result, Sequence):
            for warning in result:
                self.module.warn('Docker warning: {0}'.format(warning))
        elif isinstance(result, string_types) and result:
            self.module.warn('Docker warning: {0}'.format(result))

    def inspect_distribution(self, image):
        '''
        Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
        since prior versions did not support accessing private repositories.
        '''
        if self.docker_py_version < LooseVersion('4.0.0'):
            registry = auth.resolve_repository_name(image)[0]
            header = auth.get_config_header(self, registry)
            if header:
                return self._result(self._get(
                    self._url('/distribution/{0}/json', image),
                    headers={'X-Registry-Auth': header}
                ), json=True)
        return super(AnsibleDockerClient, self).inspect_distribution(image)
def compare_dict_allow_more_present(av, bv):
    '''
    Compare two dictionaries for whether every entry of the first is in the second.
    '''
    return all(key in bv and bv[key] == value for key, value in av.items())
def compare_generic(a, b, method, type):
    '''
    Compare values a and b as described by method and type.

    Returns ``True`` if the values compare equal, and ``False`` if not.
    ``a`` is usually the module's parameter, while ``b`` is a property
    of the current object. ``a`` must not be ``None`` (except for
    ``type == 'value'``).

    Valid values for ``method`` are:
    - ``ignore`` (always compare as equal);
    - ``strict`` (only compare if really equal)
    - ``allow_more_present`` (allow b to have elements which a does not have).

    Valid values for ``type`` are:
    - ``value``: for simple values (strings, numbers, ...);
    - ``list``: for ``list``s or ``tuple``s where order matters;
    - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
      matter;
    - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
      not matter and which contain ``dict``s; ``allow_more_present`` is used
      for the ``dict``s, and these are assumed to be dictionaries of values;
    - ``dict``: for dictionaries of values.
    '''
    if method == 'ignore':
        return True
    # Handle None on either side first.
    if a is None or b is None:
        if a == b:
            # Both are None: equal.
            return True
        if type == 'value':
            # Simple values: one-sided None is never equal.
            return False
        if method == 'allow_more_present' and a is None:
            # Nothing requested, so anything present is acceptable.
            return True
        # Otherwise the non-None iterable must be empty.
        return len(b if a is None else a) == 0
    # Both objects are non-None from here on.
    if type == 'value':
        return a == b
    if type == 'list':
        if method == 'strict':
            return a == b
        # allow_more_present: a must be an ordered subsequence of b.
        remaining = iter(b)
        return all(item in remaining for item in a)
    if type == 'dict':
        if method == 'strict':
            return a == b
        return compare_dict_allow_more_present(a, b)
    if type == 'set':
        set_a = set(a)
        set_b = set(b)
        if method == 'strict':
            return set_a == set_b
        return set_b >= set_a
    if type == 'set(dict)':
        # Every requested dict must be "contained" in some active dict.
        for av in a:
            if not any(compare_dict_allow_more_present(av, bv) for bv in b):
                return False
        if method == 'strict':
            # If we would know that both a and b do not contain duplicates,
            # we could simply compare len(a) to len(b) to finish this test.
            # We can assume that b has no duplicates (as it is returned by
            # docker), but we don't know for a.
            for bv in b:
                if not any(compare_dict_allow_more_present(av, bv) for av in a):
                    return False
        return True
class DifferenceTracker(object):
    """Collects named differences between requested parameters and active state."""

    def __init__(self):
        self._diff = []

    def add(self, name, parameter=None, active=None):
        """Record one difference: the requested and the currently active value."""
        self._diff.append({'name': name, 'parameter': parameter, 'active': active})

    def merge(self, other_tracker):
        """Absorb all differences recorded by another tracker."""
        self._diff.extend(other_tracker._diff)

    @property
    def empty(self):
        """True when no differences have been recorded."""
        return not self._diff

    def get_before_after(self):
        '''
        Return texts ``before`` and ``after``.
        '''
        before = {}
        after = {}
        for entry in self._diff:
            before[entry['name']] = entry['active']
            after[entry['name']] = entry['parameter']
        return before, after

    def has_difference_for(self, name):
        '''
        Returns a boolean if a difference exists for name
        '''
        return any(entry['name'] == name for entry in self._diff)

    def get_legacy_docker_container_diffs(self):
        '''
        Return differences in the docker_container legacy format.
        '''
        return [
            {entry['name']: {'parameter': entry['parameter'], 'container': entry['active']}}
            for entry in self._diff
        ]

    def get_legacy_docker_diffs(self):
        '''
        Return differences in the docker_container legacy format.
        '''
        return [entry['name'] for entry in self._diff]
def clean_dict_booleans_for_docker_api(data):
    '''
    Go doesn't like Python booleans 'True' or 'False', while Ansible is just
    fine with them in YAML. As such, they need to be converted in cases where
    we pass dictionaries to the Docker API (e.g. docker_network's
    driver_options and docker_prune's filters).
    '''
    if data is None:
        return dict()

    def stringify(value):
        # Booleans become Go-style lowercase strings; everything else str().
        if value is True:
            return 'true'
        if value is False:
            return 'false'
        return str(value)

    return dict((str(key), stringify(value)) for key, value in data.items())
def convert_duration_to_nanosecond(time_str):
    """
    Return time duration in nanosecond.

    Accepts docker-style duration strings such as ``'1h'``, ``'1m30s'`` or
    ``'500ms'``; raises ValueError for non-strings and unparsable input.
    """
    if not isinstance(time_str, str):
        raise ValueError('Missing unit in duration - %s' % time_str)

    pattern = re.compile(
        r'^(((?P<hours>\d+)h)?'
        r'((?P<minutes>\d+)m(?!s))?'
        r'((?P<seconds>\d+)s)?'
        r'((?P<milliseconds>\d+)ms)?'
        r'((?P<microseconds>\d+)us)?)$'
    )
    match = pattern.match(time_str)
    if not match:
        raise ValueError('Invalid time duration - %s' % time_str)

    # Keep only the units that actually appeared in the string.
    kwargs = dict((unit, int(amount))
                  for unit, amount in match.groupdict().items() if amount)
    delta = timedelta(**kwargs)
    # Integer arithmetic (no total_seconds()) to avoid float rounding.
    total_microseconds = delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
    return total_microseconds * 10 ** 3
def parse_healthcheck(healthcheck):
    """
    Return dictionary of healthcheck parameters and boolean if
    healthcheck defined in image was requested to be disabled.
    """
    if (not healthcheck) or (not healthcheck.get('test')):
        return None, None

    # Options whose string values must be converted to nanoseconds.
    duration_options = ('interval', 'timeout', 'start_period')
    result = dict()
    for option in ('test', 'interval', 'timeout', 'start_period', 'retries'):
        value = healthcheck.get(option)
        if value is None:
            # due to recursive argument_spec, all keys are always present
            # (but have default value None if not specified)
            continue
        if option in duration_options:
            nanoseconds = convert_duration_to_nanosecond(value)
            if nanoseconds:
                result[option] = nanoseconds
        elif value:
            if option == 'test':
                # Normalise to the exec-form list the Docker API expects.
                if isinstance(value, (tuple, list)):
                    result[option] = [str(item) for item in value]
                else:
                    result[option] = ['CMD-SHELL', str(value)]
            elif option == 'retries':
                try:
                    result[option] = int(value)
                except ValueError:
                    raise ValueError(
                        'Cannot parse number of retries for healthcheck. '
                        'Expected an integer, got "{0}".'.format(value)
                    )
            else:
                result[option] = value

    if result['test'] == ['NONE']:
        # If the user explicitly disables the healthcheck, return None
        # as the healthcheck object, and set disable_healthcheck to True
        return None, True

    return result, False
| 40.252485 | 142 | 0.57653 |
3e1668a9d7d846aeb0314ea327121464cd3f8640 | 2,554 | py | Python | solarium/led.py | codingjoe/solarium | 241a833ec5e2bcd93bd3a34f5cd9e1dd6b776474 | [
"MIT"
] | 1 | 2020-11-08T20:11:03.000Z | 2020-11-08T20:11:03.000Z | solarium/led.py | codingjoe/solarium | 241a833ec5e2bcd93bd3a34f5cd9e1dd6b776474 | [
"MIT"
] | 3 | 2020-12-19T15:40:28.000Z | 2020-12-20T16:10:26.000Z | solarium/led.py | codingjoe/solarium | 241a833ec5e2bcd93bd3a34f5cd9e1dd6b776474 | [
"MIT"
] | null | null | null | import asyncio
import logging
import gpiozero
from gpiozero.pins.pigpio import PiGPIOFactory
logger = logging.getLogger(__package__)
class PWMLED(gpiozero.PWMLED):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop("name")
self.power_state = kwargs.pop("power_state")
self.power_state.callbacks.append(self.turn_off)
super().__init__(*args, **kwargs)
def turn_off(self, value):
if not value:
self.value = int(value)
@gpiozero.PWMLED.value.setter
def value(self, value):
gpiozero.PWMLED.value.fset(self, value * self.power_state)
async def fade(self, value, transition=3, interval=0.05):
logger.debug("%s: power %s", self.name, self.power_state)
logger.debug("%s: %s -> %s (%ss)", self.name, self.value, value, transition)
value *= self.power_state
diff = value - self.value
parts = transition / interval
increments = diff / parts
i = 0
while transition > i:
self.value = max(min(1, self.value + increments), 0)
await asyncio.sleep(interval)
i += interval
class PowerToggleMixin:
    """Tracks an on/off power state and notifies registered callbacks on toggle.

    Instances behave like the integer 0 or 1 in boolean and multiplication
    contexts, so LED brightness values can simply be multiplied by them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.power = 1  # 1 = on, 0 = off
        self.callbacks = []

    def toggle(self):
        """Flip the power state and invoke every registered callback with self."""
        logger.info("Toggle: %i -> %i", self.power, not self.power)
        self.power ^= 1
        for callback in self.callbacks:
            callback(self)

    def __bool__(self):
        return self.power != 0

    def __int__(self):
        return self.power

    def __mul__(self, other):
        return self.power * other

    def __rmul__(self, other):
        return other * self.power

    async def listen(self):
        """Poll the underlying pin value once a second, forever."""
        while True:
            self.value
            await asyncio.sleep(1)
class PowerToggle(PowerToggleMixin, gpiozero.Button):
    """GPIO push button that doubles as the shared on/off power state."""
    pass
def init(host="localhost", warm_pin=12, cold_pin=13, power_pin=17, frequency=100):
    """Wire up the warm/cold PWM LEDs and the power button on a pigpio host.

    Returns the (warm, cold, power_button) triple.
    """
    logger.debug("warm LED: %s:%d@%dHz", host, warm_pin, frequency)
    logger.debug("cold LED: %s:%d@%dHz", host, cold_pin, frequency)
    pin_factory = PiGPIOFactory(host=host)

    # The button must exist first: the LEDs register themselves with it.
    power_button = PowerToggle(power_pin, pin_factory=pin_factory)
    power_button.when_pressed = lambda: power_button.toggle()

    leds = [
        PWMLED(warm_pin, name="warm", power_state=power_button, pin_factory=pin_factory),
        PWMLED(cold_pin, name="cold", power_state=power_button, pin_factory=pin_factory),
    ]
    for led in leds:
        led.frequency = frequency

    warm, cold = leds
    return warm, cold, power_button
| 29.697674 | 87 | 0.637431 |
1cdd043bc8f49a4d7fafb8378c397d1ab0db0607 | 4,492 | py | Python | entigen/readers/csv.py | Stiivi/entigen | b2b49f5e8e8a06c33f269e7effe5fae8576e26e0 | [
"MIT"
] | 5 | 2017-02-21T04:11:58.000Z | 2020-07-03T22:59:18.000Z | entigen/readers/csv.py | Stiivi/entigen | b2b49f5e8e8a06c33f269e7effe5fae8576e26e0 | [
"MIT"
] | 3 | 2017-02-20T19:17:15.000Z | 2017-02-20T23:33:22.000Z | entigen/readers/csv.py | Stiivi/entigen | b2b49f5e8e8a06c33f269e7effe5fae8576e26e0 | [
"MIT"
] | 1 | 2020-04-16T16:55:22.000Z | 2020-04-16T16:55:22.000Z | """
CSV File Reader
"""
import csv
import os.path
from collections import defaultdict
from typing import Optional, Iterable, Dict, List
from ..errors import MetadataError
from ..model import Model, Entity, Property, Enumeration, EnumValue
from ..extensible import Reader
from ..utils import to_bool
PROPERTIES_FILE = "properties.csv"
ENTITIES_FILE = "entities.csv"
ENUMS_FILE = "enums.csv"
ENUM_VALUES_FILE = "enum_values.csv"
class CSVReader(Reader, name="csv"):
    """Reader that builds a `Model` from a directory of CSV metadata files.

    Expected files in the directory: ``entities.csv``, ``properties.csv``,
    ``enums.csv`` and the optional ``enum_values.csv``.
    """

    model: Model

    def __init__(self, model: Optional[Model]=None) -> None:
        self.model = model or Model()

    def read_model(self, path: str) -> None:
        """Populate the model from the CSV files found in `path`."""
        self.read_entities_file(os.path.join(path, ENTITIES_FILE))
        self.read_properties_file(os.path.join(path, PROPERTIES_FILE))
        self.read_enumerations_file(os.path.join(path, ENUMS_FILE))
        self.read_enum_values_file(os.path.join(path, ENUM_VALUES_FILE))

    def read_entities_file(self, filename: str) -> None:
        # Entities are currently created implicitly from the properties file.
        pass

    def read_properties_file(self, filename: str) -> None:
        """Read the properties file and add the resulting entities."""
        with open(filename) as f:
            reader = csv.DictReader(f)
            self._read_property_rows(reader)

    def _property_from_row(self, row: Dict[str, str]) -> Property:
        """Create a validated `Property` from a single CSV row.

        Raises:
            MetadataError: if the name, entity or tag is missing or invalid.
        """
        name = row.get("name")
        if not name:
            raise MetadataError("Property in entity '{}' has no name"
                                .format(row.get("entity")))

        entity_name = row.get("entity")
        if not entity_name:
            raise MetadataError("Property '{}' has no entity."
                                .format(name))

        try:
            tag = int(row["tag"])
        except (TypeError, ValueError):
            # A missing value (None) raises TypeError, a non-numeric CSV
            # string raises ValueError -- both mean the tag is unusable.
            raise MetadataError("Invalid tag for property '{}.{}'"
                                .format(entity_name, name))

        # Empty string in CSV is interpreted None
        default: Optional[str] = row["default"] or None

        prop = Property(
            name=name,
            tag=tag,  # reuse the validated tag instead of re-parsing the row
            raw_type=row["type"],
            label=row["label"],
            desc=row["description"],
            default=default,
            is_optional=to_bool(row["optional"]),
        )

        return prop

    def _read_property_rows(self, rows: Iterable[Dict[str,str]]) -> None:
        """Read properties from list of dictionaries where keys are
        meta-property names and values are meta-property values."""

        # Group properties by their owning entity, preserving row order.
        props: Dict[str,List[Property]]
        props = defaultdict(list)

        for row in rows:
            prop = self._property_from_row(row)
            entname = row["entity"]
            props[entname].append(prop)

        for entname, entprops in props.items():
            entity = Entity(name=entname, properties=entprops)
            self.model.add_entity(entity)

    def read_enumerations_file(self, filename: str) -> None:
        # Enumeration-level metadata is not read yet; enumerations are
        # created from the enum-values file instead.
        pass

    def read_enum_values_file(self, filename: str) -> None:
        """Read the optional enum-values file, if present."""
        # Enum file is optional
        if not os.path.isfile(filename):
            return

        with open(filename) as f:
            reader = csv.DictReader(f)
            self._read_enum_rows(reader)

    def _enum_value_from_row(self, row: Dict[str, str]) -> EnumValue:
        """Create a validated `EnumValue` from a single CSV row.

        Raises:
            MetadataError: if the key, enum name or value is missing or
                invalid.
        """
        name = row.get("key")
        if not name:
            raise MetadataError("Enum value in enum '{}' has no key"
                                .format(row.get("enum")))

        enum_name = row.get("enum")
        if not enum_name:
            raise MetadataError("Key '{}' has no enum name."
                                .format(name))

        try:
            value = int(row["value"])
        except (TypeError, ValueError):
            # Same rationale as for the property tag above.
            raise MetadataError("Invalid enum value '{}.{}'"
                                .format(enum_name, name))

        prop = EnumValue(
            key=name,  # reuse the validated key
            value=value,
            label=row["label"],
            desc=row["description"],
        )

        return prop

    def _read_enum_rows(self, rows: Iterable[Dict[str,str]]) -> None:
        """Read values of enums. Keys are: enum, key, value, label, desc."""

        # Group values by their owning enumeration, preserving row order.
        values: Dict[str,List[EnumValue]]
        values = defaultdict(list)

        for row in rows:
            value = self._enum_value_from_row(row)
            enumname = row["enum"]
            values[enumname].append(value)

        for enumname, enumvalues in values.items():
            enum = Enumeration(name=enumname, values=enumvalues)
            self.model.add_enum(enum)
| 30.351351 | 76 | 0.578807 |
600a4024c23bdfaa9bd96914dc541229ca7e6244 | 18,571 | py | Python | server/processeshandler.py | Modaouche/Taskmaster | 93e96d5ea320ceca011c42b3273eb40df7eee66f | [
"MIT"
] | null | null | null | server/processeshandler.py | Modaouche/Taskmaster | 93e96d5ea320ceca011c42b3273eb40df7eee66f | [
"MIT"
] | null | null | null | server/processeshandler.py | Modaouche/Taskmaster | 93e96d5ea320ceca011c42b3273eb40df7eee66f | [
"MIT"
] | null | null | null | """A module which handle processes by launching them and fetch info."""
import os
import sys
import copy
import time
import subprocess
import threading
from datetime import datetime
from subprocess import TimeoutExpired
from server_socket import Communication_S
from program import Program
from client_request import ClientRequest
class ProcessesHandler():
    """A class which contains information about processes.

    Owns the list of configured programs, spawns their processes via
    ``subprocess.Popen`` and probes their lifecycle (spawned -> running ->
    exited / backoff / fatal), mirroring supervisord-style semantics.
    """

    # --- event / message labels -------------------------------------------
    INFO = "INFO"
    ERROR = "ERROR"
    CRIT = "CRIT"
    WARN = "WARN"
    SPAWNED = "spawned"
    SUCCESS = "success"
    EXITED = "exited"
    GAVE_UP = "gave up"
    CMDNOTFOUND = "can't find command "
    PERMISSIONDENIED = "PERMISSIONDENIED"
    SPAWNERR = "spawnerr"
    RUNNING = "RUNNING"
    RESTARTING = "restarting"
    AUTORESTART = "autorestart"
    UNEXPECTED = True
    PROCESS = 0
    OPTIONS = 1
    # NOTE(review): the indices below intentionally(?) shadow the string
    # labels above (PROCESS, RUNNING, EXITED are redefined). All runtime
    # comparisons use the later numeric values consistently, but the string
    # versions become unreachable -- worth confirming/cleaning up.
    # --- indices into a proc_settings list --------------------------------
    PROCESS = 0  # subprocess object
    TIMESTAMP = 1  # timestamp of the process
    RUNNING = 2  # Process running ? True : False
    EXITED = 3  # Process exited ? True : False
    EXITCODE = 4  # Exitcodes expected for the process
    STARTSECS = 5  # If the process is still running after STARTSECS, then it enter into RUNNING state
    STARTRETRIES = 6  # Number of times we can restart the process
    STATE = 7
    PID = 8
    # --- STATE values ------------------------------------------------------
    FATAL = 3
    BACKOFF = 4
    FAIL = 9
    # Suffixes for auto-generated per-process log file names.
    STDOUT = "out"
    STDERR = "err"

    def __init__(self, taskmaster):
        """Initialize attributes for processes handler"""
        self.taskmaster = taskmaster
        self.progs_conf = taskmaster.config.programs
        self.datas = []
        self.datas_right = False
        self.commands = []
        self.programs = []
        # Locks guarding shared state touched by the client/communication
        # threads.
        self.lock_commands = threading.Lock()
        self.lock_datas = threading.Lock()
        self.communication = Communication_S(self)
        self.client_request = ClientRequest(self)
        self.umask = None
        self.procs_timestamp = {}
        self.lock_reload = threading.Lock()

    def run(self):
        """launch every processes inside the dictionary 'programs'.

        Main loop: builds the process structures, starts the autostart
        programs, spawns the communication thread and then alternates
        between serving client requests and probing process state.
        """
        self.umask = self.taskmaster.config.umask
        self._set_processes_arch()
        self._launcher()
        t = threading.Thread(target=self.communication)
        t.start()
        while True:
            # Held while handling a request so a reload cannot race probing.
            self.lock_reload.acquire()
            self.client_request()
            self._check_programs_state()
            self.lock_reload.release()

    def _check_programs_state(self):
        """Browse each program to probe them processes."""
        i = 0
        while i < len(self.programs):
            self._probe_program_processes(self.programs[i])
            i += 1

    def _probe_program_processes(self, program):
        """Get information whether the processes are running or not."""
        # A program dict maps 'settings' plus one key per process group.
        for k in program.keys():
            if k != 'settings':
                procs = program[k]
                for n in procs.keys():
                    self._check_proc_state(n, procs[n], program['settings'])

    def _check_proc_state(self, name, proc_settings, prog_settings):
        """Verify if the process is running or not."""
        proc = proc_settings[self.PROCESS]
        if proc is not None:
            deltatime = time.time() - proc_settings[self.TIMESTAMP]
            # Alive past startsecs (or startsecs == 0): enter RUNNING state.
            if (proc.poll() is None \
                    and not proc_settings[self.RUNNING] \
                    and deltatime >= proc_settings[self.STARTSECS]) \
                    or (not proc_settings[self.RUNNING] and proc_settings[self.STARTSECS] == 0):
                proc_settings[self.RUNNING] = True
                proc_settings[self.STATE] = 0
                self._print_event(self.INFO, self.RUNNING, name, startsecs=proc_settings[self.STARTSECS])
            # Exited after having reached RUNNING: maybe autorestart.
            elif proc.poll() is not None and proc_settings[self.RUNNING]:
                self._print_event(self.INFO, self.EXITED, name, returncode=proc.returncode,
                                  exitcode=proc_settings[self.EXITCODE])
                proc_settings[self.PROCESS] = None
                proc_settings[self.RUNNING] = False
                proc_settings[self.EXITED] = True
                proc_settings[self.STATE] = 1
                # Restart when configured always, or on unexpected exit code.
                if prog_settings['autorestart'] == 'unexpected' and prog_settings['exitcodes'] != proc.returncode \
                        or prog_settings['autorestart'] == True:
                    self._start_process(name, proc_settings, prog_settings)
            # Exited before reaching RUNNING: back off and retry, or give up.
            elif proc.poll() is not None and not proc_settings[self.RUNNING]:
                alive = None
                if not proc_settings[self.RUNNING]:
                    alive = False
                if proc_settings[self.PROCESS] is not None:
                    self._print_event(self.INFO, self.EXITED, name, returncode=proc.returncode,
                                      exitcode=proc_settings[self.EXITCODE], alive=alive)
                    proc_settings[self.PROCESS] = None
                    proc_settings[self.EXITED] = True
                    proc_settings[self.STATE] = 1
                if proc_settings[self.STARTRETRIES] > 0:
                    proc_settings[self.STATE] = self.BACKOFF
                    proc_settings[self.STARTRETRIES] -= 1
                    time.sleep(0.1)
                    self._start_process(name, proc_settings, prog_settings)
                elif not proc_settings[self.RUNNING]:
                    proc_settings[self.STATE] = self.FATAL
                    self._print_event(self.INFO, self.GAVE_UP, name)
        # Spawn itself failed earlier (FAIL flag set): retry or give up.
        elif proc is None and proc_settings[self.FAIL] == -1:
            if proc_settings[self.STARTRETRIES] > 0:
                proc_settings[self.STATE] = self.BACKOFF
                proc_settings[self.STARTRETRIES] -= 1
                time.sleep(0.1)
                self._start_process(name, proc_settings, prog_settings)
            elif proc_settings[self.STARTRETRIES] == 0:
                proc_settings[self.STARTRETRIES] -= 1
                proc_settings[self.STATE] = self.FATAL
                self._print_event(self.INFO, self.GAVE_UP, name)

    def _print_event(self, target, event, name, startsecs=None, pid=None, returncode=0, exitcode=None, alive=None):
        """Display when a new event occurs.

        Prints a timestamped, supervisord-style log line on stdout for the
        given event type (spawned / running / exited / gave up / spawnerr).
        """
        print(datetime.strftime(datetime.now(), "%Y-%m-%d %I:%M:%S, "), end='')
        if target == self.INFO:
            print("INFO ", end='')
            if event == self.SPAWNED:
                print("spawned: "
                      f"'{name}' with pid {pid}")
            elif event == self.RUNNING:
                print("success: "
                      f"{name} entered RUNNING state, "
                      f"process has stayed up for > than {startsecs} seconds (startsecs)")
            elif event == self.EXITED:
                print("exited: "
                      f"{name} (exit status {returncode}, ", end='')
                # An exit is "expected" only if the code matches the config
                # and the process had reached RUNNING (alive is not False).
                if returncode != exitcode or alive == False:
                    print("not expected)")
                else:
                    print("expected)")
            elif event == self.GAVE_UP:
                print("gave up: "
                      f"{name} entered FATAL state, too many start retries too quickly")
            elif event == self.CMDNOTFOUND:
                print("spawnerr: can't find command " f"'{name}'")
            elif event == self.SPAWNERR:
                print("spawnerr: unknown error making dispatchers for " f"'{name}'")
        elif target == self.CRIT:
            print("CRIT ", end='')
            if event == self.PERMISSIONDENIED:
                print("permission denied: "
                      f"'{name}'")

    def _fetch_progam_names(self):
        """Fetch program names of the data structure."""
        i = 0
        prog_names = []
        for i in range(len(self.programs)):
            for k in self.programs[i].keys():
                if k != 'settings':
                    prog_names.append(k)
        return prog_names

    def _set_processes_arch(self):
        """Build the data structure and architecture for the processes.

        New programs are appended; already-known programs whose settings
        changed are stopped and rebuilt in place.
        """
        prog_names = self._fetch_progam_names()
        for prog_name, settings in self.progs_conf.items():
            if prog_name not in prog_names:
                program = Program(prog_name, settings)
                self.programs.append(program.program)
            else:
                for i in range(len(self.programs)):
                    if prog_name in self.programs[i].keys():
                        ret = self.client_request._compare_settings(self.programs[i]['settings'], settings)
                        if ret is True:
                            self.client_request._stop_program(prog_name)
                            program = Program(prog_name, settings)
                            self.programs[i] = program.program
                        continue

    def _launcher(self):
        """Launch all program's processes with autostart == True."""
        i = 0
        if self.umask is not None:
            os.umask(self.umask)
        while i < len(self.programs):
            program = self.programs[i]
            i += 1
            if program['settings']['autostart']:
                for k in program:
                    if k != 'settings':
                        procs = program[k]
                        for name in procs:
                            self._start_process(name, procs[name], program['settings'])

    def _open_stream(self, filename):
        """Create a log file and return the stream associated.

        Returns the open file object, or a negative sentinel on failure:
        -1 permission denied, -2 missing directory, -3 other I/O error.
        """
        try:
            f = open(filename, 'w')
        except PermissionError:
            return -1
        except FileNotFoundError:
            return -2
        except IOError as e:
            return -3
        else:
            return f

    def _popen_options_handler(self, proc_name, prog_settings):
        """Prepare all the options before the popen call.

        Resolves environment, command argv, stdout/stderr targets ('AUTO'
        creates per-process log files, None pipes), working directory and
        umask into a single dict consumed by `_start_process`.
        """
        settings = {}
        settings['env'] = os.environ.copy()
        if prog_settings['env']:
            settings['env'].update(prog_settings['env'])
        settings['command'] = prog_settings['command'].split()
        if prog_settings['stdout_logfile'] == 'AUTO':
            settings['stdout_logfile'] = self._open_stream(proc_name + self.STDOUT)
        elif prog_settings['stdout_logfile'] == None:
            settings['stdout_logfile'] = subprocess.PIPE
        else:
            settings['stdout_logfile'] = self._open_stream(prog_settings['stdout_logfile'])
        if prog_settings['stderr_logfile'] == 'AUTO':
            settings['stderr_logfile'] = self._open_stream(proc_name + self.STDERR)
        elif prog_settings['stderr_logfile'] == None:
            settings['stderr_logfile'] = subprocess.PIPE
        else:
            settings['stderr_logfile'] = self._open_stream(prog_settings['stderr_logfile'])
        if prog_settings['directory'] is not None:
            settings['directory'] = prog_settings['directory']
        else:
            settings['directory'] = None
        settings['umask'] = None
        if prog_settings['umask'] is not None:
            settings['umask'] = prog_settings['umask']
        return settings

    def _start_process(self, proc_name, proc_settings, prog_settings):
        """Start a process using subprocess module.

        Returns 0 on success, -1 on spawn failure (command not found,
        permission denied, OS error or broken log streams); failures set
        proc_settings[FAIL] = -1 so `_check_proc_state` can retry.
        """
        settings = self._popen_options_handler(proc_name, prog_settings)
        # Temporarily apply the per-program umask around the spawn.
        saved_umask = os.umask(0)
        if settings['umask']:
            os.umask(settings['umask'])
        try:
            proc = subprocess.Popen(
                settings['command'],
                stdin= -1,
                stdout=settings['stdout_logfile'],
                stderr=settings['stderr_logfile'],
                cwd=settings['directory'],
                env=settings['env'],
            )
            proc_settings[self.PID] = proc.pid
        except FileNotFoundError as e:
            proc_settings[self.PROCESS] = None
            proc_settings[self.FAIL] = -1
            self._print_event(self.INFO, self.CMDNOTFOUND, settings['command'][0])
            return -1
        except PermissionError as e:
            proc_settings[self.PROCESS] = None
            proc_settings[self.FAIL] = -1
            self._print_event(self.CRIT, self.PERMISSIONDENIED, settings['directory'])
            return -1
        except OSError as e:
            proc_settings[self.FAIL] = -1
            print(f"CRIT {e}")
            return -1
        else:
            # NOTE(review): `settings['stdout_logfile'] is int` compares
            # identity with the `int` type, which is always False -- this
            # looks like it was meant to be isinstance(...); confirm.
            if ((settings['stdout_logfile'] is int and settings['stdout_logfile'] < 0) and settings['stdout_logfile'] != subprocess.PIPE) \
                    or (settings['stderr_logfile'] is int and settings['stderr_logfile'] < 0) and settings['stderr_logfile'] != subprocess.PIPE:
                proc_settings[self.PROCESS] = None
                proc_settings[self.FAIL] = -1
                self._print_event(self.INFO, self.SPAWNERR, proc_name)
                return -1
            else:
                self._print_event(self.INFO, self.SPAWNED, proc_name, pid=proc.pid)
                proc_settings[self.PROCESS] = proc
                proc_settings[self.TIMESTAMP] = time.time()
                proc_settings[self.PID] = proc.pid
                proc_settings[self.STATE] = -1
                proc_settings[self.EXITCODE] = prog_settings['exitcodes']
        os.umask(saved_umask)
        return 0

    # NOTE(review): the methods below reference `self.procs`, which is never
    # initialized in __init__ -- they appear to belong to an older version of
    # this class and may be dead code; confirm before relying on them.
    def _check_processes(self):
        """check the status of the processes."""
        for prog_name, (proc, options) in self.procs.items():
            if not options['running']:
                if self._try_running(prog_name, proc, options) == -1:
                    self._try_restart(prog_name, proc, options)
            # elif options['running']:
            #     if self._process_exited(prog_name, options):
            #         self._restart_process(prog_name)

    def _try_running(self, prog_name, proc, options):
        """
        Set the the process to a running state
        if and only if the diff between now and
        the 'process timestamp' > startsecs
        """
        self.startsecs = options['startsecs']
        self.deltatime = time.time() - options['timestamp']
        if self.deltatime >= self.startsecs and not self.procs[prog_name][self.OPTIONS]['exited']:
            options['running'] = True
            print(self.procs[prog_name][self.OPTIONS]['running'])
            options['startretries'] = 0
            self._print_status(self.INFO, self.SUCCESS, prog_name, True)
            return 0
        return -1

    def _try_restart(self, prog_name, proc, options):
        """Try to restart the process."""
        if self.deltatime > self.startsecs and not options['running']:
            if self.programs[prog_name]['startretries'] > options['startretries']:
                self._print_status(self.INFO, self.EXITED, prog_name, True)
                self._restart_process(prog_name)
                self.procs[prog_name][self.OPTIONS]['startretries'] += 1
                print(self.procs[prog_name][self.OPTIONS]['startretries'])
            elif not options['gave_up']:
                options['gave_up'] = True
                self._print_status(self.INFO, self.EXITED, prog_name, True)
                self._print_status(self.INFO, self.GAVE_UP, prog_name)
                # del self.procs[prog_name]

    # def _process_exited(self, prog_name, options):
    #     """
    #     Either autorestart == unexpected and exitcodes != process.returncode,
    #     or autorestart == True, then it allows by returning > 0 to restart the
    #     process.
    #     """
    #     proc = self.procs[prog_name][self.PROCESS]
    #     if proc.poll() is not None:
    #         if not options['exited']:
    #             self.procs[prog_name][self.OPTIONS]['exited'] = True
    #             self._print_status(self.INFO, self.EXITED, prog_name)
    #         if options['autorestart'] == True \
    #             or (options['autorestart'] == 'unexpected' \
    #             and options['exitcodes'] != proc.returncode):
    #             return 1
    #     return 0

    def _print_status(self, type_msg, status, prog_name, poll=None):
        """Display the current state of a process."""
        if type_msg == self.INFO:
            print("INFO ", end='')
            if status == self.SPAWNED:
                print(f"{self.SPAWNED}: '{prog_name}' "
                      f"with pid {self.procs[prog_name][self.PROCESS].pid}")
            elif status == self.SUCCESS:
                print(f"{self.SUCCESS}: {prog_name} "
                      f"entered {self.RUNNING} state, "
                      "process has stayed up for > than "
                      f"{self.programs[prog_name]['startsecs']} "
                      "seconds (startsecs)")
            elif status == self.EXITED:
                print(f"{self.EXITED}: {prog_name} "
                      f"(exit status {self.procs[prog_name][self.PROCESS].returncode}; ", end='')
                exitcode = self.procs[prog_name][self.PROCESS].returncode
                if exitcode != self.programs[prog_name]['exitcodes'] or poll:
                    print("not expected)")
                else:
                    print("expected)")
            elif status == self.GAVE_UP:
                print(f"{self.GAVE_UP}: {prog_name} "
                      "entered FATAL state, too many start retries too quickly")
| 43.491803 | 144 | 0.543428 |
9f7de768086840af8191fd0e8c510758bfd09bc3 | 4,209 | py | Python | retail_sales_prediction/utils/run_model.py | amjadraza/retail-sales-prediction | 82af7de0383d88b51a961393b27f5d43ace372da | [
"MIT"
] | null | null | null | retail_sales_prediction/utils/run_model.py | amjadraza/retail-sales-prediction | 82af7de0383d88b51a961393b27f5d43ace372da | [
"MIT"
] | 1 | 2021-11-15T17:52:14.000Z | 2021-11-15T17:52:14.000Z | retail_sales_prediction/utils/run_model.py | amjadraza/retail-sales-prediction | 82af7de0383d88b51a961393b27f5d43ace372da | [
"MIT"
] | 1 | 2020-09-13T05:41:36.000Z | 2020-09-13T05:41:36.000Z | """
.. module:: run_model
:synopsis: Collection of Models
.. moduleauthor:: MA Raza
This modules consists of collection of various machine learning models. We start with Light GBM.
Depending on the time, we can add more
Todo:
* Add more machine learning models, such as GBM, RF and XGBoost
* Spark Compatible GBM and Light GBM Models
* Add Model Diagnostic plots using SHAP Library
* Feature Reduction
* Config file
"""
import sys
sys.path.append('.')
import os
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
from retail_sales_prediction import logger
def run_model_lgbm(feature_prep, X_train, y_train, X_val, y_val, X_test, config, num_days=6):
    """Train one LightGBM model per forecast day and write predictions.

    Trains 16 independent boosters (one per day of the forecast horizon),
    logs the validation MSE and normalized weighted RMSLE, and writes the
    cross-validation predictions (``lgb_cv.csv``) and the submission file
    (``lgb_sub.csv``) into ``config['output_dir']``.

    Args:
        feature_prep: Feature-preparation object exposing ``items``
            (with a ``perishable`` column), ``df_2017`` and ``test``.
        X_train: Training features.
        y_train: Training targets, one column per forecast day.
        X_val: Validation features.
        y_val: Validation targets, one column per forecast day.
        X_test: Test features.
        config: Dict with keys ``model_params``, ``MAX_ROUNDS`` and
            ``output_dir``.
        num_days: Number of days the training frame was stacked over;
            used to repeat the perishable weights for the training set.

    Returns:
        None. Results are written to disk and logged.
    """
    # BUG FIX: `logger` is a Logger instance, not a callable -- calling it
    # directly raised TypeError before any training happened.
    logger.info("Training and predicting models...")

    params = config['model_params']
    MAX_ROUNDS = config['MAX_ROUNDS']
    output_dir = config['output_dir']

    os.makedirs(output_dir, exist_ok=True)

    # BUG FIX: '.fromat' typo raised AttributeError.
    logger.info('output directory : %s', output_dir)

    val_pred = []
    test_pred = []
    cate_vars = []
    # One booster per day of the 16-day forecast horizon.
    for i in range(16):
        logger.info("=" * 50)
        logger.info("Step %d", i + 1)
        logger.info("=" * 50)
        dtrain = lgb.Dataset(
            X_train, label=y_train[:, i],
            categorical_feature=cate_vars,
            # Perishable items carry a 25% higher weight in the loss.
            weight=pd.concat([feature_prep.items["perishable"]] * num_days) * 0.25 + 1
        )
        dval = lgb.Dataset(
            X_val, label=y_val[:, i], reference=dtrain,
            weight=feature_prep.items["perishable"] * 0.25 + 1,
            categorical_feature=cate_vars)
        bst = lgb.train(
            params, dtrain, num_boost_round=MAX_ROUNDS,
            valid_sets=[dtrain, dval], early_stopping_rounds=125, verbose_eval=50
        )
        # Log per-feature gain importance, most important first.
        logger.info("\n".join(("%s: %.2f" % x) for x in sorted(
            zip(X_train.columns, bst.feature_importance("gain")),
            key=lambda x: x[1], reverse=True
        )))

        val_pred.append(bst.predict(
            X_val, num_iteration=bst.best_iteration or MAX_ROUNDS))
        test_pred.append(bst.predict(
            X_test, num_iteration=bst.best_iteration or MAX_ROUNDS))

    logger.info('**** Finished Training *****')
    # BUG FIX: the value was passed as a logging arg without a %s
    # placeholder, so it was never rendered in the message.
    logger.info("Validation mse: %s", mean_squared_error(
        y_val, np.array(val_pred).transpose()))

    # Normalized weighted RMSLE over the 16-day horizon.
    weight = feature_prep.items["perishable"] * 0.25 + 1
    err = (y_val - np.array(val_pred).transpose()) ** 2
    err = err.sum(axis=1) * weight
    err = np.sqrt(err.sum() / weight.sum() / 16)
    logger.info('nwrmsle = %s', err)

    # Persist validation predictions (targets are log1p-transformed, so
    # invert with expm1 and clip to a sane range).
    y_val = np.array(val_pred).transpose()
    df_preds = pd.DataFrame(
        y_val, index=feature_prep.df_2017.index,
        columns=pd.date_range("2017-07-26", periods=16)
    ).stack().to_frame("unit_sales")
    df_preds.index.set_names(["store_nbr", "item_nbr", "date"], inplace=True)
    df_preds["unit_sales"] = np.clip(np.expm1(df_preds["unit_sales"]), 0, 1000)
    df_preds.reset_index().to_csv(output_dir + 'lgb_cv.csv', index=False)

    logger.info("Making submission...")
    y_test = np.array(test_pred).transpose()
    df_preds = pd.DataFrame(
        y_test, index=feature_prep.df_2017.index,
        columns=pd.date_range("2017-08-16", periods=16)
    ).stack().to_frame("unit_sales")
    df_preds.index.set_names(["store_nbr", "item_nbr", "date"], inplace=True)

    submission = feature_prep.test[["id"]].join(df_preds, how="left").fillna(0)
    submission["unit_sales"] = np.clip(np.expm1(submission["unit_sales"]), 0, 1000)
    submission.to_csv(output_dir + 'lgb_sub.csv', float_format='%.4f', index=None)
| 33.141732 | 96 | 0.627465 |
2f9297153a5295b2d97bd3a0b259e116f107d57d | 22,501 | py | Python | source/models/sbi_snpe_mod/snpe_base.py | dominickeehan/bayesian-microlensing | bf95b8346019e6a6262e42e4c5c8e5b870c903b5 | [
"MIT"
] | 1 | 2021-10-13T00:41:02.000Z | 2021-10-13T00:41:02.000Z | source/models/sbi_snpe_mod/snpe_base.py | dominickeehan/bayesian-microlensing | bf95b8346019e6a6262e42e4c5c8e5b870c903b5 | [
"MIT"
] | null | null | null | source/models/sbi_snpe_mod/snpe_base.py | dominickeehan/bayesian-microlensing | bf95b8346019e6a6262e42e4c5c8e5b870c903b5 | [
"MIT"
] | null | null | null | # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.
import time
import logging
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Union
from warnings import warn
import torch
from torch import Tensor, ones, optim
from torch.nn.utils import clip_grad_norm_
from torch.utils import data
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.tensorboard import SummaryWriter
from sbi import utils as utils
from sbi.inference import NeuralInference, check_if_proposal_has_default_x
from sbi.inference.posteriors.direct_posterior import DirectPosterior
from sbi.types import TorchModule
from sbi.utils import (
RestrictedPrior,
check_estimator_arg,
test_posterior_net_for_multi_d_x,
validate_theta_and_x,
x_shape_from_simulation,
)
from sbi.utils.sbiutils import ImproperEmpirical, mask_sims_from_prior
class PosteriorEstimator(NeuralInference, ABC):
    def __init__(
        self,
        prior: Optional[Any] = None,
        density_estimator: Union[str, Callable] = "maf",
        device: str = "cpu",
        logging_level: Union[int, str] = "WARNING",
        summary_writer: Optional[SummaryWriter] = None,
        show_progress_bars: bool = True,
        **unused_args,
    ):
        """Base class for Sequential Neural Posterior Estimation methods.

        Args:
            density_estimator: If it is a string, use a pre-configured network of the
                provided type (one of nsf, maf, mdn, made). Alternatively, a function
                that builds a custom neural network can be provided. The function will
                be called with the first batch of simulations (theta, x), which can
                thus be used for shape inference and potentially for z-scoring. It
                needs to return a PyTorch `nn.Module` implementing the density
                estimator. The density estimator needs to provide the methods
                `.log_prob` and `.sample()`.
            unused_args: Absorbs additional arguments. No entries will be used. If it
                is not empty, we warn. In future versions, when the new interface of
                0.14.0 is more mature, we will remove this argument.

        See docstring of `NeuralInference` class for all other arguments.
        """

        super().__init__(
            prior=prior,
            device=device,
            logging_level=logging_level,
            summary_writer=summary_writer,
            show_progress_bars=show_progress_bars,
            **unused_args,
        )

        # As detailed in the docstring, `density_estimator` is either a string or
        # a callable. The function creating the neural network is attached to
        # `_build_neural_net`. It will be called in the first round and receive
        # thetas and xs as inputs, so that they can be used for shape inference and
        # potentially for z-scoring.
        check_estimator_arg(density_estimator)
        if isinstance(density_estimator, str):
            self._build_neural_net = utils.posterior_nn(model=density_estimator)
        else:
            self._build_neural_net = density_estimator

        # One proposal distribution per round of appended simulations.
        self._proposal_roundwise = []
        # Default to the atomic loss; presumably subclasses flip this when a
        # non-atomic loss applies -- confirm against the SNPE variants.
        self.use_non_atomic_loss = False

        # Extra SNPE-specific fields summary_writer.
        self._summary.update({"rejection_sampling_acceptance_rates": []})  # type:ignore
    def append_simulations(
        self,
        theta: Tensor,
        x: Tensor,
        proposal: Optional[Any] = None,
    ) -> "PosteriorEstimator":
        r"""
        Store parameters and simulation outputs to use them for later training.

        Data are stored as entries in lists for each type of variable (parameter/data).

        Stores $\theta$, $x$, prior_masks (indicating if simulations are coming from the
        prior or not) and an index indicating which round the batch of simulations came
        from.

        Args:
            theta: Parameter sets.
            x: Simulation outputs.
            proposal: The distribution that the parameters $\theta$ were sampled from.
                Pass `None` if the parameters were sampled from the prior. If not
                `None`, it will trigger a different loss-function.

        Returns:
            NeuralInference object (returned so that this function is chainable).
        """

        theta, x = validate_theta_and_x(theta, x, training_device=self._device)
        self._check_proposal(proposal)
        # Treat samples as prior samples when no proposal is given, or when the
        # proposal is (a restriction of) the prior itself.
        if (
            proposal is None
            or proposal is self._prior
            or (
                isinstance(proposal, RestrictedPrior) and proposal._prior is self._prior
            )
        ):
            # The `_data_round_index` will later be used to infer if one should train
            # with MLE loss or with atomic loss (see, in `train()`:
            # self._round = max(self._data_round_index))
            self._data_round_index.append(0)
            self._prior_masks.append(mask_sims_from_prior(0, theta.size(0)))
        else:
            if not self._data_round_index:
                # This catches a pretty specific case: if, in the first round, one
                # passes data that does not come from the prior.
                self._data_round_index.append(1)
            else:
                self._data_round_index.append(max(self._data_round_index) + 1)
            self._prior_masks.append(mask_sims_from_prior(1, theta.size(0)))

        self._theta_roundwise.append(theta)
        self._x_roundwise.append(x)
        self._proposal_roundwise.append(proposal)

        if self._prior is None or isinstance(self._prior, ImproperEmpirical):
            if proposal is not None:
                raise ValueError(
                    "You had not passed a prior at initialization, but now you "
                    "passed a proposal. If you want to run multi-round SNPE, you have "
                    "to specify a prior (set the `.prior` argument or re-initialize "
                    "the object with a prior distribution). If the samples you passed "
                    "to `append_simulations()` were sampled from the prior, you can "
                    "run single-round inference with "
                    "`append_simulations(..., proposal=None)`."
                )
            # Without an explicit prior, fall back to an empirical distribution
            # built from the appended parameter samples.
            theta_prior = self.get_simulations()[0]
            self._prior = ImproperEmpirical(theta_prior, ones(theta_prior.shape[0]))

        return self
    def train(
        self,
        training_batch_size: int = 50,
        learning_rate: float = 5e-4,
        validation_fraction: float = 0.1,
        stop_after_epochs: int = 20,
        max_num_epochs: Optional[int] = None,
        clip_max_norm: Optional[float] = 5.0,
        calibration_kernel: Optional[Callable] = None,
        exclude_invalid_x: bool = True,
        resume_training: bool = False,
        discard_prior_samples: bool = False,
        retrain_from_scratch_each_round: bool = False,
        show_train_summary: bool = False,
        dataloader_kwargs: Optional[dict] = None,
    ) -> DirectPosterior:
        r"""
        Return density estimator that approximates the distribution $p(\theta|x)$.

        Args:
            training_batch_size: Training batch size.
            learning_rate: Learning rate for Adam optimizer.
            validation_fraction: The fraction of data to use for validation.
            stop_after_epochs: The number of epochs to wait for improvement on the
                validation set before terminating training.
            max_num_epochs: Maximum number of epochs to run. If reached, we stop
                training even when the validation loss is still decreasing. If None, we
                train until validation loss increases (see also `stop_after_epochs`).
            clip_max_norm: Value at which to clip the total gradient norm in order to
                prevent exploding gradients. Use None for no clipping.
            calibration_kernel: A function to calibrate the loss with respect to the
                simulations `x`. See Lueckmann, Gonçalves et al., NeurIPS 2017.
            exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`
                during training. Expect errors, silent or explicit, when `False`.
            resume_training: Can be used in case training time is limited, e.g. on a
                cluster. If `True`, the split between train and validation set, the
                optimizer, the number of epochs, and the best validation log-prob will
                be restored from the last time `.train()` was called.
            discard_prior_samples: Whether to discard samples simulated in round 1, i.e.
                from the prior. Training may be sped up by ignoring such less targeted
                samples.
            retrain_from_scratch_each_round: Whether to retrain the conditional density
                estimator for the posterior from scratch each round.
            show_train_summary: Whether to print the number of epochs and validation
                loss after the training.
            dataloader_kwargs: Additional or updated kwargs to be passed to the training
                and validation dataloaders (like, e.g., a collate_fn)

        Returns:
            Density estimator that approximates the distribution $p(\theta|x)$.
        """
        # Calibration kernels proposed in Lueckmann, Gonçalves et al., 2017.
        if calibration_kernel is None:
            calibration_kernel = lambda x: ones([len(x)], device=self._device)
        # Sentinel "no limit": largest int32 value.
        max_num_epochs = 2 ** 31 - 1 if max_num_epochs is None else max_num_epochs
        # Starting index for the training set (1 = discard round-0 samples).
        start_idx = int(discard_prior_samples and self._round > 0)
        # For non-atomic loss, we can not reuse samples from prev. rounds as of now.
        if self.use_non_atomic_loss:
            start_idx = self._round
        theta, x, prior_masks = self.get_simulations(
            start_idx, exclude_invalid_x, warn_on_invalid=True
        )
        # Dataset is shared for training and validation loaders.
        dataset = data.TensorDataset(
            theta,
            x,
            prior_masks,
        )
        # Set the proposal to the last proposal that was passed by the user. For
        # atomic SNPE, it does not matter what the proposal is. For non-atomic
        # SNPE, we only use the latest data that was passed, i.e. the one from the
        # last proposal.
        proposal = self._proposal_roundwise[-1]
        train_loader, val_loader = self.get_dataloaders(
            dataset,
            training_batch_size,
            validation_fraction,
            resume_training,
            dataloader_kwargs=dataloader_kwargs,
        )
        # First round or if retraining from scratch:
        # Call the `self._build_neural_net` with the rounds' thetas and xs as
        # arguments, which will build the neural network.
        # This is passed into NeuralPosterior, to create a neural posterior which
        # can `sample()` and `log_prob()`. The network is accessible via `.net`.
        if self._neural_net is None or retrain_from_scratch_each_round:
            self._neural_net = self._build_neural_net(
                theta[self.train_indices], x[self.train_indices]
            )
            # If data on training device already move net as well.
            if (
                not self._device == "cpu"
                and f"{x.device.type}:{x.device.index}" == self._device
            ):
                self._neural_net.to(self._device)
            test_posterior_net_for_multi_d_x(self._neural_net, theta, x)
            self._x_shape = x_shape_from_simulation(x)
        # Move entire net to device for training.
        self._neural_net.to(self._device)
        if not resume_training:
            self.optimizer = optim.Adam(
                list(self._neural_net.parameters()), lr=learning_rate
            )
            # NOTE(review): `scheduler` is only bound in this branch, yet
            # `scheduler.step()` below runs every epoch unconditionally —
            # calling train(resume_training=True) would raise NameError.
            # Confirm whether resume is meant to be supported here.
            scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max = 500)
            self.epoch, self._val_log_prob = 0, float("-Inf")
        while self.epoch <= max_num_epochs and not self._converged(
            self.epoch, stop_after_epochs
        ):
            # Train for a single epoch.
            self._neural_net.train()
            train_log_prob_sum = 0
            epoch_start_time = time.time()
            for batch in train_loader:
                self.optimizer.zero_grad()
                # Get batches on current device.
                theta_batch, x_batch, masks_batch = (
                    batch[0].to(self._device),
                    batch[1].to(self._device),
                    batch[2].to(self._device),
                )
                batch_loss = torch.mean(
                    self._loss(
                        theta_batch,
                        x_batch,
                        masks_batch,
                        proposal,
                        calibration_kernel,
                    )
                )
                # batch_loss is a 0-dim mean, so .sum() is a no-op here; this
                # accumulates per-batch mean losses.
                # NOTE(review): the accumulated value is a loss (negative log
                # prob from `_loss`), yet it is stored under
                # "train_log_probs" below — name and sign look inconsistent.
                train_log_prob_sum += batch_loss.sum().item()
                batch_loss.backward()
                if clip_max_norm is not None:
                    clip_grad_norm_(
                        self._neural_net.parameters(),
                        max_norm=clip_max_norm,
                    )
                self.optimizer.step()
            self.epoch += 1
            # Advance the cosine-annealing learning-rate schedule once per epoch.
            scheduler.step()
            # Normalize by the (approximate, due to int truncation) number of
            # training examples.
            train_log_prob_sum /= int(theta.shape[0] * (1.0-validation_fraction))
            self._summary["train_log_probs"].append(train_log_prob_sum)
            # Calculate validation performance.
            self._neural_net.eval()
            log_prob_sum = 0
            with torch.no_grad():
                for batch in val_loader:
                    theta_batch, x_batch, masks_batch = (
                        batch[0].to(self._device),
                        batch[1].to(self._device),
                        batch[2].to(self._device),
                    )
                    # Take negative loss here to get validation log_prob.
                    batch_log_prob = -self._loss(
                        theta_batch,
                        x_batch,
                        masks_batch,
                        proposal,
                        calibration_kernel,
                    )
                    log_prob_sum += batch_log_prob.sum().item()
            # Take mean over all validation samples.
            self._val_log_prob = log_prob_sum / (
                len(val_loader) * val_loader.batch_size
            )
            # Log validation log prob for every epoch.
            self._summary["validation_log_probs"].append(self._val_log_prob)
            self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time)
            # Per-epoch progress line, including the current learning rate;
            # the progress-bar helper below is disabled.
            print(f"Training. Epoch: {self.epoch}. Mean validation log probability: {self._val_log_prob:.4f}. Learning rate: {self.optimizer.param_groups[0]['lr']}")
            # self._maybe_show_progress(self._show_progress_bars, self.epoch)
        self._report_convergence_at_end(self.epoch, stop_after_epochs, max_num_epochs)
        # Update summary.
        self._summary["epochs"].append(self.epoch)
        self._summary["best_validation_log_probs"].append(self._best_val_log_prob)
        # Update tensorboard and summary dict.
        self._summarize(
            round_=self._round,
            x_o=None,
            theta_bank=theta,
            x_bank=x,
        )
        # Update description for progress bar.
        if show_train_summary:
            print(self._describe_round(self._round, self._summary))
        # Return a detached copy so later training does not mutate it.
        return deepcopy(self._neural_net)
    def build_posterior(
        self,
        density_estimator: Optional[TorchModule] = None,
        rejection_sampling_parameters: Optional[Dict[str, Any]] = None,
        sample_with_mcmc: bool = False,
        mcmc_method: str = "slice_np",
        mcmc_parameters: Optional[Dict[str, Any]] = None,
    ) -> DirectPosterior:
        r"""
        Build posterior from the neural density estimator.

        For SNPE, the posterior distribution that is returned here implements the
        following functionality over the raw neural density estimator:

        - correct the calculation of the log probability such that it compensates for
          the leakage.
        - reject samples that lie outside of the prior bounds.
        - alternatively, if leakage is very high (which can happen for multi-round
          SNPE), sample from the posterior with MCMC.

        Args:
            density_estimator: The density estimator that the posterior is based on.
                If `None`, use the latest neural density estimator that was trained.
            rejection_sampling_parameters: Dictionary overriding the default parameters
                for rejection sampling. The following parameters are supported:
                `max_sampling_batch_size` to set the batch size for drawing new
                samples from the candidate distribution, e.g., the posterior. Larger
                batch size speeds up sampling.
            sample_with_mcmc: Whether to sample with MCMC. MCMC can be used to deal
                with high leakage.
            mcmc_method: Method used for MCMC sampling, one of `slice_np`, `slice`,
                `hmc`, `nuts`. Currently defaults to `slice_np` for a custom numpy
                implementation of slice sampling; select `hmc`, `nuts` or `slice` for
                Pyro-based sampling.
            mcmc_parameters: Dictionary overriding the default parameters for MCMC.
                The following parameters are supported: `thin` to set the thinning
                factor for the chain, `warmup_steps` to set the initial number of
                samples to discard, `num_chains` for the number of chains,
                `init_strategy` for the initialisation strategy for chains; `prior` will
                draw init locations from prior, whereas `sir` will use
                Sequential-Importance-Resampling using `init_strategy_num_candidates`
                to find init locations.

        Returns:
            Posterior $p(\theta|x)$ with `.sample()` and `.log_prob()` methods.
        """
        if density_estimator is None:
            density_estimator = self._neural_net
            # If internal net is used device is defined.
            device = self._device
        else:
            # Otherwise, infer it from the device of the net parameters.
            device = next(density_estimator.parameters()).device
        self._posterior = DirectPosterior(
            method_family="snpe",
            neural_net=density_estimator,
            prior=self._prior,
            x_shape=self._x_shape,
            rejection_sampling_parameters=rejection_sampling_parameters,
            sample_with_mcmc=sample_with_mcmc,
            mcmc_method=mcmc_method,
            mcmc_parameters=mcmc_parameters,
            device=device,
        )
        self._posterior._num_trained_rounds = self._round + 1
        # Store models at end of each round: keep an evaluation-mode deep
        # copy so subsequent training cannot mutate banked posteriors.
        self._model_bank.append(deepcopy(self._posterior))
        self._model_bank[-1].net.eval()
        # Return a copy so the caller's posterior is decoupled as well.
        return deepcopy(self._posterior)
    @abstractmethod
    def _log_prob_proposal_posterior(
        self, theta: Tensor, x: Tensor, masks: Tensor, proposal: Optional[Any]
    ) -> Tensor:
        """Return the proposal-corrected log probability (SNPE-variant specific).

        Subclasses (e.g. the SNPE A/B/C implementations) must override this;
        it is used by `_loss` for all rounds after the first.
        """
        raise NotImplementedError
def _loss(
self,
theta: Tensor,
x: Tensor,
masks: Tensor,
proposal: Optional[Any],
calibration_kernel: Callable,
) -> Tensor:
"""Return loss with proposal correction (`round_>0`) or without it (`round_=0`).
The loss is the negative log prob. Irrespective of the round or SNPE method
(A, B, or C), it can be weighted with a calibration kernel.
Returns:
Calibration kernel-weighted negative log prob.
"""
if self._round == 0:
# Use posterior log prob (without proposal correction) for first round.
log_prob = self._neural_net.log_prob(theta, x)
else:
log_prob = self._log_prob_proposal_posterior(theta, x, masks, proposal)
return -(calibration_kernel(x) * log_prob)
    def _check_proposal(self, proposal):
        """
        Check for validity of the provided proposal distribution.

        If the proposal is a `NeuralPosterior`, we check if the default_x is set.
        If the proposal is **not** a `NeuralPosterior`, we warn since it is likely that
        the user simply passed the prior, but this would still trigger atomic loss.

        Raises:
            ValueError: if a proposal is given but no prior was set at init.
        """
        if proposal is not None:
            check_if_proposal_has_default_x(proposal)
            if isinstance(proposal, RestrictedPrior):
                # A RestrictedPrior wrapping anything but our own prior is
                # suspicious in multi-round SNPE; warn but do not fail.
                if proposal._prior is not self._prior:
                    warn(
                        "The proposal you passed is a `RestrictedPrior`, but the "
                        "proposal distribution it uses is not the prior (it can be "
                        "accessed via `RestrictedPrior._prior`). We do not "
                        "recommend to mix the `RestrictedPrior` with multi-round "
                        "SNPE."
                    )
            elif (
                not isinstance(proposal, DirectPosterior)
                and proposal is not self._prior
            ):
                warn(
                    "The proposal you passed is neither the prior nor a "
                    "`NeuralPosterior` object. If you are an expert user and did so "
                    "for research purposes, this is fine. If not, you might be doing "
                    "something wrong: feel free to create an issue on Github."
                )
        elif self._round > 0:
            # NOTE(review): "proprosal" in the message below is a typo in the
            # user-facing string (left untouched here).
            raise ValueError(
                "A proposal was passed but no prior was passed at initialisation. When "
                "running multi-round inference, a prior needs to be specified upon "
                "initialisation. Potential fix: setting the `._prior` attribute or "
                "re-initialisation. If the samples passed to `append_simulations()` "
                "were sampled from the prior, single-round inference can be performed "
                "with `append_simulations(..., proprosal=None)`."
            )
131e9d35a842489755be13c4db29e6dad60f4180 | 1,005 | py | Python | setup.py | DasyDong/python-norepeat | a05c7e0ca1ab61cc3a975bc8fcd15f9d5055ffd7 | [
"MIT"
] | 91 | 2020-01-19T03:46:44.000Z | 2020-11-21T05:58:17.000Z | setup.py | DasyDong/norepeat | a05c7e0ca1ab61cc3a975bc8fcd15f9d5055ffd7 | [
"MIT"
] | null | null | null | setup.py | DasyDong/norepeat | a05c7e0ca1ab61cc3a975bc8fcd15f9d5055ffd7 | [
"MIT"
] | 2 | 2020-01-26T10:27:22.000Z | 2020-10-09T08:11:28.000Z | """Setup script for realpython-reader"""
import os.path
from setuptools import setup, find_packages
# The directory containing this file (resolved so the script works no
# matter the current working directory).
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file, used as the PyPI long description.
with open(os.path.join(HERE, "README.md")) as fid:
    README = fid.read()
# This call to setup() does all the work
# https://packaging.python.org/tutorials/packaging-projects/
setup(
    name="norepeat",
    version="1.1.0",
    description="Less codes make more tools",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/DasyDong/python-norepeat",
    author="Dasy Dong",
    author_email="dasydong@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=['pyenchant'],
    # Installs the `norepeat` console command pointing at the package main.
    entry_points={"console_scripts": ["norepeat=norepeat.__main__:main"]},
)
0b00b7ea8f27c142092d0c1dc45e94adea701a23 | 6,357 | py | Python | portfolios/admin.py | tkanemoto/django-portfolios | 328990d030b3509cf5aeef09c3ae605104a18564 | [
"Apache-2.0"
] | null | null | null | portfolios/admin.py | tkanemoto/django-portfolios | 328990d030b3509cf5aeef09c3ae605104a18564 | [
"Apache-2.0"
] | null | null | null | portfolios/admin.py | tkanemoto/django-portfolios | 328990d030b3509cf5aeef09c3ae605104a18564 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from ordered_model.admin import OrderedTabularInline, OrderedModelAdmin
from .models import *
class CustomSizeMixin(object):
    """Mixin for admin classes that resizes widgets of text-like fields.

    Subclasses override the class attributes to tune textarea
    (``TextField``) and text input (``CharField``) widget sizes.
    """
    custom_textarea_rows = 4
    custom_textarea_cols = 85
    custom_text_input_style = 'width: 20em'
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Apply the configured sizes to the default form field's widget."""
        field = super(CustomSizeMixin, self).formfield_for_dbfield(db_field, **kwargs)
        if field is None:
            # The admin may legitimately produce no form field (e.g. for an
            # excluded field); touching field.widget would then crash.
            return field
        if isinstance(db_field, models.TextField):
            field.widget.attrs.update({
                'cols': self.custom_textarea_cols,
                'rows': self.custom_textarea_rows,
                'class': '',
            })
        if isinstance(db_field, models.CharField):
            field.widget.attrs.update({
                'style': self.custom_text_input_style,
            })
        return field
class CollapsedMixin(object):
    """Mixin that renders the admin inline/fieldset collapsed by default."""
    classes = ['collapse']
class PostInline(CollapsedMixin, CustomSizeMixin, admin.StackedInline):
    """Collapsible stacked editor for Post objects with enlarged widgets."""
    model = Post
    extra = 1
    custom_textarea_rows = 6
    custom_textarea_cols = 100
    custom_text_input_style = 'width: 55em'
class EmbeddedContentInline(CollapsedMixin, CustomSizeMixin, OrderedTabularInline):
    """Collapsible, orderable tabular editor for EmbeddedContent rows."""
    model = EmbeddedContent
    # move_up_down_links renders the reordering controls; order is shown read-only.
    fields = ('content', 'order', 'move_up_down_links',)
    readonly_fields = ('order', 'move_up_down_links',)
    extra = 1
    ordering = ('order',)
    custom_textarea_rows = 4
    custom_textarea_cols = 100
class ProjectInline(CustomSizeMixin, OrderedTabularInline):
    """Orderable tabular editor for Project rows (used on ClientAdmin)."""
    model = Project
    fields = ('client', 'name', 'category', 'roles', 'date', 'url', 'order', 'move_up_down_links',)
    readonly_fields = ('order', 'move_up_down_links',)
    extra = 1
    ordering = ('order',)
class ClientAdmin(OrderedModelAdmin):
    """Admin for Client: per-user ownership with orderable Project inlines."""
    list_display = ('name', 'description', 'order', 'move_up_down_links')
    inlines = [
        ProjectInline
    ]
    def get_urls(self):
        # Prepend the URL patterns contributed by orderable inlines so their
        # reorder endpoints are registered ahead of the default admin URLs.
        urls = super(ClientAdmin, self).get_urls()
        for inline_urls in (inline.get_urls(self)
                            for inline in self.inlines
                            if hasattr(inline, 'get_urls')):
            urls = inline_urls + urls
        return urls
    def get_queryset(self, request):
        # Non-superusers only see the clients they own.
        queryset = super(ClientAdmin, self).get_queryset(request)
        if not request.user.is_superuser:
            queryset = queryset.filter(owner=request.user)
        return queryset
    def save_model(self, request, obj, form, change):
        # Default the owner to the requesting user when the submitted form
        # carries no usable owner (missing key, None, or empty string).
        if form.data.get('owner') in (None, ''):
            obj.owner = request.user
        super(ClientAdmin, self).save_model(request, obj, form, change)
    def get_readonly_fields(self, request, obj=None):
        # Owner is frozen after creation for everyone but superusers.
        if obj is None or request.user.is_superuser:
            return []
        return ['owner']
class TestimonialInline(CollapsedMixin, CustomSizeMixin, OrderedTabularInline):
    """Collapsible, orderable tabular editor for Testimonial rows."""
    model = Testimonial
    fields = ('author', 'title', 'body', 'order', 'move_up_down_links',)
    readonly_fields = ('order', 'move_up_down_links',)
    extra = 1
    ordering = ('order',)
    custom_textarea_rows = 2
    custom_textarea_cols = 60
    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        # Restrict the selectable authors to objects owned by the requesting
        # user; superusers keep the unrestricted queryset.
        if db_field.name == 'author':
            if not request.user.is_superuser:
                kwargs['queryset'] = db_field.remote_field.model._default_manager.filter(owner=request.user)
        return super(TestimonialInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class SocialMediaLinkInline(CollapsedMixin, OrderedTabularInline):
    """Collapsible, orderable tabular editor for SocialMediaLink rows."""
    model = SocialMediaLink
    fields = ('kind', 'url', 'order', 'move_up_down_links',)
    readonly_fields = ('order', 'move_up_down_links',)
    extra = 1
    ordering = ('order',)
class MemberInline(CollapsedMixin, CustomSizeMixin, OrderedTabularInline):
    """Collapsible, orderable tabular editor for Member rows."""
    model = Member
    fields = ('name', 'roles', 'description', 'mugshot', 'order', 'move_up_down_links',)
    readonly_fields = ('order', 'move_up_down_links',)
    extra = 1
    ordering = ('order',)
    custom_textarea_rows = 4
    custom_textarea_cols = 40
    custom_text_input_style = 'width: 10em'
class EventInline(CollapsedMixin, CustomSizeMixin, admin.StackedInline):
    """Collapsible stacked editor for Event objects with widened textareas."""
    model = Event
    extra = 1
    custom_textarea_rows = 4
    custom_textarea_cols = 80
class PageAdmin(CustomSizeMixin, admin.ModelAdmin):
    """Admin for Page: per-user ownership, template-dependent field sets,
    and a collection of orderable/collapsible inlines."""
    inlines = [
        PostInline,
        EventInline,
        EmbeddedContentInline,
        TestimonialInline,
        SocialMediaLinkInline,
        MemberInline,
    ]
    filter_horizontal = ('clients',)
    custom_text_input_style = 'width: 47em'
    def get_urls(self):
        # Prepend the URL patterns contributed by orderable inlines so their
        # reorder endpoints are registered ahead of the default admin URLs.
        urls = super(PageAdmin, self).get_urls()
        for inline_urls in (inline.get_urls(self)
                            for inline in self.inlines
                            if hasattr(inline, 'get_urls')):
            urls = inline_urls + urls
        return urls
    def get_queryset(self, request):
        # Non-superusers only manage the pages they own.
        queryset = super(PageAdmin, self).get_queryset(request)
        if not request.user.is_superuser:
            queryset = queryset.filter(owner=request.user)
        return queryset
    def get_readonly_fields(self, request, obj=None):
        # Slug and owner become immutable after creation, except for superusers.
        if obj is None or request.user.is_superuser:
            return []
        return ['slug', 'owner']
    def get_exclude(self, request, obj=None):
        # Non-superusers never edit these base fields; further fields are
        # hidden depending on which template the page uses.
        if obj is None or request.user.is_superuser:
            return []
        excluded = ['keywords', 'template', 'domain']
        if obj.template != 'band':
            excluded += ['email_shop', 'email_booking', 'youtube_playlist', 'media_background']
        if obj.template != 'composer':
            excluded += ['showreel', 'clients', 'number_of_featured_clients', 'quote', 'quote_citation', 'quote_background',
                         'address', 'news_text']
        return excluded
    def get_field_queryset(self, db, db_field, request):
        """Only show clients belonging to the owner."""
        if db_field.name == 'clients' and not request.user.is_superuser:
            return db_field.remote_field.model._default_manager.filter(owner=request.user)
        return super(PageAdmin, self).get_field_queryset(db, db_field, request)
# Register the standalone admin pages. Testimonial and Project are managed
# via inlines on their parent admins, hence left commented out.
#admin.site.register(Testimonial)
admin.site.register(Role)
#admin.site.register(Project)
admin.site.register(Client, ClientAdmin)
admin.site.register(Page, PageAdmin)
| 31.785 | 118 | 0.64779 |
a0af06bf60a480c1ee98d41009257eafde5ae4cf | 12,942 | py | Python | debinterface/interfacesWriter.py | nMustaki/debinterface | bf42d195e91853d11168511dfc14fd2f5e695093 | [
"BSD-3-Clause"
] | 10 | 2017-05-04T22:46:06.000Z | 2021-03-22T11:19:24.000Z | debinterface/interfacesWriter.py | nMustaki/debinterface | bf42d195e91853d11168511dfc14fd2f5e695093 | [
"BSD-3-Clause"
] | 39 | 2015-06-18T21:36:26.000Z | 2021-07-05T14:43:56.000Z | debinterface/interfacesWriter.py | nMustaki/debinterface | bf42d195e91853d11168511dfc14fd2f5e695093 | [
"BSD-3-Clause"
] | 21 | 2015-06-17T19:04:47.000Z | 2019-10-28T03:11:25.000Z | # -*- coding: utf-8 -*-
# Write interface
from __future__ import print_function, with_statement, absolute_import
import shutil
from collections import defaultdict
import os
from string import Template
from . import toolutils
from .adapter import NetworkAdapter
try:
import typing as tp
except ImportError:
pass
class InterfacesWriter(object):
    """Short lived class that serializes `NetworkAdapter` objects into
    /etc/network/interfaces-style files, with optional backup/restore and
    validation of the written result via `ifup -n`."""
    # Templates for blocks used in /etc/network/interfaces.
    _auto = Template('auto $name\n')
    _hotplug = Template('allow-hotplug $name\n')
    _iface = Template('iface $name $addrFam $source\n')
    _cmd = Template('\t$varient $value\n')
    _comment = Template('# $line\n')
    # Option names recognized by the dedicated writer methods below.
    _addressFields = [
        'address', 'network', 'netmask', 'broadcast',
        'gateway', 'dns-nameservers', 'dns-search'
    ]
    _prepFields = ['pre-up', 'pre-down', 'up', 'down', 'post-up', 'post-down']
    _bridgeFields = ['ports', 'fd', 'hello', 'maxage', 'stp', 'maxwait']
    _plugins = ['hostapd', 'wpa-conf']
    def __init__(self, adapters, interfaces_path, backup_path=None,
                 header_comment=None):
        # type: (tp.List[NetworkAdapter], str, tp.Optional[str], tp.Optional[str])->None
        """ if backup_path is None => no backup """
        self._adapters = adapters
        self._interfaces_path = interfaces_path
        self._backup_path = backup_path
        # Only keep string header comments; anything else becomes None.
        try:
            is_str = isinstance(header_comment, basestring)
        except NameError:
            # Python 3 has no basestring.
            is_str = isinstance(header_comment, str)
        if is_str:
            self._header_comment = header_comment
        else:
            self._header_comment = None
    @property
    def adapters(self):
        # type: ()->tp.List[NetworkAdapter]
        return self._adapters
    @adapters.setter
    def adapters(self, value):
        # type: (tp.List[NetworkAdapter])->None
        self._adapters = value
    def write_interfaces(self):
        # type: ()->None
        """Write all adapters to their interfaces files, backing up first
        and restoring the backups if anything fails validation."""
        adapters_by_path = defaultdict(list)
        for adapter in self._adapters:
            # retrocompat check: adapters may carry no path of their own,
            # in which case they belong in the writer's main file.
            path = adapter.interfaces_path or self._interfaces_path
            # BUGFIX: group under the resolved `path`. Previously the raw
            # `adapter.interfaces_path` was used as the key, so adapters
            # without their own path were grouped under None instead of the
            # main interfaces file.
            adapters_by_path[path].append(adapter)
        self._backup_interfaces(adapters_by_path.keys())
        try:
            for path, adapters in adapters_by_path.items():
                with toolutils.atomic_write(path) as interfaces:
                    # Write any header comments for main file.
                    if path == self._interfaces_path:
                        self._write_header_comment(interfaces)
                    # Loop through the provided networkAdapters and
                    # write the new file.
                    for adapter in adapters:
                        # Get dict of details about the adapter.
                        self._write_adapter(interfaces, adapter)
            self._check_interfaces(self._interfaces_path)
        except Exception:
            # Any error, let's roll back
            self._restore_interfaces(adapters_by_path.keys())
            raise
    def _check_interfaces(self, interfaces_path):
        # type: (str)->None
        """Uses ifup to check interfaces file. If it is not in the
        default place, each interface must be checked one by one.

        Args:
            interfaces_path (string) : the path to interfaces file

        Raises:
            ValueError : if invalid network interfaces
        """
        ret = False
        output = ""
        if not self._adapters:
            return
        if interfaces_path == "/etc/network/interfaces":
            # Do not use long form to increase portability with Busybox
            # -a : act on all interfaces, -n : dry run only
            ret, output = toolutils.safe_subprocess([
                "/sbin/ifup", "-a", "-n"
            ])
        else:
            for adapter in self._adapters:
                # Do not use long form to increase portability with Busybox
                # -n : print out what would happen, but don't do it
                # -i : interfaces file
                ret, output = toolutils.safe_subprocess([
                    "/sbin/ifup", "-n",
                    "-i{0}".format(interfaces_path),
                    adapter.attributes["name"]
                ])
                if not ret:
                    break
        if not ret:
            raise ValueError("Invalid network interfaces file "
                             "written to disk, restoring to previous "
                             "one : {0}".format(output))
    def _write_header_comment(self, interfaces):
        # type: (tp.IO[str])->None
        """Write the optional header comment, prefixing each line that is
        not already a comment with '# ', followed by a blank line."""
        if self._header_comment:
            for line in self._header_comment.split('\n'):
                # Check the beginning of the line for a comment field
                # if it does not exist, add it.
                if line[:2] != "# ":
                    line = self._comment.substitute(line=line)
                else:
                    # split strips the newline, add it back
                    line = line + '\n'
                interfaces.write(line)
            # Create a blank line between comment and start of interfaces
            interfaces.write('\n')
    def _write_adapter(self, interfaces, adapter):
        # type: (tp.IO[str], NetworkAdapter)->None
        """Validate the adapter, then serialize all of its sections."""
        try:
            adapter.validateAll()
        except ValueError as e:
            print(repr(e))
            raise
        ifAttributes = adapter.export()
        self._write_auto(interfaces, adapter, ifAttributes)
        self._write_hotplug(interfaces, adapter, ifAttributes)
        self._write_addrFam(interfaces, adapter, ifAttributes)
        self._write_addressing(interfaces, adapter, ifAttributes)
        self._write_bridge(interfaces, adapter, ifAttributes)
        self._write_plugins(interfaces, adapter, ifAttributes)
        self._write_callbacks(interfaces, adapter, ifAttributes)
        self._write_unknown(interfaces, adapter, ifAttributes)
        interfaces.write("\n")
    def _write_auto(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write if applicable """
        try:
            if adapter.attributes['auto'] is True:
                d = dict(name=ifAttributes['name'])
                interfaces.write(self._auto.substitute(d))
        except KeyError:
            pass
    def _write_hotplug(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write if applicable """
        try:
            if ifAttributes['hotplug'] is True:
                d = dict(name=ifAttributes['name'])
                interfaces.write(self._hotplug.substitute(d))
        except KeyError:
            pass
    def _write_addrFam(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Construct and write the iface declaration.
        The addrFam clause needs a little more processing.
        """
        # Write the source clause.
        # Will not error if omitted. Maybe not the best plan.
        try:
            if (not ifAttributes["name"]
                    or not ifAttributes["addrFam"]
                    or not ifAttributes["source"]):
                raise ValueError("Invalid field content")
            d = dict(name=ifAttributes['name'],
                     addrFam=ifAttributes['addrFam'],
                     source=ifAttributes['source'])
            interfaces.write(self._iface.substitute(d))
        except KeyError:
            pass
    def _write_addressing(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """Write the addressing options (address, netmask, gateway, ...)."""
        for field in self._addressFields:
            try:
                value = ifAttributes[field]
                if value and value != 'None':
                    if isinstance(value, list):
                        d = dict(varient=field,
                                 value=" ".join(ifAttributes[field]))
                    else:
                        d = dict(varient=field, value=ifAttributes[field])
                    interfaces.write(self._cmd.substitute(d))
            # Keep going if a field isn't provided.
            except KeyError:
                pass
    def _write_bridge(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write the bridge information. """
        for field in self._bridgeFields:
            try:
                value = ifAttributes['bridge-opts'][field]
                if value and value != 'None':
                    d = dict(varient="bridge_" + field, value=value)
                    interfaces.write(self._cmd.substitute(d))
            # Keep going if a field isn't provided.
            except KeyError:
                pass
    def _write_callbacks(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write the up, down, pre-up, pre-down, post-up, and post-down
        clauses.
        """
        for field in self._prepFields:
            try:
                for item in ifAttributes[field]:
                    if item and item != 'None':
                        d = dict(varient=field, value=item)
                        interfaces.write(self._cmd.substitute(d))
            except KeyError:
                # Keep going if a field isn't provided.
                pass
    def _write_plugins(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write plugins options, currently hostapd. """
        for field in self._plugins:
            try:
                if field in ifAttributes and ifAttributes[field] != 'None':
                    d = dict(varient=field, value=ifAttributes[field])
                    interfaces.write(self._cmd.substitute(d))
            # Keep going if a field isn't provided.
            except KeyError:
                pass
    def _write_unknown(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write unknowns options """
        try:
            for k, v in ifAttributes['unknown'].items():
                if v:
                    d = dict(varient=k, value=str(v))
                    interfaces.write(self._cmd.substitute(d))
        except (KeyError, ValueError):
            pass
    def _write_sourced_paths(self, interfaces, adapter, ifAttributes):
        # type: (tp.IO[str], NetworkAdapter, tp.Dict[str, tp.Any])->None
        """ Write sourced paths

        NOTE(review): not invoked by `_write_adapter` in this module —
        presumably kept for external callers; confirm before removing.
        """
        for path in ifAttributes.get('sourced_paths', []):
            d = dict(varient="source", value=str(path))
            interfaces.write(self._cmd.substitute(d))
    def _backup_interfaces(self, adapters_paths):
        # type: (tp.List[str])->None
        """Backup each interfaces file into the backup directory, if a
        backup path was configured.

        Raises:
            IOError : if the copy fails and the source file exists
        """
        if not self._backup_path:
            return
        backup_path = self._backup_path
        if os.path.isfile(backup_path):
            backup_path = os.path.dirname(backup_path)
        # For better backward compatibility
        if not os.path.isdir(backup_path):
            os.mkdir(backup_path)
        for adapter_path in adapters_paths:
            try:
                src_path = os.path.join(backup_path, os.path.basename(adapter_path) + ".bak")
                shutil.copy(adapter_path, src_path)
            except IOError as ex:
                # Only raise if source actually exists
                if os.path.exists(adapter_path):
                    raise ex
    def _restore_interfaces(self, adapters_paths):
        # type: (tp.List[str])->None
        """Restore each interfaces file from its backup, if a backup path
        was configured.

        Raises:
            IOError : if the copy fails and the source file exists
        """
        if not self._backup_path:
            return
        for adapter_path in adapters_paths:
            src_path = ''
            try:
                backup_path = self._backup_path
                if os.path.isfile(backup_path):
                    backup_path = os.path.dirname(backup_path)
                src_path = os.path.join(backup_path, os.path.basename(adapter_path) + ".bak")
                shutil.copy(src_path, adapter_path)
            except IOError as ex:
                # Only raise if source actually exists
                if os.path.exists(src_path):
                    raise ex
| 37.842105 | 93 | 0.56251 |
2bce7c31e4c54c77ccc38e2fec63bae7c4359a4c | 976 | py | Python | main.py | Satokken/Kadai1 | 85c16f7d5c6f291648d6c3d282f97179254a094d | [
"MIT"
] | null | null | null | main.py | Satokken/Kadai1 | 85c16f7d5c6f291648d6c3d282f97179254a094d | [
"MIT"
] | null | null | null | main.py | Satokken/Kadai1 | 85c16f7d5c6f291648d6c3d282f97179254a094d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import subprocess
def LEDon():
    """Switch the LED on by writing '1' to the myled0 device node."""
    # shell=True is needed because of the output redirection.
    subprocess.call("echo 1 > /dev/myled0", shell=True)
def LEDoff():
    """Switch the LED off by writing '0' to the myled0 device node."""
    # shell=True is needed because of the output redirection.
    subprocess.call("echo 0 > /dev/myled0", shell=True)
def wait(seconds):
    """Sleep for the given number of seconds between LED transitions.

    Args:
        seconds: Sleep duration in seconds (int or float).
    """
    # Renamed the parameter from the misleading `self` (this is a free
    # function, not a method); all call sites pass it positionally.
    time.sleep(seconds)
def SATO():
    """Blink the letters S-A-T-O in Morse code on the LED.

    Timing (identical to the original hand-unrolled sequence):
      dot  = LED on for 0.1 s, dash = LED on for 0.5 s,
      0.1 s gap between symbols of a letter, 0.5 s gap after each letter.
    """
    DOT_ON = 0.1
    DASH_ON = 0.5
    SYMBOL_GAP = 0.1
    LETTER_GAP = 0.5
    # Morse code for the letters S, A, T, O.
    letters = ["...", ".-", "-", "---"]
    for letter in letters:
        last = len(letter) - 1
        for i, symbol in enumerate(letter):
            LEDon()
            wait(DOT_ON if symbol == "." else DASH_ON)
            LEDoff()
            # Longer pause after the final symbol of a letter.
            wait(LETTER_GAP if i == last else SYMBOL_GAP)
def main():
    """Entry point: blink the SATO pattern once in Morse code."""
    SATO()
if __name__ == "__main__":
    main()
| 12.512821 | 40 | 0.476434 |
b9c3c1cbc7f21b38f02d0df7a2b61c87a64ac6ea | 18,122 | py | Python | tensorflow_federated/python/aggregators/secure.py | Tensorflow-Devs/federated | 5df96d42d72fa43a050df6465271a38175a5fd7a | [
"Apache-2.0"
] | 1,918 | 2019-02-22T21:17:28.000Z | 2022-03-30T14:49:53.000Z | tensorflow_federated/python/aggregators/secure.py | Tensorflow-Devs/federated | 5df96d42d72fa43a050df6465271a38175a5fd7a | [
"Apache-2.0"
] | 999 | 2019-02-22T21:47:44.000Z | 2022-03-31T11:06:42.000Z | tensorflow_federated/python/aggregators/secure.py | Tensorflow-Devs/federated | 5df96d42d72fa43a050df6465271a38175a5fd7a | [
"Apache-2.0"
] | 498 | 2019-02-22T21:17:56.000Z | 2022-03-29T02:54:15.000Z | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for secure summation."""
import collections
import enum
import math
from typing import Optional, Union
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import primitives
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import estimation_process
from tensorflow_federated.python.core.templates import measured_process
# TF dtype for norm/bound values exchanged with clients
# (see `_check_bound_process`).
NORM_TF_TYPE = tf.float32
# TF dtype for counts; presumably client counts — used outside this excerpt.
COUNT_TF_TYPE = tf.int32
# Accepted specifications of a secure-sum threshold: a fixed constant
# (int, float or numpy array) or an adaptive `EstimationProcess`.
ThresholdEstType = Union[int, float, np.ndarray,
                         estimation_process.EstimationProcess]
# Enum for internal tracking of configuration of `SecureSumFactory`.
class _Config(enum.Enum):
  """Whether the factory was configured with integer or float bounds."""
  INT = 1
  FLOAT = 2
def _check_bound_process(bound_process: estimation_process.EstimationProcess,
                         name: str):
  """Checks type properties for estimation process for bounds.
  The process must be an `EstimationProcess` with `next` function of type
  signature (<state@SERVER, NORM_TF_TYPE@CLIENTS> -> state@SERVER), and `report`
  with type signature (state@SERVER -> NORM_TF_TYPE@SERVER).
  Args:
    bound_process: A process to check.
    name: A string name for formatting error messages.
  Raises:
    TypeError: If `bound_process` does not conform to the expected type
      signatures described above.
  """
  py_typecheck.check_type(bound_process, estimation_process.EstimationProcess)
  # `next` must take exactly (state, client_values).
  next_parameter_type = bound_process.next.type_signature.parameter
  if not next_parameter_type.is_struct() or len(next_parameter_type) != 2:
    raise TypeError(f'`{name}.next` must take two arguments but found:\n'
                    f'{next_parameter_type}')
  # Second argument of `next` must accept float32 values placed at CLIENTS.
  float_type_at_clients = computation_types.at_clients(NORM_TF_TYPE)
  if not next_parameter_type[1].is_assignable_from(float_type_at_clients):
    raise TypeError(
        f'Second argument of `{name}.next` must be assignable from '
        f'{float_type_at_clients} but found {next_parameter_type[1]}')
  # `next` must return only the (possibly updated) state.
  next_result_type = bound_process.next.type_signature.result
  if not bound_process.state_type.is_assignable_from(next_result_type):
    raise TypeError(f'Result type of `{name}.next` must consist of state only '
                    f'but found result type:\n{next_result_type}\n'
                    f'while the state type is:\n{bound_process.state_type}')
  # `report` must yield the estimated bound value placed at SERVER.
  report_type = bound_process.report.type_signature.result
  estimated_value_type_at_server = computation_types.at_server(
      next_parameter_type[1].member)
  if not report_type.is_assignable_from(estimated_value_type_at_server):
    raise TypeError(
        f'Report type of `{name}.report` must be assignable from '
        f'{estimated_value_type_at_server} but found {report_type}.')
class SecureSumFactory(factory.UnweightedAggregationFactory):
  """`AggregationProcess` factory for securely summing values.
  The created `tff.templates.AggregationProcess` uses the
  `tff.federated_secure_sum_bitwidth` operator for movement of all values from
  `tff.CLIENTS` to `tff.SERVER`.
  In order for values to be securely summed, their range needs to be known in
  advance and communicated to clients, so that clients can prepare the values in
  a form compatible with the `tff.federated_secure_sum_bitwidth` operator (that
  is, integers in range `[0, 2**b-1]` for some `b`), and for inverse mapping to
  be applied on the server. This will be done as specified by the
  `upper_bound_threshold` and `lower_bound_threshold` constructor arguments,
  with the following options:
  For integer values to be summed, these arguments must be `int` Python
  constants or integer Numpy scalars, and the values during execution will be
  clipped to these thresholds and then securely summed.
  For floating point values to be summed, the values during execution will be
  clipped to these thresholds, then uniformly quantized to integers in the range
  `[0, 2**32-1]`, and then securely summed.
  The `upper_bound_threshold` and `lower_bound_threshold` arguments can in this
  case be either `float` Python constants, float Numpy scalars, or instances of
  `tff.templates.EstimationProcess`, which adapts the thresholds between rounds
  of execution.
  In all cases, it is possible to specify only `upper_bound_threshold`, in which
  case this threshold will be treated as a bound on the absolute value of the
  value to be summed.
  For the case when floating point values are to be securely summed and more
  aggressive quantization is needed (i.e. less than 32 bits), the recommended
  pattern is to use `tff.aggregators.EncodedSumFactory` with this factory class
  as its inner aggregation factory.
  """
  def __init__(self,
               upper_bound_threshold: ThresholdEstType,
               lower_bound_threshold: Optional[ThresholdEstType] = None):
    """Initializes `SecureSumFactory`.
    Args:
      upper_bound_threshold: Either a `int` or `float` Python constant, a Numpy
        scalar, or a `tff.templates.EstimationProcess`, used for determining the
        upper bound before summation.
      lower_bound_threshold: Optional. Either a `int` or `float` Python
        constant, a Numpy scalar, or a `tff.templates.EstimationProcess`, used
        for determining the lower bound before summation. If specified, must be
        the same type as `upper_bound_threshold`.
    Raises:
      TypeError: If `upper_bound_threshold` and `lower_bound_threshold` are not
        instances of one of (`int`, `float` or
        `tff.templates.EstimationProcess`).
      ValueError: If `upper_bound_threshold` is provided as a negative constant.
    """
    py_typecheck.check_type(upper_bound_threshold, ThresholdEstType.__args__)
    if lower_bound_threshold is not None:
      # Both thresholds must be of the same kind (constant vs. process).
      if not isinstance(lower_bound_threshold, type(upper_bound_threshold)):
        raise TypeError(
            f'Provided upper_bound_threshold and lower_bound_threshold '
            f'must have the same types, but found:\n'
            f'type(upper_bound_threshold): {upper_bound_threshold}\n'
            f'type(lower_bound_threshold): {lower_bound_threshold}')
    # The branches below configure four pluggable pieces used by `create`:
    # `_init_fn`, `_get_bounds_from_state`, `_update_state` and (for ints)
    # `_secagg_bitwidth`.
    # Configuration specific for aggregating integer types.
    if _is_integer(upper_bound_threshold):
      self._config_mode = _Config.INT
      if lower_bound_threshold is None:
        # Only the upper bound given: use [-upper, upper].
        _check_positive(upper_bound_threshold)
        lower_bound_threshold = -1 * upper_bound_threshold
      else:
        _check_upper_larger_than_lower(upper_bound_threshold,
                                       lower_bound_threshold)
      self._init_fn = _empty_state
      self._get_bounds_from_state = _create_get_bounds_const(
          upper_bound_threshold, lower_bound_threshold)
      self._update_state = lambda _, __, ___: _empty_state()
      # We must add one because the size of inclusive range [0, threshold_range]
      # is threshold_range + 1. We ensure that threshold_range > 0 above.
      self._secagg_bitwidth = math.ceil(
          math.log2(upper_bound_threshold - lower_bound_threshold + 1))
    # Configuration specific for aggregating floating point types.
    else:
      self._config_mode = _Config.FLOAT
      if _is_float(upper_bound_threshold):
        # Bounds specified as Python constants.
        if lower_bound_threshold is None:
          _check_positive(upper_bound_threshold)
          lower_bound_threshold = -1.0 * upper_bound_threshold
        else:
          _check_upper_larger_than_lower(upper_bound_threshold,
                                         lower_bound_threshold)
        self._get_bounds_from_state = _create_get_bounds_const(
            upper_bound_threshold, lower_bound_threshold)
        self._init_fn = _empty_state
        self._update_state = lambda _, __, ___: _empty_state()
      else:
        # Bounds specified as an EstimationProcess.
        _check_bound_process(upper_bound_threshold, 'upper_bound_threshold')
        if lower_bound_threshold is None:
          # Single adaptive process: lower bound is the negated upper bound.
          self._get_bounds_from_state = _create_get_bounds_single_process(
              upper_bound_threshold)
          self._init_fn = upper_bound_threshold.initialize
          self._update_state = _create_update_state_single_process(
              upper_bound_threshold)
        else:
          # Two independent adaptive processes; state is a pair of states.
          _check_bound_process(lower_bound_threshold, 'lower_bound_threshold')
          self._get_bounds_from_state = _create_get_bounds_two_processes(
              upper_bound_threshold, lower_bound_threshold)
          self._init_fn = _create_initial_state_two_processes(
              upper_bound_threshold, lower_bound_threshold)
          self._update_state = _create_update_state_two_processes(
              upper_bound_threshold, lower_bound_threshold)
  def create(
      self,
      value_type: factory.ValueType) -> aggregation_process.AggregationProcess:
    self._check_value_type_compatible_with_config_mode(value_type)
    @computations.federated_computation(self._init_fn.type_signature.result,
                                       computation_types.FederatedType(
                                           value_type, placements.CLIENTS))
    def next_fn(state, value):
      # Server-side preparation.
      upper_bound, lower_bound = self._get_bounds_from_state(state)
      # Compute min and max *before* clipping and use it to update the state.
      value_max = intrinsics.federated_map(_reduce_nest_max, value)
      value_min = intrinsics.federated_map(_reduce_nest_min, value)
      new_state = self._update_state(state, value_min, value_max)
      # Clips value to [lower_bound, upper_bound] and securely sums it.
      summed_value = self._sum_securely(value, upper_bound, lower_bound)
      # TODO(b/163880757): pass upper_bound and lower_bound through clients.
      measurements = self._compute_measurements(upper_bound, lower_bound,
                                                value_max, value_min)
      return measured_process.MeasuredProcessOutput(new_state, summed_value,
                                                    measurements)
    return aggregation_process.AggregationProcess(self._init_fn, next_fn)
  def _compute_measurements(self, upper_bound, lower_bound, value_max,
                            value_min):
    """Creates measurements to be reported. All values are summed securely."""
    # Per-client 0/1 indicator of whether its max exceeded the upper bound;
    # a secure sum with bitwidth=1 yields the number of clipped clients.
    is_max_clipped = intrinsics.federated_map(
        computations.tf_computation(
            lambda bound, value: tf.cast(bound < value, COUNT_TF_TYPE)),
        (intrinsics.federated_broadcast(upper_bound), value_max))
    max_clipped_count = intrinsics.federated_secure_sum_bitwidth(
        is_max_clipped, bitwidth=1)
    # Same indicator for clients whose min fell below the lower bound.
    is_min_clipped = intrinsics.federated_map(
        computations.tf_computation(
            lambda bound, value: tf.cast(bound > value, COUNT_TF_TYPE)),
        (intrinsics.federated_broadcast(lower_bound), value_min))
    min_clipped_count = intrinsics.federated_secure_sum_bitwidth(
        is_min_clipped, bitwidth=1)
    measurements = collections.OrderedDict(
        secure_upper_clipped_count=max_clipped_count,
        secure_lower_clipped_count=min_clipped_count,
        secure_upper_threshold=upper_bound,
        secure_lower_threshold=lower_bound)
    return intrinsics.federated_zip(measurements)
  def _sum_securely(self, value, upper_bound, lower_bound):
    """Securely sums `value` placed at CLIENTS."""
    if self._config_mode == _Config.INT:
      # Clients clip to [lower, upper] and shift into [0, upper - lower].
      value = intrinsics.federated_map(
          _client_shift, (value, intrinsics.federated_broadcast(upper_bound),
                          intrinsics.federated_broadcast(lower_bound)))
      value = intrinsics.federated_secure_sum_bitwidth(value,
                                                       self._secagg_bitwidth)
      # Server undoes the shift, once per participating client.
      num_summands = intrinsics.federated_sum(_client_one())
      value = intrinsics.federated_map(_server_shift,
                                       (value, lower_bound, num_summands))
      return value
    elif self._config_mode == _Config.FLOAT:
      # Floats are clipped, quantized and summed by the library primitive.
      return primitives.secure_quantized_sum(value, lower_bound, upper_bound)
    else:
      raise ValueError(f'Unexpected internal config type: {self._config_mode}')
  def _check_value_type_compatible_with_config_mode(self, value_type):
    """Raises `TypeError` if `value_type` dtypes do not match the config."""
    py_typecheck.check_type(value_type, factory.ValueType.__args__)
    if self._config_mode == _Config.INT:
      if not type_analysis.is_structure_of_integers(value_type):
        raise TypeError(
            f'The `SecureSumFactory` was configured to work with integer '
            f'dtypes. All values in provided `value_type` hence must be of '
            f'integer dtype. \nProvided value_type: {value_type}')
    elif self._config_mode == _Config.FLOAT:
      if not type_analysis.is_structure_of_floats(value_type):
        raise TypeError(
            f'The `SecureSumFactory` was configured to work with floating '
            f'point dtypes. All values in provided `value_type` hence must be '
            f'of floating point dtype. \nProvided value_type: {value_type}')
    else:
      raise ValueError(f'Unexpected internal config type: {self._config_mode}')
def _check_positive(value):
if value <= 0:
raise ValueError(
f'If only `upper_bound_threshold` is specified as a Python constant, '
f'it must be positive. Its negative will be used as a lower bound '
f'which would be larger than the upper bound. \n'
f'Provided `upper_bound_threshold`: {value}')
def _check_upper_larger_than_lower(upper_bound_threshold,
lower_bound_threshold):
if upper_bound_threshold <= lower_bound_threshold:
raise ValueError(
f'The provided `upper_bound_threshold` must be larger than the '
f'provided `lower_bound_threshold`, but received:\n'
f'`upper_bound_threshold`: {upper_bound_threshold}\n'
f'`lower_bound_threshold`: {lower_bound_threshold}\n')
def _is_integer(value):
is_py_int = isinstance(value, int)
is_np_int = isinstance(value, np.ndarray) and bool(
np.issubdtype(value.dtype, np.integer))
return is_py_int or is_np_int
def _is_float(value):
is_py_float = isinstance(value, float)
is_np_float = isinstance(value, np.ndarray) and bool(
np.issubdtype(value.dtype, np.floating))
return is_py_float or is_np_float
# Returns the maximum scalar value over all tensors in the (possibly nested)
# structure `value`.
@computations.tf_computation()
def _reduce_nest_max(value):
  max_list = tf.nest.map_structure(tf.reduce_max, tf.nest.flatten(value))
  return tf.reduce_max(tf.stack(max_list))
# Returns the minimum scalar value over all tensors in the (possibly nested)
# structure `value`.
@computations.tf_computation()
def _reduce_nest_min(value):
  min_list = tf.nest.map_structure(tf.reduce_min, tf.nest.flatten(value))
  return tf.reduce_min(tf.stack(min_list))
# Clips each tensor in `value` to [lower_bound, upper_bound] and subtracts
# lower_bound, so the result lies in [0, upper_bound - lower_bound] — the
# non-negative range required by `federated_secure_sum_bitwidth`.
@computations.tf_computation()
def _client_shift(value, upper_bound, lower_bound):
  return tf.nest.map_structure(
      lambda v: tf.clip_by_value(v, lower_bound, upper_bound) - lower_bound,
      value)
# Undoes the client-side shift: each of the `num_summands` clients subtracted
# lower_bound before summation, so the server adds back
# lower_bound * num_summands.
@computations.tf_computation()
def _server_shift(value, lower_bound, num_summands):
  return tf.nest.map_structure(
      lambda v: v + (lower_bound * tf.cast(num_summands, lower_bound.dtype)),
      value)
# Returns an empty tuple placed at SERVER; used as the stateless placeholder
# state when bounds are static constants.
@computations.federated_computation()
def _empty_state():
  return intrinsics.federated_value((), placements.SERVER)
def _client_one():
  """Returns the constant 1 (int32) placed at CLIENTS.
  Summing this across clients yields the number of participating clients.
  """
  return intrinsics.federated_eval(
      computations.tf_computation(lambda: tf.constant(1, tf.int32)),
      placements.CLIENTS)
def _create_initial_state_two_processes(upper_bound_process,
                                        lower_bound_process):
  """Returns a no-arg computation initializing both bound processes' states."""
  @computations.federated_computation()
  def initial_state():
    # State is the zipped pair (upper process state, lower process state).
    return intrinsics.federated_zip(
        (upper_bound_process.initialize(), lower_bound_process.initialize()))
  return initial_state
def _create_get_bounds_const(upper_bound, lower_bound):
  """Returns a callable mapping (ignored) state to constant server bounds."""
  def get_bounds(state):
    del state  # Unused.
    return (intrinsics.federated_value(upper_bound, placements.SERVER),
            intrinsics.federated_value(lower_bound, placements.SERVER))
  return get_bounds
def _create_get_bounds_single_process(process):
  """Returns a callable deriving (upper, lower) bounds from one process.
  The lower bound is the negation of the estimated upper bound.
  """
  def get_bounds(state):
    upper_bound = process.report(state)
    lower_bound = intrinsics.federated_map(
        computations.tf_computation(lambda x: x * -1.0), upper_bound)
    return upper_bound, lower_bound
  return get_bounds
def _create_get_bounds_two_processes(upper_bound_process, lower_bound_process):
  """Returns a callable reporting bounds from two independent processes.
  `state` is the pair (upper process state, lower process state).
  """
  def get_bounds(state):
    upper_bound = upper_bound_process.report(state[0])
    lower_bound = lower_bound_process.report(state[1])
    return upper_bound, lower_bound
  return get_bounds
def _create_update_state_single_process(process):
  """Returns a state-update callable feeding max(|min|, |max|) to `process`."""
  def update_state(state, value_min, value_max):
    # The single process estimates a bound on the absolute value, so it is
    # updated with the larger magnitude of the observed min/max.
    abs_max_fn = computations.tf_computation(
        lambda x, y: tf.maximum(tf.abs(x), tf.abs(y)))
    abs_value_max = intrinsics.federated_map(abs_max_fn, (value_min, value_max))
    return process.next(state, abs_value_max)
  return update_state
def _create_update_state_two_processes(upper_bound_process,
                                       lower_bound_process):
  """Returns a state-update callable advancing both bound processes.
  The upper process observes the round's max, the lower process its min.
  """
  def update_state(state, value_min, value_max):
    return intrinsics.federated_zip(
        (upper_bound_process.next(state[0], value_max),
         lower_bound_process.next(state[1], value_min)))
  return update_state
| 42.64 | 80 | 0.728672 |
4796a152237de314d26fcd5ac0e2ee0433b4ddf1 | 1,315 | py | Python | check_#2/main.py | thomasvincent/MonitoringPlugins | b7662e78fe931ea5a5156d9fe15d647db6feb404 | [
"Apache-2.0"
] | null | null | null | check_#2/main.py | thomasvincent/MonitoringPlugins | b7662e78fe931ea5a5156d9fe15d647db6feb404 | [
"Apache-2.0"
] | null | null | null | check_#2/main.py | thomasvincent/MonitoringPlugins | b7662e78fe931ea5a5156d9fe15d647db6feb404 | [
"Apache-2.0"
] | null | null | null | #!/opt/zenoss/bin/python
import os
import sys
import re
import urllib2
import time
from xml.dom.minidom import *
def help():
print "Usage:"
print "main.py YOUR_URL"
sys.exit(3)
if len(sys.argv) != 2:
help()
else:
url = sys.argv[1]
try:
if "http://" not in url:
url="http://"+url
request = urllib2.Request(url)
response = urllib2.urlopen(request)
content = response.read()
####################################
######## Script main logic #########
####################################
try:
xmldoc = parseString(content)
reservedPrefixes_FLAG = False
for each in xmldoc.childNodes:
if (each.nodeName.lower() == "reservedPrefixes".lower()):
reservedPrefixes_FLAG = True
if (reservedPrefixes_FLAG):
print "CHECK #2 OK - reserverPrefixes has been found."
sys.exit(0)
else:
print "CHECK #2 CRITICAL - Wrong response received: reservedPrefixes not found."
sys.exit(2)
except xml.parsers.expat.ExpatError, e:
print "CHECK #2 CRITICAL - XML ERROR: ",e
####################################
except (urllib2.HTTPError, urllib2.URLError), e:
print "CHECK #2 CRITICAL HTTP/URL Error:- "+str(e)
sys.exit(2) | 27.978723 | 89 | 0.540684 |
92fe73c25a6e55e424016d0d8c272a8b56c321d0 | 7,807 | py | Python | airflow/providers/apache/kylin/operators/kylin_cube.py | daringanitch/airflow-7-14-docker | af19b126e94876c371553f6a7cfae6b1102f79fd | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2020-02-16T18:13:54.000Z | 2021-01-01T03:22:19.000Z | airflow/providers/apache/kylin/operators/kylin_cube.py | daringanitch/airflow-7-14-docker | af19b126e94876c371553f6a7cfae6b1102f79fd | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-15T22:39:05.000Z | 2020-10-15T22:39:05.000Z | airflow/providers/apache/kylin/operators/kylin_cube.py | daringanitch/airflow-7-14-docker | af19b126e94876c371553f6a7cfae6b1102f79fd | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-27T17:17:23.000Z | 2020-12-27T17:17:23.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from datetime import datetime
from typing import Optional
from kylinpy import kylinpy
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kylin.hooks.kylin import KylinHook
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class KylinCubeOperator(BaseOperator):
    """
    Submits a build/refresh/merge (or other cube-management) request to
    Apache Kylin and can optionally track the resulting job until it reaches
    a terminal state, making it easier to orchestrate Kylin jobs.
    For more detailed information see
    `Apache Kylin <http://kylin.apache.org/>`_
    :param kylin_conn_id: The connection id as configured in Airflow administration.
    :type kylin_conn_id: str
    :param project: kylin project name, this param will overwrite the project in kylin_conn_id:
    :type project: str
    :param cube: kylin cube name
    :type cube: str
    :param dsn: (dsn , dsn url of kylin connection ,which will overwrite kylin_conn_id.
        for example: kylin://ADMIN:KYLIN@sandbox/learn_kylin?timeout=60&is_debug=1)
    :type dsn: str
    :param command: (kylin command include 'build', 'merge', 'refresh', 'delete',
        'build_streaming', 'merge_streaming', 'refresh_streaming', 'disable', 'enable',
        'purge', 'clone', 'drop'.
        build - use /kylin/api/cubes/{cubeName}/build rest api,and buildType is 'BUILD',
        and you should give start_time and end_time
        refresh - use build rest api,and buildType is 'REFRESH'
        merge - use build rest api,and buildType is 'MERGE'
        build_streaming - use /kylin/api/cubes/{cubeName}/build2 rest api,and buildType is 'BUILD'
        and you should give offset_start and offset_end
        refresh_streaming - use build2 rest api,and buildType is 'REFRESH'
        merge_streaming - use build2 rest api,and buildType is 'MERGE'
        delete - delete segment, and you should give segment_name value
        disable - disable cube
        enable - enable cube
        purge - purge cube
        clone - clone cube,new cube name is {cube_name}_clone
        drop - drop cube)
    :type command: str
    :param start_time: build segment start time
    :type start_time: Optional[str]
    :param end_time: build segment end time
    :type end_time: Optional[str]
    :param offset_start: streaming build segment start time
    :type offset_start: Optional[str]
    :param offset_end: streaming build segment end time
    :type offset_end: Optional[str]
    :param segment_name: segment name
    :type segment_name: str
    :param is_track_job: (whether to track job status. if value is True,will track job until
        job status is in("FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL",
        "STOPPED") or timeout)
    :type is_track_job: bool
    :param interval: track job status,default value is 60s
    :type interval: int
    :param timeout: timeout value,default value is 1 day,60 * 60 * 24 s
    :type timeout: int
    :param eager_error_status: (jobs error status,if job status in this list ,this task will be error.
        default value is tuple(["ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"]))
    :type eager_error_status: tuple
    """
    # Fields rendered by Airflow's Jinja templating before execution.
    template_fields = (
        'project',
        'cube',
        'dsn',
        'command',
        'start_time',
        'end_time',
        'segment_name',
        'offset_start',
        'offset_end',
    )
    ui_color = '#E79C46'
    # Commands that create a trackable job (job tracking only applies to these).
    build_command = {
        'fullbuild',
        'build',
        'merge',
        'refresh',
        'build_streaming',
        'merge_streaming',
        'refresh_streaming',
    }
    # Terminal job states; polling stops once one of these is reached.
    jobs_end_status = {"FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"}
    # pylint: disable=too-many-arguments,inconsistent-return-statements
    @apply_defaults
    def __init__(
        self,
        *,
        kylin_conn_id: str = 'kylin_default',
        project: Optional[str] = None,
        cube: Optional[str] = None,
        dsn: Optional[str] = None,
        command: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        offset_start: Optional[str] = None,
        offset_end: Optional[str] = None,
        segment_name: Optional[str] = None,
        is_track_job: bool = False,
        interval: int = 60,
        timeout: int = 60 * 60 * 24,
        eager_error_status=("ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"),
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.kylin_conn_id = kylin_conn_id
        self.project = project
        self.cube = cube
        self.dsn = dsn
        self.command = command
        self.start_time = start_time
        self.end_time = end_time
        self.segment_name = segment_name
        self.offset_start = offset_start
        self.offset_end = offset_end
        self.is_track_job = is_track_job
        self.interval = interval
        self.timeout = timeout
        self.eager_error_status = eager_error_status
        # Normalized (upper-cased) copy of the eager error statuses for the
        # comparison against statuses returned by the hook.
        self.jobs_error_status = [stat.upper() for stat in eager_error_status]
    def execute(self, context):
        _hook = KylinHook(kylin_conn_id=self.kylin_conn_id, project=self.project, dsn=self.dsn)
        # Validate the command against the set supported by kylinpy.
        _support_invoke_command = kylinpy.CubeSource.support_invoke_command
        if self.command.lower() not in _support_invoke_command:
            raise AirflowException(
                'Kylin:Command {} can not match kylin command list {}'.format(
                    self.command, _support_invoke_command
                )
            )
        # start_time/end_time are epoch milliseconds (hence the / 1000).
        kylinpy_params = {
            'start': datetime.fromtimestamp(int(self.start_time) / 1000) if self.start_time else None,
            'end': datetime.fromtimestamp(int(self.end_time) / 1000) if self.end_time else None,
            'name': self.segment_name,
            'offset_start': int(self.offset_start) if self.offset_start else None,
            'offset_end': int(self.offset_end) if self.offset_end else None,
        }
        rsp_data = _hook.cube_run(self.cube, self.command.lower(), **kylinpy_params)
        if self.is_track_job and self.command.lower() in self.build_command:
            # Poll the submitted job until it reaches a terminal state or the
            # configured timeout elapses.
            started_at = timezone.utcnow()
            job_id = rsp_data.get("uuid")
            if job_id is None:
                raise AirflowException("kylin job id is None")
            self.log.info("kylin job id: %s", job_id)
            job_status = None
            while job_status not in self.jobs_end_status:
                if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
                    raise AirflowException(f'kylin job {job_id} timeout')
                time.sleep(self.interval)
                job_status = _hook.get_job_status(job_id)
                self.log.info('Kylin job status is %s ', job_status)
                if job_status in self.jobs_error_status:
                    raise AirflowException(f'Kylin job {job_id} status {job_status} is error ')
        # Only push the response to XCom when requested by the task config.
        if self.do_xcom_push:
            return rsp_data
19b2a42b709da729eb72e2a21e547c1cc18dae7a | 14,469 | py | Python | pydm/widgets/qtplugins.py | jbellister-slac/pydm | 8b8a4c638c4310cd08cfe0ebcc20da6b70f94130 | [
"BSD-3-Clause-LBNL"
] | null | null | null | pydm/widgets/qtplugins.py | jbellister-slac/pydm | 8b8a4c638c4310cd08cfe0ebcc20da6b70f94130 | [
"BSD-3-Clause-LBNL"
] | null | null | null | pydm/widgets/qtplugins.py | jbellister-slac/pydm | 8b8a4c638c4310cd08cfe0ebcc20da6b70f94130 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import logging
import os
from ..utilities.iconfont import IconFont
from .archiver_time_plot import PyDMArchiverTimePlot
from .byte import PyDMByteIndicator
from .checkbox import PyDMCheckbox
from .datetime import PyDMDateTimeEdit, PyDMDateTimeLabel
from .drawing import (PyDMDrawingArc, PyDMDrawingChord, PyDMDrawingCircle,
PyDMDrawingEllipse, PyDMDrawingImage,
PyDMDrawingIrregularPolygon, PyDMDrawingLine,
PyDMDrawingPie, PyDMDrawingPolygon, PyDMDrawingPolyline,
PyDMDrawingRectangle, PyDMDrawingTriangle)
from .embedded_display import PyDMEmbeddedDisplay
from .enum_button import PyDMEnumButton
from .enum_combo_box import PyDMEnumComboBox
from .frame import PyDMFrame
from .image import PyDMImageView
from .label import PyDMLabel
from .line_edit import PyDMLineEdit
from .logdisplay import PyDMLogDisplay
from .pushbutton import PyDMPushButton
from .qtplugin_base import (WidgetCategory, get_widgets_from_entrypoints,
qtplugin_factory)
from .qtplugin_extensions import (ArchiveTimeCurveEditorExtension,
BasicSettingsExtension, RulesExtension,
ScatterCurveEditorExtension, SymbolExtension,
TimeCurveEditorExtension,
WaveformCurveEditorExtension)
from .related_display_button import PyDMRelatedDisplayButton
from .scale import PyDMScaleIndicator
from .scatterplot import PyDMScatterPlot
from .shell_command import PyDMShellCommand
from .slider import PyDMSlider
from .spinbox import PyDMSpinbox
from .symbol import PyDMSymbol
from .waveformtable import PyDMWaveformTable
from .scale import PyDMScaleIndicator
from .analog_indicator import PyDMAnalogIndicator
from .timeplot import PyDMTimePlot
from .archiver_time_plot import PyDMArchiverTimePlot
from .waveformplot import PyDMWaveformPlot
from .scatterplot import PyDMScatterPlot
from .tab_bar_qtplugin import TabWidgetPlugin
from .template_repeater import PyDMTemplateRepeater
from .terminator import PyDMTerminator
from .timeplot import PyDMTimePlot
from .waveformplot import PyDMWaveformPlot
from .waveformtable import PyDMWaveformTable
logger = logging.getLogger(__name__)
# Shared icon-font instance used to give each designer plugin an icon.
ifont = IconFont()
# Designer extensions common to nearly all PyDM widgets.
BASE_EXTENSIONS = [BasicSettingsExtension, RulesExtension]
# Label plugin
PyDMLabelPlugin = qtplugin_factory(PyDMLabel, group=WidgetCategory.DISPLAY,
                                   extensions=BASE_EXTENSIONS,
                                   icon=ifont.icon("tag"))
# Time Plot plugin
PyDMTimePlotPlugin = qtplugin_factory(PyDMTimePlot, group=WidgetCategory.PLOT,
                                      extensions=[TimeCurveEditorExtension,
                                                  RulesExtension],
                                      icon=ifont.icon("chart-line"))
# In order to keep the archiver functionality invisible to users who do not have access to an instance of the
# archiver appliance, only load this if the user has the associated environment variable set
if "PYDM_ARCHIVER_URL" in os.environ:
    # Time Plot with archiver appliance support plugin
    PyDMArchiverTimePlotPlugin = qtplugin_factory(PyDMArchiverTimePlot, group=WidgetCategory.PLOT,
                                                  extensions=[ArchiveTimeCurveEditorExtension,
                                                              RulesExtension],
                                                  icon=ifont.icon("chart-line"))
# Waveform Plot plugin
PyDMWaveformPlotPlugin = qtplugin_factory(PyDMWaveformPlot,
                                          group=WidgetCategory.PLOT,
                                          extensions=[
                                              WaveformCurveEditorExtension,
                                              RulesExtension],
                                          icon=ifont.icon("wave-square"))
# Scatter Plot plugin
PyDMScatterPlotPlugin = qtplugin_factory(PyDMScatterPlot,
                                         group=WidgetCategory.PLOT,
                                         extensions=[
                                             ScatterCurveEditorExtension,
                                             RulesExtension],
                                         icon=ifont.icon("project-diagram"))
# Byte plugin
PyDMByteIndicatorPlugin = qtplugin_factory(PyDMByteIndicator,
                                           group=WidgetCategory.DISPLAY,
                                           extensions=BASE_EXTENSIONS,
                                           icon=ifont.icon("ellipsis-v"))
# Checkbox plugin
PyDMCheckboxPlugin = qtplugin_factory(PyDMCheckbox, group=WidgetCategory.INPUT,
                                      extensions=BASE_EXTENSIONS,
                                      icon=ifont.icon("check-square"))
# Date/Time plugins
PyDMDateTimeEditPlugin = qtplugin_factory(PyDMDateTimeEdit,
                                          group=WidgetCategory.INPUT,
                                          extensions=BASE_EXTENSIONS,
                                          icon=ifont.icon("calendar-minus"))
PyDMDateTimeLabelPlugin = qtplugin_factory(PyDMDateTimeLabel,
                                           group=WidgetCategory.DISPLAY,
                                           extensions=BASE_EXTENSIONS,
                                           icon=ifont.icon("calendar-alt"))
# Drawing plugins
PyDMDrawingArcPlugin = qtplugin_factory(PyDMDrawingArc,
                                        group=WidgetCategory.DRAWING,
                                        extensions=BASE_EXTENSIONS,
                                        icon=ifont.icon("circle-notch"))
PyDMDrawingChordPlugin = qtplugin_factory(PyDMDrawingChord,
                                          group=WidgetCategory.DRAWING,
                                          extensions=BASE_EXTENSIONS,
                                          icon=ifont.icon("moon"))
PyDMDrawingCirclePlugin = qtplugin_factory(PyDMDrawingCircle,
                                           group=WidgetCategory.DRAWING,
                                           extensions=BASE_EXTENSIONS,
                                           icon=ifont.icon("circle"))
PyDMDrawingEllipsePlugin = qtplugin_factory(PyDMDrawingEllipse,
                                            group=WidgetCategory.DRAWING,
                                            extensions=BASE_EXTENSIONS,
                                            icon=ifont.icon("ellipsis-h"))
PyDMDrawingImagePlugin = qtplugin_factory(PyDMDrawingImage,
                                          group=WidgetCategory.DRAWING,
                                          extensions=BASE_EXTENSIONS,
                                          icon=ifont.icon("image"))
PyDMDrawingLinePlugin = qtplugin_factory(PyDMDrawingLine,
                                         group=WidgetCategory.DRAWING,
                                         extensions=BASE_EXTENSIONS,
                                         icon=ifont.icon("minus"))
PyDMDrawingPiePlugin = qtplugin_factory(PyDMDrawingPie,
                                        group=WidgetCategory.DRAWING,
                                        extensions=BASE_EXTENSIONS,
                                        icon=ifont.icon("pizza-slice"))
PyDMDrawingRectanglePlugin = qtplugin_factory(PyDMDrawingRectangle,
                                              group=WidgetCategory.DRAWING,
                                              extensions=BASE_EXTENSIONS,
                                              icon=ifont.icon("border-style"))
PyDMDrawingTrianglePlugin = qtplugin_factory(PyDMDrawingTriangle,
                                             group=WidgetCategory.DRAWING,
                                             extensions=BASE_EXTENSIONS,
                                             icon=ifont.icon("caret-up"))
PyDMDrawingPolygonPlugin = qtplugin_factory(PyDMDrawingPolygon,
                                            group=WidgetCategory.DRAWING,
                                            extensions=BASE_EXTENSIONS,
                                            icon=ifont.icon("draw-polygon"))
PyDMDrawingPolylinePlugin = qtplugin_factory(PyDMDrawingPolyline,
                                             group=WidgetCategory.DRAWING,
                                             extensions=BASE_EXTENSIONS,
                                             icon=ifont.icon("share-alt"))
PyDMDrawingIrregularPolygonPlugin = qtplugin_factory(PyDMDrawingIrregularPolygon,
                                                     group=WidgetCategory.DRAWING,
                                                     extensions=BASE_EXTENSIONS,
                                                     icon=ifont.icon("draw-polygon"))
# Embedded Display plugin
PyDMEmbeddedDisplayPlugin = qtplugin_factory(PyDMEmbeddedDisplay,
                                             group=WidgetCategory.CONTAINER,
                                             extensions=BASE_EXTENSIONS,
                                             icon=ifont.icon("layer-group"))
# Enum Button plugin
PyDMEnumButtonPlugin = qtplugin_factory(PyDMEnumButton,
                                        group=WidgetCategory.INPUT,
                                        extensions=BASE_EXTENSIONS,
                                        icon=ifont.icon("bars"))
# Enum Combobox plugin
PyDMEnumComboBoxPlugin = qtplugin_factory(PyDMEnumComboBox,
                                          group=WidgetCategory.INPUT,
                                          extensions=BASE_EXTENSIONS,
                                          icon=ifont.icon("list-ol"))
# Frame plugin
PyDMFramePlugin = qtplugin_factory(PyDMFrame, group=WidgetCategory.CONTAINER,
                                   is_container=True,
                                   extensions=BASE_EXTENSIONS,
                                   icon=ifont.icon("expand"))
# Image plugin
PyDMImageViewPlugin = qtplugin_factory(PyDMImageView,
                                       group=WidgetCategory.DISPLAY,
                                       extensions=BASE_EXTENSIONS,
                                       icon=ifont.icon("camera"))
# Line Edit plugin
PyDMLineEditPlugin = qtplugin_factory(PyDMLineEdit, group=WidgetCategory.INPUT,
                                      extensions=BASE_EXTENSIONS,
                                      icon=ifont.icon("edit"))
# Log Viewer
PyDMLogDisplayPlugin = qtplugin_factory(PyDMLogDisplay,
                                        group=WidgetCategory.DISPLAY,
                                        extensions=BASE_EXTENSIONS,
                                        icon=ifont.icon("clipboard"))
# Push Button plugin
PyDMPushButtonPlugin = qtplugin_factory(PyDMPushButton,
                                        group=WidgetCategory.INPUT,
                                        extensions=BASE_EXTENSIONS,
                                        icon=ifont.icon("mouse"))
# Related Display Button plugin
PyDMRelatedDisplayButtonPlugin = qtplugin_factory(PyDMRelatedDisplayButton,
                                                  group=WidgetCategory.DISPLAY,
                                                  extensions=BASE_EXTENSIONS,
                                                  icon=ifont.icon(
                                                      "window-maximize"))
# Shell Command plugin
PyDMShellCommandPlugin = qtplugin_factory(PyDMShellCommand,
                                          group=WidgetCategory.INPUT,
                                          extensions=BASE_EXTENSIONS,
                                          icon=ifont.icon("terminal"))
# Slider plugin
PyDMSliderPlugin = qtplugin_factory(PyDMSlider, group=WidgetCategory.INPUT,
                                    extensions=BASE_EXTENSIONS,
                                    icon=ifont.icon("sliders-h"))
# Spinbox plugin
PyDMSpinboxplugin = qtplugin_factory(PyDMSpinbox, group=WidgetCategory.INPUT,
                                     extensions=BASE_EXTENSIONS,
                                     icon=ifont.icon("sort-numeric-up"))
# Scale Indicator plugin
PyDMScaleIndicatorPlugin = qtplugin_factory(PyDMScaleIndicator,
                                            group=WidgetCategory.DISPLAY,
                                            extensions=BASE_EXTENSIONS,
                                            icon=ifont.icon("level-up-alt")
                                            )
# Analog Indicator plugin
PyDMAnalogIndicatorPlugin = qtplugin_factory(PyDMAnalogIndicator,
                                             group=WidgetCategory.DISPLAY,
                                             extensions=BASE_EXTENSIONS,
                                             icon=ifont.icon("level-up-alt")
                                             )
# Symbol plugin
PyDMSymbolPlugin = qtplugin_factory(PyDMSymbol, group=WidgetCategory.DISPLAY,
                                    extensions=[SymbolExtension,
                                                RulesExtension],
                                    icon=ifont.icon("icons"))
# Waveform Table plugin
PyDMWaveformTablePlugin = qtplugin_factory(PyDMWaveformTable,
                                           group=WidgetCategory.INPUT,
                                           extensions=BASE_EXTENSIONS,
                                           icon=ifont.icon("table"))
# Tab Widget plugin
# NOTE: TabWidgetPlugin is instantiated directly rather than produced by
# qtplugin_factory like the widgets above.
PyDMTabWidgetPlugin = TabWidgetPlugin(extensions=BASE_EXTENSIONS)
# Template Repeater plugin
PyDMTemplateRepeaterPlugin = qtplugin_factory(PyDMTemplateRepeater,
                                              group=WidgetCategory.CONTAINER,
                                              extensions=BASE_EXTENSIONS,
                                              icon=ifont.icon("align-justify"))
# Terminator Widget plugin
PyDMTerminatorPlugin = qtplugin_factory(PyDMTerminator,
                                        group=WidgetCategory.MISC,
                                        extensions=BASE_EXTENSIONS)
# **********************************************
# NOTE: Add in new PyDM widgets above this line.
# **********************************************
# Add in designer widget plugins from other classes via entrypoints:
globals().update(**get_widgets_from_entrypoints())
| 50.768421 | 109 | 0.532932 |
590dba3fc6d9257cecaa7ca7152a6fba1885a3fd | 14,921 | py | Python | organizations/backends/defaults.py | hlongmore/django-organizations | 6c4313057d3d38627870f560e0ac0faf7551264a | [
"BSD-2-Clause"
] | null | null | null | organizations/backends/defaults.py | hlongmore/django-organizations | 6c4313057d3d38627870f560e0ac0faf7551264a | [
"BSD-2-Clause"
] | null | null | null | organizations/backends/defaults.py | hlongmore/django-organizations | 6c4313057d3d38627870f560e0ac0faf7551264a | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2018, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Backend classes should provide common interface
"""
import email.utils
import inspect
import uuid
try:
from typing import ClassVar # noqa
except ImportError:
# thanks Python 3.5
from typing import Any as ClassVar # noqa
from typing import Optional # noqa
from typing import Text # noqa
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.contrib.auth import login
from django.core.mail import EmailMessage
from django.http import Http404
from django.shortcuts import redirect
from django.shortcuts import render
from django.template import loader
from django.utils.translation import ugettext as _
from organizations.backends.forms import UserRegistrationForm
from organizations.backends.forms import org_registration_form
from organizations.backends.tokens import RegistrationTokenGenerator
from organizations.compat import reverse
from organizations.utils import create_organization
from organizations.utils import default_org_model
from organizations.utils import model_field_attr
class BaseBackend(object):
    """
    Base backend class for registering and inviting users to an organization.

    Subclasses provide the URL patterns (``get_urls``), the success URL, a
    ``form_class`` attribute, and the email template attributes referenced by
    ``send_reminder`` (``reminder_subject`` / ``reminder_body``).
    """

    # Templates used by ``activate_view`` (and subclasses) to render the
    # registration form and the post-activation success page.
    registration_form_template = "organizations/register_form.html"
    activation_success_template = "organizations/register_success.html"

    def __init__(self, org_model=None, namespace=None):
        # type: (Optional[ClassVar], Optional[Text]) -> None
        """Resolve the user model and organization model for this backend.

        :param org_model: organization model class; defaults to the project's
            configured organization model.
        :param namespace: optional URL namespace prepended by
            ``namespace_preface``.
        """
        self.user_model = get_user_model()
        self.org_model = org_model or default_org_model()
        self.namespace = namespace

    def namespace_preface(self):
        """Return ``"<namespace>:"`` for URL reversing, or '' if unset."""
        return "" if not self.namespace else "{}:".format(self.namespace)

    def get_urls(self):
        # Subclasses must return their list of URL patterns.
        raise NotImplementedError

    def get_success_url(self):
        """Will return the class's `success_url` attribute unless overridden"""
        raise NotImplementedError

    def get_form(self, **kwargs):
        """Returns the form for registering or inviting a user"""
        if not hasattr(self, "form_class"):
            raise AttributeError(_("You must define a form_class"))
        return self.form_class(**kwargs)

    def get_token(self, user, **kwargs):
        """Returns a unique token for the given user"""
        return RegistrationTokenGenerator().make_token(user)

    def get_username(self):
        """
        Returns a UUID-based 'random' and unique username.

        This is required data for user models with a username field.
        """
        # Truncate the UUID to the user model's username max_length.
        return str(uuid.uuid4())[
            :model_field_attr(self.user_model, "username", "max_length")
        ]

    def activate_organizations(self, user):
        """
        Activates the related organizations for the user.

        It only activates the related organizations by model type - that is, if
        there are multiple types of organizations then only organizations in
        the provided model class are activated.
        """
        try:
            relation_name = self.org_model().user_relation_name
        except TypeError:
            # No org_model specified, raises a TypeError because NoneType is
            # not callable. This the most sensible default:
            relation_name = "organizations_organization"
        organization_set = getattr(user, relation_name)
        for org in organization_set.filter(is_active=False):
            org.is_active = True
            org.save()

    def activate_view(self, request, user_id, token):
        """
        View function that activates the given User by setting `is_active` to
        true if the provided information is verified.

        Raises Http404 when the user is already active, does not exist, or the
        token does not validate (all reported as an expired URL).
        """
        try:
            user = self.user_model.objects.get(id=user_id, is_active=False)
        except self.user_model.DoesNotExist:
            raise Http404(_("Your URL may have expired."))
        if not RegistrationTokenGenerator().check_token(user, token):
            raise Http404(_("Your URL may have expired."))
        form = self.get_form(
            data=request.POST or None, files=request.FILES or None, instance=user
        )
        if form.is_valid():
            form.instance.is_active = True
            user = form.save()
            # The form stores the raw password; hash it properly here.
            user.set_password(form.cleaned_data["password"])
            user.save()
            self.activate_organizations(user)
            # Re-authenticate so the user is logged in with the new password.
            user = authenticate(
                username=form.cleaned_data["username"],
                password=form.cleaned_data["password"],
            )
            login(request, user)
            return redirect(self.get_success_url())
        return render(request, self.registration_form_template, {"form": form})

    def send_reminder(self, user, sender=None, **kwargs):
        """Sends a reminder email to the specified user.

        No-op (returns False) for already-active users. ``reminder_subject``
        and ``reminder_body`` template paths are supplied by subclasses.
        """
        if user.is_active:
            return False
        token = RegistrationTokenGenerator().make_token(user)
        kwargs.update({"token": token})
        self.email_message(
            user, self.reminder_subject, self.reminder_body, sender, **kwargs
        ).send()

    def email_message(
        self,
        user,
        subject_template,
        body_template,
        sender=None,
        message_class=EmailMessage,
        **kwargs
    ):
        """
        Returns an email message for a new user. This can be easily overridden.
        For instance, to send an HTML message, use the EmailMultiAlternatives
        message_class and attach the additional content.
        """
        if sender:
            # Prefer the sender's full name for display; fall back to username.
            try:
                display_name = sender.get_full_name()
            except (AttributeError, TypeError):
                display_name = sender.get_username()
            # From: uses the site-wide address, but shown under the sender's name.
            from_email = "%s <%s>" % (
                display_name, email.utils.parseaddr(settings.DEFAULT_FROM_EMAIL)[1]
            )
            reply_to = "%s <%s>" % (display_name, sender.email)
        else:
            from_email = settings.DEFAULT_FROM_EMAIL
            reply_to = from_email

        headers = {"Reply-To": reply_to}
        kwargs.update({"sender": sender, "user": user})

        subject_template = loader.get_template(subject_template)
        body_template = loader.get_template(body_template)
        subject = subject_template.render(
            kwargs
        ).strip()  # Remove stray newline characters
        body = body_template.render(kwargs)
        return message_class(subject, body, from_email, [user.email], headers=headers)
class RegistrationBackend(BaseBackend):
    """
    A backend for allowing new users to join the site by creating a new user
    associated with a new organization.
    """
    # NOTE this backend stands to be simplified further, as email verification
    # should be beyond the purview of this app
    activation_subject = "organizations/email/activation_subject.txt"
    activation_body = "organizations/email/activation_body.html"
    reminder_subject = "organizations/email/reminder_subject.txt"
    reminder_body = "organizations/email/reminder_body.html"
    form_class = UserRegistrationForm

    def get_success_url(self):
        """Redirect target after a completed registration."""
        return reverse("registration_success")

    def get_urls(self):
        """URL patterns: success page, token activation, and creation form."""
        return [
            url(r"^complete/$", view=self.success_view, name="registration_success"),
            url(
                r"^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
                view=self.activate_view,
                name="registration_register",
            ),
            url(r"^$", view=self.create_view, name="registration_create"),
        ]

    @property
    def urls(self):
        # (patterns, namespace) tuple suitable for Django's include().
        return self.get_urls(), self.namespace or "registration"

    def register_by_email(self, email, sender=None, request=None, **kwargs):
        """
        Returns a User object filled with dummy data and not active, and sends
        an invitation email.
        """
        try:
            user = self.user_model.objects.get(email=email)
        except self.user_model.DoesNotExist:
            # Placeholder account: random username/password, inactive until
            # the user completes activation.
            user = self.user_model.objects.create(
                username=self.get_username(),
                email=email,
                password=self.user_model.objects.make_random_password(),
            )
            user.is_active = False
            user.save()
        self.send_activation(user, sender, **kwargs)
        return user

    def send_activation(self, user, sender=None, **kwargs):
        """
        Invites a user to join the site.

        Returns False (and sends nothing) if the user is already active.
        """
        if user.is_active:
            return False
        token = self.get_token(user)
        kwargs.update({"token": token})
        self.email_message(
            user, self.activation_subject, self.activation_body, sender, **kwargs
        ).send()

    def create_view(self, request):
        """
        Initiates the organization and user account creation process
        """
        # is_authenticated was a method before Django 1.10 and a property
        # afterwards; the TypeError fallback supports both.
        try:
            if request.user.is_authenticated():
                return redirect("organization_add")
        except TypeError:
            if request.user.is_authenticated:
                return redirect("organization_add")
        form = org_registration_form(self.org_model)(request.POST or None)
        if form.is_valid():
            try:
                user = self.user_model.objects.get(email=form.cleaned_data["email"])
            except self.user_model.DoesNotExist:
                user = self.user_model.objects.create(
                    username=self.get_username(),
                    email=form.cleaned_data["email"],
                    password=self.user_model.objects.make_random_password(),
                )
                user.is_active = False
                user.save()
            else:
                # An account already exists for this email; send the visitor
                # to the regular organization creation flow instead.
                return redirect("organization_add")
            organization = create_organization(
                user,
                form.cleaned_data["name"],
                form.cleaned_data["slug"],
                is_active=False,
            )
            return render(
                request,
                self.activation_success_template,
                {"user": user, "organization": organization},
            )
        return render(request, self.registration_form_template, {"form": form})

    def success_view(self, request):
        """Render the static registration-success page."""
        return render(request, self.activation_success_template, {})
class InvitationBackend(BaseBackend):
    """
    A backend for inviting new users to join the site as members of an
    organization.
    """
    notification_subject = "organizations/email/notification_subject.txt"
    notification_body = "organizations/email/notification_body.html"
    invitation_subject = "organizations/email/invitation_subject.txt"
    invitation_body = "organizations/email/invitation_body.html"
    reminder_subject = "organizations/email/reminder_subject.txt"
    reminder_body = "organizations/email/reminder_body.html"
    form_class = UserRegistrationForm

    def get_success_url(self):
        """Redirect target after a completed invitation registration."""
        # TODO get this url name from an attribute
        return reverse("organization_list")

    def get_urls(self):
        """URL patterns handled by this backend (token activation only)."""
        # TODO enable naming based on a model?
        return [
            url(
                r"^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
                view=self.activate_view,
                name="invitations_register",
            )
        ]

    def invite_by_email(self, email, sender=None, request=None, **kwargs):
        """Creates an inactive user with the information we know and then sends
        an invitation email for that user to complete registration.

        If your project uses email in a different way then you should make to
        extend this method as it only checks the `email` attribute for Users.
        """
        try:
            user = self.user_model.objects.get(email=email)
        except self.user_model.DoesNotExist:
            # TODO break out user creation process
            # ``inspect.getargspec`` was deprecated since Python 3.0 and
            # removed in 3.11; prefer ``getfullargspec`` when available while
            # staying importable on legacy interpreters that lack it.
            arg_spec = getattr(inspect, "getfullargspec", inspect.getargspec)
            if "username" in arg_spec(self.user_model.objects.create_user).args:
                user = self.user_model.objects.create(
                    username=self.get_username(),
                    email=email,
                    password=self.user_model.objects.make_random_password(),
                )
            else:
                # Custom user model without a username field.
                user = self.user_model.objects.create(
                    email=email, password=self.user_model.objects.make_random_password()
                )
            user.is_active = False
            user.save()
        self.send_invitation(user, sender, **kwargs)
        return user

    def send_invitation(self, user, sender=None, **kwargs):
        """An intermediary function for sending an invitation email that
        selects the templates, generating the token, and ensuring that the user
        has not already joined the site.

        Returns True when an email was sent, False for already-active users.
        """
        if user.is_active:
            return False
        token = self.get_token(user)
        kwargs.update({"token": token})
        self.email_message(
            user, self.invitation_subject, self.invitation_body, sender, **kwargs
        ).send()
        return True

    def send_notification(self, user, sender=None, **kwargs):
        """
        An intermediary function for sending an notification email informing
        a pre-existing, active user that they have been added to a new
        organization.

        Returns True when an email was sent, False for inactive users.
        """
        if not user.is_active:
            return False
        self.email_message(
            user, self.notification_subject, self.notification_body, sender, **kwargs
        ).send()
        return True
| 38.755844 | 91 | 0.644796 |
66246c199be872659965111c70b496d35b3546c4 | 6,428 | py | Python | resources/lib/common/misc_utils.py | kevenli/plugin.video.netflix | ed94e1ba88141ba3336d380f1e7a2e5665fa0a9a | [
"MIT"
] | null | null | null | resources/lib/common/misc_utils.py | kevenli/plugin.video.netflix | ed94e1ba88141ba3336d380f1e7a2e5665fa0a9a | [
"MIT"
] | null | null | null | resources/lib/common/misc_utils.py | kevenli/plugin.video.netflix | ed94e1ba88141ba3336d380f1e7a2e5665fa0a9a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Miscellaneous utility functions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from future.utils import iteritems
try: # Python 2
from itertools import imap as map # pylint: disable=redefined-builtin
except ImportError:
pass
# try: # Python 3
# from io import StringIO
# except ImportError: # Python 2
# from StringIO import StringIO
try: # Python 3
from urllib.parse import quote, urlencode
except ImportError: # Python 2
from urllib import urlencode
from urllib2 import quote
from resources.lib.globals import g
def find(value_to_find, attribute, search_space):
    """Return the first entry of *search_space* whose *attribute* matches.

    Raises KeyError when no entry matches *value_to_find*.
    """
    matches = (entry for entry in search_space
               if entry[attribute] == value_to_find)
    for entry in matches:
        return entry
    raise KeyError('Metadata for {} does not exist'.format(value_to_find))
def find_episode_metadata(videoid, metadata):
    """Find metadata for a specific episode within a show metadata dict.

    Returns an ``(episode, season)`` tuple; a KeyError from ``find``
    propagates when the season or episode id is not present.
    """
    season = find(int(videoid.seasonid), 'id', metadata['seasons'])
    return (find(int(videoid.episodeid), 'id', season.get('episodes', {})),
            season)
def get_class_methods(class_item=None):
    """
    Returns the class methods of a given class object.

    :param class_item: Class item to introspect
    :type class_item: object
    :returns: list -- Class methods
    """
    from types import FunctionType
    # ``dict.items()`` works on both Python 2 and 3, so the third-party
    # ``future.utils.iteritems`` shim is unnecessary here.
    return [name for name, member in class_item.__dict__.items()
            if isinstance(member, FunctionType)]
def build_url(pathitems=None, videoid=None, params=None, mode=None):
    """Build a plugin URL from pathitems and query parameters. Add videoid to the path if it's present."""
    if not (pathitems or videoid):
        raise ValueError('Either pathitems or videoid must be set.')
    path = '{netloc}/{path}/{qs}'.format(
        netloc=g.BASE_URL,  # add-on base URL from the plugin globals
        path=_encode_path(mode, pathitems, videoid),
        qs=_encode_params(params))
    return path


def _expand_mode(mode):
    # Wrap the optional mode string in a list so it joins into the path.
    return [mode] if mode else []


def _expand_videoid(videoid):
    # VideoId objects render themselves to a list of path components.
    return videoid.to_path() if videoid else []


def _encode_path(mode, pathitems, videoid):
    # Join mode + pathitems + videoid components and percent-encode
    # the UTF-8 result for use in a URL path.
    return quote(
        '/'.join(_expand_mode(mode) +
                 (pathitems or []) +
                 _expand_videoid(videoid)).encode('utf-8'))


def _encode_params(params):
    # Render the query string including the leading '?', or '' when empty.
    return ('?' + urlencode(params)) if params else ''
def is_numeric(string):
    """Return True when *string* parses as a base-10 integer, else False."""
    try:
        int(string)
        return True
    except ValueError:
        return False
def strp(value, form):
    """
    Helper function to safely create datetime objects from strings.

    Falls back to the Unix epoch (1970-01-01 UTC) whenever parsing fails.

    :return: datetime - parsed datetime object
    """
    # pylint: disable=broad-except
    from datetime import datetime
    def_value = datetime.utcfromtimestamp(0)
    try:
        return datetime.strptime(value, form)
    except TypeError:
        # Python bug https://bugs.python.org/issue27400
        # (datetime.strptime can raise TypeError in threaded/embedded use);
        # re-parse via time.strptime and rebuild the datetime manually.
        try:
            from time import strptime
            return datetime(*(strptime(value, form)[0:6]))
        except ValueError:
            return def_value
    except Exception:
        return def_value
# def compress_data(data):
# """GZIP and b64 encode data"""
# out = StringIO()
# with gzip.GzipFile(fileobj=out, mode='w') as outh:
# outh.write(data)
# return base64.standard_b64encode(out.getvalue())
def merge_dicts(dict_to_merge, merged_dict):
    """Recursively merge the contents of dict_to_merge into merged_dict.

    Values that are already present in merged_dict will be overwritten if they
    are also present in dict_to_merge. ``merged_dict`` is mutated in place and
    also returned for convenience.
    """
    # ``dict.items()`` works on both Python 2 and 3, so the third-party
    # ``future.utils.iteritems`` shim is unnecessary here.
    for key, value in dict_to_merge.items():
        if isinstance(merged_dict.get(key), dict):
            # Both sides hold a dict for this key: merge one level deeper.
            merge_dicts(value, merged_dict[key])
        else:
            merged_dict[key] = value
    return merged_dict
def compare_dicts(dict_a, dict_b, excluded_keys=None):
    """Compare two dicts with the same keys, optionally skipping some keys."""
    skip = [] if excluded_keys is None else excluded_keys
    for key in dict_a:
        if key in skip:
            continue
        if dict_a[key] != dict_b[key]:
            return False
    return True
def chunked_list(seq, chunk_len):
    """Yield consecutive slices of *seq*, each at most *chunk_len* long."""
    offsets = range(0, len(seq), chunk_len)
    for offset in offsets:
        chunk = seq[offset:offset + chunk_len]
        yield chunk
def any_value_except(mapping, excluded_keys):
    """Return a value from *mapping* whose key is not in *excluded_keys*.

    Raises StopIteration if there are no keys other than the excluded ones.
    """
    for key in mapping:
        if key not in excluded_keys:
            return mapping[key]
    raise StopIteration
def enclose_quotes(content):
    """Return *content* wrapped in double quotes."""
    return ''.join(('"', content, '"'))


def is_edge_esn(esn):
    """Return True if the esn is an EDGE esn (``NFCDIE-02-`` prefix)."""
    return esn[:10] == 'NFCDIE-02-'
def is_minimum_version(version, min_version):
    """Return True if *version* is equal to or greater than *min_version*."""
    current = [int(part) for part in version.split('.')]
    minimum = [int(part) for part in min_version.split('.')]
    return current >= minimum


def is_less_version(version, max_version):
    """Return True if *version* is strictly lower than *max_version*."""
    current = [int(part) for part in version.split('.')]
    ceiling = [int(part) for part in max_version.split('.')]
    return current < ceiling
def make_list(arg):
    """Return *arg* unchanged if it is a list, wrap any other value in a
    single-element list, and map None to an empty list."""
    if isinstance(arg, list):
        return arg
    if arg is None:
        return []
    return [arg]
def convert_seconds_to_hms_str(time):
    """Format a duration in seconds as a zero-padded ``HH:MM:SS`` string."""
    hours, remainder = divmod(time, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{:02d}:{:02d}:{:02d}'.format(int(hours), int(minutes), int(seconds))
def remove_html_tags(raw_html):
    """Strip anything that looks like an HTML/XML tag from *raw_html*."""
    import re
    tag_pattern = re.compile('<.*?>')
    return tag_pattern.sub('', raw_html)
def censure(value, length=3):
    """Replace the last *length* characters of *value* with asterisks.

    Falsy values (empty string, None) are returned unchanged.
    """
    if not value:
        return value
    mask = '*' * length
    return '{}{}'.format(value[:-length], mask)
def run_threaded(non_blocking, target_func, *args, **kwargs):
    """Run *target_func* inline (returning its result) or, when
    *non_blocking* is true, on a freshly started thread (returning None)."""
    if non_blocking:
        from threading import Thread
        worker = Thread(target=target_func, args=args, kwargs=kwargs)
        worker.start()
        return None
    return target_func(*args, **kwargs)
| 29.897674 | 116 | 0.667704 |
169ffd4564add75ebc606fb1f688d3cdff0f1873 | 5,056 | py | Python | openpeerpower/components/ring/camera.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | openpeerpower/components/ring/camera.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | openpeerpower/components/ring/camera.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """This component provides support to the Ring Door Bell camera."""
import asyncio
from datetime import timedelta
from itertools import chain
import logging
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
import requests
from openpeerpower.components.camera import Camera
from openpeerpower.components.ffmpeg import DATA_FFMPEG
from openpeerpower.const import ATTR_ATTRIBUTION
from openpeerpower.core import callback
from openpeerpower.helpers.aiohttp_client import async_aiohttp_proxy_stream
from openpeerpower.util import dt as dt_util
from . import ATTRIBUTION, DOMAIN
from .entity import RingEntityMixin
FORCE_REFRESH_INTERVAL = timedelta(minutes=45)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(opp, config_entry, async_add_entities):
    """Set up a Ring Door Bell and StickUp Camera."""
    devices = opp.data[DOMAIN][config_entry.entry_id]["devices"]

    cams = []
    # Create a camera entity for every doorbot (owned or shared) and
    # stick-up camera associated with this config entry.
    for camera in chain(
        devices["doorbots"], devices["authorized_doorbots"], devices["stickup_cams"]
    ):
        # Devices without an active Ring subscription cannot provide
        # recordings, so no camera entity is created for them.
        if not camera.has_subscription:
            continue

        cams.append(RingCam(config_entry.entry_id, opp.data[DATA_FFMPEG], camera))

    async_add_entities(cams)
class RingCam(RingEntityMixin, Camera):
    """An implementation of a Ring Door Bell camera."""

    def __init__(self, config_entry_id, ffmpeg, device):
        """Initialize a Ring Door Bell camera."""
        super().__init__(config_entry_id, device)
        self._name = self._device.name
        self._ffmpeg = ffmpeg
        # Most recent history event for this device (dict) or None.
        self._last_event = None
        # Id and recording URL of the last fetched video.
        self._last_video_id = None
        self._video_url = None
        # Start in the past so the first async_update refreshes the URL.
        self._expires_at = dt_util.utcnow() - FORCE_REFRESH_INTERVAL

    async def async_added_to_opp(self):
        """Register callbacks."""
        await super().async_added_to_opp()

        await self.ring_objects["history_data"].async_track_device(
            self._device, self._history_update_callback
        )

    async def async_will_remove_from_opp(self):
        """Disconnect callbacks."""
        await super().async_will_remove_from_opp()

        self.ring_objects["history_data"].async_untrack_device(
            self._device, self._history_update_callback
        )

    @callback
    def _history_update_callback(self, history_data):
        """Call update method."""
        if history_data:
            # Newest event first; schedule an update to fetch its recording.
            self._last_event = history_data[0]
            self.async_schedule_update_op_state(True)
        else:
            # No history: drop all cached recording state.
            self._last_event = None
            self._last_video_id = None
            self._video_url = None
            self._expires_at = dt_util.utcnow()
            self.async_write_op_state()

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._device.id

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            "video_url": self._video_url,
            "last_video_id": self._last_video_id,
        }

    async def async_camera_image(self):
        """Return a still image response from the camera.

        Returns None when no recording URL is cached yet.
        """
        ffmpeg = ImageFrame(self._ffmpeg.binary)

        if self._video_url is None:
            return

        image = await asyncio.shield(
            ffmpeg.get_image(
                self._video_url,
                output_format=IMAGE_JPEG,
            )
        )
        return image

    async def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from the camera.

        Returns None when no recording URL is cached yet.
        """
        if self._video_url is None:
            return

        stream = CameraMjpeg(self._ffmpeg.binary)
        await stream.open_camera(self._video_url)

        try:
            stream_reader = await stream.get_reader()
            return await async_aiohttp_proxy_stream(
                self.opp,
                request,
                stream_reader,
                self._ffmpeg.ffmpeg_stream_content_type,
            )
        finally:
            # Always close the ffmpeg process, even if proxying fails.
            await stream.close()

    async def async_update(self):
        """Update camera entity and refresh attributes."""
        if self._last_event is None:
            return

        # The recording may still be processing on Ring's side.
        if self._last_event["recording"]["status"] != "ready":
            return

        utcnow = dt_util.utcnow()
        # Reuse the cached URL until it expires or a newer event arrives.
        if self._last_video_id == self._last_event["id"] and utcnow <= self._expires_at:
            return

        try:
            # recording_url is a blocking library call; run it off the loop.
            video_url = await self.opp.async_add_executor_job(
                self._device.recording_url, self._last_event["id"]
            )
        except requests.Timeout:
            _LOGGER.warning(
                "Time out fetching recording url for camera %s", self.entity_id
            )
            video_url = None

        if video_url:
            self._last_video_id = self._last_event["id"]
            self._video_url = video_url
            self._expires_at = FORCE_REFRESH_INTERVAL + utcnow
| 30.829268 | 88 | 0.640427 |
09640b79b130f16ea5f24b7b7233d97655ce0f70 | 7,788 | py | Python | examples/cnn/main.py | codecaution/Hetu | e278732c2fe3554c8d576585f5bcbf79ade31b68 | [
"Apache-2.0"
] | null | null | null | examples/cnn/main.py | codecaution/Hetu | e278732c2fe3554c8d576585f5bcbf79ade31b68 | [
"Apache-2.0"
] | null | null | null | examples/cnn/main.py | codecaution/Hetu | e278732c2fe3554c8d576585f5bcbf79ade31b68 | [
"Apache-2.0"
] | 3 | 2021-11-29T13:47:48.000Z | 2022-03-03T02:00:43.000Z | import hetu as ht
import models
import numpy as np
import argparse
import logging
from time import time
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def print_rank0(msg):
    # Log only from the worker whose global device_id is 0, so distributed
    # runs do not emit duplicated output.
    if device_id == 0:
        logger.info(msg)
if __name__ == "__main__":
    # argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, required=True,
                        help='model to be tested')
    parser.add_argument('--dataset', type=str, required=True,
                        help='dataset to be trained on')
    parser.add_argument('--batch-size', type=int,
                        default=128, help='batch size')
    parser.add_argument('--learning-rate', type=float,
                        default=0.1, help='learning rate')
    parser.add_argument('--opt', type=str, default='sgd',
                        help='optimizer to be used, default sgd; sgd / momentum / adagrad / adam')
    parser.add_argument('--num-epochs', type=int,
                        default=10, help='epoch number')
    parser.add_argument('--gpu', type=int, default=0,
                        help='gpu to be used, -1 means cpu')
    parser.add_argument('--validate', action='store_true',
                        help='whether to use validation')
    parser.add_argument('--timing', action='store_true',
                        help='whether to time the training phase')
    parser.add_argument('--comm-mode', default=None, help='communication mode')
    args = parser.parse_args()

    if args.comm_mode is not None:
        args.comm_mode = args.comm_mode.lower()
    assert args.comm_mode in (None, 'allreduce', 'ps', 'hybrid')

    # Resolve the model builder from the models package by name.
    # NOTE(review): eval() on a CLI argument; the whitelist assert above
    # limits it to known model names.
    args.model = args.model.lower()
    assert args.model in ['alexnet', 'cnn_3_layers', 'lenet', 'logreg', 'lstm', 'mlp', 'resnet18', 'resnet34', 'rnn', 'vgg16', 'vgg19'], \
        'Model not supported!'
    model = eval('models.' + args.model)

    args.dataset = args.dataset.lower()
    assert args.dataset in ['mnist', 'cifar10', 'cifar100', 'imagenet']

    # Select the optimizer.
    args.opt = args.opt.lower()
    assert args.opt in ['sgd', 'momentum', 'nesterov',
                        'adagrad', 'adam'], 'Optimizer not supported!'

    if args.opt == 'sgd':
        opt = ht.optim.SGDOptimizer(learning_rate=args.learning_rate)
    elif args.opt == 'momentum':
        opt = ht.optim.MomentumOptimizer(learning_rate=args.learning_rate)
    elif args.opt == 'nesterov':
        opt = ht.optim.MomentumOptimizer(
            learning_rate=args.learning_rate, nesterov=True)
    elif args.opt == 'adagrad':
        opt = ht.optim.AdaGradOptimizer(
            learning_rate=args.learning_rate, initial_accumulator_value=0.1)
    else:
        opt = ht.optim.AdamOptimizer(learning_rate=args.learning_rate)

    # data loading
    if args.dataset == 'mnist':
        datasets = ht.data.mnist()
        train_set_x, train_set_y = datasets[0]
        valid_set_x, valid_set_y = datasets[1]
        test_set_x, test_set_y = datasets[2]
        # train_set_x: (50000, 784), train_set_y: (50000, 10)
        # valid_set_x: (10000, 784), valid_set_y: (10000, 10)
        # x_shape = (args.batch_size, 784)
        # y_shape = (args.batch_size, 10)
    elif args.dataset == 'cifar10':
        train_set_x, train_set_y, valid_set_x, valid_set_y = ht.data.normalize_cifar(
            num_class=10)
        if args.model == "mlp":
            # MLP consumes flat vectors, so flatten the image tensors.
            train_set_x = train_set_x.reshape(train_set_x.shape[0], -1)
            valid_set_x = valid_set_x.reshape(valid_set_x.shape[0], -1)
        # train_set_x: (50000, 3, 32, 32), train_set_y: (50000, 10)
        # valid_set_x: (10000, 3, 32, 32), valid_set_y: (10000, 10)
        # x_shape = (args.batch_size, 3, 32, 32)
        # y_shape = (args.batch_size, 10)
    elif args.dataset == 'cifar100':
        train_set_x, train_set_y, valid_set_x, valid_set_y = ht.data.normalize_cifar(
            num_class=100)
        # train_set_x: (50000, 3, 32, 32), train_set_y: (50000, 100)
        # valid_set_x: (10000, 3, 32, 32), valid_set_y: (10000, 100)
    else:
        # 'imagenet' passes the assert above but is not wired up here.
        raise NotImplementedError

    # model definition
    x = ht.dataloader_op([
        ht.Dataloader(train_set_x, args.batch_size, 'train'),
        ht.Dataloader(valid_set_x, args.batch_size, 'validate'),
    ])
    y_ = ht.dataloader_op([
        ht.Dataloader(train_set_y, args.batch_size, 'train'),
        ht.Dataloader(valid_set_y, args.batch_size, 'validate'),
    ])
    # These architectures take an explicit class count for CIFAR-100.
    if args.model in ['resnet18', 'resnet34', 'vgg16', 'vgg19'] and args.dataset == 'cifar100':
        loss, y = model(x, y_, 100)
    else:
        loss, y = model(x, y_)

    train_op = opt.minimize(loss)
    eval_nodes = {'train': [loss, y, y_, train_op], 'validate': [loss, y, y_]}

    # Build the executor: single-device, or distributed data-parallel when a
    # communication mode was requested.
    if args.comm_mode is None:
        if args.gpu < 0:
            ctx = ht.cpu()
        else:
            ctx = ht.gpu(args.gpu)
        executor = ht.Executor(eval_nodes, ctx=ctx)
    else:
        strategy = ht.dist.DataParallel(args.comm_mode)
        executor = ht.Executor(eval_nodes, dist_strategy=strategy)
    n_train_batches = executor.get_batch_num('train')
    n_valid_batches = executor.get_batch_num('validate')

    # Expose the worker rank to print_rank0 (rank is None for local runs).
    global device_id
    device_id = executor.rank
    if device_id is None:
        device_id = 0
    print_rank0("Training {} on HETU".format(args.model))
    print_rank0('Use {} Optimizer.'.format(args.opt))
    print_rank0('Use data {}.'.format(args.dataset))

    # training
    print_rank0("Start training loop...")
    running_time = 0
    # One extra pass (num_epochs + 1); epoch 0 is excluded from the total
    # running time below, effectively serving as warm-up.
    for i in range(args.num_epochs + 1):
        print_rank0("Epoch %d" % i)
        loss_all = 0
        batch_num = 0
        if args.timing:
            start = time()
        correct_predictions = []
        for minibatch_index in range(n_train_batches):
            loss_val, predict_y, y_val, _ = executor.run(
                'train', eval_node_list=[loss, y, y_, train_op])
            # Loss for this minibatch
            predict_y = predict_y.asnumpy()
            y_val = y_val.asnumpy()
            loss_all += loss_val.asnumpy()
            batch_num += 1
            # Predict accuracy for this minibatch
            correct_prediction = np.equal(
                np.argmax(y_val, 1),
                np.argmax(predict_y, 1)).astype(np.float32)
            correct_predictions.extend(correct_prediction)
        loss_all /= batch_num
        accuracy = np.mean(correct_predictions)
        print_rank0("Train loss = %f" % loss_all)
        print_rank0("Train accuracy = %f" % accuracy)

        if args.timing:
            end = time()
            during_time = end - start
            print_rank0("Running time of current epoch = %fs" % (during_time))
            if i != 0:
                running_time += during_time

        if args.validate:
            val_loss_all = 0
            batch_num = 0
            correct_predictions = []
            for minibatch_index in range(n_valid_batches):
                loss_val, valid_y_predicted, y_val = executor.run(
                    'validate', eval_node_list=[loss, y, y_], convert_to_numpy_ret_vals=True)
                val_loss_all += loss_val
                batch_num += 1
                correct_prediction = np.equal(
                    np.argmax(y_val, 1),
                    np.argmax(valid_y_predicted, 1)).astype(np.float32)
                correct_predictions.extend(correct_prediction)
            val_loss_all /= batch_num
            accuracy = np.mean(correct_predictions)
            print_rank0("Validation loss = %f" % val_loss_all)
            print_rank0("Validation accuracy = %f" % accuracy)

    print_rank0("*"*50)
    print_rank0("Running time of total %d epoch = %fs" %
                (args.num_epochs, running_time))
269d17e0a22b3759d231a25781b5039cbf3f1202 | 661 | py | Python | Labs/lab4/l4e1.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | Labs/lab4/l4e1.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | Labs/lab4/l4e1.py | felixchiasson/ITI1520 | 4208904bf7576433313524ebd1c1bdb9f49277f2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
###############################################################################
# File Name : l4e1.py
# Created By : Félix Chiasson (7138723)
# Creation Date : [2015-10-06 11:30]
# Last Modified : [2015-10-06 11:35]
# Description : Trouver les erreurs logiques et corrigez les
###############################################################################
compteur = 1  # The counter should start at 1, not 0
while (compteur <= 10):  # Must stop once the counter reaches 10
    print(compteur)
    compteur = compteur + 1
| 47.214286 | 80 | 0.394856 |
36c3432108cdc974d23da853df7f25ff611a469e | 1,495 | py | Python | bluebottle/bb_accounts/utils.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/bb_accounts/utils.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/bb_accounts/utils.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | import re
from django.utils.http import int_to_base36
from django.contrib.auth.tokens import default_token_generator
from bluebottle.clients import properties
from bluebottle.clients.utils import tenant_url, tenant_name
from bluebottle.members.messages import AccountActivationMessage
def send_welcome_mail(user=None):
    """Compose and send the welcome/activation email for *user* and mark
    the user record as having received it."""
    # Imported lazily; presumably to avoid an import cycle with the members
    # app — TODO confirm.
    from bluebottle.members.models import MemberPlatformSettings

    settings = MemberPlatformSettings.objects.get()
    context = {
        'email': user.email,
        'site': tenant_url(),
        'site_name': tenant_name(),
        'user': user,
        'first_name': user.first_name,
        'contact_email': properties.CONTACT_EMAIL,
        'closed_site': settings.closed,
        'LANGUAGE_CODE': user.primary_language,
    }

    # If there is no password and no remote_id (SSO) then use the
    # welcome + password template, and then set a random password
    if not user.password and not user.remote_id:
        # Token + uid let the recipient set their own password via the
        # standard Django password-reset token machinery.
        context.update({
            'token': default_token_generator.make_token(user),
            'uid': int_to_base36(user.pk),
        })

    msg = AccountActivationMessage(user, context=context)
    msg.compose_and_send()

    # Record that the welcome email went out so it is not sent again.
    user.welcome_email_is_sent = True
    user.save()
def valid_email(email=None):
    """Return True when the argument is a non-empty string that looks like
    an email address (something@something.something), else False."""
    if not email:
        return False
    pat = re.compile(r"[^@]+@[^@]+\.[^@]+")
    matched = pat.match(email)
    return True if matched else False
| 29.313725 | 72 | 0.683612 |
b59f5b1c622f6ad998a3822b369af4a41b794c13 | 19,772 | py | Python | pypy/rlib/test/test_libffi.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/rlib/test/test_libffi.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/rlib/test/test_libffi.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | import sys
import py
from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong
from pypy.rlib.test.test_clibffi import BaseFfiTest, get_libm_name, make_struct_ffitype_e
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rpython.lltypesystem.ll2ctypes import ALLOCATED
from pypy.rlib.libffi import (CDLL, Func, get_libc_name, ArgChain, types,
IS_32_BIT, array_getitem, array_setitem)
from pypy.rlib.libffi import (struct_getfield_int, struct_setfield_int,
struct_getfield_longlong, struct_setfield_longlong,
struct_getfield_float, struct_setfield_float,
struct_getfield_singlefloat, struct_setfield_singlefloat)
class TestLibffiMisc(BaseFfiTest):
CDLL = CDLL
def test_argchain(self):
chain = ArgChain()
assert chain.numargs == 0
chain2 = chain.arg(42)
assert chain2 is chain
assert chain.numargs == 1
intarg = chain.first
assert chain.last is intarg
assert intarg.intval == 42
chain.arg(123.45)
assert chain.numargs == 2
assert chain.first is intarg
assert intarg.next is chain.last
floatarg = intarg.next
assert floatarg.floatval == 123.45
def test_wrong_args(self):
# so far the test passes but for the wrong reason :-), i.e. because
# .arg() only supports integers and floats
chain = ArgChain()
x = lltype.malloc(lltype.GcStruct('xxx'))
y = lltype.malloc(lltype.GcArray(rffi.SIGNED), 3)
z = lltype.malloc(lltype.Array(rffi.SIGNED), 4, flavor='raw')
py.test.raises(TypeError, "chain.arg(x)")
py.test.raises(TypeError, "chain.arg(y)")
py.test.raises(TypeError, "chain.arg(z)")
lltype.free(z, flavor='raw')
def test_library_open(self):
lib = self.get_libc()
del lib
assert not ALLOCATED
def test_library_get_func(self):
lib = self.get_libc()
ptr = lib.getpointer('fopen', [], types.void)
py.test.raises(KeyError, lib.getpointer, 'xxxxxxxxxxxxxxx', [], types.void)
del ptr
del lib
assert not ALLOCATED
def test_struct_fields(self):
longsize = 4 if IS_32_BIT else 8
POINT = lltype.Struct('POINT',
('x', rffi.LONG),
('y', rffi.SHORT),
('z', rffi.VOIDP),
)
y_ofs = longsize
z_ofs = longsize*2
p = lltype.malloc(POINT, flavor='raw')
p.x = 42
p.y = rffi.cast(rffi.SHORT, -1)
p.z = rffi.cast(rffi.VOIDP, 0x1234)
addr = rffi.cast(rffi.VOIDP, p)
assert struct_getfield_int(types.slong, addr, 0) == 42
assert struct_getfield_int(types.sshort, addr, y_ofs) == -1
assert struct_getfield_int(types.pointer, addr, z_ofs) == 0x1234
#
struct_setfield_int(types.slong, addr, 0, 43)
struct_setfield_int(types.sshort, addr, y_ofs, 0x1234FFFE) # 0x1234 is masked out
struct_setfield_int(types.pointer, addr, z_ofs, 0x4321)
assert p.x == 43
assert p.y == -2
assert rffi.cast(rffi.LONG, p.z) == 0x4321
#
lltype.free(p, flavor='raw')
def test_array_fields(self):
POINT = lltype.Struct("POINT",
("x", lltype.Float),
("y", lltype.Float),
)
points = lltype.malloc(rffi.CArray(POINT), 2, flavor="raw")
points[0].x = 1.0
points[0].y = 2.0
points[1].x = 3.0
points[1].y = 4.0
points = rffi.cast(rffi.CArrayPtr(lltype.Char), points)
assert array_getitem(types.double, 16, points, 0, 0) == 1.0
assert array_getitem(types.double, 16, points, 0, 8) == 2.0
assert array_getitem(types.double, 16, points, 1, 0) == 3.0
assert array_getitem(types.double, 16, points, 1, 8) == 4.0
#
array_setitem(types.double, 16, points, 0, 0, 10.0)
array_setitem(types.double, 16, points, 0, 8, 20.0)
array_setitem(types.double, 16, points, 1, 0, 30.0)
array_setitem(types.double, 16, points, 1, 8, 40.0)
#
assert array_getitem(types.double, 16, points, 0, 0) == 10.0
assert array_getitem(types.double, 16, points, 0, 8) == 20.0
assert array_getitem(types.double, 16, points, 1, 0) == 30.0
assert array_getitem(types.double, 16, points, 1, 8) == 40.0
#
lltype.free(points, flavor="raw")
def test_struct_fields_longlong(self):
POINT = lltype.Struct('POINT',
('x', rffi.LONGLONG),
('y', rffi.ULONGLONG)
)
y_ofs = 8
p = lltype.malloc(POINT, flavor='raw')
p.x = r_longlong(123)
p.y = r_ulonglong(456)
addr = rffi.cast(rffi.VOIDP, p)
assert struct_getfield_longlong(types.slonglong, addr, 0) == 123
assert struct_getfield_longlong(types.ulonglong, addr, y_ofs) == 456
#
v = rffi.cast(lltype.SignedLongLong, r_ulonglong(9223372036854775808))
struct_setfield_longlong(types.slonglong, addr, 0, v)
struct_setfield_longlong(types.ulonglong, addr, y_ofs, r_longlong(-1))
assert p.x == -9223372036854775808
assert rffi.cast(lltype.UnsignedLongLong, p.y) == 18446744073709551615
#
lltype.free(p, flavor='raw')
def test_struct_fields_float(self):
POINT = lltype.Struct('POINT',
('x', rffi.DOUBLE),
('y', rffi.DOUBLE)
)
y_ofs = 8
p = lltype.malloc(POINT, flavor='raw')
p.x = 123.4
p.y = 567.8
addr = rffi.cast(rffi.VOIDP, p)
assert struct_getfield_float(types.double, addr, 0) == 123.4
assert struct_getfield_float(types.double, addr, y_ofs) == 567.8
#
struct_setfield_float(types.double, addr, 0, 321.0)
struct_setfield_float(types.double, addr, y_ofs, 876.5)
assert p.x == 321.0
assert p.y == 876.5
#
lltype.free(p, flavor='raw')
def test_struct_fields_singlefloat(self):
POINT = lltype.Struct('POINT',
('x', rffi.FLOAT),
('y', rffi.FLOAT)
)
y_ofs = 4
p = lltype.malloc(POINT, flavor='raw')
p.x = r_singlefloat(123.4)
p.y = r_singlefloat(567.8)
addr = rffi.cast(rffi.VOIDP, p)
assert struct_getfield_singlefloat(types.double, addr, 0) == r_singlefloat(123.4)
assert struct_getfield_singlefloat(types.double, addr, y_ofs) == r_singlefloat(567.8)
#
struct_setfield_singlefloat(types.double, addr, 0, r_singlefloat(321.0))
struct_setfield_singlefloat(types.double, addr, y_ofs, r_singlefloat(876.5))
assert p.x == r_singlefloat(321.0)
assert p.y == r_singlefloat(876.5)
#
lltype.free(p, flavor='raw')
class TestLibffiCall(BaseFfiTest):
    """
    Test various kind of calls through libffi.
    The peculiarity of these tests is that they are run both directly (going
    really through libffi) and by jit/metainterp/test/test_fficall.py, which
    tests the call when JITted.
    If you need to test a behaviour than it's not affected by JITing (e.g.,
    typechecking), you should put your test in TestLibffiMisc.
    """
    # WARNING: the docstrings of the test_* methods below are NOT documentation:
    # any docstring containing '{' is harvested by setup_class and compiled as
    # the C source of the test library. Do not edit them casually.
    CDLL = CDLL
    @classmethod
    def setup_class(cls):
        from pypy.tool.udir import udir
        from pypy.translator.tool.cbuild import ExternalCompilationInfo
        from pypy.translator.tool.cbuild import STANDARD_DEFINES
        from pypy.translator.platform import platform
        BaseFfiTest.setup_class()
        # prepare C code as an example, so we can load it and call
        # it via rlib.libffi
        c_file = udir.ensure("test_libffi", dir=1).join("foolib.c")
        # automatically collect the C source from the docstrings of the tests
        snippets = []
        exports = []
        for name in dir(cls):
            if name.startswith('test_'):
                meth = getattr(cls, name)
                # the heuristic to determine it it's really C code could be
                # improved: so far we just check that there is a '{' :-)
                if meth.__doc__ is not None and '{' in meth.__doc__:
                    snippets.append(meth.__doc__)
                    import re
                    # every " name(" occurrence is exported from the library
                    for match in re.finditer(" ([a-z_]+)\(", meth.__doc__):
                        exports.append(match.group(1))
        #
        c_file.write(STANDARD_DEFINES + str(py.code.Source('\n'.join(snippets))))
        eci = ExternalCompilationInfo(export_symbols=exports)
        cls.libfoo_name = str(platform.compile([c_file], eci, 'x',
                                               standalone=False))
    def get_libfoo(self):
        # Open the library compiled in setup_class.
        return self.CDLL(self.libfoo_name)
    def call(self, funcspec, args, RESULT, is_struct=False, jitif=[]):
        """
        Call the specified function after constructing and ArgChain with the
        arguments in ``args``.
        The function is specified with ``funcspec``, which is a tuple of the
        form (lib, name, argtypes, restype).
        This method is overridden by metainterp/test/test_fficall.py in
        order to do the call in a loop and JIT it. The optional arguments are
        used only by that overridden method.
        """
        lib, name, argtypes, restype = funcspec
        func = lib.getpointer(name, argtypes, restype)
        chain = ArgChain()
        for arg in args:
            if isinstance(arg, tuple):
                # ('methname', value) selects a specific arg_* method on the
                # chain, e.g. ('arg_raw', adr) for by-value struct arguments.
                methname, arg = arg
                meth = getattr(chain, methname)
                meth(arg)
            else:
                chain.arg(arg)
        return func.call(chain, RESULT, is_struct=is_struct)
    # ------------------------------------------------------------------------
    def test_very_simple(self):
        """
        int diff_xy(int x, Signed y)
        {
            return x - y;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'diff_xy', [types.sint, types.signed], types.sint)
        res = self.call(func, [50, 8], lltype.Signed)
        assert res == 42
    def test_simple(self):
        """
        int sum_xy(int x, double y)
        {
            return (x + (int)y);
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_xy', [types.sint, types.double], types.sint)
        res = self.call(func, [38, 4.2], lltype.Signed, jitif=["floats"])
        assert res == 42
    def test_float_result(self):
        # Uses libm's pow() instead of the generated test library.
        libm = self.get_libm()
        func = (libm, 'pow', [types.double, types.double], types.double)
        res = self.call(func, [2.0, 3.0], rffi.DOUBLE, jitif=["floats"])
        assert res == 8.0
    def test_cast_result(self):
        """
        unsigned char cast_to_uchar_and_ovf(int x)
        {
            return 200+(unsigned char)x;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'cast_to_uchar_and_ovf', [types.sint], types.uchar)
        res = self.call(func, [0], rffi.UCHAR)
        assert res == 200
    def test_cast_argument(self):
        """
        int many_args(char a, int b)
        {
            return a+b;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'many_args', [types.uchar, types.sint], types.sint)
        res = self.call(func, [chr(20), 22], rffi.SIGNED)
        assert res == 42
    def test_char_args(self):
        """
        char sum_args(char a, char b) {
            return a + b;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_args', [types.schar, types.schar], types.schar)
        res = self.call(func, [123, 43], rffi.CHAR)
        assert res == chr(166)
    def test_unsigned_short_args(self):
        """
        unsigned short sum_xy_us(unsigned short x, unsigned short y)
        {
            return x+y;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_xy_us', [types.ushort, types.ushort], types.ushort)
        res = self.call(func, [32000, 8000], rffi.USHORT)
        assert res == 40000
    def test_pointer_as_argument(self):
        """#include <stdlib.h>
        Signed inc(Signed* x)
        {
            Signed oldval;
            if (x == NULL)
                return -1;
            oldval = *x;
            *x = oldval+1;
            return oldval;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'inc', [types.pointer], types.signed)
        null = lltype.nullptr(rffi.SIGNEDP.TO)
        res = self.call(func, [null], rffi.SIGNED)
        assert res == -1
        #
        ptr_result = lltype.malloc(rffi.SIGNEDP.TO, 1, flavor='raw')
        ptr_result[0] = 41
        res = self.call(func, [ptr_result], rffi.SIGNED)
        if self.__class__ is TestLibffiCall:
            # the function was called only once
            assert res == 41
            assert ptr_result[0] == 42
            lltype.free(ptr_result, flavor='raw')
            # the test does not make sense when run with the JIT through
            # meta_interp, because the __del__ are not properly called (hence
            # we "leak" memory)
            del libfoo
            assert not ALLOCATED
        else:
            # the function as been called 9 times
            assert res == 50
            assert ptr_result[0] == 51
            lltype.free(ptr_result, flavor='raw')
    def test_return_pointer(self):
        """
        struct pair {
            Signed a;
            Signed b;
        };
        struct pair my_static_pair = {10, 20};
        Signed* get_pointer_to_b()
        {
            return &my_static_pair.b;
        }
        """
        libfoo = self.get_libfoo()
        func = (libfoo, 'get_pointer_to_b', [], types.pointer)
        res = self.call(func, [], rffi.SIGNEDP)
        assert res[0] == 20
    def test_void_result(self):
        """
        int dummy;
        void set_dummy(int val) { dummy = val; }
        int get_dummy() { return dummy; }
        """
        libfoo = self.get_libfoo()
        set_dummy = (libfoo, 'set_dummy', [types.sint], types.void)
        get_dummy = (libfoo, 'get_dummy', [], types.sint)
        #
        initval = self.call(get_dummy, [], rffi.SIGNED)
        #
        res = self.call(set_dummy, [initval+1], lltype.Void)
        assert res is None
        #
        res = self.call(get_dummy, [], rffi.SIGNED)
        assert res == initval+1
    def test_single_float_args(self):
        """
        float sum_xy_float(float x, float y)
        {
            return x+y;
        }
        """
        from ctypes import c_float # this is used only to compute the expected result
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_xy_float', [types.float, types.float], types.float)
        x = r_singlefloat(12.34)
        y = r_singlefloat(56.78)
        res = self.call(func, [x, y], rffi.FLOAT, jitif=["singlefloats"])
        expected = c_float(c_float(12.34).value + c_float(56.78).value).value
        assert float(res) == expected
    def test_slonglong_args(self):
        """
        long long sum_xy_longlong(long long x, long long y)
        {
            return x+y;
        }
        """
        maxint32 = 2147483647 # we cannot really go above maxint on 64 bits
                              # (and we would not test anything, as there long
                              # is the same as long long)
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_xy_longlong', [types.slonglong, types.slonglong],
                types.slonglong)
        if IS_32_BIT:
            x = r_longlong(maxint32+1)
            y = r_longlong(maxint32+2)
        else:
            x = maxint32+1
            y = maxint32+2
        res = self.call(func, [x, y], rffi.LONGLONG, jitif=["longlong"])
        expected = maxint32*2 + 3
        assert res == expected
    def test_ulonglong_args(self):
        """
        unsigned long long sum_xy_ulonglong(unsigned long long x,
                                            unsigned long long y)
        {
            return x+y;
        }
        """
        maxint64 = 9223372036854775807 # maxint64+1 does not fit into a
                                       # longlong, but it does into a
                                       # ulonglong
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_xy_ulonglong', [types.ulonglong, types.ulonglong],
                types.ulonglong)
        x = r_ulonglong(maxint64+1)
        y = r_ulonglong(2)
        res = self.call(func, [x, y], rffi.ULONGLONG, jitif=["longlong"])
        expected = maxint64 + 3
        assert res == expected
    def test_wrong_number_of_arguments(self):
        # Calling with too few/too many args must raise TypeError, both when
        # run directly and when interpreted on lltype (LLException wrapper).
        from pypy.rpython.llinterp import LLException
        libfoo = self.get_libfoo()
        func = (libfoo, 'sum_xy', [types.sint, types.double], types.sint)
        glob = globals()
        loc = locals()
        def my_raises(s):
            try:
                exec s in glob, loc
            except TypeError:
                pass
            except LLException, e:
                if str(e) != "<LLException 'TypeError'>":
                    raise
            else:
                assert False, 'Did not raise'
        my_raises("self.call(func, [38], rffi.SIGNED)") # one less
        my_raises("self.call(func, [38, 12.3, 42], rffi.SIGNED)") # one more
    def test_byval_argument(self):
        """
        struct Point {
            Signed x;
            Signed y;
        };
        Signed sum_point(struct Point p) {
            return p.x + p.y;
        }
        """
        libfoo = CDLL(self.libfoo_name)
        ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed])
        ffi_point = ffi_point_struct.ffistruct
        sum_point = (libfoo, 'sum_point', [ffi_point], types.signed)
        #
        ARRAY = rffi.CArray(rffi.SIGNED)
        buf = lltype.malloc(ARRAY, 2, flavor='raw')
        buf[0] = 30
        buf[1] = 12
        adr = rffi.cast(rffi.VOIDP, buf)
        # arg_raw passes the buffer by value (copied into the call frame)
        res = self.call(sum_point, [('arg_raw', adr)], rffi.SIGNED,
                        jitif=["byval"])
        assert res == 42
        # check that we still have the ownership on the buffer
        assert buf[0] == 30
        assert buf[1] == 12
        lltype.free(buf, flavor='raw')
        lltype.free(ffi_point_struct, flavor='raw')
    def test_byval_result(self):
        """
        struct Point make_point(Signed x, Signed y) {
            struct Point p;
            p.x = x;
            p.y = y;
            return p;
        }
        """
        libfoo = CDLL(self.libfoo_name)
        ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed])
        ffi_point = ffi_point_struct.ffistruct
        libfoo = CDLL(self.libfoo_name)
        make_point = (libfoo, 'make_point', [types.signed, types.signed], ffi_point)
        #
        # is_struct=True: the result is a freshly malloc'ed raw buffer that we
        # own and must free below.
        PTR = lltype.Ptr(rffi.CArray(rffi.SIGNED))
        p = self.call(make_point, [12, 34], PTR, is_struct=True,
                      jitif=["byval"])
        assert p[0] == 12
        assert p[1] == 34
        lltype.free(p, flavor='raw')
        lltype.free(ffi_point_struct, flavor='raw')
| 36.614815 | 93 | 0.54471 |
0a6c577afac40b778126a7b4c69b067f62ce7723 | 16,253 | py | Python | vokenization/vokenization.py | StevenyzZhang/vokenization | 6308f2e9834c50f55fde6192588c0173d1e2d63b | [
"MIT"
] | null | null | null | vokenization/vokenization.py | StevenyzZhang/vokenization | 6308f2e9834c50f55fde6192588c0173d1e2d63b | [
"MIT"
] | null | null | null | vokenization/vokenization.py | StevenyzZhang/vokenization | 6308f2e9834c50f55fde6192588c0173d1e2d63b | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyleft 2020 project COL.
from collections import defaultdict
import math
import pickle
import os
import sys
import h5py
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from transformers import BertTokenizer
import common
from indexing import TorchGPUIndexer, FaissGPUIndexer
VERY_LARGE = 9595959595
class Vokenizer:
def __init__(self, model, tokenizer, keys_dir, img_sets=('coco_minival',),
max_img_num=VERY_LARGE, gpus=(0,), backend='faiss', upper_bound=128,
sent_level=False):
"""
:param model: Hugginface language model
:param tokenizer: Hugginface Tokenizer
:param keys_dir: the directory which saves the keys.
:param img_sets: the img_sets to be loaded, see common.IMAGE_SETS for all options.
:param max_img_num: load up to #max_img_num images into the dictionary
:param gpus: The GPUs used in calculating the BERT outputs and indexing.
Note: Currently only one GPU is supported!!!
"""
self.model = model.cuda(gpus[0]) if model is not None else model
self.tokenizer = tokenizer
self.img_sets = img_sets
self.gpus = gpus # The GPUs used in the indexer
self.gpu = self.gpus[0]
self.backend = backend
self.upper_bound = upper_bound
self.sent_level = sent_level # Otherwise use word level
max_img_num = VERY_LARGE if max_img_num == -1 else max_img_num
# These two are important, which indicates the mapping from
# vokens to their actual images.
self.img_paths = []
self.img_ids = []
for img_set in self.img_sets:
assert img_set in common.IMAGE_SETS, "%s not in image sets %s" % (
img_set, common.IMAGE_SETS)
# Load image paths corresponding to the keys.
# img_paths_fname = os.path.join(common.LOCAL_DIR, 'images', img_set + "_paths.txt")
# img_ids_fname = os.path.join(common.LOCAL_DIR, 'images', img_set + "_ids.txt")
img_paths_fname = os.path.join(keys_dir, f"{img_set}.path")
img_ids_fname = os.path.join(keys_dir, f"{img_set}.ids")
if not os.path.exists(img_paths_fname):
# If the actual images are not saved on the server, we would use the img_ids.
img_paths_fname = img_ids_fname
with open(img_paths_fname) as f:
all_img_paths = list(map(lambda x: x.strip(), f.readlines()))
with open(img_ids_fname) as g:
all_img_ids = list(map(lambda x: x.strip(), g.readlines()))
assert len(all_img_paths) == len(all_img_ids)
for img_path, img_id in zip(all_img_paths, all_img_ids):
if len(self.img_paths) < max_img_num:
self.img_paths.append(img_path)
self.img_ids.append(f"{img_set}/{img_id}")
else:
break
assert len(self.img_paths) == len(self.img_ids)
# Lazy loading and indexing
self.keys = None
self.keys_dir = keys_dir
self.indexed = False
self.indexer = None
@property
def img_num(self):
return len(self.img_paths)
def dump_img_ids(self, fname):
"""
Dump the mapping from the voken_id to img_ids, to fname.
Saved in the format of array.
"""
with open(fname, 'w') as f:
for img_id in self.img_ids:
f.write(img_id + "\n")
def __len__(self):
return self.img_num
def indexing(self):
self.model.eval()
# Load pre-extracted image keys.
self.keys = []
remain_img_num = self.img_num
for img_set in self.img_sets:
assert img_set in common.IMAGE_SETS, "%s not in image sets %s" % (
img_set, common.IMAGE_SETS)
keys_fname = os.path.join(self.keys_dir, img_set + '.hdf5')
if not os.path.exists(keys_fname):
assert False, "keys of image set %s is not extracted, please save it at %s" % (
img_set, keys_fname
)
# Load Keys
h5_file = h5py.File(keys_fname, 'r')
dset = h5_file["keys"]
load_img_num = min(remain_img_num, len(dset))
load_keys = dset[:load_img_num]
self.keys.append(load_keys)
remain_img_num -= load_img_num
h5_file.close()
if load_img_num == 0:
break
# Lazy indexing
self.keys = np.concatenate(self.keys, 0)
if self.backend == 'torch':
self.indexer = TorchGPUIndexer(self.keys, gpus=self.gpus, fp16=True)
elif self.backend == 'faiss':
self.indexer = FaissGPUIndexer(self.keys, gpus=self.gpus, fp16=True)
else:
raise NotImplementedError(f"Backend {self.backend} is not supported")
self.indexed = True
def vokenize_sents(self, sents, topk=None):
input_ids = []
for sent in sents:
input_ids.append(self.tokenizer.encode(
sent,
add_special_tokens=False,
# return_tensors='pt' # Return PyTorch (pt) tensors
))
return self.vokenize_ids(input_ids, attention_mask=None, topk=topk)
def vokenize_ids(self, input_ids, attention_mask=None, topk=None):
"""
:param input_ids: A list of token_ids i.e.,
[[token_1_1, token_1_2, ...], [token_2_1, token_2_2, ...], ...]
:param attention_mask: I did not use it for now.
:param topk: Retrieve the topk vokens for each token.
:return: top_scores, top_idxs, input_tokens, top_paths
Note: 1. The results would consider the additional special tokens while the input_tokens do **not**.
2. If topk=None, it will be a 2-d results with:
[ [s11_top1, s12_top1, ...],
[s21_top1, s22_top1, ...],
..... ]
If topk!=None (e.g., 1, 5, 10), it will be a 3-d results with:
[ [ [s11_top1, s11_top2, ...],
[s12_top1, s12_top2, ...],
...... ],
[ [s21_top1, s21_top2, ...],
[s22_top1, s22_top2, ...],
...... ],
..... ],
where s11_top1 means s1(the 1st sentence)1(the 1st token of the 1st sentence)_top1(the top-1 index)
"""
if not self.indexed: # Index the keys at the first retrieval call.
self.indexing()
# The original tokens
input_tokens = [
([self.tokenizer.cls_token] + [self.tokenizer._convert_id_to_token(idx) for idx in input_id] + [self.tokenizer.sep_token])
for input_id in input_ids]
# Deal with over-length tokens (because the BERT-style encoder has length limit due to the positional embedding)
# Here is a process to avoid very short sequence when cutting the long sentence:
# Suppose the sentence length is 18 and UPPER_BOUND is 8,
# we draw it as <----------------->, where "<" is bos, and ">" is the last token
# instead of cut it as <------->------->->, which has very short sequence <-> in the end.
# we cut it with almost equal length: <----->----->----->
input_ids = input_ids.copy()
sent2segs = defaultdict(list)
for i in range(len(input_ids)):
if len(input_ids[i]) > self.upper_bound:
num_segments = math.ceil(len(input_ids[i]) / self.upper_bound)
tokens_per_seg = int(len(input_ids[i]) / num_segments)
remaining = input_ids[i][tokens_per_seg:]
input_ids[i] = input_ids[i][:tokens_per_seg]
while len(remaining) > 0:
# print(len(remaining))
sent2segs[i].append(len(input_ids))
input_ids.append(remaining[:tokens_per_seg])
remaining = remaining[tokens_per_seg:]
# Convert to torch tensors.
if not type(input_ids) is torch.Tensor:
input_ids = [
torch.tensor(self.tokenizer.build_inputs_with_special_tokens(list(input_id)))
for input_id in input_ids
]
input_ids = pad_sequence(input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
attention_mask = (input_ids != self.tokenizer.pad_token_id) # word_tokens --> 1, pad_token --> 0
if attention_mask.all():
attention_mask = None
# Get lengths
if attention_mask is not None:
lengths = list(attention_mask.sum(1).numpy())
else:
lengths = [len(input_ids[0])] * len(input_ids)
if attention_mask is not None and type(input_ids) is not torch.Tensor:
attention_mask = torch.tensor(attention_mask)
# Lang model inference
input_ids = input_ids.cuda(self.gpu)
if attention_mask is not None:
attention_mask = attention_mask.cuda(self.gpu)
def apply_model(input_ids, attention_mask, lengths):
with torch.no_grad():
lang_output = self.model(input_ids, attention_mask) # b, l, f
if type(lang_output) is list:
lang_output = lang_output[0]
# Gather language output
if self.sent_level:
# lang_output of shape [batch_size, dim]
gathered_output = lang_output
else:
# lang_output of shape [batch_size, max_len, dim]
# --> gathered_output [ \sum_i len(i), dim]
gathered_output = torch.cat([output[:length] for output, length in zip(lang_output, lengths)])
# Visn retrieval
if topk is None:
# It will call the function `max()` and return a 2-d tensor
top_score, top_idx = self.indexer.batch_top1(gathered_output)
else:
# It will call the function `topk(k)` and return a 3-d tensor
top_score, top_idx = self.indexer.batch_topk(gathered_output, topk=topk)
return top_score, top_idx
top_score, top_idx = memory_safe_apply(apply_model, input_ids, attention_mask, lengths)
# Split
top_score, top_idx = top_score.detach().cpu(), top_idx.detach().cpu()
if not self.sent_level:
# If word level, split it
top_scores = list(top_score.split(lengths)) # [ float_tensor(len1), float_tensor(len2), ...]
top_idxs = list(top_idx.split(lengths)) # [ int_tensor(len1), int_tensor(len2), ...]
else:
# If sent level, repeat the voken.
# Use clone() here
top_scores = [ts.expand(length, *ts.shape).clone() for ts, length in zip(top_score, lengths)]
top_idxs = [tid.expand(length, *tid.shape).clone() for tid, length in zip(top_idx, lengths)]
if top_idxs[0].dim() == 1:
# Return the top1 paths
top_paths = [[self.img_paths[idx.item()] for idx in top_idx]
for top_idx in top_idxs]
else:
# Return the topk paths related to the sentences
top_paths = [[[self.img_paths[k_idx.item()] for k_idx in topk_idx]
for topk_idx in top_idx]
for top_idx in top_idxs]
if self.sent_level:
for i, tid in enumerate(top_idxs):
# Keep the first positive and others negative, to mark the header of the sentence.
# [3] --> [3, 3, 3, 3] --> [-4, -4, -4, -4] --> [3, -4, -4, -4]
# "-x-1" is used to handle zero, [0] --> [1, 1, 1, 1] --> [-1, -1, -1, -1] --> [0, -1, -1, -1]
# print('Before conversion', tid)
tid[:] = tid * (-1) - 1
tid[1] = tid[1] * (-1) - 1 # The tid[0] is corresponding to <cls>
# print('After conversion', top_idxs[i])
# Put back the segments of over-length sentences
if len(sent2segs) > 0:
for sent_id, segment_ids in sent2segs.items():
for segment_id in segment_ids:
# Append the results with the segments:
# ---------Now---------------- + ----Appended Segment-----
# [<cls1> I have a <sep1>][:-1] + [<cls2> cat . <sep2>][1:]
# = [<cls1> I have a cat . <sep2>]
top_scores[sent_id] = torch.cat([top_scores[sent_id][:-1], top_scores[segment_id][1:]])
top_idxs[sent_id] = torch.cat([top_idxs[sent_id][:-1], top_idxs[segment_id][1:]])
top_paths[sent_id] = top_paths[sent_id][:-1] + top_paths[segment_id][1:]
num_sents = len(input_tokens)
top_scores = top_scores[:num_sents]
top_idxs = top_idxs[:num_sents]
top_paths = top_paths[:num_sents]
return top_scores, top_idxs, input_tokens, top_paths
def memory_safe_apply(func, *args):
    """Apply ``func`` to a whole batch; on RuntimeError (typically GPU OOM)
    retry one sample at a time and concatenate the per-sample results.

    ``func`` must return a tuple/list ``(o1, o2, ...)`` where each ``o_i`` is
    a tensor whose first dimension is the batch dimension.

    :param func: batched function with the constraint above.
    :param args: positional arguments for ``func``; each must be sliceable
        along the batch dimension (``len(args[0])`` defines the batch size).
    :return: the result of ``func``, or the per-sample results concatenated
        along the batch dimension.
    """
    try:
        return func(*args)
    except RuntimeError as e:
        print(e)
        num_samples = len(args[0])
        per_sample_outputs = []
        for idx in range(num_samples):
            single_args = tuple(arg[idx: idx + 1] for arg in args)
            result = func(*single_args)
            # Enforce the contract: func returns a tuple/list of tensors,
            # each with batch dimension 1.
            assert type(result) in (tuple, list)
            per_sample_outputs.append(result)
        # Transpose [(o1_1, o1_2, ...), (o2_1, o2_2, ...), ...] into
        # ((o1_1, o2_1, ...), (o1_2, o2_2, ...), ...) and concat each stream.
        return tuple(torch.cat(stream) for stream in zip(*per_sample_outputs))
# Fallback tokenizer used when the snapshot directory has no tokenizer.pkl.
default_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def load_model_and_tokenizer(load, cpu=False):
    """Load the cross-modal matching snapshot under ``load`` and return
    ``(lang_model, tokenizer)``.

    Builds a fresh JointModel (fixed hyperparameters below), loads the pickled
    snapshot from ``<load>/BEST.pth.model``, and transplants the snapshot's
    language-model weights into the freshly constructed LangModel. Exits the
    process if no snapshot is found.

    :param load: snapshot directory containing BEST.pth.model (and optionally
        tokenizer.pkl).
    :param cpu: if True, map the snapshot tensors to CPU when loading.
    """
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    from xmatching.model import LangModel, VisnModel, JointModel
    # Fixed architecture hyperparameters matching the trained snapshot
    # (presumably must agree with how BEST.pth.model was trained -- confirm).
    lang_layers = '4,3,2,1'
    dim = 64
    lang = 'bert'
    visn = 'resnext101_32x8d'
    # Models
    lang_layers = list(map(lambda x: -int(x), lang_layers.split(','))) # The layers concated as the output.
    lang_model = LangModel(dim, arch=lang, layers=lang_layers)
    visn_model = VisnModel(dim, arch=visn)
    # The use of joint model would help synchronization in distributed learning.
    model = JointModel(lang_model, visn_model)
    if os.path.exists(load + '/BEST.pth.model'):
        sys.path.append('./xmatching')
        # for dirc in os.listdir(load + '/src'):
        #     sys.path.append(load + '/src/' + dirc)
        # import model # The pickle has some issues... thus must load the library
        # SECURITY: torch.load unpickles arbitrary objects -- only load
        # snapshots from trusted sources.
        if cpu:
            device = torch.device('cpu')
            joint_model = torch.load(load + '/BEST.pth.model',
                                     map_location=device)
        else:
            joint_model = torch.load(load + '/BEST.pth.model')
        # ZYZ: We need the init of language model
        model.lang_model.load_state_dict(joint_model.lang_model.state_dict())
        joint_model.lang_model = model.lang_model
        joint_model.cuda()
        joint_model.eval() # DO NOT FORGET THIS!!!
    else:
        print("No snapshots there, exit.")
        exit()
    if os.path.exists(load + '/tokenizer.pkl'):
        # SECURITY: pickle.load executes arbitrary code; trusted snapshots only.
        with open(load + '/tokenizer.pkl', 'rb') as f:
            tokenizer = pickle.load(f)
    else:
        tokenizer = default_tokenizer
    return joint_model.lang_model, tokenizer
| 43.573727 | 134 | 0.56648 |
8445e9dd0509d19ae17a45a49b1496f776a51197 | 53,803 | py | Python | synapse/config/server.py | appotry/synapse | 7564b8e118aa764fd0075f0d69910a5b2cd58182 | [
"Apache-2.0"
] | null | null | null | synapse/config/server.py | appotry/synapse | 7564b8e118aa764fd0075f0d69910a5b2cd58182 | [
"Apache-2.0"
] | null | null | null | synapse/config/server.py | appotry/synapse | 7564b8e118aa764fd0075f0d69910a5b2cd58182 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import os.path
import re
import urllib.parse
from textwrap import indent
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import attr
import yaml
from netaddr import AddrFormatError, IPNetwork, IPSet
from twisted.conch.ssh.keys import Key
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_server_name
from ._base import Config, ConfigError
from ._util import validate_config
# Use logging.getLogger() rather than instantiating logging.Logger directly:
# direct instantiation bypasses the logging manager, so the logger would not
# be part of the logger hierarchy (no parent, no inherited handlers/levels).
logger = logging.getLogger(__name__)

# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
#
# We later check for errors when binding to 0.0.0.0 and ignore them if :: is also in
# in the list.
DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
def _6to4(network: IPNetwork) -> IPNetwork:
    """Convert an IPv4 network into a 6to4 IPv6 network per RFC 3056."""
    # A 6to4 network is built from:
    #   * the 2002::/16 prefix,
    #   * the network's first IPv4 address, hex-encoded as the next 32 bits
    #     (two 16-bit groups),
    #   * a prefix length extended by the 16 bits of the 2002 prefix.
    encoded = "%08x" % network.first
    return IPNetwork(
        "2002:%s:%s::/%d" % (encoded[:4], encoded[4:], 16 + network.prefixlen)
    )
def generate_ip_set(
    ip_addresses: Optional[Iterable[str]],
    extra_addresses: Optional[Iterable[str]] = None,
    config_path: Optional[Iterable[str]] = None,
) -> IPSet:
    """
    Build an IPSet from two optional iterables of IP addresses / CIDRs.

    Every IPv4 network also gets its IPv6 counterparts added:

    * IPv4-Compatible IPv6 Address (see RFC 4291, section 2.5.5.1)
    * IPv4-Mapped IPv6 Address (see RFC 4291, section 2.5.5.2)
    * 6to4 Address (see RFC 3056, section 2)

    Args:
        ip_addresses: An iterable of IP addresses or CIDRs.
        extra_addresses: An iterable of IP addresses or CIDRs.
        config_path: The path in the configuration for error messages.

    Returns:
        A new IP set.

    Raises:
        ConfigError: if any entry is not a parseable IP address/CIDR.
    """
    ip_set = IPSet()
    for entry in itertools.chain(ip_addresses or (), extra_addresses or ()):
        try:
            net = IPNetwork(entry)
        except AddrFormatError as e:
            raise ConfigError(
                "Invalid IP range provided: %s." % (entry,), config_path
            ) from e
        ip_set.add(net)

        # For IPv4 networks, also cover the equivalent IPv6 forms. Adding a
        # network that is already present is harmless.
        if ":" not in str(net):
            ip_set.add(IPNetwork(net).ipv6(ipv4_compatible=True))
            ip_set.add(IPNetwork(net).ipv6(ipv4_compatible=False))
            ip_set.add(_6to4(net))

    return ip_set
# IP ranges that are considered private / unroutable / don't make sense.
DEFAULT_IP_RANGE_BLACKLIST = [
    # Localhost
    "127.0.0.0/8",
    # Private networks.
    "10.0.0.0/8",
    "172.16.0.0/12",
    "192.168.0.0/16",
    # Carrier grade NAT.
    "100.64.0.0/10",
    # Address registry.
    "192.0.0.0/24",
    # Link-local networks.
    "169.254.0.0/16",
    # Formerly used for 6to4 relay.
    "192.88.99.0/24",
    # Testing networks.
    "198.18.0.0/15",
    "192.0.2.0/24",
    "198.51.100.0/24",
    "203.0.113.0/24",
    # Multicast.
    "224.0.0.0/4",
    # Localhost
    "::1/128",
    # Link-local addresses.
    "fe80::/10",
    # Unique local addresses.
    "fc00::/7",
    # Testing networks.
    "2001:db8::/32",
    # Multicast.
    "ff00::/8",
    # Site-local addresses
    "fec0::/10",
]
# Room version assumed when none is configured explicitly; must be one of
# KNOWN_ROOM_VERSIONS (imported above).
DEFAULT_ROOM_VERSION = "6"
# Default error message returned when a room exceeds the configured complexity
# limit (see LimitRemoteRoomsConfig.complexity_error, which defaults to this).
ROOM_COMPLEXITY_TOO_GREAT = (
    "Your homeserver is unable to join rooms this large or complex. "
    "Please speak to your server administrator, or upgrade your instance "
    "to join this room."
)
# Deprecation notice for the legacy `metrics_port` option (presumably emitted
# when that option is set -- see the code that reads metrics_port).
METRICS_PORT_WARNING = """\
The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
a listener. Please see
https://matrix-org.github.io/synapse/latest/metrics-howto.html
on how to configure the new listener.
--------------------------------------------------------------------------------"""
# Valid values for ListenerConfig.type (enforced by its attrs validator).
KNOWN_LISTENER_TYPES = {
    "http",
    "metrics",
    "manhole",
    "replication",
}
# Valid resource names for HttpResourceConfig.names (enforced by its validator).
KNOWN_RESOURCES = {
    "client",
    "consent",
    "federation",
    "keys",
    "media",
    "metrics",
    "openid",
    "replication",
    "static",
    "webclient",
}
@attr.s(frozen=True)
class HttpResourceConfig:
    """Object describing one entry of an 'http' listener's `resources` list."""

    # The resource names to host; each must be drawn from KNOWN_RESOURCES.
    names: List[str] = attr.ib(
        factory=list,
        validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)),  # type: ignore
    )
    # Whether to enable HTTP compression for this resource.
    compress: bool = attr.ib(
        default=False,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),  # type: ignore[arg-type]
    )
@attr.s(slots=True, frozen=True, auto_attribs=True)
class HttpListenerConfig:
    """Object describing the http-specific parts of the config of a listener"""

    # Whether to trust the X-Forwarded-For header for the client IP.
    x_forwarded: bool = False
    # The HTTP resources hosted on this listener.
    resources: List[HttpResourceConfig] = attr.ib(factory=list)
    # Additional endpoints loaded via dynamic modules, keyed by URL path.
    additional_resources: Dict[str, dict] = attr.ib(factory=dict)
    # Optional free-form tag for this listener; presumably used to identify
    # it elsewhere (e.g. in metrics/logging) — confirm at call sites.
    tag: Optional[str] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ListenerConfig:
    """Object describing the configuration of a single listener."""

    # The TCP port to bind to.
    port: int = attr.ib(validator=attr.validators.instance_of(int))
    # The local addresses to listen on.
    bind_addresses: List[str]
    # The listener type; must be one of KNOWN_LISTENER_TYPES.
    type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
    # Whether TLS is enabled for this listener.
    tls: bool = False

    # http_options is only populated if type=http
    http_options: Optional[HttpListenerConfig] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:
    """Object describing the configuration of the manhole"""

    # Credentials required to log in to the manhole.
    username: str = attr.ib(validator=attr.validators.instance_of(str))
    password: str = attr.ib(validator=attr.validators.instance_of(str))
    # SSH key pair used to encrypt manhole traffic; None means hardcoded
    # (non-secret) defaults are used elsewhere.
    priv_key: Optional[Key]
    pub_key: Optional[Key]
@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
    """Settings controlling the room-complexity limit for remote room joins."""

    # Whether complexity checking is enabled at all.
    enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    # The complexity value above which joins are refused.
    complexity: Union[float, int] = attr.ib(
        validator=attr.validators.instance_of(
            (float, int)  # type: ignore[arg-type] # noqa
        ),
        default=1.0,
    )
    # The error message returned when a room is too complex.
    complexity_error: str = attr.ib(
        validator=attr.validators.instance_of(str),
        default=ROOM_COMPLEXITY_TOO_GREAT,
    )
    # Whether server admins may join rooms above the complexity limit.
    admins_can_join: bool = attr.ib(
        validator=attr.validators.instance_of(bool), default=False
    )
class ServerConfig(Config):
section = "server"
    def read_config(self, config, **kwargs):
        """Read and validate the "server" section of the configuration.

        Populates a large number of attributes on this config object
        (server name, public base URL, listeners, MAU limits, IP range
        blacklists, manhole settings, etc.). Statement order matters: some
        later options depend on attributes set earlier in this method.

        Args:
            config: the parsed configuration dict for this section.
            **kwargs: unused; accepted for compatibility with the config
                framework's calling convention.

        Raises:
            ConfigError: if any option is malformed or if mutually
                exclusive options are combined.
        """
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)
        try:
            parse_and_validate_server_name(self.server_name)
        except ValueError as e:
            raise ConfigError(str(e))
        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config.get("soft_file_limit", 0)
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.serve_server_wellknown = config.get("serve_server_wellknown", False)
        # Whether we should serve a "client well-known":
        #  (a) at .well-known/matrix/client on our client HTTP listener
        #  (b) in the response to /login
        #
        # ... which together help ensure that clients use our public_baseurl instead of
        # whatever they were told by the user.
        #
        # For the sake of backwards compatibility with existing installations, this is
        # True if public_baseurl is specified explicitly, and otherwise False. (The
        # reasoning here is that we have no way of knowing that the default
        # public_baseurl is actually correct for existing installations - many things
        # will not work correctly, but that's (probably?) better than sending clients
        # to a completely broken URL.
        self.serve_client_wellknown = False
        public_baseurl = config.get("public_baseurl")
        if public_baseurl is None:
            public_baseurl = f"https://{self.server_name}/"
            logger.info("Using default public_baseurl %s", public_baseurl)
        else:
            self.serve_client_wellknown = True
        # Ensure the base URL always ends with a trailing slash so paths can
        # be appended directly.
        if public_baseurl[-1] != "/":
            public_baseurl += "/"
        self.public_baseurl = public_baseurl
        # check that public_baseurl is valid
        try:
            splits = urllib.parse.urlsplit(self.public_baseurl)
        except Exception as e:
            raise ConfigError(f"Unable to parse URL: {e}", ("public_baseurl",))
        if splits.scheme not in ("https", "http"):
            raise ConfigError(
                f"Invalid scheme '{splits.scheme}': only https and http are supported"
            )
        if splits.query or splits.fragment:
            raise ConfigError(
                "public_baseurl cannot contain query parameters or a #-fragment"
            )
        # Whether to enable user presence.
        presence_config = config.get("presence") or {}
        self.use_presence = presence_config.get("enabled")
        if self.use_presence is None:
            self.use_presence = config.get("use_presence", True)
        # Custom presence router module
        # This is the legacy way of configuring it (the config should now be put in the modules section)
        self.presence_router_module_class = None
        self.presence_router_config = None
        presence_router_config = presence_config.get("presence_router")
        if presence_router_config:
            (
                self.presence_router_module_class,
                self.presence_router_config,
            ) = load_module(presence_router_config, ("presence", "presence_router"))
        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)
        # whether to enable the media repository endpoints. This should be set
        # to false if the media repository is running as a separate endpoint;
        # doing so ensures that we will not run cache cleanup jobs on the
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)
        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False
        )
        # Whether to require sharing a room with a user to retrieve their
        # profile data
        self.limit_profile_requests_to_users_who_share_rooms = config.get(
            "limit_profile_requests_to_users_who_share_rooms",
            False,
        )
        # Whether to retrieve and display profile data for a user when they
        # are invited to a room
        self.include_profile_data_on_invite = config.get(
            "include_profile_data_on_invite", True
        )
        # The legacy flag is mutually exclusive with the two newer options.
        if "restrict_public_rooms_to_local_users" in config and (
            "allow_public_rooms_without_auth" in config
            or "allow_public_rooms_over_federation" in config
        ):
            raise ConfigError(
                "Can't use 'restrict_public_rooms_to_local_users' if"
                " 'allow_public_rooms_without_auth' and/or"
                " 'allow_public_rooms_over_federation' is set."
            )
        # Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
        # flag is now obsolete but we need to check it for backward-compatibility.
        if config.get("restrict_public_rooms_to_local_users", False):
            self.allow_public_rooms_without_auth = False
            self.allow_public_rooms_over_federation = False
        else:
            # If set to 'true', removes the need for authentication to access the server's
            # public rooms directory through the client API, meaning that anyone can
            # query the room directory. Defaults to 'false'.
            self.allow_public_rooms_without_auth = config.get(
                "allow_public_rooms_without_auth", False
            )
            # If set to 'true', allows any other homeserver to fetch the server's public
            # rooms directory via federation. Defaults to 'false'.
            self.allow_public_rooms_over_federation = config.get(
                "allow_public_rooms_over_federation", False
            )
        default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
        # Ensure room version is a str
        default_room_version = str(default_room_version)
        if default_room_version not in KNOWN_ROOM_VERSIONS:
            raise ConfigError(
                "Unknown default_room_version: %s, known room versions: %s"
                % (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
            )
        # Get the actual room version object rather than just the identifier
        self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
        self.enable_search = config.get("enable_search", True)
        self.filter_timeline_limit = config.get("filter_timeline_limit", 100)
        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get("block_non_admin_invites", False)
        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get("max_mau_value", 0)
        self.mau_stats_only = config.get("mau_stats_only", False)
        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )
        self.mau_trial_days = config.get("mau_trial_days", 0)
        self.mau_limit_alerting = config.get("mau_limit_alerting", True)
        # How long to keep redacted events in the database in unredacted form
        # before redacting them.
        redaction_retention_period = config.get("redaction_retention_period", "7d")
        if redaction_retention_period is not None:
            self.redaction_retention_period: Optional[int] = self.parse_duration(
                redaction_retention_period
            )
        else:
            self.redaction_retention_period = None
        # How long to keep entries in the `users_ips` table.
        user_ips_max_age = config.get("user_ips_max_age", "28d")
        if user_ips_max_age is not None:
            self.user_ips_max_age: Optional[int] = self.parse_duration(user_ips_max_age)
        else:
            self.user_ips_max_age = None
        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")
        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)
        ip_range_blacklist = config.get(
            "ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST
        )
        # Attempt to create an IPSet from the given ranges
        # Always blacklist 0.0.0.0, ::
        self.ip_range_blacklist = generate_ip_set(
            ip_range_blacklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",)
        )
        self.ip_range_whitelist = generate_ip_set(
            config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",)
        )
        # The federation_ip_range_blacklist is used for backwards-compatibility
        # and only applies to federation and identity servers.
        if "federation_ip_range_blacklist" in config:
            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist = generate_ip_set(
                config["federation_ip_range_blacklist"],
                ["0.0.0.0", "::"],
                config_path=("federation_ip_range_blacklist",),
            )
            # 'federation_ip_range_whitelist' was never a supported configuration option.
            self.federation_ip_range_whitelist = None
        else:
            # No backwards-compatibility required, as federation_ip_range_blacklist
            # is not given. Default to ip_range_blacklist and ip_range_whitelist.
            self.federation_ip_range_blacklist = self.ip_range_blacklist
            self.federation_ip_range_whitelist = self.ip_range_whitelist
        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
        self.replication_torture_level = config.get("replication_torture_level")
        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True
        )
        # Whether to allow per-room membership profiles through the send of membership
        # events with profile information that differ from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
        self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
        # no_tls is not really supported any more, but let's grandfather it in
        # here.
        if config.get("no_tls", False):
            # Drop any TLS-enabled listeners rather than erroring out.
            l2 = []
            for listener in self.listeners:
                if listener.tls:
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.port,
                    )
                else:
                    l2.append(listener)
            self.listeners = l2
        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)
        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
        self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
        self.limit_remote_rooms = LimitRemoteRoomsConfig(
            **(config.get("limit_remote_rooms") or {})
        )
        # Legacy bind_port option: when given, it replaces any configured
        # listeners with a TLS listener plus an unsecure one.
        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")
            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)
            http_options = HttpListenerConfig(
                resources=[
                    HttpResourceConfig(names=["client"], compress=gzip_responses),
                    HttpResourceConfig(names=["federation"]),
                ],
            )
            self.listeners.append(
                ListenerConfig(
                    port=bind_port,
                    bind_addresses=[bind_host],
                    tls=True,
                    type="http",
                    http_options=http_options,
                )
            )
            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append(
                    ListenerConfig(
                        port=unsecure_port,
                        bind_addresses=[bind_host],
                        tls=False,
                        type="http",
                        http_options=http_options,
                    )
                )
        manhole = config.get("manhole")
        if manhole:
            self.listeners.append(
                ListenerConfig(
                    port=manhole,
                    bind_addresses=["127.0.0.1"],
                    type="manhole",
                )
            )
        manhole_settings = config.get("manhole_settings") or {}
        validate_config(
            _MANHOLE_SETTINGS_SCHEMA, manhole_settings, ("manhole_settings",)
        )
        manhole_username = manhole_settings.get("username", "matrix")
        manhole_password = manhole_settings.get("password", "rabbithole")
        manhole_priv_key_path = manhole_settings.get("ssh_priv_key_path")
        manhole_pub_key_path = manhole_settings.get("ssh_pub_key_path")
        manhole_priv_key = None
        if manhole_priv_key_path is not None:
            try:
                manhole_priv_key = Key.fromFile(manhole_priv_key_path)
            except Exception as e:
                raise ConfigError(
                    f"Failed to read manhole private key file {manhole_priv_key_path}"
                ) from e
        manhole_pub_key = None
        if manhole_pub_key_path is not None:
            try:
                manhole_pub_key = Key.fromFile(manhole_pub_key_path)
            except Exception as e:
                raise ConfigError(
                    f"Failed to read manhole public key file {manhole_pub_key_path}"
                ) from e
        self.manhole_settings = ManholeConfig(
            username=manhole_username,
            password=manhole_password,
            priv_key=manhole_priv_key,
            pub_key=manhole_pub_key,
        )
        metrics_port = config.get("metrics_port")
        if metrics_port:
            # Deprecated; users should configure a 'metrics' listener instead.
            logger.warning(METRICS_PORT_WARNING)
            self.listeners.append(
                ListenerConfig(
                    port=metrics_port,
                    bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
                    type="http",
                    http_options=HttpListenerConfig(
                        resources=[HttpResourceConfig(names=["metrics"])]
                    ),
                )
            )
        self.cleanup_extremities_with_dummy_events = config.get(
            "cleanup_extremities_with_dummy_events", True
        )
        # The number of forward extremities in a room needed to send a dummy event.
        self.dummy_events_threshold = config.get("dummy_events_threshold", 10)
        self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
        # Inhibits the /requestToken endpoints from returning an error that might leak
        # information about whether an e-mail address is in use or not on this
        # homeserver, and instead return a 200 with a fake sid if this kind of error is
        # met, without sending anything.
        # This is a compromise between sending an email, which could be a spam vector,
        # and letting the client know which email address is bound to an account and
        # which one isn't.
        self.request_token_inhibit_3pid_errors = config.get(
            "request_token_inhibit_3pid_errors",
            False,
        )
        # List of users trialing the new experimental default push rules. This setting is
        # not included in the sample configuration file on purpose as it's a temporary
        # hack, so that some users can trial the new defaults without impacting every
        # user on the homeserver.
        users_new_default_push_rules: list = (
            config.get("users_new_default_push_rules") or []
        )
        if not isinstance(users_new_default_push_rules, list):
            raise ConfigError("'users_new_default_push_rules' must be a list")
        # Turn the list into a set to improve lookup speed.
        self.users_new_default_push_rules: set = set(users_new_default_push_rules)
        # Whitelist of domain names that given next_link parameters must have
        next_link_domain_whitelist: Optional[List[str]] = config.get(
            "next_link_domain_whitelist"
        )
        self.next_link_domain_whitelist: Optional[Set[str]] = None
        if next_link_domain_whitelist is not None:
            if not isinstance(next_link_domain_whitelist, list):
                raise ConfigError("'next_link_domain_whitelist' must be a list")
            # Turn the list into a set to improve lookup speed.
            self.next_link_domain_whitelist = set(next_link_domain_whitelist)
        templates_config = config.get("templates") or {}
        if not isinstance(templates_config, dict):
            raise ConfigError("The 'templates' section must be a dictionary")
        self.custom_template_directory: Optional[str] = templates_config.get(
            "custom_template_directory"
        )
        if self.custom_template_directory is not None and not isinstance(
            self.custom_template_directory, str
        ):
            raise ConfigError("'custom_template_directory' must be a string")
def has_tls_listener(self) -> bool:
return any(listener.tls for listener in self.listeners)
def generate_config_section(
self,
server_name,
data_dir_path,
open_private_ports,
listeners,
config_dir_path,
**kwargs,
):
ip_range_blacklist = "\n".join(
" # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST
)
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
unsecure_port = bind_port - 400
else:
bind_port = 8448
unsecure_port = 8008
pid_file = os.path.join(data_dir_path, "homeserver.pid")
# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
secure_listeners = []
unsecure_listeners = []
private_addresses = ["::1", "127.0.0.1"]
if listeners:
for listener in listeners:
if listener["tls"]:
secure_listeners.append(listener)
else:
# If we don't want open ports we need to bind the listeners
# to some address other than 0.0.0.0. Here we chose to use
# localhost.
# If the addresses are already bound we won't overwrite them
# however.
if not open_private_ports:
listener.setdefault("bind_addresses", private_addresses)
unsecure_listeners.append(listener)
secure_http_bindings = indent(
yaml.dump(secure_listeners), " " * 10
).lstrip()
unsecure_http_bindings = indent(
yaml.dump(unsecure_listeners), " " * 10
).lstrip()
if not unsecure_listeners:
unsecure_http_bindings = (
"""- port: %(unsecure_port)s
tls: false
type: http
x_forwarded: true"""
% locals()
)
if not open_private_ports:
unsecure_http_bindings += (
"\n bind_addresses: ['::1', '127.0.0.1']"
)
unsecure_http_bindings += """
resources:
- names: [client, federation]
compress: false"""
if listeners:
# comment out this block
unsecure_http_bindings = "#" + re.sub(
"\n {10}",
lambda match: match.group(0) + "#",
unsecure_http_bindings,
)
if not secure_listeners:
secure_http_bindings = (
"""#- port: %(bind_port)s
# type: http
# tls: true
# resources:
# - names: [client, federation]"""
% locals()
)
return (
"""\
## Server ##
# The public-facing domain of the server
#
# The server_name name will appear at the end of usernames and room addresses
# created on this server. For example if the server_name was example.com,
# usernames on this server would be in the format @user:example.com
#
# In most cases you should avoid using a matrix specific subdomain such as
# matrix.example.com or synapse.example.com as the server_name for the same
# reasons you wouldn't use user@email.example.com as your email address.
# See https://matrix-org.github.io/synapse/latest/delegate.html
# for information on how to host Synapse on a subdomain while preserving
# a clean server_name.
#
# The server_name cannot be changed later so it is important to
# configure this correctly before you start Synapse. It should be all
# lowercase and may contain an explicit port.
# Examples: matrix.org, localhost:8080
#
server_name: "%(server_name)s"
# When running as a daemon, the file to store the pid in
#
pid_file: %(pid_file)s
# The absolute URL to the web client which /_matrix/client will redirect
# to if 'webclient' is configured under the 'listeners' configuration.
#
# This option can be also set to the filesystem path to the web client
# which will be served at /_matrix/client/ if 'webclient' is configured
# under the 'listeners' configuration, however this is a security risk:
# https://github.com/matrix-org/synapse#security-note
#
#web_client_location: https://riot.example.com/
# The public-facing base URL that clients use to access this Homeserver (not
# including _matrix/...). This is the same URL a user might enter into the
# 'Custom Homeserver URL' field on their client. If you use Synapse with a
# reverse proxy, this should be the URL to reach Synapse via the proxy.
# Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
# 'listeners' below).
#
# Defaults to 'https://<server_name>/'.
#
#public_baseurl: https://example.com/
# Uncomment the following to tell other servers to send federation traffic on
# port 443.
#
# By default, other servers will try to reach our server on port 8448, which can
# be inconvenient in some environments.
#
# Provided 'https://<server_name>/' on port 443 is routed to Synapse, this
# option configures Synapse to serve a file at
# 'https://<server_name>/.well-known/matrix/server'. This will tell other
# servers to send traffic to port 443 instead.
#
# See https://matrix-org.github.io/synapse/latest/delegate.html for more
# information.
#
# Defaults to 'false'.
#
#serve_server_wellknown: true
# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
#
#soft_file_limit: 0
# Presence tracking allows users to see the state (e.g online/offline)
# of other local and remote users.
#
presence:
# Uncomment to disable presence tracking on this homeserver. This option
# replaces the previous top-level 'use_presence' option.
#
#enabled: false
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, unless allow_profile_lookup_over_federation is set to false.
#
#require_auth_for_profile_requests: true
# Uncomment to require a user to share a room with another user in order
# to retrieve their profile information. Only checked on Client-Server
# requests. Profile requests from other servers should be checked by the
# requesting server. Defaults to 'false'.
#
#limit_profile_requests_to_users_who_share_rooms: true
# Uncomment to prevent a user's profile data from being retrieved and
# displayed in a room until they have joined it. By default, a user's
# profile data is included in an invite event, regardless of the values
# of the above two settings, and whether or not the users share a server.
# Defaults to 'true'.
#
#include_profile_data_on_invite: false
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
#
#allow_public_rooms_without_auth: true
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
#
#allow_public_rooms_over_federation: true
# The default room version for newly created rooms.
#
# Known room versions are listed here:
# https://matrix.org/docs/spec/#complete-list-of-room-versions
#
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "%(default_room_version)s"
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
#gc_thresholds: [700, 10, 10]
# The minimum time in seconds between each GC for a generation, regardless of
# the GC thresholds. This ensures that we don't do GC too frequently.
#
# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
# generation 0 GCs, etc.
#
# Defaults to `[1s, 10s, 30s]`.
#
#gc_min_interval: [0.5s, 30s, 1m]
# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is 100. -1 means no upper limit.
#
# Uncomment the following to increase the limit to 5000.
#
#filter_timeline_limit: 5000
# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
#
#block_non_admin_invites: true
# Room searching
#
# If disabled, new messages will not be indexed for searching and users
# will receive errors when searching for messages. Defaults to enabled.
#
#enable_search: false
# Prevent outgoing requests from being sent to the following blacklisted IP address
# CIDR ranges. If this option is not specified then it defaults to private IP
# address ranges (see the example below).
#
# The blacklist applies to the outbound requests for federation, identity servers,
# push servers, and for checking key validity for third-party invite events.
#
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
# listed here, since they correspond to unroutable addresses.)
#
# This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
#
# Note: The value is ignored when an HTTP proxy is in use
#
#ip_range_blacklist:
%(ip_range_blacklist)s
# List of IP address CIDR ranges that should be allowed for federation,
# identity servers, push servers, and for checking key validity for
# third-party invite events. This is useful for specifying exceptions to
# wide-ranging blacklisted target IP ranges - e.g. for communication with
# a push server only visible in your network.
#
# This whitelist overrides ip_range_blacklist and defaults to an empty
# list.
#
#ip_range_whitelist:
# - '192.168.1.1'
# List of ports that Synapse should listen on, their purpose and their
# configuration.
#
# Options for each listener include:
#
# port: the TCP port to bind to
#
# bind_addresses: a list of local addresses to listen on. The default is
# 'all local interfaces'.
#
# type: the type of listener. Normally 'http', but other valid options are:
# 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html),
# 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html),
# 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
#
# x_forwarded: Only valid for an 'http' listener. Set to true to use the
# X-Forwarded-For header as the client IP. Useful when Synapse is
# behind a reverse-proxy.
#
# resources: Only valid for an 'http' listener. A list of resources to host
# on this port. Options for each resource are:
#
# names: a list of names of HTTP resources. See below for a list of
# valid resource names.
#
# compress: set to true to enable HTTP compression for this resource.
#
# additional_resources: Only valid for an 'http' listener. A map of
# additional endpoints which should be loaded via dynamic modules.
#
# Valid resource names are:
#
# client: the client-server API (/_matrix/client), and the synapse admin
# API (/_synapse/admin). Also implies 'media' and 'static'.
#
# consent: user consent forms (/_matrix/consent).
# See https://matrix-org.github.io/synapse/latest/consent_tracking.html.
#
# federation: the server-server API (/_matrix/federation). Also implies
# 'media', 'keys', 'openid'
#
# keys: the key discovery API (/_matrix/keys).
#
# media: the media API (/_matrix/media).
#
# metrics: the metrics interface.
# See https://matrix-org.github.io/synapse/latest/metrics-howto.html.
#
# openid: OpenID authentication.
#
# replication: the HTTP replication API (/_synapse/replication).
# See https://matrix-org.github.io/synapse/latest/workers.html.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
#
# webclient: A web client. Requires web_client_location to be set.
#
listeners:
# TLS-enabled listener: for when matrix traffic is sent directly to synapse.
#
# Disabled by default. To enable it, uncomment the following. (Note that you
# will also need to give Synapse a TLS key and certificate: see the TLS section
# below.)
#
%(secure_http_bindings)s
# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
# https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
%(unsecure_http_bindings)s
# example additional_resources:
#
#additional_resources:
# "/_matrix/my/custom/endpoint":
# module: my_module.CustomRequestHandler
# config: {}
# Turn on the twisted ssh manhole service on localhost on the given
# port.
#
#- port: 9000
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
# Connection settings for the manhole
#
manhole_settings:
# The username for the manhole. This defaults to 'matrix'.
#
#username: manhole
# The password for the manhole. This defaults to 'rabbithole'.
#
#password: mypassword
# The private and public SSH key pair used to encrypt the manhole traffic.
# If these are left unset, then hardcoded and non-secret keys are used,
# which could allow traffic to be intercepted if sent over a public network.
#
#ssh_priv_key_path: %(config_dir_path)s/id_rsa
#ssh_pub_key_path: %(config_dir_path)s/id_rsa.pub
# Forward extremities can build up in a room due to networking delays between
# homeservers. Once this happens in a large room, calculation of the state of
# that room can become quite expensive. To mitigate this, once the number of
# forward extremities reaches a given threshold, Synapse will send an
# org.matrix.dummy_event event, which will reduce the forward extremities
# in the room.
#
# This setting defines the threshold (i.e. number of forward extremities in the
# room) at which dummy events are sent. The default value is 10.
#
#dummy_events_threshold: 5
## Homeserver blocking ##
# How to reach the server admin, used in ResourceLimitError
#
#admin_contact: 'mailto:admin@server.com'
# Global blocking
#
#hs_disabled: false
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit to the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
# 'mau_limit_alerting' is a means of limiting client side alerting
# should the mau limit be reached. This is useful for small instances
# where the admin has 5 mau seats (say) for 5 specific people and no
# interest increasing the mau limit further. Defaults to True, which
# means that alerting is enabled
#
#limit_usage_by_mau: false
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: false
# If enabled, the metrics for the number of monthly active users will
# be populated, however no one will be limited. If limit_usage_by_mau
# is true, this is implied to be true.
#
#mau_stats_only: false
# Sometimes the server admin will want to ensure certain accounts are
# never blocked by mau checking. These accounts are specified here.
#
#mau_limit_reserved_threepids:
# - medium: 'email'
# address: 'reserved_user@example.com'
# Used by phonehome stats to group together related servers.
#server_context: context
# Resource-constrained homeserver settings
#
# When this is enabled, the room "complexity" will be checked before a user
# joins a new remote room. If it is above the complexity limit, the server will
# disallow joining, or will instantly leave.
#
# Room complexity is an arbitrary measure based on factors such as the number of
# users in the room.
#
limit_remote_rooms:
# Uncomment to enable room complexity checking.
#
#enabled: true
# the limit above which rooms cannot be joined. The default is 1.0.
#
#complexity: 0.5
# override the error which is returned when the room is too complex.
#
#complexity_error: "This room is too complex."
# allow server admins to join complex rooms. Default is false.
#
#admins_can_join: true
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
#require_membership_for_aliases: false
# Whether to allow per-room membership profiles through the send of membership
# events with profile information that differ from the target's global profile.
# Defaults to 'true'.
#
#allow_per_room_profiles: false
# How long to keep redacted events in unredacted form in the database. After
# this period redacted events get replaced with their redacted form in the DB.
#
# Defaults to `7d`. Set to `null` to disable.
#
#redaction_retention_period: 28d
# How long to track users' last seen time and IPs in the database.
#
# Defaults to `28d`. Set to `null` to disable clearing out of old rows.
#
#user_ips_max_age: 14d
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.
# Note that for some endpoints the error situation is the e-mail already being
# used, and for others the error is entering the e-mail being unused.
# If this option is enabled, instead of returning an error, these endpoints will
# act as if no error happened and return a fake session ID ('sid') to clients.
#
#request_token_inhibit_3pid_errors: true
# A list of domains that the domain portion of 'next_link' parameters
# must match.
#
# This parameter is optionally provided by clients while requesting
# validation of an email or phone number, and maps to a link that
# users will be automatically redirected to after validation
# succeeds. Clients can make use this parameter to aid the validation
# process.
#
# The whitelist is applied whether the homeserver or an
# identity server is handling validation.
#
# The default value is no whitelist functionality; all domains are
# allowed. Setting this value to an empty list will instead disallow
# all domains.
#
#next_link_domain_whitelist: ["matrix.org"]
# Templates to use when generating email or HTML page contents.
#
templates:
# Directory in which Synapse will try to find template files to use to generate
# email or HTML page contents.
# If not set, or a file is not found within the template directory, a default
# template from within the Synapse package will be used.
#
# See https://matrix-org.github.io/synapse/latest/templates.html for more
# information about using custom templates.
#
#custom_template_directory: /path/to/custom/templates/
"""
% locals()
)
def read_arguments(self, args):
    """Copy server-related command line overrides onto this config object.

    Only options that were explicitly supplied on the command line are
    applied; argparse leaves unset options as None.
    """
    for option in ("manhole", "daemonize", "print_pidfile"):
        value = getattr(args, option)
        if value is not None:
            setattr(self, option, value)
@staticmethod
def add_arguments(parser):
    """Register the server-specific command line options on *parser*."""
    group = parser.add_argument_group("server")
    # Boolean flags share the same shape; default=None lets read_arguments
    # distinguish "not given" from "given".
    flag_options = [
        (("-D", "--daemonize"), "Daemonize the homeserver"),
        (
            ("--print-pidfile",),
            "Print the path to the pidfile just before daemonizing",
        ),
    ]
    for flags, help_text in flag_options:
        group.add_argument(*flags, action="store_true", default=None, help=help_text)
    group.add_argument(
        "--manhole",
        metavar="PORT",
        dest="manhole",
        type=int,
        help="Turn on the twisted telnet manhole service on the given port.",
    )
def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]:
    """Reads the three durations for the GC min interval option, returning seconds."""
    if durations is None:
        return None
    try:
        if len(durations) != 3:
            raise ValueError()
        # parse_duration yields milliseconds; convert each to seconds.
        return tuple(self.parse_duration(d) / 1000 for d in durations)
    except Exception:
        raise ConfigError(
            "Value of `gc_min_interval` must be a list of three durations if set"
        )
def is_threepid_reserved(reserved_threepids, threepid):
    """Check the threepid against the reserved threepid config

    Args:
        reserved_threepids([dict]) - list of reserved threepids
        threepid(dict) - The threepid to test for

    Returns:
        boolean Is the threepid undertest reserved_user
    """
    return any(
        threepid["medium"] == entry["medium"]
        and threepid["address"] == entry["address"]
        for entry in reserved_threepids
    )
def read_gc_thresholds(thresholds):
    """Reads the three integer thresholds for garbage collection. Ensures that
    the thresholds are integers if thresholds are supplied.

    Args:
        thresholds: a sequence of three int-convertible values, or None.

    Returns:
        A (gen0, gen1, gen2) tuple of ints, or None if thresholds is None.

    Raises:
        ConfigError: if thresholds is not a sequence of exactly three
            int-convertible values.
    """
    if thresholds is None:
        return None
    try:
        # Explicit check rather than `assert`: asserts are stripped when
        # Python runs with -O, which would silently accept (and truncate)
        # a list of the wrong length.
        if len(thresholds) != 3:
            raise ValueError()
        return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )
def parse_listener_def(listener: Any) -> ListenerConfig:
    """Parse a single listener definition from the config file into a
    ListenerConfig, validating the port and defaulting bind addresses."""
    listener_type = listener["type"]

    port = listener.get("port")
    if not isinstance(port, int):
        raise ConfigError("Listener configuration is lacking a valid 'port' option")

    tls = listener.get("tls", False)

    addresses = listener.get("bind_addresses", [])
    single_address = listener.get("bind_address")
    # A singular 'bind_address' is folded into the address list.
    if single_address:
        addresses.append(single_address)

    # Fall back to defaults when no address was configured at all; the
    # metrics listener only supports IPv4.
    if not addresses:
        addresses.extend(
            ["0.0.0.0"] if listener_type == "metrics" else DEFAULT_BIND_ADDRESSES
        )

    http_config = None
    if listener_type == "http":
        resources = [
            HttpResourceConfig(**res) for res in listener.get("resources", [])
        ]
        http_config = HttpListenerConfig(
            x_forwarded=listener.get("x_forwarded", False),
            resources=resources,
            additional_resources=listener.get("additional_resources", {}),
            tag=listener.get("tag"),
        )

    return ListenerConfig(port, addresses, listener_type, tls, http_config)
# Deprecation notice logged (see _warn_if_webclient_configured) when a
# homeserver config still lists the removed 'webclient' listener resource.
NO_MORE_WEB_CLIENT_WARNING = """
Synapse no longer includes a web client. To enable a web client, configure
web_client_location. To remove this warning, remove 'webclient' from the 'listeners'
configuration.
"""
def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
    """Log NO_MORE_WEB_CLIENT_WARNING once if any HTTP listener still exposes
    the removed 'webclient' resource."""
    for listener in listeners:
        http_options = listener.http_options
        if not http_options:
            continue
        for res in http_options.resources:
            if "webclient" in res.names:
                logger.warning(NO_MORE_WEB_CLIENT_WARNING)
                return
# JSON schema used to validate the optional 'manhole_settings' config
# section (credentials and SSH key paths for the twisted manhole service).
_MANHOLE_SETTINGS_SCHEMA = {
    "type": "object",
    "properties": {
        "username": {"type": "string"},
        "password": {"type": "string"},
        "ssh_priv_key_path": {"type": "string"},
        "ssh_pub_key_path": {"type": "string"},
    },
}
| 39.186453 | 104 | 0.624408 |
07bf9e8b50b297ac349af993db1cb05022d7091b | 10,774 | py | Python | tests/providers/google/marketing_platform/operators/test_campaign_manager.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 1 | 2020-08-20T02:00:27.000Z | 2020-08-20T02:00:27.000Z | tests/providers/google/marketing_platform/operators/test_campaign_manager.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 9 | 2020-07-28T15:07:03.000Z | 2022-03-29T22:27:52.000Z | tests/providers/google/marketing_platform/operators/test_campaign_manager.py | vuppalli/airflow | dfe8337ca2d3ed173d9ecc112938271519792c40 | [
"Apache-2.0"
] | 2 | 2020-03-08T14:12:55.000Z | 2020-06-10T10:17:32.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from tempfile import NamedTemporaryFile
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.operators.campaign_manager import (
GoogleCampaignManagerBatchInsertConversionsOperator, GoogleCampaignManagerBatchUpdateConversionsOperator,
GoogleCampaignManagerDeleteReportOperator, GoogleCampaignManagerDownloadReportOperator,
GoogleCampaignManagerInsertReportOperator, GoogleCampaignManagerRunReportOperator,
)
# Shared fixtures for the Campaign Manager operator tests below.
API_VERSION = "api_version"
GCP_CONN_ID = "google_cloud_default"

# Minimal conversion payload in the shape accepted by the DCM/DFA
# reporting API's conversions endpoints.
CONVERSION = {
    "kind": "dfareporting#conversion",
    "floodlightActivityId": 1234,
    "floodlightConfigurationId": 1234,
    "gclid": "971nc2849184c1914019v1c34c14",
    "ordinal": "0",
    "customVariables": [
        {
            "kind": "dfareporting#customFloodlightVariable",
            "type": "U10",
            "value": "value",
        }
    ],
}
class TestGoogleCampaignManagerDeleteReportOperator(TestCase):
    """Unit tests for GoogleCampaignManagerDeleteReportOperator."""

    # NB: mock.patch decorators are applied bottom-up, so the bottom-most
    # patch corresponds to the first mock parameter.
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    def test_execute(self, mock_base_op, hook_mock):
        # execute() should delete the given report through a hook built with
        # the default GCP connection and the requested API version.
        profile_id = "PROFILE_ID"
        report_id = "REPORT_ID"

        op = GoogleCampaignManagerDeleteReportOperator(
            profile_id=profile_id,
            report_id=report_id,
            api_version=API_VERSION,
            task_id="test_task",
        )
        op.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.delete_report.assert_called_once_with(
            profile_id=profile_id, report_id=report_id
        )
class TestGoogleCampaignManagerGetReportOperator(TestCase):
    """Unit tests for GoogleCampaignManagerDownloadReportOperator."""

    # mock.patch decorators apply bottom-up: the bottom-most patch maps to the
    # first mock parameter (xcom_mock) and the top-most to the last (http_mock).
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.http"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.tempfile"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GCSHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerDownloadReportOperator.xcom_push"
    )
    def test_execute(
        self,
        xcom_mock,
        mock_base_op,
        gcs_hook_mock,
        hook_mock,
        tempfile_mock,
        http_mock,
    ):
        profile_id = "PROFILE_ID"
        report_id = "REPORT_ID"
        file_id = "FILE_ID"
        bucket_name = "test_bucket"
        report_name = "test_report.csv"
        temp_file_name = "TEST"

        # Simulate the media download finishing after a single chunk, and the
        # temp file context manager yielding a known file name.
        http_mock.MediaIoBaseDownload.return_value.next_chunk.return_value = (
            None,
            True,
        )
        tempfile_mock.NamedTemporaryFile.return_value.__enter__.return_value.name = (
            temp_file_name
        )

        op = GoogleCampaignManagerDownloadReportOperator(
            profile_id=profile_id,
            report_id=report_id,
            file_id=file_id,
            bucket_name=bucket_name,
            report_name=report_name,
            api_version=API_VERSION,
            task_id="test_task",
        )
        op.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.get_report_file.assert_called_once_with(
            profile_id=profile_id, report_id=report_id, file_id=file_id
        )
        gcs_hook_mock.assert_called_once_with(
            google_cloud_storage_conn_id=GCP_CONN_ID, delegate_to=None
        )
        # The report is gzip-compressed on upload, hence the ".gz" suffix.
        gcs_hook_mock.return_value.upload.assert_called_once_with(
            bucket_name=bucket_name,
            object_name=report_name + ".gz",
            gzip=True,
            filename=temp_file_name,
            mime_type="text/csv",
        )
        xcom_mock.assert_called_once_with(
            None, key="report_name", value=report_name + ".gz"
        )
class TestGoogleCampaignManagerInsertReportOperator(TestCase):
    """Unit tests for GoogleCampaignManagerInsertReportOperator."""

    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerInsertReportOperator.xcom_push"
    )
    def test_execute(self, xcom_mock, mock_base_op, hook_mock):
        # execute() should insert the report and push the returned id to XCom.
        profile_id = "PROFILE_ID"
        report = {"report": "test"}
        report_id = "test"

        hook_mock.return_value.insert_report.return_value = {"id": report_id}

        op = GoogleCampaignManagerInsertReportOperator(
            profile_id=profile_id,
            report=report,
            api_version=API_VERSION,
            task_id="test_task",
        )
        op.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.insert_report.assert_called_once_with(
            profile_id=profile_id, report=report
        )
        xcom_mock.assert_called_once_with(None, key="report_id", value=report_id)

    def test_prepare_template(self):
        # When `report` is a path to a .json file, prepare_template() should
        # load it into a dict.
        profile_id = "PROFILE_ID"
        report = {"key": "value"}
        with NamedTemporaryFile("w+", suffix=".json") as f:
            f.write(json.dumps(report))
            f.flush()
            op = GoogleCampaignManagerInsertReportOperator(
                profile_id=profile_id,
                report=f.name,
                api_version=API_VERSION,
                task_id="test_task",
            )
            op.prepare_template()

        assert isinstance(op.report, dict)
        assert op.report == report
class TestGoogleCampaignManagerRunReportOperator(TestCase):
    """Unit tests for GoogleCampaignManagerRunReportOperator."""

    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerRunReportOperator.xcom_push"
    )
    def test_execute(self, xcom_mock, mock_base_op, hook_mock):
        # execute() should run the report and push the resulting file id.
        profile_id = "PROFILE_ID"
        report_id = "REPORT_ID"
        file_id = "FILE_ID"
        synchronous = True

        hook_mock.return_value.run_report.return_value = {"id": file_id}

        op = GoogleCampaignManagerRunReportOperator(
            profile_id=profile_id,
            report_id=report_id,
            synchronous=synchronous,
            api_version=API_VERSION,
            task_id="test_task",
        )
        op.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.run_report.assert_called_once_with(
            profile_id=profile_id, report_id=report_id, synchronous=synchronous
        )
        xcom_mock.assert_called_once_with(None, key="file_id", value=file_id)
class TestGoogleCampaignManagerBatchInsertConversionsOperator(TestCase):
    """Unit tests for GoogleCampaignManagerBatchInsertConversionsOperator."""

    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    def test_execute(self, mock_base_op, hook_mock):
        # execute() should forward the conversion batch and encryption info to
        # the hook; max_failed_inserts defaults to 0.
        profile_id = "PROFILE_ID"
        op = GoogleCampaignManagerBatchInsertConversionsOperator(
            task_id="insert_conversion",
            profile_id=profile_id,
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
        )
        op.execute(None)
        hook_mock.return_value.conversions_batch_insert.assert_called_once_with(
            profile_id=profile_id,
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
            max_failed_inserts=0,
        )
class TestGoogleCampaignManagerBatchUpdateConversionOperator(TestCase):
    """Unit tests for GoogleCampaignManagerBatchUpdateConversionsOperator."""

    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    def test_execute(self, mock_base_op, hook_mock):
        # execute() should forward the conversion batch and encryption info to
        # the hook; max_failed_updates defaults to 0.
        profile_id = "PROFILE_ID"
        op = GoogleCampaignManagerBatchUpdateConversionsOperator(
            task_id="update_conversion",
            profile_id=profile_id,
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
        )
        op.execute(None)
        hook_mock.return_value.conversions_batch_update.assert_called_once_with(
            profile_id=profile_id,
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
            max_failed_updates=0,
        )
| 35.675497 | 109 | 0.672916 |
cfce62c2199f8e88eca5924b87fa2bf157bc5f3e | 4,366 | py | Python | chembl_core_db/db/models/abstractModel.py | thesgc/chembl_core_db | 028150e762bc5e92f49de9a40e09f205955327f9 | [
"Apache-2.0"
] | null | null | null | chembl_core_db/db/models/abstractModel.py | thesgc/chembl_core_db | 028150e762bc5e92f49de9a40e09f205955327f9 | [
"Apache-2.0"
] | null | null | null | chembl_core_db/db/models/abstractModel.py | thesgc/chembl_core_db | 028150e762bc5e92f49de9a40e09f205955327f9 | [
"Apache-2.0"
] | null | null | null | __author__ = 'mnowotka'
from django.db import models
from django.conf import settings
from django.db.models.base import ModelBase
import re
import sys
import inspect
from django.core.exceptions import ImproperlyConfigured
try:
# django >= 1.7
from django.apps import apps
get_model = apps.get_model
except ImportError:
# django < 1.7
from django.db.models import get_model
import copy
#-----------------------------------------------------------------------------------------------------------------------
def convert(name):
    """Convert a CamelCase identifier to snake_case (e.g. 'MyTable' -> 'my_table')."""
    # Pass 1: split before an uppercase run followed by a lowercase word
    # ("HTTPResponse" -> "HTTP_Response").
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # Pass 2: split a lowercase letter or digit followed by an uppercase
    # letter ("getX" -> "get_X"), then lowercase everything.
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return step_two.lower()
#-----------------------------------------------------------------------------------------------------------------------
class ChemblCoreAbstractModel(models.Model):
    """Abstract base for ChEMBL core-schema models (creates no table itself)."""

    class Meta:
        abstract = True
        app_label = 'chembl_core_model'
        # Whether Django manages (creates/migrates) the core tables is
        # controlled globally from project settings.
        managed = settings.CORE_TABLES_MANAGED
#-----------------------------------------------------------------------------------------------------------------------
class ChemblAppAbstractModel(models.Model):
    """Abstract base for app-specific ChEMBL models (creates no table itself)."""

    class Meta:
        abstract = True
        # App-specific tables have their own managed flag, independent of the
        # core tables.
        managed = settings.APP_SPECIFIC_TABLES_MANAGED
#-----------------------------------------------------------------------------------------------------------------------
class ChemblModelMetaClass(ModelBase):
    """Model metaclass that derives db_table from the class name (or an
    explicit Meta.db_table), snake_cases it via convert(), and - outside
    export mode - prefixes it with the configured schema name.

    NOTE(review): uses the Python 2 `unicode` builtin; not Python 3 safe.
    """

    def __new__(cls, name, bases, attrs):
        n = name
        # An explicit Meta.db_table overrides the class name as the basis
        # for the generated table name.
        if "Meta" in attrs:
            meta = attrs["Meta"]
            if hasattr(meta, "db_table"):
                n = meta.db_table
        klas = super(ChemblModelMetaClass, cls).__new__(cls, name, bases, attrs)
        if settings.EXPORT_MODE:
            klas._meta.db_table = unicode(convert(n))
        else:
            # Qualify with the schema name, e.g. "SCHEMA.table_name".
            klas._meta.db_table = u'' + settings.CHEMBL_SCHEMA_NAME + '.' + convert(n) + ''
        return klas
#-----------------------------------------------------------------------------------------------------------------------
def remove_field(cls, f_name):
    """Delete attribute *f_name* from *cls* if present; silently no-op otherwise."""
    if not hasattr(cls, f_name):
        return
    delattr(cls, f_name)
#-----------------------------------------------------------------------------------------------------------------------
def rebase(module, klas):
    """Re-resolve a related model (class or dotted-path string) against *module*.

    If a class with the same unqualified name is already defined in *module*,
    return that class; otherwise return an 'app_label.ClassName' string built
    from the module's top-level package, which Django resolves lazily.

    NOTE(review): uses the Python 2 `basestring` builtin; not Python 3 safe.
    """
    if isinstance(klas, basestring):
        relClsName = klas.split('.')[-1]
    else:
        relClsName = klas.__name__
    # Look for a class of the same name defined in the target module.
    lst = inspect.getmembers(sys.modules[module], lambda x: inspect.isclass(x) and x.__name__ == relClsName)
    if len(lst):
        return lst[0][1]
    return module.split('.')[0] + '.' + relClsName
#-----------------------------------------------------------------------------------------------------------------------
class ModifiedModelMetaclass(ChemblModelMetaClass):
    """Metaclass for 'helper' models that clone another model's fields.

    The helper declares `Meta.model` (a model class or 'app.Model' string)
    and an optional `Meta.exclude` list; each remaining field of the source
    model is deep-copied onto the helper, with relations re-pointed at the
    helper's own module via rebase().
    """

    def __new__(cls, name, bases, attrs):
        try:
            metaCls = attrs['Meta']
            meta = metaCls()
        except KeyError:
            raise ImproperlyConfigured("Helper class %s hasn't a Meta subclass!" % name)

        # Find model class for this helper
        try:
            model = getattr(meta, 'model')
        except AttributeError:
            # No source model declared: behave like a normal model class.
            return super(ModifiedModelMetaclass, cls).__new__(cls, name, bases, attrs)

        if isinstance(model, basestring):
            model_class = get_model(*model.split('.'))
        elif issubclass(model, models.Model):
            model_class = model
        else:
            raise ImproperlyConfigured("Model informed by Meta subclass of %s is improperly!" % name)

        # Strip our custom Meta options so Django's option parser never sees them.
        remove_field(metaCls, 'model')
        module = attrs['__module__']

        excludes = getattr(meta, 'exclude', ())
        if excludes == None:
            excludes = ()
        remove_field(metaCls, 'exclude')
        attrs['Meta'] = metaCls

        fields = [f for f in model_class._meta.fields + model_class._meta.local_many_to_many if f.name not in excludes]

        for field in fields:
            # Deep-copy so mutating the relation targets below doesn't touch
            # the source model's field instances.
            f = copy.deepcopy(field)
            if hasattr(f, 'rel') and f.rel:
                if hasattr(f.rel, 'through'):
                    f.rel.through = rebase(module, f.rel.through)
                f.rel.to = rebase(module, f.rel.to)
            attrs[f.name] = f

        return super(ModifiedModelMetaclass, cls).__new__(cls, name, bases, attrs)
#-----------------------------------------------------------------------------------------------------------------------
| 35.495935 | 120 | 0.477325 |
491225dd330124d87f45f6a7e84ccbcea657ccb2 | 11,371 | py | Python | resources/Database.py | washingtonSampaioVieira/spreadsheet-migrator | e773dc3abc501dc84c11cf740e2de27abfb9491f | [
"Apache-2.0"
] | null | null | null | resources/Database.py | washingtonSampaioVieira/spreadsheet-migrator | e773dc3abc501dc84c11cf740e2de27abfb9491f | [
"Apache-2.0"
] | null | null | null | resources/Database.py | washingtonSampaioVieira/spreadsheet-migrator | e773dc3abc501dc84c11cf740e2de27abfb9491f | [
"Apache-2.0"
] | null | null | null | import os
from dotenv import load_dotenv
import mysql.connector as mysql
from . import Logger
from config import DatabaseField
class Database:
    """Thin MySQL access layer for migrating spreadsheet rows into the
    order ("solicitation") tables.

    NOTE(review): most queries below interpolate values with f-strings
    instead of using parameterized placeholders - a SQL-injection risk if
    any spreadsheet cell contains a quote. They should be migrated to the
    `cursor.execute(query, params)` form already used by
    solicitation_exists() / get_owner_id().
    """

    def __init__(self):
        # Dedicated logger; DB errors are recorded in database.log.
        self.logger = Logger.create_logger('Database', 'database.log')

    def connect(self):
        """Open a new MySQL connection from .env credentials; None on failure."""
        connection = None
        load_dotenv()
        db_hostname = os.getenv('DB_HOSTNAME')
        db_user = os.getenv('DB_USERNAME')
        db_pass = os.getenv('DB_PASSWORD')
        db_name = os.getenv('DB_NAME')
        try:
            connection = mysql.connect(host=db_hostname, user=db_user, passwd=db_pass, database=db_name)
        except mysql.errors.InterfaceError:
            self.logger.error('Error connecting to database, check .env file')
        return connection

    def solicitation_exists(self, solicitation_code, model_id):
        """Return the id_order for (order_code, product) if present, else False."""
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor(dictionary=True, buffered=True)
        query = 'select id_order from tb_order where order_code = %s and id_product = %s limit 1'
        try:
            cursor.execute(query, (solicitation_code, model_id))
            result = cursor.fetchone()
            row_count = cursor.rowcount  # NOTE(review): unused
            db.close()
            if result is not None:
                return result['id_order']
            else:
                return False
        except mysql.errors.ProgrammingError as error:
            self.logger.error("Something went wrong on solicitation_exists function.\n\tDetails: %s" % error.msg)
            return False

    def get_status_order(self, status):
        """Look up id_order_status for a status label; False on failure.

        NOTE(review): `status` is interpolated into the SQL string
        (injection risk), and the connection is never closed on this path.
        """
        query = (f'select id_order_status from tb_order_status where status = "{status}" limit 1 ')
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor()
        try:
            cursor.execute(query)
            row_count = cursor.rowcount  # NOTE(review): unused
            result = cursor.fetchone()
            id_order_status = result[0]
            return id_order_status
        except mysql.errors.ProgrammingError as error:
            print(error)
            self.logger.error("Something went wrong on select tb_order_status function.\n\tDetails: %s" % error.msg)
            return False

    def update_finished_status(self, id_order):
        """Stamp finished_at=NOW() on the order's open status rows.

        Returns True if any row was updated, False otherwise.
        NOTE(review): id_order is interpolated into the SQL (injection risk).
        """
        db = self.connect()
        if db is None:
            return False
        query = (
            f'UPDATE `tb_rel_order_status` SET `finished_at` = NOW() WHERE `id_order` = "{id_order}" and '
            '`finished_at` is null'
        )
        cursor = db.cursor()
        try:
            cursor.execute(query)
            db.commit()
            row_count = cursor.rowcount
            db.close()
            return row_count != 0
        except mysql.errors.ProgrammingError as error:
            self.logger.error("Something went wrong on update_ function.\n\tDetails: %s" % error.msg)
            return False
        return True  # NOTE(review): unreachable - both branches above return

    def insert_delivery(self, solicitation):
        """Create a tb_delivery row for the solicitation's tracking code.

        NOTE(review): order_id/tracking code are interpolated into the SQL
        (injection risk); solicitation_exists() returns False (not None) on
        missing orders, so the `is None` guard never fires.
        """
        order_id = self.solicitation_exists(solicitation[DatabaseField.ID], DatabaseField.CIPP_ID)
        if order_id is None:
            return False
        if not solicitation[DatabaseField.TRACKING_CODE]:
            return False
        query = (
            'INSERT INTO `tb_delivery` (`id_delivery`, `id_order`, `price`, `tracking_code`, `delivery_at`, '
            '`created_at`, `updated_at`) VALUES '
            f'(UUID(), "{order_id}", 0.00, "{solicitation[DatabaseField.TRACKING_CODE]}", null,'
            f' NOW(), NOW());')
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor()
        try:
            cursor.execute(query)
            db.commit()
            self.logger.info('Create delivery at solicitaion%s' % solicitation)
            row_count = cursor.rowcount
            return row_count != 0
        except mysql.errors.ProgrammingError as error:
            print(error)
            self.logger.error("error creating delivery record: %s" % error.msg)
            return False
        return True  # NOTE(review): unreachable - both branches above return

    def update_status_solicitation(self, solicitation):
        """Append a status-history row for an existing order, closing any open
        status first; also records a delivery when the status is DELIVERED.

        NOTE(review): status/user/order ids are interpolated into the SQL
        (injection risk).
        """
        id_system_user = self.get_user_from_client_cnpj(solicitation[DatabaseField.CNPJ])
        id_status_order = self.get_status_order(solicitation[DatabaseField.STATUS_SOLIC])
        id_order = self.solicitation_exists(solicitation[DatabaseField.ID], DatabaseField.CIPP_ID)
        if id_system_user and id_status_order and id_order:
            self.update_finished_status(id_order)
        else:
            print("unregistered client")
            self.logger.info(f"Unregistered client, Solicitation: {solicitation}")
            return False
        if solicitation[DatabaseField.STATUS_SOLIC] == DatabaseField.DELIVERED:
            self.insert_delivery(solicitation)
        query = (
            'INSERT INTO `tb_rel_order_status`'
            '(`id_order_status`, `id_status_order`, `id_system_user`, `id_order`, `created_at`, `updated_at`, '
            '`finished_at`) '
            f'VALUES (UUID(), "{id_status_order}", "{DatabaseField.ID_SYSTEM_USER}", "{id_order}", NOW(), NOW(), NULL)'
        )
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor()
        try:
            cursor.execute(query)
            db.commit()
            self.logger.info('Create new status solicitaion%s' % solicitation)
            row_count = cursor.rowcount
            db.close()
            return row_count != 0
        except mysql.errors.ProgrammingError as error:
            print(error)
            self.logger.error("Something went wrong on update_status_solicitation function.\n\tDetails: %s" % error.msg)
            return False

    def get_user_from_client_id(self, id_client):
        """Return the id_client_user linked to a client print-log id; False on error.

        NOTE(review): id_client is interpolated into the SQL (injection risk);
        raises TypeError (result is None) when no row matches.
        """
        query = ('select id_client_user from tb_client_print_log as cl '
                 f'inner join tb_client_user as u on u.id_client = cl.id_client where cl.id_client_print_log = "{id_client}" limit 1 ')
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor()
        try:
            cursor.execute(query)
            row_count = cursor.rowcount  # NOTE(review): unused
            result = cursor.fetchone()
            id_client_user = result[0]
            return id_client_user
        except mysql.errors.ProgrammingError as error:
            print(error)
            self.logger.error("Something went wrong on get_user_from_client_id function.\n\tDetails: %s" % error.msg)
            return False

    def get_user_from_client_cnpj(self, cnpj):
        """Return the id_client_user for a company CNPJ, or False if absent.

        NOTE(review): cnpj is interpolated into the SQL (injection risk);
        `result != None` should be `result is not None`.
        """
        query = ('select id_client_user from tb_client_print_log as cl '
                 f'inner join tb_client_user as u on u.id_client = cl.id_client where cl.cnpj = "{cnpj}" limit 1')
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor()
        try:
            cursor.execute(query)
            result = cursor.fetchone()
            if result != None:
                id_client_user = result[0]
                return id_client_user
            else:
                return False
        except mysql.errors.ProgrammingError as error:
            print(error)
            self.logger.error("Something went wrong on get_user_from_client_cnpj function.\n\tDetails: %s" % error.msg)
            return False

    def insert_solicitation(self, solicitation):
        """Insert a new tb_order row for the solicitation and record its status.

        Returns True when the order already exists or was inserted, False on
        any validation/DB failure.
        """
        # Skip rows whose order_code is already present for this product.
        if self.solicitation_exists(solicitation[DatabaseField.ID], DatabaseField.CIPP_ID) is not False:
            print("Solicitação ja existente")
            return True
        db = self.connect()
        if db is None:
            return False
        solicitation[DatabaseField.OWNER_ID] = self.get_owner_id(solicitation[DatabaseField.CNPJ])
        if solicitation[DatabaseField.OWNER_ID] == 0:
            print(solicitation[DatabaseField.CNPJ])
            print("Nem um propritario cadastrado com o CNPJ desta solicitação")
            return False
        solicitation[DatabaseField.CLIENT_USER] = self.get_user_from_client_id(solicitation[DatabaseField.OWNER_ID])
        if solicitation[DatabaseField.CLIENT_USER] == 0:
            print("Empresa não tem usuário cadastrado")
            return False
        cursor = db.cursor()
        query = (
            'INSERT INTO `tb_order`(`id_order`, `id_client_user`, `id_product`, `id_delivery_type`, `id_log_client`, '
            '`order_code`, `quantity`, `initial_number`, `final_number`, `created_at`, `updated_at`) '
            'VALUES (UUID(), %s, %s, %s, %s, %s, %s, %s, %s, NOW(), NOW())'
        )
        self.logger.info('Inserting solicitation %s' % solicitation)
        try:
            cursor.execute(query, (
                solicitation[DatabaseField.CLIENT_USER],
                DatabaseField.CIPP_ID,
                DatabaseField.DELIVERY_TYPE_WITHDRAWAL,
                solicitation[DatabaseField.OWNER_ID],
                solicitation[DatabaseField.ID],
                solicitation[DatabaseField.QUANTITY],
                solicitation[DatabaseField.INITIAL_NUMBER],
                solicitation[DatabaseField.FINAL_NUMBER],
            ))
            db.commit()
            # NOTE(review): cursor.execute() returns None, so this clobbers the
            # solicitation id rather than capturing last_insert_id().
            solicitation[DatabaseField.ID] = cursor.execute('SELECT last_insert_id()')
            # Record the initial status for the newly inserted solicitation.
            self.update_status_solicitation(solicitation)
            row_count = cursor.rowcount
            db.close()
            return row_count != 0
        except mysql.errors.ProgrammingError as error:
            print(error)
            self.logger.error("Something went wrong on insert_solicitation function.\ntDetails: %s" % error.msg)
            return False

    def get_owner_id(self, cnpj):
        """Return the owner (client print-log) id for a CNPJ, or 0 if unknown."""
        db = self.connect()
        owner_id = 0
        if db is None:
            return owner_id
        cursor = db.cursor(dictionary=True, buffered=True)
        query = 'select id_client_print_log from tb_client_print_log where cnpj = %s limit 1'
        try:
            cursor.execute(query, (cnpj,))
            if cursor.rowcount == 0:
                return owner_id
            self.logger.info('Fetching owner id from cnpj: %s' % cnpj)
            owner = cursor.fetchone()
            owner_id = owner['id_client_print_log']
            db.close()
            return owner_id
        except mysql.errors.ProgrammingError as error:
            self.logger.error("Something went wrong on get_owner_id function.\n\tDetails: %s" % error.msg)
            return owner_id

    def owner_exists(self, cnpj):
        """Return True if an owner row exists for the CNPJ, False otherwise."""
        db = self.connect()
        if db is None:
            return False
        cursor = db.cursor(buffered=True)
        query = 'select proprietario_id from tbl_proprietario where cnpj = %s'
        try:
            cursor.execute(query, (cnpj,))
            row_count = cursor.rowcount
            db.close()
            return row_count != 0
        except mysql.errors.ProgrammingError as error:
            self.logger.error("Something went wrong on owner_exists function.\n\tDetails: %s" % error.msg)
            return False

    def insert(self, data):
        """Dispatch a parsed spreadsheet row to the appropriate insert method.

        Currently only solicitation rows (those carrying an entry date) are
        handled; returns None for anything else.
        """
        if DatabaseField.ENTRY_DATE in data.keys():
            return self.insert_solicitation(data)
        # elif DatabaseField.NAME in data.keys():
        #     return self.insert_owner(data)
| 33.444118 | 136 | 0.59854 |
3c9812ff3973df5b80abd078b29eaa65e3945b09 | 9,905 | py | Python | qa/rpc-tests/test_framework/equihash.py | Z-ee/Z96 | cb8022edc7c49706dfeaa393c4b1f6d5a6b945d5 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/equihash.py | Z-ee/Z96 | cb8022edc7c49706dfeaa393c4b1f6d5a6b945d5 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/equihash.py | Z-ee/Z96 | cb8022edc7c49706dfeaa393c4b1f6d5a6b945d5 | [
"MIT"
] | null | null | null | from operator import itemgetter
import struct
# Toggle extra tracing output from the solver routines below.
DEBUG = False
VERBOSE = False

# Width (in bits) of the accumulator words used by expand_array /
# compress_array, and the matching all-ones mask.
word_size = 32
word_mask = (1<<word_size)-1
def expand_array(inp, out_len, bit_len, byte_pad=0):
    """Expand a packed big-endian bit stream into byte-padded elements.

    Each bit_len-bit element of *inp* is written into its own
    (bit_len+7)//8 + byte_pad byte slot of the output.

    NOTE(review): Python 2 code - relies on integer `/` division and
    `xrange`; the first bit_len_mask assignment is redundant (recomputed
    identically below).
    """
    assert bit_len >= 8 and word_size >= 7+bit_len
    bit_len_mask = (1<<bit_len)-1
    out_width = (bit_len+7)/8 + byte_pad
    assert out_len == 8*out_width*len(inp)/bit_len
    out = bytearray(out_len)
    bit_len_mask = (1 << bit_len) - 1
    # The acc_bits least-significant bits of acc_value represent a bit sequence
    # in big-endian order.
    acc_bits = 0;
    acc_value = 0;
    j = 0
    for i in xrange(len(inp)):
        acc_value = ((acc_value << 8) & word_mask) | inp[i]
        acc_bits += 8
        # When we have bit_len or more bits in the accumulator, write the next
        # output element.
        if acc_bits >= bit_len:
            acc_bits -= bit_len
            for x in xrange(byte_pad, out_width):
                out[j+x] = (
                    # Big-endian
                    acc_value >> (acc_bits+(8*(out_width-x-1)))
                ) & (
                    # Apply bit_len_mask across byte boundaries
                    (bit_len_mask >> (8*(out_width-x-1))) & 0xFF
                )
            j += out_width
    return out
def compress_array(inp, out_len, bit_len, byte_pad=0):
    """Inverse of expand_array: pack byte-padded bit_len-bit elements back
    into a contiguous big-endian bit stream.

    NOTE(review): Python 2 code - relies on integer `/` division and `xrange`.
    """
    assert bit_len >= 8 and word_size >= 7+bit_len
    in_width = (bit_len+7)/8 + byte_pad
    assert out_len == bit_len*len(inp)/(8*in_width)
    out = bytearray(out_len)
    bit_len_mask = (1 << bit_len) - 1
    # The acc_bits least-significant bits of acc_value represent a bit sequence
    # in big-endian order.
    acc_bits = 0;
    acc_value = 0;
    j = 0
    for i in xrange(out_len):
        # When we have fewer than 8 bits left in the accumulator, read the next
        # input element.
        if acc_bits < 8:
            acc_value = ((acc_value << bit_len) & word_mask) | inp[j]
            for x in xrange(byte_pad, in_width):
                acc_value = acc_value | (
                    (
                        # Apply bit_len_mask across byte boundaries
                        inp[j+x] & ((bit_len_mask >> (8*(in_width-x-1))) & 0xFF)
                    ) << (8*(in_width-x-1)));  # Big-endian
            j += in_width
            acc_bits += bit_len
        acc_bits -= 8
        out[i] = (acc_value >> acc_bits) & 0xFF
    return out
def get_indices_from_minimal(minimal, bit_len):
    """Decode the minimal Equihash solution encoding into a list of indices.

    minimal -- bytearray in the densely packed minimal encoding
    bit_len -- bits per index (collision_length + 1)

    Uses explicit floor division so the widths are computed identically under
    Python 2 and 3.
    """
    eh_index_size = 4
    assert (bit_len+7)//8 <= eh_index_size
    len_indices = 8*eh_index_size*len(minimal)//bit_len
    byte_pad = eh_index_size - (bit_len+7)//8
    # Expand each bit_len-bit index into a zero-padded big-endian 32-bit word.
    expanded = expand_array(minimal, len_indices, bit_len, byte_pad)
    return [struct.unpack('>I', expanded[i:i+4])[0] for i in range(0, len_indices, eh_index_size)]
def get_minimal_from_indices(indices, bit_len):
    """Encode a list of Equihash indices into the minimal solution encoding.

    indices -- iterable of integers, each < 2**bit_len
    bit_len -- bits per index (collision_length + 1)

    Fixes: joins the packed words with ``b''`` (identical to ``''`` under
    Python 2, where str is bytes, and correct under Python 3 where
    ``struct.pack`` returns bytes); explicit floor division throughout.
    """
    eh_index_size = 4
    assert (bit_len+7)//8 <= eh_index_size
    len_indices = len(indices)*eh_index_size
    min_len = bit_len*len_indices//(8*eh_index_size)
    byte_pad = eh_index_size - (bit_len+7)//8
    byte_indices = bytearray(b''.join([struct.pack('>I', i) for i in indices]))
    return compress_array(byte_indices, min_len, bit_len, byte_pad)
def hash_nonce(digest, nonce):
    """Mix a (up to) 256-bit nonce into the hash state as eight LE words.

    Bug fix: each 32-bit word is now masked before packing.  Previously word
    i was ``nonce >> (32*i)`` unmasked, so the low words still carried all
    remaining high bits and ``struct.pack('<I', ...)`` raised struct.error
    for any nonce >= 2**32.  Behavior for nonces < 2**32 is unchanged.
    """
    for i in range(8):
        digest.update(struct.pack('<I', (nonce >> (32*i)) & 0xFFFFFFFF))
def hash_xi(digest, xi):
    """Absorb the 32-bit generator index xi (little-endian) into digest."""
    encoded = struct.pack('<I', xi)
    digest.update(encoded)
    # Hand the digest back so calls can be chained.
    return digest
def count_zeroes(h):
    """Return the number of leading zero bits of hash ``h``.

    Accepts a bytearray/bytes (iteration yields ints) or a Python 2 str
    (iteration yields single characters, converted via ord) — the original
    type check only handled bytearray vs str and broke for bytes on
    Python 3.  The trailing '1' sentinel makes an all-zero input return the
    full bit length instead of raising ValueError.
    """
    bits = ''.join(
        '{0:08b}'.format(x if isinstance(x, int) else ord(x)) for x in h)
    # Count leading zeroes
    return (bits + '1').index('1')
def has_collision(ha, hb, i, l):
    """Return True if ``ha`` and ``hb`` agree on the i-th l-bit slice.

    Compares the bytes covering bit range [(i-1)*l, i*l); the solver always
    calls this with l a multiple of 8.  Rewritten with ``all`` (the original
    ``reduce`` over a list raised TypeError on an empty byte range and is no
    longer a builtin on Python 3) and explicit floor division.
    """
    return all(ha[j] == hb[j] for j in range((i-1)*l//8, i*l//8))
def distinct_indices(a, b):
    """Return True when index tuples ``a`` and ``b`` share no element."""
    for idx in a:
        # Membership test performs the same equality scan as the nested loop.
        if idx in b:
            return False
    return True
def xor(ha, hb):
    """Byte-wise XOR of two equal-length byte sequences, as a bytearray."""
    out = bytearray()
    for byte_a, byte_b in zip(ha, hb):
        out.append(byte_a ^ byte_b)
    return out
def gbp_basic(digest, n, k):
    '''Implementation of Basic Wagner's algorithm for the GBP.

    digest -- hash object already primed with the block header; it is
              copy()d for every generator index (see hash_xi).
    n, k   -- Equihash parameters; each round collides on n/(k+1) bits.

    Returns a list of solutions, each in the minimal byte encoding produced
    by get_minimal_from_indices.

    NOTE: this module is Python 2 code (print statements, integer `/`).
    '''
    validate_params(n, k)
    collision_length = n/(k+1)
    hash_length = (k+1)*((collision_length+7)//8)
    indices_per_hash_output = 512/n
    # 1) Generate first list
    if DEBUG: print 'Generating first list'
    X = []
    tmp_hash = ''
    for i in range(0, 2**(collision_length+1)):
        r = i % indices_per_hash_output
        if r == 0:
            # X_i = H(I||V||x_i)
            curr_digest = digest.copy()
            hash_xi(curr_digest, i/indices_per_hash_output)
            tmp_hash = curr_digest.digest()
        # Each list entry pairs the expanded hash slice with its index tuple.
        X.append((
            expand_array(bytearray(tmp_hash[r*n/8:(r+1)*n/8]),
                         hash_length, collision_length),
            (i,)
        ))
    # 3) Repeat step 2 until 2n/(k+1) bits remain
    for i in range(1, k):
        if DEBUG: print 'Round %d:' % i
        # 2a) Sort the list
        if DEBUG: print '- Sorting list'
        X.sort(key=itemgetter(0))
        if DEBUG and VERBOSE:
            for Xi in X[-32:]:
                print '%s %s' % (print_hash(Xi[0]), Xi[1])
        if DEBUG: print '- Finding collisions'
        Xc = []
        while len(X) > 0:
            # 2b) Find next set of unordered pairs with collisions on first n/(k+1) bits
            j = 1
            while j < len(X):
                if not has_collision(X[-1][0], X[-1-j][0], i, collision_length):
                    break
                j += 1
            # 2c) Store tuples (X_i ^ X_j, (i, j)) on the table
            for l in range(0, j-1):
                for m in range(l+1, j):
                    # Check that there are no duplicate indices in tuples i and j
                    if distinct_indices(X[-1-l][1], X[-1-m][1]):
                        # Keep index tuples ordered by their first element.
                        if X[-1-l][1][0] < X[-1-m][1][0]:
                            concat = X[-1-l][1] + X[-1-m][1]
                        else:
                            concat = X[-1-m][1] + X[-1-l][1]
                        Xc.append((xor(X[-1-l][0], X[-1-m][0]), concat))
            # 2d) Drop this set
            while j > 0:
                X.pop(-1)
                j -= 1
        # 2e) Replace previous list with new list
        X = Xc
    # k+1) Find a collision on last 2n(k+1) bits
    if DEBUG:
        print 'Final round:'
        print '- Sorting list'
    X.sort(key=itemgetter(0))
    if DEBUG and VERBOSE:
        for Xi in X[-32:]:
            print '%s %s' % (print_hash(Xi[0]), Xi[1])
    if DEBUG: print '- Finding collisions'
    solns = []
    while len(X) > 0:
        j = 1
        while j < len(X):
            # Final round needs a collision on both of the last two slices.
            if not (has_collision(X[-1][0], X[-1-j][0], k, collision_length) and
                    has_collision(X[-1][0], X[-1-j][0], k+1, collision_length)):
                break
            j += 1
        for l in range(0, j-1):
            for m in range(l+1, j):
                res = xor(X[-1-l][0], X[-1-m][0])
                # A solution XORs to zero across the whole expanded hash.
                if count_zeroes(res) == 8*hash_length and distinct_indices(X[-1-l][1], X[-1-m][1]):
                    if DEBUG and VERBOSE:
                        print 'Found solution:'
                        print '- %s %s' % (print_hash(X[-1-l][0]), X[-1-l][1])
                        print '- %s %s' % (print_hash(X[-1-m][0]), X[-1-m][1])
                    if X[-1-l][1][0] < X[-1-m][1][0]:
                        solns.append(list(X[-1-l][1] + X[-1-m][1]))
                    else:
                        solns.append(list(X[-1-m][1] + X[-1-l][1]))
        # 2d) Drop this set
        while j > 0:
            X.pop(-1)
            j -= 1
    return [get_minimal_from_indices(soln, collision_length+1) for soln in solns]
def gbp_validate(digest, minimal, n, k):
    '''Validate a minimal-encoded Equihash solution against the primed digest.

    Prints a human-readable reason and returns False on the first failed
    check; returns True for a valid solution.
    '''
    validate_params(n, k)
    collision_length = n/(k+1)
    hash_length = (k+1)*((collision_length+7)//8)
    indices_per_hash_output = 512/n
    solution_width = (1 << k)*(collision_length+1)//8
    if len(minimal) != solution_width:
        print 'Invalid solution length: %d (expected %d)' % \
            (len(minimal), solution_width)
        return False
    # Rebuild the leaf rows from the encoded indices.
    X = []
    for i in get_indices_from_minimal(minimal, collision_length+1):
        r = i % indices_per_hash_output
        # X_i = H(I||V||x_i)
        curr_digest = digest.copy()
        hash_xi(curr_digest, i/indices_per_hash_output)
        tmp_hash = curr_digest.digest()
        X.append((
            expand_array(bytearray(tmp_hash[r*n/8:(r+1)*n/8]),
                         hash_length, collision_length),
            (i,)
        ))
    # Pairwise-combine rows round by round, checking the tree invariants.
    for r in range(1, k+1):
        Xc = []
        for i in range(0, len(X), 2):
            if not has_collision(X[i][0], X[i+1][0], r, collision_length):
                print 'Invalid solution: invalid collision length between StepRows'
                return False
            if X[i+1][1][0] < X[i][1][0]:
                print 'Invalid solution: Index tree incorrectly ordered'
                return False
            if not distinct_indices(X[i][1], X[i+1][1]):
                print 'Invalid solution: duplicate indices'
                return False
            Xc.append((xor(X[i][0], X[i+1][0]), X[i][1] + X[i+1][1]))
        X = Xc
    if len(X) != 1:
        print 'Invalid solution: incorrect length after end of rounds: %d' % len(X)
        return False
    # The surviving XOR of all leaf hashes must be all zero bits.
    if count_zeroes(X[0][0]) != 8*hash_length:
        print 'Invalid solution: incorrect number of zeroes: %d' % count_zeroes(X[0][0])
        return False
    return True
def snowgem_person(n, k):
    """Build the BLAKE2b personalization string for Snowgem's Equihash(n, k)."""
    packed_params = struct.pack('<II', n, k)
    return b'SnowgemPoW' + packed_params
def print_hash(h):
    """Render a hash as a lowercase hex string.

    Accepts sequences whose elements are ints (bytearray/bytes) or single
    characters (Python 2 str) — the original type check broke for Python 3
    bytes.  Behavior for the original input types is unchanged.
    """
    return ''.join(
        '{0:02x}'.format(x if isinstance(x, int) else ord(x)) for x in h)
def validate_params(n, k):
    """Check that (n, k) is a usable Equihash parameter pair.

    Raises ValueError when k >= n, or when the per-round collision bit length
    plus one (n//(k+1) + 1) would not fit the 32-bit index arithmetic used by
    the solver.  Floor division makes the check identical under Python 2 and
    3 (floor(x) >= 31 iff x >= 31 for the values involved).
    """
    if (k >= n):
        raise ValueError('n must be larger than k')
    if (((n//(k+1))+1) >= 32):
        raise ValueError('Parameters must satisfy n/(k+1)+1 < 32')
| 33.690476 | 99 | 0.529531 |
ea0ad09b853333aee1feca1bc8ad0d8b05735bee | 2,996 | py | Python | weather_mv/loader_pipeline/pipeline_test.py | google/weather-tools | acc630c6a7fbef91f9c7eba86219e58391462bf6 | [
"Apache-2.0"
] | 66 | 2021-12-21T00:10:02.000Z | 2022-03-31T18:45:33.000Z | weather_mv/loader_pipeline/pipeline_test.py | google/weather-tools | acc630c6a7fbef91f9c7eba86219e58391462bf6 | [
"Apache-2.0"
] | 76 | 2021-12-21T15:23:12.000Z | 2022-03-31T14:26:05.000Z | weather_mv/loader_pipeline/pipeline_test.py | google/weather-tools | acc630c6a7fbef91f9c7eba86219e58391462bf6 | [
"Apache-2.0"
] | 15 | 2021-12-21T00:10:10.000Z | 2022-03-31T18:46:28.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from .pipeline import run
class TestCLI(unittest.TestCase):
    # Exercises argument parsing of the weather-mv CLI entry point (`run`),
    # which returns a (known_args, pipeline_args) pair.
    def setUp(self) -> None:
        # Baseline argv shared by every test; individual tests append flags.
        self.base_cli_args = (
            'weather-mv '
            '-i weather_mv/test_data/test_data_2018*.nc '
            '-o myproject.mydataset.mytable '
            '--import_time 2022-02-04T22:22:12.125893'
        ).split()
        # Expected parsed namespace (as a dict) for the baseline argv above.
        self.base_cli_known_args = {
            'uris': 'weather_mv/test_data/test_data_2018*.nc',
            'output_table': 'myproject.mydataset.mytable',
            'dry_run': False,
            'skip_region_validation': False,
            'import_time': '2022-02-04T22:22:12.125893',
            'infer_schema': False,
            'num_shards': 5,
            'topic': None,
            'variables': [],
            'window_size': 1.0,
            'xarray_open_dataset_kwargs': {},
            'coordinate_chunk_size': 10_000,
            'disable_in_memory_copy': False
        }
    def test_dry_runs_are_allowed(self):
        known_args, _ = run(self.base_cli_args + '--dry-run'.split())
        self.assertEqual(known_args.dry_run, True)
    def test_area_only_allows_four(self):
        # --area must receive exactly four values (N, W, S, E).
        with self.assertRaisesRegex(AssertionError, 'Must specify exactly 4 lat/long .* N, W, S, E'):
            run(self.base_cli_args + '--area 1 2 3'.split())
        with self.assertRaisesRegex(AssertionError, 'Must specify exactly 4 lat/long .* N, W, S, E'):
            run(self.base_cli_args + '--area 1 2 3 4 5'.split())
        known_args, pipeline_args = run(self.base_cli_args + '--area 1 2 3 4'.split())
        self.assertEqual(pipeline_args, [])
        self.assertEqual(vars(known_args), {
            **self.base_cli_known_args,
            'area': [1, 2, 3, 4]
        })
    def test_topic_creates_a_streaming_pipeline(self):
        # Supplying a Pub/Sub topic should flip Beam into streaming mode.
        _, pipeline_args = run(self.base_cli_args + '--topic projects/myproject/topics/my-topic'.split())
        self.assertEqual(pipeline_args, ['--streaming', 'true'])
    def test_accepts_json_string_for_xarray_open(self):
        # JSON passed on the command line round-trips into a kwargs dict.
        xarray_kwargs = dict(engine='cfgrib', backend_kwargs={'filter_by_keys': {'edition': 1}})
        json_kwargs = json.dumps(xarray_kwargs)
        known_args, _ = run(
            self.base_cli_args + ["--xarray_open_dataset_kwargs", f"{json_kwargs}"]
        )
        self.assertEqual(known_args.xarray_open_dataset_kwargs, xarray_kwargs)
if __name__ == '__main__':
    # Run the tests when this module is executed directly.
    unittest.main()
| 38.410256 | 105 | 0.641522 |
ba265e6a5bc520e9dac70d65140525b368db5de1 | 4,027 | py | Python | opencensus/trace/ext/pyramid/pyramid_middleware.py | sandeep6189/opencensus-python | c7b7e0b2a9f97e1607d4fdf887baa5547278760a | [
"Apache-2.0"
] | null | null | null | opencensus/trace/ext/pyramid/pyramid_middleware.py | sandeep6189/opencensus-python | c7b7e0b2a9f97e1607d4fdf887baa5547278760a | [
"Apache-2.0"
] | null | null | null | opencensus/trace/ext/pyramid/pyramid_middleware.py | sandeep6189/opencensus-python | c7b7e0b2a9f97e1607d4fdf887baa5547278760a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from opencensus.trace.ext import utils
from opencensus.trace.ext.pyramid.config import PyramidTraceSettings
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import tracer as tracer_module
# Standard span attribute keys shared across OpenCensus web integrations.
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
# Settings key naming request paths that should never be traced.
BLACKLIST_PATHS = 'BLACKLIST_PATHS'
log = logging.getLogger(__name__)
class OpenCensusTweenFactory(object):
    """Pyramid tweens are like wsgi middleware, but have access to things
    like the request, response, and application registry.
    The tween factory is a globally importable callable whose
    constructor takes a request handler and application registry. It
    will be called with a pyramid request object.
    For details on pyramid tweens, see
    https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/hooks.html#creating-a-tween
    """
    def __init__(self, handler, registry):
        """Constructor for the pyramid tween
        :param handler: Either the main Pyramid request handling
        function or another tween
        :type handler: function
        :param registry: The pyramid application registry
        :type registry: :class:`pyramid.registry.Registry`
        """
        self.handler = handler
        self.registry = registry
        # Sampler/exporter/propagator are resolved from the app's settings.
        settings = PyramidTraceSettings(registry)
        self.sampler = settings.SAMPLER
        self.exporter = settings.EXPORTER
        self.propagator = settings.PROPAGATOR
        self._blacklist_paths = settings.params.get(BLACKLIST_PATHS)
    def __call__(self, request):
        """Handle one request: open a span, call the handler, close the span."""
        self._before_request(request)
        response = self.handler(request)
        self._after_request(request, response)
        return response
    def _before_request(self, request):
        """Start a span for this request unless its path is blacklisted."""
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return
        try:
            # Continue a trace propagated in the incoming headers, if any.
            span_context = self.propagator.from_headers(request.headers)
            tracer = tracer_module.Tracer(
                span_context=span_context,
                sampler=self.sampler,
                exporter=self.exporter,
                propagator=self.propagator)
            span = tracer.start_span()
            # Set the span name as the name of the current module name
            span.name = '[{}]{}'.format(
                request.method,
                request.path)
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_METHOD,
                attribute_value=request.method)
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_URL,
                attribute_value=request.path)
        except Exception:  # pragma: NO COVER
            # Tracing failures must never break request handling.
            log.error('Failed to trace request', exc_info=True)
    def _after_request(self, request, response):
        """Record the response status and finish the span opened above."""
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return
        try:
            # The tracer created in _before_request is stashed in the
            # execution context by the Tracer constructor.
            tracer = execution_context.get_opencensus_tracer()
            tracer.add_attribute_to_current_span(
                HTTP_STATUS_CODE,
                str(response.status_code))
            tracer.end_span()
            tracer.finish()
        except Exception:  # pragma: NO COVER
            log.error('Failed to trace request', exc_info=True)
| 34.418803 | 94 | 0.685622 |
242c3bc55f67382e515aa806bee026e2816ef56a | 13,779 | py | Python | python/GafferSceneTest/SetAlgoTest.py | RudyCortesPearl/gaffer | 4ae48fed676e5fc920154ce39a1a9dfa4dc1f0b4 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneTest/SetAlgoTest.py | RudyCortesPearl/gaffer | 4ae48fed676e5fc920154ce39a1a9dfa4dc1f0b4 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneTest/SetAlgoTest.py | RudyCortesPearl/gaffer | 4ae48fed676e5fc920154ce39a1a9dfa4dc1f0b4 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import functools
import IECore
import GafferScene
import GafferSceneTest
class SetAlgoTest( GafferSceneTest.SceneTestCase ) :
	# Tests GafferScene.SetAlgo set-expression evaluation: set operators
	# (| & -), explicit object names, wildcards, and the in/containing
	# hierarchy operators.
	def test( self ) :
		sphere1 = GafferScene.Sphere( "Sphere1" )
		sphere1["name"].setValue( 'sphere1' )
		sphere2 = GafferScene.Sphere( "Sphere2" )
		sphere2["name"].setValue( 'sphere2' )
		sphere3 = GafferScene.Sphere( "Sphere3" )
		sphere3["name"].setValue( 'sphere3' )
		group1 = GafferScene.Group( "Group1" )
		group1["in"].addChild( GafferScene.ScenePlug( "in1" ) )
		group1["in"].addChild( GafferScene.ScenePlug( "in2" ) )
		group1["in"]["in0"].setInput( sphere1["out"] )
		group1["in"]["in1"].setInput( sphere2["out"] )
		setA = GafferScene.Set( "SetA" )
		setA["name"].setValue( 'setA' )
		setA["paths"].setValue( IECore.StringVectorData( [ '/group/sphere1', '/group/sphere2' ] ) )
		setB = GafferScene.Set( "SetB" )
		setB["name"].setValue( 'setB' )
		setB["paths"].setValue( IECore.StringVectorData( [ '/group/sphere2' ] ) )
		setC = GafferScene.Set( "SetC" )
		setC["name"].setValue( 'setC' )
		setC["paths"].setValue( IECore.StringVectorData( [ '/sphere3' ] ) )
		setD = GafferScene.Set( "SetD" )
		setD["name"].setValue( 'setD' )
		setD["paths"].setValue( IECore.StringVectorData( [] ) )
		group2 = GafferScene.Group( "Group2" )
		group2["in"].addChild( GafferScene.ScenePlug( "in1" ) )
		group2["in"].addChild( GafferScene.ScenePlug( "in2" ) )
		group2["in"].addChild( GafferScene.ScenePlug( "in3" ) )
		setA["in"].setInput( group1["out"] )
		setB["in"].setInput( setA["out"] )
		setC["in"].setInput( sphere3["out"] )
		setD["in"].setInput( setC["out"] )
		group2["in"]["in0"].setInput( setB["out"] )
		group2["in"]["in2"].setInput( setD["out"] )
		# Set memberships:
		# A: ( /group/group/sphere1, /group/group/sphere2 )
		# B: ( /group/group/sphere2 )
		# C: ( /group/sphere3 )
		# D: ( )
		expressionCheck = functools.partial( self.assertCorrectEvaluation, group2["out"] )
		expressionCheck( '', [] )
		expressionCheck( 'setA', [ '/group/group/sphere1', '/group/group/sphere2' ] )
		expressionCheck( '/group/sphere3', [ '/group/sphere3' ] )
		# Test expressions that contain only sets and have a clearly defined evaluation order
		expressionCheck( '(setA | setC)', [ '/group/group/sphere1', '/group/group/sphere2', '/group/sphere3' ] )
		expressionCheck( '(setA | setB)', [ '/group/group/sphere1', '/group/group/sphere2' ] )
		expressionCheck( '(setA & setB)', [ '/group/group/sphere2' ] )
		expressionCheck( '(setA & setC)', [] )
		expressionCheck( '(setA | setB) & setD', [] )
		expressionCheck( '(setA & setB) | setD', [ '/group/group/sphere2' ] )
		expressionCheck( '(setA - setB)', [ '/group/group/sphere1' ] )
		expressionCheck( '(setA - setC)', [ '/group/group/sphere1', '/group/group/sphere2'] )
		expressionCheck( '(setB - setC)', [ '/group/group/sphere2' ] )
		# Test expressions that omit the explicit grouping and rely on operator precedence
		expressionCheck( 'setA setC', [ '/group/group/sphere1', '/group/group/sphere2', '/group/sphere3' ] )
		expressionCheck( 'setA | setB | setC', [ '/group/group/sphere1', '/group/group/sphere2', '/group/sphere3' ] )
		expressionCheck( 'setA | setB & setC', [ '/group/group/sphere1', '/group/group/sphere2' ] )
		expressionCheck( 'setA & setB | setC', [ '/group/group/sphere2', '/group/sphere3' ] )
		expressionCheck( 'setA & setB - setC', [ '/group/group/sphere2' ] )
		expressionCheck( 'setA - setB | setC', [ '/group/group/sphere1', '/group/sphere3' ] )
		# Test more complex expressions that contain explicit object names and lists thereof
		expressionCheck( '/group/light1 /group/light2', [ '/group/light1', '/group/light2' ] )
		expressionCheck( '(/group/light1 /group/light2)', [ '/group/light1', '/group/light2' ] )
		expressionCheck( '/group/light1 /group/light2 setA', [ '/group/light1', '/group/light2', '/group/group/sphere1', '/group/group/sphere2' ] )
		expressionCheck( '(/group/light1 /group/light2) | setA', [ '/group/light1', '/group/light2', '/group/group/sphere1', '/group/group/sphere2' ] )
		expressionCheck( 'setA & (/group/group/sphere1 /group/group/sphere42)', [ '/group/group/sphere1' ] )
		expressionCheck( 'setA - /group/group/sphere2', [ '/group/group/sphere1' ] )
		expressionCheck( '(setA - /group/group/sphere2)', [ '/group/group/sphere1' ] )
		expressionCheck( 'setA - ((setC /group/group/sphere2) & setB)', [ '/group/group/sphere1' ] )
		expressionCheck( '(setA - ((setC /group/group/sphere2) & setB))', [ '/group/group/sphere1' ] )
		expressionCheck( 'setA - (/group/group/sphere1 /group/group/sphere2) | (setA setB setC) & setC', [ '/group/sphere3' ] )
		# Test if proper exception is thrown for invalid expression
		with self.assertRaises( RuntimeError ) as e :
			# note the missing )
			GafferScene.SetAlgo.evaluateSetExpression( 'setA - (/group/group/sphere2', group2["out"] )
		self.assertEqual( str( e.exception ), 'Syntax error in indicated part of SetExpression.\nsetA - (/group/group/sphere2\n |---------------------|\n.' )
		# Sets that don't exist should be replaced with an empty PathMatcher
		expressionCheck( 'A', [] )
		# Test that changing set contents will result in an updated hash
		h = GafferScene.SetAlgo.setExpressionHash( "setA", group2["out"] )
		setA["paths"].setValue( IECore.StringVectorData( [ '/group/sphere1' ] ) )
		self.assertNotEqual( h, GafferScene.SetAlgo.setExpressionHash( "setA", group2["out"] ) )
	def testColonAndDotInSetAndObjectNames( self ):
		# ':' and '.' must be legal in both set and object names.
		sphere1 = GafferScene.Sphere( "Sphere1" )
		sphere1["name"].setValue( 'MyObject:sphere1.model' )
		setA = GafferScene.Set( "SetA" )
		setA["name"].setValue( "MySets:setA.set" )
		setA["paths"].setValue( IECore.StringVectorData( [ "/MyObject:sphere1.model" ] ) )
		self.assertCorrectEvaluation( setA["out"], "MySets:setA.set", [ "/MyObject:sphere1.model" ] )
		self.assertCorrectEvaluation( setA["out"], "/MyObject:sphere1.model", [ "/MyObject:sphere1.model" ] )
	def testWildcardInSetName( self ) :
		sphereA = GafferScene.Sphere( "SphereA" )
		sphereA["sets"].setValue( 'sphereA' )
		sphereA["name"].setValue( 'sphereA' )
		sphereB = GafferScene.Sphere( "SphereB" )
		sphereB["sets"].setValue( 'sphereB' )
		sphereB["name"].setValue( 'sphereB' )
		sphereC = GafferScene.Sphere( "SphereC" )
		sphereC["sets"].setValue( 'sphereC' )
		sphereC["name"].setValue( 'sphereC' )
		sphereC2 = GafferScene.Sphere( "SphereC2" )
		sphereC2["sets"].setValue( 'sphereC' )
		sphereC2["name"].setValue( 'sphereC2' )
		# sphere that we don't want in the resulting set
		undesired = GafferScene.Sphere( "undesired" )
		undesired["sets"].setValue( 'undesired' )
		undesired["name"].setValue( 'undesired' )
		group = GafferScene.Group( "Group" )
		group["in"][0].setInput( sphereA["out"] )
		group["in"][1].setInput( sphereB["out"] )
		group["in"][2].setInput( sphereC["out"] )
		group["in"][3].setInput( sphereC2["out"] )
		group["in"][4].setInput( undesired["out"] )
		oldHash = GafferScene.SetAlgo.setExpressionHash( "sphere*", group["out"] )
		# Test different features of StringAlgo.
		# Note that '-' is reserved as a SetExpression operator and the
		# respective range related feature of StringAlgo isn't supported ("myFoo[A-Z]").
		self.assertCorrectEvaluation( group["out"], "sphere*", ["/group/sphereA", "/group/sphereB", "/group/sphereC", "/group/sphereC2"] )
		self.assertCorrectEvaluation( group["out"], "sphere* | undesired", ["/group/sphereA", "/group/sphereB", "/group/sphereC", "/group/sphereC2", "/group/undesired"] )
		self.assertCorrectEvaluation( group["out"], "sphere[AB]", ["/group/sphereA", "/group/sphereB"] )
		self.assertCorrectEvaluation( group["out"], "sphere[!AB]", ["/group/sphereC", "/group/sphereC2"] )
		self.assertCorrectEvaluation( group["out"], "sphere?", ["/group/sphereA", "/group/sphereB", "/group/sphereC", "/group/sphereC2"] )
		sphereC2["sets"].setValue( 'sphere?' )
		self.assertCorrectEvaluation( group["out"], "sphere\?", ["/group/sphereC2"] )
		self.assertNotEqual( oldHash, GafferScene.SetAlgo.setExpressionHash( "sphere*", group["out"] ) )
	def testInterestingSetNames( self ) :
		# Unusual but legal set names must round-trip through evaluation.
		sphere = GafferScene.Sphere()
		for setName in ( ":", "a:", "a:b", "!", "0", "]A" ) :
			sphere["sets"].setValue( setName )
			self.assertCorrectEvaluation( sphere["out"], setName, { "/sphere" } )
	def testInAndContaining( self ) :
		# /group
		#    /sphere
		#    /group
		#        /sphere
		# /sphere1
		#    /sphere
		sphere = GafferScene.Sphere()
		sphere["sets"].setValue( "indubitablyASet containingThings" )
		group = GafferScene.Group()
		group["in"][0].setInput( sphere["out"] )
		group["in"][0].setInput( sphere["out"] )
		group2 = GafferScene.Group()
		group2["in"][0].setInput( sphere["out"] )
		group2["in"][1].setInput( group["out"] )
		parent = GafferScene.Parent()
		parent["in"].setInput( group2["out"] )
		parent["child"].setInput( sphere["out"] )
		setA = GafferScene.Set()
		setA["in"].setInput( parent["out"] )
		setA["name"].setValue( "A" )
		setA["paths"].setValue( IECore.StringVectorData( [
			"/group/group",
		] ) )
		setB = GafferScene.Set()
		setB["in"].setInput( setA["out"] )
		setB["name"].setValue( "B" )
		setB["paths"].setValue( IECore.StringVectorData( [
			"/group/group/sphere",
			"/group/sphere",
		] ) )
		self.assertSceneValid( setB["out"] )
		# Test basic operation of `in`
		self.assertCorrectEvaluation( setB["out"], "A in B", [] )
		self.assertCorrectEvaluation( setB["out"], "A in A", setA["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "B in A", [ "/group/group/sphere" ] )
		self.assertCorrectEvaluation( setB["out"], "B in B", setB["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "/group/group/sphere in /group", [ "/group/group/sphere" ] )
		self.assertCorrectEvaluation( setB["out"], "B in /group/group", [ "/group/group/sphere" ] )
		self.assertCorrectEvaluation( setB["out"], "B in ( /group/group /somewhereElse )", [ "/group/group/sphere" ] )
		# Test basic operation of `containing`
		self.assertCorrectEvaluation( setB["out"], "A containing B", [ "/group/group" ] )
		self.assertCorrectEvaluation( setB["out"], "A containing A", setA["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "B containing A", [] )
		self.assertCorrectEvaluation( setB["out"], "B containing B", setB["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "/group containing /group/sphere", [ "/group" ] )
		self.assertCorrectEvaluation( setB["out"], "A containing /group/group/sphere", [ "/group/group" ] )
		# Test various problematic parses
		self.assertCorrectEvaluation( setB["out"], "A in (A)", setA["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "A in(A)", setA["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "(A)in(A)", setA["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "indubitablyASet", setB["out"].set( "indubitablyASet" ).value.paths() )
		self.assertCorrectEvaluation( setB["out"], "A in indubitablyASet", [] )
		self.assertCorrectEvaluation( setB["out"], "B in indubitablyASet", setB["paths"].getValue() )
		self.assertCorrectEvaluation( setB["out"], "A in in?*", [] )
		self.assertCorrectEvaluation( setB["out"], "containingThings", setB["out"].set( "containingThings" ).value.paths() )
		self.assertCorrectEvaluation( setB["out"], "B in containing*", setB["paths"].getValue() )
	def testWildcardsInObjectNames( self ) :
		# Wildcards are only valid in set names, never in object paths.
		sphere = GafferScene.Sphere()
		for expression in [
			"/*",
			"/spher[ef]",
			"/spher?",
		] :
			with self.assertRaisesRegexp( RuntimeError, 'Object name "{0}" contains wildcards'.format( re.escape( expression ) ) ) :
				GafferScene.SetAlgo.evaluateSetExpression( expression, sphere["out"] )
	def assertCorrectEvaluation( self, scenePlug, expression, expectedContents ) :
		# Shared helper: evaluate `expression` against `scenePlug` and compare
		# the resulting PathMatcher paths as an unordered set.
		result = set( GafferScene.SetAlgo.evaluateSetExpression( expression, scenePlug ).paths() )
		self.assertEqual( result, set( expectedContents ) )
if __name__ == "__main__":
	# Bug fix: `unittest` is never imported at module scope in this file
	# (only re/functools/IECore/GafferScene/GafferSceneTest are), so running
	# the file directly raised NameError.  Import it locally before use.
	import unittest
	unittest.main()
| 44.592233 | 164 | 0.668699 |
162c82725c6bdee42f5165020575a3d8b22652e7 | 1,554 | py | Python | modules/test/test_eleda.py | amuritna/phenny | c01f409f41db125fe3f50093ed1ec3454f95a529 | [
"EFL-2.0"
] | 7 | 2018-10-29T18:01:47.000Z | 2022-01-21T04:13:46.000Z | modules/test/test_eleda.py | amuritna/phenny | c01f409f41db125fe3f50093ed1ec3454f95a529 | [
"EFL-2.0"
] | 225 | 2018-03-08T10:41:50.000Z | 2021-11-01T19:51:17.000Z | modules/test/test_eleda.py | amuritna/phenny | c01f409f41db125fe3f50093ed1ec3454f95a529 | [
"EFL-2.0"
] | 44 | 2018-03-19T15:30:15.000Z | 2020-07-29T08:47:45.000Z | """
test_eleda.py - tests for the eleda module
author: nu11us <work.willeggleston@gmail.com>
"""
import unittest
from mock import MagicMock
from modules import eleda
class TestEleda(unittest.TestCase):
    # Tests the eleda "follow" IRC module using MagicMock stand-ins for the
    # phenny bot and the incoming message object.  Note: the tests mutate the
    # module-global eleda.follows list, so they reset it where relevant.
    def setUp(self):
        self.phenny = MagicMock()
        self.input = MagicMock()
    def test_follow(self):
        # input.group(1) yields the "<nick> <mode>" argument string.
        self.input.group = lambda x: [None, 'firespeaker eng-spa'][x]
        eleda.follow(self.phenny, self.input)
        out = self.phenny.reply.call_args[0][0]
        eleda.follows = []
        self.assertTrue("now following" in out)
    def test_unfollow_invalid(self):
        # Unfollowing a nick that isn't followed should produce an apology.
        self.user = MagicMock()
        self.user.nick = "firespeaker"
        eleda.follows = [self.user]
        self.input.group = lambda x: [None, 'not_firespeaker'][x]
        eleda.unfollow(self.phenny, self.input)
        out = self.phenny.reply.call_args[0][0]
        self.assertTrue("Sorry, you aren't following that user" in out)
    def test_following(self):
        # Both followed nicks should appear in the listing.
        self.user = MagicMock()
        self.user.nick = "firespeaker"
        self.user2 = MagicMock()
        self.user2.nick = "begiak"
        eleda.follows = [self.user, self.user2]
        eleda.following(self.phenny, self.input)
        out = self.phenny.say.call_args[0][0]
        self.assertTrue("firespeaker" in out and "begiak" in out)
    def test_no_one_following(self):
        eleda.follows = []
        eleda.following(self.phenny, self.input)
        out = self.phenny.reply.call_args[0][0]
        self.assertTrue("No one is being followed at the moment." in out)
| 33.06383 | 73 | 0.63964 |
5cf0f976698262788670e04d3c76ddd9da6dddd2 | 3,423 | py | Python | training_mode/swin_training/lr_scheduler.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | [
"Apache-2.0"
] | 1 | 2022-02-07T02:03:37.000Z | 2022-02-07T02:03:37.000Z | training_mode/swin_training/lr_scheduler.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | [
"Apache-2.0"
] | null | null | null | training_mode/swin_training/lr_scheduler.py | weihaoxie/FaceX-Zoo | db0b087e4f4d28152e172d6c8d3767a8870733b4 | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.scheduler.step_lr import StepLRScheduler
from timm.scheduler.scheduler import Scheduler
def build_scheduler(optimizer, n_iter_per_epoch, epoches, warm_up_epoches):
    """Build the per-iteration learning-rate scheduler used for training.

    :param optimizer: torch optimizer whose param-group LRs are driven.
    :param n_iter_per_epoch: number of optimizer steps per epoch.
    :param epoches: total number of training epochs.
    :param warm_up_epoches: number of warm-up epochs at the start.
    :return: a timm scheduler stepped per iteration (t_in_epochs=False).

    The scheduler choice is hard-coded to 'cosine'.  The old 'linear'
    (LinearLRScheduler) and 'step' (StepLRScheduler) branches referenced an
    undefined ``config`` object and crashed with NameError if ever selected;
    they are replaced by an explicit NotImplementedError so the failure mode
    is clear if the selection is changed.  The live path is unchanged.
    """
    num_steps = int(epoches * n_iter_per_epoch)
    warmup_steps = int(warm_up_epoches * n_iter_per_epoch)
    NAME = 'cosine'  # hard-coded scheduler selection
    if NAME == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=num_steps,
            t_mul=1.,
            lr_min=5.0e-06,
            warmup_lr_init=5.0e-07,
            warmup_t=warmup_steps,
            cycle_limit=1,
            t_in_epochs=False,
        )
    else:
        # 'linear' and 'step' were never wired up with real warmup/decay
        # settings; fail fast instead of raising NameError on `config`.
        raise NotImplementedError('unsupported scheduler: %s' % NAME)
    return lr_scheduler
class LinearLRScheduler(Scheduler):
    """Linearly decays each param group's LR from its base value down to
    base * lr_min_rate over t_initial steps, with an optional linear warm-up
    from warmup_lr_init during the first warmup_t steps.  Built on timm's
    Scheduler base class, which supplies base_values/update_groups and the
    optional LR-noise machinery.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lr_min_rate: float,
                 warmup_t=0,
                 warmup_lr_init=0.,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)
        self.t_initial = t_initial
        self.lr_min_rate = lr_min_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear warm-up increment from warmup_lr_init to the
            # group's base LR; groups start at warmup_lr_init immediately.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
    def _get_lr(self, t):
        # t is either an epoch or an update count, per t_in_epochs.
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            # Linear interpolation from base value v down to v*lr_min_rate.
            t = t - self.warmup_t
            total_t = self.t_initial - self.warmup_t
            lrs = [v - ((v - v * self.lr_min_rate) * (t / total_t)) for v in self.base_values]
        return lrs
    def get_epoch_values(self, epoch: int):
        # Called by timm once per epoch; only active when t_in_epochs=True.
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None
    def get_update_values(self, num_updates: int):
        # Called by timm once per optimizer step when t_in_epochs=False.
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None
| 33.23301 | 105 | 0.570844 |
5261499941ab96c98eb2ede5f182c68c0fc860b7 | 26,203 | py | Python | default.py | BorgesGabo/gaia | 9e9b2b57c28d92933749ab8f2070e4962e6caaf6 | [
"Unlicense"
] | null | null | null | default.py | BorgesGabo/gaia | 9e9b2b57c28d92933749ab8f2070e4962e6caaf6 | [
"Unlicense"
] | null | null | null | default.py | BorgesGabo/gaia | 9e9b2b57c28d92933749ab8f2070e4962e6caaf6 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
# This is a sample controller
# - index is the default action of any application
# - user is required for authentication and authorization
# - download is for downloading files uploaded in the db (does streaming)
# -------------------------------------------------------------------------
import datetime
from prettytable import PrettyTable
from prettytable import ALL
def process_po():
    """Date-range search form over purchase orders (po/po_detail/product).

    Renders a form with two datetime fields; on submit it narrows the base
    join query to POs in the selected range, selects the matching rows and
    hands the query to ABCD() to build the consolidated product table.
    Pattern adapted from
    http://brunorocha.org/python/web2py/search-form-with-web2py.html
    """
    # Default values keep the form populated after submission.
    # BUG FIX: the guard previously tested the misspelled
    # `request.vars.date_inicial`, so a supplied date_initial was ignored.
    date_initial_default = \
        datetime.datetime.strptime(request.vars.date_initial, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_initial else None
    date_final_default = \
        datetime.datetime.strptime(request.vars.date_final, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_final else None
    # The search form created with .factory
    form = SQLFORM.factory(
        Field("date_initial", "datetime", default=date_initial_default),
        Field("date_final", "datetime", default=date_final_default),
        formstyle='divs',
        submit_button="Search",
    )
    # Base query joining po -> po_detail -> product.
    query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    if form.process().accepted:
        # Narrow the query only with the bounds the user actually supplied.
        date_initial = form.vars.date_initial
        date_final = form.vars.date_final
        if date_initial:
            query &= db.po.date >= date_initial
        if date_final:
            query &= db.po.date <= date_final
    count = db(query).count()
    results = db(query).select(db.po.po_number,db.po.date,db.po_detail.product_id,db.po_detail.quantity,db.product.pres, db.po.customer_id, orderby='po_number')
    msg = T("%s registros encontrados" % count )
    # Side effect: prints the consolidated table and writes consolidado.txt.
    ABCD(query)
    return dict(form=form, msg=msg, results=results)
def i_file(): # builds a hard-coded path and parses a JSON file without using the upload form
    """Prototype: load orders1.json from a fixed Windows folder and print
    basic info (type, length, second element) to stdout.

    NOTE(review): the path is hard-coded to a developer machine; the
    backslashes happen to be safe here only because none of them form a
    recognized escape sequence in a Python 2 byte string.
    """
    import sys
    import json
    import re
    file_name='orders1.json'
    uploadfolder='C:\Users\Sandra\Documents\Herbert\Projecto web2py'
    #form=SQLFORM.factory(Field('json_file', 'upload', uploadfolder=os.path.join(request.folder, 'uploads'),label='esta el laberl'))
    path=('%s\%s' % (uploadfolder, file_name))
    print path
    print str('path is a type of:')
    print type(path)
    '''if form.process().accepted:
        #file_name=form.vars.json_file
        #path=('%s\%s' % (os.path.join(request.folder,'uploads'), file_name))
        path=('%s\%s' % (uploadfolder, file_name))
        #redirect(URL('printme', args=(path)))
        redirect(URL('form1'))
        printme(str(path))
        return dict(form=form )'''
    with open(path) as json_file:
        datos=json.load(json_file)
    print (type(datos))
    print (len(datos))
    print datos[1]
    return
def printme(str):
    """Echo helper: print the given text and also hand it to the view.

    NOTE(review): the parameter shadows the builtin `str`; renaming it
    would change the keyword interface, so it is only flagged here.
    """
    #str="hello there"
    print str
    return dict(str=str)
def gameTime():
    # NOTE(review): `path` is not defined at module scope, so calling this
    # action raises NameError — this looks like leftover scratch code.
    print path
    return 'OK'
def display_form():
record = db.wp(request.args(0))
form = SQLFORM(db.wp, record, deletable=True,
upload=URL('download'))
if form.process().accepted:
response.flash = 'form accepted'
elif form.errors:
response.flash = 'form has errors'
return dict(form=form)
def download():
    # Streams a db-stored upload back to the client.
    # NOTE(review): shadowed by the @cache.action()-decorated download()
    # defined later in this file; only the later definition is in effect.
    return response.download(request, db)
def up_json():
    """Render a plain insert form for the db.wp table (file upload)."""
    form=SQLFORM(db.wp)
    return dict(form=form)
def ABCD(query):
    """Build and print a consolidated product/PO table for `query`.

    Rows are products, columns are PO numbers, cells are
    quantity * presentation size; a Total column sums each row.
    The table is printed and also written to 'consolidado.txt'.
    """
    # Runs standalone via: c:\Python27\python.exe c:\web2py\web2py.py -S EssenciaAPI24/default/ABCD
    b_lst=[] #list b: per-PO subtotal quantities
    c_lst=[] #list c: per-product totals (currently unused)
    qty_lst=[] #list of quantities
    pres_lst=[] #list of presentation sizes
    #**************************************BASE QUERY **************************************
    #the base query is supplied by the caller -> DAL query
    '''query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    query &= db.po.po_number<2432''' #remove the quotes to test from the command line
    orders_query_lst=db(query).select(db.po.id, db.po.po_number, groupby='po.po_number').as_list() #ids of the POs matched by the query
    n=len(orders_query_lst) #number of POs matched by the query
    d_lst=[str(x['po_number'])+'|Recibido' for x in orders_query_lst] #PO references (used as column headers)
    #print orders_query_lst #debug print
    print '\n'
    #print d_lst #debug print
    #***************************************QUERY A,B *****************************************
    a_product_id_lst=db(query).select(db.product.id, db.product.name, groupby='product.name').as_list() # distinct product ids/names in the query
    for i in range (len(a_product_id_lst)): # iterate over products (A)
        query_a = query
        query_a &= db.product.id==a_product_id_lst[i]['id']
        for j in range (n): # iterate over the POs
            query_b = query_a
            query_b &= db.po.id ==orders_query_lst[j]['id']
            #print query_b # debug print
            bj_lst = db(query_b).select(db.po_detail.quantity, orderby='po.po_number', groupby='po.po_number').as_list() #quantity of this product in this PO
            qtyj_lst = db(query_b).select(db.po_detail.quantity, orderby='po.po_number', groupby='po.po_number').as_list() #same quantity, tracked separately
            presj_lst =db(query_b).select(db.product.pres, orderby='po.po_number', groupby='po.po_number').as_list() #presentation size
            if len(bj_lst)==0: #this PO has no line for the product -> 0
                bj_lst = 0
                b_lst.append(0)
            else:
                b_lst.append(int(bj_lst[0]['quantity'])) # otherwise record the quantity
            if len(qtyj_lst)==0: #no quantity in this PO -> zeros
                qtyj_lst=0
                presj_lst=0 #zero for the presentation too
                qty_lst.append(0) #record it in the quantity list
                pres_lst.append(0) #record it in the presentation list
            else: # otherwise record the queried values
                qty_lst.append(int(qtyj_lst[0]['quantity'])) # the quantity
                pres_lst.append(int(presj_lst[0]['pres'])) # the product's presentation size
    #print qty_lst #debug print
    #print pres_lst #debug print
    z_lst=[]
    z_lst=[qty_lst*pres_lst for qty_lst,pres_lst in zip(qty_lst,pres_lst)] #pres*qty per cell (loop names deliberately shadow the lists)
    #print z_lst
    #print (str('j is:'), j) #debug print
    #print (str('bj_lst is:'), bj_lst) #debug print
    #print (str('b_lst is:'), b_lst) #debug print
    #************************************* PRINT SUMMARY TABLE **************************************
    a_product_name_lst=db(query).select(db.product.name, groupby='product.name').as_list() #distinct product names in range
    field_names_lst=d_lst #column headers: every PO number in the range
    field_names_lst.insert(0, "Producto") # prepend the product column title
    field_names_lst.insert(len(field_names_lst),"Total") # append the Total column title
    summary_table=PrettyTable(field_names_lst) # summary table with one column per header
    total_lst=[]
    for y in range (0,len(a_product_id_lst)):
        begining_slice=y*n #start of this product's slice of z_lst
        end_slice=begining_slice+n #end of the slice (exclusive)
        row_summary_lst=z_lst[begining_slice:end_slice] #this product's totals across all POs
        #use b_lst instead of z_lst for raw order quantities, unweighted by presentation
        total=sum(row_summary_lst) #sum over every PO in the range
        row_summary_lst.insert(0,a_product_name_lst[y]['name']) #prepend the product name
        row_summary_lst.insert(len(row_summary_lst),total) # append the row total
        summary_table.add_row(row_summary_lst) # add the row to the table
        summary_table.align='l'
        #summary_table.align['Producto']='l' # left-align the first column
        summary_table.align['Total']='r' # right-align the last column
    print summary_table # print the summary table
    with open ('consolidado.txt','w') as w: # also write the table to a text file
        w.write(str('ESTE ES EL CONSOLIDADO DE LOS SIGUIENTES PEDIDOS:'))
        w.write('\n')
        w.write(str(summary_table))
    return
def table():
pt = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
pt.align["City name"] = "l" # Left align city names
pt.padding_width = 1 # One space between column edges and contents (default)
pt.add_row(["Adelaide",1295, 1158259, 600.5])
pt.add_row(["Brisbane",5905, 1857594, 1146.4])
pt.add_row(["Darwin", 112, 120900, 1714.7])
pt.add_row(["Hobart", 1357, 205556, 619.5])
pt.add_row(["Sydney", 2058, 4336374, 1214.8])
pt.add_row(["Melbourne", 1566, 3806092, 646.9])
pt.add_row(["Perth", 5386, 1554769, 869.4])
lines = pt.get_string()
with open ('la_tabla.txt','w') as w:
w.write(str(pt))
print pt
return
def merger():
    """Consolidate the date-filtered POs by total quantity per product.

    Work-in-progress action: step 1 (date filter) and step 2 (distinct
    products) are functional; steps 3-5 are partial/experimental.
    """
    #1. Performs the filter by dates -> results = type(DAL. query), form= type(DAL, form), msg=type(DAL, string)
    #------------------------------------------
    #1.0 defines the initial and final dates
    # BUG FIX: the guard previously tested the misspelled
    # `request.vars.date_inicial`, so a supplied date_initial was ignored.
    date_initial_default = \
        datetime.datetime.strptime(request.vars.date_initial, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_initial else None
    date_final_default = \
        datetime.datetime.strptime(request.vars.date_final, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_final else None
    #1.1 The search form created with .factory
    form = SQLFORM.factory(
        Field("date_initial", "datetime", default=date_initial_default),
        Field("date_final", "datetime", default=date_final_default),
        formstyle='divs',
        submit_button="Search",
    )
    #1.2 The base query to fetch all orders of db.po, db.po_details, db.product
    query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    # 1.3 testing if the form was accepted
    if form.process().accepted:
        # gathering form submitted values
        date_initial = form.vars.date_initial
        date_final = form.vars.date_final
        # more dynamic conditions in to query
        if date_initial:
            query &= db.po.date >= date_initial
        if date_final:
            query &= db.po.date <= date_final
    #1.4 counts the total the number of registers
    count = db(query).count()
    #1.5 returns the query results
    results = db(query).select(db.po.po_number,db.po.date,db.po_detail.product_id,db.po_detail.quantity,db.product.pres, db.po.customer_id, orderby='po_number')
    #1.6 prints a message with the number of results
    msg = T("%s registers" % count )
    #2. gets all the products contained within the orders in 1. = A
    A=db(query).select(db.product.id, groupby='product.name')
    #2.1 convert A to a list
    A_rows=A.as_list()
    #2.2 gets the list's length and print it
    count2 = len(A_rows)
    msg2 = T("%s registers" % count2 )
    #3. consolidates all the quantities per po for each product = B
    #3.1 retrieves the first product.id from A
    Ai=A_rows[0]['id']
    #3.2 lists all the po.id in the range of dates
    orders=db(query).select(db.po.id, orderby='po.po_number',groupby='po.po_number' ).as_list()
    # NOTE(review): the loop below indexes `orders` with a row *id*
    # (i=a['id']) rather than a list position, which can raise IndexError;
    # left unchanged pending clarification of the intended iteration.
    #for i, val in enumerate(orders):
    for a in orders:
        i=a['id']
        #i=0
        Bj=orders[i]['id']
        query_B=query
        #3.4 get the total quantity for the product.id(Ai)
        query_B &= db.po_detail.product_id==Ai
        Bijs=db(query_B).select(db.product.pres *db.po_detail.quantity, groupby='product.name')
    #4. gets all the subtotals per product = C
    #5. gets all the customers contained within the orders in 1. = D
    return dict(results=results, msg=msg, form=form, A=A, msg2=msg2, Ai=Ai,Bijs=Bijs ,orders=orders, i=i)
def iterate():
    """Scratch action for experimenting with DAL iteration over POs.

    Prints intermediate values heavily; kept for development only.
    """
    #This function is to perform iteration tests on the db
    query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    query &= db.po.po_number<2430
    #total = db.po_detail.quantity* db.product.pres
    #creates a DAL query and stores as a dictionary
    #result=db(query).select(db.po.id, db.po.po_number, db.po.date ,db.po_detail.product_id,db.po_detail.quantity,db.product.pres, db.po.customer_id, total).as_dict()
    #this is a raw query
    #result=db.executesql('SELECT po.po_number,po_detail.product_id,product.name,product.pres FROM po,po_detail,product WHERE po.id==po_detail.po_id and po_detail.product_id==product.id and po.po_number<2428;',as_dict=True)
    #result=db.executesql('SELECT product.name, po_detail.id from po_detail, product, po WHERE po.id==po_detail.po_id and po_detail.product_id==product.id and po.po_number<2428;' ,as_dict=True )
    #This query removes the duplicates from the pos
    #result=db.executesql('SELECT min(po_detail.product_id), product.name, product.id FROM po_detail, product, po WHERE product.id==po_detail.product_id and po_detail.po_id==po.id and po.po_number<2428 GROUP BY po_detail.product_id',as_dict=True)
    result=db(query).select(db.po.id, orderby='po.po_number',groupby='po.po_number' ).as_list()
    # get all the products in the orders not repeated
    A=db(query).select(db.product.id, groupby='product.name').as_list()
    #filter the orders as a list and count the results
    pedidos_lst=db(query).select(db.po.po_number, orderby='po.po_number',groupby='po.po_number' ).as_list()
    n=len(pedidos_lst)
    b=[]
    for pedido in pedidos_lst:
        j=pedido['po_number'] #get the po_number from the dictionary for each pedido
        print str('j is:')
        print j
        query_B=query #assign the query to a new query_B whose po_number and product belongs to Ai element and Bj element
        query_B &= db.po.po_number==j
        print query_B
        query_B &= db.product.id ==A[0]['id']
        print query_B
        Bij=db(query_B).select(db.product.pres*db.po_detail.quantity).as_list()
        if not Bij:
            # NOTE(review): `Bij` is an empty list here and `pedido` is a
            # dict, so `Bij[pedido]=0` raises TypeError when this branch
            # is taken — left unchanged in this debug helper.
            Bij[pedido]=0
            print str('list is empty')
        #print Bij[0]
        #Bij=Bij[0]['_extra']['(product.pres*po_detail.quantity)']
        #Bij=Bij['_extra']
        print str('Bij is:')
        print Bij
        b.append(Bij)
        print str('b is:')
        print b
    print str('el numero de pedidos es:')
    print n
    pedidos=pedidos_lst[0]['po_number']
    A=db(query).select(db.product.id, groupby='product.name').as_list()
    print str('los pedidos son:')
    print pedidos
    print str('A is:')
    print A
    #b=[]
    #for a in A:
    #i=a['id'] #get the id number from dictionary
    #query_A=query #assign the main query to a new one
    #get the 'i' product of A and retrieve the columns: name and pres
    #query_A &= db.product.id==i
    #print str('i is:')
    #print i
    #result4 = db(query_A).select(db.product.name, db.product.pres, db.po_detail.quantity, orderby='po.po_number',groupby='po.po_number').as_list()
    #print str('result4 is:')
    #print result4
    #print str('b is:')
    #print b
    #result5=int(result4[0]['product']['pres'])
    #print str('result5 is:')
    #print result5
    #b.append(result5)
    #print str('b afert append is:')
    #print b
    #c:\Python27\python.exe c:\web2py\web2py.py -S EssenciaAPI24/default/iterate -M
    #retrieves the third's dictonary element
    #result=result[0]
    #key=result['id']
    #gets the dict' length
    count= len(result)
    msg = T("%s registers" % count )
    return dict(result=result, msg=msg, j=j)
    #return dict(result=result, msg=msg, pedidos=pedidos, result4=result4, b=b, result5=result5, n=n)
def sandbox():
    """Scratch action for trying DAL queries on the po/po_detail/product join.

    Returns the joined rows (including a computed quantity*pres column)
    plus a count message.
    """
    key=19
    query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    query &= db.po.po_number<2424
    #query &= db.product.id==key
    total = db.po_detail.quantity* db.product.pres
    result=db(query).select(db.po.id, db.po.po_number, db.po.date ,db.po_detail.product_id,db.po_detail.quantity,db.product.pres, total, db.po.customer_id)
    #gets the first element
    #result=result[0]
    #gets the column desired
    #result=result['_extra']
    #gets the value
    #result=result['(po_detail.quantity * product.pres)']
    count = db(query).count()
    msg = T("%s registers" % count )
    # BUG FIX: the old `return dict(result=result, msg=msg, form=form)`
    # raised NameError because no `form` is defined in this action.
    return dict(result=result, msg=msg)
def start():
    """Date-range search form over purchase orders.

    Same search pattern as process_po() but without the consolidation
    step; returns the form, a count message and the matching rows.
    Pattern adapted from
    http://brunorocha.org/python/web2py/search-form-with-web2py.html
    """
    # Default values keep the form populated after submission.
    # BUG FIX: the guard previously tested the misspelled
    # `request.vars.date_inicial`, so a supplied date_initial was ignored.
    date_initial_default = \
        datetime.datetime.strptime(request.vars.date_initial, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_initial else None
    date_final_default = \
        datetime.datetime.strptime(request.vars.date_final, "%Y-%m-%d %H:%M:%S") \
        if request.vars.date_final else None
    # The search form created with .factory
    form = SQLFORM.factory(
        Field("date_initial", "datetime", default=date_initial_default),
        Field("date_final", "datetime", default=date_final_default),
        formstyle='divs',
        submit_button="Search",
    )
    # Base query joining po -> po_detail -> product.
    query = db.po.id==db.po_detail.po_id
    query &= db.po_detail.product_id==db.product.id
    if form.process().accepted:
        # Narrow the query only with the bounds the user actually supplied.
        date_initial = form.vars.date_initial
        date_final = form.vars.date_final
        if date_initial:
            query &= db.po.date >= date_initial
        if date_final:
            query &= db.po.date <= date_final
    count = db(query).count()
    results = db(query).select(db.po.po_number,db.po.date,db.po_detail.product_id,db.po_detail.quantity,db.product.pres, db.po.customer_id, orderby='po_number')
    msg = T("%s registers" % count )
    return dict(form=form, msg=msg, results=results)
def order():
    """Insert form for db.po plus a reverse-ordered listing of all POs.

    Buttons: save, refresh the listing (reload this action), and next
    (navigate to orderd() for the PO detail lines).
    """
    ordenes=db(db.po.id>0).select(orderby=~db.po.id)
    form=SQLFORM(db.po, buttons =[TAG.button('guardar', _type="submit"),TAG.button('actualizar listado', _type="button", _onClick ="parent.location='%s'" %URL(order)), TAG.button('siguiente',_type="button", _onClick=" parent.location='%s'" %URL(orderd))])
    if form.process().accepted:
        response.flash='order accepted'
    elif form.errors:
        response.flash= 'check the data inserted'
    else:
        response.flash= 'please fill out the form'
    return dict(ordenes=ordenes, form=form)
def orderd():
    """Insert form for db.po_detail plus a listing of all detail lines,
    ordered by po_id descending. Buttons: save and refresh the listing.
    """
    ordenes=db(db.po_detail.id>0).select(orderby=~db.po_detail.po_id)
    form=SQLFORM(db.po_detail, buttons = [TAG.button('guardar',_type="submit"),TAG.button('actualizar listado',_type="button",_onClick = "parent.location='%s' " % URL(orderd))])
    if form.process().accepted:
        response.flash = 'form accepted'
    elif form.errors:
        response.flash = 'form has errors'
    else:
        response.flash = 'please fill out the form'
    return dict(ordenes=ordenes, form=form)
def form1():
    """Insert form for db.customer; the extra button navigates to order()."""
    form = SQLFORM(db.customer,buttons = [TAG.button('guardar',_type="submit"),TAG.button('siguiente',_type="button",_onClick = "parent.location='%s' " % URL(order))])
    if form.process().accepted:
        response.flash = 'form accepted'
    elif form.errors:
        response.flash = 'form has errors'
    else:
        response.flash = 'please fill out the form'
    return dict(form=form)
def form2():
    """Insert form for db.po; the extra button navigates to form3()."""
    form = SQLFORM(db.po,buttons = [TAG.button('save',_type="submit"),TAG.button('next',_type="button",_onClick = "parent.location='%s' " % URL(form3))])
    if form.process().accepted:
        response.flash = 'form accepted'
    elif form.errors:
        response.flash = 'form has errors'
    else:
        response.flash = 'please fill out the form'
    return dict(form=form)
def form3():
    """Insert form for the db.po_detail table."""
    form = SQLFORM(db.po_detail)
    # SQLFORM.process() returns the form itself with accepted/errors set.
    state = form.process()
    if state.accepted:
        response.flash = 'form accepted'
    elif state.errors:
        response.flash = 'form has errors'
    else:
        response.flash = 'please fill out the form'
    return dict(form=form)
def form4():
    """Insert form for the db.product table with a single save button."""
    save_button = TAG.button('guardar', _type="submit")
    form = SQLFORM(db.product, buttons=[save_button])
    # SQLFORM.process() returns the form itself with accepted/errors set.
    state = form.process()
    if state.accepted:
        response.flash = 'form accepted'
    elif state.errors:
        response.flash = 'form has errors'
    else:
        response.flash = 'please fill out the form'
    return dict(form=form)
def form5():
    # Grid (browse/edit) form over db.po_detail.
    # (The previous comment said db.product, which did not match the code.)
    grid = SQLFORM.grid(db.po_detail, user_signature=False)
    return locals()
def customer_edit_form():
    #Grid (browse/edit) form over db.customer.
    grid = SQLFORM.grid(db.customer, user_signature=False)
    return locals()
def product_edit_form():
    #Grid (browse/edit) form over db.product.
    grid = SQLFORM.grid(db.product, user_signature=False)
    return locals()
def index():
    """
    Default landing action (web2py scaffolding).

    Example action using the internationalization operator T and flash;
    rendered by views/default/index.html or views/generic.html.
    For a simple wiki, replace the two lines below with:
        return auth.wiki()
    """
    response.flash = T("Hello World")
    return dict(message=T('Welcome to web2py!'))
def user():
    """
    Auth dispatcher (web2py scaffolding). Exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    http://..../[app]/default/user/bulk_register
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users
    """
    # auth() routes to the sub-action named in request.args and returns a form.
    return dict(form=auth())
@cache.action()
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]

    NOTE(review): this definition replaces the undecorated download()
    defined earlier in this file; the earlier one is dead code.
    """
    return response.download(request, db)
def call():
    """
    exposes services (web2py scaffolding). for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
| 45.022337 | 255 | 0.607946 |
92ad2a8b3fa1fcdfb8057a128572e063f6501a9b | 3,300 | py | Python | output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_enumeration_5_xsd/nistschema_sv_iv_list_decimal_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_enumeration_5_xsd/nistschema_sv_iv_list_decimal_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/decimal/schema_instance/nistschema_sv_iv_list_decimal_enumeration_5_xsd/nistschema_sv_iv_list_decimal_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from decimal import Decimal
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-list-decimal-enumeration-5-NS"
class NistschemaSvIvListDecimalEnumeration5Type(Enum):
    """Auto-generated (xsdata) enumeration for an xs:list of xs:decimal.

    Each member's value is a tuple of Decimal items — one tuple per legal
    list value declared in the NIST schema's enumeration facet; the member
    names encode the list contents. Do not edit by hand.
    """
    VALUE_69_3817003_806533146393_14244_7_692_24_9875727835_21_0_272460498067008671_789_1_580436548_481858549_88_5294419203 = (
        Decimal("-69.3817003"),
        Decimal("-806533146393.14244"),
        Decimal("-7.692"),
        Decimal("24.9875727835"),
        Decimal("-21"),
        Decimal("-0.272460498067008671"),
        Decimal("789.1"),
        Decimal("580436548.481858549"),
        Decimal("-88.5294419203"),
    )
    VALUE_185053587719_80_794_5_43507402_5874_3675248638_27067_480489_97043436573497932_0 = (
        Decimal("-185053587719.80"),
        Decimal("-794"),
        Decimal("-5.43507402"),
        Decimal("-5874.3675248638"),
        Decimal("-27067.480489"),
        Decimal("97043436573497932.0"),
    )
    VALUE_42_12164620414052_54707_925170_674092163_2667417_7840_18_112787_7433_33_9_5_1662400_522081_438918_1_1 = (
        Decimal("42.12164620414052"),
        Decimal("54707.925170"),
        Decimal("-674092163.2667417"),
        Decimal("7840.18"),
        Decimal("-112787.7433"),
        Decimal("-33.9"),
        Decimal("5.1662400"),
        Decimal("522081.438918"),
        Decimal("-1.1"),
    )
    VALUE_0_9679_11_93_55376_858_67_49397_5889105522594_120_17635_60_74_3256_344002_1962231 = (
        Decimal("-0.9679"),
        Decimal("-11.93"),
        Decimal("55376.858"),
        Decimal("67.49397"),
        Decimal("5889105522594.120"),
        Decimal("-17635.60"),
        Decimal("74.3256"),
        Decimal("-344002.1962231"),
    )
    VALUE_980968517543_7_1_66_0_9_36_60485_855511097386_6 = (
        Decimal("-980968517543.7"),
        Decimal("-1.66"),
        Decimal("0.9"),
        Decimal("-36.60485"),
        Decimal("-855511097386.6"),
    )
    VALUE_677_8137_953196_72765_500799323_66_3301179007_728_471755663018_9327833_3550357_8298052_89_320163069_05121_41035356_53_0_944465388 = (
        Decimal("677.8137"),
        Decimal("953196.72765"),
        Decimal("-500799323"),
        Decimal("66.3301179007"),
        Decimal("-728.471755663018"),
        Decimal("9327833.3550357"),
        Decimal("8298052.89"),
        Decimal("320163069.05121"),
        Decimal("41035356.53"),
        Decimal("0.944465388"),
    )
    VALUE_37_74_9412497_567185408_32_2_89122_6_0_5_140_36420183149 = (
        Decimal("37.74"),
        Decimal("9412497.567185408"),
        Decimal("-32.2"),
        Decimal("-89122.6"),
        Decimal("-0.5"),
        Decimal("140.36420183149"),
    )
@dataclass
class NistschemaSvIvListDecimalEnumeration5:
    """Auto-generated (xsdata) root-element binding for
    NISTSchema-SV-IV-list-decimal-enumeration-5. Do not edit by hand.
    """
    class Meta:
        # XML element name and namespace used by the xsdata (de)serializer.
        name = "NISTSchema-SV-IV-list-decimal-enumeration-5"
        namespace = "NISTSchema-SV-IV-list-decimal-enumeration-5-NS"
    # Element content: one of the enumerated decimal-list values;
    # required by the schema, hence metadata {"required": True}.
    value: Optional[NistschemaSvIvListDecimalEnumeration5Type] = field(
        default=None,
        metadata={
            "required": True,
        }
    )
| 36.263736 | 143 | 0.597273 |
8e8df65900b38cca2dd0b143977afa835df716ec | 15,942 | py | Python | dashboard/dashboard/file_bug.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 1 | 2019-11-01T23:31:22.000Z | 2019-11-01T23:31:22.000Z | dashboard/dashboard/file_bug.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 5 | 2020-09-07T12:36:46.000Z | 2022-03-02T05:49:30.000Z | dashboard/dashboard/file_bug.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 1 | 2020-07-25T00:02:48.000Z | 2020-07-25T00:02:48.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for filing a bug on the issue tracker."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import logging
import re
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import ndb
from dashboard import auto_bisect
from dashboard import oauth2_decorator
from dashboard import short_uri
from dashboard.common import namespaced_stored_object
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import bug_data
from dashboard.models import bug_label_patterns
from dashboard.models import histogram
from dashboard.services import crrev_service
from dashboard.services import gitiles_service
from dashboard.services import issue_tracker_service
from tracing.value.diagnostics import reserved_infos
# A list of bug labels to suggest for all performance regression bugs.
_DEFAULT_LABELS = [
    'Type-Bug-Regression',
    'Pri-2',
]
# OmahaProxy endpoint listing Chrome release versions per channel;
# presumably used by the milestone-labeling code later in this file —
# not referenced in the portion visible here.
_OMAHA_PROXY_URL = 'https://omahaproxy.appspot.com/all.json'
class FileBugHandler(request_handler.RequestHandler):
  """Uses oauth2 to file a new bug with a set of alerts."""
  def post(self):
    """A POST request for this endpoint is the same as a GET request."""
    self.get()
  @oauth2_decorator.DECORATOR.oauth_required
  def get(self):
    """Either shows the form to file a bug, or if filled in, files the bug.
    The form to file a bug is popped up from the triage-dialog polymer element.
    The default summary, description and label strings are constructed there.
    Request parameters:
      summary: Bug summary string.
      description: Bug full description string.
      owner: Bug owner email address.
      keys: Comma-separated Alert keys in urlsafe format.
    Outputs:
      HTML, using the template 'bug_result.html'.
    """
    # Only sheriff-whitelisted (chromium.org) users may file bugs.
    if not utils.IsValidSheriffUser():
      self.RenderHtml('bug_result.html', {
          'error': 'You must be logged in with a chromium.org account '
                   'to file bugs.'
      })
      return
    summary = self.request.get('summary')
    description = self.request.get('description')
    labels = self.request.get_all('label')
    components = self.request.get_all('component')
    keys = self.request.get('keys')
    owner = self.request.get('owner')
    cc = self.request.get('cc')
    if not keys:
      self.RenderHtml('bug_result.html', {
          'error': 'No alerts specified to add bugs to.'
      })
      return
    # 'finish' distinguishes the second round-trip (form submitted) from
    # the first (show the dialog).
    if self.request.get('finish'):
      self._CreateBug(owner, cc, summary, description, labels, components, keys)
    else:
      self._ShowBugDialog(summary, description, keys)
  def _ShowBugDialog(self, summary, description, urlsafe_keys):
    """Sends a HTML page with a form for filing the bug.
    Args:
      summary: The default bug summary string.
      description: The default bug description string.
      urlsafe_keys: Comma-separated Alert keys in urlsafe format.
    """
    alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')]
    # Suggested labels/components come from sheriff config and bug-label
    # patterns; ownership-derived components are merged in below.
    labels, components = _FetchLabelsAndComponents(alert_keys)
    owner_components = _FetchBugComponents(alert_keys)
    self.RenderHtml('bug_result.html', {
        'bug_create_form': True,
        'keys': urlsafe_keys,
        'summary': summary,
        'description': description,
        'labels': labels,
        'components': components.union(owner_components),
        'owner': '',
        'cc': users.get_current_user(),
    })
  def _CreateBug(self, owner, cc, summary, description, labels, components,
                 urlsafe_keys):
    """Creates a bug, associates it with the alerts, sends a HTML response.
    Args:
      owner: string, must end with @chromium.org if not empty.
      cc: CSV of email addresses to CC on the bug.
      summary: The new bug summary string.
      description: The new bug description string.
      labels: List of label strings for the new bug.
      components: List of component strings for the new bug.
      urlsafe_keys: Comma-separated alert keys in urlsafe format.
    """
    # Only project members (@chromium.org accounts) can be owners of bugs.
    if owner and not owner.endswith('@chromium.org'):
      self.RenderHtml('bug_result.html', {
          'error': 'Owner email address must end with @chromium.org.'
      })
      return
    http = oauth2_decorator.DECORATOR.http()
    # The heavy lifting (issue tracker call, bisect kickoff) is delegated
    # to the module-level FileBug helper.
    template_params = FileBug(
        http, owner, cc, summary, description, labels, components,
        urlsafe_keys.split(','))
    self.RenderHtml('bug_result.html', template_params)
def _GetDocsForTest(test):
  """Returns the first documentation-URL entry for the test's suite, or None."""
  # The suite key is the first three components of the test path.
  suite_path = '/'.join(test.id().split('/')[:3])
  suite_key = utils.TestKey(suite_path)
  data = histogram.SparseDiagnostic.GetMostRecentDataByNamesSync(
      suite_key, [reserved_infos.DOCUMENTATION_URLS.name])
  if not data:
    return None
  values = data[reserved_infos.DOCUMENTATION_URLS.name].get('values')
  return values[0]
def _AdditionalDetails(bug_id, alerts):
  """Returns a message with additional information to add to a bug."""
  base_url = '%s/group_report' % _GetServerURL()
  bug_page_url = '%s?bug_id=%s' % (base_url, bug_id)
  # Snapshot the current alert set behind a short URI so the original
  # list stays reachable even after the bug's alert list changes.
  sid = short_uri.GetOrCreatePageState(json.dumps(_UrlsafeKeys(alerts)))
  alerts_url = '%s?sid=%s' % (base_url, sid)
  comment = '<b>All graphs for this bug:</b>\n %s\n\n' % bug_page_url
  comment += ('(For debugging:) Original alerts at time of bug-filing:\n %s\n'
              % alerts_url)
  bot_names = {a.bot_name for a in alerts}
  if bot_names:
    comment += '\n\nBot(s) for this bug\'s original alert(s):\n\n'
    comment += '\n'.join(sorted(bot_names))
  else:
    comment += '\nCould not extract bot names from the list of alerts.'
  # Collect documentation links, at most one entry per benchmark suite.
  docs_by_suite = {}
  for a in alerts:
    test = a.GetTestMetadataKey()
    suite = test.id().split('/')[2]
    if suite in docs_by_suite:
      continue
    docs = _GetDocsForTest(test)
    if not docs:
      continue
    docs_by_suite[suite] = docs
  # docs entries are (title, url) pairs — see _GetDocsForTest.
  for k, v in docs_by_suite.items():
    comment += '\n\n%s - %s:\n %s' % (k, v[0], v[1])
  return comment
def _GetServerURL():
  # https URL of the default App Engine version serving this app.
  return 'https://' + app_identity.get_default_version_hostname()
def _UrlsafeKeys(alerts):
  """Returns the urlsafe string form of each alert's ndb key."""
  return [alert.key.urlsafe() for alert in alerts]
def _ComponentFromCrLabel(label):
return label.replace('Cr-', '').replace('-', '>')
def _FetchLabelsAndComponents(alert_keys):
  """Fetches a list of bug labels and components for the given Alert keys.

  Labels come from the defaults, the sheriffs of the alerts, and per-test
  bug-label patterns; tags starting with 'Cr-' are translated into
  components instead.

  Args:
    alert_keys: List of ndb Keys for alert entities.

  Returns:
    A (labels, components) pair of sets of strings.
  """
  labels = set(_DEFAULT_LABELS)
  components = set()
  alerts = ndb.get_multi(alert_keys)
  sheriff_keys = set(alert.sheriff for alert in alerts)
  sheriff_labels = [sheriff.labels for sheriff in ndb.get_multi(sheriff_keys)]
  # Flatten the per-sheriff label lists into a single list of tags.
  tags = [item for sublist in sheriff_labels for item in sublist]
  for tag in tags:
    if tag.startswith('Cr-'):
      components.add(_ComponentFromCrLabel(tag))
    else:
      labels.add(tag)
  if any(a.internal_only for a in alerts):
    # This is a Chrome-specific behavior, and should ideally be made
    # more general (maybe there should be a list in datastore of bug
    # labels to add for internal bugs).
    labels.add('Restrict-View-Google')
  for test in {a.GetTestMetadataKey() for a in alerts}:
    labels_components = bug_label_patterns.GetBugLabelsForTest(test)
    for item in labels_components:
      if item.startswith('Cr-'):
        components.add(_ComponentFromCrLabel(item))
      else:
        labels.add(item)
  return labels, components
def _FetchBugComponents(alert_keys):
  """Fetches the ownership bug components of the most recent alert on a per-test
  path basis from the given alert keys.

  Returns:
    A set of component strings, one per distinct test path that has an
    ownership component recorded.
  """
  alerts = ndb.get_multi(alert_keys)
  # Iterate newest-first so that the first component recorded for each
  # test path is the most recent one.
  sorted_alerts = reversed(sorted(alerts, key=lambda alert: alert.timestamp))
  most_recent_components = {}
  for alert in sorted_alerts:
    alert_test = alert.test.id()
    if (alert.ownership and alert.ownership.get('component') and
        most_recent_components.get(alert_test) is None):
      # 'component' may be stored either as a list or a single string.
      if isinstance(alert.ownership['component'], list):
        most_recent_components[alert_test] = alert.ownership['component'][0]
      else:
        most_recent_components[alert_test] = alert.ownership['component']
  return set(most_recent_components.values())
def _MilestoneLabel(alerts):
  """Returns a milestone label string (e.g. 'M-63'), or None.

  Because revision numbers for other repos may not be easily reconcilable with
  Chromium milestones, do not label them (see
  https://github.com/catapult-project/catapult/issues/2906).

  Args:
    alerts: List of alert entities; entities without an end_revision are
        ignored.

  Returns:
    A 'M-<n>' label string, or None if no milestone could be determined.
  """
  revisions = [a.end_revision for a in alerts if hasattr(a, 'end_revision')]
  if not revisions:
    return None
  end_revision = min(revisions)
  for a in alerts:
    if a.end_revision == end_revision:
      row_key = utils.GetRowKey(a.test, a.end_revision)
      row = row_key.get()
      # Only rows carrying a Chromium commit position can be mapped to a
      # milestone; bail out otherwise.
      if hasattr(row, 'r_commit_pos'):
        end_revision = row.r_commit_pos
      else:
        return None
      break
  try:
    milestone = _GetMilestoneForRevision(end_revision)
  except KeyError:
    logging.error('List of versions not in the expected format')
    # Bug fix: previously `milestone` was left unbound on this path, so the
    # `if not milestone` check below raised UnboundLocalError instead of
    # returning None.
    milestone = None
  if not milestone:
    return None
  logging.info('Matched rev %s to milestone %s.', end_revision, milestone)
  return 'M-%d' % milestone
def _GetMilestoneForRevision(revision):
  """Finds the oldest milestone for a given revision from OmahaProxy.

  The purpose of this function is to resolve the milestone that would be blocked
  by a suspected regression. We do this by locating in the list of current
  versions, regardless of platform and channel, all the version strings (e.g.
  36.0.1234.56) that match revisions (commit positions) later than the earliest
  possible end_revision of the suspected regression; we then parse out the
  first numeric part of such strings, assume it to be the corresponding
  milestone, and return the lowest one in the set.

  Args:
    revision: An integer or string containing an integer.

  Returns:
    An integer representing the lowest milestone matching the given revision or
    the highest milestone if the given revision exceeds all defined milestones.
    Note that the default is 0 when no milestones at all are found. If the
    given revision is None, then None is returned.
  """
  if revision is None:
    return None
  milestones = set()
  default_milestone = 0
  all_versions = _GetAllCurrentVersionsFromOmahaProxy()
  for os in all_versions:
    for version in os['versions']:
      try:
        milestone = int(version['current_version'].split('.')[0])
        version_commit = version.get('branch_base_position')
        if version_commit and int(revision) < int(version_commit):
          milestones.add(milestone)
        # Track the highest milestone seen as a fallback for revisions newer
        # than every known branch point.
        if milestone > default_milestone:
          default_milestone = milestone
      except ValueError:
        # Sometimes 'N/A' is given. We ignore these entries.
        # NOTE(review): version['branch_base_position'] in the log call below
        # can itself raise KeyError when the key is absent (the lookup above
        # used .get()); the caller catches KeyError — confirm this is intended.
        logging.warn('Could not cast one of: %s, %s, %s as an int',
                     revision, version['branch_base_position'],
                     version['current_version'].split('.')[0])
  if milestones:
    return min(milestones)
  return default_milestone
def _GetAllCurrentVersionsFromOmahaProxy():
  """Retrieves the list of current versions from OmahaProxy and parses it.

  Returns:
    The decoded JSON payload, or an empty list if the fetch fails, returns a
    non-200 status, or the body is not valid JSON.
  """
  try:
    response = urlfetch.fetch(_OMAHA_PROXY_URL)
    if response.status_code == 200:
      return json.loads(response.content)
  except urlfetch.Error:
    logging.error('Error pulling list of current versions (omahaproxy).')
  except ValueError:
    logging.error('OmahaProxy did not return valid JSON.')
  return []
def _GetSingleCLForAnomalies(alerts):
"""If all anomalies were caused by the same culprit, return it. Else None."""
revision = alerts[0].start_revision
if not all(a.start_revision == revision and
a.end_revision == revision for a in alerts):
return None
return revision
def _GetCommitInfoForAlert(alert):
  """Resolves gitiles commit info for an alert's end revision.

  Only alerts from the ChromiumPerf and ClankInternal masters are supported;
  for others None is returned. Commit positions are converted to git hashes
  via crrev before the gitiles lookup.

  Args:
    alert: An alert entity with `test` and `end_revision` attributes.

  Returns:
    A gitiles commit-info dict, or None if the revision cannot be resolved
    to a git hash in a known repository.
  """
  repository_url = None
  repositories = namespaced_stored_object.Get('repositories')
  test_path = utils.TestPath(alert.test)
  if test_path.startswith('ChromiumPerf'):
    repository_url = repositories['chromium']['repository_url']
  elif test_path.startswith('ClankInternal'):
    repository_url = repositories['clank']['repository_url']
  if not repository_url:
    # Can't get committer info from this repository.
    return None
  rev = str(auto_bisect.GetRevisionForBisect(alert.end_revision, alert.test))
  # TODO(sullivan, dtu): merge this with similar pinoint code.
  # A 5-7 digit number is a Chromium commit position, not a git hash.
  if (re.match(r'^[0-9]{5,7}$', rev) and
      repository_url == repositories['chromium']['repository_url']):
    # This is a commit position, need the git hash.
    result = crrev_service.GetNumbering(
        number=rev,
        numbering_identifier='refs/heads/master',
        numbering_type='COMMIT_POSITION',
        project='chromium',
        repo='chromium/src')
    rev = result['git_sha']
  if not re.match(r'[a-fA-F0-9]{40}$', rev):
    # This still isn't a git hash; can't assign bug.
    return None
  return gitiles_service.CommitInfo(repository_url, rev)
def _AssignBugToCLAuthor(bug_id, commit_info, service):
"""Assigns the bug to the author of the given revision."""
author = commit_info['author']['email']
message = commit_info['message']
service.AddBugComment(
bug_id,
'Assigning to %s because this is the only CL in range:\n%s' % (
author, message),
status='Assigned',
owner=author)
def FileBug(http, owner, cc, summary, description, labels, components,
            urlsafe_keys, needs_bisect=True):
  """Files a new bug for the given alerts and optionally kicks off a bisect.

  Creates the bug with the caller's credentials, associates the alerts with
  the new bug id, posts a details comment with the service account, and —
  when all alerts are Anomalies — either assigns the bug to the single
  culprit CL's author or starts a new bisect.

  Args:
    http: An httplib2.Http-like object authorized as the requesting user.
    owner: Email address for the bug owner, or ''.
    cc: Comma-separated email addresses to cc.
    summary: Bug summary line.
    description: Bug description body; a default is used if empty.
    labels: List of label strings (mutated: a milestone label may be added).
    components: List of component strings.
    urlsafe_keys: List of urlsafe-encoded alert keys.
    needs_bisect: Whether a bisect should be started (may be disabled when
        the single culprit is not an autoroll).

  Returns:
    A dict of template parameters, containing 'bug_id' on success or
    'error' on failure, plus bisect results when a bisect was kicked off.
  """
  alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
  alerts = ndb.get_multi(alert_keys)
  if not description:
    description = 'See the link to graphs below.'
  milestone_label = _MilestoneLabel(alerts)
  if milestone_label:
    labels.append(milestone_label)
  # The bug itself is created with the user's credentials.
  user_issue_tracker_service = issue_tracker_service.IssueTrackerService(http)
  new_bug_response = user_issue_tracker_service.NewBug(
      summary, description, labels=labels, components=components, owner=owner,
      cc=cc)
  if 'error' in new_bug_response:
    return {'error': new_bug_response['error']}
  bug_id = new_bug_response['bug_id']
  bug_data.Bug(id=bug_id).put()
  # Point every alert at the freshly filed bug.
  for a in alerts:
    a.bug_id = bug_id
  ndb.put_multi(alerts)
  comment_body = _AdditionalDetails(bug_id, alerts)
  # Add the bug comment with the service account, so that there are no
  # permissions issues.
  dashboard_issue_tracker_service = issue_tracker_service.IssueTrackerService(
      utils.ServiceAccountHttp())
  dashboard_issue_tracker_service.AddBugComment(bug_id, comment_body)
  template_params = {'bug_id': bug_id}
  if all(k.kind() == 'Anomaly' for k in alert_keys):
    logging.info('Kicking bisect for bug ' + str(bug_id))
    culprit_rev = _GetSingleCLForAnomalies(alerts)
    if culprit_rev is not None:
      commit_info = _GetCommitInfoForAlert(alerts[0])
      if commit_info:
        author = commit_info['author']['email']
        message = commit_info['message']
        # Autoroll commits keep the bisect; anything else is assigned
        # directly to the CL author instead of bisecting.
        if not utils.GetSheriffForAutorollCommit(author, message):
          needs_bisect = False
          _AssignBugToCLAuthor(
              bug_id, commit_info, dashboard_issue_tracker_service)
    if needs_bisect:
      bisect_result = auto_bisect.StartNewBisectForBug(bug_id)
      if 'error' in bisect_result:
        logging.info('Failed to kick bisect for ' + str(bug_id))
        template_params['bisect_error'] = bisect_result['error']
      else:
        logging.info('Successfully kicked bisect for ' + str(bug_id))
        template_params.update(bisect_result)
  else:
    # Bisects only make sense for Anomaly alerts; log why we skipped.
    kinds = set()
    for k in alert_keys:
      kinds.add(k.kind())
    logging.info(
        'Didn\'t kick bisect for bug id %s because alerts had kinds %s',
        bug_id, list(kinds))
  return template_params
| 35.192053 | 80 | 0.704491 |
41d7d7b990fee48b76f92ba588f90d22375d5e86 | 5,782 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_region_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_region_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_region_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RegionOperations(object):
    """RegionOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.apimanagement.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # NOTE: this file is AutoRest-generated; hand edits are lost on
        # regeneration (see the header comment of the original file).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_service(
        self,
        resource_group_name,  # type: str
        service_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.RegionListResult"]
        """Lists all azure regions in which the service exists.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RegionListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.RegionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RegionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the initial GET request, or a GET against the service
            # supplied continuation link for subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_service.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserializes one page and returns (next_link, iterator of items).
            deserialized = self._deserialize('RegionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetches one page; raises HttpResponseError on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/regions'}  # type: ignore
| 47.00813 | 188 | 0.656174 |
ff902390044f07e135ab70feaf6a3a1c16337c3b | 1,799 | py | Python | setup.py | CasperCL/pyramid-oauthlib | e0cd47e8326052a705624a217abc120056280759 | [
"BSD-2-Clause"
] | null | null | null | setup.py | CasperCL/pyramid-oauthlib | e0cd47e8326052a705624a217abc120056280759 | [
"BSD-2-Clause"
] | null | null | null | setup.py | CasperCL/pyramid-oauthlib | e0cd47e8326052a705624a217abc120056280759 | [
"BSD-2-Clause"
] | null | null | null | import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to pytest.

    Supports an optional --cov flag to enable coverage measurement of the
    pyramid_oauthlib package.
    """

    user_options = [
        ('cov', None, "measure coverage")
    ]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # --cov is off unless given on the command line.
        self.cov = None

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Arguments handed to pytest.main(); always test the package dir.
        self.test_args = ['pyramid_oauthlib']
        if self.cov:
            self.test_args += ['--cov', 'pyramid_oauthlib']
        self.test_suite = True

    def run_tests(self):
        # Imported here because pytest is only needed when tests run.
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# Read the long description from README and CHANGES next to this setup.py.
here = os.path.abspath(os.path.dirname(__file__))

with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
    CHANGES = f.read()

setup(
    name='pyramid_oauthlib',
    version='0.3.0',
    description='Pyramid OAuthLib integration',
    long_description=README + '\n\n' + CHANGES,
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
    ],
    author='Randall Leeds',
    author_email='tilgovi@hypothes.is',
    url='https://github.com/tilgovi/pyramid_oauthlib',
    keywords='web pyramid pylons oauth authentication',
    # Route `python setup.py test` through the pytest wrapper above.
    cmdclass={'test': PyTest},
    exclude_package_data={'': ['.gitignore']},
    include_package_data=True,
    install_requires=['pyramid', 'oauthlib'],
    packages=find_packages(),
    setup_requires=['setuptools_git'],
    tests_require=['mock', 'pytest', 'pytest-cov'],
    zip_safe=False,
)
e373d4dc0fb43f00db4ab48fb184b5e8e7dc04d6 | 3,342 | py | Python | python/3dsmax_icon_viewer/3dsmax_icon_viewer.py | ATGH15102AFMLD/techart-staff | 8f38bb3d0c2d0f8316ddab0920a1170402a303c0 | [
"MIT"
] | null | null | null | python/3dsmax_icon_viewer/3dsmax_icon_viewer.py | ATGH15102AFMLD/techart-staff | 8f38bb3d0c2d0f8316ddab0920a1170402a303c0 | [
"MIT"
] | null | null | null | python/3dsmax_icon_viewer/3dsmax_icon_viewer.py | ATGH15102AFMLD/techart-staff | 8f38bb3d0c2d0f8316ddab0920a1170402a303c0 | [
"MIT"
] | null | null | null | import os
import MaxPlus
from PySide.QtCore import Qt
from PySide.QtCore import QDirIterator
from PySide.QtCore import QSize
from PySide.QtGui import QIcon
from PySide.QtGui import QWidget
from PySide.QtGui import QToolButton
from PySide.QtGui import QHBoxLayout
from PySide.QtGui import QVBoxLayout
from PySide.QtGui import QSortFilterProxyModel
from PySide.QtGui import QLineEdit
from PySide.QtGui import QListView
from PySide.QtGui import QStringListModel
from PySide.QtGui import QLabel
from PySide.QtGui import QImageReader
from PySide.QtGui import QPixmap
from PySide.QtGui import QImage
class TListModel(QStringListModel):
    """List model exposing every image found in the Qt resource system.

    Each row pairs a resource path (display text) with the loaded QImage
    (decoration role).
    """

    def __init__(self, parent=None):
        super(TListModel, self).__init__(parent)
        # File extensions Qt can decode, e.g. {'.png', '.jpg'}.
        self.supported_image_formats = set()
        # Parallel lists: resource path and its decoded image.
        self.paths = []
        self.icons = []
        self.load_icons()

    def rowCount(self, parent):
        return len(self.paths)

    def data(self, index, role=Qt.DisplayRole):
        row = index.row()
        if role in (Qt.DisplayRole, Qt.EditRole):
            return str(self.paths[row])
        if role == Qt.DecorationRole:
            return self.icons[row]
        if role == Qt.SizeHintRole:
            return QSize(100, 48)

    def load_icons(self):
        """Scans the ':' resource tree and loads every decodable image."""
        for format in QImageReader.supportedImageFormats():
            self.supported_image_formats.add('.' + str(format))
        # ':' is the root of the embedded Qt resource filesystem.
        it = QDirIterator(":", QDirIterator.Subdirectories)
        while it.hasNext():
            path = it.next()
            fn, ext = os.path.splitext(path)
            if ext in self.supported_image_formats:
                image = QImage(path)
                self.icons.append(image)
                self.paths.append(path)
class IconExplorer(QWidget):
    """Browser window for the icons embedded in 3ds Max's Qt resources.

    Left side: a wildcard-filterable list of resource images; right side: a
    preview of the selected image. A toolbutton demonstrates applying the
    selected icon.
    """

    def __init__(self, parent=None):
        super(IconExplorer, self).__init__(parent)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle("3ds Max Icon Explorer")
        self.setup_ui()
        self.resize(800, 600)

    def setup_ui(self):
        """Builds the widget tree and wires signals."""
        main_layout = QHBoxLayout(self)
        edt = QLineEdit(self)
        edt.setPlaceholderText("Wildcard filter")
        btn = QToolButton(self)
        btn.clicked.connect(self.set_icon)
        layout = QHBoxLayout(self)
        layout.addWidget(edt)
        layout.addWidget(btn)
        layout2 = QVBoxLayout()
        layout2.addLayout(layout)
        model = TListModel(self)
        # Proxy provides case-insensitive wildcard filtering over the model.
        proxy = QSortFilterProxyModel(self)
        proxy.setFilterCaseSensitivity(Qt.CaseInsensitive)
        proxy.setSourceModel(model)
        edt.textChanged.connect(proxy.setFilterWildcard)
        list = QListView()
        list.setModel(proxy)
        selection_model = list.selectionModel()
        selection_model.currentChanged.connect(self.currentChanged)
        layout2.addWidget(list)
        main_layout.addLayout(layout2)
        image = QLabel("Select icon", self)
        image.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter);
        image.setMinimumWidth(256)
        main_layout.addWidget(image)
        self.btn = btn
        self.edt = edt
        self.image = image
        self.list = list
        self.proxy = proxy
        self.model = model
        self.selection_model = selection_model

    def currentChanged(self, current, previous):
        """Shows the image for the newly selected (filtered) row."""
        row = current.row()
        # Map the proxy (filtered) row back to the source model row.
        proxyIndex = self.proxy.index(row, 0)
        sourceIndex = self.proxy.mapToSource(proxyIndex)
        row = sourceIndex.row()
        image = self.proxy.sourceModel().icons[row]
        self.image.setPixmap(QPixmap(image));

    def set_icon(self):
        """Applies the currently selected resource image to the toolbutton."""
        i = self.list.currentIndex()
        if i is not None:
            path = self.model.data(i, Qt.DisplayRole)
            ico = QIcon(path)
            self.btn.setIcon(ico)
if __name__ == '__main__':
    # Parent to the main 3ds Max window; Max supplies the running
    # QApplication, so none is created here.
    wnd = IconExplorer(MaxPlus.GetQMaxWindow())
    wnd.show()
34a22dd4ad9ab46d6938c8ba8be9e6f6b3432bf1 | 497 | py | Python | quickkart_api/auth.py | envaleed/quick-kart-api-deploy | 2b962dce3bc5ba19d4e90cb86822c016d51f65c2 | [
"MIT"
] | null | null | null | quickkart_api/auth.py | envaleed/quick-kart-api-deploy | 2b962dce3bc5ba19d4e90cb86822c016d51f65c2 | [
"MIT"
] | null | null | null | quickkart_api/auth.py | envaleed/quick-kart-api-deploy | 2b962dce3bc5ba19d4e90cb86822c016d51f65c2 | [
"MIT"
] | null | null | null | from quickkart_api import app
from quickkart_api.models import Users
from flask_jwt import JWT, jwt_required, current_identity
from flask import abort
def authenticate(username, password):
    """Flask-JWT authentication handler: returns the user on valid credentials.

    NOTE(review): on failure this calls abort(500), which raises an HTTP 500
    server error for what is a client-side credential problem. Flask-JWT's
    convention is to return None so the framework responds with 401 —
    confirm whether the 500 is intentional.
    """
    user = Users.query.filter_by(username=username).first()
    if user and user.check_password(password):
        return user
    return abort(500, "Authentication failed")
def identity(payload):
    """Flask-JWT identity handler: loads the user for a decoded token payload.

    The 'identity' claim holds the user's primary key id.
    """
    return Users.query.filter(Users.id == payload['identity']).scalar()

# Wire both handlers into the app; exposes the /auth endpoint.
jwt = JWT(app,authenticate,identity)
c659153b82b044486d578db646bdbde83a5377c3 | 193 | py | Python | python/testData/inspections/PyRedundantParenthesesInspection/TryExcept.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyRedundantParenthesesInspection/TryExcept.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyRedundantParenthesesInspection/TryExcept.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | try:
foo()
except (<weak_warning descr="Remove redundant parentheses">(A)</weak_warning>):
pass
except <weak_warning descr="Remove redundant parentheses">(A)</weak_warning> :
pass
| 24.125 | 79 | 0.715026 |
11079142c6764f7180438174fb3f133f46e5d956 | 6,733 | py | Python | editor.py | HackerPoet/FursonaGenerator | ef2ee24774d783fe83a6c7700b9a1cdd1c434368 | [
"MIT"
] | 61 | 2019-01-25T02:47:55.000Z | 2021-11-21T21:41:32.000Z | editor.py | HackerPoet/FursonaGenerator | ef2ee24774d783fe83a6c7700b9a1cdd1c434368 | [
"MIT"
] | 1 | 2021-01-14T07:42:08.000Z | 2021-04-22T00:53:38.000Z | editor.py | HackerPoet/FursonaGenerator | ef2ee24774d783fe83a6c7700b9a1cdd1c434368 | [
"MIT"
] | 12 | 2019-02-04T20:18:29.000Z | 2021-08-28T13:27:54.000Z | import pygame
import random, sys
import numpy as np
import cv2
#User constants
device = "cpu"              # Theano device name, selects the .theanorc used below
model_dir = 'test24/'       # directory holding model weights and statistics
is_gan = True               # True: load a GAN generator; False: an autoencoder
background_color = (210, 210, 210)
edge_color = (60, 60, 60)
slider_color = (20, 20, 20)
num_params = 80             # latent dimensionality (overwritten from the model)
image_scale = 3             # preview magnification factor
image_padding = 10          # window margin, in pixels
slider_w = 15               # slider track width (before padding is added)
slider_h = 100              # slider track height (before padding is added)
slider_px = 5               # horizontal slider padding
slider_py = 10              # vertical slider padding
slider_cols = 20            # sliders per row in the grid
#Keras
# NOTE: this script is Python 2 (print statements, xrange, integer division).
print "Loading Keras..."
import os
os.environ['THEANORC'] = "./" + device + ".theanorc"
os.environ['KERAS_BACKEND'] = "theano"
import theano
print "Theano Version: " + theano.__version__
from keras.models import Sequential, load_model, model_from_json
from keras.layers import Dense, Activation, Dropout, Flatten, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.noise import GaussianNoise
from keras.layers.local import LocallyConnected2D
from keras.optimizers import Adam, RMSprop, SGD
from keras.regularizers import l2
from keras.layers.advanced_activations import ELU
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
from keras import backend as K
K.set_image_data_format('channels_first')

print "Loading model..."
if is_gan:
    gen_model = load_model(model_dir + 'generator.h5')
    # Latent size and output image shape come from the generator itself.
    num_params = gen_model.input_shape[1]
    img_c, img_h, img_w = gen_model.output_shape[1:]
    if len(sys.argv) >= 2:
        # Batch mode: encode the given image, decode it, write "<name>_out.<ext>".
        enc_model = load_model(model_dir + 'encoder.h5')
        fname_in = sys.argv[1]
        fname_out = fname_in.split('.')
        fname_out[-2] += "_out"
        fname_out = '.'.join(fname_out)
        img = cv2.imread(fname_in)
        h = img.shape[0]
        w = img.shape[1]
        # Center-crop to a square before resizing to the model's input size.
        if w > h:
            offs = (w - h)/2
            img = img[:,offs:offs+h,:]
        elif h > w:
            offs = (h - w)/2
            img = img[offs:offs+w,:,:]
        img = cv2.resize(img, (img_h, img_w), interpolation = cv2.INTER_AREA)
        # HWC -> CHW, scale to [0, 1], add batch dimension.
        img = np.transpose(img, (2, 0, 1))
        img = img.astype(np.float32) / 255.0
        img = np.expand_dims(img, axis=0)
        w = enc_model.predict(img)
        img = gen_model.predict(enc_model.predict(img))[0]
        img = (img * 255.0).astype(np.uint8)
        img = np.transpose(img, (1, 2, 0))
        cv2.imwrite(fname_out, img)
        exit(0)
else:
    # Autoencoder variant: drive the decoder half of the full model directly.
    model = load_model(model_dir + 'model.h5')
    gen_func = K.function([model.get_layer('encoder').input, K.learning_phase()], [model.layers[-1].output])
    num_params = model.get_layer('encoder').input_shape[1]
    img_c, img_h, img_w = model.output_shape[1:]
assert(img_c == 3)
#Derived constants
slider_w = slider_w + slider_px*2
slider_h = slider_h + slider_py*2
drawing_x = image_padding
drawing_y = image_padding
drawing_w = img_w * image_scale
drawing_h = img_h * image_scale
# Python 2 integer division: ceil(num_params / slider_cols).
slider_rows = (num_params - 1) / slider_cols + 1
sliders_x = drawing_x + drawing_w + image_padding
sliders_y = image_padding
sliders_w = slider_w * slider_cols
sliders_h = slider_h * slider_rows
window_w = drawing_w + sliders_w + image_padding*3
window_h = drawing_h + image_padding*2

#Global variables
prev_mouse_pos = None
mouse_pressed = False
cur_slider_ix = 0           # index of the slider being dragged
needs_update = True         # set whenever cur_params changed; triggers re-render
cur_params = np.zeros((num_params,), dtype=np.float32)
cur_face = np.zeros((img_c, img_h, img_w), dtype=np.uint8)
rgb_array = np.zeros((img_h, img_w, img_c), dtype=np.uint8)

print "Loading Statistics..."
# Latent-space statistics: mean, per-dim std, and PCA eigenvalues/vectors
# used to map slider values into the model's latent space.
means = np.load(model_dir + 'means.npy')
stds = np.load(model_dir + 'stds.npy')
evals = np.load(model_dir + 'evals.npy')
evecs = np.load(model_dir + 'evecs.npy')

#Open a window
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode((window_w, window_h))
face_surface_mini = pygame.Surface((img_w, img_h))
face_surface = screen.subsurface((drawing_x, drawing_y, drawing_w, drawing_h))
pygame.display.set_caption('Fursona Editor - By CodeParade')
font = pygame.font.SysFont("monospace", 15)
def update_mouse_click(mouse_pos):
    """Selects the slider under the mouse, if any, and starts a drag."""
    global cur_slider_ix
    global mouse_pressed
    # Position relative to the slider grid's top-left corner.
    x = (mouse_pos[0] - sliders_x)
    y = (mouse_pos[1] - sliders_y)
    if x >= 0 and y >= 0 and x < sliders_w and y < sliders_h:
        # Python 2 integer division yields the grid cell coordinates.
        slider_ix_w = x / slider_w
        slider_ix_h = y / slider_h
        cur_slider_ix = slider_ix_h * slider_cols + slider_ix_w
        mouse_pressed = True
def update_mouse_move(mouse_pos):
    """Updates the dragged slider's parameter from the mouse's y position."""
    global needs_update
    y = (mouse_pos[1] - sliders_y)
    if y >= 0 and y < sliders_h:
        slider_row_ix = cur_slider_ix / slider_cols
        # Offset within this slider's row, clamped to the usable track.
        slider_val = y - slider_row_ix * slider_h
        slider_val = min(max(slider_val, slider_py), slider_h - slider_py) - slider_py
        # Map track position to the parameter range [-3, +3].
        val = (float(slider_val) / (slider_h - slider_py*2) - 0.5) * 6.0
        cur_params[cur_slider_ix] = val
        needs_update = True
def draw_sliders():
    """Draws the grid of latent-parameter sliders: track, knob, tick marks."""
    for i in xrange(num_params):
        row = i / slider_cols
        col = i % slider_cols
        x = sliders_x + col * slider_w
        y = sliders_y + row * slider_h

        # Vertical track line.
        cx = x + slider_w / 2
        cy_1 = y + slider_py
        cy_2 = y + slider_h - slider_py
        pygame.draw.line(screen, slider_color, (cx, cy_1), (cx, cy_2))

        # Knob position derived from the parameter value in [-3, +3].
        py = y + int((cur_params[i] / 6.0 + 0.5) * (slider_h - slider_py*2)) + slider_py
        pygame.draw.circle(screen, slider_color, (cx, py), slider_w/2 - slider_px)

        # Seven horizontal tick marks per slider.
        cx_1 = x + slider_px
        cx_2 = x + slider_w - slider_px
        for j in xrange(7):
            ly = y + slider_h/2 + (j-3)*(slider_h/7)
            pygame.draw.line(screen, slider_color, (cx_1, ly), (cx_2, ly))
def draw_face():
    """Blits the current CHW face image, scaled up, into the preview area."""
    # CHW -> WHC ordering, as expected by pygame.surfarray.
    pygame.surfarray.blit_array(face_surface_mini, np.transpose(cur_face, (2, 1, 0)))
    pygame.transform.scale(face_surface_mini, (drawing_w, drawing_h), face_surface)
    pygame.draw.rect(screen, (0,0,0), (drawing_x, drawing_y, drawing_w, drawing_h), 1)
#Main loop
# Left-drag adjusts a slider, right-click resets all parameters, and the
# 'r' key randomizes them. The face is re-generated only when needed.
running = True
while running:
    #Process events
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
            break
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if pygame.mouse.get_pressed()[0]:
                prev_mouse_pos = pygame.mouse.get_pos()
                update_mouse_click(prev_mouse_pos)
                update_mouse_move(prev_mouse_pos)
            elif pygame.mouse.get_pressed()[2]:
                # Right click: reset every parameter to zero.
                cur_params = np.zeros((num_params,), dtype=np.float32)
                needs_update = True
        elif event.type == pygame.MOUSEBUTTONUP:
            mouse_pressed = False
            prev_mouse_pos = None
        elif event.type == pygame.MOUSEMOTION and mouse_pressed:
            update_mouse_move(pygame.mouse.get_pos())
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_r:
                # Randomize parameters, clipped to the slider range.
                cur_params = np.clip(np.random.normal(0.0, 1.0, (num_params,)), -3.0, 3.0)
                needs_update = True

    #Check if we need an update
    if needs_update:
        # Project slider values through the PCA basis back into latent space.
        x = means + np.dot(cur_params * evals, evecs)
        #x = means + stds * cur_params
        x = np.expand_dims(x, axis=0)
        if is_gan:
            y = gen_model.predict(x)[0]
        else:
            y = gen_func([x, 0])[0][0]
        cur_face = (y * 255.0).astype(np.uint8)
        needs_update = False

    #Draw to the screen
    screen.fill(background_color)
    draw_face()
    draw_sliders()

    #Flip the screen buffer
    pygame.display.flip()
    pygame.time.wait(10)
b90f167d5d81968d3e0bd828b428923552cb9765 | 2,275 | py | Python | mobilenet_supervised_cifar.py | koshian2/Pseudo-Label-Keras | bf26731980d4a31fe8ce084579ec02e8b14a6e15 | [
"MIT"
] | 39 | 2019-02-05T06:34:10.000Z | 2022-02-21T14:55:22.000Z | mobilenet_supervised_cifar.py | koshian2/Pseudo-Label-Keras | bf26731980d4a31fe8ce084579ec02e8b14a6e15 | [
"MIT"
] | null | null | null | mobilenet_supervised_cifar.py | koshian2/Pseudo-Label-Keras | bf26731980d4a31fe8ce084579ec02e8b14a6e15 | [
"MIT"
] | 11 | 2019-02-05T06:34:21.000Z | 2020-09-20T12:53:32.000Z | from keras.layers import Input, UpSampling2D, Dense, GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.models import Model
from keras.utils import to_categorical
from keras.datasets import cifar10
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
import pickle, os, zipfile, glob
#1から訓練させる
def create_cnn():
net = MobileNet(input_shape=(128,128,3), weights=None, include_top=False)
# upsampling(32->128)
input = Input((32,32,3))
x = UpSampling2D(4)(input)
x = net(x)
x = GlobalAveragePooling2D()(x)
x = Dense(10, activation="softmax")(x)
model = Model(input, x)
model.summary()
return model
def train(n_labeled_data):
    """Trains the supervised baseline on a random labeled subset of CIFAR-10.

    Saves the training history to result_mobilenet/history_<n>.dat and a
    t-SNE plot of the penultimate-layer test embeddings to
    result_mobilenet/embedding_<n>.png.

    Args:
        n_labeled_data: Number of labeled training examples to use.
    """
    model = create_cnn()
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # Pick a random subset of the training set as the labeled data.
    indices = np.arange(X_train.shape[0])
    np.random.shuffle(indices)
    y_test_true = np.ravel(y_test)

    X_train = X_train[indices[:n_labeled_data]] / 255.0
    X_test = X_test / 255.0
    y_train = to_categorical(y_train[indices[:n_labeled_data]], 10)
    y_test = to_categorical(y_test, 10)

    model.compile("adam", loss="categorical_crossentropy", metrics=["acc"])
    if not os.path.exists("result_mobilenet"):
        os.mkdir("result_mobilenet")
    # Cap the batch size at the size of the labeled subset.
    hist = model.fit(X_train, y_train, batch_size=min(n_labeled_data, 256),
                     validation_data=(X_test, y_test), epochs=100).history
    with open(f"result_mobilenet/history_{n_labeled_data:05}.dat", "wb") as fp:
        pickle.dump(hist, fp)

    # tsne-plot
    # Embed test images with the layer just before the softmax head.
    emb_model = Model(model.input, model.layers[-2].output)
    embedding = emb_model.predict(X_test)
    proj = TSNE(n_components=2).fit_transform(embedding)
    cmp = plt.get_cmap("tab10")  # note: shadows the builtin `cmp` (Py2 only)
    plt.figure()
    for i in range(10):
        select_flag = y_test_true == i
        plt_latent = proj[select_flag, :]
        plt.scatter(plt_latent[:,0], plt_latent[:,1], color=cmp(i), marker=".")
    plt.savefig(f"result_mobilenet/embedding_{n_labeled_data:05}.png")
if __name__ == "__main__":
    # Run the experiment at several labeled-subset sizes, then archive results.
    n_batches = [500, 1000, 5000, 10000]
    for nb in n_batches:
        train(nb)
    # note: `zip` shadows the builtin inside this with-block
    with zipfile.ZipFile("result_mobilenet.zip", "w") as zip:
        for f in glob.glob("result_mobilenet/*"):
            zip.write(f)
| 31.597222 | 79 | 0.677802 |
8fb5d89b1344a66ea126f4891a7db1a591c69481 | 39,483 | py | Python | venv/Lib/site-packages/coverage/parser.py | MarcoDelMondo/python-docs-samples | 857a9c85db5b39b639747a6394e1cc776ff7ba4b | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/coverage/parser.py | MarcoDelMondo/python-docs-samples | 857a9c85db5b39b639747a6394e1cc776ff7ba4b | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/coverage/parser.py | MarcoDelMondo/python-docs-samples | 857a9c85db5b39b639747a6394e1cc776ff7ba4b | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Code parsing for coverage.py."""
import ast
import collections
import os
import re
import token
import tokenize
from coverage import env
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints, string_class
from coverage.bytecode import CodeObjects
from coverage.debug import short_stack
from coverage.misc import contract, new_contract, nice_pair, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
class PythonParser(object):
    """Parse code to find executable lines, excluded lines, etc.

    This information is all based on static analysis: no code execution is
    involved.
    """

    @contract(text='unicode|None')
    def __init__(self, text=None, filename=None, exclude=None):
        """
        Source can be provided as `text`, the text itself, or `filename`, from
        which the text will be read. Excluded lines are those that match
        `exclude`, a regex.
        """
        assert text or filename, "PythonParser needs either text or filename"
        self.filename = filename or "<code>"
        self.text = text
        if not self.text:
            # Imported lazily to avoid a circular import at module load time.
            from coverage.python import get_python_source
            try:
                self.text = get_python_source(self.filename)
            except IOError as err:
                raise NoSource(
                    "No source for code: '%s': %s" % (self.filename, err)
                )
        self.exclude = exclude

        # The text lines of the parsed code.
        self.lines = self.text.split('\n')

        # The normalized line numbers of the statements in the code. Exclusions
        # are taken into account, and statements are adjusted to their first
        # lines.
        self.statements = set()

        # The normalized line numbers of the excluded lines in the code,
        # adjusted to their first lines.
        self.excluded = set()

        # The raw_* attributes are only used in this class, and in
        # lab/parser.py to show how this class is working.

        # The line numbers that start statements, as reported by the line
        # number table in the bytecode.
        self.raw_statements = set()

        # The raw line numbers of excluded lines of code, as marked by pragmas.
        self.raw_excluded = set()

        # The line numbers of class and function definitions.
        self.raw_classdefs = set()

        # The line numbers of docstring lines.
        self.raw_docstrings = set()

        # Internal detail, used by lab/parser.py.
        self.show_tokens = False

        # A dict mapping line numbers to lexical statement starts for
        # multi-line statements.
        self._multiline = {}

        # Lazily-created ByteParser, arc data, and missing arc descriptions.
        self._byte_parser = None
        self._all_arcs = None
        self._missing_arc_fragments = None

    @property
    def byte_parser(self):
        """Create a ByteParser on demand."""
        if not self._byte_parser:
            self._byte_parser = ByteParser(self.text, filename=self.filename)
        return self._byte_parser

    def lines_matching(self, *regexes):
        """Find the lines matching one of a list of regexes.

        Returns a set of line numbers, the lines that contain a match for one
        of the regexes in `regexes`.  The entire line needn't match, just a
        part of it.
        """
        combined = join_regex(regexes)
        if env.PY2:
            combined = combined.decode("utf8")
        regex_c = re.compile(combined)
        matches = set()
        for i, ltext in enumerate(self.lines, start=1):
            if regex_c.search(ltext):
                matches.add(i)
        return matches

    def _raw_parse(self):
        """Parse the source to find the interesting facts about its lines.

        A handful of attributes are updated.
        """
        # Find lines which match an exclusion pattern.
        if self.exclude:
            self.raw_excluded = self.lines_matching(self.exclude)

        # Tokenize, to find excluded suites, to find docstrings, and to find
        # multi-line statements.
        indent = 0
        exclude_indent = 0
        excluding = False
        excluding_decorators = False
        prev_toktype = token.INDENT
        first_line = None
        empty = True
        first_on_line = True

        tokgen = generate_tokens(self.text)
        for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
            if self.show_tokens:                # pragma: not covered
                print("%10s %5s %-20r %r" % (
                    tokenize.tok_name.get(toktype, toktype),
                    nice_pair((slineno, elineno)), ttext, ltext
                ))
            if toktype == token.INDENT:
                indent += 1
            elif toktype == token.DEDENT:
                indent -= 1
            elif toktype == token.NAME:
                if ttext == 'class':
                    # Class definitions look like branches in the bytecode, so
                    # we need to exclude them.  The simplest way is to note the
                    # lines with the 'class' keyword.
                    self.raw_classdefs.add(slineno)
            elif toktype == token.OP:
                if ttext == ':':
                    should_exclude = (elineno in self.raw_excluded) or excluding_decorators
                    if not excluding and should_exclude:
                        # Start excluding a suite.  We trigger off of the colon
                        # token so that the #pragma comment will be recognized on
                        # the same line as the colon.
                        self.raw_excluded.add(elineno)
                        exclude_indent = indent
                        excluding = True
                        excluding_decorators = False
                elif ttext == '@' and first_on_line:
                    # A decorator.
                    if elineno in self.raw_excluded:
                        excluding_decorators = True
                    if excluding_decorators:
                        self.raw_excluded.add(elineno)
            elif toktype == token.STRING and prev_toktype == token.INDENT:
                # Strings that are first on an indented line are docstrings.
                # (a trick from trace.py in the stdlib.) This works for
                # 99.9999% of cases.  For the rest (!) see:
                # http://stackoverflow.com/questions/1769332/x/1769794#1769794
                self.raw_docstrings.update(range(slineno, elineno+1))
            elif toktype == token.NEWLINE:
                if first_line is not None and elineno != first_line:
                    # We're at the end of a line, and we've ended on a
                    # different line than the first line of the statement,
                    # so record a multi-line range.
                    for l in range(first_line, elineno+1):
                        self._multiline[l] = first_line
                first_line = None
                first_on_line = True

            if ttext.strip() and toktype != tokenize.COMMENT:
                # A non-whitespace token.
                empty = False
                if first_line is None:
                    # The token is not whitespace, and is the first in a
                    # statement.
                    first_line = slineno
                    # Check whether to end an excluded suite.
                    if excluding and indent <= exclude_indent:
                        excluding = False
                    if excluding:
                        self.raw_excluded.add(elineno)
                first_on_line = False

            prev_toktype = toktype

        # Find the starts of the executable statements.
        if not empty:
            self.raw_statements.update(self.byte_parser._find_statements())

    def first_line(self, line):
        """Return the first line number of the statement including `line`."""
        return self._multiline.get(line, line)

    def first_lines(self, lines):
        """Map the line numbers in `lines` to the correct first line of the
        statement.

        Returns a set of the first lines.
        """
        return set(self.first_line(l) for l in lines)

    def translate_lines(self, lines):
        """Implement `FileReporter.translate_lines`."""
        return self.first_lines(lines)

    def translate_arcs(self, arcs):
        """Implement `FileReporter.translate_arcs`."""
        return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]

    def parse_source(self):
        """Parse source text to find executable lines, excluded lines, etc.

        Sets the .excluded and .statements attributes, normalized to the first
        line of multi-line statements.
        """
        try:
            self._raw_parse()
        except (tokenize.TokenError, IndentationError) as err:
            if hasattr(err, "lineno"):
                lineno = err.lineno         # IndentationError
            else:
                lineno = err.args[1][0]     # TokenError
            raise NotPython(
                u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
                    self.filename, err.args[0], lineno
                )
            )

        self.excluded = self.first_lines(self.raw_excluded)

        # A line is a statement only if it isn't excluded and isn't a docstring.
        ignore = self.excluded | self.raw_docstrings
        starts = self.raw_statements - ignore
        self.statements = self.first_lines(starts) - ignore

    def arcs(self):
        """Get information about the arcs available in the code.

        Returns a set of line number pairs.  Line numbers have been normalized
        to the first line of multi-line statements.
        """
        if self._all_arcs is None:
            self._analyze_ast()
        return self._all_arcs

    def _analyze_ast(self):
        """Run the AstArcAnalyzer and save its results.

        `_all_arcs` is the set of arcs in the code.
        """
        aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
        aaa.analyze()

        self._all_arcs = set()
        for l1, l2 in aaa.arcs:
            fl1 = self.first_line(l1)
            fl2 = self.first_line(l2)
            if fl1 != fl2:
                self._all_arcs.add((fl1, fl2))

        self._missing_arc_fragments = aaa.missing_arc_fragments

    def exit_counts(self):
        """Get a count of exits from that each line.

        Excluded lines are excluded.
        """
        exit_counts = collections.defaultdict(int)
        for l1, l2 in self.arcs():
            if l1 < 0:
                # Don't ever report -1 as a line number
                continue
            if l1 in self.excluded:
                # Don't report excluded lines as line numbers.
                continue
            if l2 in self.excluded:
                # Arcs to excluded lines shouldn't count.
                continue
            exit_counts[l1] += 1

        # Class definitions have one extra exit, so remove one for each:
        for l in self.raw_classdefs:
            # Ensure key is there: class definitions can include excluded lines.
            if l in exit_counts:
                exit_counts[l] -= 1

        return exit_counts

    def missing_arc_description(self, start, end, executed_arcs=None):
        """Provide an English sentence describing a missing arc."""
        if self._missing_arc_fragments is None:
            self._analyze_ast()

        actual_start = start

        if (
            executed_arcs and
            end < 0 and end == -start and
            (end, start) not in executed_arcs and
            (end, start) in self._missing_arc_fragments
        ):
            # It's a one-line callable, and we never even started it,
            # and we have a message about not starting it.
            start, end = end, start

        # Each stored fragment is a (startmsg, endmsg) pair; None means
        # "no specific message for this half of the arc".
        fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])

        msgs = []
        for fragment_pair in fragment_pairs:
            smsg, emsg = fragment_pair

            if emsg is None:
                if end < 0:
                    # Hmm, maybe we have a one-line callable, let's check.
                    if (-end, end) in self._missing_arc_fragments:
                        return self.missing_arc_description(-end, end)
                    emsg = "didn't jump to the function exit"
                else:
                    emsg = "didn't jump to line {lineno}"
            emsg = emsg.format(lineno=end)

            msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg)
            if smsg is not None:
                msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start))

            msgs.append(msg)

        return " or ".join(msgs)
class ByteParser(object):
    """Parse bytecode to understand the structure of code."""

    @contract(text='unicode')
    def __init__(self, text, code=None, filename=None):
        self.text = text
        if code:
            self.code = code
        else:
            try:
                self.code = compile_unicode(text, filename, "exec")
            except SyntaxError as synerr:
                raise NotPython(
                    u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
                        filename, synerr.msg, synerr.lineno
                    )
                )

        # Alternative Python implementations don't always provide all the
        # attributes on code objects that we need to do the analysis.
        for attr in ['co_lnotab', 'co_firstlineno', 'co_consts']:
            if not hasattr(self.code, attr):
                raise CoverageException(
                    "This implementation of Python doesn't support code analysis.\n"
                    "Run coverage.py under CPython for this command."
                )

    def child_parsers(self):
        """Iterate over all the code objects nested within this one.

        The iteration includes `self` as its first value.
        """
        children = CodeObjects(self.code)
        return (ByteParser(self.text, code=c) for c in children)

    def _bytes_lines(self):
        """Map byte offsets to line numbers in `code`.

        Uses co_lnotab described in Python/compile.c to map byte offsets to
        line numbers.  Produces a sequence: (b0, l0), (b1, l1), ...

        Only byte offsets that correspond to line numbers are included in the
        results.
        """
        # Adapted from dis.py in the standard library.
        # co_lnotab interleaves (byte_delta, line_delta) pairs, hence the
        # [0::2] / [1::2] slicing.
        byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
        line_increments = bytes_to_ints(self.code.co_lnotab[1::2])

        last_line_num = None
        line_num = self.code.co_firstlineno
        byte_num = 0
        for byte_incr, line_incr in zip(byte_increments, line_increments):
            if byte_incr:
                if line_num != last_line_num:
                    yield (byte_num, line_num)
                    last_line_num = line_num
                byte_num += byte_incr
            line_num += line_incr
        if line_num != last_line_num:
            yield (byte_num, line_num)

    def _find_statements(self):
        """Find the statements in `self.code`.

        Produce a sequence of line numbers that start statements.  Recurses
        into all code objects reachable from `self.code`.
        """
        for bp in self.child_parsers():
            # Get all of the lineno information from this code.
            for _, l in bp._bytes_lines():
                yield l
#
# AST analysis
#
class LoopBlock(object):
    """A block on the block stack representing a `for` or `while` loop."""
    def __init__(self, start):
        # Line number of the loop header; `continue` arcs jump back to it.
        self.start = start
        # ArcStarts collected from `break` statements inside the loop body.
        self.break_exits = set()
class FunctionBlock(object):
    """A block on the block stack representing a function definition."""
    def __init__(self, start, name):
        # Line number of the definition; -start is used as the exit "line".
        self.start = start
        # Function name, used in missing-arc messages.
        self.name = name
class TryBlock(object):
    """A block on the block stack representing a `try` block."""
    def __init__(self, handler_start=None, final_start=None):
        # First line of the first `except` clause, if any.
        self.handler_start = handler_start
        # First line of the `finally` clause, if any.
        self.final_start = final_start
        # ArcStarts from break/continue/return/raise statements that must
        # pass through the `finally` clause before leaving the block.
        self.break_from = set()
        self.continue_from = set()
        self.return_from = set()
        self.raise_from = set()
class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
    """A (lineno, cause) pair describing where an arc begins.

    `lineno` is the line number the arc starts from.  `cause` is a message
    fragment used as the startmsg for AstArcAnalyzer.missing_arc_fragments.
    """
    def __new__(cls, lineno, cause=None):
        # The only reason to override __new__ is to give `cause` a default.
        return tuple.__new__(cls, (lineno, cause))
# Define contract words that PyContract doesn't have.
# 'ArcStarts' describes a list or set of ArcStart objects; it is used in the
# @contract decorators on AstArcAnalyzer methods below.
new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
class AstArcAnalyzer(object):
    """Analyze source text with an AST to find executable code paths."""
    # Convention used throughout: a negative line number -N stands for the
    # exit of the code object that starts at line N (module, function, class).

    @contract(text='unicode', statements=set)
    def __init__(self, text, statements, multiline):
        self.root_node = ast.parse(neuter_encoding_declaration(text))
        # TODO: I think this is happening in too many places.
        self.statements = set(multiline.get(l, l) for l in statements)
        self.multiline = multiline

        if int(os.environ.get("COVERAGE_ASTDUMP", 0)):      # pragma: debugging
            # Dump the AST so that failing tests have helpful output.
            print("Statements: {}".format(self.statements))
            print("Multiline map: {}".format(self.multiline))
            ast_dump(self.root_node)

        self.arcs = set()

        # A map from arc pairs to a pair of sentence fragments: (startmsg, endmsg).
        # For an arc from line 17, they should be usable like:
        #    "Line 17 {endmsg}, because {startmsg}"
        self.missing_arc_fragments = collections.defaultdict(list)
        self.block_stack = []

        self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))

    def analyze(self):
        """Examine the AST tree from `root_node` to determine possible arcs.

        This sets the `arcs` attribute to be a set of (from, to) line number
        pairs.
        """
        for node in ast.walk(self.root_node):
            node_name = node.__class__.__name__
            code_object_handler = getattr(self, "_code_object__" + node_name, None)
            if code_object_handler is not None:
                code_object_handler(node)

    def add_arc(self, start, end, smsg=None, emsg=None):
        """Add an arc, including message fragments to use if it is missing."""
        if self.debug:
            print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg))
            print(short_stack(limit=6))
        self.arcs.add((start, end))

        if smsg is not None or emsg is not None:
            self.missing_arc_fragments[(start, end)].append((smsg, emsg))

    def nearest_blocks(self):
        """Yield the blocks in nearest-to-farthest order."""
        return reversed(self.block_stack)

    @contract(returns=int)
    def line_for_node(self, node):
        """What is the right line number to use for this node?

        This dispatches to _line__Node functions where needed.
        """
        node_name = node.__class__.__name__
        handler = getattr(self, "_line__" + node_name, None)
        if handler is not None:
            return handler(node)
        else:
            return node.lineno

    def _line__Assign(self, node):
        return self.line_for_node(node.value)

    def _line__Dict(self, node):
        # Python 3.5 changed how dict literals are made.
        if env.PYVERSION >= (3, 5) and node.keys:
            if node.keys[0] is not None:
                return node.keys[0].lineno
            else:
                # Unpacked dict literals `{**{'a':1}}` have None as the key,
                # use the value in that case.
                return node.values[0].lineno
        else:
            return node.lineno

    def _line__List(self, node):
        if node.elts:
            return self.line_for_node(node.elts[0])
        else:
            return node.lineno

    def _line__Module(self, node):
        if node.body:
            return self.line_for_node(node.body[0])
        else:
            # Modules have no line number, they always start at 1.
            return 1

    OK_TO_DEFAULT = set([
        "Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
        "Import", "ImportFrom", "Nonlocal", "Pass", "Print",
    ])

    @contract(returns='ArcStarts')
    def add_arcs(self, node):
        """Add the arcs for `node`.

        Return a set of ArcStarts, exits from this node to the next.
        """
        node_name = node.__class__.__name__
        handler = getattr(self, "_handle__" + node_name, None)
        if handler is not None:
            return handler(node)

        if 0:
            node_name = node.__class__.__name__
            if node_name not in self.OK_TO_DEFAULT:
                print("*** Unhandled: {0}".format(node))
        return set([ArcStart(self.line_for_node(node), cause=None)])

    @contract(returns='ArcStarts')
    def add_body_arcs(self, body, from_start=None, prev_starts=None):
        """Add arcs for the body of a compound statement.

        `body` is the body node.  `from_start` is a single `ArcStart` that can
        be the previous line in flow before this body.  `prev_starts` is a set
        of ArcStarts that can be the previous line.  Only one of them should be
        given.

        Returns a set of ArcStarts, the exits from this body.
        """
        if prev_starts is None:
            prev_starts = set([from_start])
        for body_node in body:
            lineno = self.line_for_node(body_node)
            first_line = self.multiline.get(lineno, lineno)
            if first_line not in self.statements:
                continue
            for prev_start in prev_starts:
                self.add_arc(prev_start.lineno, lineno, prev_start.cause)
            prev_starts = self.add_arcs(body_node)
        return prev_starts

    def is_constant_expr(self, node):
        """Is this a compile-time constant?"""
        node_name = node.__class__.__name__
        if node_name in ["NameConstant", "Num"]:
            return True
        elif node_name == "Name":
            if env.PY3 and node.id in ["True", "False", "None"]:
                return True
        return False

    # tests to write:
    # TODO: while EXPR:
    # TODO: while False:
    # TODO: listcomps hidden deep in other expressions
    # TODO: listcomps hidden in lists: x = [[i for i in range(10)]]
    # TODO: nested function definitions

    # The process_*_exits methods route pending exits (breaks, continues,
    # raises, returns) to the nearest enclosing block that handles them.

    @contract(exits='ArcStarts')
    def process_break_exits(self, exits):
        """Add arcs due to jumps from `exits` being breaks."""
        for block in self.nearest_blocks():
            if isinstance(block, LoopBlock):
                block.break_exits.update(exits)
                break
            elif isinstance(block, TryBlock) and block.final_start is not None:
                block.break_from.update(exits)
                break

    @contract(exits='ArcStarts')
    def process_continue_exits(self, exits):
        """Add arcs due to jumps from `exits` being continues."""
        for block in self.nearest_blocks():
            if isinstance(block, LoopBlock):
                for xit in exits:
                    self.add_arc(xit.lineno, block.start, xit.cause)
                break
            elif isinstance(block, TryBlock) and block.final_start is not None:
                block.continue_from.update(exits)
                break

    @contract(exits='ArcStarts')
    def process_raise_exits(self, exits):
        """Add arcs due to jumps from `exits` being raises."""
        for block in self.nearest_blocks():
            if isinstance(block, TryBlock):
                if block.handler_start is not None:
                    for xit in exits:
                        self.add_arc(xit.lineno, block.handler_start, xit.cause)
                    break
                elif block.final_start is not None:
                    block.raise_from.update(exits)
                    break
            elif isinstance(block, FunctionBlock):
                for xit in exits:
                    self.add_arc(
                        xit.lineno, -block.start, xit.cause,
                        "didn't except from function '{0}'".format(block.name),
                    )
                break

    @contract(exits='ArcStarts')
    def process_return_exits(self, exits):
        """Add arcs due to jumps from `exits` being returns."""
        for block in self.nearest_blocks():
            if isinstance(block, TryBlock) and block.final_start is not None:
                block.return_from.update(exits)
                break
            elif isinstance(block, FunctionBlock):
                for xit in exits:
                    self.add_arc(
                        xit.lineno, -block.start, xit.cause,
                        "didn't return from function '{0}'".format(block.name),
                    )
                break

    ## Handlers

    @contract(returns='ArcStarts')
    def _handle__Break(self, node):
        here = self.line_for_node(node)
        break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
        self.process_break_exits([break_start])
        return set()

    @contract(returns='ArcStarts')
    def _handle_decorated(self, node):
        """Add arcs for things that can be decorated (classes and functions)."""
        last = self.line_for_node(node)
        if node.decorator_list:
            for dec_node in node.decorator_list:
                dec_start = self.line_for_node(dec_node)
                if dec_start != last:
                    self.add_arc(last, dec_start)
                last = dec_start
        # The definition line may have been missed, but we should have it
        # in `self.statements`.  For some constructs, `line_for_node` is
        # not what we'd think of as the first line in the statement, so map
        # it to the first one.
        body_start = self.line_for_node(node.body[0])
        body_start = self.multiline.get(body_start, body_start)
        for lineno in range(last+1, body_start):
            if lineno in self.statements:
                self.add_arc(last, lineno)
                last = lineno
        # The body is handled in collect_arcs.
        return set([ArcStart(last, cause=None)])

    _handle__ClassDef = _handle_decorated

    @contract(returns='ArcStarts')
    def _handle__Continue(self, node):
        here = self.line_for_node(node)
        continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
        self.process_continue_exits([continue_start])
        return set()

    @contract(returns='ArcStarts')
    def _handle__For(self, node):
        start = self.line_for_node(node.iter)
        self.block_stack.append(LoopBlock(start=start))
        from_start = ArcStart(start, cause="the loop on line {lineno} never started")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        # Any exit from the body will go back to the top of the loop.
        for xit in exits:
            self.add_arc(xit.lineno, start, xit.cause)
        my_block = self.block_stack.pop()
        exits = my_block.break_exits
        from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
        if node.orelse:
            else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
            exits |= else_exits
        else:
            # no else clause: exit from the for line.
            exits.add(from_start)
        return exits

    _handle__AsyncFor = _handle__For

    _handle__FunctionDef = _handle_decorated
    _handle__AsyncFunctionDef = _handle_decorated

    @contract(returns='ArcStarts')
    def _handle__If(self, node):
        start = self.line_for_node(node.test)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
        exits |= self.add_body_arcs(node.orelse, from_start=from_start)
        return exits

    @contract(returns='ArcStarts')
    def _handle__Raise(self, node):
        here = self.line_for_node(node)
        raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
        self.process_raise_exits([raise_start])
        # `raise` statement jumps away, no exits from here.
        return set()

    @contract(returns='ArcStarts')
    def _handle__Return(self, node):
        here = self.line_for_node(node)
        return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
        self.process_return_exits([return_start])
        # `return` statement jumps away, no exits from here.
        return set()

    @contract(returns='ArcStarts')
    def _handle__Try(self, node):
        if node.handlers:
            handler_start = self.line_for_node(node.handlers[0])
        else:
            handler_start = None

        if node.finalbody:
            final_start = self.line_for_node(node.finalbody[0])
        else:
            final_start = None

        try_block = TryBlock(handler_start=handler_start, final_start=final_start)
        self.block_stack.append(try_block)

        start = self.line_for_node(node)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start, cause=None))

        # We're done with the `try` body, so this block no longer handles
        # exceptions.  We keep the block so the `finally` clause can pick up
        # flows from the handlers and `else` clause.
        if node.finalbody:
            try_block.handler_start = None
            if node.handlers:
                # If there are `except` clauses, then raises in the try body
                # will already jump to them.  Start this set over for raises in
                # `except` and `else`.
                try_block.raise_from = set([])
        else:
            self.block_stack.pop()

        handler_exits = set()

        if node.handlers:
            last_handler_start = None
            for handler_node in node.handlers:
                handler_start = self.line_for_node(handler_node)
                if last_handler_start is not None:
                    self.add_arc(last_handler_start, handler_start)
                last_handler_start = handler_start
                from_cause = "the exception caught by line {lineno} didn't happen"
                from_start = ArcStart(handler_start, cause=from_cause)
                handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)

        if node.orelse:
            exits = self.add_body_arcs(node.orelse, prev_starts=exits)

        exits |= handler_exits

        if node.finalbody:
            self.block_stack.pop()
            final_from = (                  # You can get to the `finally` clause from:
                exits |                     # the exits of the body or `else` clause,
                try_block.break_from |      # or a `break`,
                try_block.continue_from |   # or a `continue`,
                try_block.raise_from |      # or a `raise`,
                try_block.return_from       # or a `return`.
            )

            exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
            if try_block.break_from:
                break_exits = self._combine_finally_starts(try_block.break_from, exits)
                self.process_break_exits(break_exits)
            if try_block.continue_from:
                continue_exits = self._combine_finally_starts(try_block.continue_from, exits)
                self.process_continue_exits(continue_exits)
            if try_block.raise_from:
                raise_exits = self._combine_finally_starts(try_block.raise_from, exits)
                self.process_raise_exits(raise_exits)
            if try_block.return_from:
                return_exits = self._combine_finally_starts(try_block.return_from, exits)
                self.process_return_exits(return_exits)

        return exits

    def _combine_finally_starts(self, starts, exits):
        """Helper for building the cause of `finally` branches."""
        causes = []
        for lineno, cause in sorted(starts):
            if cause is not None:
                causes.append(cause.format(lineno=lineno))
        cause = " or ".join(causes)
        exits = set(ArcStart(ex.lineno, cause) for ex in exits)
        return exits

    @contract(returns='ArcStarts')
    def _handle__TryExcept(self, node):
        # Python 2.7 uses separate TryExcept and TryFinally nodes.  If we get
        # TryExcept, it means there was no finally, so fake it, and treat as
        # a general Try node.
        node.finalbody = []
        return self._handle__Try(node)

    @contract(returns='ArcStarts')
    def _handle__TryFinally(self, node):
        # Python 2.7 uses separate TryExcept and TryFinally nodes.  If we get
        # TryFinally, see if there's a TryExcept nested inside.  If so, merge
        # them.  Otherwise, fake fields to complete a Try node.
        node.handlers = []
        node.orelse = []

        first = node.body[0]
        if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
            assert len(node.body) == 1
            node.body = first.body
            node.handlers = first.handlers
            node.orelse = first.orelse

        return self._handle__Try(node)

    @contract(returns='ArcStarts')
    def _handle__While(self, node):
        constant_test = self.is_constant_expr(node.test)
        start = to_top = self.line_for_node(node.test)
        if constant_test:
            to_top = self.line_for_node(node.body[0])
        self.block_stack.append(LoopBlock(start=start))
        from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
        exits = self.add_body_arcs(node.body, from_start=from_start)
        for xit in exits:
            self.add_arc(xit.lineno, to_top, xit.cause)
        exits = set()
        my_block = self.block_stack.pop()
        exits.update(my_block.break_exits)
        from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
        if node.orelse:
            else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
            exits |= else_exits
        else:
            # No `else` clause: you can exit from the start.
            if not constant_test:
                exits.add(from_start)
        return exits

    @contract(returns='ArcStarts')
    def _handle__With(self, node):
        start = self.line_for_node(node)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
        return exits

    _handle__AsyncWith = _handle__With

    # Code-object handlers: analyze() dispatches nodes that create a new
    # code object (module, function, class, lambda, comprehension) here.

    def _code_object__Module(self, node):
        start = self.line_for_node(node)
        if node.body:
            exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
            for xit in exits:
                self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
        else:
            # Empty module.
            self.add_arc(-start, start)
            self.add_arc(start, -start)

    def _code_object__FunctionDef(self, node):
        start = self.line_for_node(node)
        self.block_stack.append(FunctionBlock(start=start, name=node.name))
        exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
        self.process_return_exits(exits)
        self.block_stack.pop()

    _code_object__AsyncFunctionDef = _code_object__FunctionDef

    def _code_object__ClassDef(self, node):
        start = self.line_for_node(node)
        self.add_arc(-start, start)
        exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
        for xit in exits:
            self.add_arc(
                xit.lineno, -start, xit.cause,
                "didn't exit the body of class '{0}'".format(node.name),
            )

    def _make_oneline_code_method(noun):     # pylint: disable=no-self-argument
        """A function to make methods for online callable _code_object__ methods."""
        def _code_object__oneline_callable(self, node):
            start = self.line_for_node(node)
            self.add_arc(-start, start, None, "didn't run the {0} on line {1}".format(noun, start))
            self.add_arc(
                start, -start, None,
                "didn't finish the {0} on line {1}".format(noun, start),
            )
        return _code_object__oneline_callable

    _code_object__Lambda = _make_oneline_code_method("lambda")
    _code_object__GeneratorExp = _make_oneline_code_method("generator expression")
    _code_object__DictComp = _make_oneline_code_method("dictionary comprehension")
    _code_object__SetComp = _make_oneline_code_method("set comprehension")
    if env.PY3:
        _code_object__ListComp = _make_oneline_code_method("list comprehension")
SKIP_DUMP_FIELDS = ["ctx"]
def _is_simple_value(value):
"""Is `value` simple enough to be displayed on a single line?"""
return (
value in [None, [], (), {}, set()] or
isinstance(value, (string_class, int, float))
)
# TODO: a test of ast_dump?
def ast_dump(node, depth=0):
    """Dump the AST for `node`.

    This recursively walks the AST, printing a readable version.
    """
    indent = " " * depth
    if not isinstance(node, ast.AST):
        # Leaf values that aren't AST nodes (ints, strings, ...) print as-is.
        print("{0}<{1} {2!r}>".format(indent, node.__class__.__name__, node))
        return

    lineno = getattr(node, "lineno", None)
    if lineno is not None:
        linemark = " @ {0}".format(node.lineno)
    else:
        linemark = ""
    # `head` is left open; it is closed with ">" once we know how the
    # node's fields will be laid out.
    head = "{0}<{1}{2}".format(indent, node.__class__.__name__, linemark)

    named_fields = [
        (name, value)
        for name, value in ast.iter_fields(node)
        if name not in SKIP_DUMP_FIELDS
    ]
    if not named_fields:
        print("{0}>".format(head))
    elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
        field_name, value = named_fields[0]
        print("{0} {1}: {2!r}>".format(head, field_name, value))
    else:
        print(head)
        if 0:
            print("{0}# mro: {1}".format(
                indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
            ))
        next_indent = indent + " "
        for field_name, value in named_fields:
            prefix = "{0}{1}:".format(next_indent, field_name)
            if _is_simple_value(value):
                print("{0} {1!r}".format(prefix, value))
            elif isinstance(value, list):
                print("{0} [".format(prefix))
                for n in value:
                    ast_dump(n, depth + 8)
                print("{0}]".format(next_indent))
            else:
                print(prefix)
                ast_dump(value, depth + 8)
        print("{0}>".format(indent))
| 38.258721 | 99 | 0.594737 |
b61696cac392ae302491877370e52e4a8be2048d | 300 | py | Python | leetcode/medium/201-bitwise_and_num_range.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | 1 | 2020-02-25T10:32:27.000Z | 2020-02-25T10:32:27.000Z | leetcode/medium/201-bitwise_and_num_range.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | null | null | null | leetcode/medium/201-bitwise_and_num_range.py | shubhamoli/practice | 5a24fdeb6e5f43b821ef0510fe3b343ddda18f22 | [
"MIT"
] | null | null | null | """
Leetcode #201
"""
class Solution:
    """Leetcode #201: bitwise AND of all integers in a range."""

    def rangeBitwiseAnd(self, m: int, n: int) -> int:
        """Return m & (m+1) & ... & n.

        Every bit below the common binary prefix of m and n is ANDed away by
        some value in the range, so the answer is that prefix, re-shifted.
        """
        shift = 0
        while m != n:
            m, n = m >> 1, n >> 1
            shift += 1
        return n << shift
if __name__ == "__main__":
    # Smoke test from the problem statement: 5 & 6 & 7 == 4.
    assert Solution().rangeBitwiseAnd(5, 7) == 4
| 14.285714 | 53 | 0.433333 |
0dff1489c980999791ea5bb39579caaf69e1af73 | 266 | py | Python | adventofcode/day23/test_day23.py | EikaNN/AdventOfCode2017 | 21bac909b5c586224e904803332ef2cf01fc1e16 | [
"MIT"
] | 2 | 2018-01-04T23:26:23.000Z | 2018-07-05T14:02:44.000Z | adventofcode/day23/test_day23.py | EikaNN/AdventOfCode2017 | 21bac909b5c586224e904803332ef2cf01fc1e16 | [
"MIT"
] | 1 | 2017-12-26T23:12:39.000Z | 2017-12-31T11:46:25.000Z | adventofcode/day23/test_day23.py | EikaNN/AdventOfCode2017 | 21bac909b5c586224e904803332ef2cf01fc1e16 | [
"MIT"
] | null | null | null | import unittest
class Day23Test(unittest.TestCase):
    # One of the reasons this puzzle is original is that it has no tests :)
    def test_part_one(self):
        # The puzzle statement provides no sample input/output for part one,
        # so there is nothing to assert here.
        pass

    def test_part_two(self):
        # Likewise, no sample input/output exists for part two.
        pass
if __name__ == '__main__':
unittest.main()
| 15.647059 | 75 | 0.657895 |
359b96636dba7a0f409abda2e903d6dbd9e0ac49 | 11,435 | py | Python | pybind/nos/v7_1_0/brocade_ha_rpc/reload/input/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/brocade_ha_rpc/reload/input/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/brocade_ha_rpc/reload/input/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-ha - based on the path /brocade_ha_rpc/reload/input. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  Carries the arguments of the 'reload' RPC: rbridge-id (which switch to
  reload), system (reboot the chassis) and standby (reboot the standby MM).
  NOTE(review): generated code -- prefer regenerating from the YANG model
  over hand edits.
  """
  # Restrict instances to exactly the generated attributes (catches typos,
  # saves per-instance memory).
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__rbridge_id','__system','__standby',)
  _yang_name = 'input'
  _rest_name = 'input'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit keyword wins, then the parent's
    # helper, otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Same resolution order for extension methods.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Leaf defaults: 'standby' and 'system' are YANG 'empty' leaves (booleans
    # here); 'rbridge-id' is a 1..3 character switch-id string.
    self.__standby = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="standby", rest_name="standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the standby MM'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)
    self.__rbridge_id = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..3']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Rbridge-id (use only with reload system)', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='ras-extensions:switchid-type', is_config=True)
    self.__system = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="system", rest_name="system", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the chassis'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)
    # Optional positional argument: copy changed elements from a compatible
    # pybind object (used when loading existing configuration).
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path of this node, assembled by walking the parent chain.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'brocade_ha_rpc', u'reload', u'input']
  def _rest_path(self):
    # REST URI path; nodes without a rest name are omitted from the URI.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'reload', u'input']
  def _get_rbridge_id(self):
    """
    Getter method for rbridge_id, mapped from YANG variable /brocade_ha_rpc/reload/input/rbridge_id (ras-extensions:switchid-type)
    """
    return self.__rbridge_id
  def _set_rbridge_id(self, v, load=False):
    """
    Setter method for rbridge_id, mapped from YANG variable /brocade_ha_rpc/reload/input/rbridge_id (ras-extensions:switchid-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_rbridge_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_rbridge_id() directly.
    """
    # Re-wrap the value in a YANGDynClass so type restrictions are enforced;
    # incompatible values surface as a structured ValueError.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..3']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Rbridge-id (use only with reload system)', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='ras-extensions:switchid-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """rbridge_id must be of a type compatible with ras-extensions:switchid-type""",
          'defined-type': "ras-extensions:switchid-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..3']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Rbridge-id (use only with reload system)', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='ras-extensions:switchid-type', is_config=True)""",
        })
    self.__rbridge_id = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_rbridge_id(self):
    # Restore the generated default leaf value.
    self.__rbridge_id = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..3']}), is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Rbridge-id (use only with reload system)', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='ras-extensions:switchid-type', is_config=True)
  def _get_system(self):
    """
    Getter method for system, mapped from YANG variable /brocade_ha_rpc/reload/input/system (empty)
    """
    return self.__system
  def _set_system(self, v, load=False):
    """
    Setter method for system, mapped from YANG variable /brocade_ha_rpc/reload/input/system (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_system is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="system", rest_name="system", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the chassis'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """system must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="system", rest_name="system", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the chassis'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)""",
        })
    self.__system = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_system(self):
    self.__system = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="system", rest_name="system", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the chassis'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)
  def _get_standby(self):
    """
    Getter method for standby, mapped from YANG variable /brocade_ha_rpc/reload/input/standby (empty)
    """
    return self.__standby
  def _set_standby(self, v, load=False):
    """
    Setter method for standby, mapped from YANG variable /brocade_ha_rpc/reload/input/standby (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_standby is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_standby() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="standby", rest_name="standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the standby MM'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """standby must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="standby", rest_name="standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the standby MM'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)""",
        })
    self.__standby = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_standby(self):
    self.__standby = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="standby", rest_name="standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Reboots the standby MM'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='empty', is_config=True)
  # Public attribute access goes through properties bound to the generated
  # getter/setter pairs (Python 2: __builtin__.property).
  rbridge_id = __builtin__.property(_get_rbridge_id, _set_rbridge_id)
  system = __builtin__.property(_get_system, _set_system)
  standby = __builtin__.property(_get_standby, _set_standby)
  _pyangbind_elements = {'rbridge_id': rbridge_id, 'system': system, 'standby': standby, }
| 59.248705 | 602 | 0.71596 |
2d9379de6cf7ac52998bf17a5d2181d634d25aea | 185 | py | Python | backend/instagram_utility/application_instagram_api/urls.py | Joeffison/InstagramUtility | 530572534eeed54d02d8ff24c7a59aa13239d45c | [
"MIT"
] | 1 | 2017-12-28T21:17:23.000Z | 2017-12-28T21:17:23.000Z | backend/instagram_utility/application_instagram_api/urls.py | Joeffison/InstagramUtility | 530572534eeed54d02d8ff24c7a59aa13239d45c | [
"MIT"
] | null | null | null | backend/instagram_utility/application_instagram_api/urls.py | Joeffison/InstagramUtility | 530572534eeed54d02d8ff24c7a59aa13239d45c | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
# Route table for this app's API endpoints; each path maps to a view callable.
urlpatterns = [
    # NOTE(review): 'login' serving views.model_list looks odd -- confirm intent.
    url(r'^login/$', views.model_list),
    url(r'^follow/$', views.follow),
    url(r'^unfollow/$', views.unfollow),
]
| 18.5 | 38 | 0.664865 |
6bc79b5cd5a0030e7c7d0676d0c8ca70872755c7 | 413 | py | Python | Intermediate/30/key_handling.py | Matthew1906/100DaysOfPython | 94ffff8f5535ce5d574f49c0d7971d64a4575aad | [
"MIT"
] | 1 | 2021-12-25T02:19:18.000Z | 2021-12-25T02:19:18.000Z | Intermediate/30/key_handling.py | Matthew1906/100DaysOfPython | 94ffff8f5535ce5d574f49c0d7971d64a4575aad | [
"MIT"
] | null | null | null | Intermediate/30/key_handling.py | Matthew1906/100DaysOfPython | 94ffff8f5535ce5d574f49c0d7971d64a4575aad | [
"MIT"
] | 1 | 2021-11-25T10:31:47.000Z | 2021-11-25T10:31:47.000Z | facebook_posts = [
{'Likes': 21, 'Comments': 2},
{'Likes': 13, 'Comments': 2, 'Shares': 1},
{'Likes': 33, 'Comments': 8, 'Shares': 3},
{'Comments': 4, 'Shares': 2},
{'Comments': 1, 'Shares': 1},
{'Likes': 19, 'Comments': 3}
]
# Sum the 'Likes' counts across all posts; a post without the key
# contributes zero (not every post records every metric).
total_likes = sum(post.get('Likes', 0) for post in facebook_posts)
print(total_likes)
c8f3e365dd613faf80318df8eb31f4d1381439aa | 656 | py | Python | Bar_Tapping_Project/Bar_tab.py | InamdarAbid/PythonTutorial | 6aaecf7bafe5b0031159187985dcd9638a1f7e28 | [
"MIT"
] | 1 | 2019-01-04T07:40:26.000Z | 2019-01-04T07:40:26.000Z | Bar_Tapping_Project/Bar_tab.py | InamdarAbid/PythonTutorial | 6aaecf7bafe5b0031159187985dcd9638a1f7e28 | [
"MIT"
] | null | null | null | Bar_Tapping_Project/Bar_tab.py | InamdarAbid/PythonTutorial | 6aaecf7bafe5b0031159187985dcd9638a1f7e28 | [
"MIT"
] | null | null | null | class Tab:
#Menu will be fixed for all tables of bar
menu ={
'Wine':5,
'Beer':3,
'Soft drink':2,
'Chicken':10,
'Mutton':15,
'Veg':8,
'Desert':6
}
def __init__(self):
self.total = 0
self.items= []
def add_item(self,item):
self.items.append(item)
self.total = self.total + self.menu[item]
def print_bill(self,gst):
gst = self.total * gst /100
total = self.total + gst
for item in self.items:
print(f'{item:15} Rs. {self.menu[item]}')
print(f'{"Amount payble":15} Rs. {total:.2f}')
| 23.428571 | 54 | 0.490854 |
a24ff8834a2d77a8637fa08c06fc3452e05e8c9e | 156 | py | Python | src/learners/__init__.py | vanstrn/LICA | 9dcc598f3b5329e2b266a319ac9552bb962c59ae | [
"MIT"
] | null | null | null | src/learners/__init__.py | vanstrn/LICA | 9dcc598f3b5329e2b266a319ac9552bb962c59ae | [
"MIT"
] | null | null | null | src/learners/__init__.py | vanstrn/LICA | 9dcc598f3b5329e2b266a319ac9552bb962c59ae | [
"MIT"
] | null | null | null | from .lica_learner import LICALearner,LICALearner_CNN
# Registry mapping learner-config names to their implementing classes.
REGISTRY = {
    "lica_learner": LICALearner,
    "lica_learner_cnn": LICALearner_CNN,
}
| 22.285714 | 53 | 0.807692 |
da8f70abd7643c09af4e6014a98b5dad55192eaa | 441 | py | Python | tests/admin.py | awais786/django-stdimage | 6c043a08a892b75202b96a73615588d8e16aa9e1 | [
"MIT"
] | 274 | 2015-01-13T10:31:48.000Z | 2022-02-06T16:22:03.000Z | tests/admin.py | awais786/django-stdimage | 6c043a08a892b75202b96a73615588d8e16aa9e1 | [
"MIT"
] | 167 | 2015-01-06T09:51:45.000Z | 2022-03-28T21:15:16.000Z | tests/admin.py | sunnysavera/django-stdimage | ef9a2eb4843f0de41117a5eec9ba04962181d23d | [
"MIT"
] | 76 | 2015-01-09T00:13:56.000Z | 2021-12-20T22:08:39.000Z | from django.contrib import admin
from . import models
# Register every test model with the Django admin site, in the same order
# as before (one register call per model).
for _model in (
    models.AdminDeleteModel,
    models.AdminUpdateModel,
    models.ResizeCropModel,
    models.ResizeModel,
    models.SimpleModel,
    models.ThumbnailModel,
    models.MaxSizeModel,
    models.MinSizeModel,
    models.ForceMinSizeModel,
):
    admin.site.register(_model)
| 31.5 | 45 | 0.85034 |
056165ff2b030db9151acb5a13d7f838dc325dc2 | 3,210 | py | Python | utils/viz/viz_pose.py | jwen307/MLBase | 06220d058a44b97831a222153ab980e1452a167f | [
"MIT"
] | null | null | null | utils/viz/viz_pose.py | jwen307/MLBase | 06220d058a44b97831a222153ab980e1452a167f | [
"MIT"
] | null | null | null | utils/viz/viz_pose.py | jwen307/MLBase | 06220d058a44b97831a222153ab980e1452a167f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 12:11:20 2021
@author: jeff
"""
import sys
import os
import os.path as osp
import argparse
import numpy as np
import cv2
import math
import torch
import torchvision.transforms as transforms
import torchvision
import matplotlib.pyplot as plt
from torch.nn.parallel.data_parallel import DataParallel
import torch.backends.cudnn as cudnn
import torchvision.transforms.functional as F
from tqdm import tqdm
import open3d as o3d
import time
sys.path.insert(0, osp.join('posenet', 'posenet_common'))
from posenet_utils.vis import vis_keypoints, vis_3d_multiple_skeleton
# Save each predicted 2D pose overlaid on its input image as a JPEG file.
def vis_2d_pose(imgs, output_pose_2d_list, skeleton, joint_num, abs_counts, output_dir):
    """Write 'output_pose_2d<idx>.jpg' files, one per image with a valid pose.

    Entries of output_pose_2d_list that are not numpy arrays (e.g. frames
    with no detection) are skipped.  Assumes imgs[i] is a torch CHW tensor
    with values in [0, 1] (scaled by 255 below) -- TODO confirm at callers.
    """
    for i in range(len(imgs)):
        if isinstance(output_pose_2d_list[i],np.ndarray):
            # visualize 2d poses
            # Torch CHW -> numpy HWC copy so OpenCV can draw on it.
            vis_img = (imgs[i]*255).cpu().permute(1,2,0).numpy().copy()
            # Keypoints as a 3 x joint_num array: rows are x, y, confidence
            # (confidence fixed to 1 so every joint is drawn).
            vis_kps = np.zeros((3,joint_num))
            vis_kps[0,:] = output_pose_2d_list[i][:,0]
            vis_kps[1,:] = output_pose_2d_list[i][:,1]
            vis_kps[2,:] = 1
            vis_img = vis_keypoints(cv2.cvtColor(vis_img,cv2.COLOR_RGB2BGR), vis_kps, skeleton)
            cv2.imwrite(output_dir + 'output_pose_2d{0}.jpg'.format(abs_counts[i]), vis_img)
    # Note: OpenCV uses BGR channel order by default, hence the
    # cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR) conversion above.
def scene_4d_human(mesh_dir, extrinsics, world_poses, skeleton):
    """Animate a 3D human skeleton and a camera marker inside a scene mesh.

    mesh_dir: path to the scene's triangle mesh; extrinsics: per-frame 4x4
    camera poses; world_poses: per-frame joint positions (np.ndarray) or a
    non-array placeholder for frames with no pose; skeleton: joint-index
    pairs defining the bone edges.  WARNING: mutates each extrinsics[i]
    in place (the z translation is shifted by +1.5 below).
    """
    #Get the mesh of the scene
    mesh= o3d.io.read_triangle_mesh(mesh_dir)
    mesh.compute_vertex_normals()
    #Visualize the scene
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    vis.add_geometry(mesh)
    # Seed the line set with the first frame that actually has a pose.
    first_pose = []
    for i, pose in enumerate(world_poses):
        if isinstance(pose,np.ndarray):
            first_pose = pose
            break
    #Initialize the line_set geometry
    line_set = o3d.geometry.LineSet(
        points=o3d.utility.Vector3dVector(first_pose),
        lines=o3d.utility.Vector2iVector(skeleton),
    )
    vis.add_geometry(line_set)
    # Cone marks the camera position/orientation for the current frame.
    cam = o3d.geometry.TriangleMesh.create_cone(radius=0.07, height=0.2)
    cam.paint_uniform_color([0.0, 0.2, 1.0])
    vis.add_geometry(cam)
    for i, pose in enumerate(world_poses):
        if isinstance(pose,np.ndarray):
            extrinsic = extrinsics[i]
            # Add 1.5 to the z component to match the xy plane of ScanNet
            # (in-place edit of the caller's array).
            extrinsic[2,3] += 1.5
            #Update skeleton and camera position
            line_set.points = o3d.utility.Vector3dVector(pose)
            cam.transform(extrinsic)
            vis.update_geometry(line_set)
            vis.update_geometry(cam)
            vis.poll_events()
            vis.update_renderer()
            # Undo the transform so the cone is back at the world origin
            # before the next frame's transform is applied.
            cam.transform(np.linalg.inv(extrinsic))
            time.sleep(0.05)
    #vis.destroy_window()
| 29.181818 | 109 | 0.625545 |
1aa0bc73b8dd39ec23bfa8d7af052f93e07b2e97 | 1,097 | py | Python | wheelchair_navigation/scripts/nav_joy.py | OSUrobotics/wheelchair-automation | 909f7f0ead388a6cf88efee8f0c75cd220b849e9 | [
"MIT"
] | 13 | 2015-08-13T06:14:30.000Z | 2021-11-22T22:32:24.000Z | wheelchair_navigation/scripts/nav_joy.py | OSUrobotics/wheelchair-automation | 909f7f0ead388a6cf88efee8f0c75cd220b849e9 | [
"MIT"
] | 1 | 2015-01-10T23:30:06.000Z | 2015-01-21T15:44:08.000Z | wheelchair_navigation/scripts/nav_joy.py | OSUrobotics/wheelchair-automation | 909f7f0ead388a6cf88efee8f0c75cd220b849e9 | [
"MIT"
] | 7 | 2015-03-20T05:42:03.000Z | 2020-04-19T20:58:14.000Z | #!/usr/bin/env python
import roslib
roslib.load_manifest('wheelchair_navigation')
import rospy, message_filters
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan, Joy
import numpy as np
import std_msgs.msg
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal, MoveBaseActionGoal
class nav_clear:
    """ROS (Python 2) helper node: cancels any active move_base navigation
    goal when joystick button index 2 is pressed."""
    def __init__(self):
        # Subscribe to the joystick and connect to the move_base action server.
        self.joy_sub = rospy.Subscriber('/joy',Joy,self.joy_callback)
#        self.move_goal = rospy.Subscriber("/move_base/goal", MoveBaseActionGoal, self.newGoalHandler)
        self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
        rospy.loginfo("Wating for move_base server.....")
        # Blocks until move_base is available; node startup stalls without it.
        self.client.wait_for_server()
    def joy_callback(self,joy):
        # Button 2 is the cancel trigger -- TODO confirm against the actual
        # joystick mapping.
        if (joy.buttons[2] == 1):
            print "joy_callback"
            # Cancel the current goal and any queued ones.
            self.client.cancel_goal()
            self.client.cancel_all_goals()
            rospy.loginfo("Goal cancelled")
        else:
            pass
if __name__ == '__main__':
    rospy.init_node('nav_joysick',log_level=rospy.DEBUG)
    # NOTE(review): this rebinding shadows the class name `nav_clear`;
    # harmless here since the class is not used again, but worth renaming.
    nav_clear = nav_clear()
    rospy.spin()
| 32.264706 | 103 | 0.706472 |
e354173a1da471c880f357a322e95aeeb11ecd38 | 12,983 | py | Python | pipelines/subrefz.py | deshima-dev/qlook-pipeline | 90f520e101a58ae1dc1ffd9317ad16035e8efe0c | [
"MIT"
] | null | null | null | pipelines/subrefz.py | deshima-dev/qlook-pipeline | 90f520e101a58ae1dc1ffd9317ad16035e8efe0c | [
"MIT"
] | 3 | 2020-10-17T17:09:19.000Z | 2022-02-03T14:10:47.000Z | pipelines/subrefz.py | deshima-dev/qlook-pipeline | 90f520e101a58ae1dc1ffd9317ad16035e8efe0c | [
"MIT"
] | null | null | null | # standard libraries
import warnings
import argparse
import pathlib
import yaml
# dependent packages
import decode as dc
import numpy as np
from scipy.signal import argrelmax, argrelmin
import matplotlib.pyplot as plt
from astropy import table
from astropy.modeling import models, fitting
# original package
from utils import functions as fc
# module settings
warnings.filterwarnings("ignore")
plt.style.use("seaborn-darkgrid")
plt.style.use("seaborn-muted")
# command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("dfits_file", help="DFITS name")
parser.add_argument("antlog_file", help="antenna log")
parser.add_argument("yaml_file", help="parameter file")
args = parser.parse_args()
dfits_file = pathlib.Path(args.dfits_file)
antlog_file = pathlib.Path(args.antlog_file)
yaml_file = pathlib.Path(args.yaml_file)
# All pipeline parameters come from one YAML file (safe loader: no
# arbitrary object construction).
with open(yaml_file) as file:
    params = yaml.load(file, Loader=yaml.SafeLoader)
# directory settings
# Observation ID is encoded in the DFITS filename as <prefix>_<obsid>.<ext>
# -- TODO confirm this naming convention holds for all inputs.
obsid = dfits_file.name.split("_")[1].split(".")[0]
output_dir = pathlib.Path(params["file"]["output_dir"]) / obsid
if not output_dir.exists():
    output_dir.mkdir(parents=True)
result_file = output_dir / params["file"]["result_file"]
image_format = params["file"]["image_format"]
do_plot = params["file"]["do_plot"]
dpi = params["file"]["dpi"]
# fc.loaddfits parameters
ch = params["loaddfits"]["ch"]
# Timestream data array (decode-style DataArray with scantype/subref coords).
array = fc.loaddfits(dfits_file, antlog_file, **params["loaddfits"])
# 1st step: check time stream
# Quick-look plots of two configurable time windows for one channel, to
# verify the scan types (GRAD / ON) look sensible.
print("#1: check time stream")
scantypes = np.unique(array.scantype)
print(f"scantypes: {scantypes}")
fig, ax = plt.subplots(2, 1, figsize=(10, 5), dpi=dpi)
tstart0 = params["check_scantypes"]["tstart0"]
tend0 = params["check_scantypes"]["tend0"]
tstart1 = params["check_scantypes"]["tstart1"]
tend1 = params["check_scantypes"]["tend1"]
subarray0 = array[tstart0:tend0, :]
subarray1 = array[tstart1:tend1, :]
# Reference channel for the later Gauss fits (step 4).
refch = params["check_scantypes"]["refch"]
plot_params = {"marker": ".", "markersize": 0.5, "linestyle": "None"}
dc.plot.plot_timestream(subarray0, ch, scantypes=["GRAD"], ax=ax[0], **plot_params)
dc.plot.plot_timestream(subarray0, ch, scantypes=["ON"], ax=ax[0], **plot_params)
dc.plot.plot_timestream(subarray1, ch, scantypes=["ON"], ax=ax[1], **plot_params)
fig.tight_layout()
fig.savefig(output_dir / f"time_stream.{image_format}")
# Interactive display or silent save-only mode, per the YAML switch.
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()
# 2nd step: plot subref offsets vs time
# Shows subreflector x/y/z motion during ON scans and locates the turning
# points of the z sweep, which delimit the sweeps fitted in step 4.
print("#2: plot subref offsets vs time")
fig, ax = plt.subplots(3, 1, figsize=(10, 7), dpi=dpi)
dc.plot.plot_tcoords(array, ("time", "subref_x"), scantypes=["ON"], ax=ax[0])
dc.plot.plot_tcoords(array, ("time", "subref_y"), scantypes=["ON"], ax=ax[1])
dc.plot.plot_tcoords(array, ("time", "subref_z"), scantypes=["ON"], ax=ax[2])
# Indices of local maxima/minima of subref_z within the ON data
# (order=1: compared only against immediate neighbors).
maxid = list(argrelmax(array.subref_z[array.scantype == "ON"].values, order=1)[0])
minid = list(argrelmin(array.subref_z[array.scantype == "ON"].values, order=1)[0])
ax[2].plot(
    array.time[array.scantype == "ON"][maxid],
    array.subref_z[array.scantype == "ON"][maxid],
    "o",
    color="C1",
    label="local max",
)
ax[2].plot(
    array.time[array.scantype == "ON"][minid],
    array.subref_z[array.scantype == "ON"][minid],
    "o",
    color="C2",
    label="local min",
)
ax[2].legend()
fig.tight_layout()
fig.savefig(output_dir / f"subref_movement.{image_format}")
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()
# 3rd step: plot temperature vs subref_z
# Raw focus curve for one channel: detector temperature as a function of
# subreflector z position over all ON samples.
print("#3: plot temperature vs subref_z")
fig, ax = plt.subplots(1, 1, figsize=(10, 5), dpi=dpi)
ax.plot(
    array.subref_z[array.scantype == "ON"],
    array[:, ch][array.scantype == "ON"],
    label="ON",
)
ax.set_xlabel("subref_z")
ax.set_ylabel("temperature")
ax.set_title(f"ch #{ch}")
ax.legend()
fig.tight_layout()
fig.savefig(output_dir / f"temp_vs_subrefz.{image_format}")
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()
# 4th step: Gauss-fit
# Each monotonic subref_z sweep (between a local min and local max found in
# step 2) is fit with a Gaussian focus response on top of a linear baseline.
# The original first/middle/last-iteration branches were three near-identical
# copies of the same code; they are collapsed into one guarded loop.  This
# also fixes a crash when len(maxid) == 1 with NaN sentinels at both ends:
# the old n == 0 branch sliced with minid[n + 1] unconditionally.
print("#4: Gauss-fit")
alldata = table.QTable(
    names=("scan_speed", "peak", "z_mean", "z_stddev", "slope", "intercept")
)
# Pad `minid` with NaN sentinels so every maxid[n] is bracketed by
# minid[n] (start of the rising sweep) and minid[n + 1] (end of the
# falling sweep); a NaN marks a missing bracket at either edge.
if maxid[0] < minid[0]:
    minid.insert(0, np.nan)
if minid[-1] < maxid[-1]:
    minid.append(np.nan)
# Initial guesses for the compound model, taken from the YAML parameters.
amp0 = params["fitting"]["amplitude"]
z0 = params["fitting"]["z_mean"]
s0 = params["fitting"]["z_stddev"]
sl = params["fitting"]["slope"]
ic = params["fitting"]["intercept"]
g_init = models.Gaussian1D(amplitude=amp0, mean=z0, stddev=s0) + models.Linear1D(sl, ic)
fit_g = fitting.LevMarLSQFitter()


def _fit_sweep(j0, j1, ax, obs_color, model_color, sign_label):
    """Fit one monotonic subref_z sweep (ON samples j0:j1).

    Plots the observed temperatures and the fitted model on `ax`, derives
    the scan speed from the sweep's endpoint samples, and appends one row
    (scan_speed, peak, z_mean, z_stddev, slope, intercept) to `alldata`.
    """
    on = array.scantype == "ON"
    z = array.subref_z[on][j0:j1]
    temp = array[:, refch][on][j0:j1]
    ax.plot(z, temp, color=obs_color, label="obs ({})".format(sign_label))
    g = fit_g(g_init, z, temp)
    ax.plot(z, g(z), color=model_color, label="model ({})".format(sign_label))
    # Scan speed = z displacement / elapsed time between the endpoints
    # (time deltas are in nanoseconds, hence the 1e9).  Negative for
    # falling sweeps, which step 5 relies on to separate the two signs.
    dt = (array.time[on][j1] - array.time[on][j0]).values.item() / 1e9
    ds = float((array.subref_z[on][j1] - array.subref_z[on][j0]).values)
    alldata.add_row(
        (ds / dt, g.amplitude_0, g.mean_0, g.stddev_0, g.slope_1, g.intercept_1)
    )


for n in range(len(maxid)):
    fig, ax = plt.subplots(1, 1, figsize=(10, 5))
    # Rising sweep (dZ > 0); skipped when the first max has no preceding min.
    if not np.isnan(minid[n]):
        _fit_sweep(minid[n], maxid[n], ax, "C0", "C2", "dZ > 0")
    # Falling sweep (dZ < 0); skipped when the last max has no following min.
    if not np.isnan(minid[n + 1]):
        _fit_sweep(maxid[n], minid[n + 1], ax, "C1", "C3", "dZ < 0")
    ax.set_xlabel("subref_z")
    ax.set_ylabel("temparature")  # spelling kept from the original label
    ax.set_title(f"Group #{n}")
    ax.legend()
    fig.tight_layout()
    fig.savefig(output_dir / f"subrefz_fit_#{n}.{image_format}")
    if do_plot:
        plt.show()
    else:
        plt.clf()
        plt.close()
# 5th step: plot subref_z vs scan_speed
# Summary plot: fitted focus position (z_mean) of each sweep against its
# scan speed, plus the mean focus position over all sweeps.
print("#5: plot subref_z vs scan_speed")
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
# Empty scatters exist only to create the two legend entries.
ax.scatter([], [], color="C0", label="dZ > 0")
ax.scatter([], [], color="C1", label="dZ < 0")
# Falling sweeps have negative scan speed; plot |speed| in a different
# color so both signs share a positive x axis.
for ss, zm in zip(alldata["scan_speed"], alldata["z_mean"]):
    if ss < 0:
        ax.plot(-ss, zm, "o", color="C1")
    else:
        ax.plot(ss, zm, "o", color="C0")
# Mean fitted focus position over all sweeps (the headline number).
sz_mean = alldata["z_mean"].mean()
ax.axhline(sz_mean, color="C2", label=f"mean: {sz_mean:.2f}")
ax.set_xlabel("scan_speed")
ax.set_ylabel("subref_z")
ax.legend()
fig.tight_layout()
fig.savefig(output_dir / f"subrefz_vs_scanspeed.{image_format}")
if do_plot:
    plt.show()
else:
    plt.clf()
    plt.close()
# Persist the per-sweep fit results as an ASCII table.
alldata.write(result_file, format="ascii", overwrite=True)
| 32.376559 | 88 | 0.539552 |
52ad4ba8f442a49bb2ee79b77c8b1f665c8cb186 | 55,401 | py | Python | gen_models/PixelVAE/interpolations_take_out_to_run/cifarinterpolation1_filter_3_mean_4.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | [
"MIT"
] | 2 | 2019-10-29T03:26:20.000Z | 2021-03-07T10:02:39.000Z | gen_models/PixelVAE/interpolations_take_out_to_run/cifarinterpolation1_filter_3_mean_4.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | [
"MIT"
] | null | null | null | gen_models/PixelVAE/interpolations_take_out_to_run/cifarinterpolation1_filter_3_mean_4.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | [
"MIT"
] | null | null | null |
"""
PixelVAE: A Latent Variable Model for Natural Images
Ishaan Gulrajani, Kundan Kumar, Faruk Ahmed, Adrien Ali Taiga, Francesco Visin, David Vazquez, Aaron Courville
"""
import os, sys
sys.path.append(os.getcwd())
N_GPUS = 2
import random
import tflib as lib
import tflib.sampling_loop_cifar_filter_3
import tflib.ops.kl_unit_gaussian
import tflib.ops.kl_gaussian_gaussian
import tflib.ops.conv2d
import tflib.ops.linear
import tflib.ops.batchnorm
import tflib.ops.embedding
import tflib.cifar
import tflib.cifar_256
import numpy as np
import tensorflow as tf
import imageio
from imageio import imsave
import keras
import time
import functools
import sklearn
from sklearn.model_selection import train_test_split
DATASET = 'cifar10' # mnist_256
SETTINGS = '32px_cifar' # mnist_256, 32px_small, 32px_big, 64px_small, 64px_big
OUT_DIR = DATASET + '_interpolation1_final_filter_3_mean_4'
if not os.path.isdir(OUT_DIR):
os.makedirs(OUT_DIR)
print "Created directory {}".format(OUT_DIR)
# Architecture presets.  Exactly one branch runs, chosen by SETTINGS above;
# each branch defines the module-level hyperparameters the rest of the
# script reads (dims, latent sizes, schedule, batch/image geometry).
if SETTINGS == 'mnist_256':
    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
    # one_level uses EncFull/DecFull for the bottom (and only) level
    MODE = 'one_level'
    # Whether to treat pixel inputs to the model as real-valued (as in the
    # original PixelCNN) or discrete (gets better likelihoods).
    EMBED_INPUTS = True
    # Turn on/off the bottom-level PixelCNN in Dec1/DecFull
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 32
    DIM_1 = 16
    DIM_2 = 32
    DIM_3 = 32
    DIM_4 = 64
    LATENT_DIM_2 = 128
    NUM_CLASSES = 10
    ALPHA1_ITERS = 5000
    ALPHA2_ITERS = 5000
    KL_PENALTY = 1.0
    BETA_ITERS = 1000
    # In Dec2, we break each spatial location into N blocks (analogous to channels
    # in the original PixelCNN) and model each spatial location autoregressively
    # as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
    # actually hurt performance. Unsure why; might be a bug.
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 2*500,
        'stop_after': 500*500,
        'callback_every': 10*500
    }
    LR = 1e-3
    LR_DECAY_AFTER = TIMES['stop_after']
    LR_DECAY_FACTOR = 1.
    BATCH_SIZE = 100
    N_CHANNELS = 1
    HEIGHT = 28
    WIDTH = 28
    # These aren't actually used for one-level models but some parts
    # of the code still depend on them being defined.
    LATENT_DIM_1 = 64
    LATENTS1_HEIGHT = 7
    LATENTS1_WIDTH = 7
# ---- preset: two-level model for 32x32 RGB, small capacity ----
elif SETTINGS == '32px_small':
    MODE = 'two_level'
    EMBED_INPUTS = True
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 128
    DIM_1 = 64
    DIM_2 = 128
    DIM_3 = 256
    LATENT_DIM_1 = 64
    DIM_PIX_2 = 512
    DIM_4 = 512
    LATENT_DIM_2 = 512
    ALPHA1_ITERS = 2000
    ALPHA2_ITERS = 5000
    KL_PENALTY = 1.00
    BETA_ITERS = 1000
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 1000,
        'stop_after': 200000,
        'callback_every': 20000
    }
    LR = 1e-3
    LR_DECAY_AFTER = 180000
    LR_DECAY_FACTOR = 1e-1
    BATCH_SIZE = 64
    N_CHANNELS = 3
    HEIGHT = 32
    WIDTH = 32
    LATENTS1_HEIGHT = 8
    LATENTS1_WIDTH = 8
# ---- preset: two-level model for 32x32 RGB, large capacity ----
elif SETTINGS == '32px_big':
    MODE = 'two_level'
    EMBED_INPUTS = False
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 256
    DIM_1 = 128
    DIM_2 = 256
    DIM_3 = 512
    LATENT_DIM_1 = 128
    DIM_PIX_2 = 512
    DIM_4 = 512
    LATENT_DIM_2 = 512
    ALPHA1_ITERS = 2000
    ALPHA2_ITERS = 5000
    KL_PENALTY = 1.00
    BETA_ITERS = 1000
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 1000,
        'stop_after': 300000,
        'callback_every': 20000
    }
    VANILLA = False # NOTE(review): only defined in some presets -- confirm downstream code guards for it
    LR = 1e-3
    LR_DECAY_AFTER = 300000
    LR_DECAY_FACTOR = 1e-1
    BATCH_SIZE = 64
    N_CHANNELS = 3
    HEIGHT = 32
    WIDTH = 32
    LATENTS1_HEIGHT = 8
    LATENTS1_WIDTH = 8
# ---- preset: two-level model for 64x64 RGB, small capacity ----
elif SETTINGS == '64px_small':
    MODE = 'two_level'
    EMBED_INPUTS = True
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 128
    DIM_0 = 64
    DIM_1 = 64
    DIM_2 = 128
    LATENT_DIM_1 = 64
    DIM_PIX_2 = 256
    DIM_3 = 256
    DIM_4 = 512
    LATENT_DIM_2 = 512
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 10000,
        'stop_after': 200000,
        'callback_every': 50000
    }
    VANILLA = False
    LR = 1e-3
    LR_DECAY_AFTER = 180000
    LR_DECAY_FACTOR = .1
    ALPHA1_ITERS = 2000
    ALPHA2_ITERS = 10000
    KL_PENALTY = 1.0
    BETA_ITERS = 1000
    BATCH_SIZE = 64
    N_CHANNELS = 3
    HEIGHT = 64
    WIDTH = 64
    LATENTS1_WIDTH = 16
    LATENTS1_HEIGHT = 16
# ---- preset: two-level model for 64x64 RGB, large capacity ----
elif SETTINGS == '64px_big':
    MODE = 'two_level'
    EMBED_INPUTS = True
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 384
    DIM_0 = 192
    DIM_1 = 256
    DIM_2 = 512
    LATENT_DIM_1 = 64
    DIM_PIX_2 = 512
    DIM_3 = 512
    DIM_4 = 512
    LATENT_DIM_2 = 512
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 10000,
        'stop_after': 400000,
        'callback_every': 50000
    }
    VANILLA = False
    LR = 1e-3
    LR_DECAY_AFTER = 180000
    LR_DECAY_FACTOR = .5
    ALPHA1_ITERS = 1000
    ALPHA2_ITERS = 10000
    KL_PENALTY = 1.00
    BETA_ITERS = 500
    BATCH_SIZE = 48
    N_CHANNELS = 3
    HEIGHT = 64
    WIDTH = 64
    LATENTS1_WIDTH = 16
    LATENTS1_HEIGHT = 16
# ---- preset: single-level model for 64x64 RGB ----
elif SETTINGS=='64px_big_onelevel':
    # two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
    # one_level uses EncFull/DecFull for the bottom (and only) level
    MODE = 'one_level'
    # Whether to treat pixel inputs to the model as real-valued (as in the
    # original PixelCNN) or discrete (gets better likelihoods).
    EMBED_INPUTS = True
    # Turn on/off the bottom-level PixelCNN in Dec1/DecFull
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 384
    DIM_0 = 192
    DIM_1 = 256
    DIM_2 = 512
    DIM_3 = 512
    DIM_4 = 512
    LATENT_DIM_2 = 512
    ALPHA1_ITERS = 50000
    ALPHA2_ITERS = 50000
    KL_PENALTY = 1.0
    BETA_ITERS = 1000
    # In Dec2, we break each spatial location into N blocks (analogous to channels
    # in the original PixelCNN) and model each spatial location autoregressively
    # as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
    # actually hurt performance. Unsure why; might be a bug.
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 10000,
        'stop_after': 400000,
        'callback_every': 50000
    }
    LR = 1e-3
    LR_DECAY_AFTER = 180000
    LR_DECAY_FACTOR = 0.5
    BATCH_SIZE = 48
    N_CHANNELS = 3
    HEIGHT = 64
    WIDTH = 64
    # These aren't actually used for one-level models but some parts
    # of the code still depend on them being defined.
    LATENT_DIM_1 = 64
    LATENTS1_HEIGHT = 7
    LATENTS1_WIDTH = 7
# ---- preset: single-level model for 32x32 CIFAR-10 (the active preset) ----
elif SETTINGS=='32px_cifar':
    from keras.datasets import cifar10
    (x_train_set, y_train_set), (x_test_set, y_test_set) = cifar10.load_data()
    # Keras returns NHWC; the model works in NCHW, hence the transposes.
    x_train_set = x_train_set.transpose(0,3,1,2)
    x_test_set = x_test_set.transpose(0,3,1,2)
    seed = 333
    x_train_set, x_dev_set, y_train_set, y_dev_set = train_test_split(x_train_set, y_train_set, test_size=0.1, random_state=seed)
    # two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
    # one_level uses EncFull/DecFull for the bottom (and only) level
    MODE = 'one_level'
    # Whether to treat pixel inputs to the model as real-valued (as in the
    # original PixelCNN) or discrete (gets better likelihoods).
    EMBED_INPUTS = True
    # Turn on/off the bottom-level PixelCNN in Dec1/DecFull
    PIXEL_LEVEL_PIXCNN = True
    HIGHER_LEVEL_PIXCNN = True
    DIM_EMBED = 16
    DIM_PIX_1 = 192 #LEILA EDIT: was previously 384
    DIM_0 = 96 #LEILA EDIT: was previously 192
    DIM_1 = 128 #LEILA EDIT: was previously 256
    DIM_2 = 256 #LEILA EDIT: was previously 512
    DIM_3 = 256 #LEILA EDIT: was previously 512
    DIM_4 = 256 #LEILA EDIT: was previously 512
    LATENT_DIM_2 = 256 #LEILA EDIT: was previously 512
    ALPHA1_ITERS = 50000
    ALPHA2_ITERS = 50000
    KL_PENALTY = 1.0
    BETA_ITERS = 1000
    # In Dec2, we break each spatial location into N blocks (analogous to channels
    # in the original PixelCNN) and model each spatial location autoregressively
    # as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
    # actually hurt performance. Unsure why; might be a bug.
    PIX_2_N_BLOCKS = 1
    TIMES = {
        'test_every': 10000,
        'stop_after': 400000,
        'callback_every': 50000
    }
    LR = 1e-3
    LR_DECAY_AFTER = 180000
    LR_DECAY_FACTOR = 0.5
    BATCH_SIZE = 50 # 48
    N_CHANNELS = 3
    HEIGHT = 32 #64
    WIDTH = 32 #64
    NUM_CLASSES = 10
    # These aren't actually used for one-level models but some parts
    # of the code still depend on them being defined.
    LATENT_DIM_1 = 32 #LEILAEDIT: was previously 64
    LATENTS1_HEIGHT = 7
    LATENTS1_WIDTH = 7
# Dataset loading: only the branch matching DATASET runs.
# NOTE(review): only tflib.cifar / tflib.cifar_256 are imported at the top of
# this file; the mnist_256 / lsun_bedrooms / small_imagenet branches reference
# lib submodules that are not imported here and would likely fail if selected
# -- confirm before switching DATASET.
if DATASET == 'mnist_256':
    train_data, dev_data, test_data = lib.mnist_256.load(BATCH_SIZE, BATCH_SIZE) # TODO: define new data-loader so I don't load batches
elif DATASET == 'lsun_32':
    train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=True)
elif DATASET == 'lsun_64':
    train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=False)
elif DATASET == 'imagenet_64':
    train_data, dev_data = lib.small_imagenet.load(BATCH_SIZE)
elif DATASET == 'cifar10':
    train_data, dev_data, test_data = lib.cifar_256.load(BATCH_SIZE) #LEILAEDIT
lib.print_model_settings(locals().copy())
# One graph "tower" is built per GPU; batches are split evenly across them.
DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]
# Weight normalization is enabled globally for all conv/linear layers.
lib.ops.conv2d.enable_default_weightnorm()
lib.ops.linear.enable_default_weightnorm()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
    # Graph-mode placeholders shared by every tower.
    bn_is_training = tf.placeholder(tf.bool, shape=None, name='bn_is_training')
    bn_stats_iter = tf.placeholder(tf.int32, shape=None, name='bn_stats_iter')
    total_iters = tf.placeholder(tf.int32, shape=None, name='total_iters')
    # Images are fed as discrete ints in NCHW layout.
    all_images = tf.placeholder(tf.int32, shape=[None, N_CHANNELS, HEIGHT, WIDTH], name='all_images')
    all_latents1 = tf.placeholder(tf.float32, shape=[None, LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH], name='all_latents1')
    split_images = tf.split(all_images, len(DEVICES), axis=0)
    # NOTE(review): this splits all_images, not all_latents1 -- looks like a
    # copy/paste bug. Harmless in one_level mode where latents1_sample is
    # unused, but confirm before running a two_level configuration.
    split_latents1 = tf.split(all_images, len(DEVICES), axis=0)
    tower_cost = []
    tower_outputs1_sample = []
    # Build one model replica ("tower") per GPU.
    for device_index, (device, images, latents1_sample) in enumerate(zip(DEVICES, split_images, split_latents1)):
        with tf.device(device):
            def nonlinearity(x):
                """ELU activation used throughout all residual stacks."""
                return tf.nn.elu(x)
            def pixcnn_gated_nonlinearity(a, b):
                """Gated PixelCNN activation: sigmoid(a) * tanh(b)."""
                return tf.sigmoid(a) * tf.tanh(b)
            def SubpixelConv2D(*args, **kwargs):
                """2x-upsampling conv: run Conv2D with 4x the requested output
                channels, then pixel-shuffle (depth_to_space) into a feature map
                of twice the spatial size. Accepts the same arguments as
                lib.ops.conv2d.Conv2D."""
                kwargs['output_dim'] = 4*kwargs['output_dim']
                output = lib.ops.conv2d.Conv2D(*args, **kwargs)
                output = tf.transpose(output, [0,2,3,1]) # NCHW -> NHWC (depth_to_space wants NHWC)
                output = tf.depth_to_space(output, 2)
                output = tf.transpose(output, [0,3,1,2]) # back to NCHW
                return output
def ResidualBlock(name, input_dim, output_dim, inputs, filter_size, mask_type=None, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if mask_type != None and resample != None:
raise Exception('Unsupported configuration')
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim, stride=2)
elif resample=='up':
conv_shortcut = SubpixelConv2D
conv_1 = functools.partial(SubpixelConv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, mask_type=mask_type, he_init=False, biases=True, inputs=inputs)
output = inputs
if mask_type == None:
output = nonlinearity(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False)
output = nonlinearity(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False, biases=False)
if device_index == 0:
output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter)
else:
output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter, update_moving_stats=False)
else:
output = nonlinearity(output)
output_a = conv_1(name+'.Conv1A', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output_b = conv_1(name+'.Conv1B', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = pixcnn_gated_nonlinearity(output_a, output_b)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
return shortcut + output
            def Enc1(images):
                """Bottom-level encoder (two-level mode only; unused when
                MODE == 'one_level').  Maps images (NCHW; embedded or scaled
                depending on EMBED_INPUTS) to per-location Gaussian parameters
                for the first latent layer.
                Returns (mu_and_sigma, output): the 2*LATENT_DIM_1-channel
                parameter map and the pre-projection feature map h1."""
                output = images
                if WIDTH == 64:
                    if EMBED_INPUTS:
                        output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False)
                        output = ResidualBlock('Enc1.InputRes0', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output)
                        output = ResidualBlock('Enc1.InputRes', input_dim=DIM_0, output_dim=DIM_1, filter_size=3, resample='down', inputs=output)
                    else:
                        output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
                        output = ResidualBlock('Enc1.InputRes', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample='down', inputs=output)
                else:
                    if EMBED_INPUTS:
                        output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
                    else:
                        output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
                output = ResidualBlock('Enc1.Res1Pre', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                output = ResidualBlock('Enc1.Res1Pre2', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                output = ResidualBlock('Enc1.Res1', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output)
                # Stop downsampling once the map matches the latent grid size.
                if LATENTS1_WIDTH == 16:
                    output = ResidualBlock('Enc1.Res4Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Enc1.Res4', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Enc1.Res4Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    mu_and_sigma = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_2, output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False)
                else:
                    output = ResidualBlock('Enc1.Res2Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Enc1.Res2Pre2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Enc1.Res2', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('Enc1.Res3Pre', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Enc1.Res3Pre2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Enc1.Res3Pre3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    mu_and_sigma = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False)
                return mu_and_sigma, output
            def Dec1(latents, images):
                """Bottom-level decoder (two-level mode only; unused when
                MODE == 'one_level').  Upsamples latents back to image
                resolution and (when PIXEL_LEVEL_PIXCNN) conditions a masked
                PixelCNN on the (embedded or scaled) ground-truth images.
                Returns per-pixel 256-way logits shaped
                [batch, N_CHANNELS, HEIGHT, WIDTH, 256]."""
                output = tf.clip_by_value(latents, -50., 50.)
                if LATENTS1_WIDTH == 16:
                    output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_2, filter_size=1, inputs=output, he_init=False)
                    output = ResidualBlock('Dec1.Res1A', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Dec1.Res1B', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Dec1.Res1C', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                else:
                    output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=1, inputs=output, he_init=False)
                    output = ResidualBlock('Dec1.Res1', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Dec1.Res1Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Dec1.Res1Post2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Dec1.Res2', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs=output)
                    output = ResidualBlock('Dec1.Res2Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('Dec1.Res2Post2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                output = ResidualBlock('Dec1.Res3', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs=output)
                output = ResidualBlock('Dec1.Res3Post', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                output = ResidualBlock('Dec1.Res3Post2', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                if WIDTH == 64:
                    output = ResidualBlock('Dec1.Res4', input_dim=DIM_1, output_dim=DIM_0, filter_size=3, resample='up', inputs=output)
                    output = ResidualBlock('Dec1.Res4Post', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output)
                if PIXEL_LEVEL_PIXCNN:
                    # Mask type 'a' excludes the current pixel (first layer of the PixelCNN).
                    if WIDTH == 64:
                        if EMBED_INPUTS:
                            masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
                        else:
                            masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_0, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
                    else:
                        if EMBED_INPUTS:
                            masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
                        else:
                            masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
                    # Make the variance of output and masked_images (roughly) match
                    output /= 2
                    # Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
                    output = tf.concat([masked_images, output], axis=1)
                    if WIDTH == 64:
                        output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_0, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                        output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                        output = ResidualBlock('Dec1.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                    else:
                        output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                        output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                    output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
                else:
                    if WIDTH == 64:
                        output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_0, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
                    else:
                        output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
                # [batch, 256*C, H, W] -> [batch, C, H, W, 256] so the last axis indexes pixel values.
                return tf.transpose(
                    tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
                    [0,2,3,4,1]
                )
            def Enc2(h1):
                """Top-level encoder (two-level mode only; unused when
                MODE == 'one_level').  Compresses the bottom-level feature
                map h1 down to a single vector of Gaussian parameters
                (2*LATENT_DIM_2) for the top latent."""
                output = h1
                if LATENTS1_WIDTH == 16:
                    output = ResidualBlock('Enc2.Res0', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res1Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res1Pre2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res1', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res2Pre', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res2Pre2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res2Pre3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res1A', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res2PreA', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Enc2.Res2Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                # Flatten the (assumed 4x4) spatial grid before the final projection.
                output = tf.reshape(output, [-1, 4*4*DIM_4])
                output = lib.ops.linear.Linear('Enc2.Output', input_dim=4*4*DIM_4, output_dim=2*LATENT_DIM_2, inputs=output)
                return output
            def Dec2(latents, targets):
                """Top-level decoder (two-level mode only; unused when
                MODE == 'one_level').  Expands the top latent vector back to
                the bottom-latent grid and (when HIGHER_LEVEL_PIXCNN)
                conditions a masked PixelCNN on the ground-truth bottom
                latents (`targets`).  Returns a 2*LATENT_DIM_1-channel map of
                Gaussian parameters over the bottom latents."""
                output = tf.clip_by_value(latents, -50., 50.)
                output = lib.ops.linear.Linear('Dec2.Input', input_dim=LATENT_DIM_2, output_dim=4*4*DIM_4, inputs=output)
                output = tf.reshape(output, [-1, DIM_4, 4, 4])
                output = ResidualBlock('Dec2.Res1Pre', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Dec2.Res1', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Dec2.Res1Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Dec2.Res3', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', he_init=True, inputs=output)
                output = ResidualBlock('Dec2.Res3Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Dec2.Res3Post2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                output = ResidualBlock('Dec2.Res3Post3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                if LATENTS1_WIDTH == 16:
                    # One extra upsampling stage to reach the 16x16 latent grid.
                    output = ResidualBlock('Dec2.Res3Post5', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('Dec2.Res3Post6', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('Dec2.Res3Post7', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('Dec2.Res3Post8', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                if HIGHER_LEVEL_PIXCNN:
                    if LATENTS1_WIDTH == 16:
                        masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_2, filter_size=5, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets)
                    else:
                        masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=5, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets)
                    # Make the variance of output and masked_targets roughly match
                    output /= 2
                    # masked_targets must come first in the concat (masked convolutions).
                    output = tf.concat([masked_targets, output], axis=1)
                    if LATENTS1_WIDTH == 16:
                        output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_2, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
                    else:
                        output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_3, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
                    output = ResidualBlock('Dec2.Pix3Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
                    output = ResidualBlock('Dec2.Pix4Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
                    output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_PIX_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
                else:
                    if LATENTS1_WIDTH == 16:
                        output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
                    else:
                        output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
                return output
            # Only for 32px_cifar, 64px_big_onelevel, and MNIST. Needs modification for others.
            def EncFull(images):
                """Single-level encoder (MODE == 'one_level').  Maps images
                (NCHW; embedded or scaled per EMBED_INPUTS) to a flat vector
                of Gaussian parameters (2*LATENT_DIM_2).  The WIDTH == 32
                branch downsamples 32 -> 2 via four strided stages; the else
                branch is the 28x28 MNIST path using global average pooling."""
                output = images
                if WIDTH == 32: #64
                    if EMBED_INPUTS:
                        output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False)
                    else:
                        output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False)
                    output = ResidualBlock('EncFull.Res1', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res2', input_dim=DIM_0, output_dim=DIM_1, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('EncFull.Res3', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res4', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res5', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('EncFull.Res6', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res7', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res8', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('EncFull.Res9', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res10', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res11', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('EncFull.Res12', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res13', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs=output)
                    output = tf.reshape(output, [-1, 2*2*DIM_4])
                    output = lib.ops.linear.Linear('EncFull.Output', input_dim=2*2*DIM_4, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
                else:
                    if EMBED_INPUTS:
                        output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
                    else:
                        output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
                    output = ResidualBlock('EncFull.Res1', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res2', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('EncFull.Res3', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res4', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output)
                    output = ResidualBlock('EncFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    output = ResidualBlock('EncFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
                    # Global average pool over the spatial axes.
                    output = tf.reduce_mean(output, reduction_indices=[2,3])
                    output = lib.ops.linear.Linear('EncFull.Output', input_dim=DIM_3, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
                return output
            # Only for 32px_CIFAR, 64px_big_onelevel and MNIST. Needs modification for others.
            def DecFull(latents, images):
                """Single-level decoder (MODE == 'one_level').  Upsamples the
                latent vector to image resolution (2 -> 32 via four subpixel
                stages on the WIDTH == 32 path; 7 -> 28 tiling path for MNIST)
                and, when PIXEL_LEVEL_PIXCNN, conditions a masked PixelCNN on
                the ground-truth images.  Returns per-pixel 256-way logits
                shaped [batch, N_CHANNELS, HEIGHT, WIDTH, 256]."""
                output = tf.clip_by_value(latents, -50., 50.)
                if WIDTH == 32: # 64:LEILAEDIT. Also changed 4*4 to 2*2 and 4,4 to 2,2 in the two lines below
                    output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=2*2*DIM_4, initialization='glorot', inputs=output)
                    output = tf.reshape(output, [-1, DIM_4, 2, 2])
                    output = ResidualBlock('DecFull.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res3', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res4', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res7', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res8', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res9', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res10', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res11', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res12', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res13', input_dim=DIM_1, output_dim=DIM_0, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res14', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, he_init=True, inputs=output)
                else:
                    output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_3, initialization='glorot', inputs=output)
                    # Broadcast the latent vector over a 7x7 grid (49 = 7*7).
                    output = tf.reshape(tf.tile(tf.reshape(output, [-1, DIM_3, 1]), [1, 1, 49]), [-1, DIM_3, 7, 7])
                    output = ResidualBlock('DecFull.Res2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res4', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res5', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res6', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', he_init=True, inputs=output)
                    output = ResidualBlock('DecFull.Res7', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output)
                if WIDTH == 32: #64:
                    dim = DIM_0
                else:
                    dim = DIM_1
                if PIXEL_LEVEL_PIXCNN:
                    # Mask type 'a' excludes the current pixel (first PixelCNN layer).
                    if EMBED_INPUTS:
                        masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=dim, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
                    else:
                        masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS, output_dim=dim, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
                    # Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
                    output = tf.concat([masked_images, output], axis=1)
                    output = ResidualBlock('DecFull.Pix2Res', input_dim=2*dim, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                    output = ResidualBlock('DecFull.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                    output = ResidualBlock('DecFull.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                    if WIDTH != 32: #64: LEILAEDIT
                        output = ResidualBlock('DecFull.Pix5Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
                    output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
                else:
                    output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=dim, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
                # [batch, 256*C, H, W] -> [batch, C, H, W, 256] so the last axis indexes pixel values.
                return tf.transpose(
                    tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
                    [0,2,3,4,1]
                )
def split(mu_and_logsig):
    """Split a concatenated (mu, log-sigma) tensor along the channel axis.

    The raw log-sigma half is squashed through softsign into (0, 1) to
    obtain sigma, and log-sigma is then recomputed from that squashed
    value.  Returns a (mu, logsig, sig) tuple of tensors.
    """
    mean, raw_logsig = tf.split(mu_and_logsig, 2, axis=1)
    # Map the raw log-sigma into the open interval (0, 1) via softsign.
    stddev = (tf.nn.softsign(raw_logsig) + 1) * 0.5
    return mean, tf.log(stddev), stddev
def clamp_logsig_and_sig(logsig, sig):
    """Clamp sigma (and its log) from below early in training.

    The floor decays linearly from 1 to 0 over the first BETA_ITERS
    iterations (driven by the graph-level `total_iters` value), which
    stops sigma from collapsing before the model has warmed up.
    """
    progress = tf.minimum(1., tf.cast(total_iters, 'float32') / BETA_ITERS)
    sig_floor = 1. - progress
    clamped_sig = tf.maximum(sig, sig_floor)
    clamped_logsig = tf.maximum(logsig, tf.log(sig_floor))
    return clamped_logsig, clamped_sig
# Rescale raw uint8 pixel values into roughly [-2, 2] for the encoder input.
scaled_images = (tf.cast(images, 'float32') - 128.) / 64.
if EMBED_INPUTS:
    # Alternatively represent each of the 256 possible pixel values with a
    # learned DIM_EMBED-dimensional embedding, flattened into the channel axis.
    embedded_images = lib.ops.embedding.Embedding('Embedding', 256, DIM_EMBED, images)
    embedded_images = tf.transpose(embedded_images, [0,4,1,2,3])
    embedded_images = tf.reshape(embedded_images, [-1, DIM_EMBED*N_CHANNELS, HEIGHT, WIDTH])
if MODE == 'one_level':
    # Layer 1: single-latent-level VAE. Encode the images into a
    # (mu, logsig, sig) posterior over the latent code.
    if EMBED_INPUTS:
        mu_and_logsig1 = EncFull(embedded_images)
    else:
        mu_and_logsig1 = EncFull(scaled_images)
    mu1, logsig1, sig1 = split(mu_and_logsig1)
    eps = tf.random_normal(tf.shape(mu1))
    # NOTE(review): the latent is taken as the posterior mean (no eps*sig1
    # noise added), so encoding here is deterministic; `eps` above is unused.
    latents1 = mu1 # LEILAEDIT
    if EMBED_INPUTS:
        outputs1 = DecFull(latents1, embedded_images)
    else:
        outputs1 = DecFull(latents1, scaled_images)
    # Per-pixel 256-way softmax reconstruction loss against the raw pixels.
    reconst_cost = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=tf.reshape(outputs1, [-1, 256]),
            labels=tf.reshape(images, [-1])
        )
    )
    # Assembly
    # An alpha of exactly 0 can sometimes cause inf/nan values, so we're
    # careful to avoid it.
    alpha = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA1_ITERS) * KL_PENALTY
    kl_cost_1 = tf.reduce_mean(
        lib.ops.kl_unit_gaussian.kl_unit_gaussian(
            mu1,
            logsig1,
            sig1
        )
    )
    # Normalize the KL term to a per-subpixel scale to match reconst_cost.
    kl_cost_1 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT)
    cost = reconst_cost + (alpha * kl_cost_1)
elif MODE == 'two_level':
    # Layer 1: bottom-level encoder produces both the posterior parameters
    # and an intermediate feature map h1 that feeds the level-2 encoder.
    if EMBED_INPUTS:
        mu_and_logsig1, h1 = Enc1(embedded_images)
    else:
        mu_and_logsig1, h1 = Enc1(scaled_images)
    mu1, logsig1, sig1 = split(mu_and_logsig1)
    # Sanity checks: the configured latent spatial dims must match the graph.
    if mu1.get_shape().as_list()[2] != LATENTS1_HEIGHT:
        raise Exception("LATENTS1_HEIGHT doesn't match mu1 shape!")
    if mu1.get_shape().as_list()[3] != LATENTS1_WIDTH:
        raise Exception("LATENTS1_WIDTH doesn't match mu1 shape!")
    # Reparameterization trick: sample latents1 from the posterior.
    eps = tf.random_normal(tf.shape(mu1))
    latents1 = mu1 + (eps * sig1)
    # NOTE(review): latents1_sample is not defined in this chunk; it is
    # presumably a prior/placeholder sample created elsewhere — confirm.
    if EMBED_INPUTS:
        outputs1 = Dec1(latents1, embedded_images)
        outputs1_sample = Dec1(latents1_sample, embedded_images)
    else:
        outputs1 = Dec1(latents1, scaled_images)
        outputs1_sample = Dec1(latents1_sample, scaled_images)
    # Per-pixel 256-way softmax reconstruction loss against the raw pixels.
    reconst_cost = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=tf.reshape(outputs1, [-1, 256]),
            labels=tf.reshape(images, [-1])
        )
    )
    # Layer 2: encode h1 into the top-level latent and decode it into a
    # learned prior over the level-1 latents.
    mu_and_logsig2 = Enc2(h1)
    mu2, logsig2, sig2 = split(mu_and_logsig2)
    eps = tf.random_normal(tf.shape(mu2))
    latents2 = mu2 + (eps * sig2)
    outputs2 = Dec2(latents2, latents1)
    mu1_prior, logsig1_prior, sig1_prior = split(outputs2)
    # Keep the prior's sigma bounded away from zero early in training.
    logsig1_prior, sig1_prior = clamp_logsig_and_sig(logsig1_prior, sig1_prior)
    # Squash the prior mean into (-2, 2) with a scaled softsign.
    mu1_prior = 2. * tf.nn.softsign(mu1_prior / 2.)
    # Assembly
    # An alpha of exactly 0 can sometimes cause inf/nan values, so we're
    # careful to avoid it.
    alpha1 = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA1_ITERS) * KL_PENALTY
    alpha2 = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA2_ITERS) * alpha1 # * KL_PENALTY
    # KL(q(z1|x) || p(z1|z2)) against the learned conditional prior.
    kl_cost_1 = tf.reduce_mean(
        lib.ops.kl_gaussian_gaussian.kl_gaussian_gaussian(
            mu1,
            logsig1,
            sig1,
            mu1_prior,
            logsig1_prior,
            sig1_prior
        )
    )
    # KL(q(z2|x) || N(0, I)) against the fixed unit-Gaussian top prior.
    kl_cost_2 = tf.reduce_mean(
        lib.ops.kl_unit_gaussian.kl_unit_gaussian(
            mu2,
            logsig2,
            sig2
        )
    )
    # Normalize both KL terms to a per-subpixel scale to match reconst_cost.
    kl_cost_1 *= float(LATENT_DIM_1 * LATENTS1_WIDTH * LATENTS1_HEIGHT) / (N_CHANNELS * WIDTH * HEIGHT)
    kl_cost_2 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT)
    cost = reconst_cost + (alpha1 * kl_cost_1) + (alpha2 * kl_cost_2)
# NOTE(review): the tower_* accumulators suggest the code above runs once per
# device tower inside an enclosing loop that starts before this chunk — confirm.
tower_cost.append(cost)
if MODE == 'two_level':
    tower_outputs1_sample.append(outputs1_sample)
# Average the per-tower costs into the single training objective.
full_cost = tf.reduce_mean(
    tf.concat([tf.expand_dims(x, 0) for x in tower_cost], axis=0), 0
)
if MODE == 'two_level':
    full_outputs1_sample = tf.concat(tower_outputs1_sample, axis=0)
# Sampling
# Pixel-by-pixel ancestral sampling hooks (one_level mode only):
# dec1_fn samples a single (channel, y, x) sub-pixel conditioned on the
# latent code and the already-generated pixels; enc_fn maps images to
# their latent codes (the posterior mean, per the graph above).
if MODE == 'one_level':
    ch_sym = tf.placeholder(tf.int32, shape=None)
    y_sym = tf.placeholder(tf.int32, shape=None)
    x_sym = tf.placeholder(tf.int32, shape=None)
    # Slice out the 256-way logits of exactly one sub-pixel position.
    logits = tf.reshape(tf.slice(outputs1, tf.stack([0, ch_sym, y_sym, x_sym, 0]), tf.stack([-1, 1, 1, 1, -1])), [-1, 256])
    dec1_fn_out = tf.multinomial(logits, 1)[:, 0]
    def dec1_fn(_latents, _targets, _ch, _y, _x):
        # total_iters is fed as a large value so iteration-based schedules
        # (alpha, lr decay) behave as in late training during sampling.
        return session.run(dec1_fn_out, feed_dict={latents1: _latents, images: _targets, ch_sym: _ch, y_sym: _y, x_sym: _x, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})
    def enc_fn(_images):
        return session.run(latents1, feed_dict={images: _images, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})
    sample_fn_latents1 = np.random.normal(size=(1, LATENT_DIM_2)).astype('float32')
def generate_and_save_samples(tag):
    """Training callback: build class-pair interpolations in latent space.

    For each ordered pair of distinct classes, one random training image
    per class is encoded with enc_fn, the two latent codes (and one-hot
    labels) are mixed at several ratios p, the mixed code is decoded
    pixel-by-pixel with dec1_fn, and the results are written to OUT_DIR
    both as PNGs and as stacked .npy augmentation arrays.
    """
    from keras.utils import np_utils
    # Accumulators start with a dummy all-zero row that is stripped before
    # saving (see the np.delete calls at the end).
    x_augmentation_set = np.zeros((1, N_CHANNELS, HEIGHT, WIDTH)) #LEILEDIT: to enable .npy image saving
    y_augmentation_set = np.zeros((1, 1, NUM_CLASSES)) #LEILEDIT: to enable .npy image saving.
    # Function to translate numeric images into plots
    def color_grid_vis(X, nh, nw, save_path):
        # from github.com/Newmu
        # Tile the batch X of shape (N, C, H, W) into an nh x nw grid image
        # and save it under OUT_DIR.
        X = X.transpose(0,2,3,1)
        h, w = X[0].shape[:2]
        img = np.zeros((h*nh, w*nw, 3))
        for n, x in enumerate(X):
            j = n/nw
            i = n%nw
            img[j*h:j*h+h, i*w:i*w+w, :] = x
        imsave(OUT_DIR + '/' + save_path, img)
    numsamples = 6
    pvals = np.linspace(0.2, 0.8, num=4)
    #pvals = np.linspace(0.2, 0.8, num=1)
    p_set = np.zeros(1)
    x_train_set_array = np.array(x_train_set)
    y_train_set_array = np.array(y_train_set)
    for imagenum in range(numsamples):
        for class1 in range(NUM_CLASSES-1): # goes up to class 8
            # Rows of the training set belonging to class1.
            idx1 = np.asarray(np.where(np.equal(class1, y_train_set))[0])
            x_trainsubset1 = x_train_set_array[idx1,:]
            y_trainsubset1 = y_train_set_array[idx1,:]
            x_trainsubset1 = x_trainsubset1.reshape(-1, N_CHANNELS, HEIGHT, WIDTH)
            y_trainsubset1 = y_trainsubset1.reshape(-1, 1)
            for class2 in range(class1+1, NUM_CLASSES):
                # Rows of the training set belonging to class2 (> class1, so
                # every unordered pair is visited exactly once).
                idx2 = np.asarray(np.where(np.equal(class2, y_train_set))[0])
                x_trainsubset2 = x_train_set_array[idx2,:]
                y_trainsubset2 = y_train_set_array[idx2,:]
                x_trainsubset2 = x_trainsubset2.reshape(-1, N_CHANNELS, HEIGHT, WIDTH)
                y_trainsubset2 = y_trainsubset2.reshape(-1, 1)
                imageindex1 = random.sample(range(x_trainsubset1.shape[0]),1)
                imageindex2 = random.sample(range(x_trainsubset2.shape[0]),1)
                # Draw the corresponding images and labels from the training data
                image1 = x_trainsubset1[imageindex1,:]
                image2 = x_trainsubset2[imageindex2,:]
                label1 = y_trainsubset1[imageindex1,:]
                label2 = y_trainsubset2[imageindex2,:]
                # Reshape
                image1 = image1.reshape(1, N_CHANNELS, HEIGHT, WIDTH)
                image2 = image2.reshape(1, N_CHANNELS, HEIGHT, WIDTH)
                label1 = label1.reshape(1, 1)
                label2 = label2.reshape(1, 1)
                # Save the original images
                print "Saving original samples"
                color_grid_vis(
                    image1,
                    1,
                    1,
                    'original_1_classes{}and{}_num{}.png'.format(class1,class2,imagenum)
                )
                color_grid_vis(
                    image2,
                    1,
                    1,
                    'original_2_classes{}and{}_num{}.png'.format(class1,class2,imagenum)
                )
                # Encode the images
                image_code1 = enc_fn(image1)
                image_code2 = enc_fn(image2)
                # Change labels to matrix form before performing interpolations
                label1 = np_utils.to_categorical(label1, NUM_CLASSES)
                label2 = np_utils.to_categorical(label2, NUM_CLASSES)
                # Combine the latent codes
                for p in pvals:
                    # Convex combination of the two codes/labels at ratio p.
                    new_code = np.multiply(p,image_code1) + np.multiply((1-p),image_code2)
                    new_label = np.multiply(p,label1) + np.multiply((1-p),label2)
                    new_label = new_label.reshape(1,1,NUM_CLASSES)
                    samples = np.zeros(
                        (1, N_CHANNELS, HEIGHT, WIDTH),
                        dtype='int32'
                    )
                    print "Generating samples"
                    # Ancestral sampling: each sub-pixel is drawn conditioned
                    # on the mixed latent code and all previously drawn pixels.
                    for y in xrange(HEIGHT):
                        for x in xrange(WIDTH):
                            for ch in xrange(N_CHANNELS):
                                next_sample = dec1_fn(new_code, samples, ch, y, x)
                                samples[:,ch,y,x] = next_sample
                    x_augmentation_set = np.concatenate((x_augmentation_set, samples), axis=0)#LEILAEDIT for .npy saving
                    y_augmentation_set = np.concatenate((y_augmentation_set, new_label), axis=0)#LEILAEDIT for .npy saving
                    p_set = np.append(p_set,p)
                    color_grid_vis(
                        samples,
                        1,
                        1,
                        'interpolation1_classes{}and{}_pval{}_num{}.png'.format(class1,class2,p,imagenum)
                    )
    # Sample two unique image indices from different classes
    #classindices = random.sample(range(0,NUM_CLASSES),2)
    #idx1 = np.where(np.equal(classindices[0],y_train_set))
    #idx2 = np.where(np.equal(classindices[1],y_train_set))
    #x_train_set_array = np.array(x_train_set)
    #y_train_set_array = np.array(y_train_set)
    #x_trainsubset1 = x_train_set_array[idx1,:]
    #x_trainsubset2 = x_train_set_array[idx2,:]
    #y_trainsubset1 = y_train_set_array[idx1,:]
    #y_trainsubset2 = y_train_set_array[idx2,:]
    #x_trainsubset1 = x_trainsubset1.reshape(-1, N_CHANNELS, HEIGHT, WIDTH)
    #x_trainsubset2 = x_trainsubset2.reshape(-1, N_CHANNELS, HEIGHT, WIDTH)
    #y_trainsubset1 = y_trainsubset1.reshape(-1, 1)
    #y_trainsubset2 = y_trainsubset2.reshape(-1, 1)
    #imageindex1 = random.sample(range(x_trainsubset1.shape[0]),1)
    #imageindex2 = random.sample(range(x_trainsubset2.shape[0]),1)
    # Draw the corresponding images and labels from the training data
    #image1 = x_trainsubset1[imageindex1,:]
    #image2 = x_trainsubset2[imageindex2,:]
    #label1 = y_trainsubset1[imageindex1,:]
    #label2 = y_trainsubset2[imageindex2,:]
    # Reshape
    #image1 = image1.reshape(1, N_CHANNELS, HEIGHT, WIDTH)
    #image2 = image2.reshape(1, N_CHANNELS, HEIGHT, WIDTH)
    #label1 = label1.reshape(1, 1)
    #label2 = label2.reshape(1, 1)
    # Strip the dummy all-zero first rows added at initialization.
    x_augmentation_array = np.delete(x_augmentation_set, (0), axis=0)
    y_augmentation_array = np.delete(y_augmentation_set, (0), axis=0)
    p_set = np.delete(p_set, (0), axis=0)
    x_augmentation_array = x_augmentation_array.astype(np.uint8)
    np.save(OUT_DIR + '/' + 'x_augmentation_array_mean_4', x_augmentation_array) #LEILAEDIT for .npy saving
    np.save(OUT_DIR + '/' + 'y_augmentation_array_mean_4', y_augmentation_array) #LEILAEDIT for .npy saving
    np.save(OUT_DIR + '/' + 'p_set_array', p_set)
# Run
# Training entry point: hand the assembled graph to the sampling loop, which
# owns the session, optimization, checkpointing and the periodic callback.
if MODE == 'one_level':
    # Scalars reported during training alongside the total cost.
    prints=[
        ('alpha', alpha),
        ('reconst', reconst_cost),
        ('kl1', kl_cost_1)
    ]
    # Step-wise (staircase) exponential learning-rate decay schedule.
    decayed_lr = tf.train.exponential_decay(
        LR,
        total_iters,
        LR_DECAY_AFTER,
        LR_DECAY_FACTOR,
        staircase=True
    )
    lib.sampling_loop_cifar_filter_3.sampling_loop( #LEIlAEDIT. TODO: update to remove uncessary arguments
        session=session,
        inputs=[total_iters, all_images],
        inject_iteration=True,
        bn_vars=(bn_is_training, bn_stats_iter),
        cost=full_cost,
        stop_after=TIMES['stop_after'],
        prints=prints,
        optimizer=tf.train.AdamOptimizer(decayed_lr),
        train_data=train_data,
        test_data=dev_data,
        callback=generate_and_save_samples,
        callback_every=TIMES['callback_every'],
        test_every=TIMES['test_every'],
        save_checkpoints=True
    )
| 49.070859 | 202 | 0.604014 |
cb7a5ba3219c21bdefc2885e8a3da99626cdb606 | 9,234 | py | Python | dju_image/image.py | liminspace/dju-image | b06eb3be2069cd6cb52cf1e26c2c761883142d4e | [
"MIT"
] | 6 | 2016-01-23T18:17:06.000Z | 2017-02-23T16:22:39.000Z | dju_image/image.py | liminspace/dju-image | b06eb3be2069cd6cb52cf1e26c2c761883142d4e | [
"MIT"
] | null | null | null | dju_image/image.py | liminspace/dju-image | b06eb3be2069cd6cb52cf1e26c2c761883142d4e | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import subprocess
from cStringIO import StringIO
from PIL import Image, ImageFile
from contextlib import contextmanager
from django.core.files.uploadedfile import UploadedFile
from dju_common.file import truncate_file
from . import settings as dju_settings
def image_get_format(f):
"""
Return image format for file-object f. (jpeg, png, gif etc.)
All formats: http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
Example:
if image_get_format(request.FILES['image']) == 'jpeg':
print 'Image is JPEG'
if image_get_format(open('/tmp/image.png', 'rb')) == 'png':
print 'File is PNG'
"""
f.seek(0)
try:
img = Image.open(f)
t = img.format.lower()
except IOError:
t = None
return t
def set_uploaded_file_content_type_and_file_ext(f, img_format):
    """
    Set the correct content_type and filename extension on uploaded file f
    according to img_format (e.g. 'jpeg', 'png', 'gif').
    Raises RuntimeError if img_format is not an allowed upload extension.
    Plain (non-UploadedFile) file objects are left untouched.
    """
    assert isinstance(img_format, basestring)
    img_format = img_format.lower()
    if img_format not in dju_settings.DJU_IMG_UPLOAD_IMG_EXTS:
        # Bare `raise RuntimeError` gave no hint about what went wrong.
        raise RuntimeError('Unsupported image format: {}'.format(img_format))
    if isinstance(f, UploadedFile):
        f.content_type = 'image/{}'.format(img_format)
        f.name = os.path.splitext(f.name)[0] + '.' + img_format
"""
Return True if file f is image (types type) and set its correct content_type and filename extension.
Example:
if is_image(request.FILES['file']):
print 'File is image'
if is_image(open('/tmp/image.jpeg', 'rb')):
print 'File is image'
"""
assert isinstance(types, (list, tuple))
t = image_get_format(f)
if t not in [t.lower() for t in types]:
return False
if set_content_type:
set_uploaded_file_content_type_and_file_ext(f, t)
return True
@contextmanager
def image_save_buffer_fix(maxblock=1048576):
    """
    Context manager that temporarily raises PIL's ImageFile.MAXBLOCK
    (the encoder save-buffer size) and restores the previous value on
    exit, even if saving raises.
    """
    saved_maxblock = ImageFile.MAXBLOCK
    ImageFile.MAXBLOCK = maxblock
    try:
        yield
    finally:
        ImageFile.MAXBLOCK = saved_maxblock
def _save_img(img, f, img_format, **kwargs):
    # Save PIL image `img` into file-object `f` as `img_format`, retrying with
    # progressively larger save buffers and, failing that, with the
    # 'optimize'/'progressive' options dropped (the usual causes of encoder
    # buffer IOErrors). Afterwards, optionally losslessly re-compress JPEG
    # output through jpegtran.
    if isinstance(f, UploadedFile):
        f = f.file
    # Retry ladder: each entry scales the buffer (mb_x) and/or disables
    # the problematic save options; tried in order until one succeeds.
    modes = ({},
             {'mb_x': 5},
             {'mb_x': 10},
             {'mb_x': 10, 'disable_optimize': True},
             {'mb_x': 10, 'disable_optimize': True, 'disable_progressive': True})
    maxblock = max(ImageFile.MAXBLOCK, img.size[0] * img.size[1])
    last_error = None
    # JPEG cannot be written from non-RGB modes; convert while preserving
    # the recorded source format.
    if img_format.upper() == 'JPEG' and img.mode != 'RGB':
        current_format = img.format
        img = img.convert('RGB')
        img.format = current_format
    for mode in modes:
        try:
            kw = kwargs.copy()
            if mode.get('disable_optimize'):
                kw.pop('optimize')
            if mode.get('disable_progressive'):
                kw.pop('progressive')
            with image_save_buffer_fix(maxblock * mode.get('mb_x', 1)):
                img.save(f, format=img_format, **kw)
            last_error = None
            break
        except IOError, e:
            last_error = e
    if last_error:
        # Every retry mode failed; surface the last IOError.
        raise last_error
    # Optional post-pass: pipe the saved JPEG through jpegtran for a
    # lossless optimized/progressive re-encode; silently skipped on failure.
    if image_get_format(f) == 'jpeg' and dju_settings.DJU_IMG_USE_JPEGTRAN:
        f.seek(0)
        try:
            p = subprocess.Popen(['jpegtran', '-copy', 'none', '-optimize', '-progressive'],
                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            r = p.communicate(f.read())[0]
        except IOError:
            r = None
        if r:
            truncate_file(f)
            f.write(r)
def get_image_as_rgb(f):
    """
    Convert the image in file-object f to the RGB colorspace by piping it
    through ImageMagick's `convert`, returning the result as a PIL Image.
    Returns None when the conversion fails (e.g. `convert` is missing,
    the pipe breaks, or nothing is produced on stdout).
    """
    f.seek(0)
    try:
        proc = subprocess.Popen(
            ['convert', '-colorspace', 'rgb', '-', '-'],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        )
        converted, _ = proc.communicate(f.read())
    except IOError:
        converted = None
    if not converted:
        return None
    return Image.open(StringIO(converted))
def optimize_png_file(f, o=None):
    """
    Use pngquant for optimize PNG-image.
    f - path to input image file or file-object.
    o - path to output image file or file-object for save result;
        when omitted, the result replaces f.
    Returns True on success, False when pngquant fails or is unavailable.
    NOTICE: f and o can not be of different type
    """
    if isinstance(f, basestring):
        # Path mode: let pngquant read and write the files directly.
        if o is None:
            o = f
        else:
            assert isinstance(o, basestring)
        try:
            subprocess.check_call(['pngquant', '--force', '--output', o, f])
        except subprocess.CalledProcessError:
            return False
        return True
    # File-object mode: pipe the image through pngquant's stdin/stdout.
    if not hasattr(f, 'read'):
        raise RuntimeError('f must be a file path or a readable file-object')
    if o is None:
        o = f
    elif not hasattr(o, 'write'):
        # Bug fix: the original validated hasattr(f, 'write') here, i.e. it
        # checked the *input* instead of the explicit *output* object.
        raise RuntimeError('o must be a writable file-object')
    f.seek(0)
    try:
        p = subprocess.Popen(['pngquant', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        r = p.communicate(f.read())[0]
    except IOError:
        r = None
    if r:
        truncate_file(o)
        o.write(r)
        return True
    return False
def adjust_image(f, max_size=(800, 800), new_format=None, jpeg_quality=90, fill=False, stretch=False,
                 return_new_image=False, force_jpeg_save=True):
    """
    Adjust the image to fit the given parameters.
    max_size - maximum image size; one of the two dimensions may be None (auto)
    new_format - file format (jpeg, png, gif); if None, the original format is kept
    jpeg_quality - JPEG quality
    fill - whether the image should fill the target box when cropping
           (otherwise it is fitted inside the box)
    stretch - whether to stretch the image up if it is too small
    return_new_image - if True, a new StringIO object with the image is returned;
                       otherwise a bool telling whether the file was changed
    force_jpeg_save - if True, a JPEG file is re-saved in any case
    """
    assert isinstance(max_size, (list, tuple)) and len(max_size) == 2
    assert 0 < jpeg_quality <= 100
    if new_format:
        new_format = new_format.lower()
        if new_format not in ('jpeg', 'png', 'gif'):
            raise RuntimeError('Invalid new_format value.')
    f.seek(0)
    img = Image.open(f)
    # JPEG output needs RGB mode. Prefer an ImageMagick colorspace conversion
    # when enabled, falling back to PIL's convert().
    if ((new_format == 'jpeg' and img.mode != 'RGB') or
            (new_format is None and img.format == 'JPEG' and img.mode != 'RGB')):
        do_convert = True
        if dju_settings.DJU_IMG_CONVERT_JPEG_TO_RGB:
            # Bug fix: previously `img` itself was overwritten with the result
            # of get_image_as_rgb(), so a failed conversion (None) crashed the
            # PIL fallback below with an AttributeError.
            rgb_img = get_image_as_rgb(f)
            if rgb_img is not None:
                img = rgb_img
                do_convert = False
        if do_convert:
            current_format = img.format
            img = img.convert('RGB')
            img.format = current_format
    max_width, max_height = max_size
    img_width, img_height = img.size
    img_format = img.format.lower()
    ch_size = ch_format = False
    # Auto-derive the missing dimension from the image's aspect ratio.
    if max_width is None:
        max_width = int(((img_width / float(img_height)) * max_height))
    elif max_height is None:
        max_height = int(((img_height / float(img_width)) * max_width))
    if (img_width, img_height) != (max_width, max_height):
        # Build a small pipeline of ('method', args, kwargs) PIL operations.
        tasks = []
        if fill:
            if (img_width < max_width or img_height < max_height) and not stretch:
                # Too small and stretching disallowed: center-crop to the
                # target aspect ratio without resizing.
                k = max(max_width / float(img_width), max_height / float(img_height))
                w, h = max_width / k, max_height / k
                left, top = int((img_width - w) / 2.), int((img_height - h) / 2.)
                tasks.append(('crop', ((left, top, int(left + w), int(top + h)),), {}))
            else:
                # Scale so the image covers the box, then center-crop.
                k = min(img_width / float(max_width), img_height / float(max_height))
                w, h = img_width / k, img_height / k
                tasks.append(('resize', ((int(w), int(h)), Image.LANCZOS), {}))
                left, top = int((w - max_width) / 2.), int((h - max_height) / 2.)
                tasks.append(('crop', ((left, top, left + max_width, top + max_height),), {}))
        elif ((img_width > max_width or img_height > max_height) or
                (img_width < max_width and img_height < max_height and stretch)):
            # Fit inside the box (shrinking, or growing when stretch=True).
            k = max(img_width / float(max_width), img_height / float(max_height))
            w, h = int(img_width / k), int(img_height / k)
            tasks.append(('resize', ((w, h), Image.LANCZOS), {}))
        for img_method, method_args, method_kwargs in tasks:
            # Skip no-op resizes/crops so ch_size only reflects real changes.
            if ((img_method == 'resize' and method_args[0] == (img_width, img_height)) or
                    (img_method == 'crop' and method_args[0] == (0, 0, img.size[0], img.size[1]))):
                continue
            img = getattr(img, img_method)(*method_args, **method_kwargs)
            ch_size = True
    if new_format and new_format != img_format:
        img_format = new_format
        ch_format = True
    if not ch_format and img_format == 'jpeg' and force_jpeg_save:
        # Always re-save JPEGs (re-compression / jpegtran pass) when asked to.
        ch_format = True
    if return_new_image:
        t = StringIO()
        _save_img(img, t, img_format=img_format, quality=jpeg_quality, progressive=True, optimize=True)
        return t
    if ch_size or ch_format:
        img.load()
        truncate_file(f)
        _save_img(img, f, img_format=img_format, quality=jpeg_quality, progressive=True, optimize=True)
        if isinstance(f, UploadedFile):
            # Refresh the reported size after rewriting the underlying file.
            f.seek(0, 2)
            f.size = f.tell()
        set_uploaded_file_content_type_and_file_ext(f, img_format)
    return ch_size or ch_format
| 37.384615 | 120 | 0.598224 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.