code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import smart_match
from math import sqrt
class MongeElkan:
    """Hybrid Monge-Elkan similarity over token sequences.

    Token pairs are scored with an inner similarity method resolved through
    ``smart_match.get_method``; the final score is the geometric mean of the
    two directed Monge-Elkan scores, which makes the measure symmetric.
    """
    def __init__(self, method=None):
        # Resolve the token-level similarity method by name.
        self.method = smart_match.get_method(method)
    def similarity(self, X, Y):
        """Return the symmetric Monge-Elkan similarity of sequences X and Y."""
        if not X and not Y:
            return 1
        if not X or not Y:
            return 0
        # Geometric mean of both directed scores symmetrizes the measure.
        return sqrt(self.monge_elkan(X, Y) * self.monge_elkan(Y, X))
    def monge_elkan(self, s, t):
        """Directed score: mean over tokens of s of the best match in t."""
        best_scores = []
        for token in s:
            best = 0
            for candidate in t:
                score = self.method.similarity(token, candidate)
                if score > best:
                    best = score
            best_scores.append(best)
        return sum(best_scores) / len(s)
    def dissimilarity(self, s, t):
        """Return 1 - similarity(s, t)."""
        return 1 - self.similarity(s, t)
    def __repr__(self):
        return f'MongeElkan [method={self.method}]'
"smart_match.get_method"
] | [((124, 154), 'smart_match.get_method', 'smart_match.get_method', (['method'], {}), '(method)\n', (146, 154), False, 'import smart_match\n')] |
import numpy as np
from skimage.measure import label
from lib.utils_lung_segmentation import get_max_rect_in_mask
def getLargestCC(segmentation):
    '''Find the largest connected component of a binary mask.

    return: boolean mask that is True on the largest connected component'''
    labelled = label(segmentation)
    assert labelled.max() != 0  # at least one connected component must exist
    # Component sizes, skipping label 0 (background); +1 restores the label id.
    component_sizes = np.bincount(labelled.flat)[1:]
    biggest_label = np.argmax(component_sizes) + 1
    return labelled == biggest_label
def test_max_rect_in_mask(random_blobs):
    '''check that the largest rectangle inside a binary mask is found
    and that its coordinates match the expected values'''
    largest_component = getLargestCC(random_blobs)
    rect_coords = get_max_rect_in_mask(largest_component)
    assert rect_coords == (83, 125, 143, 155)
| [
"numpy.bincount",
"skimage.measure.label",
"lib.utils_lung_segmentation.get_max_rect_in_mask"
] | [((266, 285), 'skimage.measure.label', 'label', (['segmentation'], {}), '(segmentation)\n', (271, 285), False, 'from skimage.measure import label\n'), ((690, 725), 'lib.utils_lung_segmentation.get_max_rect_in_mask', 'get_max_rect_in_mask', (['blobs_largest'], {}), '(blobs_largest)\n', (710, 725), False, 'from lib.utils_lung_segmentation import get_max_rect_in_mask\n'), ((376, 400), 'numpy.bincount', 'np.bincount', (['labels.flat'], {}), '(labels.flat)\n', (387, 400), True, 'import numpy as np\n')] |
import math
from .hive_position import HivePosition
from .bee_movement import BeeMovement
class BeeTrackingObject:
    """Aggregated tracking state for one bee across video frames.

    Collects position estimates frame by frame and derives the flight
    distance, flight angle and whether the bee moved towards or away
    from the hive.
    """
    object_id: int
    start_frame_id: int
    end_frame_id: int
    end_age: int
    position_estimates: [(int, int)]
    angle: int = -1  # 0° is if the bee flies "to the right on the x-axis". Angle turns clockwise
    flight_distance: float
    flies_out_of_frame: bool
    bee_movement: BeeMovement
    def __init__(self, object_id: int, start_frame_id: int, age: int, initial_estimate: tuple):
        self.object_id = object_id
        self.start_frame_id = start_frame_id
        # Until updated externally, the track ends where it starts.
        self.end_frame_id = start_frame_id
        self.end_age = age
        self.position_estimates = [initial_estimate]
    def _calculate_directions(self, hive_position: HivePosition, moving_offset: int):
        """
        Classifies the bee's movement relative to the hive
        Args:
            hive_position: The position of the hive in the frame
            moving_offset: In pixels on the frame. Has to be calculated based on the image resolution!
        """
        # Bees that moved less than the offset count as stationary.
        if self.flight_distance < moving_offset:
            self.bee_movement = BeeMovement.NO_MOVEMENT
            return
        # The hive-facing sector spans +/- 90 degrees around the hive direction.
        lower_bound = (hive_position.value - 90) % 360
        upper_bound = (hive_position.value + 90) % 360
        first_estimate = self.position_estimates[0]
        last_estimate = self.position_estimates[-1]
        delta_x = last_estimate[0] - first_estimate[0]
        delta_y = last_estimate[1] - first_estimate[1]
        self.angle = int(math.degrees(math.atan2(delta_y, delta_x)) % 360)
        if lower_bound < upper_bound:
            # e.g. HivePos.LEFT gives bounds (90, 270); a bee at 300 deg flies
            # away, so membership in the interval must be FALSE for that case.
            heads_to_hive = self.angle in range(lower_bound, upper_bound)
        else:
            # e.g. HivePos.RIGHT gives bounds (270, 90); the interval wraps
            # around 0 deg, so test against the complementary interval instead.
            heads_to_hive = self.angle not in range(upper_bound, lower_bound)
        if self.flies_out_of_frame and heads_to_hive:
            self.bee_movement = BeeMovement.TO_HIVE
        elif self.flies_out_of_frame:
            self.bee_movement = BeeMovement.FROM_HIVE
        else:
            self.bee_movement = BeeMovement.NO_MOVEMENT
    def _calculate_distances(self):
        """Computes the straight-line distance between first and last estimate."""
        first_estimate = self.position_estimates[0]
        last_estimate = self.position_estimates[-1]
        delta_x = last_estimate[0] - first_estimate[0]
        delta_y = last_estimate[1] - first_estimate[1]
        self.flight_distance = math.sqrt(delta_x ** 2 + delta_y ** 2)
    def determine_movement(self, hive_position, moving_offset):
        """Derives flight distance and movement classification for this track."""
        self._calculate_distances()
        self._calculate_directions(hive_position, moving_offset)
    def get_attribute_dict(self) -> dict:
        """Serializes the tracking result into a plain dictionary."""
        return {
            'object_id': self.object_id,
            'start_frame_id': self.start_frame_id,
            'end_frame_id': self.end_frame_id,
            'end_age': self.end_age,
            'estimates': self.position_estimates,
            'angle': self.angle,
            'flight_distance': self.flight_distance,
            'flies_out_of_frame': self.flies_out_of_frame,
            'bee_movement': self.bee_movement.name
        }
| [
"math.sqrt",
"math.atan2"
] | [((2613, 2661), 'math.sqrt', 'math.sqrt', (['(x_difference ** 2 + y_difference ** 2)'], {}), '(x_difference ** 2 + y_difference ** 2)\n', (2622, 2661), False, 'import math\n'), ((1496, 1534), 'math.atan2', 'math.atan2', (['y_difference', 'x_difference'], {}), '(y_difference, x_difference)\n', (1506, 1534), False, 'import math\n')] |
import importlib
def load_class(cls, *args, **kwargs):
    """Instantiate the class named by the dotted path *cls*.

    Any extra positional/keyword arguments are forwarded to the constructor.
    Returns None when *cls* is None.
    """
    if cls is None:
        return None
    module_path, class_name = cls.rsplit(".", 1)
    module = importlib.import_module(module_path)
    factory = getattr(module, class_name)
    return factory(*args, **kwargs)
| [
"importlib.import_module"
] | [((165, 201), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (188, 201), False, 'import importlib\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object detection box prediction losses."""
import gin
import gin.tf
import tensorflow as tf
from tf3d import standard_fields
from tf3d.losses import utils as loss_utils
from tf3d.utils import batch_utils
from tf3d.utils import box_utils
from tf3d.utils import mask_utils
def _box_rotation_regression_loss(loss_type, is_balanced,
                                  input_boxes_rotation_matrix,
                                  input_boxes_instance_id,
                                  output_boxes_rotation_matrix, delta):
  """Computes regression loss on object rotations.

  Args:
    loss_type: Either 'huber' or 'absolute_difference'.
    is_balanced: If True, the losses are re-weighted to have equal total
      weight for each object instance.
    input_boxes_rotation_matrix: Ground-truth rotation matrices.
    input_boxes_instance_id: Object instance ids used for loss balancing.
    output_boxes_rotation_matrix: Predicted rotation matrices.
    delta: Huber loss delta (only used when loss_type is 'huber').

  Returns:
    A scalar tf.float32 loss; 0.0 when either input or output is empty.

  Raises:
    ValueError: If loss_type is unknown (raised while tracing fn).
  """
  def fn():
    """Loss function for when number of input and output boxes is positive."""
    if is_balanced:
      weights = loss_utils.get_balanced_loss_weights_multiclass(
          labels=input_boxes_instance_id)
    else:
      # Uniform weight of 1 per box.
      weights = tf.ones([tf.shape(input_boxes_instance_id)[0], 1],
                        dtype=tf.float32)
    # Flatten each 3x3 rotation matrix to a 9-vector before comparison.
    gt_rotation_matrix = tf.reshape(input_boxes_rotation_matrix, [-1, 9])
    predicted_rotation_matrix = tf.reshape(output_boxes_rotation_matrix,
                                           [-1, 9])
    if loss_type == 'huber':
      loss_fn = tf.keras.losses.Huber(
          delta=delta, reduction=tf.keras.losses.Reduction.NONE)
    elif loss_type == 'absolute_difference':
      loss_fn = tf.keras.losses.MeanAbsoluteError(
          reduction=tf.keras.losses.Reduction.NONE)
    else:
      raise ValueError(('Unknown loss type %s.' % loss_type))
    rotation_losses = loss_fn(
        y_true=gt_rotation_matrix, y_pred=predicted_rotation_matrix)
    # Weighted mean over all boxes.
    return tf.reduce_mean(rotation_losses * tf.reshape(weights, [-1]))
  # Guard against empty inputs/outputs: return 0.0 instead of computing fn.
  cond_input = tf.greater(tf.shape(input_boxes_rotation_matrix)[0], 0)
  cond_output = tf.greater(tf.shape(output_boxes_rotation_matrix)[0], 0)
  cond = tf.logical_and(cond_input, cond_output)
  return tf.cond(cond, fn, lambda: tf.constant(0.0, dtype=tf.float32))
def _box_size_regression_loss(loss_type, is_balanced, input_boxes_length,
                              input_boxes_height, input_boxes_width,
                              input_boxes_instance_id, output_boxes_length,
                              output_boxes_height, output_boxes_width, delta):
  """Computes regression loss on object sizes.

  Predicted dimensions are divided by the ground-truth dimensions, so the
  regression target becomes all-ones and the loss measures relative size
  error.

  Args:
    loss_type: Either 'huber' or 'absolute_difference'.
    is_balanced: If True, the losses are re-weighted to have equal total
      weight for each object instance.
    input_boxes_length: Ground-truth box lengths.
    input_boxes_height: Ground-truth box heights.
    input_boxes_width: Ground-truth box widths.
    input_boxes_instance_id: Object instance ids used for loss balancing.
    output_boxes_length: Predicted box lengths.
    output_boxes_height: Predicted box heights.
    output_boxes_width: Predicted box widths.
    delta: Huber loss delta (only used when loss_type is 'huber').

  Returns:
    A scalar tf.float32 loss; 0.0 when either input or output is empty.

  Raises:
    ValueError: If loss_type is unknown (raised while tracing fn).
  """
  def fn():
    """Loss function for when number of input and output boxes is positive."""
    if is_balanced:
      weights = loss_utils.get_balanced_loss_weights_multiclass(
          labels=input_boxes_instance_id)
    else:
      # Uniform weight of 1 per box.
      weights = tf.ones([tf.shape(input_boxes_instance_id)[0], 1],
                        dtype=tf.float32)
    gt_length = tf.reshape(input_boxes_length, [-1, 1])
    gt_height = tf.reshape(input_boxes_height, [-1, 1])
    gt_width = tf.reshape(input_boxes_width, [-1, 1])
    predicted_length = tf.reshape(output_boxes_length, [-1, 1])
    predicted_height = tf.reshape(output_boxes_height, [-1, 1])
    predicted_width = tf.reshape(output_boxes_width, [-1, 1])
    # Normalize predictions by ground truth; a perfect prediction becomes 1.0.
    predicted_length /= gt_length
    predicted_height /= gt_height
    predicted_width /= gt_width
    predicted_size = tf.concat(
        [predicted_length, predicted_height, predicted_width], axis=1)
    gt_size = tf.ones_like(predicted_size)
    if loss_type == 'huber':
      loss_fn = tf.keras.losses.Huber(
          delta=delta, reduction=tf.keras.losses.Reduction.NONE)
    elif loss_type == 'absolute_difference':
      loss_fn = tf.keras.losses.MeanAbsoluteError(
          reduction=tf.keras.losses.Reduction.NONE)
    else:
      raise ValueError(('Unknown loss type %s.' % loss_type))
    size_losses = loss_fn(y_true=gt_size, y_pred=predicted_size)
    # Weighted mean over all boxes.
    return tf.reduce_mean(size_losses * tf.reshape(weights, [-1]))
  # Guard against empty inputs/outputs: return 0.0 instead of computing fn.
  cond_input = tf.greater(tf.shape(input_boxes_length)[0], 0)
  cond_output = tf.greater(tf.shape(output_boxes_length)[0], 0)
  cond = tf.logical_and(cond_input, cond_output)
  return tf.cond(cond, fn, lambda: tf.constant(0.0, dtype=tf.float32))
def _box_center_distance_loss(loss_type, is_balanced, input_boxes_center,
                              input_boxes_instance_id, output_boxes_center,
                              delta):
  """Computes regression loss on object center locations.

  Args:
    loss_type: Either 'huber' or 'absolute_difference'.
    is_balanced: If True, the losses are re-weighted to have equal total
      weight for each object instance.
    input_boxes_center: Ground-truth box centers.
    input_boxes_instance_id: Object instance ids used for loss balancing.
    output_boxes_center: Predicted box centers.
    delta: Huber loss delta (only used when loss_type is 'huber').

  Returns:
    A scalar tf.float32 loss; 0.0 when either input or output is empty.

  Raises:
    ValueError: If loss_type is unknown (raised while tracing fn).
  """
  def fn():
    """Loss function for when number of input and output boxes is positive."""
    if is_balanced:
      weights = loss_utils.get_balanced_loss_weights_multiclass(
          labels=input_boxes_instance_id)
    else:
      # Uniform weight of 1 per box.
      weights = tf.ones([tf.shape(input_boxes_instance_id)[0], 1],
                        dtype=tf.float32)
    # Centers are 3-D points.
    gt_center = tf.reshape(input_boxes_center, [-1, 3])
    predicted_center = tf.reshape(output_boxes_center, [-1, 3])
    if loss_type == 'huber':
      loss_fn = tf.keras.losses.Huber(
          delta=delta, reduction=tf.keras.losses.Reduction.NONE)
    elif loss_type == 'absolute_difference':
      loss_fn = tf.keras.losses.MeanAbsoluteError(
          reduction=tf.keras.losses.Reduction.NONE)
    else:
      raise ValueError(('Unknown loss type %s.' % loss_type))
    center_losses = loss_fn(y_true=gt_center, y_pred=predicted_center)
    # Weighted mean over all boxes.
    return tf.reduce_mean(center_losses * tf.reshape(weights, [-1]))
  # Guard against empty inputs/outputs: return 0.0 instead of computing fn.
  cond_input = tf.greater(tf.shape(input_boxes_center)[0], 0)
  cond_output = tf.greater(tf.shape(output_boxes_center)[0], 0)
  cond = tf.logical_and(cond_input, cond_output)
  return tf.cond(cond, fn, lambda: tf.constant(0.0, dtype=tf.float32))
def _box_corner_distance_loss(
    loss_type, is_balanced, input_boxes_length, input_boxes_height,
    input_boxes_width, input_boxes_center, input_boxes_rotation_matrix,
    input_boxes_instance_id, output_boxes_length, output_boxes_height,
    output_boxes_width, output_boxes_center, output_boxes_rotation_matrix,
    delta):
  """Computes regression loss on object corner locations.

  The eight corners of each predicted box are compared against the corners of
  the corresponding ground-truth box, which couples size, center and rotation
  errors into a single loss. For the 'normalized_*' loss types, both boxes are
  rescaled so each ground-truth dimension equals normalized_box_size before
  the corners are computed.

  Args:
    loss_type: One of 'huber', 'normalized_huber', 'absolute_difference',
      'normalized_absolute_difference'.
    is_balanced: If True, the losses are re-weighted to have equal total
      weight for each object instance.
    input_boxes_length: Ground-truth box lengths.
    input_boxes_height: Ground-truth box heights.
    input_boxes_width: Ground-truth box widths.
    input_boxes_center: Ground-truth box centers.
    input_boxes_rotation_matrix: Ground-truth rotation matrices.
    input_boxes_instance_id: Object instance ids used for loss balancing.
    output_boxes_length: Predicted box lengths.
    output_boxes_height: Predicted box heights.
    output_boxes_width: Predicted box widths.
    output_boxes_center: Predicted box centers.
    output_boxes_rotation_matrix: Predicted rotation matrices.
    delta: Huber loss delta (only used for the huber loss types).

  Returns:
    A scalar tf.float32 loss; 0.0 when either input or output is empty.

  Raises:
    ValueError: If loss_type is unknown (raised while tracing fn).
  """
  def fn():
    """Loss function for when number of input and output boxes is positive."""
    if is_balanced:
      weights = loss_utils.get_balanced_loss_weights_multiclass(
          labels=input_boxes_instance_id)
    else:
      # Uniform weight of 1 per box.
      weights = tf.ones([tf.shape(input_boxes_instance_id)[0], 1],
                        dtype=tf.float32)
    normalized_box_size = 5.0
    predicted_boxes_length = output_boxes_length
    predicted_boxes_height = output_boxes_height
    predicted_boxes_width = output_boxes_width
    predicted_boxes_center = output_boxes_center
    predicted_boxes_rotation_matrix = output_boxes_rotation_matrix
    gt_boxes_length = input_boxes_length
    gt_boxes_height = input_boxes_height
    gt_boxes_width = input_boxes_width
    gt_boxes_center = input_boxes_center
    gt_boxes_rotation_matrix = input_boxes_rotation_matrix
    if loss_type in ['normalized_huber', 'normalized_euclidean']:
      # Rescale so every ground-truth dimension becomes normalized_box_size;
      # predictions are scaled by the same per-box factor.
      # NOTE(review): 'normalized_euclidean' enters this branch but has no
      # matching branch in the loss selection below, so it ends in the
      # ValueError — confirm whether a euclidean loss was intended.
      predicted_boxes_length /= (gt_boxes_length / normalized_box_size)
      predicted_boxes_height /= (gt_boxes_height / normalized_box_size)
      predicted_boxes_width /= (gt_boxes_width / normalized_box_size)
      gt_boxes_length = tf.ones_like(
          gt_boxes_length, dtype=tf.float32) * normalized_box_size
      gt_boxes_height = tf.ones_like(
          gt_boxes_height, dtype=tf.float32) * normalized_box_size
      gt_boxes_width = tf.ones_like(
          gt_boxes_width, dtype=tf.float32) * normalized_box_size
    gt_box_corners = box_utils.get_box_corners_3d(
        boxes_length=gt_boxes_length,
        boxes_height=gt_boxes_height,
        boxes_width=gt_boxes_width,
        boxes_rotation_matrix=gt_boxes_rotation_matrix,
        boxes_center=gt_boxes_center)
    predicted_box_corners = box_utils.get_box_corners_3d(
        boxes_length=predicted_boxes_length,
        boxes_height=predicted_boxes_height,
        boxes_width=predicted_boxes_width,
        boxes_rotation_matrix=predicted_boxes_rotation_matrix,
        boxes_center=predicted_boxes_center)
    # Each box contributes 8 corners; repeat its weight for every corner.
    corner_weights = tf.tile(weights, [1, 8])
    if loss_type in ['huber', 'normalized_huber']:
      loss_fn = tf.keras.losses.Huber(
          delta=delta, reduction=tf.keras.losses.Reduction.NONE)
    elif loss_type in ['normalized_absolute_difference', 'absolute_difference']:
      loss_fn = tf.keras.losses.MeanAbsoluteError(
          reduction=tf.keras.losses.Reduction.NONE)
    else:
      raise ValueError(('Unknown loss type %s.' % loss_type))
    box_corner_losses = loss_fn(
        y_true=tf.reshape(gt_box_corners, [-1, 3]),
        y_pred=tf.reshape(predicted_box_corners, [-1, 3]))
    # Weighted mean over all corners of all boxes.
    return tf.reduce_mean(box_corner_losses * tf.reshape(corner_weights, [-1]))
  # Guard against empty inputs/outputs: return 0.0 instead of computing fn.
  cond_input = tf.greater(tf.shape(input_boxes_length)[0], 0)
  cond_output = tf.greater(tf.shape(output_boxes_length)[0], 0)
  cond = tf.logical_and(cond_input, cond_output)
  return tf.cond(cond, fn, lambda: tf.constant(0.0, dtype=tf.float32))
def _get_voxels_valid_mask(inputs_1):
  """Builds the boolean mask of voxels that should contribute to the loss.

  A voxel is valid when it is within the real (non-padded) voxel count and
  lies inside an object.
  """
  has_voxels = mask_utils.num_voxels_mask(inputs=inputs_1)
  inside_objects = mask_utils.voxels_within_objects_mask(inputs=inputs_1)
  return tf.logical_and(inside_objects, has_voxels)
def _get_voxels_valid_inputs_outputs(inputs_1, outputs_1):
  """Restricts input and output voxel tensors to the valid voxels.

  The input dictionary is replaced by a masked copy; the output dictionary is
  handed to the mask_utils helper whose return value is unused (presumably it
  updates the tensors in place — see mask_utils).

  Returns:
    A (masked_inputs, outputs, valid_mask) tuple.
  """
  mask = _get_voxels_valid_mask(inputs_1=inputs_1)
  masked_inputs = mask_utils.apply_mask_to_input_voxel_tensors(
      inputs=inputs_1, valid_mask=mask)
  mask_utils.apply_mask_to_output_voxel_tensors(
      outputs=outputs_1, valid_mask=mask)
  return masked_inputs, outputs_1, mask
def _box_rotation_regression_loss_on_voxel_tensors_unbatched(
    inputs_1, outputs_1, loss_type, delta, is_balanced, is_intermediate):
  """Computes regression loss on predicted object rotation for each voxel.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example.
    loss_type: Loss type forwarded to _box_rotation_regression_loss.
    delta: Huber loss delta.
    is_balanced: If True, re-weights losses per object instance.
    is_intermediate: If True, uses the intermediate prediction tensors.

  Returns:
    A scalar tf.float32 loss; 0.0 when no voxel is valid.
  """
  # Restrict both dictionaries to voxels that are valid and inside objects.
  inputs_1, outputs_1, valid_mask = _get_voxels_valid_inputs_outputs(
      inputs_1=inputs_1, outputs_1=outputs_1)
  def loss_fn_unbatched():
    """Loss function."""
    if is_intermediate:
      output_boxes_rotation_matrix = outputs_1[
          standard_fields.DetectionResultFields
          .intermediate_object_rotation_matrix_voxels]
    else:
      output_boxes_rotation_matrix = outputs_1[
          standard_fields.DetectionResultFields.object_rotation_matrix_voxels]
    return _box_rotation_regression_loss(
        loss_type=loss_type,
        is_balanced=is_balanced,
        input_boxes_rotation_matrix=inputs_1[
            standard_fields.InputDataFields.object_rotation_matrix_voxels],
        input_boxes_instance_id=inputs_1[
            standard_fields.InputDataFields.object_instance_id_voxels],
        output_boxes_rotation_matrix=output_boxes_rotation_matrix,
        delta=delta)
  # Skip the loss entirely when no voxel survives the valid mask.
  return tf.cond(
      tf.reduce_any(valid_mask),
      loss_fn_unbatched, lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable(
    'box_rotation_regression_loss_on_voxel_tensors',
    blacklist=['inputs', 'outputs'])
def box_rotation_regression_loss_on_voxel_tensors(inputs,
                                                  outputs,
                                                  loss_type,
                                                  delta=0.5,
                                                  is_balanced=False,
                                                  is_intermediate=False):
  """Computes regression loss on object rotation for each voxel.

  Args:
    inputs: A dictionary of tf.Tensors with our input data.
    outputs: A dictionary of tf.Tensors with the network output.
    loss_type: Loss type.
    delta: float, the threshold at which the huber loss switches from
      quadratic to linear.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for each object instance.
    is_intermediate: If True, intermediate tensors are used for computing
      the loss.

  Returns:
    localization_loss: A tf.float32 scalar corresponding to localization loss.
  """
  standard_fields.check_input_voxel_fields(inputs=inputs)
  standard_fields.check_output_voxel_fields(outputs=outputs)

  def unbatched_fn(example_inputs, example_outputs):
    """Computes the rotation regression loss for one batch element."""
    return _box_rotation_regression_loss_on_voxel_tensors_unbatched(
        inputs_1=example_inputs,
        outputs_1=example_outputs,
        loss_type=loss_type,
        delta=delta,
        is_balanced=is_balanced,
        is_intermediate=is_intermediate)

  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs, outputs=outputs, unbatched_loss_fn=unbatched_fn)
def _box_size_regression_loss_on_voxel_tensors_unbatched(
    inputs_1, outputs_1, loss_type, delta, is_balanced, is_intermediate):
  """Computes regression loss on predicted object size for each voxel.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example.
    loss_type: Loss type forwarded to _box_size_regression_loss.
    delta: Huber loss delta.
    is_balanced: If True, re-weights losses per object instance.
    is_intermediate: If True, uses the intermediate prediction tensors.

  Returns:
    A scalar tf.float32 loss; 0.0 when no voxel is valid.
  """
  # Restrict both dictionaries to voxels that are valid and inside objects.
  inputs_1, outputs_1, valid_mask = _get_voxels_valid_inputs_outputs(
      inputs_1=inputs_1, outputs_1=outputs_1)
  def loss_fn_unbatched():
    """Loss function."""
    if is_intermediate:
      output_boxes_length = outputs_1[standard_fields.DetectionResultFields
                                      .intermediate_object_length_voxels]
      output_boxes_height = outputs_1[standard_fields.DetectionResultFields
                                      .intermediate_object_height_voxels]
      output_boxes_width = outputs_1[standard_fields.DetectionResultFields
                                     .intermediate_object_width_voxels]
    else:
      output_boxes_length = outputs_1[
          standard_fields.DetectionResultFields.object_length_voxels]
      output_boxes_height = outputs_1[
          standard_fields.DetectionResultFields.object_height_voxels]
      output_boxes_width = outputs_1[
          standard_fields.DetectionResultFields.object_width_voxels]
    return _box_size_regression_loss(
        loss_type=loss_type,
        is_balanced=is_balanced,
        input_boxes_length=inputs_1[
            standard_fields.InputDataFields.object_length_voxels],
        input_boxes_height=inputs_1[
            standard_fields.InputDataFields.object_height_voxels],
        input_boxes_width=inputs_1[
            standard_fields.InputDataFields.object_width_voxels],
        input_boxes_instance_id=inputs_1[
            standard_fields.InputDataFields.object_instance_id_voxels],
        output_boxes_length=output_boxes_length,
        output_boxes_height=output_boxes_height,
        output_boxes_width=output_boxes_width,
        delta=delta)
  # Skip the loss entirely when no voxel survives the valid mask.
  return tf.cond(
      tf.reduce_any(valid_mask),
      loss_fn_unbatched, lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable(
    'box_size_regression_loss_on_voxel_tensors',
    blacklist=['inputs', 'outputs'])
def box_size_regression_loss_on_voxel_tensors(inputs,
                                              outputs,
                                              loss_type,
                                              delta=0.5,
                                              is_balanced=False,
                                              is_intermediate=False):
  """Computes regression loss on object size for each voxel.

  Args:
    inputs: A dictionary of tf.Tensors with our input data.
    outputs: A dictionary of tf.Tensors with the network output.
    loss_type: Loss type.
    delta: float, the threshold at which the huber loss switches from
      quadratic to linear.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for each object instance.
    is_intermediate: If True, intermediate tensors are used for computing
      the loss.

  Returns:
    localization_loss: A tf.float32 scalar corresponding to localization loss.
  """
  standard_fields.check_input_voxel_fields(inputs=inputs)
  standard_fields.check_output_voxel_fields(outputs=outputs)

  def unbatched_fn(example_inputs, example_outputs):
    """Computes the size regression loss for one batch element."""
    return _box_size_regression_loss_on_voxel_tensors_unbatched(
        inputs_1=example_inputs,
        outputs_1=example_outputs,
        loss_type=loss_type,
        delta=delta,
        is_balanced=is_balanced,
        is_intermediate=is_intermediate)

  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs, outputs=outputs, unbatched_loss_fn=unbatched_fn)
def _box_center_distance_loss_on_voxel_tensors_unbatched(
    inputs_1, outputs_1, loss_type, delta, is_balanced, is_intermediate):
  """Computes huber loss on predicted object centers for each voxel.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example.
    loss_type: Loss type forwarded to _box_center_distance_loss.
    delta: Huber loss delta.
    is_balanced: If True, re-weights losses per object instance.
    is_intermediate: If True, uses the intermediate prediction tensors.

  Returns:
    A scalar tf.float32 loss; 0.0 when no voxel is valid.
  """
  # Restrict both dictionaries to voxels that are valid and inside objects.
  inputs_1, outputs_1, valid_mask = _get_voxels_valid_inputs_outputs(
      inputs_1=inputs_1, outputs_1=outputs_1)
  def loss_fn_unbatched():
    """Loss function."""
    if is_intermediate:
      output_boxes_center = outputs_1[standard_fields.DetectionResultFields
                                      .intermediate_object_center_voxels]
    else:
      output_boxes_center = outputs_1[
          standard_fields.DetectionResultFields.object_center_voxels]
    return _box_center_distance_loss(
        loss_type=loss_type,
        is_balanced=is_balanced,
        input_boxes_center=inputs_1[
            standard_fields.InputDataFields.object_center_voxels],
        input_boxes_instance_id=inputs_1[
            standard_fields.InputDataFields.object_instance_id_voxels],
        output_boxes_center=output_boxes_center,
        delta=delta)
  # Skip the loss entirely when no voxel survives the valid mask.
  return tf.cond(
      tf.reduce_any(valid_mask),
      loss_fn_unbatched, lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable(
    'box_center_distance_loss_on_voxel_tensors',
    blacklist=['inputs', 'outputs'])
def box_center_distance_loss_on_voxel_tensors(inputs,
                                              outputs,
                                              loss_type,
                                              delta=1.0,
                                              is_balanced=False,
                                              is_intermediate=False):
  """Computes huber loss on object center locations for each voxel.

  Args:
    inputs: A dictionary of tf.Tensors with our input data.
    outputs: A dictionary of tf.Tensors with the network output.
    loss_type: Loss type.
    delta: float, the threshold at which the huber loss switches from
      quadratic to linear.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for each object instance.
    is_intermediate: If True, intermediate tensors are used for computing
      the loss.

  Returns:
    localization_loss: A tf.float32 scalar corresponding to localization loss.
  """
  standard_fields.check_input_voxel_fields(inputs=inputs)
  standard_fields.check_output_voxel_fields(outputs=outputs)

  def unbatched_fn(example_inputs, example_outputs):
    """Computes the center distance loss for one batch element."""
    return _box_center_distance_loss_on_voxel_tensors_unbatched(
        inputs_1=example_inputs,
        outputs_1=example_outputs,
        loss_type=loss_type,
        delta=delta,
        is_balanced=is_balanced,
        is_intermediate=is_intermediate)

  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs, outputs=outputs, unbatched_loss_fn=unbatched_fn)
def _box_corner_distance_loss_on_voxel_tensors_unbatched(
    inputs_1, outputs_1, loss_type, delta, is_balanced, is_intermediate):
  """Computes huber loss on predicted objects for each voxel.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example.
    loss_type: Loss type forwarded to _box_corner_distance_loss.
    delta: Huber loss delta.
    is_balanced: If True, re-weights losses per object instance.
    is_intermediate: If True, uses the intermediate prediction tensors.

  Returns:
    A scalar tf.float32 loss; 0.0 when no voxel is valid.
  """
  # Restrict both dictionaries to voxels that are valid and inside objects.
  inputs_1, outputs_1, valid_mask = _get_voxels_valid_inputs_outputs(
      inputs_1=inputs_1, outputs_1=outputs_1)
  def loss_fn_unbatched():
    """Loss function."""
    if is_intermediate:
      output_boxes_length = outputs_1[standard_fields.DetectionResultFields
                                      .intermediate_object_length_voxels]
      output_boxes_height = outputs_1[standard_fields.DetectionResultFields
                                      .intermediate_object_height_voxels]
      output_boxes_width = outputs_1[standard_fields.DetectionResultFields
                                     .intermediate_object_width_voxels]
      output_boxes_center = outputs_1[standard_fields.DetectionResultFields
                                      .intermediate_object_center_voxels]
      output_boxes_rotation_matrix = outputs_1[
          standard_fields.DetectionResultFields
          .intermediate_object_rotation_matrix_voxels]
    else:
      output_boxes_length = outputs_1[
          standard_fields.DetectionResultFields.object_length_voxels]
      output_boxes_height = outputs_1[
          standard_fields.DetectionResultFields.object_height_voxels]
      output_boxes_width = outputs_1[
          standard_fields.DetectionResultFields.object_width_voxels]
      output_boxes_center = outputs_1[
          standard_fields.DetectionResultFields.object_center_voxels]
      output_boxes_rotation_matrix = outputs_1[
          standard_fields.DetectionResultFields.object_rotation_matrix_voxels]
    return _box_corner_distance_loss(
        loss_type=loss_type,
        is_balanced=is_balanced,
        input_boxes_length=inputs_1[
            standard_fields.InputDataFields.object_length_voxels],
        input_boxes_height=inputs_1[
            standard_fields.InputDataFields.object_height_voxels],
        input_boxes_width=inputs_1[
            standard_fields.InputDataFields.object_width_voxels],
        input_boxes_center=inputs_1[
            standard_fields.InputDataFields.object_center_voxels],
        input_boxes_rotation_matrix=inputs_1[
            standard_fields.InputDataFields.object_rotation_matrix_voxels],
        input_boxes_instance_id=inputs_1[
            standard_fields.InputDataFields.object_instance_id_voxels],
        output_boxes_length=output_boxes_length,
        output_boxes_height=output_boxes_height,
        output_boxes_width=output_boxes_width,
        output_boxes_center=output_boxes_center,
        output_boxes_rotation_matrix=output_boxes_rotation_matrix,
        delta=delta)
  # Skip the loss entirely when no voxel survives the valid mask.
  return tf.cond(
      tf.reduce_any(valid_mask),
      loss_fn_unbatched, lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable(
    'box_corner_distance_loss_on_voxel_tensors',
    blacklist=['inputs', 'outputs'])
def box_corner_distance_loss_on_voxel_tensors(
    inputs,
    outputs,
    loss_type,
    delta=1.0,
    is_balanced=False,
    is_intermediate=False):
  """Computes regression loss on object corner locations using voxel tensors.

  Args:
    inputs: A dictionary of tf.Tensors with our input data.
    outputs: A dictionary of tf.Tensors with the network output.
    loss_type: Loss type.
    delta: float, the threshold at which the huber loss switches from
      quadratic to linear.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for each object instance.
    is_intermediate: If True, intermediate tensors are used for computing
      the loss.

  Returns:
    localization_loss: A tf.float32 scalar corresponding to localization loss.
  """
  standard_fields.check_input_voxel_fields(inputs=inputs)
  standard_fields.check_output_voxel_fields(outputs=outputs)

  def unbatched_fn(example_inputs, example_outputs):
    """Computes the corner distance loss for one batch element."""
    return _box_corner_distance_loss_on_voxel_tensors_unbatched(
        inputs_1=example_inputs,
        outputs_1=example_outputs,
        loss_type=loss_type,
        delta=delta,
        is_balanced=is_balanced,
        is_intermediate=is_intermediate)

  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs, outputs=outputs, unbatched_loss_fn=unbatched_fn)
def _box_corner_distance_loss_on_object_tensors(
    inputs, outputs, loss_type, delta, is_balanced):
  """Computes huber loss on object corner locations.

  Only objects with a positive class and a positive instance id contribute.
  Note that fn mutates the `inputs` and `outputs` dictionaries in place by
  replacing every object field with its boolean-masked version.

  Args:
    inputs: Input object-tensor dictionary for a single example.
    outputs: Output object-tensor dictionary for a single example.
    loss_type: Loss type forwarded to _box_corner_distance_loss.
    delta: Huber loss delta.
    is_balanced: If True, re-weights losses per object instance.

  Returns:
    A scalar tf.float32 loss; 0.0 when no object is valid.
  """
  valid_mask_class = tf.greater(
      tf.reshape(inputs[standard_fields.InputDataFields.objects_class], [-1]),
      0)
  valid_mask_instance = tf.greater(
      tf.reshape(inputs[standard_fields.InputDataFields.objects_instance_id],
                 [-1]), 0)
  valid_mask = tf.logical_and(valid_mask_class, valid_mask_instance)
  def fn():
    # Drop invalid objects from every present input/output object field.
    for field in standard_fields.get_input_object_fields():
      if field in inputs:
        inputs[field] = tf.boolean_mask(inputs[field], valid_mask)
    for field in standard_fields.get_output_object_fields():
      if field in outputs:
        outputs[field] = tf.boolean_mask(outputs[field], valid_mask)
    return _box_corner_distance_loss(
        loss_type=loss_type,
        is_balanced=is_balanced,
        input_boxes_length=inputs[
            standard_fields.InputDataFields.objects_length],
        input_boxes_height=inputs[
            standard_fields.InputDataFields.objects_height],
        input_boxes_width=inputs[standard_fields.InputDataFields.objects_width],
        input_boxes_center=inputs[
            standard_fields.InputDataFields.objects_center],
        input_boxes_rotation_matrix=inputs[
            standard_fields.InputDataFields.objects_rotation_matrix],
        input_boxes_instance_id=inputs[
            standard_fields.InputDataFields.objects_instance_id],
        output_boxes_length=outputs[
            standard_fields.DetectionResultFields.objects_length],
        output_boxes_height=outputs[
            standard_fields.DetectionResultFields.objects_height],
        output_boxes_width=outputs[
            standard_fields.DetectionResultFields.objects_width],
        output_boxes_center=outputs[
            standard_fields.DetectionResultFields.objects_center],
        output_boxes_rotation_matrix=outputs[
            standard_fields.DetectionResultFields.objects_rotation_matrix],
        delta=delta)
  # Return 0.0 when every object is filtered out.
  return tf.cond(
      tf.reduce_any(valid_mask), fn, lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable(
    'box_corner_distance_loss_on_object_tensors',
    blacklist=['inputs', 'outputs'])
def box_corner_distance_loss_on_object_tensors(
    inputs,
    outputs,
    loss_type,
    delta=1.0,
    is_balanced=False):
  """Computes regression loss on object corner locations using object tensors.

  Args:
    inputs: A dictionary of tf.Tensors with our input data.
    outputs: A dictionary of tf.Tensors with the network output.
    loss_type: Loss type.
    delta: float, the voxel where the huber loss function changes from a
      quadratic to linear.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for each object instance.

  Returns:
    localization_loss: A tf.float32 scalar corresponding to localization loss.
  """
  def fn(inputs_1, outputs_1):
    # Per-example corner distance loss.
    return _box_corner_distance_loss_on_object_tensors(
        inputs=inputs_1,
        outputs=outputs_1,
        loss_type=loss_type,
        delta=delta,
        is_balanced=is_balanced)
  # Iterate over the batch one example at a time and average the losses.
  batch_size = len(inputs[standard_fields.InputDataFields.objects_length])
  losses = []
  for b in range(batch_size):
    inputs_1 = batch_utils.get_batch_size_1_input_objects(inputs=inputs, b=b)
    outputs_1 = batch_utils.get_batch_size_1_output_objects(
        outputs=outputs, b=b)
    # Only compute the loss when the example has both input and output objects.
    cond_input = tf.greater(
        tf.shape(inputs_1[standard_fields.InputDataFields.objects_length])[0],
        0)
    cond_output = tf.greater(
        tf.shape(
            outputs_1[standard_fields.DetectionResultFields.objects_length])[0],
        0)
    cond = tf.logical_and(cond_input, cond_output)
    # The lambdas deliberately capture the current loop iteration's tensors;
    # they are traced by tf.cond within the same iteration.
    # pylint: disable=cell-var-from-loop
    loss = tf.cond(cond, lambda: fn(inputs_1=inputs_1, outputs_1=outputs_1),
                   lambda: tf.constant(0.0, dtype=tf.float32))
    # pylint: enable=cell-var-from-loop
    losses.append(loss)
  return tf.reduce_mean(tf.stack(losses))
| [
"tensorflow.tile",
"tensorflow.shape",
"tf3d.utils.box_utils.get_box_corners_3d",
"tensorflow.boolean_mask",
"gin.configurable",
"tensorflow.ones_like",
"tf3d.utils.mask_utils.apply_mask_to_input_voxel_tensors",
"tf3d.utils.batch_utils.get_batch_size_1_input_objects",
"tf3d.utils.mask_utils.apply_ma... | [((11299, 11402), 'gin.configurable', 'gin.configurable', (['"""box_rotation_regression_loss_on_voxel_tensors"""'], {'blacklist': "['inputs', 'outputs']"}), "('box_rotation_regression_loss_on_voxel_tensors', blacklist\n =['inputs', 'outputs'])\n", (11315, 11402), False, 'import gin\n'), ((14921, 15020), 'gin.configurable', 'gin.configurable', (['"""box_size_regression_loss_on_voxel_tensors"""'], {'blacklist': "['inputs', 'outputs']"}), "('box_size_regression_loss_on_voxel_tensors', blacklist=[\n 'inputs', 'outputs'])\n", (14937, 15020), False, 'import gin\n'), ((17694, 17793), 'gin.configurable', 'gin.configurable', (['"""box_center_distance_loss_on_voxel_tensors"""'], {'blacklist': "['inputs', 'outputs']"}), "('box_center_distance_loss_on_voxel_tensors', blacklist=[\n 'inputs', 'outputs'])\n", (17710, 17793), False, 'import gin\n'), ((22161, 22260), 'gin.configurable', 'gin.configurable', (['"""box_corner_distance_loss_on_voxel_tensors"""'], {'blacklist': "['inputs', 'outputs']"}), "('box_corner_distance_loss_on_voxel_tensors', blacklist=[\n 'inputs', 'outputs'])\n", (22177, 22260), False, 'import gin\n'), ((25745, 25845), 'gin.configurable', 'gin.configurable', (['"""box_corner_distance_loss_on_object_tensors"""'], {'blacklist': "['inputs', 'outputs']"}), "('box_corner_distance_loss_on_object_tensors', blacklist=[\n 'inputs', 'outputs'])\n", (25761, 25845), False, 'import gin\n'), ((2406, 2445), 'tensorflow.logical_and', 'tf.logical_and', (['cond_input', 'cond_output'], {}), '(cond_input, cond_output)\n', (2420, 2445), True, 'import tensorflow as tf\n'), ((4428, 4467), 'tensorflow.logical_and', 'tf.logical_and', (['cond_input', 'cond_output'], {}), '(cond_input, cond_output)\n', (4442, 4467), True, 'import tensorflow as tf\n'), ((5877, 5916), 'tensorflow.logical_and', 'tf.logical_and', (['cond_input', 'cond_output'], {}), '(cond_input, cond_output)\n', (5891, 5916), True, 'import tensorflow as tf\n'), ((9196, 9235), 
'tensorflow.logical_and', 'tf.logical_and', (['cond_input', 'cond_output'], {}), '(cond_input, cond_output)\n', (9210, 9235), True, 'import tensorflow as tf\n'), ((9438, 9481), 'tf3d.utils.mask_utils.num_voxels_mask', 'mask_utils.num_voxels_mask', ([], {'inputs': 'inputs_1'}), '(inputs=inputs_1)\n', (9464, 9481), False, 'from tf3d.utils import mask_utils\n'), ((9506, 9560), 'tf3d.utils.mask_utils.voxels_within_objects_mask', 'mask_utils.voxels_within_objects_mask', ([], {'inputs': 'inputs_1'}), '(inputs=inputs_1)\n', (9543, 9560), False, 'from tf3d.utils import mask_utils\n'), ((9570, 9622), 'tensorflow.logical_and', 'tf.logical_and', (['within_objects_mask', 'num_voxels_mask'], {}), '(within_objects_mask, num_voxels_mask)\n', (9584, 9622), True, 'import tensorflow as tf\n'), ((9820, 9909), 'tf3d.utils.mask_utils.apply_mask_to_input_voxel_tensors', 'mask_utils.apply_mask_to_input_voxel_tensors', ([], {'inputs': 'inputs_1', 'valid_mask': 'valid_mask'}), '(inputs=inputs_1, valid_mask=\n valid_mask)\n', (9864, 9909), False, 'from tf3d.utils import mask_utils\n'), ((9914, 10006), 'tf3d.utils.mask_utils.apply_mask_to_output_voxel_tensors', 'mask_utils.apply_mask_to_output_voxel_tensors', ([], {'outputs': 'outputs_1', 'valid_mask': 'valid_mask'}), '(outputs=outputs_1, valid_mask\n =valid_mask)\n', (9959, 10006), False, 'from tf3d.utils import mask_utils\n'), ((12406, 12461), 'tf3d.standard_fields.check_input_voxel_fields', 'standard_fields.check_input_voxel_fields', ([], {'inputs': 'inputs'}), '(inputs=inputs)\n', (12446, 12461), False, 'from tf3d import standard_fields\n'), ((12464, 12522), 'tf3d.standard_fields.check_output_voxel_fields', 'standard_fields.check_output_voxel_fields', ([], {'outputs': 'outputs'}), '(outputs=outputs)\n', (12505, 12522), False, 'from tf3d import standard_fields\n'), ((12814, 12921), 'tf3d.losses.utils.apply_unbatched_loss_on_voxel_tensors', 'loss_utils.apply_unbatched_loss_on_voxel_tensors', ([], {'inputs': 'inputs', 'outputs': 'outputs', 
'unbatched_loss_fn': 'fn'}), '(inputs=inputs, outputs=\n outputs, unbatched_loss_fn=fn)\n', (12862, 12921), True, 'from tf3d.losses import utils as loss_utils\n'), ((16000, 16055), 'tf3d.standard_fields.check_input_voxel_fields', 'standard_fields.check_input_voxel_fields', ([], {'inputs': 'inputs'}), '(inputs=inputs)\n', (16040, 16055), False, 'from tf3d import standard_fields\n'), ((16058, 16116), 'tf3d.standard_fields.check_output_voxel_fields', 'standard_fields.check_output_voxel_fields', ([], {'outputs': 'outputs'}), '(outputs=outputs)\n', (16099, 16116), False, 'from tf3d import standard_fields\n'), ((16404, 16511), 'tf3d.losses.utils.apply_unbatched_loss_on_voxel_tensors', 'loss_utils.apply_unbatched_loss_on_voxel_tensors', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'unbatched_loss_fn': 'fn'}), '(inputs=inputs, outputs=\n outputs, unbatched_loss_fn=fn)\n', (16452, 16511), True, 'from tf3d.losses import utils as loss_utils\n'), ((18780, 18835), 'tf3d.standard_fields.check_input_voxel_fields', 'standard_fields.check_input_voxel_fields', ([], {'inputs': 'inputs'}), '(inputs=inputs)\n', (18820, 18835), False, 'from tf3d import standard_fields\n'), ((18838, 18896), 'tf3d.standard_fields.check_output_voxel_fields', 'standard_fields.check_output_voxel_fields', ([], {'outputs': 'outputs'}), '(outputs=outputs)\n', (18879, 18896), False, 'from tf3d import standard_fields\n'), ((19184, 19291), 'tf3d.losses.utils.apply_unbatched_loss_on_voxel_tensors', 'loss_utils.apply_unbatched_loss_on_voxel_tensors', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'unbatched_loss_fn': 'fn'}), '(inputs=inputs, outputs=\n outputs, unbatched_loss_fn=fn)\n', (19232, 19291), True, 'from tf3d.losses import utils as loss_utils\n'), ((23068, 23123), 'tf3d.standard_fields.check_input_voxel_fields', 'standard_fields.check_input_voxel_fields', ([], {'inputs': 'inputs'}), '(inputs=inputs)\n', (23108, 23123), False, 'from tf3d import standard_fields\n'), ((23126, 23184), 
'tf3d.standard_fields.check_output_voxel_fields', 'standard_fields.check_output_voxel_fields', ([], {'outputs': 'outputs'}), '(outputs=outputs)\n', (23167, 23184), False, 'from tf3d import standard_fields\n'), ((23472, 23579), 'tf3d.losses.utils.apply_unbatched_loss_on_voxel_tensors', 'loss_utils.apply_unbatched_loss_on_voxel_tensors', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'unbatched_loss_fn': 'fn'}), '(inputs=inputs, outputs=\n outputs, unbatched_loss_fn=fn)\n', (23520, 23579), True, 'from tf3d.losses import utils as loss_utils\n'), ((24019, 24072), 'tensorflow.logical_and', 'tf.logical_and', (['valid_mask_class', 'valid_mask_instance'], {}), '(valid_mask_class, valid_mask_instance)\n', (24033, 24072), True, 'import tensorflow as tf\n'), ((1554, 1602), 'tensorflow.reshape', 'tf.reshape', (['input_boxes_rotation_matrix', '[-1, 9]'], {}), '(input_boxes_rotation_matrix, [-1, 9])\n', (1564, 1602), True, 'import tensorflow as tf\n'), ((1635, 1684), 'tensorflow.reshape', 'tf.reshape', (['output_boxes_rotation_matrix', '[-1, 9]'], {}), '(output_boxes_rotation_matrix, [-1, 9])\n', (1645, 1684), True, 'import tensorflow as tf\n'), ((3221, 3260), 'tensorflow.reshape', 'tf.reshape', (['input_boxes_length', '[-1, 1]'], {}), '(input_boxes_length, [-1, 1])\n', (3231, 3260), True, 'import tensorflow as tf\n'), ((3277, 3316), 'tensorflow.reshape', 'tf.reshape', (['input_boxes_height', '[-1, 1]'], {}), '(input_boxes_height, [-1, 1])\n', (3287, 3316), True, 'import tensorflow as tf\n'), ((3332, 3370), 'tensorflow.reshape', 'tf.reshape', (['input_boxes_width', '[-1, 1]'], {}), '(input_boxes_width, [-1, 1])\n', (3342, 3370), True, 'import tensorflow as tf\n'), ((3394, 3434), 'tensorflow.reshape', 'tf.reshape', (['output_boxes_length', '[-1, 1]'], {}), '(output_boxes_length, [-1, 1])\n', (3404, 3434), True, 'import tensorflow as tf\n'), ((3458, 3498), 'tensorflow.reshape', 'tf.reshape', (['output_boxes_height', '[-1, 1]'], {}), '(output_boxes_height, [-1, 1])\n', (3468, 
3498), True, 'import tensorflow as tf\n'), ((3521, 3560), 'tensorflow.reshape', 'tf.reshape', (['output_boxes_width', '[-1, 1]'], {}), '(output_boxes_width, [-1, 1])\n', (3531, 3560), True, 'import tensorflow as tf\n'), ((3682, 3754), 'tensorflow.concat', 'tf.concat', (['[predicted_length, predicted_height, predicted_width]'], {'axis': '(1)'}), '([predicted_length, predicted_height, predicted_width], axis=1)\n', (3691, 3754), True, 'import tensorflow as tf\n'), ((3778, 3806), 'tensorflow.ones_like', 'tf.ones_like', (['predicted_size'], {}), '(predicted_size)\n', (3790, 3806), True, 'import tensorflow as tf\n'), ((5144, 5183), 'tensorflow.reshape', 'tf.reshape', (['input_boxes_center', '[-1, 3]'], {}), '(input_boxes_center, [-1, 3])\n', (5154, 5183), True, 'import tensorflow as tf\n'), ((5207, 5247), 'tensorflow.reshape', 'tf.reshape', (['output_boxes_center', '[-1, 3]'], {}), '(output_boxes_center, [-1, 3])\n', (5217, 5247), True, 'import tensorflow as tf\n'), ((7844, 8048), 'tf3d.utils.box_utils.get_box_corners_3d', 'box_utils.get_box_corners_3d', ([], {'boxes_length': 'gt_boxes_length', 'boxes_height': 'gt_boxes_height', 'boxes_width': 'gt_boxes_width', 'boxes_rotation_matrix': 'gt_boxes_rotation_matrix', 'boxes_center': 'gt_boxes_center'}), '(boxes_length=gt_boxes_length, boxes_height=\n gt_boxes_height, boxes_width=gt_boxes_width, boxes_rotation_matrix=\n gt_boxes_rotation_matrix, boxes_center=gt_boxes_center)\n', (7872, 8048), False, 'from tf3d.utils import box_utils\n'), ((8108, 8350), 'tf3d.utils.box_utils.get_box_corners_3d', 'box_utils.get_box_corners_3d', ([], {'boxes_length': 'predicted_boxes_length', 'boxes_height': 'predicted_boxes_height', 'boxes_width': 'predicted_boxes_width', 'boxes_rotation_matrix': 'predicted_boxes_rotation_matrix', 'boxes_center': 'predicted_boxes_center'}), '(boxes_length=predicted_boxes_length,\n boxes_height=predicted_boxes_height, boxes_width=predicted_boxes_width,\n boxes_rotation_matrix=predicted_boxes_rotation_matrix, 
boxes_center=\n predicted_boxes_center)\n', (8136, 8350), False, 'from tf3d.utils import box_utils\n'), ((8400, 8424), 'tensorflow.tile', 'tf.tile', (['weights', '[1, 8]'], {}), '(weights, [1, 8])\n', (8407, 8424), True, 'import tensorflow as tf\n'), ((11200, 11225), 'tensorflow.reduce_any', 'tf.reduce_any', (['valid_mask'], {}), '(valid_mask)\n', (11213, 11225), True, 'import tensorflow as tf\n'), ((14822, 14847), 'tensorflow.reduce_any', 'tf.reduce_any', (['valid_mask'], {}), '(valid_mask)\n', (14835, 14847), True, 'import tensorflow as tf\n'), ((17595, 17620), 'tensorflow.reduce_any', 'tf.reduce_any', (['valid_mask'], {}), '(valid_mask)\n', (17608, 17620), True, 'import tensorflow as tf\n'), ((22062, 22087), 'tensorflow.reduce_any', 'tf.reduce_any', (['valid_mask'], {}), '(valid_mask)\n', (22075, 22087), True, 'import tensorflow as tf\n'), ((23781, 23852), 'tensorflow.reshape', 'tf.reshape', (['inputs[standard_fields.InputDataFields.objects_class]', '[-1]'], {}), '(inputs[standard_fields.InputDataFields.objects_class], [-1])\n', (23791, 23852), True, 'import tensorflow as tf\n'), ((23905, 23982), 'tensorflow.reshape', 'tf.reshape', (['inputs[standard_fields.InputDataFields.objects_instance_id]', '[-1]'], {}), '(inputs[standard_fields.InputDataFields.objects_instance_id], [-1])\n', (23915, 23982), True, 'import tensorflow as tf\n'), ((24103, 24144), 'tf3d.standard_fields.get_input_object_fields', 'standard_fields.get_input_object_fields', ([], {}), '()\n', (24142, 24144), False, 'from tf3d import standard_fields\n'), ((24256, 24298), 'tf3d.standard_fields.get_output_object_fields', 'standard_fields.get_output_object_fields', ([], {}), '()\n', (24296, 24298), False, 'from tf3d import standard_fields\n'), ((25667, 25692), 'tensorflow.reduce_any', 'tf.reduce_any', (['valid_mask'], {}), '(valid_mask)\n', (25680, 25692), True, 'import tensorflow as tf\n'), ((26892, 26954), 'tf3d.utils.batch_utils.get_batch_size_1_input_objects', 
'batch_utils.get_batch_size_1_input_objects', ([], {'inputs': 'inputs', 'b': 'b'}), '(inputs=inputs, b=b)\n', (26934, 26954), False, 'from tf3d.utils import batch_utils\n'), ((26971, 27036), 'tf3d.utils.batch_utils.get_batch_size_1_output_objects', 'batch_utils.get_batch_size_1_output_objects', ([], {'outputs': 'outputs', 'b': 'b'}), '(outputs=outputs, b=b)\n', (27014, 27036), False, 'from tf3d.utils import batch_utils\n'), ((27316, 27355), 'tensorflow.logical_and', 'tf.logical_and', (['cond_input', 'cond_output'], {}), '(cond_input, cond_output)\n', (27330, 27355), True, 'import tensorflow as tf\n'), ((27625, 27641), 'tensorflow.stack', 'tf.stack', (['losses'], {}), '(losses)\n', (27633, 27641), True, 'import tensorflow as tf\n'), ((1319, 1398), 'tf3d.losses.utils.get_balanced_loss_weights_multiclass', 'loss_utils.get_balanced_loss_weights_multiclass', ([], {'labels': 'input_boxes_instance_id'}), '(labels=input_boxes_instance_id)\n', (1366, 1398), True, 'from tf3d.losses import utils as loss_utils\n'), ((1773, 1849), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {'delta': 'delta', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(delta=delta, reduction=tf.keras.losses.Reduction.NONE)\n', (1794, 1849), True, 'import tensorflow as tf\n'), ((2279, 2316), 'tensorflow.shape', 'tf.shape', (['input_boxes_rotation_matrix'], {}), '(input_boxes_rotation_matrix)\n', (2287, 2316), True, 'import tensorflow as tf\n'), ((2351, 2389), 'tensorflow.shape', 'tf.shape', (['output_boxes_rotation_matrix'], {}), '(output_boxes_rotation_matrix)\n', (2359, 2389), True, 'import tensorflow as tf\n'), ((2481, 2515), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (2492, 2515), True, 'import tensorflow as tf\n'), ((2995, 3074), 'tf3d.losses.utils.get_balanced_loss_weights_multiclass', 'loss_utils.get_balanced_loss_weights_multiclass', ([], {'labels': 'input_boxes_instance_id'}), '(labels=input_boxes_instance_id)\n', 
(3042, 3074), True, 'from tf3d.losses import utils as loss_utils\n'), ((3852, 3928), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {'delta': 'delta', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(delta=delta, reduction=tf.keras.losses.Reduction.NONE)\n', (3873, 3928), True, 'import tensorflow as tf\n'), ((4319, 4347), 'tensorflow.shape', 'tf.shape', (['input_boxes_length'], {}), '(input_boxes_length)\n', (4327, 4347), True, 'import tensorflow as tf\n'), ((4382, 4411), 'tensorflow.shape', 'tf.shape', (['output_boxes_length'], {}), '(output_boxes_length)\n', (4390, 4411), True, 'import tensorflow as tf\n'), ((4503, 4537), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (4514, 4537), True, 'import tensorflow as tf\n'), ((4918, 4997), 'tf3d.losses.utils.get_balanced_loss_weights_multiclass', 'loss_utils.get_balanced_loss_weights_multiclass', ([], {'labels': 'input_boxes_instance_id'}), '(labels=input_boxes_instance_id)\n', (4965, 4997), True, 'from tf3d.losses import utils as loss_utils\n'), ((5293, 5369), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {'delta': 'delta', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(delta=delta, reduction=tf.keras.losses.Reduction.NONE)\n', (5314, 5369), True, 'import tensorflow as tf\n'), ((5768, 5796), 'tensorflow.shape', 'tf.shape', (['input_boxes_center'], {}), '(input_boxes_center)\n', (5776, 5796), True, 'import tensorflow as tf\n'), ((5831, 5860), 'tensorflow.shape', 'tf.shape', (['output_boxes_center'], {}), '(output_boxes_center)\n', (5839, 5860), True, 'import tensorflow as tf\n'), ((5952, 5986), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (5963, 5986), True, 'import tensorflow as tf\n'), ((6508, 6587), 'tf3d.losses.utils.get_balanced_loss_weights_multiclass', 'loss_utils.get_balanced_loss_weights_multiclass', ([], {'labels': 'input_boxes_instance_id'}), 
'(labels=input_boxes_instance_id)\n', (6555, 6587), True, 'from tf3d.losses import utils as loss_utils\n'), ((8492, 8568), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {'delta': 'delta', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(delta=delta, reduction=tf.keras.losses.Reduction.NONE)\n', (8513, 8568), True, 'import tensorflow as tf\n'), ((9087, 9115), 'tensorflow.shape', 'tf.shape', (['input_boxes_length'], {}), '(input_boxes_length)\n', (9095, 9115), True, 'import tensorflow as tf\n'), ((9150, 9179), 'tensorflow.shape', 'tf.shape', (['output_boxes_length'], {}), '(output_boxes_length)\n', (9158, 9179), True, 'import tensorflow as tf\n'), ((9271, 9305), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (9282, 9305), True, 'import tensorflow as tf\n'), ((11260, 11294), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (11271, 11294), True, 'import tensorflow as tf\n'), ((14882, 14916), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (14893, 14916), True, 'import tensorflow as tf\n'), ((17655, 17689), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (17666, 17689), True, 'import tensorflow as tf\n'), ((22122, 22156), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (22133, 22156), True, 'import tensorflow as tf\n'), ((25706, 25740), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (25717, 25740), True, 'import tensorflow as tf\n'), ((1922, 1997), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), '(reduction=tf.keras.losses.Reduction.NONE)\n', (1955, 1997), True, 'import tensorflow as tf\n'), ((2225, 2250), 'tensorflow.reshape', 
'tf.reshape', (['weights', '[-1]'], {}), '(weights, [-1])\n', (2235, 2250), True, 'import tensorflow as tf\n'), ((4001, 4076), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), '(reduction=tf.keras.losses.Reduction.NONE)\n', (4034, 4076), True, 'import tensorflow as tf\n'), ((4265, 4290), 'tensorflow.reshape', 'tf.reshape', (['weights', '[-1]'], {}), '(weights, [-1])\n', (4275, 4290), True, 'import tensorflow as tf\n'), ((5442, 5517), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), '(reduction=tf.keras.losses.Reduction.NONE)\n', (5475, 5517), True, 'import tensorflow as tf\n'), ((5714, 5739), 'tensorflow.reshape', 'tf.reshape', (['weights', '[-1]'], {}), '(weights, [-1])\n', (5724, 5739), True, 'import tensorflow as tf\n'), ((7534, 7581), 'tensorflow.ones_like', 'tf.ones_like', (['gt_boxes_length'], {'dtype': 'tf.float32'}), '(gt_boxes_length, dtype=tf.float32)\n', (7546, 7581), True, 'import tensorflow as tf\n'), ((7639, 7686), 'tensorflow.ones_like', 'tf.ones_like', (['gt_boxes_height'], {'dtype': 'tf.float32'}), '(gt_boxes_height, dtype=tf.float32)\n', (7651, 7686), True, 'import tensorflow as tf\n'), ((7743, 7789), 'tensorflow.ones_like', 'tf.ones_like', (['gt_boxes_width'], {'dtype': 'tf.float32'}), '(gt_boxes_width, dtype=tf.float32)\n', (7755, 7789), True, 'import tensorflow as tf\n'), ((8677, 8752), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), '(reduction=tf.keras.losses.Reduction.NONE)\n', (8710, 8752), True, 'import tensorflow as tf\n'), ((8884, 8919), 'tensorflow.reshape', 'tf.reshape', (['gt_box_corners', '[-1, 3]'], {}), '(gt_box_corners, [-1, 3])\n', (8894, 8919), True, 'import tensorflow as tf\n'), ((8936, 8978), 'tensorflow.reshape', 'tf.reshape', (['predicted_box_corners', '[-1, 3]'], 
{}), '(predicted_box_corners, [-1, 3])\n', (8946, 8978), True, 'import tensorflow as tf\n'), ((9026, 9058), 'tensorflow.reshape', 'tf.reshape', (['corner_weights', '[-1]'], {}), '(corner_weights, [-1])\n', (9036, 9058), True, 'import tensorflow as tf\n'), ((24196, 24238), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['inputs[field]', 'valid_mask'], {}), '(inputs[field], valid_mask)\n', (24211, 24238), True, 'import tensorflow as tf\n'), ((24352, 24395), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['outputs[field]', 'valid_mask'], {}), '(outputs[field], valid_mask)\n', (24367, 24395), True, 'import tensorflow as tf\n'), ((27083, 27149), 'tensorflow.shape', 'tf.shape', (['inputs_1[standard_fields.InputDataFields.objects_length]'], {}), '(inputs_1[standard_fields.InputDataFields.objects_length])\n', (27091, 27149), True, 'import tensorflow as tf\n'), ((27203, 27276), 'tensorflow.shape', 'tf.shape', (['outputs_1[standard_fields.DetectionResultFields.objects_length]'], {}), '(outputs_1[standard_fields.DetectionResultFields.objects_length])\n', (27211, 27276), True, 'import tensorflow as tf\n'), ((27501, 27535), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (27512, 27535), True, 'import tensorflow as tf\n'), ((1445, 1478), 'tensorflow.shape', 'tf.shape', (['input_boxes_instance_id'], {}), '(input_boxes_instance_id)\n', (1453, 1478), True, 'import tensorflow as tf\n'), ((3121, 3154), 'tensorflow.shape', 'tf.shape', (['input_boxes_instance_id'], {}), '(input_boxes_instance_id)\n', (3129, 3154), True, 'import tensorflow as tf\n'), ((5044, 5077), 'tensorflow.shape', 'tf.shape', (['input_boxes_instance_id'], {}), '(input_boxes_instance_id)\n', (5052, 5077), True, 'import tensorflow as tf\n'), ((6634, 6667), 'tensorflow.shape', 'tf.shape', (['input_boxes_instance_id'], {}), '(input_boxes_instance_id)\n', (6642, 6667), True, 'import tensorflow as tf\n')] |
import sys
sys.path.insert(0, "build/lib.linux-x86_64-3.6")
sys.path.insert(0, "build/lib.linux-x86_64-3.8")
import pypet._ext1
class Derived(pypet._ext1.Pet):
    """Python-side subclass of the compiled ``Pet`` extension type.

    Overrides ``getName`` so we can check that calls routed through the
    extension (e.g. via ``PetUser``) dispatch back into Python code.
    """

    def __init__(self, name):
        # Initialise the C++ base part first, then attach Python state.
        super().__init__()
        self.name = name
        self.derived = True

    def getName(self):
        """Return the pet's name decorated with a cow noise."""
        return f'{self.name}: Moooo'
# Smoke test for the extension module:
# 1) use the C++ Pet type directly,
daisy = pypet._ext1.Pet("Daisy")
print(daisy.getName())
print()
# 2) hand a plain C++ Pet to a PetUser and read it back,
pu1 = pypet._ext1.PetUser()
pu1.pets = [daisy]
print(pu1.pets[0])
print()
# 3) hand a Python-derived Pet to a PetUser — getName() on the object
#    retrieved from the container should hit the Python override.
molly = Derived("Molly")
pu2 = pypet._ext1.PetUser()
print(molly)
print(molly.getName())
pu2.pets = [molly]
print(pu2.pets[0])
print(pu2.pets[0].getName())
| [
"sys.path.insert"
] | [((11, 59), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""build/lib.linux-x86_64-3.6"""'], {}), "(0, 'build/lib.linux-x86_64-3.6')\n", (26, 59), False, 'import sys\n'), ((60, 108), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""build/lib.linux-x86_64-3.8"""'], {}), "(0, 'build/lib.linux-x86_64-3.8')\n", (75, 108), False, 'import sys\n')] |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
"""
A :std:doc:`dimod composite <dimod:reference/samplers>` that tiles a small problem
multiple times onto a Chimera-structured sampler.
The :class:`.TilingComposite` takes a problem that can fit on a small
:std:doc:`Chimera <system:reference/intro>` graph and replicates it across a larger
Chimera graph to obtain samples from multiple areas of the solver in one call.
For example, a 2x2 Chimera lattice could be tiled 64 times (8x8) on a fully-yielded
D-Wave 2000Q system (16x16).
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_ for explanations
of technical terms in descriptions of Ocean tools.
"""
from __future__ import division
from math import sqrt, ceil
import dimod
import dwave_networkx as dnx
import numpy as np
__all__ = ['TilingComposite', 'draw_tiling']
class TilingComposite(dimod.Sampler, dimod.Composite, dimod.Structured):
    """Composite to tile a small problem across a Chimera-structured sampler.

    Inherits from :class:`dimod.Sampler`, :class:`dimod.Composite`, and :class:`dimod.Structured`.

    Enables parallel sampling for small problems (problems that are minor-embeddable in
    a small part of a D-Wave solver's :std:doc:`Chimera <system:reference/intro>` graph).

    The notation *CN* refers to a Chimera graph consisting of an NxN grid of unit cells.
    Each Chimera unit cell is itself a bipartite graph with shores of size t. The D-Wave 2000Q QPU
    supports a C16 Chimera graph: its 2048 qubits are logically mapped into a 16x16 matrix of
    unit cells of 8 qubits (t=4).

    A problem that can be minor-embedded in a single unit cell, for example, can therefore
    be tiled across the unit cells of a D-Wave 2000Q as 16x16 duplicates. This enables
    sampling 256 solutions in a single call.

    Args:
        sampler (:class:`dimod.Sampler`): Structured dimod sampler to be wrapped.
        sub_m (int): Number of rows of Chimera unit cells for minor-embedding the problem once.
        sub_n (int): Number of columns of Chimera unit cells for minor-embedding the problem once.
        t (int, optional, default=4): Size of the shore within each Chimera unit cell.

    Raises:
        ValueError: If ``sampler`` is not :class:`dimod.Structured`, or if no complete
            ``sub_m`` x ``sub_n`` tile fits on the child sampler's working graph.

    Examples:
        This example tiles a two-variable QUBO representing a logical NOT gate
        (two coupled nodes, typically taking opposite values) across a D-Wave solver:

        >>> from dwave.system.samplers import DWaveSampler
        >>> from dwave.system.composites import EmbeddingComposite, TilingComposite
        >>> sampler = EmbeddingComposite(TilingComposite(DWaveSampler(), 1, 1, 4))
        >>> Q = {(1, 1): -1, (1, 2): 2, (2, 1): 0, (2, 2): -1}
        >>> response = sampler.sample_qubo(Q)
        >>> for sample in response.samples():   # doctest: +SKIP
        ...     print(sample)
        ...
        {1: 0, 2: 1}
        {1: 1, 2: 0}
        >>> # Snipped above response for brevity

    See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
    for explanations of technical terms in descriptions of Ocean tools.
    """

    nodelist = None
    """list: List of active qubits of one tile (the ``sub_m`` x ``sub_n`` Chimera
    subgraph) for the structured solver, e.g. ``[0, 1, ..., 15]`` for a 2x1 tile
    with t=4."""

    edgelist = None
    """list: List of active couplers of one tile (the ``sub_m`` x ``sub_n``
    Chimera subgraph), as sorted 2-lists of qubit indices."""

    parameters = None
    """dict[str, list]: Keyword parameters accepted by the child sampler."""

    properties = None
    """dict: Contains ``'child_properties'`` (a copy of the child sampler's
    properties) and ``'embeddings'`` (one embedding dict per tile found)."""

    children = None
    """list: The single wrapped structured sampler."""

    def __init__(self, sampler, sub_m, sub_n, t=4):
        self.parameters = sampler.parameters.copy()
        self.properties = properties = {'child_properties': sampler.properties}

        # The structure this composite exposes is a single tile.
        tile = dnx.chimera_graph(sub_m, sub_n, t)
        self.nodelist = sorted(tile.nodes)
        self.edgelist = sorted(sorted(edge) for edge in tile.edges)
        # dimod.Structured abstract base class automatically populates adjacency and structure as
        # mixins based on nodelist and edgelist

        if not isinstance(sampler, dimod.Structured):
            # we could also just tile onto the unstructured sampler but in that case we would need
            # to know how many tiles to use
            raise ValueError("given child sampler should be structured")
        self.children = [sampler]

        nodes_per_cell = t * 2
        edges_per_cell = t * t
        # Infer the Chimera lattice size from the qubit count, assuming a square lattice.
        m = n = int(ceil(sqrt(ceil(len(sampler.structure.nodelist) / nodes_per_cell))))  # assume square lattice shape
        system = dnx.chimera_graph(m, n, t, node_list=sampler.structure.nodelist, edge_list=sampler.structure.edgelist)
        # Maps between (row, col, shore, shore-index) Chimera coordinates and linear indices.
        c2i = {chimera_index: linear_index for (linear_index, chimera_index) in system.nodes(data='chimera_index')}
        sub_c2i = {chimera_index: linear_index for (linear_index, chimera_index) in tile.nodes(data='chimera_index')}

        # Count the connections between these qubits
        def _between(qubits1, qubits2):
            edges = [edge for edge in system.edges if edge[0] in qubits1 and edge[1] in qubits2]
            return len(edges)

        # Get the list of qubits in a cell
        def _cell_qubits(i, j):
            return [c2i[(i, j, u, k)] for u in range(2) for k in range(t) if (i, j, u, k) in c2i]

        # get a mask of complete cells (all qubits and all internal couplers present)
        cells = [[False for _ in range(n)] for _ in range(m)]

        for i in range(m):
            for j in range(n):
                qubits = _cell_qubits(i, j)
                cells[i][j] = len(qubits) == nodes_per_cell and _between(qubits, qubits) == edges_per_cell

        # List of 'embeddings'
        self.embeddings = properties['embeddings'] = embeddings = []

        # For each possible chimera cell check if the next few cells are complete
        for i in range(m + 1 - sub_m):
            for j in range(n + 1 - sub_n):

                # Check if the sub cells are matched
                match = all(cells[i + sub_i][j + sub_j] for sub_i in range(sub_m) for sub_j in range(sub_n))

                # Check if there are connections between the cells.
                for sub_i in range(sub_m):
                    for sub_j in range(sub_n):
                        if sub_m > 1 and sub_i < sub_m - 1:
                            match &= _between(_cell_qubits(i + sub_i, j + sub_j),
                                              _cell_qubits(i + sub_i + 1, j + sub_j)) == t
                        if sub_n > 1 and sub_j < sub_n - 1:
                            match &= _between(_cell_qubits(i + sub_i, j + sub_j),
                                              _cell_qubits(i + sub_i, j + sub_j + 1)) == t

                if match:
                    # Pull those cells out into an embedding.
                    embedding = {}
                    for sub_i in range(sub_m):
                        for sub_j in range(sub_n):
                            cells[i + sub_i][j + sub_j] = False  # Mark cell as matched
                            for u in range(2):
                                for k in range(t):
                                    embedding[sub_c2i[sub_i, sub_j, u, k]] = {c2i[(i + sub_i, j + sub_j, u, k)]}

                    embeddings.append(embedding)

        if len(embeddings) == 0:
            raise ValueError("no tile embeddings found; is the sampler Chimera structured?")

    @dimod.bqm_structured
    def sample(self, bqm, **kwargs):
        """Sample from the provided binary quadratic model.

        The problem is replicated once per tile embedding, solved in a single
        call to the child sampler, and the tiled responses are unembedded and
        stacked into one response.

        Args:
            bqm (:obj:`dimod.BinaryQuadraticModel`):
                Binary quadratic model to be sampled from.

            **kwargs:
                Optional keyword arguments for the sampling method, specified per solver.

        Returns:
            :class:`dimod.Response`

        Raises:
            RuntimeError: If the unembedded tile responses disagree on vartype.

        Examples:
            A two-variable Ising problem fits in a single Chimera unit cell and
            is tiled across the solver's entire graph, yielding many samples:

            >>> from dwave.system.samplers import DWaveSampler
            >>> from dwave.system.composites import TilingComposite
            >>> sampler = TilingComposite(DWaveSampler(), 1, 1, 4)
            >>> response = sampler.sample_ising({0: -1, 1: 1}, {})
            >>> for sample in response.samples():    # doctest: +SKIP
            ...     print(sample)
            ...
            {0: 1, 1: -1}
            {0: 1, 1: -1}
            >>> # Snipped above response for brevity

        See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
        for explanations of technical terms in descriptions of Ocean tools.
        """
        # apply the embeddings to the given problem to tile it across the child sampler
        embedded_bqm = dimod.BinaryQuadraticModel.empty(bqm.vartype)
        __, __, target_adjacency = self.child.structure
        for embedding in self.embeddings:
            embedded_bqm.update(dimod.embed_bqm(bqm, embedding, target_adjacency))

        # solve the problem on the child system
        tiled_response = self.child.sample(embedded_bqm, **kwargs)

        responses = []
        for embedding in self.embeddings:
            # restrict the embedding to the variables actually in the bqm
            embedding = {v: chain for v, chain in embedding.items() if v in bqm.linear}
            responses.append(dimod.unembed_response(tiled_response, embedding, bqm))

        # stack the records; np.hstack needs a real sequence — passing a
        # generator is deprecated by NumPy and rejected by newer releases
        record = np.rec.array(np.hstack([resp.record for resp in responses]))

        vartypes = set(resp.vartype for resp in responses)
        if len(vartypes) > 1:
            raise RuntimeError("inconsistent vartypes returned")
        vartype = vartypes.pop()

        info = {}
        for resp in responses:
            info.update(resp.info)

        labels = responses[0].variable_labels

        return dimod.Response(record, labels, info, vartype)

    @property
    def num_tiles(self):
        """int: Number of tile embeddings found on the child sampler."""
        return len(self.embeddings)
def draw_tiling(sampler, t=4):
"""Draw Chimera graph of sampler with colored tiles.
Args:
sampler (:class:`dwave_micro_client_dimod.TilingComposite`): A tiled dimod
sampler to be drawn.
t (int): The size of the shore within each
:std:doc:`Chimera <system:reference/intro>` cell.
Uses :std:doc:`dwave_networkx.draw_chimera <networkx:index>`.
Linear biases are overloaded to color the graph according to which tile each Chimera cell belongs to.
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
child = sampler.child
nodes_per_cell = t * 2
m = n = int(ceil(sqrt(ceil(len(child.structure.nodelist) / nodes_per_cell)))) # assume square lattice shape
system = dnx.chimera_graph(m, n, t, node_list=child.structure.nodelist, edge_list=child.structure.edgelist)
labels = {node: -len(sampler.embeddings) for node in system.nodes} # unused cells are blue
labels.update({node: i for i, embedding in enumerate(sampler.embeddings) for s in embedding.values() for node in s})
dnx.draw_chimera(system, linear_biases=labels)
| [
"dimod.unembed_response",
"numpy.hstack",
"dimod.Response",
"dimod.BinaryQuadraticModel.empty",
"dwave_networkx.chimera_graph",
"dimod.embed_bqm",
"dwave_networkx.draw_chimera"
] | [((17215, 17318), 'dwave_networkx.chimera_graph', 'dnx.chimera_graph', (['m', 'n', 't'], {'node_list': 'child.structure.nodelist', 'edge_list': 'child.structure.edgelist'}), '(m, n, t, node_list=child.structure.nodelist, edge_list=\n child.structure.edgelist)\n', (17232, 17318), True, 'import dwave_networkx as dnx\n'), ((17536, 17582), 'dwave_networkx.draw_chimera', 'dnx.draw_chimera', (['system'], {'linear_biases': 'labels'}), '(system, linear_biases=labels)\n', (17552, 17582), True, 'import dwave_networkx as dnx\n'), ((9583, 9617), 'dwave_networkx.chimera_graph', 'dnx.chimera_graph', (['sub_m', 'sub_n', 't'], {}), '(sub_m, sub_n, t)\n', (9600, 9617), True, 'import dwave_networkx as dnx\n'), ((10379, 10486), 'dwave_networkx.chimera_graph', 'dnx.chimera_graph', (['m', 'n', 't'], {'node_list': 'sampler.structure.nodelist', 'edge_list': 'sampler.structure.edgelist'}), '(m, n, t, node_list=sampler.structure.nodelist, edge_list=\n sampler.structure.edgelist)\n', (10396, 10486), True, 'import dwave_networkx as dnx\n'), ((15216, 15261), 'dimod.BinaryQuadraticModel.empty', 'dimod.BinaryQuadraticModel.empty', (['bqm.vartype'], {}), '(bqm.vartype)\n', (15248, 15261), False, 'import dimod\n'), ((16243, 16288), 'dimod.Response', 'dimod.Response', (['record', 'labels', 'info', 'vartype'], {}), '(record, labels, info, vartype)\n', (16257, 16288), False, 'import dimod\n'), ((15859, 15903), 'numpy.hstack', 'np.hstack', (['(resp.record for resp in responses)'], {}), '(resp.record for resp in responses)\n', (15868, 15903), True, 'import numpy as np\n'), ((15392, 15441), 'dimod.embed_bqm', 'dimod.embed_bqm', (['bqm', 'embedding', 'target_adjacency'], {}), '(bqm, embedding, target_adjacency)\n', (15407, 15441), False, 'import dimod\n'), ((15744, 15798), 'dimod.unembed_response', 'dimod.unembed_response', (['tiled_response', 'embedding', 'bqm'], {}), '(tiled_response, embedding, bqm)\n', (15766, 15798), False, 'import dimod\n')] |
import datetime
import re
import time
from collections import namedtuple
from django.conf import settings
from django.core.management.base import BaseCommand
from trello import ResourceUnavailable, TrelloClient
from core.models import Event
# Create new command
class Command(BaseCommand):
help = 'Syncs event in trello board. Need a token.'
missing_args_message = (
'You need to add a token! Get one here: '
'https://trello.com/1/authorize?key=01ab0348ca020573e7f728ae7400928a&scope=read%2Cwrite&'
'name=My+Application&expiration=1hour&response_type=token'
)
def add_arguments(self, parser):
parser.add_argument('trello_token', type=str)
def handle(self, *args, **options):
token = options['trello_token']
events = event_list()
sync(events, token)
# Get data
EventTuple = namedtuple('EventTuple', 'name id city date')
def event_list():
event = Event.objects.all()
result = []
for e in event:
name = e.name
_id = str(e.pk)
city = e.city
date = datetime.date(e.date.year, e.date.month, e.date.day or 1)
result.append(EventTuple(name, _id, city, date))
return result
# Sync to trello
ADMIN_BASE_URL = 'https://djangogirls.org/admin/core/event/'
def sync(events, token):
trello = TrelloClient(api_key=settings.TRELLO_API_KEY, token=token)
board = trello.get_board('55f7167c46760fcb5d68b385')
far_away, less_2_months, less_1_month, less_1_week, today, past = board.all_lists()
all_cards = {card_id(c): c for c in board.all_cards()}
date_today = datetime.date.today()
for e in events:
card = all_cards.get(e.id)
if not card:
card = create_card(e, far_away)
create_checklist(card)
# fetch card to get due date
try:
card.fetch()
except ResourceUnavailable:
print("Oopsie: too many requests! Let's wait 10 seconds!")
time.sleep(10)
card.fetch()
if e.date != card.due_date.date():
print('Changing due date of {} to {}'.format(e.city, e.date))
card.set_due(e.date)
distance = (e.date - date_today).days
if distance < 0:
right_list = past
elif distance == 0:
right_list = today
elif distance < 7:
right_list = less_1_week
elif distance < 30:
right_list = less_1_month
elif distance < 60:
right_list = less_2_months
else:
right_list = far_away
ensure_card_in_list(card, right_list)
def card_id(card):
m = re.search(ADMIN_BASE_URL + r'(\d+)',
card.desc)
return m.group(1)
def create_card(event, list):
print('Creating card {} ({})'.format(event.city, event.date.isoformat()))
return list.add_card(name=event.city,
desc=ADMIN_BASE_URL + event.id,
due=event.date.isoformat())
def create_checklist(card):
card.add_checklist("Things to do:", [
"2 month check", "1 month check", "Thank you email and request for stats", "Stats obtained"])
def ensure_checklist_in_card(card):
if not card.checklists:
print("Adding checklist to {} card.".format(card.name))
create_checklist(card)
def ensure_card_in_list(card, list):
if card.list_id != list.id:
print('Moving {} to {}'.format(
card.name, list.name))
card.change_list(list.id)
| [
"core.models.Event.objects.all",
"collections.namedtuple",
"time.sleep",
"trello.TrelloClient",
"datetime.date",
"datetime.date.today",
"re.search"
] | [((859, 904), 'collections.namedtuple', 'namedtuple', (['"""EventTuple"""', '"""name id city date"""'], {}), "('EventTuple', 'name id city date')\n", (869, 904), False, 'from collections import namedtuple\n'), ((937, 956), 'core.models.Event.objects.all', 'Event.objects.all', ([], {}), '()\n', (954, 956), False, 'from core.models import Event\n'), ((1330, 1388), 'trello.TrelloClient', 'TrelloClient', ([], {'api_key': 'settings.TRELLO_API_KEY', 'token': 'token'}), '(api_key=settings.TRELLO_API_KEY, token=token)\n', (1342, 1388), False, 'from trello import ResourceUnavailable, TrelloClient\n'), ((1613, 1634), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1632, 1634), False, 'import datetime\n'), ((2661, 2708), 're.search', 're.search', (["(ADMIN_BASE_URL + '(\\\\d+)')", 'card.desc'], {}), "(ADMIN_BASE_URL + '(\\\\d+)', card.desc)\n", (2670, 2708), False, 'import re\n'), ((1076, 1133), 'datetime.date', 'datetime.date', (['e.date.year', 'e.date.month', '(e.date.day or 1)'], {}), '(e.date.year, e.date.month, e.date.day or 1)\n', (1089, 1133), False, 'import datetime\n'), ((1988, 2002), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1998, 2002), False, 'import time\n')] |
# this creates a wrapper using ctypes for glfw from the header
# it's not fully automatic, but it does a good deal of work
import re
with open("../glfw/include/GL/glfw3.h") as header_file:
data = header_file.read()
# normalize whitespace
data = re.sub(r"[ \t]+", " ", data)
# delete beginning
data = data[data.index("*/", data.index("Input handling definitions")) + 2:]
# delete end
data = data[:data.rindex("/*", 0, data.index("Global definition cleanup"))]
# delete deprecated aliases
data = data[:data.rindex("/* GLFW 2.x key name aliases (deprecated) */")] + data[data.index("/* Mouse button definitions */"):]
# delete GLFWAPI and const
data = re.sub(r" *\bGLFWAPI\b *", "", data)
data = re.sub(r" *\bconst\b *", "", data)
# remove GLFW prefix
data = data.replace("GLFW_", "")
data = data.replace("GLFW", "")
# remove semicolon
data = data.replace(";", "")
# convert defines
data = re.sub(r"#define (\w+) (\w+)", r"\1 = \2", data)
# convert one-liner comments
data = re.sub(r"/\* *((?:[^/\n]|(?:[^*\n]/))+) *\*/", r"# \1", data)
# convert multiline comments
data = re.sub(r"/\*((?:[^/]|[^*]/)+)\*/", lambda r: "\"\"\"\n" + r.group(1).replace("*", "").strip() + "\n\"\"\"\n", data)
# convert function typedefs
data = re.sub(r"typedef +(\w+) *\( *\* *(\w+) *\) *\(([^)\n]+)\)", r"\2 = CFUNCTYPE(\1, \3)", data)
# rest of the typedefs are manual
def func_decl_helper(res):
arguments = res.group(3)
if arguments != "void":
arguments = ",".join(" ".join(arg.split()[:-1]) for arg in res.group(3).split(","))
return "{1} = glfwdll.glfw{1}\n{1}.restype = {0}\n{1}.argtypes = [{2}]".format(res.group(1), res.group(2), arguments)
# make empty argument lists empty
data = data.replace("(void)", "()")
# convert function declarations
data = re.sub(r"([^ \n]+) +glfw(\w+)\(([^)\n]*)\)", func_decl_helper, data)
# convert pointer types
data = data.replace("unsigned char*", "POINTER(c_ubyte)")
data = data.replace("char*", "c_char_p")
data = re.sub(r"(\w[\w ]+)\*", r"POINTER(\1)", data)
# convert void
data = re.sub(r"\bvoid\b", "None", data)
# convert other types
data = re.sub(r"\bint\b", "c_int", data)
data = re.sub(r"\bfloat\b", "c_float", data)
data = re.sub(r"\bdouble\b", "c_float", data)
data = re.sub(r"\bunsigned long\b", "c_ulong", data)
# normalize whitespace
data = re.sub(r" +", " ", data)
data = re.sub(r"\n\n+", "\n\n", data)
data = re.sub(r"\s*,\s*", ", ", data)
data = data.strip() + "\n"
with open("glfw_wrapper.py", "w") as output_file:
output_file.write(data)
| [
"re.sub"
] | [((252, 280), 're.sub', 're.sub', (['"""[ \\\\t]+"""', '""" """', 'data'], {}), "('[ \\\\t]+', ' ', data)\n", (258, 280), False, 'import re\n'), ((664, 701), 're.sub', 're.sub', (['""" *\\\\bGLFWAPI\\\\b *"""', '""""""', 'data'], {}), "(' *\\\\bGLFWAPI\\\\b *', '', data)\n", (670, 701), False, 'import re\n'), ((708, 743), 're.sub', 're.sub', (['""" *\\\\bconst\\\\b *"""', '""""""', 'data'], {}), "(' *\\\\bconst\\\\b *', '', data)\n", (714, 743), False, 'import re\n'), ((905, 955), 're.sub', 're.sub', (['"""#define (\\\\w+) (\\\\w+)"""', '"""\\\\1 = \\\\2"""', 'data'], {}), "('#define (\\\\w+) (\\\\w+)', '\\\\1 = \\\\2', data)\n", (911, 955), False, 'import re\n'), ((991, 1055), 're.sub', 're.sub', (['"""/\\\\* *((?:[^/\\\\n]|(?:[^*\\\\n]/))+) *\\\\*/"""', '"""# \\\\1"""', 'data'], {}), "('/\\\\* *((?:[^/\\\\n]|(?:[^*\\\\n]/))+) *\\\\*/', '# \\\\1', data)\n", (997, 1055), False, 'import re\n'), ((1242, 1347), 're.sub', 're.sub', (['"""typedef +(\\\\w+) *\\\\( *\\\\* *(\\\\w+) *\\\\) *\\\\(([^)\\\\n]+)\\\\)"""', '"""\\\\2 = CFUNCTYPE(\\\\1, \\\\3)"""', 'data'], {}), "('typedef +(\\\\w+) *\\\\( *\\\\* *(\\\\w+) *\\\\) *\\\\(([^)\\\\n]+)\\\\)',\n '\\\\2 = CFUNCTYPE(\\\\1, \\\\3)', data)\n", (1248, 1347), False, 'import re\n'), ((1794, 1866), 're.sub', 're.sub', (['"""([^ \\\\n]+) +glfw(\\\\w+)\\\\(([^)\\\\n]*)\\\\)"""', 'func_decl_helper', 'data'], {}), "('([^ \\\\n]+) +glfw(\\\\w+)\\\\(([^)\\\\n]*)\\\\)', func_decl_helper, data)\n", (1800, 1866), False, 'import re\n'), ((1994, 2041), 're.sub', 're.sub', (['"""(\\\\w[\\\\w ]+)\\\\*"""', '"""POINTER(\\\\1)"""', 'data'], {}), "('(\\\\w[\\\\w ]+)\\\\*', 'POINTER(\\\\1)', data)\n", (2000, 2041), False, 'import re\n'), ((2063, 2097), 're.sub', 're.sub', (['"""\\\\bvoid\\\\b"""', '"""None"""', 'data'], {}), "('\\\\bvoid\\\\b', 'None', data)\n", (2069, 2097), False, 'import re\n'), ((2127, 2161), 're.sub', 're.sub', (['"""\\\\bint\\\\b"""', '"""c_int"""', 'data'], {}), "('\\\\bint\\\\b', 'c_int', data)\n", (2133, 2161), 
False, 'import re\n'), ((2168, 2206), 're.sub', 're.sub', (['"""\\\\bfloat\\\\b"""', '"""c_float"""', 'data'], {}), "('\\\\bfloat\\\\b', 'c_float', data)\n", (2174, 2206), False, 'import re\n'), ((2213, 2252), 're.sub', 're.sub', (['"""\\\\bdouble\\\\b"""', '"""c_float"""', 'data'], {}), "('\\\\bdouble\\\\b', 'c_float', data)\n", (2219, 2252), False, 'import re\n'), ((2259, 2305), 're.sub', 're.sub', (['"""\\\\bunsigned long\\\\b"""', '"""c_ulong"""', 'data'], {}), "('\\\\bunsigned long\\\\b', 'c_ulong', data)\n", (2265, 2305), False, 'import re\n'), ((2336, 2359), 're.sub', 're.sub', (['""" +"""', '""" """', 'data'], {}), "(' +', ' ', data)\n", (2342, 2359), False, 'import re\n'), ((2368, 2399), 're.sub', 're.sub', (['"""\\\\n\\\\n+"""', '"""\n\n"""', 'data'], {}), "('\\\\n\\\\n+', '\\n\\n', data)\n", (2374, 2399), False, 'import re\n'), ((2406, 2437), 're.sub', 're.sub', (['"""\\\\s*,\\\\s*"""', '""", """', 'data'], {}), "('\\\\s*,\\\\s*', ', ', data)\n", (2412, 2437), False, 'import re\n')] |
# Copyright (c) 2021, Parallel Systems Architecture Laboratory (PARSA), EPFL &
# Machine Learning and Optimization Laboratory (MLO), EPFL. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the PARSA, EPFL & MLO, EPFL
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Modified file from Salesforce's LSTM and QRNN Language Model Toolkit
# (https://github.com/salesforce/awd-lstm-lm). See LICENSE for more details.
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def embedded_dropout(embed, words, dropout=0.1, scale=None):
if dropout:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
mask = Variable(mask)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
if scale:
masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
X = X = F.embedding(
words, masked_embed_weight,
padding_idx,
embed.max_norm, embed.norm_type,
embed.scale_grad_by_freq, embed.sparse
)
return X
if __name__ == '__main__':
V = 50
h = 4
bptt = 10
batch_size = 2
embed = torch.nn.Embedding(V, h)
words = np.random.random_integers(low=0, high=V-1, size=(batch_size, bptt))
words = torch.LongTensor(words)
words = Variable(words)
origX = embed(words)
X = embedded_dropout(embed, words)
print(origX)
print(X)
| [
"numpy.random.random_integers",
"torch.LongTensor",
"torch.nn.functional.embedding",
"torch.autograd.Variable",
"torch.nn.Embedding"
] | [((2519, 2649), 'torch.nn.functional.embedding', 'F.embedding', (['words', 'masked_embed_weight', 'padding_idx', 'embed.max_norm', 'embed.norm_type', 'embed.scale_grad_by_freq', 'embed.sparse'], {}), '(words, masked_embed_weight, padding_idx, embed.max_norm, embed.\n norm_type, embed.scale_grad_by_freq, embed.sparse)\n', (2530, 2649), True, 'import torch.nn.functional as F\n'), ((2779, 2803), 'torch.nn.Embedding', 'torch.nn.Embedding', (['V', 'h'], {}), '(V, h)\n', (2797, 2803), False, 'import torch\n'), ((2815, 2884), 'numpy.random.random_integers', 'np.random.random_integers', ([], {'low': '(0)', 'high': '(V - 1)', 'size': '(batch_size, bptt)'}), '(low=0, high=V - 1, size=(batch_size, bptt))\n', (2840, 2884), True, 'import numpy as np\n'), ((2893, 2916), 'torch.LongTensor', 'torch.LongTensor', (['words'], {}), '(words)\n', (2909, 2916), False, 'import torch\n'), ((2927, 2942), 'torch.autograd.Variable', 'Variable', (['words'], {}), '(words)\n', (2935, 2942), False, 'from torch.autograd import Variable\n'), ((2219, 2233), 'torch.autograd.Variable', 'Variable', (['mask'], {}), '(mask)\n', (2227, 2233), False, 'from torch.autograd import Variable\n')] |
'''
Investigating the offset of CIV emission in the Cloudy models
as a function of ionization, nebular metallicity, stellar metallicity,
stellar population type, age, etc.
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
from cloudy_func import * # written by TAH
import warnings
from scipy.optimize import OptimizeWarning
# Promote scipy's OptimizeWarning to an exception so a failed (or absent)
# CIV Gaussian fit raises and can be caught in the try/except below.
warnings.simplefilter("error", OptimizeWarning) # for when no CIV emission seen

__author__ = '<NAME>'
__email__ = '<EMAIL>'
# for the emission line profiles
def gaussian(xaxis, mean, A, sig, offset):
    """
    Evaluate a Gaussian profile on a constant baseline.

    Returns ``A * exp(-(xaxis - mean)^2 / (2 * sig^2)) + offset``; used as
    the model for an emission-line profile sitting on a flat continuum.
    """
    exponent = -((xaxis - mean) ** 2.) / (2. * sig ** 2.)
    return A * np.exp(exponent) + offset
# velocity offset plot using wavelengths instead of redshift
# the spectrum is already at systemic (so that z_sys=0)
def velocity_offset(lam_obs, lam_rest):
    """
    Convert an observed wavelength to a velocity offset in km/s.

    Applies the first-order Doppler relation v = c * (lam_obs/lam_rest - 1)
    with c = 2.998e5 km/s.  The input spectrum is already shifted to
    systemic, so z_sys = 0 is assumed.
    """
    fractional_shift = (lam_obs / lam_rest) - 1
    return 2.998e5 * fractional_shift
# ---- model grid parameters ----
u = np.arange(-3.5,-1.4,0.2) # full logU range, 11 ionization points
zneb = [0.1,0.2,0.3,0.5] # nebular metallicities; full range, some models have 0.4 as well
mass = 300 # upper stellar mass cutoff: 300 or 100
stars = 'binary' # stellar population type: binary or single
age = 7 # log age: 7, 7.477, or 8
neb = 0 # index into zneb selecting the nebular metallicity used below
civ = 1548.19 # rest wavelength of the CIV line, angstroms
# For each of the first five ionization parameters: load the Cloudy model
# spectrum, fit a Gaussian to the CIV region, and plot the spectrum with
# the measured velocity offset annotated.
for ion in u[:5]:
    # pulling model spectrum for this logU
    spec = get_cloudy_spec(f'{stars}_cont_{mass}',mass,age,zneb[neb],ioni=ion)
    # convert wavelength to Angstroms (assumes Cloudy output is in
    # microns -- TODO confirm against get_cloudy_spec)
    spec['wavelength'] *= 1e4
    spec['spectrum'] /= (2.998e18/spec.wavelength.values) # nu*Fnu --> Fnu
    # zooming in around CIV (~1548 A)
    spec = reorder_spectrum(spec) # Cloudy orders it backwards
    spec = spec.query('1490 < wavelength < 1610').copy()
    spec['spectrum'] /= np.median(spec.spectrum.values) # normalizing it
    # plotting CIV area of spectrum
    plt.figure(figsize=(9,6))
    plt.plot(spec.wavelength,spec.spectrum)
    text_kwargs = {'transform':plt.gca().transAxes,'fontsize':15}
    plt.text(0.025,0.94,f'logU: {round(ion,1)}',**text_kwargs)
    # fitting the CIV emission with gaussian(xaxis, mean, A, sig, offset);
    # p0 starts the line center near 1548 A
    try:
        popt,pcov = curve_fit(gaussian,spec.wavelength,spec.spectrum,p0=[1548,1,2,1])
        plt.plot(spec.wavelength,gaussian(spec.wavelength,*popt))
        plt.axvline(popt[0])
        # velocity offset of the fitted line center from rest-frame CIV
        offset = velocity_offset(popt[0],civ)
        plt.text(0.025,0.05,f'offset: {round(offset,2)} km/s',**text_kwargs)
    except OptimizeWarning:
        # simplefilter('error', ...) above turned OptimizeWarning into an
        # exception; fall back to the wavelength of the peak flux value.
        print('\nNo emission detected, finding max value.',end='\n\n')
        zoomin = spec.query('1540 < wavelength < 1565').copy()
        # wavelength of peak value
        peak = zoomin.loc[zoomin['spectrum'].idxmax(),'wavelength']
        plt.axvline(peak,ls=':')
        # calculating offset using the max value (will likely stay the same)
        offset = velocity_offset(peak,civ)
        plt.text(0.025,0.05,f'offset: {round(offset,2)} km/s',**text_kwargs)
    # log-scaled flux axis with a small fixed set of labeled ticks
    plt.yscale('log')
    plt.ylim(0.25,6)
    plt.gca().set_yticks([0.3,1.,3,])
    plt.gca().set_yticklabels(['0.3','1.0','3.0',])
    plt.xlabel('rest wavelength [$\AA$]')
    plt.ylabel('normalized flux')
    plt.tight_layout()
    plt.show()
    plt.close()
    print()
# ---------------------------------------------------------- #
# -- running through all models to build table of offsets -- #
# ---------------------------------------------------------- #
# zstellar = [0.1,0.2,0.3,0.5]
# zneb = [0.1,0.3,0.5]
# offsets = pd.DataFrame({'z':[],'zneb':[],'u':[],'offset':[],'age':[],'mass':[],'stars':[]})
# for stars in ['binary','single']:
# print('For stars:',stars)
# for mass in [300,100]:
# print('For mass:',mass)
# for met in zstellar: # stellar metallicity
# print('For Z_stellar:',met)
# for neb in zneb: # nebular metallicity
# # checking for when stellar == nebular when it's 0.3 or 0.5
# if neb == 0.1 and met == 0.3: pass # no need to run this model twice
# elif neb == 0.1 and met == 0.5: pass # no need to run this model twice
# else:
# # need to check if matches stellar
# if neb == 0.1: neb = met # fix nebular to stellar metallicity
# print('For Z_neb:',neb)
# for ion in u:
# print('For logU:',round(ion,1),end=',\t')
# # pulling model spectrum
# spec = get_cloudy_spec(f'{stars}_cont_{mass}',mass,age,met,zneb=neb,ioni=ion)
# spec['wavelength'] *= 1e4
# spec['spectrum'] /= (2.998e18/spec.wavelength.values) # nu*Fnu --> Fnu
# # zooming in around CIV
# spec = spec.query('1490 < wavelength < 1610').copy()
# spec['spectrum'] /= np.median(spec.spectrum.values) # normalizing it
# spec = reorder_spectrum(spec) # Cloudy orders it backwards
# # fitting the CIV emission
# # gaussian(xaxis, mean, A, sig, offset)
# try:
# popt,pcov = curve_fit(gaussian,spec.wavelength,spec.spectrum,p0=[1548,1,2,1])
# # calculating offset
# offset = velocity_offset(popt[0],civ)
# print(f'offset: {round(offset,2)} km/s')
# except OptimizeWarning:
# print('Bad fit/no emission detected.')
# offset = np.nan
# filldf = pd.DataFrame({'z':[met],'zneb':[neb],'u':[ion],'offset':[round(offset,3)],\
# 'age':[int(age)],'mass':[int(mass)],'stars':[stars]})
# offsets = offsets.append(filldf,ignore_index=True)
# print()
# print(end='\n\n')
# print(end='\n\n')
# print(end='\n\n\n')
# # ---------------------------------------- #
# # ---------------------------------------- #
# print('Saving table to file...',end='\n\n')
# df_dtypes = {'zneb':float,'u':float,'offset':float,'age':int,'mass':int,'stars':str}
# offsets = offsets.astype(df_dtypes) # to make sure column dtypes don't change
# # offsets.to_csv('plots-data/offsets_civ.txt',sep='\t',index=False)
# print(offsets.head()) | [
"scipy.optimize.curve_fit",
"numpy.median",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"warnings.simplefilter",
"matplotlib.py... | [((427, 474), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'OptimizeWarning'], {}), "('error', OptimizeWarning)\n", (448, 474), False, 'import warnings\n'), ((983, 1009), 'numpy.arange', 'np.arange', (['(-3.5)', '(-1.4)', '(0.2)'], {}), '(-3.5, -1.4, 0.2)\n', (992, 1009), True, 'import numpy as np\n'), ((1728, 1759), 'numpy.median', 'np.median', (['spec.spectrum.values'], {}), '(spec.spectrum.values)\n', (1737, 1759), True, 'import numpy as np\n'), ((1819, 1845), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (1829, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1889), 'matplotlib.pyplot.plot', 'plt.plot', (['spec.wavelength', 'spec.spectrum'], {}), '(spec.wavelength, spec.spectrum)\n', (1857, 1889), True, 'import matplotlib.pyplot as plt\n'), ((2967, 2984), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2977, 2984), True, 'import matplotlib.pyplot as plt\n'), ((2989, 3006), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.25)', '(6)'], {}), '(0.25, 6)\n', (2997, 3006), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3138), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rest wavelength [$\\\\AA$]"""'], {}), "('rest wavelength [$\\\\AA$]')\n", (3110, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""normalized flux"""'], {}), "('normalized flux')\n", (3152, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3177, 3195), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3193, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3200, 3210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3208, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3215, 3226), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3224, 3226), True, 'import matplotlib.pyplot as plt\n'), ((2123, 2194), 'scipy.optimize.curve_fit', 'curve_fit', (['gaussian', 
'spec.wavelength', 'spec.spectrum'], {'p0': '[1548, 1, 2, 1]'}), '(gaussian, spec.wavelength, spec.spectrum, p0=[1548, 1, 2, 1])\n', (2132, 2194), False, 'from scipy.optimize import curve_fit\n'), ((2263, 2283), 'matplotlib.pyplot.axvline', 'plt.axvline', (['popt[0]'], {}), '(popt[0])\n', (2274, 2283), True, 'import matplotlib.pyplot as plt\n'), ((1920, 1929), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2752), 'matplotlib.pyplot.axvline', 'plt.axvline', (['peak'], {'ls': '""":"""'}), "(peak, ls=':')\n", (2738, 2752), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3019), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3017, 3019), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3057), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3055, 3057), True, 'import matplotlib.pyplot as plt\n'), ((689, 716), 'numpy.power', 'np.power', (['(xaxis - mean)', '(2.0)'], {}), '(xaxis - mean, 2.0)\n', (697, 716), True, 'import numpy as np\n'), ((719, 737), 'numpy.power', 'np.power', (['sig', '(2.0)'], {}), '(sig, 2.0)\n', (727, 737), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rtk.hardware.component.resistor.fixed.Wirewound.py is part of the RTK
# Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##########################################################
Hardware.Component.Resistor.Fixed Package Wirewound Module
##########################################################
"""
import gettext
import locale
try:
import Configuration
import Utilities
from hardware.component.resistor.Resistor import Model as Resistor
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
from rtk.hardware.component.resistor.Resistor import Model as Resistor
# Module metadata.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'

# Add localization support: prefer the locale named in the RTK
# configuration, falling back to the user's default locale when that
# name is not available on this system.
try:
    locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:  # pragma: no cover
    locale.setlocale(locale.LC_ALL, '')

# Conventional alias used to mark translatable strings.
_ = gettext.gettext
class Wirewound(Resistor):
    """
    Data model for an accurate wirewound fixed resistor.

    Covers specifications MIL-R-93 and MIL-R-39005; implements the
    MIL-HDBK-217F, section 9.5 hazard rate model on top of the generic
    :class:`Resistor` model.

    :cvar list _lst_piE: MIL-HDBK-217FN2 operating environment factors.
    :cvar list _lst_piQ_count: quality factors for the MIL-HDBK-217FN2
                               parts count method.
    :cvar list _lst_piQ_stress: quality factors for the MIL-HDBK-217FN2
                                parts stress method.
    :cvar list _lst_lambdab_count: base hazard rates for the
                                   MIL-HDBK-217FN2 parts count method.
    :cvar int subcategory: subcategory ID in the rtkcom DB (29).
    """

    # MIL-HDBK-217F hazard rate calculation constants.
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
    _lst_piE = [1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8,
                14.0, 38.0, 610.0]
    _lst_piQ_count = [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]
    _lst_piQ_stress = [0.03, 0.1, 0.3, 1.0, 5.0, 15.0]
    _lst_lambdab_count = [0.0085, 0.018, 0.10, 0.045, 0.16, 0.15, 0.17, 0.30,
                          0.38, 0.26, 0.0068, 0.13, 0.37, 5.4]
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----

    subcategory = 29                       # Subcategory ID in rtkcom DB.

    def __init__(self):
        """
        Method to initialize a Wirewound resistor data model instance.
        """

        super(Wirewound, self).__init__()

    def calculate_part(self):
        """
        Method to calculate the hazard rate for the Wirewound resistor
        data model.

        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        from math import exp

        self.hazard_rate_model = {}

        if self.hazard_rate_type == 2:
            self.hazard_rate_model['equation'] = 'lambdab * piR * piQ * piE'

            # Base hazard rate from the power-stress/temperature model.
            _power_ratio = self.operating_power / self.rated_power
            _abs_temp = self.temperature_active + 273.0
            try:
                self.base_hr = 0.0031 * \
                               exp((_abs_temp / 398.0)**10.0) * \
                               exp((_power_ratio * (_abs_temp / 273.0))**1.5)
                self.hazard_rate_model['lambdab'] = self.base_hr
            except OverflowError:
                # TODO: Handle overflow error.
                return True

            # Resistance factor: step function of the resistance value.
            for _upper, _factor in [(1.0E4, 1.0), (1.0E5, 1.7), (1.0E6, 3.0)]:
                if self.resistance <= _upper:
                    self.piR = _factor
                    break
            else:
                self.piR = 5.0
            self.hazard_rate_model['piR'] = self.piR

        return Resistor.calculate_part(self)
class WirewoundPower(Resistor):
"""
The Wirewound Power resistor data model contains the attributes and
methods of a Wirewound Power resistor. The attributes of a Wirewound
Power resistor are:
:cvar list _lst_piR: list of MIL-HDBK-217FN2 resistance factor values.
:cvar list _lst_piE: list of MIL-HDBK-217FN2 operating environment factor
values.
:cvar list _lst_piQ_count: list of quality factor values for the
MIL-HDBK-217FN2 parts count method.
:cvar list _lst_piQ_stress: list of quality factor values for the
MIL-HDBK-217FN2 parts stress method.
:cvar list _lst_lambdab_count: list of base hazard rate values for the
MIL-HDBK-217FN2 parts count method.
:cvar int subcategory: default value: 30
:ivar int specification: index of the specification applicable to the
resistor.
:ivar int style: index of the resistor style.
Covers specifications MIL-R-26 and MIL-R-39007.
Hazard Rate Models:
# MIL-HDBK-217F, section 9.6
"""
# MIL-HDK-217F hazard rate calculation variables.
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
_lst_piR = [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6, 0.0],
[1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2, 1.6],
[1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0, 1.0, 1.2, 1.6],
[1.0, 1.0, 1.0, 1.2, 1.6, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 2.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 2.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 2.0, 0.0, 0.0],
[1.0, 1.2, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.6, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 2.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.2, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.6, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.5, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 1.6, 0.0],
[1.0, 1.0, 1.0, 1.4, 1.6, 2.0],
[1.0, 1.0, 1.0, 1.4, 1.6, 2.0],
[1.0, 1.0, 1.4, 2.4, 0.0, 0.0],
[1.0, 1.0, 1.2, 2.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.5, 0.0, 0.0, 0.0],
[1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]]
_lst_piE = [1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.3,
13.0, 34.0, 610.0]
_lst_piQ_count = [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]
_lst_piQ_stress = [0.03, 0.1, 0.3, 1.0, 5.0, 15.0]
_lambdab_count = [[0.014, 0.031, 0.16, 0.077, 0.26, 0.073, 0.15, 0.19,
0.39, 0.42, 0.0042, 0.21, 0.62, 9.4],
[0.013, 0.028, 0.15, 0.070, 0.24, 0.065, 0.13, 0.18,
0.35, 0.38, 0.0038, 0.19, 0.56, 8.6]]
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
subcategory = 30 # Subcategory ID in rtkcom DB.
def __init__(self):
"""
Method to initialize a Wirewound Power resistor data model instance.
"""
super(WirewoundPower, self).__init__()
self._lst_lambdab_count = []
self.specification = 0
self.style = 0
def set_attributes(self, values):
"""
Method to set the Wirewound Power resistor data model attributes.
:param tuple values: tuple of values to assign to the instance
attributes.
:return: (_code, _msg); the error code and error message.
:rtype: tuple
"""
_code = 0
_msg = ''
(_code, _msg) = Resistor.set_attributes(self, values)
try:
self.specification = int(values[117])
self.style = int(values[118])
except IndexError as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Insufficient input values."
except(TypeError, ValueError) as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Converting one or more inputs to correct data type."
return(_code, _msg)
def get_attributes(self):
"""
Method to retrieve the current values of the Wirewound Power resistor
data model attributes.
:return: (specification, style)
:rtype: tuple
"""
_values = Resistor.get_attributes(self)
_values = _values + (self.specification, self.style)
return _values
def calculate_part(self):
"""
Method to calculate the hazard rate for the Wirewound Power resistor
data model.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# WARNING: Refactor calculate_part; current McCabe Complexity metric = 19.
from math import exp
self.hazard_rate_model = {}
if self.hazard_rate_type == 1:
self._lst_lambdab_count = self._lambdab_count[self.specification - 1]
elif self.hazard_rate_type == 2:
self.hazard_rate_model['equation'] = 'lambdab * piR * piQ * piE'
# Base hazard rate.
_stress = self.operating_power / self.rated_power
try:
self.base_hr = \
0.00148 * \
exp(((self.temperature_active + 273.0) / 298.0)**2.0) * \
exp((_stress / 0.5) * ((self.temperature_active + 273.0) /
273.0))
self.hazard_rate_model['lambdab'] = self.base_hr
except OverflowError:
# TODO: Handle overflow error.
return True
# Resistance factor.
if self.specification == 1: # MIL-R-39007
if self.resistance <= 500.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][0]
elif self.resistance > 500.0 and self.resistance <= 1000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][1]
elif self.resistance > 1000.0 and self.resistance <= 5000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][2]
elif self.resistance > 5000.0 and self.resistance <= 7500.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][3]
elif self.resistance > 7500.0 and self.resistance <= 10000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][4]
elif self.resistance > 10000.0 and self.resistance <= 15000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][5]
elif self.resistance > 15000.0 and self.resistance <= 20000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][6]
elif self.resistance > 20000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][7]
elif self.specification == 2: # MIL-R-26
if self.resistance <= 100.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][0]
elif self.resistance > 100.0 and self.resistance <= 1000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][1]
elif self.resistance > 1000.0 and self.resistance <= 10000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][2]
elif self.resistance > 10000.0 and self.resistance <= 100000.0:
self.piR = self._lst_piR[self.specification - 1][self.style - 1][3]
elif(self.resistance > 100000.0 and
self.resistance <= 150000.0):
self.piR = self._lst_piR[self.specification - 1][self.style - 1][4]
elif(self.resistance > 150000.0 and
self.resistance <= 200000.0):
self.piR = self._lst_piR[self.specification - 1][self.style - 1][5]
self.hazard_rate_model['piR'] = self.piR
return Resistor.calculate_part(self)
class WirewoundChassisMount(Resistor):
"""
The Wirewound Chassis Mount Power resistor data model contains the
attributes and methods of a Wirewound Chassis Mount Power resistor. The
attributes of a Wirewound Chassis Mount Power resistor are:
:cvar list _lst_piR: list of MIL-HDBK-217FN2 resistance factor values.
:cvar list _lst_piE: list of MIL-HDBK-217FN2 operating environment factor
values.
:cvar list _lst_piQ_count: list of quality factor values for the
MIL-HDBK-217FN2 parts count method.
:cvar list _lst_piQ_stress: list of quality factor values for the
MIL-HDBK-217FN2 parts stress method.
:cvar list _lst_lambdab_count: list of base hazard rate values for the
MIL-HDBK-217FN2 parts count method.
:cvar int subcategory: default value: 31
Covers specifications MIL-R-18546 and MIL-R-39009.
Hazard Rate Models:
# MIL-HDBK-217F, section 9.7
"""
# MIL-HDK-217F hazard rate calculation variables.
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
_lst_piR = [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.2, 1.6, 0.0],
[1.0, 1.0, 1.0, 1.1, 1.2, 1.6],
[1.0, 1.0, 1.0, 1.0, 1.2, 1.6],
[1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],
[[1.0, 1.2, 1.6, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.6, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.1, 1.2, 1.4, 0.0],
[1.0, 1.0, 1.0, 1.2, 1.6, 0.0],
[1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]]
_lst_piE = [1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5,
13.0, 34.0, 610.0]
_lst_piQ_count = [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]
_lst_piQ_stress = [0.03, 0.1, 0.3, 1.0, 5.0, 15.0]
_lst_lambdab_count = [0.008, 0.18, 0.096, 0.045, 0.15, 0.044, 0.088, 0.12,
0.24, 0.25, 0.004, 0.13, 0.37, 5.5]
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
subcategory = 31 # Subcategory ID in rtkcom DB.
def __init__(self):
"""
Method to initialize a Wirewound Chassis Mount Power resistor data
model instance.
"""
super(WirewoundChassisMount, self).__init__()
self.characteristic = 0
self.style = 0
def set_attributes(self, values):
"""
Method to set the Wirewound Chassis Mount Power resistor data model
attributes.
:param tuple values: tuple of values to assign to the instance
attributes.
:return: (_code, _msg); the error code and error message.
:rtype: tuple
"""
_code = 0
_msg = ''
Resistor.set_attributes(self, values)
try:
self.characteristic = int(values[117])
self.style = int(values[118])
except IndexError as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Insufficient input values."
except(TypeError, ValueError) as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Converting one or more inputs to correct data type."
return(_code, _msg)
def get_attributes(self):
"""
Method to retrieve the current values of the Wirewound Chassis Mount
Power resistor data model attributes.
:return: (specification, style)
:rtype: tuple
"""
_values = Resistor.get_attributes(self)
_values = _values + (self.characteristic, self.style)
return _values
def calculate_part(self):
"""
Method to calculate the hazard rate for the Wirewound Chassis Mount
Power resistor data model.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
from math import exp
self.hazard_rate_model = {}
if self.hazard_rate_type == 2:
self.hazard_rate_model['equation'] = 'lambdab * piR * piQ * piE'
# Base hazard rate.
_stress = self.operating_power / self.rated_power
self.base_hr = 0.00015 * \
exp(2.64 * ((self.temperature_active + 273.0) /
273.0)) * \
exp((_stress / -.466) * ((self.temperature_active +
273.0) / 273.0))
self.hazard_rate_model['lambdab'] = self.base_hr
# Resistance factor.
if self.resistance <= 500.0:
self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][0]
elif self.resistance > 500.0 and self.resistance <= 1000.0:
self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][1]
elif self.resistance > 1000.0 and self.resistance <= 5000.0:
self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][2]
elif self.resistance > 5000.0 and self.resistance <= 10000.0:
self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][3]
elif self.resistance > 10000.0 and self.resistance <= 20000.0:
self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][4]
elif self.resistance > 20000.0:
self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][5]
self.hazard_rate_model['piR'] = self.piR
return Resistor.calculate_part(self)
| [
"locale.setlocale",
"rtk.Utilities.error_handler",
"rtk.hardware.component.resistor.Resistor.Model.set_attributes",
"rtk.hardware.component.resistor.Resistor.Model.get_attributes",
"rtk.hardware.component.resistor.Resistor.Model.calculate_part",
"math.exp"
] | [((2538, 2591), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', 'Configuration.LOCALE'], {}), '(locale.LC_ALL, Configuration.LOCALE)\n', (2554, 2591), False, 'import locale\n'), ((2659, 2694), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '""""""'], {}), "(locale.LC_ALL, '')\n", (2675, 2694), False, 'import locale\n'), ((5885, 5914), 'rtk.hardware.component.resistor.Resistor.Model.calculate_part', 'Resistor.calculate_part', (['self'], {}), '(self)\n', (5908, 5914), True, 'from rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((10689, 10726), 'rtk.hardware.component.resistor.Resistor.Model.set_attributes', 'Resistor.set_attributes', (['self', 'values'], {}), '(self, values)\n', (10712, 10726), True, 'from rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((11435, 11464), 'rtk.hardware.component.resistor.Resistor.Model.get_attributes', 'Resistor.get_attributes', (['self'], {}), '(self)\n', (11458, 11464), True, 'from rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((15232, 15261), 'rtk.hardware.component.resistor.Resistor.Model.calculate_part', 'Resistor.calculate_part', (['self'], {}), '(self)\n', (15255, 15261), True, 'from rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((18193, 18230), 'rtk.hardware.component.resistor.Resistor.Model.set_attributes', 'Resistor.set_attributes', (['self', 'values'], {}), '(self, values)\n', (18216, 18230), True, 'from rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((18954, 18983), 'rtk.hardware.component.resistor.Resistor.Model.get_attributes', 'Resistor.get_attributes', (['self'], {}), '(self)\n', (18977, 18983), True, 'from rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((20978, 21007), 'rtk.hardware.component.resistor.Resistor.Model.calculate_part', 'Resistor.calculate_part', (['self'], {}), '(self)\n', (21001, 21007), True, 'from 
rtk.hardware.component.resistor.Resistor import Model as Resistor\n'), ((10888, 10922), 'rtk.Utilities.error_handler', 'Utilities.error_handler', (['_err.args'], {}), '(_err.args)\n', (10911, 10922), True, 'import rtk.Utilities as Utilities\n'), ((11045, 11079), 'rtk.Utilities.error_handler', 'Utilities.error_handler', (['_err.args'], {}), '(_err.args)\n', (11068, 11079), True, 'import rtk.Utilities as Utilities\n'), ((18393, 18427), 'rtk.Utilities.error_handler', 'Utilities.error_handler', (['_err.args'], {}), '(_err.args)\n', (18416, 18427), True, 'import rtk.Utilities as Utilities\n'), ((18550, 18584), 'rtk.Utilities.error_handler', 'Utilities.error_handler', (['_err.args'], {}), '(_err.args)\n', (18573, 18584), True, 'import rtk.Utilities as Utilities\n'), ((19803, 19870), 'math.exp', 'exp', (['(_stress / -0.466 * ((self.temperature_active + 273.0) / 273.0))'], {}), '(_stress / -0.466 * ((self.temperature_active + 273.0) / 273.0))\n', (19806, 19870), False, 'from math import exp\n'), ((5153, 5220), 'math.exp', 'exp', (['((_stress * ((self.temperature_active + 273.0) / 273.0)) ** 1.5)'], {}), '((_stress * ((self.temperature_active + 273.0) / 273.0)) ** 1.5)\n', (5156, 5220), False, 'from math import exp\n'), ((19677, 19732), 'math.exp', 'exp', (['(2.64 * ((self.temperature_active + 273.0) / 273.0))'], {}), '(2.64 * ((self.temperature_active + 273.0) / 273.0))\n', (19680, 19732), False, 'from math import exp\n'), ((5074, 5130), 'math.exp', 'exp', (['(((self.temperature_active + 273.0) / 398.0) ** 10.0)'], {}), '(((self.temperature_active + 273.0) / 398.0) ** 10.0)\n', (5077, 5130), False, 'from math import exp\n'), ((12455, 12519), 'math.exp', 'exp', (['(_stress / 0.5 * ((self.temperature_active + 273.0) / 273.0))'], {}), '(_stress / 0.5 * ((self.temperature_active + 273.0) / 273.0))\n', (12458, 12519), False, 'from math import exp\n'), ((12377, 12432), 'math.exp', 'exp', (['(((self.temperature_active + 273.0) / 298.0) ** 2.0)'], {}), '(((self.temperature_active 
+ 273.0) / 298.0) ** 2.0)\n', (12380, 12432), False, 'from math import exp\n')] |
##############################################################################
#
# Copyright (c) 2007 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope.component import getUtility
from zope.security.proxy import removeSecurityProxy
from zope.security.management import queryInteraction
from zope.app.security.interfaces import IAuthentication, PrincipalLookupError
from zope.app.security.principalregistry import Principal
from interfaces import IZojaxSecurityPolicy, _
def getPrincipal(id=None):
""" get current interaction principal """
if id is None:
interaction = queryInteraction()
if interaction is not None:
for participation in interaction.participations:
if participation.principal is not None:
return participation.principal
else:
try:
return getUtility(IAuthentication).getPrincipal(id)
except PrincipalLookupError:
return Principal('unknown', _('Unknown'), _('Unknown'), 'unknown', 'unknown')
def getPrincipals(ids):
auth = getUtility(IAuthentication)
for pid in ids:
try:
principal = auth.getPrincipal(pid)
except PrincipalLookupError:
continue
yield principal
def checkPermissionForPrincipal(principal, permission, object):
interaction = queryInteraction()
if IZojaxSecurityPolicy.providedBy(interaction):
return interaction.cached_decision(
removeSecurityProxy(object), principal.id,
interaction._groupsFor(principal), permission)
return False
def invalidateSecurityCache():
queryInteraction().invalidate_cache() | [
"interfaces._",
"zope.security.proxy.removeSecurityProxy",
"interfaces.IZojaxSecurityPolicy.providedBy",
"zope.component.getUtility",
"zope.security.management.queryInteraction"
] | [((1572, 1599), 'zope.component.getUtility', 'getUtility', (['IAuthentication'], {}), '(IAuthentication)\n', (1582, 1599), False, 'from zope.component import getUtility\n'), ((1848, 1866), 'zope.security.management.queryInteraction', 'queryInteraction', ([], {}), '()\n', (1864, 1866), False, 'from zope.security.management import queryInteraction\n'), ((1875, 1919), 'interfaces.IZojaxSecurityPolicy.providedBy', 'IZojaxSecurityPolicy.providedBy', (['interaction'], {}), '(interaction)\n', (1906, 1919), False, 'from interfaces import IZojaxSecurityPolicy, _\n'), ((1096, 1114), 'zope.security.management.queryInteraction', 'queryInteraction', ([], {}), '()\n', (1112, 1114), False, 'from zope.security.management import queryInteraction\n'), ((1977, 2004), 'zope.security.proxy.removeSecurityProxy', 'removeSecurityProxy', (['object'], {}), '(object)\n', (1996, 2004), False, 'from zope.security.proxy import removeSecurityProxy\n'), ((2134, 2152), 'zope.security.management.queryInteraction', 'queryInteraction', ([], {}), '()\n', (2150, 2152), False, 'from zope.security.management import queryInteraction\n'), ((1362, 1389), 'zope.component.getUtility', 'getUtility', (['IAuthentication'], {}), '(IAuthentication)\n', (1372, 1389), False, 'from zope.component import getUtility\n'), ((1484, 1496), 'interfaces._', '_', (['"""Unknown"""'], {}), "('Unknown')\n", (1485, 1496), False, 'from interfaces import IZojaxSecurityPolicy, _\n'), ((1498, 1510), 'interfaces._', '_', (['"""Unknown"""'], {}), "('Unknown')\n", (1499, 1510), False, 'from interfaces import IZojaxSecurityPolicy, _\n')] |
'''
ExperimentClient tests.
'''
import os
import unittest
import pandas as pd
import time
from mljar.client.project import ProjectClient
from mljar.client.dataset import DatasetClient
from mljar.client.experiment import ExperimentClient
from .project_based_test import ProjectBasedTest, get_postfix
class ExperimentClientTest(ProjectBasedTest):
def setUp(self):
proj_title = 'Test project-01'+get_postfix()
proj_task = 'bin_class'
self.expt_title = 'Test experiment-01'
self.validation_kfolds = 5
self.validation_shuffle = True
self.validation_stratify = True
self.validation_train_split = None
self.algorithms = ['xgb']
self.metric = 'logloss'
self.tuning_mode = 'Normal'
self.time_constraint = 1
self.create_enseble = False
# setup project
self.project_client = ProjectClient()
self.project = self.project_client.create_project(title = proj_title, task = proj_task)
# add training data
df = pd.read_csv('tests/data/test_1.csv')
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
dc = DatasetClient(self.project.hid)
self.dataset = dc.add_dataset_if_not_exists(df[cols], df[target])
def tearDown(self):
# wait before clean, to have time to initialize models
time.sleep(60)
# clean
self.project_client.delete_project(self.project.hid)
def test_create_with_kfold_cv(self):
#Create experiment test with k-fold CV.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "5-fold CV, Shuffle, Stratify")
self.assertEqual(experiment.metric, self.metric)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# get experiment by hid, there should be the same
experiment_2 = ec.get_experiment(experiment.hid)
self.assertEqual(experiment_2.hid, experiment.hid)
self.assertEqual(experiment_2.title, experiment.title)
self.assertEqual(experiment_2.metric, experiment.metric)
self.assertEqual(experiment_2.validation_scheme, experiment.validation_scheme)
self.assertTrue(experiment.equal(experiment_2))
# test __str__ method
self.assertTrue('id' in str(experiment_2))
self.assertTrue('title' in str(experiment_2))
self.assertTrue('metric' in str(experiment_2))
self.assertTrue('validation' in str(experiment_2))
def test_create_with_train_split(self):
#Create experiment with validation by train split.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, 0.72,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "Split 72/28, Shuffle, Stratify")
def test_create_with_validation_dataset(self):
#Create experiment with validation with dataset.
# add vald dataset
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
df = pd.read_csv('tests/data/test_1_vald.csv')
dc = DatasetClient(self.project.hid)
vald_dataset = dc.add_dataset_if_not_exists(df[cols], df[target])
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, vald_dataset, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, 0.72,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "With dataset")
def test_create_if_exists(self):
#Create experiment after experiment is already in project.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# try to create the same experiment
experiment_2 = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# both should be the same
self.assertEqual(experiment_2.hid, experiment.hid)
self.assertEqual(experiment_2.title, experiment.title)
self.assertEqual(experiment_2.metric, experiment.metric)
self.assertEqual(experiment_2.validation_scheme, experiment.validation_scheme)
self.assertTrue(experiment.equal(experiment_2))
if __name__ == "__main__":
unittest.main()
| [
"pandas.read_csv",
"mljar.client.experiment.ExperimentClient",
"time.sleep",
"mljar.client.dataset.DatasetClient",
"mljar.client.project.ProjectClient",
"unittest.main"
] | [((7921, 7936), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7934, 7936), False, 'import unittest\n'), ((884, 899), 'mljar.client.project.ProjectClient', 'ProjectClient', ([], {}), '()\n', (897, 899), False, 'from mljar.client.project import ProjectClient\n'), ((1037, 1073), 'pandas.read_csv', 'pd.read_csv', (['"""tests/data/test_1.csv"""'], {}), "('tests/data/test_1.csv')\n", (1048, 1073), True, 'import pandas as pd\n'), ((1190, 1221), 'mljar.client.dataset.DatasetClient', 'DatasetClient', (['self.project.hid'], {}), '(self.project.hid)\n', (1203, 1221), False, 'from mljar.client.dataset import DatasetClient\n'), ((1393, 1407), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (1403, 1407), False, 'import time\n'), ((1615, 1649), 'mljar.client.experiment.ExperimentClient', 'ExperimentClient', (['self.project.hid'], {}), '(self.project.hid)\n', (1631, 1649), False, 'from mljar.client.experiment import ExperimentClient\n'), ((3555, 3589), 'mljar.client.experiment.ExperimentClient', 'ExperimentClient', (['self.project.hid'], {}), '(self.project.hid)\n', (3571, 3589), False, 'from mljar.client.experiment import ExperimentClient\n'), ((4695, 4736), 'pandas.read_csv', 'pd.read_csv', (['"""tests/data/test_1_vald.csv"""'], {}), "('tests/data/test_1_vald.csv')\n", (4706, 4736), True, 'import pandas as pd\n'), ((4750, 4781), 'mljar.client.dataset.DatasetClient', 'DatasetClient', (['self.project.hid'], {}), '(self.project.hid)\n', (4763, 4781), False, 'from mljar.client.dataset import DatasetClient\n'), ((4894, 4928), 'mljar.client.experiment.ExperimentClient', 'ExperimentClient', (['self.project.hid'], {}), '(self.project.hid)\n', (4910, 4928), False, 'from mljar.client.experiment import ExperimentClient\n'), ((5915, 5949), 'mljar.client.experiment.ExperimentClient', 'ExperimentClient', (['self.project.hid'], {}), '(self.project.hid)\n', (5931, 5949), False, 'from mljar.client.experiment import ExperimentClient\n')] |
import time
import TSL2561
chip = TSL2561.TSL2561()
while True:
chip.power_on()
print("Raw Channel 0 = " + str(chip.read_channel0()))
print("Raw Channel 1 = " + str(chip.read_channel1()))
print("Lux Channel 0 = " + str(chip.calculate_lux(chip.read_channel0())))
print("Lux Channel 1 = " + str(chip.calculate_lux(chip.read_channel1())))
print("Full Spectrum Lux = " + str(chip.get_full_lux()))
print("IR Spectrum Lux = " + str(chip.get_ir_lux()))
print("Visible Lux = " + str(chip.get_visible_lux()))
print("")
chip.power_off()
time.sleep(1)
| [
"TSL2561.TSL2561",
"time.sleep"
] | [((34, 51), 'TSL2561.TSL2561', 'TSL2561.TSL2561', ([], {}), '()\n', (49, 51), False, 'import TSL2561\n'), ((539, 552), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (549, 552), False, 'import time\n')] |
# import modules
import subprocess
import io
def afplay(filepath):
params = io.getInfo(filepath)
time = params[3] / params[2]
cmd = 'afplay -q 1 %s'%(filepath)
subprocess.Popen(cmd, shell=True)
time.sleep()
return
| [
"io.getInfo",
"subprocess.Popen"
] | [((84, 104), 'io.getInfo', 'io.getInfo', (['filepath'], {}), '(filepath)\n', (94, 104), False, 'import io\n'), ((180, 213), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (196, 213), False, 'import subprocess\n')] |
from setuptools import setup, find_packages
try:
import s3stat
doc = s3stat.__doc__
except ImportError:
doc = "The docs are only available when the package is already installed. Sorry for this."
setup(
name="s3stat",
version="2.3.1",
description='An extensible Amazon S3 and Cloudfront log parser.',
long_description=doc,
author="<NAME>",
author_email='<EMAIL>',
url='https://github.com/nagyv/s3stat',
include_package_data=True,
zip_safe=False,
install_requires=['boto', 'tempdir'],
py_modules=['s3stat'],
scripts=['s3stat.py'],
keywords="s3stat amazon statistics goaccess"
# tests_require=['pytest'],
# cmdclass = {
# 'test': PyTest,
# }
) | [
"setuptools.setup"
] | [((213, 615), 'setuptools.setup', 'setup', ([], {'name': '"""s3stat"""', 'version': '"""2.3.1"""', 'description': '"""An extensible Amazon S3 and Cloudfront log parser."""', 'long_description': 'doc', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/nagyv/s3stat"""', 'include_package_data': '(True)', 'zip_safe': '(False)', 'install_requires': "['boto', 'tempdir']", 'py_modules': "['s3stat']", 'scripts': "['s3stat.py']", 'keywords': '"""s3stat amazon statistics goaccess"""'}), "(name='s3stat', version='2.3.1', description=\n 'An extensible Amazon S3 and Cloudfront log parser.', long_description=\n doc, author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/nagyv/s3stat', include_package_data=True, zip_safe=\n False, install_requires=['boto', 'tempdir'], py_modules=['s3stat'],\n scripts=['s3stat.py'], keywords='s3stat amazon statistics goaccess')\n", (218, 615), False, 'from setuptools import setup, find_packages\n')] |
from collections import Sequence
from alphatwirl_interface.cut_flows import cut_flow, cut_flow_with_counter, cut_flow_with_weighted_counter
import six
def Selection(steps={}, cutflow_file=None, weight_attr=None):
'''
This class ties together several modules from alphatwirl to
bring a simplified Selection experience.
:param dict steps: A dictionary of selection steps
:param str cutflow_file: path to the cutflow output file.
example:
preselection = Selection(
dict(
All=(
'NMuon[0] == 2',
'muon_pt[0] > 20',
'muon_pt[1] > 20',
'Muon_Iso[0] < 0.1',
'Muon_Iso[1] < 0.1',
'Muon_Charge[0] == -1 * Muon_Charge[1]',
)),
'output/cutflow_preselection.txt'
)
# define in alphatwirl modules to pass to tree.scan
modules = [
preselection,
...
]
'''
rc_pair = None
if cutflow_file:
if weight_attr:
rc_pair = cut_flow_with_weighted_counter(steps, cutflow_file, weight_attr)
else:
rc_pair = cut_flow_with_counter(steps, cutflow_file)
else:
rc_pair = cut_flow(steps)
return rc_pair[0]
| [
"alphatwirl_interface.cut_flows.cut_flow_with_counter",
"alphatwirl_interface.cut_flows.cut_flow_with_weighted_counter",
"alphatwirl_interface.cut_flows.cut_flow"
] | [((1307, 1322), 'alphatwirl_interface.cut_flows.cut_flow', 'cut_flow', (['steps'], {}), '(steps)\n', (1315, 1322), False, 'from alphatwirl_interface.cut_flows import cut_flow, cut_flow_with_counter, cut_flow_with_weighted_counter\n'), ((1135, 1199), 'alphatwirl_interface.cut_flows.cut_flow_with_weighted_counter', 'cut_flow_with_weighted_counter', (['steps', 'cutflow_file', 'weight_attr'], {}), '(steps, cutflow_file, weight_attr)\n', (1165, 1199), False, 'from alphatwirl_interface.cut_flows import cut_flow, cut_flow_with_counter, cut_flow_with_weighted_counter\n'), ((1236, 1278), 'alphatwirl_interface.cut_flows.cut_flow_with_counter', 'cut_flow_with_counter', (['steps', 'cutflow_file'], {}), '(steps, cutflow_file)\n', (1257, 1278), False, 'from alphatwirl_interface.cut_flows import cut_flow, cut_flow_with_counter, cut_flow_with_weighted_counter\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import psoap
from psoap.data import lkca14, redshift, Chunk
from psoap import matrix_functions
from psoap import covariance
from psoap import orbit
# from matplotlib.ticker import FormatStrFormatter as FSF
# from matplotlib.ticker import MaxNLocator
# from matplotlib.ticker import MultipleLocator
# Specify orbital parameters and make a sanity plot
q = 0.2
K = 5.0 # km/s
e = 0.2 #
omega = 10.0 # deg
P = 10.0 # days
T0 = 0.0 # epoch
gamma = 5.0 # km/s
n_epochs = 10
obs_dates = np.array([2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1])
# obs_dates = np.linspace(5, 150, num=n_epochs)
orb = orbit.SB2(q, K, e, omega, P, T0, gamma, obs_dates)
vAs, vBs = orb.get_component_velocities()
dates_fine = np.linspace(0, 30, num=200)
vA_fine, vB_fine = orb.get_component_velocities(dates_fine)
vAs_relative = vAs - vAs[0]
np.save("SB2/vAs_relative.npy", vAs_relative)
vBs_relative = vBs - vBs[0]
np.save("SB2/vBs_relative.npy", vBs_relative)
fig, ax = plt.subplots(nrows=3, figsize=(6,6))
ax[0].plot(dates_fine, vA_fine, "b")
ax[0].plot(orb.obs_dates, vAs, "bo")
ax[0].plot(dates_fine, vB_fine, "g")
ax[0].plot(orb.obs_dates, vBs, "go")
ax[0].axhline(gamma, ls="-.", color="0.5")
ax[-1].set_xlabel(r"$t$ [days]")
ax[0].set_ylabel(r"$v_A$ [km $\mathrm{s}^{-1}$]")
# For subsequent axes, plot velocities of stars relative to first observation.
ax[1].plot(orb.obs_dates, vAs_relative, "bo")
ax[1].set_ylabel(r"$v_A$ relative")
ax[2].plot(orb.obs_dates, vBs_relative, "go")
ax[2].set_ylabel(r"$v_B$ relative")
fig.subplots_adjust(left=0.14, right=0.86, bottom=0.24)
fig.savefig("SB2/orbit.png")
# Load the fake primary spectra we prepared
wl_f, fl_f = np.load("primary_wl_fl.npy")
# Load the fake secondary spectra we prepared
wl_g, fl_g = np.load("secondary_wl_fl.npy")
n_f = len(wl_f)
n_g = len(wl_g)
print("n_f:", n_f, "n_g:", n_g)
# Shorten these to be the same.
if n_f < n_g:
n_pix = n_f
print("Shortening g to f")
else:
n_pix =n_g
print("Shortening f to g")
wl = wl_f[0:n_pix]
fl_f = fl_f[0:n_pix]
fl_g = fl_g[0:n_pix]
# Just assume that wl_f will be wl_g as well.
# Create fake wavelengths with Doppler shifts by apply these to the master wl
wls_f = np.empty((n_epochs, n_pix))
wls_g = np.empty((n_epochs, n_pix))
for i in range(n_epochs):
wls_f[i] = redshift(wl, vAs[i])
wls_g[i] = redshift(wl, vBs[i])
# Falling plot of all eight epochs of each spectrum, overlaid with the velocities for each
# Show spectra on each plot along with chosen amplitude scaling
fig, ax = plt.subplots(nrows=n_epochs, sharex=True)
for i in range(n_epochs):
ax[i].plot(wls_f[i], fl_f, "b")
ax[i].plot(wls_g[i], fl_g, "g")
ax[i].set_ylabel("epoch {:}".format(i))
ax[-1].set_xlabel(r"$\lambda [\AA]$")
fig.savefig("SB2/dataset_noiseless_full.png", dpi=300)
# Here is where we set up the number of chunks, and choose what region of overlaps we want.
# New chunks [start, stop]
# chunk_wls = [[5240, 5250], [5255, 5265], [5270, 5280]]
chunk_wls = [[5265, 5275]]
# Measure this as S/N per resolution element. That means that there is a sqrt(2.5) effect.
# let alpha be the percentage of the primary as the total flux.
ratio = 0.2
alpha = (1 / (ratio + 1))
print("Ratio: {}, alpha: {}".format(ratio, alpha))
# alpha = 0.90
# Assume a S/N = 40, so N = 1.0 / 40
S_N = 60 # per resolution element
noise_amp = 1.0 / (S_N/np.sqrt(2.5)) # per pixel
# Truncate down to a smaller region to ensure overlap between all orders.
for (wl0, wl1) in chunk_wls:
print("Creating chunk {:.0f} to {:.0f}".format(wl0, wl1))
# Keep everything the same size. These are how many pixels we plan to keep in common between
# epochs
ind = (wls_f[0] > wl0) & (wls_f[0] < wl1)
n_pix_common = np.sum(ind)
print("n_pix_common = {}".format(n_pix_common))
# Now choose a narrower, common wl grid, which will just be f.
# Now we should have a giant array of wavelengths that all share the same flux values, but shifted
wls_comb = np.zeros((n_epochs, n_pix_common))
fls_f = np.empty((n_epochs, n_pix_common))
fls_g = np.empty((n_epochs, n_pix_common))
fls_comb = np.empty((n_epochs, n_pix_common))
fls_noise = np.zeros((n_epochs, n_pix_common))
sigma_comb = noise_amp * np.ones((n_epochs, n_pix_common))
for i in range(n_epochs):
# Select a subset of wl_f that has the appropriate number of pixels
ind_0 = np.searchsorted(wls_f[i], wl0)
print("Inserting at index {}, wavelength {:.2f}".format(ind_0, wls_f[i, ind_0]))
wl_common = wls_f[i, ind_0:(ind_0 + n_pix_common)]
# Interpolate the master spectrum onto this grid
interp = interp1d(wls_f[i], fl_f)
fl_f_common = interp(wl_common)
interp = interp1d(wls_g[i], fl_g)
fl_g_common = interp(wl_common)
fl_common = alpha * fl_f_common + (1 - alpha) * fl_g_common
# Add noise to it
fl_common_noise = fl_common + np.random.normal(scale=noise_amp, size=n_pix_common)
# Store into array
wls_comb[i] = wl_common
fls_f[i] = fl_f_common
fls_g[i] = fl_g_common
fls_comb[i] = fl_common
fls_noise[i] = fl_common_noise
fig, ax = plt.subplots(nrows=4, sharex=True)
ax[0].plot(wl_common, alpha * fl_f_common, "b")
ax[0].set_ylabel(r"$f$")
ax[1].plot(wl_common, (1 - alpha) * fl_g_common, "g")
ax[1].set_ylabel(r"$g$")
ax[2].plot(wl_common, fl_common, "k")
ax[2].set_ylabel(r"$f + g$")
ax[3].plot(wl_common, fl_common_noise, "k")
ax[3].set_ylabel(r"$f + g +$ noise")
ax[-1].set_xlabel(r"$\lambda\;[\AA]$")
fig.savefig("SB2/epoch_{}.png".format(i), dpi=300)
# Save the created spectra into a chunk
date_comb = obs_dates[:,np.newaxis] * np.ones_like(wls_comb)
chunkSpec = Chunk(wls_comb, fls_noise, sigma_comb, date_comb)
wl0 = np.min(wls_comb)
wl1 = np.max(wls_comb)
chunkSpec.save(0, wl0, wl1, prefix="SB2/")
# 2D arrays before we have summed them or added noise.
print("STDEV primary", np.std(alpha * fls_f))
print("STDEV secondary", np.std((1 - alpha) * fls_g))
np.save("SB2/fls_f.npy", alpha * fls_f)
np.save("SB2/fls_g.npy", (1 - alpha) * fls_g)
np.save("SB2/fls_comb.npy", fls_comb)
| [
"numpy.sqrt",
"scipy.interpolate.interp1d",
"numpy.array",
"psoap.orbit.SB2",
"numpy.save",
"numpy.searchsorted",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"numpy.min",
"numpy.random.normal",
"numpy.ones",
"psoap.data.Chunk",
"numpy.std",
"numpy.ones_like",
"psoap.data.redshift",
... | [((577, 643), 'numpy.array', 'np.array', (['[2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1]'], {}), '([2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1])\n', (585, 643), True, 'import numpy as np\n'), ((700, 750), 'psoap.orbit.SB2', 'orbit.SB2', (['q', 'K', 'e', 'omega', 'P', 'T0', 'gamma', 'obs_dates'], {}), '(q, K, e, omega, P, T0, gamma, obs_dates)\n', (709, 750), False, 'from psoap import orbit\n'), ((808, 835), 'numpy.linspace', 'np.linspace', (['(0)', '(30)'], {'num': '(200)'}), '(0, 30, num=200)\n', (819, 835), True, 'import numpy as np\n'), ((925, 970), 'numpy.save', 'np.save', (['"""SB2/vAs_relative.npy"""', 'vAs_relative'], {}), "('SB2/vAs_relative.npy', vAs_relative)\n", (932, 970), True, 'import numpy as np\n'), ((1000, 1045), 'numpy.save', 'np.save', (['"""SB2/vBs_relative.npy"""', 'vBs_relative'], {}), "('SB2/vBs_relative.npy', vBs_relative)\n", (1007, 1045), True, 'import numpy as np\n'), ((1057, 1094), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'figsize': '(6, 6)'}), '(nrows=3, figsize=(6, 6))\n', (1069, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1788), 'numpy.load', 'np.load', (['"""primary_wl_fl.npy"""'], {}), "('primary_wl_fl.npy')\n", (1767, 1788), True, 'import numpy as np\n'), ((1849, 1879), 'numpy.load', 'np.load', (['"""secondary_wl_fl.npy"""'], {}), "('secondary_wl_fl.npy')\n", (1856, 1879), True, 'import numpy as np\n'), ((2290, 2317), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix)'], {}), '((n_epochs, n_pix))\n', (2298, 2317), True, 'import numpy as np\n'), ((2326, 2353), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix)'], {}), '((n_epochs, n_pix))\n', (2334, 2353), True, 'import numpy as np\n'), ((2619, 2660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'n_epochs', 'sharex': '(True)'}), '(nrows=n_epochs, sharex=True)\n', (2631, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2396, 2416), 'psoap.data.redshift', 'redshift', (['wl', 'vAs[i]'], {}), '(wl, 
vAs[i])\n', (2404, 2416), False, 'from psoap.data import lkca14, redshift, Chunk\n'), ((2432, 2452), 'psoap.data.redshift', 'redshift', (['wl', 'vBs[i]'], {}), '(wl, vBs[i])\n', (2440, 2452), False, 'from psoap.data import lkca14, redshift, Chunk\n'), ((3828, 3839), 'numpy.sum', 'np.sum', (['ind'], {}), '(ind)\n', (3834, 3839), True, 'import numpy as np\n'), ((4078, 4112), 'numpy.zeros', 'np.zeros', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4086, 4112), True, 'import numpy as np\n'), ((4125, 4159), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4133, 4159), True, 'import numpy as np\n'), ((4172, 4206), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4180, 4206), True, 'import numpy as np\n'), ((4222, 4256), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4230, 4256), True, 'import numpy as np\n'), ((4273, 4307), 'numpy.zeros', 'np.zeros', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4281, 4307), True, 'import numpy as np\n'), ((5930, 5979), 'psoap.data.Chunk', 'Chunk', (['wls_comb', 'fls_noise', 'sigma_comb', 'date_comb'], {}), '(wls_comb, fls_noise, sigma_comb, date_comb)\n', (5935, 5979), False, 'from psoap.data import lkca14, redshift, Chunk\n'), ((5990, 6006), 'numpy.min', 'np.min', (['wls_comb'], {}), '(wls_comb)\n', (5996, 6006), True, 'import numpy as np\n'), ((6017, 6033), 'numpy.max', 'np.max', (['wls_comb'], {}), '(wls_comb)\n', (6023, 6033), True, 'import numpy as np\n'), ((6255, 6294), 'numpy.save', 'np.save', (['"""SB2/fls_f.npy"""', '(alpha * fls_f)'], {}), "('SB2/fls_f.npy', alpha * fls_f)\n", (6262, 6294), True, 'import numpy as np\n'), ((6299, 6344), 'numpy.save', 'np.save', (['"""SB2/fls_g.npy"""', '((1 - alpha) * fls_g)'], {}), "('SB2/fls_g.npy', (1 - alpha) * fls_g)\n", (6306, 6344), True, 'import numpy as np\n'), ((6349, 6386), 'numpy.save', 'np.save', 
(['"""SB2/fls_comb.npy"""', 'fls_comb'], {}), "('SB2/fls_comb.npy', fls_comb)\n", (6356, 6386), True, 'import numpy as np\n'), ((3460, 3472), 'numpy.sqrt', 'np.sqrt', (['(2.5)'], {}), '(2.5)\n', (3467, 3472), True, 'import numpy as np\n'), ((4338, 4371), 'numpy.ones', 'np.ones', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4345, 4371), True, 'import numpy as np\n'), ((4496, 4526), 'numpy.searchsorted', 'np.searchsorted', (['wls_f[i]', 'wl0'], {}), '(wls_f[i], wl0)\n', (4511, 4526), True, 'import numpy as np\n'), ((4751, 4775), 'scipy.interpolate.interp1d', 'interp1d', (['wls_f[i]', 'fl_f'], {}), '(wls_f[i], fl_f)\n', (4759, 4775), False, 'from scipy.interpolate import interp1d\n'), ((4834, 4858), 'scipy.interpolate.interp1d', 'interp1d', (['wls_g[i]', 'fl_g'], {}), '(wls_g[i], fl_g)\n', (4842, 4858), False, 'from scipy.interpolate import interp1d\n'), ((5299, 5333), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'sharex': '(True)'}), '(nrows=4, sharex=True)\n', (5311, 5333), True, 'import matplotlib.pyplot as plt\n'), ((5891, 5913), 'numpy.ones_like', 'np.ones_like', (['wls_comb'], {}), '(wls_comb)\n', (5903, 5913), True, 'import numpy as np\n'), ((6169, 6190), 'numpy.std', 'np.std', (['(alpha * fls_f)'], {}), '(alpha * fls_f)\n', (6175, 6190), True, 'import numpy as np\n'), ((6221, 6248), 'numpy.std', 'np.std', (['((1 - alpha) * fls_g)'], {}), '((1 - alpha) * fls_g)\n', (6227, 6248), True, 'import numpy as np\n'), ((5033, 5085), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'noise_amp', 'size': 'n_pix_common'}), '(scale=noise_amp, size=n_pix_common)\n', (5049, 5085), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import logging
import os
import json
from dxlbootstrap.app import Application
from dxlclient.service import ServiceRegistrationInfo
from dxlclient.callbacks import RequestCallback
from dxlclient.message import ErrorResponse, Response
from ._epo import _Epo
# Configure local logger
logger = logging.getLogger(__name__)
class EpoService(Application):
"""
A DXL service that exposes the remote commands of one or more ePO servers to
the DXL fabric. When a DXL request message is received, the remote command is invoked
on the appropriate ePO server and its response is packaged and returned to the invoking
client via a DXL response message.
"""
# The type of the ePO DXL service that is registered with the fabric
DXL_SERVICE_TYPE = "/mcafee/service/epo/remote"
# The format for request topics that are associated with the ePO DXL service
DXL_REQUEST_FORMAT = "/mcafee/service/epo/remote/{0}"
# The timeout used when registering/unregistering the service
DXL_SERVICE_REGISTRATION_TIMEOUT = 60
# The name of the "General" section within the ePO service configuration file
GENERAL_CONFIG_SECTION = "General"
# The property used to specify ePO names within the "General" section of the
# ePO service configuration file
GENERAL_EPO_NAMES_CONFIG_PROP = "epoNames"
# The property used to specify the host of an ePO within within the ePO service
# configuration file
EPO_HOST_CONFIG_PROP = "host"
# The property used to specify the port of an ePO server within the ePO service
# configuration file (this property is optional)
EPO_PORT_CONFIG_PROP = "port"
# The property used to specify the user used to login to an ePO server within
# the ePO service configuration file
EPO_USER_CONFIG_PROP = "user"
# The property used to specify the password used to login to an ePO server within the ePO
# service configuration file
EPO_PASSWORD_CONFIG_PROP = "password"
# The property used to specify the unique identifier for the ePO server within
# the ePO service configuration file (this property is optional)
EPO_UNIQUE_ID_CONFIG_PROP = "uniqueId"
# Whether to verify that the hostname in the ePO's certificate matches the ePO
# server being connected to. (optional, enabled by default)
EPO_VERIFY_CERTIFICATE = "verifyCertificate"
# A path to a CA Bundle file containing certificates of trusted CAs.
# The CA Bundle is used to ensure that the ePO server being connected to was signed by a
# valid authority.
EPO_VERIFY_CERT_BUNDLE = "verifyCertBundle"
# Default value for verifying certificates
DEFAULT_VERIFY_CERTIFICATE = True
# The default port used to communicate with an ePO server
DEFAULT_EPO_PORT = 8443
def __init__(self, config_dir):
"""
Constructor parameters:
:param config_dir: The location of the configuration files for the
application
"""
super(EpoService, self).__init__(config_dir, "dxleposervice.config")
self._epo_by_topic = {}
self._dxl_service = None
@property
def client(self):
"""
The DXL client used by the application to communicate with the DXL
fabric
"""
return self._dxl_client
@property
def config(self):
"""
The application configuration (as read from the "dxleposervice.config" file)
"""
return self._config
def on_run(self):
"""
Invoked when the application has started running.
"""
logger.info("On 'run' callback.")
@staticmethod
def _get_option(config, section, option, default_value=None):
return config.get(section, option) \
if config.has_option(section, option) else default_value
@staticmethod
def _get_boolean_option(config, section, option, default_value=False):
return config.getboolean(section, option) \
if config.has_option(section, option) else default_value
def on_load_configuration(self, config):
"""
Invoked after the application-specific configuration has been loaded
This callback provides the opportunity for the application to parse
additional configuration properties.
:param config: The application configuration
"""
logger.info("On 'load configuration' callback.")
# Determine the ePO servers in the configuration file
epo_names_str = config.get(self.GENERAL_CONFIG_SECTION,
self.GENERAL_EPO_NAMES_CONFIG_PROP)
epo_names = epo_names_str.split(",")
if len(epo_names_str.strip()) is 0 or len(epo_names) is 0:
raise Exception(
"At least one ePO server must be defined in the service configuration file")
# For each ePO specified, create an instance of the ePO object (used to communicate with
# the ePO server via HTTP)
for epo_name in epo_names:
epo_name = epo_name.strip()
host = config.get(epo_name, self.EPO_HOST_CONFIG_PROP)
user = config.get(epo_name, self.EPO_USER_CONFIG_PROP)
password = config.get(epo_name, self.EPO_PASSWORD_CONFIG_PROP)
# Port (optional)
port = self._get_option(config, epo_name, self.EPO_PORT_CONFIG_PROP,
self.DEFAULT_EPO_PORT)
# Whether to verify the ePO server's certificate (optional)
verify = self._get_boolean_option(config, epo_name,
self.EPO_VERIFY_CERTIFICATE,
self.DEFAULT_VERIFY_CERTIFICATE)
# CA Bundle
if verify:
ca_bundle = self._get_option(config, epo_name,
self.EPO_VERIFY_CERT_BUNDLE)
if ca_bundle:
ca_bundle = self._get_path(ca_bundle)
verify = ca_bundle
if not os.access(verify, os.R_OK):
raise Exception(
"Unable to access CA bundle file/dir ({0}): {1}".format(
self.EPO_VERIFY_CERT_BUNDLE, verify))
# Create ePO wrapper
epo = _Epo(name=epo_name, host=host, port=port, user=user,
password=password, verify=verify)
# Unique identifier (optional, if not specified attempts to determine GUID)
unique_id = self._get_option(config, epo_name,
self.EPO_UNIQUE_ID_CONFIG_PROP)
if unique_id is None:
logger.info(
"Attempting to determine GUID for ePO server: %s ...",
epo_name)
unique_id = epo.lookup_guid()
logger.info(
"GUID '%s' found for ePO server: %s", unique_id, epo_name)
# Create the request topic based on the ePO's unique identifier
request_topic = self.DXL_REQUEST_FORMAT.format(unique_id)
logger.info(
"Request topic '%s' associated with ePO server: %s",
request_topic, epo_name)
# Associate ePO wrapper instance with the request topic
self._epo_by_topic[request_topic] = epo
def on_dxl_connect(self):
"""
Invoked after the client associated with the application has connected
to the DXL fabric.
"""
logger.info("On 'DXL connect' callback.")
def on_register_services(self):
"""
Invoked when services should be registered with the application
"""
# Register service
service = ServiceRegistrationInfo(self.client, self.DXL_SERVICE_TYPE)
for request_topic in self._epo_by_topic:
service.add_topic(str(request_topic),
_EpoRequestCallback(self.client,
self._epo_by_topic))
logger.info("Registering service ...")
self.client.register_service_sync(service,
self.DXL_SERVICE_REGISTRATION_TIMEOUT)
logger.info("Service registration succeeded.")
self._dxl_service = service
def _get_path(self, in_path):
"""
Returns an absolute path for a file specified in the configuration file (supports
files relative to the configuration file).
:param in_path: The specified path
:return: An absolute path for a file specified in the configuration file
"""
if not os.path.isfile(in_path) and not os.path.isabs(in_path):
config_rel_path = os.path.join(self._config_dir, in_path)
if os.path.isfile(config_rel_path):
in_path = config_rel_path
return in_path
class _EpoRequestCallback(RequestCallback):
"""
Request callback used to handle incoming service requests
"""
# UTF-8 encoding (used for encoding/decoding payloads)
UTF_8 = "utf-8"
# The key in the request used to specify the ePO command to invoke
CMD_NAME_KEY = "command"
# The key in the request used to specify the output format
# (json, xml, verbose, terse). This is optional
OUTPUT_KEY = "output"
# The key used to specify the parameters for the ePO command
PARAMS_KEY = "params"
# The default output format
DEFAULT_OUTPUT = "json"
def __init__(self, client, epo_by_topic):
"""
Constructs the callback
:param client: The DXL client associated with the service
:param epo_by_topic: The ePO server wrappers by associated request topics
"""
super(_EpoRequestCallback, self).__init__()
self._dxl_client = client
self._epo_by_topic = epo_by_topic
def on_request(self, request):
"""
Invoked when a request is received
:param request: The request that was received
"""
try:
# Build dictionary from the request payload
req_dict = json.loads(request.payload.decode(encoding=self.UTF_8))
# Determine the ePO command
if self.CMD_NAME_KEY not in req_dict:
raise Exception(
"A command name was not specified ('{0}')".format(
self.CMD_NAME_KEY))
command = req_dict[self.CMD_NAME_KEY]
# Determine the request parameters
req_params = {}
if self.PARAMS_KEY in req_dict:
req_params = req_dict[self.PARAMS_KEY]
# Determine the output format
output = self.DEFAULT_OUTPUT
if self.OUTPUT_KEY in req_dict:
output = req_dict[self.OUTPUT_KEY]
# Get the ePO server to invoke the command on
epo = self._epo_by_topic[request.destination_topic]
# Execute the ePO Remote Command
result = epo.execute(command, output, req_params)
# Create the response, set payload, and deliver
response = Response(request)
response.payload = result
self._dxl_client.send_response(response)
except Exception as ex:
logger.exception("Error while processing request")
# Send error response
self._dxl_client.send_response(
ErrorResponse(request,
error_message=str(ex).encode(
encoding=self.UTF_8)))
| [
"logging.getLogger",
"dxlclient.service.ServiceRegistrationInfo",
"dxlclient.message.Response",
"os.path.isabs",
"os.access",
"os.path.join",
"os.path.isfile"
] | [((333, 360), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (350, 360), False, 'import logging\n'), ((7824, 7883), 'dxlclient.service.ServiceRegistrationInfo', 'ServiceRegistrationInfo', (['self.client', 'self.DXL_SERVICE_TYPE'], {}), '(self.client, self.DXL_SERVICE_TYPE)\n', (7847, 7883), False, 'from dxlclient.service import ServiceRegistrationInfo\n'), ((8814, 8853), 'os.path.join', 'os.path.join', (['self._config_dir', 'in_path'], {}), '(self._config_dir, in_path)\n', (8826, 8853), False, 'import os\n'), ((8869, 8900), 'os.path.isfile', 'os.path.isfile', (['config_rel_path'], {}), '(config_rel_path)\n', (8883, 8900), False, 'import os\n'), ((11209, 11226), 'dxlclient.message.Response', 'Response', (['request'], {}), '(request)\n', (11217, 11226), False, 'from dxlclient.message import ErrorResponse, Response\n'), ((8728, 8751), 'os.path.isfile', 'os.path.isfile', (['in_path'], {}), '(in_path)\n', (8742, 8751), False, 'import os\n'), ((8760, 8782), 'os.path.isabs', 'os.path.isabs', (['in_path'], {}), '(in_path)\n', (8773, 8782), False, 'import os\n'), ((6103, 6129), 'os.access', 'os.access', (['verify', 'os.R_OK'], {}), '(verify, os.R_OK)\n', (6112, 6129), False, 'import os\n')] |
# Copyright (c) 2008-2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""1.4.0 to 1.5.0
Revision ID: 1.5.0
Revises:
"""
# revision identifiers, used by Alembic.
revision = '1.5.0'
down_revision = '1.4.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
Session = sessionmaker()
def upgrade():
"""
update schema&data
"""
bind = op.get_bind()
session = Session(bind=bind)
# create table harbor_label
HarborLabel.__table__.create(bind)
# create table harbor_resource_label
HarborResourceLabel.__table__.create(bind)
# create user_group
UserGroup.__table__.create(bind)
# project member
op.drop_constraint('project_member_ibfk_1', 'project_member', type_='foreignkey')
op.drop_constraint('project_member_ibfk_2', 'project_member', type_='foreignkey')
op.drop_constraint('project_member_ibfk_3', 'project_member', type_='foreignkey')
op.drop_constraint('PRIMARY', 'project_member', type_='primary')
op.drop_index('user_id', 'project_member')
op.drop_index('role', 'project_member')
op.execute('ALTER TABLE project_member ADD id INT PRIMARY KEY AUTO_INCREMENT;')
op.alter_column('project_member', 'user_id', existing_type=sa.Integer, existing_nullable=False, new_column_name='entity_id')
op.alter_column('project_member', 'creation_time', existing_type=mysql.TIMESTAMP, server_default = sa.text("CURRENT_TIMESTAMP"))
op.alter_column('project_member', 'update_time', existing_type=mysql.TIMESTAMP, server_default=sa.text("CURRENT_TIMESTAMP"), onupdate=sa.text("CURRENT_TIMESTAMP"))
op.add_column('project_member', sa.Column('entity_type', sa.String(1)))
session.query(ProjectMember).update({
ProjectMember.entity_type: 'u'
})
op.alter_column('project_member', 'entity_type', existing_type=sa.String(1), existing_nullable=True, nullable=False)
op.create_unique_constraint('unique_project_entity_type', 'project_member', ['project_id', 'entity_id', 'entity_type'])
# add job_uuid to replicationjob and img_scan_job
op.add_column('replication_job', sa.Column('job_uuid', sa.String(64)))
op.add_column('img_scan_job', sa.Column('job_uuid', sa.String(64)))
# add index to replication job
op.create_index('poid_status', 'replication_job', ['policy_id', 'status'])
# add index to img_scan_job
op.create_index('idx_status', 'img_scan_job', ['status'])
op.create_index('idx_digest', 'img_scan_job', ['digest'])
op.create_index('idx_uuid', 'img_scan_job', ['job_uuid'])
op.create_index('idx_repository_tag', 'img_scan_job', ['repository', 'tag'])
session.commit()
def downgrade():
"""
Downgrade has been disabled.
"""
| [
"alembic.op.get_bind",
"alembic.op.drop_constraint",
"alembic.op.alter_column",
"alembic.op.execute",
"alembic.op.drop_index",
"alembic.op.create_index",
"alembic.op.create_unique_constraint"
] | [((960, 973), 'alembic.op.get_bind', 'op.get_bind', ([], {}), '()\n', (971, 973), False, 'from alembic import op\n'), ((1257, 1343), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""project_member_ibfk_1"""', '"""project_member"""'], {'type_': '"""foreignkey"""'}), "('project_member_ibfk_1', 'project_member', type_=\n 'foreignkey')\n", (1275, 1343), False, 'from alembic import op\n'), ((1343, 1429), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""project_member_ibfk_2"""', '"""project_member"""'], {'type_': '"""foreignkey"""'}), "('project_member_ibfk_2', 'project_member', type_=\n 'foreignkey')\n", (1361, 1429), False, 'from alembic import op\n'), ((1429, 1515), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""project_member_ibfk_3"""', '"""project_member"""'], {'type_': '"""foreignkey"""'}), "('project_member_ibfk_3', 'project_member', type_=\n 'foreignkey')\n", (1447, 1515), False, 'from alembic import op\n'), ((1515, 1579), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""PRIMARY"""', '"""project_member"""'], {'type_': '"""primary"""'}), "('PRIMARY', 'project_member', type_='primary')\n", (1533, 1579), False, 'from alembic import op\n'), ((1584, 1626), 'alembic.op.drop_index', 'op.drop_index', (['"""user_id"""', '"""project_member"""'], {}), "('user_id', 'project_member')\n", (1597, 1626), False, 'from alembic import op\n'), ((1631, 1670), 'alembic.op.drop_index', 'op.drop_index', (['"""role"""', '"""project_member"""'], {}), "('role', 'project_member')\n", (1644, 1670), False, 'from alembic import op\n'), ((1675, 1754), 'alembic.op.execute', 'op.execute', (['"""ALTER TABLE project_member ADD id INT PRIMARY KEY AUTO_INCREMENT;"""'], {}), "('ALTER TABLE project_member ADD id INT PRIMARY KEY AUTO_INCREMENT;')\n", (1685, 1754), False, 'from alembic import op\n'), ((1759, 1887), 'alembic.op.alter_column', 'op.alter_column', (['"""project_member"""', '"""user_id"""'], {'existing_type': 'sa.Integer', 'existing_nullable': 
'(False)', 'new_column_name': '"""entity_id"""'}), "('project_member', 'user_id', existing_type=sa.Integer,\n existing_nullable=False, new_column_name='entity_id')\n", (1774, 1887), False, 'from alembic import op\n'), ((2476, 2599), 'alembic.op.create_unique_constraint', 'op.create_unique_constraint', (['"""unique_project_entity_type"""', '"""project_member"""', "['project_id', 'entity_id', 'entity_type']"], {}), "('unique_project_entity_type', 'project_member',\n ['project_id', 'entity_id', 'entity_type'])\n", (2503, 2599), False, 'from alembic import op\n'), ((2838, 2912), 'alembic.op.create_index', 'op.create_index', (['"""poid_status"""', '"""replication_job"""', "['policy_id', 'status']"], {}), "('poid_status', 'replication_job', ['policy_id', 'status'])\n", (2853, 2912), False, 'from alembic import op\n'), ((2950, 3007), 'alembic.op.create_index', 'op.create_index', (['"""idx_status"""', '"""img_scan_job"""', "['status']"], {}), "('idx_status', 'img_scan_job', ['status'])\n", (2965, 3007), False, 'from alembic import op\n'), ((3012, 3069), 'alembic.op.create_index', 'op.create_index', (['"""idx_digest"""', '"""img_scan_job"""', "['digest']"], {}), "('idx_digest', 'img_scan_job', ['digest'])\n", (3027, 3069), False, 'from alembic import op\n'), ((3074, 3131), 'alembic.op.create_index', 'op.create_index', (['"""idx_uuid"""', '"""img_scan_job"""', "['job_uuid']"], {}), "('idx_uuid', 'img_scan_job', ['job_uuid'])\n", (3089, 3131), False, 'from alembic import op\n'), ((3136, 3212), 'alembic.op.create_index', 'op.create_index', (['"""idx_repository_tag"""', '"""img_scan_job"""', "['repository', 'tag']"], {}), "('idx_repository_tag', 'img_scan_job', ['repository', 'tag'])\n", (3151, 3212), False, 'from alembic import op\n')] |
from __future__ import print_function
import pandas
import matplotlib; matplotlib.use('Agg')
import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt
from tabulate import tabulate
from munkres import Munkres
from collections import defaultdict
try:
from ordereddict import OrderedDict # can be installed using pip
except:
from collections import OrderedDict # only included from python 2.7 on
import mailpy
from box_util import boxoverlap, box3doverlap
from evaluate_kitti3dmot_model import *
def run(*argv):
"""
Parameters:
argv = [signture, dir ,"3D/2D","Baseline","Your model*", subfolder]
signture:
3D/2D:
Baseline: Name of basline
must match the folder where the results are stored.
tracked obejects are not in different
subfolders
Your model/*: name of your model
must match the folder where the results are stored.
Add * at the end if tracked obejects are not in different
subfolders
subfolder: (optional)
to store in a subfoler
"""
num_sample_pts = 41.0
# check for correct number of arguments. if user_sha and email are not supplied,
# no notification email is sent (this option is used for auto-updates)
if len(argv)<5:
print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
sys.exit(1);
# get unique sha key of submitted results
result_sha = argv[0]
obj_tracked = result_sha.split("_")[0]
dir = argv[1]
dt_typ= result_sha.split("_")[3]
baseline_name = argv[3]
mail = mailpy.Mail("")
D = argv[2]
#
if argv[2] == '2D':
eval_3diou, eval_2diou = False, True # eval 2d
elif argv[2] == '3D':
eval_3diou, eval_2diou = True, False # eval 3d
else:
print("Usage: python eval_kitti3dmot.py result_sha ?D(e.g. 2D or 3D)")
sys.exit(1);
# evaluate results
if len(argv) ==6:
table_name = 'results/{}/{}/results_{}_{}_table_{}.csv'.format(dir,argv[5],obj_tracked,dt_typ, D)
else:
table_name = 'results/{}/results_{}_{}_table_{}.csv'.format(dir,obj_tracked,dt_typ, D)
if os.path.exists(table_name):
df = pandas.read_csv(table_name)
if not (df["Model"] == baseline_name).any():
print("Evaluating baseline")
success, baseline, base_avgs = evaluate(result_sha, dir,baseline_name, mail,eval_3diou,eval_2diou)
df.loc[len(df.index)] = [baseline_name,baseline.sMOTA, baseline.MOTA, baseline.MOTP, baseline.MT, baseline.ML, baseline.id_switches, baseline.fragments,
baseline.F1, baseline.precision, baseline.recall, baseline.FAR, baseline.tp, baseline.fp, baseline.fn,
base_avgs[0], base_avgs[1], base_avgs[2]]
else:
print("Evaluating baseline :")
success, baseline, base_avgs = evaluate(result_sha, dir, baseline_name, mail,eval_3diou,eval_2diou)
# basline_data = [[baseline_name],[baseline.sMOTA], [baseline.MOTA], [baseline.MOTP], [baseline.MT], [baseline.ML], [baseline.id_switches], [baseline.fragments],
# [baseline.F1], [baseline.precision], [baseline.recall], [baseline.FAR], [baseline.tp], [baseline.fp], [baseline.fn],
# [base_avgs[0]], [base_avgs[1]], [base_avgs[2]]]
cols = ["Model","sMOTA", "MOTA", "MOTP", "MT", "ML", "IDS", "FRAG" ,"F1" ,"Prec", "Recall", "FAR", "TP", "FP","FN", "sAMOTA", "AMOTA", "AMOTP"]
df = pandas.DataFrame( columns =cols)
df.loc[len(df.index)] = [baseline_name,baseline.sMOTA, baseline.MOTA, baseline.MOTP, baseline.MT, baseline.ML, baseline.id_switches, baseline.fragments,
baseline.F1, baseline.precision, baseline.recall, baseline.FAR, baseline.tp, baseline.fp, baseline.fn,
base_avgs[0], base_avgs[1], base_avgs[2]]
other_name = argv[4]
mail = mailpy.Mail("")
print("Evaluating "+other_name[:-1] +" :")
success, other_model, om_avgs = evaluate(result_sha, dir, other_name, mail,eval_3diou,eval_2diou)
new_row = [other_name[:-1],other_model.sMOTA, other_model.MOTA, other_model.MOTP, other_model.MT, other_model.ML, other_model.id_switches, other_model.fragments, \
other_model.F1, other_model.precision, other_model.recall, other_model.FAR, other_model.tp, other_model.fp, other_model.fn,\
om_avgs[0], om_avgs[1], om_avgs[2]]
df.loc[len(df.index)] = new_row
print(df.loc[(df['Model'] == baseline_name) | (df['Model'] == other_name[:-1])])
string_format = df.to_latex(index=False)
if len(argv) ==6:
table_name = '.results/{}/{}/comparison_{}_{}_latex_{}.txt'.format(dir,argv[5],obj_tracked,dt_typ, D)
else:
table_name = 'results/{}/comparison_{}_{}_latex_{}.txt'.format(dir,obj_tracked,dt_typ, D)
print(string_format, file=open(table_name, 'w'))
if len(argv) ==6:
table_name = 'results/{}/{}/results_{}_{}_table_{}.csv'.format(dir,argv[5],obj_tracked,dt_typ, D)
else:
table_name = 'results/{}/results_{}_{}_table_{}.csv'.format(dir,obj_tracked,dt_typ, D)
file_ = open(table_name,"w")
df.to_csv(file_, index = False, header=True)
return other_model.MOTA, other_model.MOTP
if __name__ == "__main__":
run() | [
"os.path.exists",
"pandas.read_csv",
"mailpy.Mail",
"matplotlib.use",
"sys.exit",
"pandas.DataFrame"
] | [((73, 94), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (87, 94), False, 'import matplotlib\n'), ((1706, 1721), 'mailpy.Mail', 'mailpy.Mail', (['""""""'], {}), "('')\n", (1717, 1721), False, 'import mailpy\n'), ((2308, 2334), 'os.path.exists', 'os.path.exists', (['table_name'], {}), '(table_name)\n', (2322, 2334), False, 'import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt\n'), ((4031, 4046), 'mailpy.Mail', 'mailpy.Mail', (['""""""'], {}), "('')\n", (4042, 4046), False, 'import mailpy\n'), ((1484, 1495), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1492, 1495), False, 'import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt\n'), ((2350, 2377), 'pandas.read_csv', 'pandas.read_csv', (['table_name'], {}), '(table_name)\n', (2365, 2377), False, 'import pandas\n'), ((3618, 3648), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'cols'}), '(columns=cols)\n', (3634, 3648), False, 'import pandas\n'), ((2015, 2026), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2023, 2026), False, 'import sys, os, copy, math, numpy as np, matplotlib.pyplot as plt\n')] |
import time
from datetime import datetime
from serial import Serial # Library needed to open serial connection
PIN = 'a5'  # analog pin polled on the Arduino
PORT = 'COM11'
# NOTE(review): PORT is immediately rebound from the port-name string to the
# open Serial object — consider a separate PORT_NAME constant for clarity.
PORT = Serial(port=PORT, baudrate=9600, timeout=0) # Open the Serial port
def encode_command(command):
    """Encode a command string as a UTF-8 bytearray for serial transmission."""
    return bytearray(command.encode('utf-8'))
# Poll the Arduino once every 0.1 s for the analog pin status until Ctrl+C.
print('-' * 50)
print('AC:', 'Connecting to:', PORT)
print('AC:', 'Press Ctrl+C to close the program.')
try:
    while True:
        PORT.write(encode_command('S0|i{0}|r{0}|.'.format(PIN))) # Ask for analog status
        time.sleep(0.1)
        # Read until ';' is found; '|' marks the start of a command and
        # resets the accumulated buffer.
        result = "|"
        while len(result) == 0 or result[-1] != ';':
            result += PORT.read().decode('utf-8')
            # NOTE(review): with timeout=0, PORT.read() can return b'' — if
            # that happens right after the buffer was cleared, result[-1]
            # would raise IndexError. Confirm against real device timing.
            if result[-1] == '|': # Clear string at each command start
                result = ""
        result = result[:-1] # Skip the ';' character
        print('AC ({}):'.format(datetime.now()), '{} is at ->'.format(PIN.upper()), result)
except KeyboardInterrupt:
    pass
finally:
    PORT.close()
    print('AC:', 'Connection closed.')
    print('-' * 50)
| [
"datetime.datetime.now",
"serial.Serial",
"time.sleep"
] | [((148, 191), 'serial.Serial', 'Serial', ([], {'port': 'PORT', 'baudrate': '(9600)', 'timeout': '(0)'}), '(port=PORT, baudrate=9600, timeout=0)\n', (154, 191), False, 'from serial import Serial\n'), ((521, 536), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (531, 536), False, 'import time\n'), ((899, 913), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (911, 913), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
#encoding=utf-8
# Copyright (c) 2012 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A utility wraps Python built-in loggings
"""
import logging
import logging.handlers
import os
import platform
import sys
# Python 2 type aliases used throughout this module (`unicode` does not
# exist on Python 3 — this module targets Python 2).
unicode_type = unicode
bytes_type = str
basestring_type = str
# curses is optional; colored log output is disabled when it is missing.
try:
    import curses
except ImportError:
    curses = None
# Module-level logger used by init_log() below.
logger = logging.getLogger("com.baidu.bigflow")
def _safe_unicode(obj, encoding='utf-8'):
    """
    Converts any given object to a unicode string (Python 2), never raising.

    Byte strings are decoded with *encoding* (errors ignored); numbers and
    bools go through unicode(); objects providing ``__unicode__`` are asked
    directly (falling back to u"" if that raises); everything else falls
    back to ``str(obj)`` decoded with *encoding*.

    >>> _safe_unicode('hello')
    u'hello'
    >>> _safe_unicode(2)
    u'2'
    >>> _safe_unicode('\xe1\x88\xb4')
    u'\u1234'
    """
    t = type(obj)
    if t is unicode:
        return obj
    elif t is str:
        return obj.decode(encoding, 'ignore')
    elif t in [int, float, bool]:
        return unicode(obj)
    elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
        try:
            return unicode(obj)
        except Exception as e:
            return u""
    else:
        return str(obj).decode(encoding, 'ignore')
def _stderr_supports_color():
import sys
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.
    Key features of this formatter are:
    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.
    This formatter is enabled automatically by
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).
    """
    # %(color)s/%(end_color)s are filled in by format() below.
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Terminal color index per logging level (used with curses setaf/setf).
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }

    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=None):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
          It will be applied to the attributes dict of log records. The
          text between ``%(color)s`` and ``%(end_color)s`` will be colored
          depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
          code
        :arg string datefmt: Datetime format.
          Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
           Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt

        if colors is None:
            colors = LogFormatter.DEFAULT_COLORS

        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode_type(fg_color, "ascii")
            for levelno, code in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
        else:
            self._normal = ''

    def format(self, record):
        try:
            message = record.getMessage()
            # assert isinstance(message, basestring_type)  # guaranteed by logging
            # Encoding notes:  The logging module prefers to work with character
            # strings, but only enforces that log messages are instances of
            # basestring.  In python 2, non-ascii bytestrings will make
            # their way through the logging framework until they blow up with
            # an unhelpful decoding error (with this formatter it happens
            # when we attach the prefix, but there are other opportunities for
            # exceptions further along in the framework).
            #
            # If a byte string makes it this far, convert it to unicode to
            # ensure it will make it out to the logs.  Use repr() as a fallback
            # to ensure that all byte strings can be converted successfully,
            # but don't do it by default so we don't add extra quotes to ascii
            # bytestrings.  This is a bit of a hacky place to do this, but
            # it's worth it since the encoding errors that would otherwise
            # result are so useless (and tornado is fond of using utf8-encoded
            # byte strings whereever possible).
            record.message = _safe_unicode(message)
        except Exception as e:
            # Never let a formatting problem take down logging itself.
            record.message = "Bad message (%r): %r" % (e, record.__dict__)

        record.asctime = self.formatTime(record, self.datefmt)

        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''

        formatted = self._fmt % record.__dict__

        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines.  We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        # Indent continuation lines so multi-line records stay visually grouped.
        return formatted.replace("\n", "\n    ")
def enable_pretty_logging(
        logger,
        level,
        log_file="",
        backupCount=10,
        maxBytes=10000000):
    """Turns on formatted logging output as configured.

    Args:
        logger: logger to configure; must not be None.
        level: logging level to set on the logger.
        log_file (str): optional path; when set, a rotating file handler is
            attached instead of/alongside the console handler.
        backupCount (int): number of rotated log files to keep.
        maxBytes (int): max size of one log file before rotation.

    Raises:
        ValueError: if *logger* is None.
    """
    if logger is None:
        # BUGFIX: this previously raised error.BigflowPlanningException, but
        # no `error` module is imported here, so it actually raised NameError.
        # Raise ValueError with the intended message instead.
        raise ValueError("logger cannot be None")
    if "__PYTHON_IN_REMOTE_SIDE" in os.environ:
        # Do not do logging at runtime
        logger.addHandler(logging.NullHandler())
    else:
        logger.setLevel(level)
        if log_file:
            channel = logging.handlers.RotatingFileHandler(
                filename=log_file,
                maxBytes=maxBytes,
                backupCount=backupCount)
            channel.setFormatter(LogFormatter(color=False))
            logger.addHandler(channel)
        if not logger.handlers:
            # Set up color if we are in a tty and curses is installed
            channel = logging.StreamHandler()
            channel.setFormatter(LogFormatter())
            logger.addHandler(channel)
def enable_pretty_logging_at_debug(
        logger,
        level,
        log_file="",
        backupCount=10,
        maxBytes=10000000):
    """Turns on formatted logging output only when *level* is DEBUG.

    At any other level a no-op NullHandler is attached so the logger
    stays quiet.
    """
    if level != logging.DEBUG:
        logger.addHandler(logging.NullHandler())
    else:
        enable_pretty_logging(logger, level, log_file, backupCount, maxBytes)
def init_log(level=logging.INFO):
    """ init_log - initialize log module

    Args:
        level: messages above this level will be displayed;
               DEBUG < INFO < WARNING < ERROR < CRITICAL,
               ``the default value is logging.INFO``.
        The log file path is taken from the BIGFLOW_LOG_FILE environment
        variable (".log" is appended); console-only logging otherwise.

    Raises:
        OSError: fail to create log directories
        IOError: fail to open log file
    """
    log_file = os.environ.get("BIGFLOW_LOG_FILE", "")
    if log_file:
        log_file = os.path.abspath(log_file + ".log")
        # NOTE(review): Python 2 print-statement syntax; under Python 3 this
        # line fails at runtime (the module targets Python 2 — see `unicode`
        # aliases at the top of the file).
        print >> sys.stderr, "Bigflow Log file is written to [%s]" % log_file
    enable_pretty_logging(logger, level, log_file=log_file)
    #enable_pretty_logging_at_debug(
    #    logging.getLogger("pbrpc"),
    #    level,
    #    log_file=log_file)
    #enable_pretty_logging_at_debug(
    #    logging.getLogger("pbrpcrpc_client"),
    #    level,
    #    log_file=log_file)
# Configure module-level logging as a side effect of importing this module.
init_log(logging.INFO)
| [
"logging.getLogger",
"logging.NullHandler",
"curses.setupterm",
"logging.StreamHandler",
"curses.tigetnum",
"logging.handlers.RotatingFileHandler",
"os.environ.get",
"curses.tparm",
"sys.stderr.isatty",
"logging.Formatter.__init__",
"curses.tigetstr",
"os.path.abspath"
] | [((1443, 1481), 'logging.getLogger', 'logging.getLogger', (['"""com.baidu.bigflow"""'], {}), "('com.baidu.bigflow')\n", (1460, 1481), False, 'import logging\n'), ((9228, 9266), 'os.environ.get', 'os.environ.get', (['"""BIGFLOW_LOG_FILE"""', '""""""'], {}), "('BIGFLOW_LOG_FILE', '')\n", (9242, 9266), False, 'import os\n'), ((2268, 2287), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (2285, 2287), False, 'import sys\n'), ((3965, 4014), 'logging.Formatter.__init__', 'logging.Formatter.__init__', (['self'], {'datefmt': 'datefmt'}), '(self, datefmt=datefmt)\n', (3991, 4014), False, 'import logging\n'), ((9303, 9337), 'os.path.abspath', 'os.path.abspath', (["(log_file + '.log')"], {}), "(log_file + '.log')\n", (9318, 9337), False, 'import os\n'), ((2314, 2332), 'curses.setupterm', 'curses.setupterm', ([], {}), '()\n', (2330, 2332), False, 'import curses\n'), ((7859, 7880), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (7878, 7880), False, 'import logging\n'), ((7967, 8070), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', ([], {'filename': 'log_file', 'maxBytes': 'maxBytes', 'backupCount': 'backupCount'}), '(filename=log_file, maxBytes=maxBytes,\n backupCount=backupCount)\n', (8003, 8070), False, 'import logging\n'), ((8340, 8363), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8361, 8363), False, 'import logging\n'), ((8808, 8829), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (8827, 8829), False, 'import logging\n'), ((2348, 2373), 'curses.tigetnum', 'curses.tigetnum', (['"""colors"""'], {}), "('colors')\n", (2363, 2373), False, 'import curses\n'), ((4658, 4682), 'curses.tigetstr', 'curses.tigetstr', (['"""setaf"""'], {}), "('setaf')\n", (4673, 4682), False, 'import curses\n'), ((4710, 4733), 'curses.tigetstr', 'curses.tigetstr', (['"""setf"""'], {}), "('setf')\n", (4725, 4733), False, 'import curses\n'), ((5036, 5059), 'curses.tigetstr', 'curses.tigetstr', 
(['"""sgr0"""'], {}), "('sgr0')\n", (5051, 5059), False, 'import curses\n'), ((4957, 4985), 'curses.tparm', 'curses.tparm', (['fg_color', 'code'], {}), '(fg_color, code)\n', (4969, 4985), False, 'import curses\n')] |
import json
import yaml
import os
import requests
from git import Repo
### convert device config in json to ansible playbook and run the playbook
PATH_TO_REPO = os.path.expanduser('~') + "/awx-playbooks/"  # local clone of the playbook repo
URL = 'http://10.4.19.251:32121/api/v2/'  # AWX REST API base URL
USER = 'admin'
# NOTE(review): credential placeholder committed in source — load from an
# environment variable or config file instead.
PWD = '<PASSWORD>'
### Set proper headers
headers = {"Content-Type":"application/json","Accept":"application/json"}
def sync_project():
    """Trigger an SCM update of AWX project 9; return the update job id."""
    endpoint = URL + "projects/9/update/"
    resp = requests.post(endpoint, auth=(USER, PWD), headers=headers, verify=False)
    if resp.status_code != 202:
        print('Status:', resp.status_code, '\nHeaders:', resp.headers, '\nError Response:', resp)
    return resp.json()['id']
def sync_inventory_source():
    """Trigger a sync of AWX inventory source 10; return the update job id."""
    endpoint = URL + "inventory_sources/10/update/"
    resp = requests.post(endpoint, auth=(USER, PWD), headers=headers, verify=False)
    if resp.status_code != 202:
        print('Status:', resp.status_code, '\nHeaders:', resp.headers, '\nError Response:', resp)
    return resp.json()['id']
def add_template(new_yml):
    """Create an AWX job template for playbook *new_yml* and return its id.

    Exits the process if template creation fails.

    NOTE(review): the initial GET of the project's playbook list is fetched
    but its result is never used beyond the error print — confirm whether
    it can be removed.
    """
    # get a list of available templates for this project
    response = requests.get(URL + "projects/9/playbooks/", auth=(USER, PWD), headers=headers, verify=False)
    if response.status_code != 200:
        print('Status:', response.status_code, '\nHeaders:', response.headers, '\nError Response:',response)
    playbooks = response.json()
    #print(json.dumps(playbooks, indent=2))

    # Template payload: inventory 2 / project 9 are fixed ids on this AWX host.
    data = {
        'name': new_yml,
        'inventory': '2',
        'project': '9',
        'playbook': new_yml
    }
    # Create a job template
    response = requests.post(URL + "job_templates/", auth=(USER, PWD), json=data, headers=headers, verify=False)
    if response.status_code != 201:
        print('Status:', response.status_code, '\nHeaders:', response.headers, '\nContent:', response.content, '\nError Response:',response)
        exit()
    return response.json()['id']
def run_template(template_id):
    """Launch the AWX job template with the given id; return the job id."""
    launch_url = URL + "job_templates/" + str(template_id) + "/launch/"
    resp = requests.post(launch_url, auth=(USER, PWD), headers=headers, verify=False)
    if resp.status_code != 201:
        print('Status:', resp.status_code, '\nHeaders:', resp.headers, '\nContent:', resp.content, '\nError Response:', resp)
    return resp.json()['id']
def git_push(id, output_yaml):
    """Write *output_yaml* to <id>.yml inside the playbook repo, commit and
    push the change.

    Args:
        id: basename (without extension) for the playbook file.
        output_yaml (str): YAML document to write.

    Returns:
        str: the playbook file name (relative to the repo root).
    """
    output_file = str(id) + '.yml'
    output_file_path = PATH_TO_REPO + output_file
    with open(output_file_path, 'w') as outfile:
        # Round-trip through safe_load/dump to normalize formatting.
        yaml.dump(yaml.safe_load(output_yaml), outfile, default_flow_style=False, sort_keys=False)
    try:
        repo = Repo(PATH_TO_REPO)
        repo.git.add('--all')
        repo.index.commit("commit from python script")
        origin = repo.remote(name='origin')
        origin.push()
    except Exception as exc:
        # BUGFIX: a bare `except:` previously hid the actual failure (and
        # would even swallow KeyboardInterrupt); report the error instead.
        print("error occured while pushing the code:", exc)
    return output_file
def awplus_vlans(tasks):
    """Build an awplus_vlans Ansible task from a list of vlan dicts.

    Each entry's 'id' key is renamed to 'vlan_id' in place; the (mutated)
    list becomes the module's config with state 'merged'.
    """
    for entry in tasks:
        entry['vlan_id'] = entry.pop('id')
    return {
        'name': "Create vlans",
        'awplus_vlans': {
            'config': tasks,
            'state': "merged",
        },
    }
def awplus_openflow(tasks):
    """Build an awplus_openflow Ansible task, enabling openflow on every port.

    Mutates each dict in tasks['ports'] by setting 'openflow': True.
    """
    for port in tasks['ports']:
        port['openflow'] = True
    return {
        'name': "Add openflow config",
        'awplus_openflow': tasks,
    }
def json_to_yaml(input_json):
    """Convert a device-config JSON document into an Ansible playbook (YAML).

    Each entry under 'jobs' becomes one play targeting the device by name;
    recognized task kinds ('vlans', 'openflow') are translated via the
    awplus_* helpers above.
    """
    plays = []
    for job in json.loads(input_json)['jobs']:
        play = {
            'connection': 'network_cli',
            'hosts': job['name'],
            'collections': 'alliedtelesis.awplus'
        }
        tasks = job['tasks']
        translated = []
        for key in tasks:
            if key == "vlans":
                translated.append(awplus_vlans(tasks[key]))
            if key == "openflow":
                translated.append(awplus_openflow(tasks[key]))
        play['tasks'] = translated
        plays.append(play)
    return yaml.safe_dump(plays, sort_keys=False)
"json.loads",
"requests.post",
"yaml.safe_dump",
"requests.get",
"yaml.safe_load",
"git.Repo",
"os.path.expanduser"
] | [((163, 186), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (181, 186), False, 'import os\n'), ((413, 507), 'requests.post', 'requests.post', (["(URL + 'projects/9/update/')"], {'auth': '(USER, PWD)', 'headers': 'headers', 'verify': '(False)'}), "(URL + 'projects/9/update/', auth=(USER, PWD), headers=headers,\n verify=False)\n", (426, 507), False, 'import requests\n'), ((721, 825), 'requests.post', 'requests.post', (["(URL + 'inventory_sources/10/update/')"], {'auth': '(USER, PWD)', 'headers': 'headers', 'verify': '(False)'}), "(URL + 'inventory_sources/10/update/', auth=(USER, PWD),\n headers=headers, verify=False)\n", (734, 825), False, 'import requests\n'), ((1089, 1186), 'requests.get', 'requests.get', (["(URL + 'projects/9/playbooks/')"], {'auth': '(USER, PWD)', 'headers': 'headers', 'verify': '(False)'}), "(URL + 'projects/9/playbooks/', auth=(USER, PWD), headers=\n headers, verify=False)\n", (1101, 1186), False, 'import requests\n'), ((1542, 1644), 'requests.post', 'requests.post', (["(URL + 'job_templates/')"], {'auth': '(USER, PWD)', 'json': 'data', 'headers': 'headers', 'verify': '(False)'}), "(URL + 'job_templates/', auth=(USER, PWD), json=data, headers=\n headers, verify=False)\n", (1555, 1644), False, 'import requests\n'), ((3806, 3853), 'yaml.safe_dump', 'yaml.safe_dump', (['formatted_jobs'], {'sort_keys': '(False)'}), '(formatted_jobs, sort_keys=False)\n', (3820, 3853), False, 'import yaml\n'), ((2522, 2540), 'git.Repo', 'Repo', (['PATH_TO_REPO'], {}), '(PATH_TO_REPO)\n', (2526, 2540), False, 'from git import Repo\n'), ((3290, 3312), 'json.loads', 'json.loads', (['input_json'], {}), '(input_json)\n', (3300, 3312), False, 'import json\n'), ((2422, 2449), 'yaml.safe_load', 'yaml.safe_load', (['output_yaml'], {}), '(output_yaml)\n', (2436, 2449), False, 'import yaml\n')] |
import cv2
import sys
import json
from image_encoder.image_encoder import decode
import numpy
import requests
# Get user supplied values
def get_image(fpath):
    """Decode the image stored in the first JSON-lines record of *fpath*.

    The previous version parsed every line of the file but only used the
    first record; this reads and parses just the first line.
    """
    with open(fpath) as f:
        first_line = f.readline()
    record = json.loads(first_line)
    return decode(record["image"])
def n_faces(fpath):
    """Detect faces in the image referenced by *fpath*.

    Loads the image via get_image, runs OpenCV's Haar-cascade frontal-face
    detector, POSTs the bounding boxes to the imagedb service and returns
    the payload that was sent.

    Returns:
        dict: {"fpath": fpath, "result": {"faces": {"face1": [x1, y1, x2, y2], ...}}}
    """
    cascPath = "haarcascade_frontalface_default.xml"

    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier(cascPath)

    # Read the image
    img = get_image(fpath)
    # Convert to BGR then grayscale for the detector; assumes get_image()
    # returns an RGB (PIL-style) image — TODO confirm decode()'s return type.
    image = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags = cv2.CASCADE_SCALE_IMAGE
    )
    # Convert (x, y, w, h) detections to [x1, y1, x2, y2] corner boxes,
    # keyed "face1", "face2", ...
    output = {}
    i = 1
    for (x, y, w, h) in faces:
        k = "face"+str(i)
        output[k] = [int(x),int(y), int(x+w), int(y+h)]
        i+=1
    print(output)
    to_send = {"fpath":fpath, "result":{"faces":output}}
    # Persist the detection result alongside the original record.
    requests.post('http://imagedb:5000/append',json = to_send)
    return to_send
if __name__ == "__main__":
from pika_listener import QueueListener
Q = QueueListener(n_faces, 'imageq_n_faces')
Q.run()
# powered by bee
| [
"image_encoder.image_encoder.decode",
"json.loads",
"requests.post",
"numpy.array",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"pika_listener.QueueListener"
] | [((247, 273), 'image_encoder.image_encoder.decode', 'decode', (["record[0]['image']"], {}), "(record[0]['image'])\n", (253, 273), False, 'from image_encoder.image_encoder import decode\n'), ((413, 444), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['cascPath'], {}), '(cascPath)\n', (434, 444), False, 'import cv2\n'), ((568, 607), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (580, 607), False, 'import cv2\n'), ((1052, 1109), 'requests.post', 'requests.post', (['"""http://imagedb:5000/append"""'], {'json': 'to_send'}), "('http://imagedb:5000/append', json=to_send)\n", (1065, 1109), False, 'import requests\n'), ((1213, 1253), 'pika_listener.QueueListener', 'QueueListener', (['n_faces', '"""imageq_n_faces"""'], {}), "(n_faces, 'imageq_n_faces')\n", (1226, 1253), False, 'from pika_listener import QueueListener\n'), ((520, 536), 'numpy.array', 'numpy.array', (['img'], {}), '(img)\n', (531, 536), False, 'import numpy\n'), ((205, 221), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (215, 221), False, 'import json\n')] |
import argparse
from datetime import datetime
import glob
import html
import json
import os
import pytz
import shutil
import sys
import time
from yattag import Doc
# Constants
# Messages whose author name equals __SYSTEM__ are rendered as system notices.
__SYSTEM__ = "GroupMe"
# Web font pulled in by the rendered HTML page.
FONT_URL = "https://fonts.googleapis.com/css?family=Open+Sans"
def css_file():
    """Return the stylesheet written alongside the rendered archive HTML."""
    return """
    .message_container {
        display: flex;
        flex-direction: row;
        padding-top: 5px;
        padding-bottom: 5px;
        justify-content: center;
    }

    .avatar {
        width: 40px;
        height: 40px;
        flex-basis: 40px;
        flex-shrink: 0;
        background-color: #3A61BF;
        display: flex;
        justify-content: center;
        align-items: center;
        color: #FFFFFF;
        border-radius: 50%;
    }

    .avatar > img {
        width: 40px;
        height: 40px;
        border-radius: 50%;
    }

    .likes {
        width: 30px;
        height: 30px;
        flex-basis: 30px;
        flex-shrink: 0;
        display: flex;
        justify-content: flex-start;
        align-items: flex-end;
        font-size: 10px;
        color: #bbb;
    }

    .likes > img {
        max-width: 70%;
        max-height: 70%;
        align-self: center;
    }

    .message_box {
        display: flex;
        flex-direction: column;
        flex-grow: 1;
        margin-left: 10px;
        margin-right: 10px;
    }

    .message {
        white-space: pre-line;
        overflow-wrap: break-word;
        word-break: break-word;
    }

    .message > img {
        max-width: 400px;
        max-height: 400px;
    }

    .message > video {
        max-width: 400px;
        max-height: 400px;
    }

    .system_message {
        width: 80%;
        text-align: center;
        font-size: 14px;
        font-weight: bold;
        color: #666666;
    }

    .user {
        color: #555555;
        font-size: 14px;
    }

    #container {
        width: 768px;
    }

    body {
        background-color: #eeeeee;
        display: flex;
        flex-direction: row;
        justify-content: center;
        font-family: 'Open Sans', sans-serif;
    }

    /* Tooltip container */
    .tooltip {
        position: relative;
    }

    /* Tooltip text */
    .tooltip .tooltiptext {
        visibility: hidden;
        width: 120px;
        background-color: black;
        color: #fff;
        text-align: center;
        padding: 5px 0;
        border-radius: 6px;

        /* Position the tooltip text - see examples below! */
        position: absolute;
        z-index: 1;
        width: 120px;
        top: 100%;
        left: 30%;
        margin-left: -60px;
    }

    /* Show the tooltip text when you mouse over the tooltip container */
    .tooltip:hover .tooltiptext {
        visibility: visible;
    }

    .tooltip .tooltiptext::after {
        content: " ";
        position: absolute;
        bottom: 100%;  /* At the top of the tooltip */
        left: 50%;
        margin-left: -5px;
        border-width: 5px;
        border-style: solid;
        border-color: transparent transparent black transparent;
    }
    """
def render_time_message(page_elements, message, prev_time, timezone=None):
    """Emit a date-separator row when the day changes; return the message time."""
    doc, tag, text = page_elements
    msg_time = datetime.fromtimestamp(message['created_at'], timezone)
    same_day = prev_time is not None and prev_time.day == msg_time.day
    if not same_day:
        with tag('div', klass='message_container'):
            doc.attr(style="background-color: #e4e4e4")
            with tag('span', klass='system_message'):
                text(msg_time.strftime('%b %d, %Y at %-I:%M %p'))
    return msg_time
def render_system_message(page_elements, message, timezone=None):
    """Emit a centered system-notice row with the message time as tooltip."""
    doc, tag, text = page_elements
    msg_time = datetime.fromtimestamp(message['created_at'], timezone)
    with tag('div', klass='message_container'):
        doc.attr(title=msg_time.strftime('%b %d, %Y at %-I:%M %p'))
        doc.attr(style="background-color: #e4e4e4")
        with tag('span', klass='system_message'):
            text(message['text'] or '<ATTACHMENT>')
def render_avatar(input_dir, page_elements, people, message):
    """Emit the sender's avatar image, or their initials when no avatar exists."""
    doc, tag, text = page_elements
    author = people[message['author']]
    if author['avatar_url']:
        # Avatars are stored as <input_dir>/avatars/<user_id>.avatar.<ext>;
        # emit a path relative to the output directory.
        local_name = os.path.join('avatars', "%s.avatar" % (message['author']))
        match = glob.glob("%s/%s*" % (input_dir, local_name))[0]
        rel_path = "/".join(match.split('/')[-2:])
        doc.asis('<img src="%s"></img>' % (rel_path))
    else:
        parts = author['name'].split()
        initials = parts[0][0].upper()
        if len(parts) > 1:
            initials += parts[-1][0].upper()
        text(initials)
def render_message(input_dir, page_elements, people, message, timezone=None):
    """Render one regular chat message: avatar, author name, image/video
    attachments, the text body (with @-mentions bolded), and the like
    counter with a hover tooltip listing who liked it."""
    doc, tag, text = page_elements
    # Process mentions: collect (start, length) spans of @-mentions in the text.
    mentions = []
    for a in message['attachments']:
        if a['type'] == "mentions":
            mentions += a['loci']
    message_time = datetime.fromtimestamp(message['created_at'], timezone)
    with tag('div', klass='message_container'):
        doc.attr(title=message_time.strftime('%b %d, %Y at %-I:%M %p'))
        with tag('div', klass='avatar'):
            render_avatar(input_dir, page_elements, people, message)
        with tag('div', klass='message_box'):
            with tag('span', klass='user'):
                text(people[message['author']]['name'])
            if len(message['attachments']) > 0:
                for att in message['attachments']:
                    # Attachments were downloaded under <input_dir>/attachments/
                    # with the URL basename as a name prefix; resolve via glob
                    # and emit a path relative to the output directory.
                    if att['type'] == 'image' or \
                            att['type'] == 'linked_image':
                        image_path = att['url'].split('/')[-1]
                        image_path = os.path.join('attachments', image_path)
                        r = glob.glob("%s/%s*" % (input_dir, image_path))
                        image_path = r[0]
                        image_path = "/".join(image_path.split('/')[-2:])
                        with tag('span', klass='message'):
                            doc.asis('<img src="%s"></img>' % (
                                image_path))
                    elif att['type'] == 'video':
                        video_path = att['url'].split('/')[-1]
                        video_path = os.path.join('attachments', video_path)
                        r = glob.glob("%s/%s*" % (input_dir, video_path))[0]
                        video_path = r
                        video_path = "/".join(video_path.split('/')[-2:])
                        with tag('span', klass='message'):
                            doc.asis('<video src="%s" controls></video>' % (
                                video_path))
            if message['text']:
                with tag('span', klass='message'):
                    _text = message['text']
                    # Remove video urls (the video is rendered inline above).
                    for att in message['attachments']:
                        if att['type'] == 'video':
                            start_idx = _text.find(att['url'])
                            end_idx = start_idx + len(att['url'])
                            _text = _text[:start_idx] + _text[end_idx:]
                    # Split text into mentions and normal text; mentions are
                    # rendered bold, everything else with normal weight.
                    text_parts = []
                    prev_end = 0
                    for m in mentions:
                        start = m[0]
                        end = start + m[1]
                        text_parts.append((_text[prev_end:start], 'normal'))
                        text_parts.append((_text[start:end], 'bold'))
                        prev_end = end
                    text_parts.append((_text[prev_end:], 'normal'))
                    for t, style in text_parts:
                        with tag('span'):
                            doc.attr('style="font-weight: %s;"' % (style))
                            text(t)
        with tag('span', klass='likes'):
            # Filled heart + count when liked; empty heart otherwise. The
            # tooltip lists the names of everyone who liked the message.
            if len(message['favorited_by']) > 0:
                doc.attr(klass='likes tooltip')
                doc.asis("<img src='assets/heart-full.png'></img>")
                doc.text(len(message['favorited_by']))
            else:
                doc.asis("<img src='assets/heart.png'></img>")
            with tag('div', klass='tooltiptext'):
                for id in message['favorited_by']:
                    name = "Unknown"
                    if id in people:
                        name = people[id]['name']
                    with tag('div'):
                        text(name)
def main():
    """Render a downloaded GroupMe archive (people/messages/group_info JSON
    plus avatars/attachments in --input-dir) into rendered.html + main.css,
    copying the heart icons into <input-dir>/assets."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-dir', '-i', dest='input_dir', required=True)
    parser.add_argument('--timezone', type=str,
                        help="Timezone to render message times in.")
    args = parser.parse_args()
    # All three JSON files must be present in the input directory.
    if not os.path.exists(os.path.join(args.input_dir, 'people.json')) or \
       not os.path.exists(os.path.join(args.input_dir, 'messages.json')) or \
       not os.path.exists(os.path.join(args.input_dir, 'group_info.json')):
        print("Missing files!")
        sys.exit(1)
    with open(os.path.join(args.input_dir, 'people.json')) as fp:
        people = json.load(fp)
    with open(os.path.join(args.input_dir, 'messages.json')) as fp:
        messages = json.load(fp)
    with open(os.path.join(args.input_dir, 'group_info.json')) as fp:
        group_info = json.load(fp)
    # page_elements is the yattag (doc, tag, text) triple shared by all
    # render_* helpers.
    page_elements = Doc().tagtext()
    doc, tag, text = page_elements
    tz = None
    if args.timezone:
        tz = pytz.timezone(args.timezone)
    prev_time = None
    with tag('html'):
        with tag('head'):
            doc.asis('<meta charset="utf-8">')
            doc.asis('<link href="%s" rel="stylesheet">' % (FONT_URL))
            doc.asis('<link rel="stylesheet" href="main.css">')
            with tag('title'):
                text('GroupMe archive - %s' % (group_info['name']))
        with tag('body'):
            with tag('div', id='container'):
                with tag('h1'):
                    text(group_info['name'])
                # Render messages
                for message in messages:
                    # Check and render time divider
                    prev_time = render_time_message(page_elements, message,
                                                    prev_time, tz)
                    # Check message type
                    if people[message['author']]['name'] == __SYSTEM__:
                        # Render system message
                        render_system_message(page_elements, message,
                                              tz)
                    else:
                        # Render normal message
                        render_message(args.input_dir, page_elements, people,
                                       message, tz)
    # Save rendered files
    with open(os.path.join(args.input_dir, 'rendered.html'), 'w') as fp:
        fp.write(doc.getvalue())
    with open(os.path.join(args.input_dir, 'main.css'), 'w') as fp:
        fp.write(css_file())
    # Copy the heart icons referenced by the rendered page.
    root_path = os.path.realpath(__file__)
    assets_dir = os.path.join(os.path.dirname(root_path), 'assets')
    project_assets_dir = os.path.join(args.input_dir, 'assets')
    os.makedirs(project_assets_dir, exist_ok=True)
    shutil.copy(os.path.join(assets_dir, 'heart.png'), project_assets_dir)
    shutil.copy(os.path.join(assets_dir, 'heart-full.png'), project_assets_dir)
if __name__ == '__main__':
    main()
| [
"pytz.timezone",
"datetime.datetime.fromtimestamp",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.realpath",
"os.path.dirname",
"sys.exit",
"json.load",
"yattag.Doc",
"glob.glob"
] | [((3252, 3307), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["message['created_at']", 'timezone'], {}), "(message['created_at'], timezone)\n", (3274, 3307), False, 'from datetime import datetime\n'), ((3752, 3807), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["message['created_at']", 'timezone'], {}), "(message['created_at'], timezone)\n", (3774, 3807), False, 'from datetime import datetime\n'), ((5051, 5106), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["message['created_at']", 'timezone'], {}), "(message['created_at'], timezone)\n", (5073, 5106), False, 'from datetime import datetime\n'), ((8597, 8622), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8620, 8622), False, 'import argparse\n'), ((11095, 11121), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (11111, 11121), False, 'import os\n'), ((11215, 11253), 'os.path.join', 'os.path.join', (['args.input_dir', '"""assets"""'], {}), "(args.input_dir, 'assets')\n", (11227, 11253), False, 'import os\n'), ((11258, 11304), 'os.makedirs', 'os.makedirs', (['project_assets_dir'], {'exist_ok': '(True)'}), '(project_assets_dir, exist_ok=True)\n', (11269, 11304), False, 'import os\n'), ((4336, 4372), 'os.path.join', 'os.path.join', (['"""avatars"""', 'avatar_path'], {}), "('avatars', avatar_path)\n", (4348, 4372), False, 'import os\n'), ((9121, 9132), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9129, 9132), False, 'import sys\n'), ((9217, 9230), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (9226, 9230), False, 'import json\n'), ((9319, 9332), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (9328, 9332), False, 'import json\n'), ((9425, 9438), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (9434, 9438), False, 'import json\n'), ((9561, 9589), 'pytz.timezone', 'pytz.timezone', (['args.timezone'], {}), '(args.timezone)\n', (9574, 9589), False, 'import pytz\n'), ((11152, 11178), 'os.path.dirname', 
'os.path.dirname', (['root_path'], {}), '(root_path)\n', (11167, 11178), False, 'import os\n'), ((11321, 11358), 'os.path.join', 'os.path.join', (['assets_dir', '"""heart.png"""'], {}), "(assets_dir, 'heart.png')\n", (11333, 11358), False, 'import os\n'), ((11396, 11438), 'os.path.join', 'os.path.join', (['assets_dir', '"""heart-full.png"""'], {}), "(assets_dir, 'heart-full.png')\n", (11408, 11438), False, 'import os\n'), ((4395, 4441), 'glob.glob', 'glob.glob', (["('%s/%s*' % (input_dir, avatar_path))"], {}), "('%s/%s*' % (input_dir, avatar_path))\n", (4404, 4441), False, 'import glob\n'), ((9148, 9191), 'os.path.join', 'os.path.join', (['args.input_dir', '"""people.json"""'], {}), "(args.input_dir, 'people.json')\n", (9160, 9191), False, 'import os\n'), ((9246, 9291), 'os.path.join', 'os.path.join', (['args.input_dir', '"""messages.json"""'], {}), "(args.input_dir, 'messages.json')\n", (9258, 9291), False, 'import os\n'), ((9348, 9395), 'os.path.join', 'os.path.join', (['args.input_dir', '"""group_info.json"""'], {}), "(args.input_dir, 'group_info.json')\n", (9360, 9395), False, 'import os\n'), ((9460, 9465), 'yattag.Doc', 'Doc', ([], {}), '()\n', (9463, 9465), False, 'from yattag import Doc\n'), ((10888, 10933), 'os.path.join', 'os.path.join', (['args.input_dir', '"""rendered.html"""'], {}), "(args.input_dir, 'rendered.html')\n", (10900, 10933), False, 'import os\n'), ((10995, 11035), 'os.path.join', 'os.path.join', (['args.input_dir', '"""main.css"""'], {}), "(args.input_dir, 'main.css')\n", (11007, 11035), False, 'import os\n'), ((8877, 8920), 'os.path.join', 'os.path.join', (['args.input_dir', '"""people.json"""'], {}), "(args.input_dir, 'people.json')\n", (8889, 8920), False, 'import os\n'), ((8953, 8998), 'os.path.join', 'os.path.join', (['args.input_dir', '"""messages.json"""'], {}), "(args.input_dir, 'messages.json')\n", (8965, 8998), False, 'import os\n'), ((9031, 9078), 'os.path.join', 'os.path.join', (['args.input_dir', '"""group_info.json"""'], {}), 
"(args.input_dir, 'group_info.json')\n", (9043, 9078), False, 'import os\n'), ((5787, 5826), 'os.path.join', 'os.path.join', (['"""attachments"""', 'image_path'], {}), "('attachments', image_path)\n", (5799, 5826), False, 'import os\n'), ((5855, 5900), 'glob.glob', 'glob.glob', (["('%s/%s*' % (input_dir, image_path))"], {}), "('%s/%s*' % (input_dir, image_path))\n", (5864, 5900), False, 'import glob\n'), ((6334, 6373), 'os.path.join', 'os.path.join', (['"""attachments"""', 'video_path'], {}), "('attachments', video_path)\n", (6346, 6373), False, 'import os\n'), ((6402, 6447), 'glob.glob', 'glob.glob', (["('%s/%s*' % (input_dir, video_path))"], {}), "('%s/%s*' % (input_dir, video_path))\n", (6411, 6447), False, 'import glob\n')] |
from slack_sdk import WebClient
from slack_bolt.app.app import SlackAppDevelopmentServer, App
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
class TestDevServer:
    """Tests that a SlackAppDevelopmentServer can be built against a mocked
    Slack Web API served locally."""

    signing_secret = "secret"
    valid_token = "<PASSWORD>"
    mock_api_server_base_url = "http://localhost:8888"
    web_client = WebClient(token=valid_token, base_url=mock_api_server_base_url)

    def setup_method(self):
        """Stash the real OS environment and start the mock Slack API server."""
        self.old_os_env = remove_os_env_temporarily()
        setup_mock_web_api_server(self)

    def teardown_method(self):
        """Stop the mock server and restore the stashed environment."""
        cleanup_mock_web_api_server(self)
        restore_os_env(self.old_os_env)

    def test_instance(self):
        """A development server should be constructible around a Bolt app."""
        bolt_app = App(signing_secret=self.signing_secret, client=self.web_client)
        server = SlackAppDevelopmentServer(port=3001, path="/slack/events", app=bolt_app)
        assert server is not None
| [
"tests.utils.remove_os_env_temporarily",
"tests.mock_web_api_server.cleanup_mock_web_api_server",
"slack_sdk.WebClient",
"tests.mock_web_api_server.setup_mock_web_api_server",
"tests.utils.restore_os_env",
"slack_bolt.app.app.App"
] | [((423, 486), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'valid_token', 'base_url': 'mock_api_server_base_url'}), '(token=valid_token, base_url=mock_api_server_base_url)\n', (432, 486), False, 'from slack_sdk import WebClient\n'), ((565, 592), 'tests.utils.remove_os_env_temporarily', 'remove_os_env_temporarily', ([], {}), '()\n', (590, 592), False, 'from tests.utils import remove_os_env_temporarily, restore_os_env\n'), ((601, 632), 'tests.mock_web_api_server.setup_mock_web_api_server', 'setup_mock_web_api_server', (['self'], {}), '(self)\n', (626, 632), False, 'from tests.mock_web_api_server import setup_mock_web_api_server, cleanup_mock_web_api_server\n'), ((673, 706), 'tests.mock_web_api_server.cleanup_mock_web_api_server', 'cleanup_mock_web_api_server', (['self'], {}), '(self)\n', (700, 706), False, 'from tests.mock_web_api_server import setup_mock_web_api_server, cleanup_mock_web_api_server\n'), ((715, 746), 'tests.utils.restore_os_env', 'restore_os_env', (['self.old_os_env'], {}), '(self.old_os_env)\n', (729, 746), False, 'from tests.utils import remove_os_env_temporarily, restore_os_env\n'), ((894, 957), 'slack_bolt.app.app.App', 'App', ([], {'signing_secret': 'self.signing_secret', 'client': 'self.web_client'}), '(signing_secret=self.signing_secret, client=self.web_client)\n', (897, 957), False, 'from slack_bolt.app.app import SlackAppDevelopmentServer, App\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import optparse
import os
import re
import sys
import vtk
from multiprocessing import Process
import parse_imx
RADIUS = 3 # Kernel radius (voxels) for the morphological open and the Gaussian smooth
SCALE = 50.0 # Default mesh-units-to-voxels scale factor used when rasterizing
class RepairMeshParser(optparse.OptionParser):
    """Command-line parser for the mesh-repair tool.

    Exposes the input/output file options plus the rasterization scale
    factor used when voxelizing meshes.
    """

    def __init__(self):
        optparse.OptionParser.__init__(self)
        self.add_option("-a", "--areas", dest="areas_file",
                        help="The output areas file in csv", metavar="FILE")
        self.add_option("-i", "--input-vrml", dest="input_vrml",
                        help="The mesh to de repaired in vrml file format", metavar="FILE")
        self.add_option("-d", "--auto-dir", dest="vrmls_dir",
                        help="A directory with a bunch of vrmls", metavar="FILE")
        self.add_option("-o", "--output-dir", dest="output_dir",
                        help="The output dir ussed when provides a dir as input", metavar="FILE")
        # type="float" makes optparse convert command-line values; without it a
        # user-supplied "-s" value would arrive as a string and break the
        # arithmetic done on `scale` elsewhere (only the default was a float).
        self.add_option("-s", "--scale-factor", dest="scale", default=50.0, type="float",
                        help="Tehe scale factor used in the rasterization")
        self.add_option("-c", "--combine", action="store_true", dest="combine",
                        help="Combine all polydatas in one object")
def write_image(image, filename):
    """Dump a vtkImageData volume to an uncompressed 3-D MetaImage file."""
    writer = vtk.vtkMetaImageWriter()
    writer.SetInputData(image)
    writer.SetFileName(filename)
    writer.SetFileDimensionality(3)
    writer.SetCompression(False)
    writer.Write()
def voxelizer(polydata, scale=SCALE, radius=RADIUS):
    """Rasterize a closed surface mesh into a binary volume (not anti-aliased).

    Returns a vtkImageData whose voxels are 255 inside the mesh and 0
    outside, on a grid obtained by scaling the mesh bounds by *scale* and
    padding them by *radius* + 6 voxels so later morphology/smoothing has
    room at the borders.
    """
    # Get selection boundaries.
    (minX, maxX, minY, maxY, minZ, maxZ) = [int(x * scale) for x in
                                            polydata.GetBounds()] # convert tuple of floats to ints
    # print(" Selection bounds are %s" % str((minX, maxX, minY, maxY, minZ, maxZ))) # dimensions of the resulting image
    # print(" Dimensions: %s" % str((maxX - minX, maxY - minY, maxZ - minZ)))
    # Pad the extent so the morphological open / Gaussian smooth applied later
    # cannot clip the object at the volume border.
    padd = radius + 6
    (minX, maxX, minY, maxY, minZ, maxZ) = (
        minX - padd, maxX + padd, minY - padd, maxY + padd, minZ - padd, maxZ + padd)
    ps1 = 1.0 / float(scale)  # pixel size for the stencil, make sure it's a float division!
    ps2 = 1.0  # pixel size for the image
    ## Convert a surface mesh into an image stencil that can be used to mask an image with vtkImageStencil.
    polyToStencilFilter = vtk.vtkPolyDataToImageStencil()
    polyToStencilFilter.SetInputData(polydata)
    polyToStencilFilter.SetOutputWholeExtent(minX, maxX, minY, maxY, minZ, maxZ)
    polyToStencilFilter.SetOutputSpacing(ps1, ps1, ps1)
    polyToStencilFilter.SetOutputOrigin(0.0, 0.0, 0.0)
    polyToStencilFilter.Update()
    # Create an empty (3D) image of appropriate size.
    image = vtk.vtkImageData()
    image.SetSpacing(ps2, ps2, ps2)
    image.SetOrigin(0.0, 0.0, 0.0)
    image.SetExtent(minX, maxX, minY, maxY, minZ, maxZ)
    image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
    # Mask the empty image with the image stencil.
    # First All the background to 0
    # Needed otherwise introduces noise
    stencil = vtk.vtkImageStencil()
    stencil.SetInputData(image)
    stencil.SetStencilData(polyToStencilFilter.GetOutput())
    stencil.ReverseStencilOff()
    stencil.SetBackgroundValue(0)
    stencil.Update()
    # Foreground to 255
    stencil2 = vtk.vtkImageStencil()
    stencil2.SetInputData(stencil.GetOutput())
    stencil2.SetStencilData(polyToStencilFilter.GetOutput())
    stencil2.ReverseStencilOn()
    stencil2.SetBackgroundValue(255)
    stencil2.Update()
    finishImage = stencil2.GetOutput()
    print(finishImage.GetNumberOfCells())
    return stencil2.GetOutput()
def axisAligment(actor):
    """Center the actor's mesh at the origin and rotate it onto the principal
    axes of its point cloud (SVD of the point "covariance" matrix P^T P).

    Mutates the actor's mapper in place and returns
    (center, rotationTransform) so the transform can later be undone by
    toOriginalPos().
    """
    polyData = actor.GetMapper().GetInput()
    # Compute the (unweighted) center of mass of the mesh.
    centerCalculer = vtk.vtkCenterOfMass()
    centerCalculer.SetInputData(polyData)
    centerCalculer.SetUseScalarsAsWeights(False)
    centerCalculer.Update()
    center = centerCalculer.GetCenter()
    print(center)
    # Translate the mesh so its center of mass sits at the origin.
    centerTransform = vtk.vtkTransform()
    centerTransform.Translate(-center[0], -center[1], -center[2])
    transformFilter = vtk.vtkTransformFilter()
    transformFilter.SetInputData(polyData)
    transformFilter.SetTransform(centerTransform)
    transformFilter.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(transformFilter.GetOutput())
    mapper.Update()
    actor.SetMapper(mapper)
    # Debug: recompute the center to confirm it is now (close to) the origin.
    polyData = actor.GetMapper().GetInput()
    centerCalculer = vtk.vtkCenterOfMass()
    centerCalculer.SetInputData(polyData)
    centerCalculer.SetUseScalarsAsWeights(False)
    centerCalculer.Update()
    centerAux = centerCalculer.GetCenter()
    print(centerAux)
    # Gather all mesh points into an N x 3 matrix.
    pointsMatrixAux = []
    for i in range(0, polyData.GetNumberOfPoints()):
        point = polyData.GetPoint(i)
        pointsMatrixAux.append(point)
    pointMatrix = np.matrix(pointsMatrixAux)
    pointMatrixT = pointMatrix.transpose()
    covarianzeMatrix = pointMatrixT * pointMatrix
    # Principal directions: left singular vectors of the 3x3 scatter matrix.
    u, s, vh = np.linalg.svd(covarianzeMatrix, full_matrices=True)
    # Build a 4x4 homogeneous rotation matrix from the 3x3 SVD basis.
    rotationMatrix = vtk.vtkMatrix4x4()
    for i in range(3):
        for j in range(3):
            rotationMatrix.SetElement(i, j, u[i, j])
        rotationMatrix.SetElement(i, 3, 0)
    for i in range(3):
        rotationMatrix.SetElement(3, i, 0)
    rotationMatrix.SetElement(3, 3, 1)
    rotationTransform = vtk.vtkTransform()
    rotationTransform.SetMatrix(rotationMatrix)
    transformFilter = vtk.vtkTransformFilter()
    transformFilter.SetInputData(actor.GetMapper().GetInput())
    transformFilter.SetTransform(rotationTransform)
    transformFilter.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(transformFilter.GetOutput())
    actor.SetMapper(mapper)
    return center, rotationTransform
def open_image(image, radius):
    """Morphologically clean a binary volume: dilate the 255 foreground and
    erode the 0 background with a cubic kernel of the given radius."""
    morph = vtk.vtkImageDilateErode3D()
    morph.SetDilateValue(255)
    morph.SetErodeValue(0)
    morph.SetKernelSize(radius, radius, radius)
    morph.SetInputData(image)
    morph.Update()
    return morph.GetOutput()
def dump_voxels(actor, filename):
    """Voxelize the actor's mesh at scale 50, morphologically clean it and
    write the result to *filename* as a MetaImage volume."""
    mesh = actor.GetMapper().GetInput()
    raw_volume = voxelizer(mesh, 50)
    cleaned = open_image(raw_volume, RADIUS)
    write_image(cleaned, filename)
def open_actor(actor, actor_index=0, scale=SCALE, radius=RADIUS):
    """Repair the actor's mesh in place: voxelize it, morphologically open
    it, Gaussian-smooth the volume, and re-extract a surface with marching
    cubes.

    Raises ValueError when the rasterization produced no cells (callers
    typically retry with a doubled scale).

    NOTE: the repaired surface lives in voxel coordinates, i.e. it is scaled
    by *scale* relative to the original mesh (callers divide areas by
    scale**2 and lengths by scale to compensate).
    """
    poly = actor.GetMapper().GetInput()
    pre_image = voxelizer(poly, scale)
    opened_image = open_image(pre_image, radius)
    # Smooth the binary volume so marching cubes yields a less jagged surface.
    gauss = vtk.vtkImageGaussianSmooth()
    gauss.SetDimensionality(3)
    gauss.SetStandardDeviation(radius, radius, radius)
    gauss.SetInputData(opened_image)
    gauss.Update()
    image_to_contour = gauss.GetOutput()
    contour = vtk.vtkMarchingCubes()
    contour.SetInputData(image_to_contour)
    contour.SetValue(0, 127.5)  # iso-level halfway between background (0) and foreground (255)
    contour.ComputeScalarsOff()
    contour.Update()
    repared_poly = contour.GetOutput()
    if repared_poly.GetNumberOfCells() == 0:
        print("ERROR: number_of_cells = 0", end=' ')
        # write_image(image_to_contour, "/tmp/%d.mhd"%actor_index)
        raise ValueError("ERROR: number_of_cells = 0")
    # (minX, maxX, minY, maxY, minZ, maxZ) = [int(x) for x in repared_poly.GetBounds()] #convert tuple of floats to ints
    # print " Repared bounds are %s"%str((minX, maxX, minY, maxY, minZ, maxZ)) #dimensions of the resulting image
    # print " Dimensions: %s"%str((maxX - minX, maxY - minY, maxZ - minZ))
    actor.GetMapper().SetInputData(repared_poly)
def compute_area(actor):
    """Return the total surface area of the actor's mesh, summed over its
    triangle cells."""
    polydata = actor.GetMapper().GetInput()
    return sum(vtk.vtkMeshQuality.TriangleArea(polydata.GetCell(i))
               for i in range(polydata.GetNumberOfCells()))
def combine_actors(actors_list):
    """Merge the polydata of every actor into one new vtkActor and return it."""
    appender = vtk.vtkAppendPolyData()
    for actor in actors_list:
        poly = actor.GetMapper().GetInput()
        # AddInputData is the VTK 6+ API; AddInput was removed and would raise
        # AttributeError (the rest of this file already uses SetInputData).
        appender.AddInputData(poly)
    appender.Update()
    combined_poly = appender.GetOutput()
    combined_actor = vtk.vtkActor()
    combined_actor.SetMapper(vtk.vtkPolyDataMapper())
    combined_actor.GetMapper().SetInputData(combined_poly)
    return combined_actor
def show_actor(actor, ren, rw):
    """Replace whatever the renderer currently shows with *actor*, reset the
    camera to frame it, and re-render the window."""
    ren.RemoveAllViewProps()
    ren.AddActor(actor)
    ren.ResetCamera()
    rw.Render()
def compute_all_areas(actors_list, scale=SCALE):
    """Repair every actor in place and return per-actor area statistics.

    Each actor is repaired via open_actor(); when rasterization fails at the
    requested scale the actor is retried at double scale.  The doubled scale
    is restored afterwards so one bad actor does not change the
    rasterization of every subsequent one (main() follows the same pattern
    with its `rescaled` flag).

    Returns a list of tuples (index, area_before, area_after, after/before),
    where area_after is corrected by scale**2 because the repaired mesh
    lives in voxel coordinates.
    """
    areas = []
    for i, actor in enumerate(actors_list):
        sys.stdout.write("%d " % i)
        area_pre = compute_area(actor)
        rescaled = False
        try:
            open_actor(actor, i, scale)
        except ValueError:
            # [KNOWN BUG] The sizes are corrected, but not the position
            scale = scale * 2
            rescaled = True
            open_actor(actor, i, scale)
        area_post = compute_area(actor) / scale ** 2
        areas.append((i, area_pre, area_post, area_post / area_pre))
        if rescaled:
            # Undo the per-actor doubling before moving to the next actor.
            scale /= 2
        sys.stdout.flush()
    print("\n")
    return areas
def compute_centroids(actors_list):
    """Return the center point of every actor, preserving order."""
    centroids = []
    for actor in actors_list:
        centroids.append(actor.GetCenter())
    return centroids
def csv_areas(actors_list, filename, scale=SCALE, names=None):
    """Repair every actor, then write a CSV of per-object areas and
    centroids to *filename* (with an extra Name column when *names* is
    given)."""
    centroids = compute_centroids(actors_list)  # centroids of the original (unrepaired) actors
    print(
        "-------- Repairing original mesh and Calculating areas (This process might take a long time, please wait) -----------")
    areas = compute_all_areas(actors_list, scale)
    print("-------- Saving CSV file -----------")
    rows = []
    if names is None:
        rows.append("Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z\n")
        for stats, centroid in zip(areas, centroids):
            fields = list(stats) + list(centroid)
            rows.append("%d,%f,%f,%f,%f,%f,%f\n" % tuple(fields))
    else:
        rows.append("Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z,Name\n")
        for stats, centroid, name in zip(areas, centroids, names):
            fields = list(stats) + list(centroid) + [name]
            rows.append("%d,%f,%f,%f,%f,%f,%f,%s\n" % tuple(fields))
    with open(filename, 'w') as f:
        f.write("".join(rows))
def underScale(actor, scale):
    """Uniformly shrink the actor's mesh by 1/scale (undoing the
    rasterization scale factor) and return the actor."""
    shrink = vtk.vtkTransform()
    factor = float(1) / float(scale)
    shrink.Scale(factor, factor, factor)
    filt = vtk.vtkTransformFilter()
    filt.SetInputData(actor.GetMapper().GetInput())
    filt.SetTransform(shrink)
    newMapper = vtk.vtkPolyDataMapper()
    newMapper.SetInputConnection(filt.GetOutputPort())
    newMapper.Update()
    actor.SetMapper(newMapper)
    return actor
def reduceMesh(actor, reduction):
    """Decimate the actor's mesh in place by *reduction* percent (0-100) and
    return the actor."""
    decimator = vtk.vtkDecimatePro()
    decimator.SetInputData(actor.GetMapper().GetInput())
    decimator.SetTargetReduction(reduction / 100)
    decimator.Update()
    newMapper = vtk.vtkPolyDataMapper()
    newMapper.SetInputConnection(decimator.GetOutputPort())
    newMapper.Update()
    actor.SetMapper(newMapper)
    return actor
# Only for future versions of VTK, at the moment is a beta feature
def save_obj(rw, dir, name):
    """Export the render window's scene as Wavefront OBJ files with prefix
    dir/name, creating *dir* if needed."""
    exporter = vtk.vtkOBJExporter()
    # exist_ok avoids the check-then-create race of isdir() + makedirs().
    os.makedirs(dir, exist_ok=True)
    path = "%s/%s" % (dir, name)
    exporter.SetFilePrefix(path)
    exporter.SetRenderWindow(rw)
    exporter.Write()
def save_stl(polydata, dir, name):
    """Write *polydata* to dir/name.stl, creating *dir* if needed."""
    exporter = vtk.vtkSTLWriter()
    # exist_ok avoids the check-then-create race of isdir() + makedirs().
    os.makedirs(dir, exist_ok=True)
    path = '%s/%s.stl' % (dir, name)
    exporter.SetFileName(path)
    exporter.SetInputData(polydata)
    exporter.Write()
def save_vrml(name, dir, rw):
    """Render *rw* and export its scene to dir/name.vrml, creating *dir* if
    needed."""
    # exist_ok avoids the check-then-create race of isdir() + makedirs().
    os.makedirs(dir, exist_ok=True)
    path = '%s/%s.vrml' % (dir, name)
    rw.Render()
    exporter = vtk.vtkVRMLExporter()
    exporter.SetFileName(path)
    exporter.SetRenderWindow(rw)
    rw.Render()
    exporter.Write()
def initActorForExport(actor, rw, scale, reduction):
    """Undo the rasterization scale, decimate the mesh by *reduction*
    percent, and add the actor to rw's first renderer for export."""
    renderer = rw.GetRenderers().GetFirstRenderer()
    renderer.AddActor(reduceMesh(underScale(actor, scale), reduction))
def toOriginalPos(actor, center, rotationTransform):
    """Undo the transform applied by axisAligment(): rotate the actor's mesh
    back using the transpose of the rotation (its inverse, since the SVD
    basis is orthonormal), then translate it back to the original *center*.
    Mutates the actor's mapper in place.
    """
    # Inverse rotation = transpose of the orthonormal rotation matrix.
    rotMat = vtk.vtkMatrix4x4()
    rotationTransform.GetTranspose(rotMat)
    rotTrans = vtk.vtkTransform()
    rotTrans.SetMatrix(rotMat)
    transformFilter = vtk.vtkTransformFilter()
    transformFilter.SetInputData(actor.GetMapper().GetInput())
    transformFilter.SetTransform(rotTrans)
    transformFilter.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(transformFilter.GetOutputPort())
    mapper.Update()
    actor.SetMapper(mapper)
    # Translate back to the original center of mass.
    centerTransform = vtk.vtkTransform()
    centerTransform.Translate(center[0], center[1], center[2])
    transformFilter = vtk.vtkTransformFilter()
    transformFilter.SetInputData(actor.GetMapper().GetInput())
    transformFilter.SetTransform(centerTransform)
    transformFilter.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(transformFilter.GetOutputPort())
    mapper.Update()
    actor.SetMapper(mapper)
    # Debug: recompute and print the resulting center of mass.
    centerCalculer = vtk.vtkCenterOfMass()
    centerCalculer.SetInputData(actor.GetMapper().GetInput())
    centerCalculer.SetUseScalarsAsWeights(False)
    centerCalculer.Update()
    center = centerCalculer.GetCenter()
    print(center)
def main(input_filename, areas_filename, scale, is_imx, exportPath=None, exportType=False, reduction=70, radius=RADIUS,
         combine=False):
    """Repair every mesh in a VRML/IMX scene, write per-object area stats to
    *areas_filename* as CSV, and optionally export repaired ("_R") and
    original ("_O") meshes.

    input_filename -- the .imx (when is_imx is True) or .vrml scene file
    areas_filename -- output CSV path; also used to derive exportPath when
                      exportPath is None
    scale          -- rasterization scale factor; doubled per-actor when
                      voxelization fails, then restored
    is_imx         -- when True, convert the .imx to .vrml/.names via
                      parse_imx in a child process and read per-object names
    exportPath     -- destination dir for exported meshes (derived if None)
    exportType     -- "Stl", "Vrml", "Obj", or "None" to skip exporting
    reduction      -- decimation percentage applied before exporting
    radius         -- morphological/smoothing kernel radius in voxels
    combine        -- accepted for interface compatibility; not used here
    """
    # TODO: The following doesn't hide the RenderWindow :/
    # factGraphics = vtk.vtkGraphicsFactory()
    # factGraphics.SetUseMesaClasses(1)
    # factImage = vtk.vtkImagingFactory()
    # factImage.SetUseMesaClasses(1)
    # Derive the export directory from the areas file / input file names.
    if exportPath is None:
        pos = areas_filename.rfind("/")
        filename = os.path.splitext(input_filename)[0]
        posFilename = filename.rfind("/")
        exportPath = areas_filename[:pos] + "/Meshes" + filename[posFilename:]
    else:
        filename = os.path.splitext(input_filename)[0]
        pos = filename.rfind("/")
        if pos == -1:
            exportPath += "/"
        exportPath += filename[pos:]
    print(exportPath)
    # IMX input: convert to VRML plus a .names sidecar in a child process.
    if is_imx:
        vrml_filename = os.path.splitext(input_filename)[0] + ".vrml"
        names_filename = os.path.splitext(input_filename)[0] + ".names"
        args = ["{}".format(input_filename), "{}".format(vrml_filename), "{}".format(names_filename)]
        p = Process(target=parse_imx.main, args=[args])
        p.start()
        p.join()
        names_list = []
        with open(names_filename) as f:
            for line in f:
                line = re.sub(r'\n', '', line)
                names_list.append(line)
    else:
        vrml_filename = input_filename
        names_list = None
    rw = vtk.vtkRenderWindow()
    rwi = vtk.vtkRenderWindowInteractor()
    rwi.SetRenderWindow(rw)
    # rw.OffScreenRenderingOn()
    importer = vtk.vtkVRMLImporter()
    importer.SetFileName(vrml_filename)
    # importer = vtk.vtk3DSImporter()
    # importer.SetFileName("cube.3ds")
    importer.Read()
    importer.SetRenderWindow(rw)
    importer.Update()
    rw.Render()
    ren = importer.GetRenderer()
    actors = ren.GetActors()
    actors.InitTraversal()
    # Second window/renderer used only while exporting individual meshes.
    rwExport = vtk.vtkRenderWindow()
    # rwExport.OffScreenRenderingOn()
    renExport = vtk.vtkRenderer()
    rwExport.AddRenderer(renExport)
    rwExport.Render()
    if is_imx:
        csv = "Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z,Name\n"
    else:
        csv = "Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z\n"
    print(
        "-------- Repairing original mesh and Calculating areas (This process might take a long time, please wait) -----------")
    for i in range(ren.GetNumberOfPropsRendered()):
        sys.stdout.write("%d _" % i)
        actor = actors.GetNextActor()
        polydata = actor.GetMapper().GetInput()
        # Keep a deep copy of the original mesh so it can be exported as "_O".
        polydataCopy = vtk.vtkPolyData()
        polydataCopy.DeepCopy(polydata)
        area_pre = compute_area(actor)
        centroid = actor.GetCenter()
        rescaled = False
        try:
            rw.Render()
            (center, rotation) = axisAligment(actor)
            rw.Render()
            open_actor(actor, i, scale, radius)
        except ValueError as e:
            # [KNOWN BUG] The sizes are corrected, but not the position
            # NOTE(review): if axisAligment() itself raised, `center`/`rotation`
            # would be unbound below in the export branch — confirm.
            scale = scale * 2
            open_actor(actor, i, scale, radius)
            rescaled = True
        # Repaired mesh lives in voxel space, hence the scale**2 area correction.
        area_post = compute_area(actor) / scale ** 2
        if is_imx:
            data = []
            data.extend([i, area_pre, area_post, area_post / area_pre])
            data.extend(centroid)
            data.append(names_list[i])
            csv += "%d,%f,%f,%f,%f,%f,%f,%s\n" % tuple(data)
        else:
            data = []
            data.extend([i, area_pre, area_post, area_post / area_pre])
            data.extend(centroid)
            csv += "%d,%f,%f,%f,%f,%f,%f\n" % tuple(data)
        if exportType != "None":
            # Rescale/decimate the repaired mesh and move it back to its
            # original pose, then export "_R" (repaired) and "_O" (original).
            initActorForExport(actor, rwExport, scale, reduction)
            toOriginalPos(actor, center, rotation)
            if names_list is not None:
                name = names_list[i]
            else:
                name = i
            if exportType == "Stl":
                save_stl(actor.GetMapper().GetInput(), exportPath, str(name) + "_R")
                save_stl(polydataCopy, exportPath, str(name) + "_O")
                renExport.RemoveActor(actor)
            elif exportType == "Vrml":
                save_vrml(str(name) + "_R", exportPath, rwExport)
                renExport.RemoveActor(actor)
                actorOld = vtk.vtkActor()
                mapper = vtk.vtkPolyDataMapper()
                mapper.SetInputData(polydataCopy)
                actorOld.SetMapper(mapper)
                renExport.AddActor(actorOld)
                save_vrml(str(name) + "_O", exportPath, rwExport)
                renExport.RemoveActor(actorOld)
            elif exportType == "Obj":
                save_obj(rwExport, exportPath, str(name) + "_R")
                renExport.RemoveActor(actor)
                actorOld = vtk.vtkActor()
                mapper = vtk.vtkPolyDataMapper()
                mapper.SetInputData(polydataCopy)
                actorOld.SetMapper(mapper)
                renExport.AddActor(actorOld)
                save_obj(rwExport, exportPath, str(name) + "_O")
                renExport.RemoveActor(actorOld)
        ren.RemoveActor(actor)
        if rescaled:
            # Undo the per-actor scale doubling before the next iteration.
            scale /= 2
    with open(areas_filename, 'w') as f:
        f.write(csv)
    if is_imx:
        # Clean up the temporary files produced from the .imx conversion.
        os.remove(vrml_filename)
        os.remove(names_filename)
    rw.Finalize()
    print("")
| [
"vtk.vtkPolyDataToImageStencil",
"multiprocessing.Process",
"vtk.vtkOBJExporter",
"vtk.vtkImageStencil",
"vtk.vtkDecimatePro",
"vtk.vtkVRMLExporter",
"os.remove",
"vtk.vtkVRMLImporter",
"vtk.vtkImageDilateErode3D",
"os.path.isdir",
"vtk.vtkRenderer",
"vtk.vtkMetaImageWriter",
"sys.stdout.flu... | [((1355, 1379), 'vtk.vtkMetaImageWriter', 'vtk.vtkMetaImageWriter', ([], {}), '()\n', (1377, 1379), False, 'import vtk\n'), ((2470, 2501), 'vtk.vtkPolyDataToImageStencil', 'vtk.vtkPolyDataToImageStencil', ([], {}), '()\n', (2499, 2501), False, 'import vtk\n'), ((2841, 2859), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (2857, 2859), False, 'import vtk\n'), ((3181, 3202), 'vtk.vtkImageStencil', 'vtk.vtkImageStencil', ([], {}), '()\n', (3200, 3202), False, 'import vtk\n'), ((3422, 3443), 'vtk.vtkImageStencil', 'vtk.vtkImageStencil', ([], {}), '()\n', (3441, 3443), False, 'import vtk\n'), ((3849, 3870), 'vtk.vtkCenterOfMass', 'vtk.vtkCenterOfMass', ([], {}), '()\n', (3868, 3870), False, 'import vtk\n'), ((4071, 4089), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (4087, 4089), False, 'import vtk\n'), ((4179, 4203), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (4201, 4203), False, 'import vtk\n'), ((4340, 4363), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (4361, 4363), False, 'import vtk\n'), ((4532, 4553), 'vtk.vtkCenterOfMass', 'vtk.vtkCenterOfMass', ([], {}), '()\n', (4551, 4553), False, 'import vtk\n'), ((4911, 4937), 'numpy.matrix', 'np.matrix', (['pointsMatrixAux'], {}), '(pointsMatrixAux)\n', (4920, 4937), True, 'import numpy as np\n'), ((5046, 5097), 'numpy.linalg.svd', 'np.linalg.svd', (['covarianzeMatrix'], {'full_matrices': '(True)'}), '(covarianzeMatrix, full_matrices=True)\n', (5059, 5097), True, 'import numpy as np\n'), ((5120, 5138), 'vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (5136, 5138), False, 'import vtk\n'), ((5418, 5436), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (5434, 5436), False, 'import vtk\n'), ((5508, 5532), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (5530, 5532), False, 'import vtk\n'), ((5691, 5714), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (5712, 5714), False, 
'import vtk\n'), ((5884, 5911), 'vtk.vtkImageDilateErode3D', 'vtk.vtkImageDilateErode3D', ([], {}), '()\n', (5909, 5911), False, 'import vtk\n'), ((6521, 6549), 'vtk.vtkImageGaussianSmooth', 'vtk.vtkImageGaussianSmooth', ([], {}), '()\n', (6547, 6549), False, 'import vtk\n'), ((6749, 6771), 'vtk.vtkMarchingCubes', 'vtk.vtkMarchingCubes', ([], {}), '()\n', (6769, 6771), False, 'import vtk\n'), ((7831, 7854), 'vtk.vtkAppendPolyData', 'vtk.vtkAppendPolyData', ([], {}), '()\n', (7852, 7854), False, 'import vtk\n'), ((8045, 8059), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (8057, 8059), False, 'import vtk\n'), ((10090, 10108), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (10106, 10108), False, 'import vtk\n'), ((10221, 10245), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (10243, 10245), False, 'import vtk\n'), ((10367, 10390), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (10388, 10390), False, 'import vtk\n'), ((10570, 10590), 'vtk.vtkDecimatePro', 'vtk.vtkDecimatePro', ([], {}), '()\n', (10588, 10590), False, 'import vtk\n'), ((10740, 10763), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (10761, 10763), False, 'import vtk\n'), ((11022, 11042), 'vtk.vtkOBJExporter', 'vtk.vtkOBJExporter', ([], {}), '()\n', (11040, 11042), False, 'import vtk\n'), ((11272, 11290), 'vtk.vtkSTLWriter', 'vtk.vtkSTLWriter', ([], {}), '()\n', (11288, 11290), False, 'import vtk\n'), ((11631, 11652), 'vtk.vtkVRMLExporter', 'vtk.vtkVRMLExporter', ([], {}), '()\n', (11650, 11652), False, 'import vtk\n'), ((12026, 12044), 'vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (12042, 12044), False, 'import vtk\n'), ((12103, 12121), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (12119, 12121), False, 'import vtk\n'), ((12176, 12200), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (12198, 12200), False, 'import vtk\n'), ((12350, 12373), 'vtk.vtkPolyDataMapper', 
'vtk.vtkPolyDataMapper', ([], {}), '()\n', (12371, 12373), False, 'import vtk\n'), ((12508, 12526), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (12524, 12526), False, 'import vtk\n'), ((12613, 12637), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (12635, 12637), False, 'import vtk\n'), ((12794, 12817), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (12815, 12817), False, 'import vtk\n'), ((12951, 12972), 'vtk.vtkCenterOfMass', 'vtk.vtkCenterOfMass', ([], {}), '()\n', (12970, 12972), False, 'import vtk\n'), ((14611, 14632), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (14630, 14632), False, 'import vtk\n'), ((14643, 14674), 'vtk.vtkRenderWindowInteractor', 'vtk.vtkRenderWindowInteractor', ([], {}), '()\n', (14672, 14674), False, 'import vtk\n'), ((14751, 14772), 'vtk.vtkVRMLImporter', 'vtk.vtkVRMLImporter', ([], {}), '()\n', (14770, 14772), False, 'import vtk\n'), ((15088, 15109), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (15107, 15109), False, 'import vtk\n'), ((15164, 15181), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (15179, 15181), False, 'import vtk\n'), ((326, 362), 'optparse.OptionParser.__init__', 'optparse.OptionParser.__init__', (['self'], {}), '(self)\n', (356, 362), False, 'import optparse\n'), ((8089, 8112), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (8110, 8112), False, 'import vtk\n'), ((8466, 8493), 'sys.stdout.write', 'sys.stdout.write', (["('%d ' % i)"], {}), "('%d ' % i)\n", (8482, 8493), False, 'import sys\n'), ((8891, 8909), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8907, 8909), False, 'import sys\n'), ((11054, 11072), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (11067, 11072), False, 'import os\n'), ((11082, 11098), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11093, 11098), False, 'import os\n'), ((11302, 11320), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), 
'(dir)\n', (11315, 11320), False, 'import os\n'), ((11330, 11346), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11341, 11346), False, 'import os\n'), ((11516, 11534), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (11529, 11534), False, 'import os\n'), ((11544, 11560), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11555, 11560), False, 'import os\n'), ((14268, 14311), 'multiprocessing.Process', 'Process', ([], {'target': 'parse_imx.main', 'args': '[args]'}), '(target=parse_imx.main, args=[args])\n', (14275, 14311), False, 'from multiprocessing import Process\n'), ((15591, 15619), 'sys.stdout.write', 'sys.stdout.write', (["('%d _' % i)"], {}), "('%d _' % i)\n", (15607, 15619), False, 'import sys\n'), ((15729, 15746), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (15744, 15746), False, 'import vtk\n'), ((18414, 18438), 'os.remove', 'os.remove', (['vrml_filename'], {}), '(vrml_filename)\n', (18423, 18438), False, 'import os\n'), ((18447, 18472), 'os.remove', 'os.remove', (['names_filename'], {}), '(names_filename)\n', (18456, 18472), False, 'import os\n'), ((13628, 13660), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (13644, 13660), False, 'import os\n'), ((13814, 13846), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (13830, 13846), False, 'import os\n'), ((14036, 14068), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (14052, 14068), False, 'import os\n'), ((14107, 14139), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (14123, 14139), False, 'import os\n'), ((14462, 14485), 're.sub', 're.sub', (['"""\\\\n"""', '""""""', 'line'], {}), "('\\\\n', '', line)\n", (14468, 14485), False, 'import re\n'), ((17446, 17460), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (17458, 17460), False, 'import vtk\n'), ((17486, 17509), 'vtk.vtkPolyDataMapper', 
'vtk.vtkPolyDataMapper', ([], {}), '()\n', (17507, 17509), False, 'import vtk\n'), ((17937, 17951), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (17949, 17951), False, 'import vtk\n'), ((17977, 18000), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (17998, 18000), False, 'import vtk\n')] |
'''
service.py
ancilla
Created by <NAME> (<EMAIL>) on 01/08/20
Copyright 2019 FrenzyLabs, LLC.
'''
import json
from .base import BaseHandler
import importlib
import socket
from ...data.models import Service
import asyncio
import functools
import requests
class WifiResource(BaseHandler):
  """Tornado-style handler that proxies wifi connect/status/scan requests to
  a local wifi-configuration service (default http://localhost:8080)."""

  def initialize(self, node):
    self.node = node
    self.session = requests.Session()
    self.session.headers.update({"Content-Type" : "application/json", "Accept": "application/json"})
    self.wifi_host = "http://localhost:8080"
    # self.wifi_host = "http://192.168.27.1:8080"

  def handle_response(self, resp):
    """Done-callback for the request future: surface a transport-level
    exception to the client as a 400 with the error text."""
    try:
      if resp.exception():
        print(f"Resp Exception = {resp.exception()}")
        self.set_status(400)
        self.write({"errors": [str(resp.exception())] })
    except Exception as e:
      print(f"HandleExcept = {str(e)}")

  async def make_request(self, req, content_type = 'json', auth = True, options = None):
    """Prepare *req*, send it on an executor thread, and await the response.

    Returns the requests.Response, or None when the send raised (the error
    is already reported to the client by handle_response).
    Using None as the default for *options* (instead of a literal dict)
    avoids the shared-mutable-default pitfall; the effective default stays
    {"verify": False, "timeout": 25.001}.
    """
    if options is None:
      options = {"verify": False, "timeout": 25.001}
    prepped = self.session.prepare_request(req)
    if not auth:
      del prepped.headers['Authorization']
    # Run the blocking requests send off the event loop.
    loop = asyncio.get_event_loop()
    makerequest = functools.partial(self.session.send, prepped, **options)
    future = loop.run_in_executor(None, makerequest)
    future.add_done_callback(lambda res: self.handle_response(res))
    resp = None
    try:
      resp = await future
    except Exception as ex:
      template = "An exception of type {0} occurred. Arguments:\n{1!r}"
      msg = template.format(type(ex).__name__, ex.args)
      print(f"{msg}")
    return resp

  async def post(self, *args):
    """POST the handler params to the wifi service's /connect endpoint and
    relay its status code and body."""
    url = f'{self.wifi_host}/connect'
    req = requests.Request('POST', url, json=self.params)
    resp = await self.make_request(req)
    print(f"WIFI POST RESP = {resp}")
    if resp:
      self.set_status(resp.status_code)
      content = {}
      if resp.status_code == 200:
        body = ''
        try:
          body = resp.json()
        except Exception as e:
          # Non-JSON success body: fall back to raw text.
          body = resp.text
        content["data"] = body
      else:
        content["errors"] = [resp.text]
      self.write(content)

  async def get(self, *args):
    """GET the wifi service's /status endpoint (or /scan when the request
    path ends in /scan) and relay its status code and body."""
    wifipath = "status"
    if (len(args) > 0):
      if args[0] == "/scan":
        wifipath = "scan"
    url = f'{self.wifi_host}/{wifipath}'
    req = requests.Request('GET', url)
    resp = await self.make_request(req)
    if resp:
      self.set_status(resp.status_code)
      content = {}
      if resp.status_code == 200:
        body = ''
        try:
          body = resp.json()
        except Exception as e:
          # Non-JSON success body: fall back to raw text.
          body = resp.text
        content["data"] = body
      else:
        content["errors"] = [resp.text]
      self.write(content)
| [
"asyncio.get_event_loop",
"functools.partial",
"requests.Session",
"requests.Request"
] | [((375, 393), 'requests.Session', 'requests.Session', ([], {}), '()\n', (391, 393), False, 'import requests\n'), ((1304, 1328), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1326, 1328), False, 'import asyncio\n'), ((1349, 1405), 'functools.partial', 'functools.partial', (['self.session.send', 'prepped'], {}), '(self.session.send, prepped, **options)\n', (1366, 1405), False, 'import functools\n'), ((1927, 1974), 'requests.Request', 'requests.Request', (['"""POST"""', 'url'], {'json': 'self.params'}), "('POST', url, json=self.params)\n", (1943, 1974), False, 'import requests\n'), ((2750, 2778), 'requests.Request', 'requests.Request', (['"""GET"""', 'url'], {}), "('GET', url)\n", (2766, 2778), False, 'import requests\n')] |
#!/usr/bin/env python
from holtztools import plots,html
from astropy.io import fits,ascii
import numpy as np
import math
import pdb
import argparse
import os
import matplotlib.pyplot as plt
def throughplot(instrument='apogee-s',outfile=None,inter=False) :
    '''
    Routine to make zeropoint/throughput plots from apogeeSci summary files
    with information including FWHM, GDRMS, CART

    Parameters
    ----------
    instrument : str
        'apogee-s' selects LCO 2.5m constants (gain, cart range, fiber
        radius, telescope directory); any other value selects the APO 2.5m
        constants.
    outfile : str, optional
        Currently unused.  # NOTE(review): confirm before removing from the signature
    inter : bool
        If True, drop into pdb after all plots are written.

    Side effects
    ------------
    Reads <instrument>Sci.fits from the current directory, optionally a
    per-telescope clouds.txt from $APOGEEREDUCEPLAN_DIR, and writes PNG
    plots plus an HTML index under monitor/<instrument>/.
    '''
    # instrument specific
    if instrument == 'apogee-s' :
        gain=3.
        carts=[20,25]
        fiber_rad=0.65
        telescope='lco25m'
    else :
        gain=1.9
        carts=[0,10]
        fiber_rad=1.
        telescope='apo25m'
    # read summary data made by mkmonitor
    a=fits.open(instrument+'Sci.fits')[1].data
    # keep only full-length exposures (>= 47 reads)
    gd = np.where(a['NREADS'] >= 47)[0]
    a=a[gd]
    # use weather information if we can
    clouds=np.zeros(len(a)).astype(int)
    nmiss=0
    nhave=0
    try :
        # clouds.txt maps (plate, MJD) -> clouds_level; exposures with
        # clouds_level <= 1 are kept below
        c=ascii.read(os.environ['APOGEEREDUCEPLAN_DIR']+'/data/'+telescope+'/clouds.txt')
        try:
            for i,p in enumerate(a['PLATE']) :
                j=np.where((c['plate'] == p) & (c['MJD'] == a['MJD'][i]) )[0]
                if len(j)>0 :
                    if len(j)>1 :
                        # duplicate (plate, MJD) rows in clouds.txt: stop for inspection
                        print('double cloud match',p,a['MJD'][i])
                        pdb.set_trace()
                    clouds[i] = c['clouds_level'][j[0]]
                    nhave+=1
                else :
                    nmiss+=1
                    print('no clouds match found for',a['MJD'][i],p)
        # NOTE(review): bare except hides the real error type; consider
        # narrowing to the expected exceptions
        except :
            print('error!',i,p,j)
            pdb.set_trace()
        gd=np.where(clouds <= 1)[0]
        a=a[gd]
    except :
        # clouds file missing/unreadable: proceed without weather filtering
        print('cant open clouds file')
    # seeing correction factor
    # NOTE(review): the FWHM-based sigma is immediately overwritten by the
    # SEEING-based one; the first assignment is dead code
    sigma = a['FWHM']/2.354
    sigma = a['SEEING']/2.354
    # ee = fraction of a Gaussian PSF enclosed by the fiber aperture
    ee = 1. - np.exp(-(fiber_rad**2)/(2*sigma**2))
    # seeing-corrected zeropoint: add back the light lost outside the fiber
    corr = a['ZERONORM']-2.5*np.log10(ee)
    gd = np.where(np.isfinite(corr))[0]
    a=a[gd]
    ee=ee[gd]
    corr=corr[gd]
    # run number for LCO
    # presumably 57850 is the MJD of run 0 and runs are ~29 days long -- verify
    run = ((a['MJD']-57850)/29.+0.5).astype(int)
    # rough throughput calculation
    # photon-counting constants (cgs): Planck constant, speed of light,
    # H-band wavelength/bandpass (cm), read time (s), telescope area,
    # and the Vega flux in the band
    h=6.63e-27
    c=3.e10
    lam=1.6e-4
    dlam=0.3
    dt=10.6
    area=math.pi*(125.**2-50.**2)
    fvega=11.38e-11
    through=10**(0.4*a['ZERONORM'])*h*c/lam/dlam/dt*gain/area/fvega/ee
    # straight DHA
    dha=a['HA']-a['DESIGN_HA'][:,0]
    #dha=np.abs(dha)
    # "normalized" DHA
    # scale negative offsets by the design-HA low range, positive by the high range
    j=np.where(a['HA']<a['DESIGN_HA'][:,0])[0]
    dha[j]/=(a['DESIGN_HA'][j,0]-a['DESIGN_HA'][j,1])
    j=np.where(a['HA']>=a['DESIGN_HA'][:,0])[0]
    dha[j]/=(a['DESIGN_HA'][j,2]-a['DESIGN_HA'][j,0])
    #plots with MJD
    files=[]
    out='monitor/'+instrument+'/'+instrument
    # point size by FWHM
    psize=a['FWHM']/1.*40
    # exposures with no FWHM measurement get a small default marker
    j=np.where(psize == 0.)[0]
    psize[j] = 10
    # histograms by run
    fig,ax=plots.multi(2,3,figsize=(8,12))
    file=out+'zero_hist.png'
    runs=list(set(run))
    # 999 is a sentinel for "all runs combined" (counts halved below)
    runs.append(999)
    for r in runs :
        gd = np.where(run == r)[0]
        if r == 999 :
            gd = np.where(run < 999)[0]
        # emphasize recent runs with a heavier line
        if r >= 8 : lw=2
        else : lw=1
        print(r,len(gd))
        # NOTE(review): matplotlib's hist 'normed' keyword is deprecated
        # (removed in 3.x) in favor of 'density'
        try:
            n,b,p=plt.hist(a['GDRMS'][gd],histtype='step',bins=np.arange(0,1,0.05),label='{:3d}'.format(r),linewidth=lw,normed=False)
            if r == 999 : n/=2
            # re-plot bin centers vs counts so line style/label can be controlled
            ax[0,0].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:2d}'.format(r))
            ax[0,0].set_xlabel('GDRMS')
        except : pass
        try:
            n,b,p=plt.hist(a['ZERONORM'][gd],histtype='step',bins=np.arange(12,15.5,0.1),linewidth=lw,normed=False,label='{:2d}'.format(r))
            if r == 999 : n/=2
            ax[0,1].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:2d}'.format(r))
            ax[0,1].set_xlabel('ZERONORM')
            n,b,p=plt.hist(corr[gd],histtype='step',bins=np.arange(12,16,0.1),linewidth=lw,normed=False,label='{:3d}'.format(r))
            if r == 999 : n/=2
            ax[1,0].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:3d}'.format(r))
            ax[1,0].set_xlabel('ZERONORM (adjusted)')
            n,b,p=plt.hist(a['ZERORMS'][gd],histtype='step',bins=np.arange(0,1,0.05),linewidth=lw,normed=False,label='{:3d}'.format(r))
            if r == 999 : n/=2
            ax[1,1].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:3d}'.format(r))
            ax[1,1].set_xlabel('ZERORMS')
            n,b,p=plt.hist(through[gd],histtype='step',bins=np.arange(0,0.34,0.02),linewidth=lw,normed=False,label='{:3d}'.format(r))
            if r == 999 : n/=2
            ax[2,0].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:3d}'.format(r))
            ax[2,0].set_xlabel('THROUGHPUT (adjusted)')
        except : pass
    # legends only make sense for the multi-run (LCO) instrument -- presumably
    if instrument == 'apogee-s' :
        ax[0,0].legend(fontsize=6,loc=1,title='Run')
        ax[0,1].legend(fontsize=6,loc=2,title='Run')
        ax[1,0].legend(fontsize=6,loc=2,title='Run')
        ax[1,1].legend(fontsize=6,loc=1,title='Run')
    # unused panel
    ax[2,1].remove()
    fig.tight_layout()
    fig.savefig(file)
    files.append([os.path.basename(file)])
    # time-series plots of zeropoint quantities vs MJD, color-coded by
    # each of the quantities in ctype
    ctype = [a['FWHM'],a['SEEING'],a['GDRMS'],dha,a['CART']]
    name = ['zero_fwhm','zero_seeing','zero_gdrms','zero_dha','zero_cart']
    zr=[[0.5,2.],[0.5,2.],[0,0.8],[-2,2],carts]
    zt=['FWHM','SEEING','GDRMS','DHA','CART']
    for j,c in enumerate(ctype) :
        fig,ax=plots.multi(1,4,hspace=0.001,sharex=True,figsize=(24,6))
        file=out+name[j]+'.png'
        plots.plotc(ax[0],a['MJD'],a['ZERONORM'],c,yr=[12,15.5],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='ZERONORM',zt=zt[j])
        plots.plotc(ax[1],a['MJD'],corr,c,yr=[12,15.5],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='ZERONORM (adjusted)',zt=zt[j])
        plots.plotc(ax[2],a['MJD'],a['ZERORMS'],c,yr=[0,1],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='ZERORMS',zt=zt[j])
        plots.plotc(ax[3],a['MJD'],through,c,yr=[0,0.3],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='throughput',zt=zt[j])
        fig.savefig(file)
        files.append([os.path.basename(file)])
    # zeropoint vs seeing, color-coded by guider rms
    fig,ax=plots.multi(1,1)
    plots.plotc(ax,a['SEEING'],a['ZERONORM'],a['GDRMS'],xr=[0.,3.0],yr=[13,15.5],zr=[0.2,1.2],xt='Seeing',yt='ZERONORM',zt='GDRMS',colorbar=True,size=1)
    #plots.plotc(ax[1],a['SEEING'],corr,a['GDRMS'],xr=[0.,3.0],yr=[13,15.5],zr=[0.2,1.2],xt='Seeing',yt='seeing-corrected ZERONORM',zt='GDRMS',colorbar=True,size=1)
    file=out+'_seeing.png'
    fig.savefig(file)
    files.append([os.path.basename(file)])
    # build an HTML index of all generated plots
    out='monitor/'+instrument+'/'+instrument
    html.htmltab(files,file=out+'zero.html')
    if inter :
        pdb.set_trace()
if __name__ == "__main__":
    # Command-line entry point: choose the instrument, then build the plots.
    arg_parser = argparse.ArgumentParser(
        description="Make throughput plots",
        usage="through --instrument apogee-s",
    )
    arg_parser.add_argument(
        "-i", "--instrument",
        type=str,
        required=True,
        choices=['apogee-s', 'apogee-n'],
        help="instrument to plot",
    )
    parsed = arg_parser.parse_args()
    throughplot(instrument=parsed.instrument)
| [
"numpy.log10",
"argparse.ArgumentParser",
"numpy.where",
"holtztools.html.htmltab",
"numpy.exp",
"holtztools.plots.multi",
"holtztools.plots.plotc",
"numpy.isfinite",
"os.path.basename",
"pdb.set_trace",
"astropy.io.fits.open",
"astropy.io.ascii.read",
"numpy.arange"
] | [((2788, 2822), 'holtztools.plots.multi', 'plots.multi', (['(2)', '(3)'], {'figsize': '(8, 12)'}), '(2, 3, figsize=(8, 12))\n', (2799, 2822), False, 'from holtztools import plots, html\n'), ((5967, 5984), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(1)'], {}), '(1, 1)\n', (5978, 5984), False, 'from holtztools import plots, html\n'), ((5988, 6160), 'holtztools.plots.plotc', 'plots.plotc', (['ax', "a['SEEING']", "a['ZERONORM']", "a['GDRMS']"], {'xr': '[0.0, 3.0]', 'yr': '[13, 15.5]', 'zr': '[0.2, 1.2]', 'xt': '"""Seeing"""', 'yt': '"""ZERONORM"""', 'zt': '"""GDRMS"""', 'colorbar': '(True)', 'size': '(1)'}), "(ax, a['SEEING'], a['ZERONORM'], a['GDRMS'], xr=[0.0, 3.0], yr=[\n 13, 15.5], zr=[0.2, 1.2], xt='Seeing', yt='ZERONORM', zt='GDRMS',\n colorbar=True, size=1)\n", (5999, 6160), False, 'from holtztools import plots, html\n'), ((6444, 6487), 'holtztools.html.htmltab', 'html.htmltab', (['files'], {'file': "(out + 'zero.html')"}), "(files, file=out + 'zero.html')\n", (6456, 6487), False, 'from holtztools import plots, html\n'), ((6565, 6669), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Make throughput plots"""', 'usage': '"""through --instrument apogee-s"""'}), "(description='Make throughput plots', usage=\n 'through --instrument apogee-s')\n", (6588, 6669), False, 'import argparse\n'), ((744, 771), 'numpy.where', 'np.where', (["(a['NREADS'] >= 47)"], {}), "(a['NREADS'] >= 47)\n", (752, 771), True, 'import numpy as np\n'), ((912, 1001), 'astropy.io.ascii.read', 'ascii.read', (["(os.environ['APOGEEREDUCEPLAN_DIR'] + '/data/' + telescope + '/clouds.txt')"], {}), "(os.environ['APOGEEREDUCEPLAN_DIR'] + '/data/' + telescope +\n '/clouds.txt')\n", (922, 1001), False, 'from astropy.io import fits, ascii\n'), ((1798, 1840), 'numpy.exp', 'np.exp', (['(-fiber_rad ** 2 / (2 * sigma ** 2))'], {}), '(-fiber_rad ** 2 / (2 * sigma ** 2))\n', (1804, 1840), True, 'import numpy as np\n'), ((2374, 2414), 'numpy.where', 'np.where', 
(["(a['HA'] < a['DESIGN_HA'][:, 0])"], {}), "(a['HA'] < a['DESIGN_HA'][:, 0])\n", (2382, 2414), True, 'import numpy as np\n'), ((2475, 2516), 'numpy.where', 'np.where', (["(a['HA'] >= a['DESIGN_HA'][:, 0])"], {}), "(a['HA'] >= a['DESIGN_HA'][:, 0])\n", (2483, 2516), True, 'import numpy as np\n'), ((2709, 2731), 'numpy.where', 'np.where', (['(psize == 0.0)'], {}), '(psize == 0.0)\n', (2717, 2731), True, 'import numpy as np\n'), ((5293, 5354), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(4)'], {'hspace': '(0.001)', 'sharex': '(True)', 'figsize': '(24, 6)'}), '(1, 4, hspace=0.001, sharex=True, figsize=(24, 6))\n', (5304, 5354), False, 'from holtztools import plots, html\n'), ((5386, 5523), 'holtztools.plots.plotc', 'plots.plotc', (['ax[0]', "a['MJD']", "a['ZERONORM']", 'c'], {'yr': '[12, 15.5]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""ZERONORM"""', 'zt': 'zt[j]'}), "(ax[0], a['MJD'], a['ZERONORM'], c, yr=[12, 15.5], zr=zr[j],\n size=psize, colorbar=True, xt='MJD', yt='ZERONORM', zt=zt[j])\n", (5397, 5523), False, 'from holtztools import plots, html\n'), ((5515, 5654), 'holtztools.plots.plotc', 'plots.plotc', (['ax[1]', "a['MJD']", 'corr', 'c'], {'yr': '[12, 15.5]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""ZERONORM (adjusted)"""', 'zt': 'zt[j]'}), "(ax[1], a['MJD'], corr, c, yr=[12, 15.5], zr=zr[j], size=psize,\n colorbar=True, xt='MJD', yt='ZERONORM (adjusted)', zt=zt[j])\n", (5526, 5654), False, 'from holtztools import plots, html\n'), ((5646, 5778), 'holtztools.plots.plotc', 'plots.plotc', (['ax[2]', "a['MJD']", "a['ZERORMS']", 'c'], {'yr': '[0, 1]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""ZERORMS"""', 'zt': 'zt[j]'}), "(ax[2], a['MJD'], a['ZERORMS'], c, yr=[0, 1], zr=zr[j], size=\n psize, colorbar=True, xt='MJD', yt='ZERORMS', zt=zt[j])\n", (5657, 5778), False, 'from holtztools import plots, html\n'), ((5769, 5900), 
'holtztools.plots.plotc', 'plots.plotc', (['ax[3]', "a['MJD']", 'through', 'c'], {'yr': '[0, 0.3]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""throughput"""', 'zt': 'zt[j]'}), "(ax[3], a['MJD'], through, c, yr=[0, 0.3], zr=zr[j], size=psize,\n colorbar=True, xt='MJD', yt='throughput', zt=zt[j])\n", (5780, 5900), False, 'from holtztools import plots, html\n'), ((6508, 6523), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (6521, 6523), False, 'import pdb\n'), ((694, 728), 'astropy.io.fits.open', 'fits.open', (["(instrument + 'Sci.fits')"], {}), "(instrument + 'Sci.fits')\n", (703, 728), False, 'from astropy.io import fits, ascii\n'), ((1600, 1621), 'numpy.where', 'np.where', (['(clouds <= 1)'], {}), '(clouds <= 1)\n', (1608, 1621), True, 'import numpy as np\n'), ((1864, 1876), 'numpy.log10', 'np.log10', (['ee'], {}), '(ee)\n', (1872, 1876), True, 'import numpy as np\n'), ((1896, 1913), 'numpy.isfinite', 'np.isfinite', (['corr'], {}), '(corr)\n', (1907, 1913), True, 'import numpy as np\n'), ((2927, 2945), 'numpy.where', 'np.where', (['(run == r)'], {}), '(run == r)\n', (2935, 2945), True, 'import numpy as np\n'), ((4990, 5012), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (5006, 5012), False, 'import os\n'), ((6369, 6391), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (6385, 6391), False, 'import os\n'), ((1572, 1587), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1585, 1587), False, 'import pdb\n'), ((2989, 3008), 'numpy.where', 'np.where', (['(run < 999)'], {}), '(run < 999)\n', (2997, 3008), True, 'import numpy as np\n'), ((5930, 5952), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (5946, 5952), False, 'import os\n'), ((1070, 1125), 'numpy.where', 'np.where', (["((c['plate'] == p) & (c['MJD'] == a['MJD'][i]))"], {}), "((c['plate'] == p) & (c['MJD'] == a['MJD'][i]))\n", (1078, 1125), True, 'import numpy as np\n'), ((3158, 3179), 'numpy.arange', 
'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (3167, 3179), True, 'import numpy as np\n'), ((3489, 3513), 'numpy.arange', 'np.arange', (['(12)', '(15.5)', '(0.1)'], {}), '(12, 15.5, 0.1)\n', (3498, 3513), True, 'import numpy as np\n'), ((3782, 3804), 'numpy.arange', 'np.arange', (['(12)', '(16)', '(0.1)'], {}), '(12, 16, 0.1)\n', (3791, 3804), True, 'import numpy as np\n'), ((4092, 4113), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (4101, 4113), True, 'import numpy as np\n'), ((4384, 4408), 'numpy.arange', 'np.arange', (['(0)', '(0.34)', '(0.02)'], {}), '(0, 0.34, 0.02)\n', (4393, 4408), True, 'import numpy as np\n'), ((1286, 1301), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1299, 1301), False, 'import pdb\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import matplotlib.pyplot as plt
from lorawan_toa import *
def get_line(list_size, n_sf, bw=125):
    """Return the time-on-air ("t_packet", ms) for each payload size.

    Each entry of *list_size* is passed to get_toa() with spreading factor
    *n_sf* and bandwidth *bw* (kHz); only the "t_packet" field is kept.
    """
    toa_per_size = []
    for payload_size in list_size:
        toa_per_size.append(get_toa(payload_size, n_sf, n_bw=bw)["t_packet"])
    return toa_per_size
#########
# Figure 1: ToA vs payload size for SF7-SF12 at BW=125 kHz.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("SF and ToA (BW=125 kHz)")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", label="SF12", linewidth=3, alpha=1)
ax.plot(x, get_line(x, 11), "g-", label="SF11", linewidth=3, alpha=1)
ax.plot(x, get_line(x, 10), "k-", label="SF10", linewidth=3, alpha=1)
ax.plot(x, get_line(x, 9), "c-", label="SF9", linewidth=3, alpha=1)
ax.plot(x, get_line(x, 8), "m-", label="SF8", linewidth=3, alpha=1)
ax.plot(x, get_line(x, 7), "y-", label="SF7", linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
# NOTE: plt.show() blocks until the window is closed before the PNG is saved
plt.show()
fig.savefig("image/lora-toa-125.png")
#########
# Figure 2: AS923 without dwell-time limit.  Faint full-range curves in the
# background; solid segments span each SF's max MACPayload (via mpsrange).
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("AS923 No DwellTime")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 11), "g-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10), "k-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 9), "c-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 8), "m-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 7), "y-", linewidth=3, alpha=0.05)
# no dwellTime consideration
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 12), "b-", label="SF12",
        linewidth=3, alpha=1)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 11), "g-", label="SF11",
        linewidth=3, alpha=1)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 10), "k-", label="SF10",
        linewidth=3, alpha=1)
ax.plot(mpsrange(8, 123), get_line(mpsrange(8, 123), 9), "c-", label="SF9",
        linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 8), "m-", label="SF8",
        linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-", label="SF7",
        linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-without-dwelltime.png")
#########
# Figure 3: AS923 with the 400 ms dwell-time limit.  SF12/SF11 cannot fit
# any payload under 400 ms, so only dummy [0],[0] points are drawn for the
# legend entries.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("AS923 DwellTime 400ms")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 11), "g-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10), "k-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 9), "c-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 8), "m-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 7), "y-", linewidth=3, alpha=0.05)
# required dwellTime consideration
ax.plot([0], [0], "b-", label="SF12", linewidth=3, alpha=1)
# NOTE(review): SF11's legend entry uses "c-" (cyan), the same color as SF9
# below; other figures use "g-" for SF11 -- possibly unintended
ax.plot([0], [0], "c-", label="SF11", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 19), get_line(mpsrange(8, 19), 10), "k-", label="SF10", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 61), get_line(mpsrange(8, 61), 9), "c-", label="SF9", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 133), get_line(mpsrange(8, 133), 8), "m-", label="SF8", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-", label="SF7", linewidth=3, alpha=1)
# horizontal marker at the 400 ms dwell-time limit
ax.plot(x, [400 for i in range(0, 255)], "r,", linewidth=1, alpha=0.7)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-with-dwelltime.png")
#########
# Figure 4: AS923 combined view -- thin curves for the no-dwell-time case,
# thick curves for the dwell-time-limited case, plus SF7 at 250 kHz.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("AS923")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 11), "g-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10), "k-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 9), "c-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 8), "m-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 7), "y-", linewidth=3, alpha=0.05)
# no dwellTime consideration
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 12), "b-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 11), "g-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 10), "k-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 123), get_line(mpsrange(8, 123), 9), "c-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 8), "m-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-", linewidth=1.2, alpha=0.7)
# required dwellTime consideration
ax.plot([0], [0], "b-", label="SF12/125kHz", linewidth=3, alpha=1)
ax.plot([0], [0], "g-", label="SF11/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 19), get_line(mpsrange(8, 19), 10), "k-",
        label="SF10/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 61), get_line(mpsrange(8, 61), 9), "c-",
        label="SF9/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 133), get_line(mpsrange(8, 133), 8), "m-",
        label="SF8/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-",
        label="SF7/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7, bw=250), "b--",
        label="SF7/250kHz", linewidth=3, alpha=0.5)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-toa.png")
#########
# Figure 5: same as Figure 4 plus the ARIB STD-T108 reference lines at
# 200 ms, 400 ms and 4000 ms.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("AS923 and ARIB STD-T108")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 11), "g-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10), "k-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 9), "c-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 8), "m-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 7), "y-", linewidth=3, alpha=0.05)
# no dwellTime consideration
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 12), "b-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 11), "g-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 10), "k-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 123), get_line(mpsrange(8, 123), 9), "c-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 8), "m-", linewidth=1.2, alpha=0.7)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-", linewidth=1.2, alpha=0.7)
# required dwellTime consideration
ax.plot([0], [0], "b-", label="SF12/125kHz", linewidth=3, alpha=1)
ax.plot([0], [0], "g-", label="SF11/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 19), get_line(mpsrange(8, 19), 10), "k-",
        label="SF10/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 61), get_line(mpsrange(8, 61), 9), "c-",
        label="SF9/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 133), get_line(mpsrange(8, 133), 8), "m-",
        label="SF8/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-",
        label="SF7/125kHz", linewidth=3, alpha=1)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7, bw=250), "b--",
        label="SF7/250kHz", linewidth=3, alpha=0.5)
# ARIB STD-T108 timing references (ms)
ax.plot(x, [400 for i in range(0, 255)], "r--", linewidth=2, alpha=0.7)
ax.plot(x, [200 for i in range(0, 255)], "r--", linewidth=2, alpha=0.7)
ax.plot(x, [4000 for i in range(0, 255)], "r--", linewidth=2, alpha=0.7)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-with-arib180.png")
#########
# Figure 6: SF12 comparison, 125 kHz (AS923) vs 500 kHz (other regions).
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("AS923 vs Others (SF12)")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 12, bw=500), "r-", linewidth=3, alpha=0.05)
# no dwellTime consideration
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 12), "b-",
        label="SF12/125kHz", linewidth=3.0, alpha=1)
# LoRa: SF12 / 500 kHz
ax.plot(mpsrange(8, 61), get_line(mpsrange(8, 61), 12, bw=500), "r-",
        label="SF12/500kHz", linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="best", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-vs-others-sf12.png")
#########
# Figure 7: SF10 comparison, 125 kHz (AS923) vs 500 kHz (other regions).
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("AS923 vs Others (SF10)")
x = range(0, 255)
ax.plot(x, get_line(x, 10), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10, bw=500), "r-", linewidth=3, alpha=0.05)
# no dwellTime consideration
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 10), "b-",
        label="SF10/125kHz", linewidth=3.0, alpha=1)
# LoRa: SF10 / 500 kHz
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 10, bw=500), "r-",
        label="SF10/500kHz", linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="best", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-vs-others-sf10.png")
#########
# Figure 8: full LoRaWAN data-rate table -- all SF/BW combinations, each
# drawn over its maximum MACPayload range (see the tables below).
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1,1,1)
ax.set_title("LoRaWAN")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 11), "g-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10), "k-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 9), "c-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 8), "m-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 7), "y-", linewidth=3, alpha=0.05)
#    SF  BW   bit rate  Max. MACPayload
#    12  125  250       59
#    11  125  440       59
#    10  125  980       59
#    9   125  1760      123
#    8   125  3125      250
#    7   125  5470      250
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 12), "b-",
        label="SF12/125kHz", linewidth=2.0)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 11), "g-",
        label="SF11/125kHz", linewidth=2.0)
ax.plot(mpsrange(8, 59), get_line(mpsrange(8, 59), 10), "k-",
        label="SF10/125kHz", linewidth=2.0)
ax.plot(mpsrange(8, 123), get_line(mpsrange(8, 123), 9), "c-",
        label="SF9/125kHz", linewidth=2.0)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 8), "m-",
        label="SF8/125kHz", linewidth=2.0)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7), "y-",
        label="SF7/125kHz", linewidth=2.0)
#    SF  BW   bit rate  Max. MACPayload
#    7   250  11000     250
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7, bw=250), "b-.",
        label="SF7/250kHz", linewidth=2.0)
#    SF  BW   bit rate  Max. MACPayload
#    12  500  980       61
#    11  500  1760      137
#    10  500  3900      250
#    9   500  7000      250
#    8   500  12500     250
#    7   500  21900     250
ax.plot(mpsrange(8, 61), get_line(mpsrange(8, 61), 12, bw=500), "b--",
        label="SF12/500kHz", linewidth=2.0)
ax.plot(mpsrange(8, 137), get_line(mpsrange(8, 137), 11, bw=500), "g--",
        label="SF11/500kHz", linewidth=2.0)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 10, bw=500), "k--",
        label="SF10/500kHz", linewidth=2.0)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 9, bw=500), "c--",
        label="SF9/500kHz", linewidth=2.0)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 8, bw=500), "m--",
        label="SF8/500kHz", linewidth=2.0)
ax.plot(mpsrange(8, 250), get_line(mpsrange(8, 250), 7, bw=500), "y--",
        label="SF7/500kHz", linewidth=2.0)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper right", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/lorawan-toa.png")
| [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((286, 353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (296, 353), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1070, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1130, 1197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (1140, 1197), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2518, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2589, 2656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (2599, 2656), True, 'import matplotlib.pyplot as plt\n'), ((3918, 3928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3926, 3928), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (4004, 4061), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6009), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6007, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6064, 6131), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (6074, 6131), True, 'import matplotlib.pyplot as plt\n'), ((8305, 8315), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8313, 8315), True, 'import matplotlib.pyplot as 
plt\n'), ((8379, 8446), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (8389, 8446), True, 'import matplotlib.pyplot as plt\n'), ((9152, 9162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9160, 9162), True, 'import matplotlib.pyplot as plt\n'), ((9228, 9295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (9238, 9295), True, 'import matplotlib.pyplot as plt\n'), ((10003, 10013), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10011, 10013), True, 'import matplotlib.pyplot as plt\n'), ((10079, 10146), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 8)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')\n", (10089, 10146), True, 'import matplotlib.pyplot as plt\n'), ((12682, 12692), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12690, 12692), True, 'import matplotlib.pyplot as plt\n')] |
import tensorflow as tf
if __name__ == "__main__":
    # Freeze a trained TF1 policy/value network: restore the latest
    # checkpoint, convert variables to constants, and write a single
    # graph.bytes file (e.g. for loading from Unity/C#).
    with tf.Session() as sess:
        game_dir = "Gobang"
        model_dir = "model2_10_10_5"
        batch = "11000"
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        # Get the newest checkpoint; this just parses the checkpoint file
        latest_ckpt = tf.train.latest_checkpoint("../" + game_dir + "/" + model_dir + "/" + batch)
        # Load the graph definition from the .meta file
        restore_saver = tf.train.import_meta_graph("../" + game_dir + "/" + model_dir + "/" + batch + "/policy_value_net.model.meta")
        # Restore the graph, i.e. load the weights etc. into their places
        restore_saver.restore(sess, latest_ckpt)
        # Convert the graph's variables into constants, keeping only the
        # subgraph needed to compute the two named output tensors
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, ["action_fc/LogSoftmax", "evaluation_fc2/Tanh"])
        # Save the frozen graph to the "graph.bytes" file (binary protobuf)
        tf.train.write_graph(output_graph_def, "../" + game_dir + "/" + model_dir + "/" + batch, "graph.bytes", as_text=False)
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.train.import_meta_graph",
"tensorflow.train.write_graph",
"tensorflow.train.latest_checkpoint"
] | [((61, 73), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (71, 73), True, 'import tensorflow as tf\n'), ((311, 387), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["('../' + game_dir + '/' + model_dir + '/' + batch)"], {}), "('../' + game_dir + '/' + model_dir + '/' + batch)\n", (337, 387), True, 'import tensorflow as tf\n'), ((427, 540), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["('../' + game_dir + '/' + model_dir + '/' + batch +\n '/policy_value_net.model.meta')"], {}), "('../' + game_dir + '/' + model_dir + '/' + batch +\n '/policy_value_net.model.meta')\n", (453, 540), True, 'import tensorflow as tf\n'), ((671, 791), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['action_fc/LogSoftmax', 'evaluation_fc2/Tanh']"], {}), "(sess, sess.graph_def, [\n 'action_fc/LogSoftmax', 'evaluation_fc2/Tanh'])\n", (715, 791), True, 'import tensorflow as tf\n'), ((854, 976), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['output_graph_def', "('../' + game_dir + '/' + model_dir + '/' + batch)", '"""graph.bytes"""'], {'as_text': '(False)'}), "(output_graph_def, '../' + game_dir + '/' + model_dir +\n '/' + batch, 'graph.bytes', as_text=False)\n", (874, 976), True, 'import tensorflow as tf\n'), ((207, 240), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (238, 240), True, 'import tensorflow as tf\n')] |
from django.db import connection
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view()
def root(request):
    """Root endpoint: greet the caller and point to the next example.

    Handles GET (the api_view default) and returns a small JSON payload.
    """
    return Response({
        "message": "Hello, from Yappa!",
        # Fixed grammar of the user-facing hint ("you managed" -> "your managed").
        "next step": "go to the next example: "
                     "connect your managed Postgresql!",
    })
| [
"rest_framework.response.Response",
"rest_framework.decorators.api_view"
] | [((128, 138), 'rest_framework.decorators.api_view', 'api_view', ([], {}), '()\n', (136, 138), False, 'from rest_framework.decorators import api_view\n'), ((169, 288), 'rest_framework.response.Response', 'Response', (["{'message': 'Hello, from Yappa!', 'next step':\n 'go to the next example: connect you managed Postgresql!'}"], {}), "({'message': 'Hello, from Yappa!', 'next step':\n 'go to the next example: connect you managed Postgresql!'})\n", (177, 288), False, 'from rest_framework.response import Response\n')] |
from PyQt4.QtCore import (QAbstractTableModel, QModelIndex, QVariant, Qt,
SIGNAL)
import operator
import logging
from globalvalues.constants.plottingconstants import PlottingConstants
from PyQt4 import QtGui, QtCore
from globalvalues.appsettings import AppSettings
logger = logging.getLogger('console')
class LogLayoutTableModel(QAbstractTableModel):
    '''Table model exposing a list of [id, name] log entries to a QTableView.

    see http://stackoverflow.com/questions/13144486/pyqt-checkbox-delegate-inside-tableview
    QTableView can display checkboxes without a delegate. Look for the CheckStateRole.
    If you use the proper data, setData and flags methods for your model, you should be fine without any delegate.
    '''
    def __init__(self, parent, logList, logHeaders, *args):
        # logList: list of [id, name] pairs (the assert in data() checks length 2)
        # logHeaders: column header strings
        QAbstractTableModel.__init__(self, parent, *args)
        self.logList = logList
        self.logHeaders = logHeaders
    def rowCount(self, parent):
        # One row per log entry.
        return len(self.logList)
    def columnCount(self, parent):
        # One column per header string.
        return len(self.logHeaders)
    def data(self, index, role=Qt.DisplayRole):
        # Return display data for the given cell; None for anything else.
        if (not index.isValid() or
            not (0 <= index.row() < len(self.logList))):
            return None
        column = index.column()
        if role == Qt.DisplayRole:
            #Magic number here but how else to specify this column?
            # Column 0 shows the entry's name (element [1] of the pair).
            if column == 0:
                try:
                    logger.debug(" row: "+str(index.row())+" column: "+str(index.column()))
                    idNameList = self.logList[index.row()]
                    #check that the sub-list is correct length
                    if AppSettings.isDebugMode:
                        assert len(idNameList)==2
                    value = idNameList[1]
                    return value
                except Exception as ex:
                    # Log and fall back to an empty cell rather than crashing the view.
                    template = "An exception of type {0} occured. Arguments:\n{1!r}"
                    message = template.format(type(ex).__name__, ex.args)
                    logger.debug(message)
                    return None
            else:
                # NOTE(review): returning a QTableWidgetItem from data() for
                # DisplayRole is unusual -- the docstring's own advice is to use
                # Qt.CheckStateRole for checkbox columns; confirm the view
                # actually renders this as intended.
                chkBoxItem = QtGui.QTableWidgetItem()
                chkBoxItem.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
                chkBoxItem.setCheckState(QtCore.Qt.Unchecked)
                return chkBoxItem
        return None
    def headerData(self, col, orientation, role):
        # Horizontal headers come from logHeaders; everything else defers to the base class.
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            logger.debug("--logHeadersData() "+str(self.logHeaders[col]))
            return self.logHeaders[col]
        return QAbstractTableModel.headerData(self, col, orientation, role)
    def sort(self, col, order):
        """sort table by given column number col"""
        # Old-style (PyQt4) signal emission to tell attached views a relayout is coming.
        self.emit(SIGNAL("layoutAboutToBeChanged()"))
        self.logList = sorted(self.logList,
                              key=operator.itemgetter(col))
        if order == Qt.DescendingOrder:
            self.logList.reverse()
        self.emit(SIGNAL("layoutChanged()"))
"logging.getLogger",
"PyQt4.QtCore.QAbstractTableModel.headerData",
"PyQt4.QtGui.QTableWidgetItem",
"PyQt4.QtCore.SIGNAL",
"PyQt4.QtCore.QAbstractTableModel.__init__",
"operator.itemgetter"
] | [((280, 308), 'logging.getLogger', 'logging.getLogger', (['"""console"""'], {}), "('console')\n", (297, 308), False, 'import logging\n'), ((734, 783), 'PyQt4.QtCore.QAbstractTableModel.__init__', 'QAbstractTableModel.__init__', (['self', 'parent', '*args'], {}), '(self, parent, *args)\n', (762, 783), False, 'from PyQt4.QtCore import QAbstractTableModel, QModelIndex, QVariant, Qt, SIGNAL\n'), ((2540, 2600), 'PyQt4.QtCore.QAbstractTableModel.headerData', 'QAbstractTableModel.headerData', (['self', 'col', 'orientation', 'role'], {}), '(self, col, orientation, role)\n', (2570, 2600), False, 'from PyQt4.QtCore import QAbstractTableModel, QModelIndex, QVariant, Qt, SIGNAL\n'), ((2704, 2738), 'PyQt4.QtCore.SIGNAL', 'SIGNAL', (['"""layoutAboutToBeChanged()"""'], {}), "('layoutAboutToBeChanged()')\n", (2710, 2738), False, 'from PyQt4.QtCore import QAbstractTableModel, QModelIndex, QVariant, Qt, SIGNAL\n'), ((2919, 2944), 'PyQt4.QtCore.SIGNAL', 'SIGNAL', (['"""layoutChanged()"""'], {}), "('layoutChanged()')\n", (2925, 2944), False, 'from PyQt4.QtCore import QAbstractTableModel, QModelIndex, QVariant, Qt, SIGNAL\n'), ((2057, 2081), 'PyQt4.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', ([], {}), '()\n', (2079, 2081), False, 'from PyQt4 import QtGui, QtCore\n'), ((2800, 2824), 'operator.itemgetter', 'operator.itemgetter', (['col'], {}), '(col)\n', (2819, 2824), False, 'import operator\n')] |
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from metakernel import Magic
class SASsessionMagic(Magic):
    """Line magic that forwards session configuration to the SAS kernel."""
    def __init__(self, *args, **kwargs):
        super(SASsessionMagic, self).__init__(*args, **kwargs)
    def line_SASsession(self, *args):
        """
        SAS Kernel magic allows a programmatic way to submit configuration
        details.
        This magic is only available within the SAS Kernel
        """
        # The arguments may arrive as several tokens or as one token sequence.
        if len(args) > 1:
            args = ''.join(args)
        elif len(args) == 1:
            args = ''.join(args[0])
        # Strip whitespace and both quote styles before parsing.
        for unwanted in (' ', '"', "'"):
            args = args.replace(unwanted, '')
        # "key1=val1,key2=val2" -> {'key1': 'val1', 'key2': 'val2'}
        sess_params = {}
        for assignment in args.split(','):
            name, value = assignment.split('=')
            sess_params[name] = value
        self.kernel._allow_stdin = True
        self.kernel._start_sas(**sess_params)
def register_magics(kernel):
    # Entry point called by metakernel-based kernels to install the
    # %SASsession magic on the given kernel instance.
    kernel.register_magics(SASsessionMagic)
def register_ipython_magics():
    """Make the %SASsession line magic usable from a plain IPython session."""
    from metakernel import IPythonKernel
    from IPython.core.magic import register_line_magic
    kernel = IPythonKernel()
    # Make magics callable:
    kernel.line_magics["SASsession"] = SASsessionMagic(kernel)
    def SASsession(line):
        # Delegate to the metakernel dispatcher.
        kernel.call_magic("%SASsession " + line)
    register_line_magic(SASsession)
| [
"metakernel.IPythonKernel"
] | [((1563, 1578), 'metakernel.IPythonKernel', 'IPythonKernel', ([], {}), '()\n', (1576, 1578), False, 'from metakernel import IPythonKernel\n')] |
from datetime import date
from models import gtfs, config, util, nextbus, routeconfig
import argparse
import shapely
import partridge as ptg
import numpy as np
from pathlib import Path
import requests
import json
import boto3
import gzip
import hashlib
import math
import zipfile
# Downloads and parses the GTFS specification
# and saves the configuration for all routes to S3.
# The S3 object contains data merged from GTFS and the Nextbus API (for agencies using Nextbus).
# The frontend can then request this S3 URL directly without hitting the Python backend.
# For each direction, the JSON object contains a coords array defining the shape of the route,
# where the values are objects containing lat/lon properties:
#
# "coords":[
# {"lat":37.80707,"lon":-122.41727}
# {"lat":37.80727,"lon":-122.41562},
# {"lat":37.80748,"lon":-122.41398},
# {"lat":37.80768,"lon":-122.41234},
# ...
# ]
#
# For each direction, the JSON object also contains a stop_geometry object where the keys are stop IDs
# and the values are objects with a distance property (cumulative distance in meters to that stop along the GTFS # shape),
# and an after_index property (index into the coords array of the last coordinate before that stop).
#
# "stop_geometry":{
# "5184":{"distance":8,"after_index":0},
# "3092":{"distance":279,"after_index":1},
# "3095":{"distance":573,"after_index":3},
# "4502":{"distance":1045,"after_index":8},
# ...
#}
#
# In order to match a Nextbus direction with a GTFS shape_id, this finds the GTFS shape_id for that route where
# distance(first coordinate of shape, first stop location) + distance(last coordinate of shape, last stop location)
# is a minimum.
#
# Currently the script just overwrites the one S3 path, but this process could be extended in the future to
# store different paths for different dates, to allow fetching historical data for route configurations.
#
def match_nextbus_direction(nextbus_route_config, geometry):
    """Pick the Nextbus direction whose terminal stops best match a GTFS shape.

    For every Nextbus direction of the route, sum the haversine distance from
    its first stop to the start of the shape and from its last stop to the end
    of the shape; the direction with the smallest sum is the match.

    Returns (best_direction_info, best_terminal_dist_in_meters).
    """
    first_coord = geometry.coords[0]   # shapely coords are (lon, lat)
    last_coord = geometry.coords[-1]
    dir_infos = nextbus_route_config.get_direction_infos()
    def terminal_dist(dir_info):
        # Combined distance between this direction's terminal stops and the
        # two endpoints of the GTFS shape.
        stop_ids = dir_info.get_stop_ids()
        first_stop = nextbus_route_config.get_stop_info(stop_ids[0])
        last_stop = nextbus_route_config.get_stop_info(stop_ids[-1])
        start_dist = util.haver_distance(first_stop.lat, first_stop.lon, first_coord[1], first_coord[0])
        end_dist = util.haver_distance(last_stop.lat, last_stop.lon, last_coord[1], last_coord[0])
        return start_dist + end_dist
    terminal_dists = [terminal_dist(dir_info) for dir_info in dir_infos]
    # Index of the direction with the smallest combined terminal distance.
    best_index = np.argsort(terminal_dists)[0]
    return dir_infos[best_index], terminal_dists[best_index]
def get_stop_geometry(stop_xy, shape_lines_xy, shape_cumulative_dist, start_index):
    """Locate the first position of a stop along a route shape.

    Scans the shape's line segments (XY coordinates in meters) from
    start_index onward for the segment closest to stop_xy. Used by the
    frontend to draw line segments along a route between two stops.

    Returns a dict with:
        distance    -- total meters along the route shape to this stop
        after_index -- index of the shape coordinate just before this stop
        offset      -- meters between the stop and the closest line segment
    """
    best_offset = 99999999  # smallest stop-to-segment distance seen so far
    best_index = 0          # segment index that produced best_offset
    for seg_index in range(start_index, len(shape_lines_xy)):
        seg_offset = shape_lines_xy[seg_index].distance(stop_xy)
        if seg_offset < best_offset:
            best_offset = seg_offset
            best_index = seg_index
        # Once within 50 m and the offsets start growing again, we have
        # passed the stop -- no need to scan the rest of the shape.
        if best_offset < 50 and seg_offset > best_offset:
            break
    shape_point = shapely.geometry.Point(shape_lines_xy[best_index].coords[0])
    distance_after_shape_point = stop_xy.distance(shape_point)
    distance_to_shape_point = shape_cumulative_dist[best_index]
    stop_dist = distance_to_shape_point + distance_after_shape_point
    if best_offset > 30:
        print(f'    stop_dist = {int(stop_dist)} = ({int(distance_to_shape_point)} + {int(distance_after_shape_point)}),  offset = {int(best_offset)},  after_index = {best_index} ')
    return {
        'distance': int(stop_dist), # total distance in meters along the route shape to this stop
        'after_index': best_index, # the index of the coordinate of the shape just before this stop
        'offset': int(best_offset) # distance in meters between this stop and the closest line segment of shape
    }
def get_unique_shapes(direction_trips_df, stop_times_df, stops_map, normalize_gtfs_stop_id):
    """Find the unique shapes for a GTFS route/direction, merging shapes that
    contain common subsequences of stops.

    The surviving unique shapes may represent multiple branches of a route.

    Parameters:
        direction_trips_df     -- trips.txt rows for this route/direction
        stop_times_df          -- full stop_times.txt DataFrame
        stops_map              -- OpenTransit stop ID -> stops.txt row
        normalize_gtfs_stop_id -- maps a GTFS stop_id to an OpenTransit stop ID

    Returns a list of dicts with properties 'shape_id', 'count' and
    'stop_ids', sorted by count in descending order.
    """
    stop_times_trip_id_values = stop_times_df['trip_id'].values
    direction_shape_id_values = direction_trips_df['shape_id'].values
    unique_shapes_map = {}
    # Process shape_ids most-frequent first so the dominant variant becomes
    # the representative entry that less common variants merge into.
    direction_shape_ids, direction_shape_id_counts = np.unique(direction_shape_id_values, return_counts=True)
    direction_shape_id_order = np.argsort(-1 * direction_shape_id_counts)
    direction_shape_ids = direction_shape_ids[direction_shape_id_order]
    direction_shape_id_counts = direction_shape_id_counts[direction_shape_id_order]
    for shape_id, shape_id_count in zip(direction_shape_ids, direction_shape_id_counts):
        # Use one representative trip to recover the ordered stop sequence.
        shape_trip = direction_trips_df[direction_shape_id_values == shape_id].iloc[0]
        shape_trip_id = shape_trip.trip_id
        shape_trip_stop_times = stop_times_df[stop_times_trip_id_values == shape_trip_id].sort_values('stop_sequence')
        shape_trip_stop_ids = [
            normalize_gtfs_stop_id(gtfs_stop_id)
            for gtfs_stop_id in shape_trip_stop_times['stop_id'].values
        ]
        # Key is a digest of the stop sequence, so identical sequences with
        # different shape_ids collapse into one entry.
        unique_shape_key = hashlib.sha256(json.dumps(shape_trip_stop_ids).encode('utf-8')).hexdigest()[0:12]
        if unique_shape_key not in unique_shapes_map:
            # Iterate over a snapshot: the loop body deletes entries from
            # unique_shapes_map, which is a "dict changed size during
            # iteration" hazard on the live view. (The previous version only
            # avoided the RuntimeError because it always broke immediately
            # after the del.)
            for other_shape_key, other_shape_info in list(unique_shapes_map.items()):
                if is_subsequence(shape_trip_stop_ids, other_shape_info['stop_ids']):
                    print(f"    shape {shape_id} is subsequence of shape {other_shape_info['shape_id']}")
                    unique_shape_key = other_shape_key
                    break
                elif is_subsequence(other_shape_info['stop_ids'], shape_trip_stop_ids):
                    print(f"    shape {other_shape_info['shape_id']} is subsequence of shape {shape_id}")
                    # The existing (shorter) shape is absorbed by this one.
                    shape_id_count += other_shape_info['count']
                    del unique_shapes_map[other_shape_key]
                    break
        if unique_shape_key not in unique_shapes_map:
            unique_shapes_map[unique_shape_key] = {
                'count': 0,
                'shape_id': shape_id,
                'stop_ids': shape_trip_stop_ids
            }
        unique_shapes_map[unique_shape_key]['count'] += shape_id_count
    sorted_shapes = sorted(unique_shapes_map.values(), key=lambda shape: -1 * shape['count'])
    for shape_info in sorted_shapes:
        count = shape_info['count']
        shape_id = shape_info['shape_id']
        stop_ids = shape_info['stop_ids']
        first_stop_id = stop_ids[0]
        last_stop_id = stop_ids[-1]
        first_stop = stops_map[first_stop_id]
        last_stop = stops_map[last_stop_id]
        print(f'    shape_id: {shape_id} ({count}x) stops:{len(stop_ids)} from {first_stop_id} {first_stop.stop_name} to {last_stop_id} {last_stop.stop_name} {",".join(stop_ids)}')
    return sorted_shapes
def download_gtfs_data(agency: config.Agency, gtfs_cache_dir):
    """Fetch and unpack the agency's GTFS feed into gtfs_cache_dir.

    Does nothing if the cache directory already exists from a previous run.
    Raises an Exception when the agency has no gtfs_url configured or the
    download does not return HTTP 200.
    """
    gtfs_url = agency.gtfs_url
    if gtfs_url is None:
        raise Exception(f'agency {agency.id} does not have gtfs_url in config')
    if Path(gtfs_cache_dir).exists():
        # Already downloaded and extracted.
        return
    print(f'downloading gtfs data from {gtfs_url}')
    response = requests.get(gtfs_url)
    if response.status_code != 200:
        raise Exception(f"Error fetching {gtfs_url}: HTTP {response.status_code}: {response.text}")
    zip_path = f'{util.get_data_dir()}/gtfs-{agency.id}.zip'
    with open(zip_path, 'wb') as local_zip:
        local_zip.write(response.content)
    with zipfile.ZipFile(zip_path, 'r') as archive:
        archive.extractall(gtfs_cache_dir)
def is_subsequence(smaller, bigger):
    """Return True if `smaller` occurs as a contiguous run inside `bigger`.

    Fixes two defects of the previous implementation:
    - it only examined the FIRST occurrence of smaller[0] in bigger, so e.g.
      is_subsequence([1, 2], [1, 3, 1, 2]) was wrongly False;
    - it raised IndexError for an empty `smaller` (now trivially True).
    """
    smaller_len = len(smaller)
    bigger_len = len(bigger)
    if smaller_len > bigger_len:
        return False
    if smaller_len == 0:
        # The empty sequence is a subsequence of anything.
        return True
    search_from = 0
    # Try every occurrence of the first element, not just the first one.
    while True:
        try:
            start_pos = bigger.index(smaller[0], search_from)
        except ValueError:
            return False
        end_pos = start_pos + smaller_len
        if end_pos > bigger_len:
            # Later occurrences start even further right, so none can fit.
            return False
        if smaller == bigger[start_pos:end_pos]:
            return True
        search_from = start_pos + 1
def save_routes_for_agency(agency: config.Agency, save_to_s3=True):
    """Build the merged route configuration for one agency and save it.

    Loads the agency's GTFS feed (downloading it first if necessary), merges
    per-route/direction data (titles, stops, shape coordinates, per-stop
    distances along the shape) -- plus Nextbus metadata for Nextbus-backed
    agencies -- and writes the result as compact JSON to the local cache and,
    optionally, to the configured S3 bucket.

    agency     -- agency configuration object
    save_to_s3 -- when True, also upload the gzipped JSON to S3
    """
    agency_id = agency.id
    gtfs_cache_dir = f'{util.get_data_dir()}/gtfs-{agency_id}'
    download_gtfs_data(agency, gtfs_cache_dir)
    feed = ptg.load_geo_feed(gtfs_cache_dir, {})
    print(f"Loading {agency_id} routes...")
    routes_df = feed.routes
    if agency.gtfs_agency_id is not None:
        routes_df = routes_df[routes_df.agency_id == agency.gtfs_agency_id]
    routes_data = []
    print(f"Loading {agency_id} trips...")
    trips_df = feed.trips
    # direction_id is compared against string IDs below, so normalize to str.
    trips_df['direction_id'] = trips_df['direction_id'].astype(str)
    print(f"Loading {agency_id} stop times...")
    stop_times_df = feed.stop_times
    print(f"Loading {agency_id} shapes...")
    shapes_df = feed.shapes
    print(f"Loading {agency_id} stops...")
    stops_df = feed.stops
    # gtfs_stop_ids_map allows looking up row from stops.txt via GTFS stop_id
    gtfs_stop_ids_map = {stop.stop_id: stop for stop in stops_df.itertuples()}
    stop_id_gtfs_field = agency.stop_id_gtfs_field
    # get OpenTransit stop ID for GTFS stop_id (may be the same)
    def normalize_gtfs_stop_id(gtfs_stop_id):
        if stop_id_gtfs_field != 'stop_id':
            return getattr(gtfs_stop_ids_map[gtfs_stop_id], stop_id_gtfs_field)
        else:
            return gtfs_stop_id
    # stops_map allows looking up row from stops.txt via OpenTransit stop ID
    if stop_id_gtfs_field != 'stop_id':
        stops_map = {getattr(stop, stop_id_gtfs_field): stop for stop in stops_df.itertuples()}
    else:
        stops_map = gtfs_stop_ids_map
    if agency.provider == 'nextbus':
        # Nextbus's own route ordering is reused as the sort order below.
        nextbus_route_order = [route.id for route in nextbus.get_route_list(agency.nextbus_id)]
    for route in routes_df.itertuples():
        gtfs_route_id = route.route_id
        short_name = route.route_short_name
        long_name = route.route_long_name
        # Prefer "short - long"; fall back to whichever name is a string.
        if isinstance(short_name, str) and isinstance(long_name, str):
            title = f'{short_name} - {long_name}'
        elif isinstance(short_name, str):
            title = short_name
        else:
            title = long_name
        # NOTE(review): `type` shadows the builtin inside this loop body.
        type = int(route.route_type) if hasattr(route, 'route_type') else None
        url = route.route_url if hasattr(route, 'route_url') and isinstance(route.route_url, str) else None
        #color = route.route_color
        #text_color = route.route_text_color
        route_id = getattr(route, agency.route_id_gtfs_field)
        if agency.provider == 'nextbus':
            route_id = route_id.replace('-', '_') # hack to handle muni route IDs where e.g. GTFS has "T-OWL" but nextbus has "T_OWL"
            try:
                nextbus_route_config = nextbus.get_route_config(agency.nextbus_id, route_id)
                title = nextbus_route_config.title
            except Exception as ex:
                # Route exists in GTFS but not in Nextbus: skip it entirely.
                print(ex)
                continue
            try:
                sort_order = nextbus_route_order.index(route_id)
            except ValueError as ex:
                print(ex)
                sort_order = None
        else:
            sort_order = int(route.route_sort_order) if hasattr(route, 'route_sort_order') else None
        print(f'route {route_id} {title}')
        route_data = {
            'id': route_id,
            'title': title,
            'url': url,
            'type': type,
            #'color': color,
            #'text_color': text_color,
            'gtfs_route_id': gtfs_route_id,
            'sort_order': sort_order,
            'stops': {},
            'directions': [],
        }
        # NOTE(review): `directions` appears unused; direction dicts are
        # appended to route_data['directions'] inside add_direction below.
        directions = []
        route_directions_df = feed.get('route_directions.txt') # unofficial trimet gtfs extension
        if not route_directions_df.empty:
            route_directions_df = route_directions_df[route_directions_df['route_id'] == gtfs_route_id]
        else:
            route_directions_df = None
        routes_data.append(route_data)
        route_trips_df = trips_df[trips_df['route_id'] == gtfs_route_id]
        route_direction_id_values = route_trips_df['direction_id'].values
        def add_custom_direction(custom_direction_info):
            # Build one direction from a hand-written config entry, choosing
            # the unique GTFS shape that contains all 'included_stop_ids'
            # (in order) and none of the 'excluded_stop_ids'.
            direction_id = custom_direction_info['id']
            print(f' custom direction = {direction_id}')
            gtfs_direction_id = custom_direction_info['gtfs_direction_id']
            direction_trips_df = route_trips_df[route_direction_id_values == gtfs_direction_id]
            included_stop_ids = custom_direction_info.get('included_stop_ids', [])
            excluded_stop_ids = custom_direction_info.get('excluded_stop_ids', [])
            shapes = get_unique_shapes(
                direction_trips_df=direction_trips_df,
                stop_times_df=stop_times_df,
                stops_map=stops_map,
                normalize_gtfs_stop_id=normalize_gtfs_stop_id
            )
            def contains_included_stops(shape_stop_ids):
                # All included stops must appear, in order.
                min_index = 0
                for stop_id in included_stop_ids:
                    try:
                        index = shape_stop_ids.index(stop_id, min_index)
                    except ValueError:
                        return False
                    min_index = index + 1 # stops must appear in same order as in included_stop_ids
                return True
            def contains_excluded_stop(shape_stop_ids):
                for stop_id in excluded_stop_ids:
                    try:
                        index = shape_stop_ids.index(stop_id)
                        return True
                    except ValueError:
                        pass
                return False
            matching_shapes = []
            for shape in shapes:
                shape_stop_ids = shape['stop_ids']
                if contains_included_stops(shape_stop_ids) and not contains_excluded_stop(shape_stop_ids):
                    matching_shapes.append(shape)
            # Exactly one shape must survive the filters; anything else means
            # the custom direction config is ambiguous or wrong.
            if len(matching_shapes) != 1:
                matching_shape_ids = [shape['shape_id'] for shape in matching_shapes]
                error_message = f'{len(matching_shapes)} shapes found for route {route_id} with GTFS direction ID {gtfs_direction_id}'
                if len(included_stop_ids) > 0:
                    error_message += f" including {','.join(included_stop_ids)}"
                if len(excluded_stop_ids) > 0:
                    error_message += f" excluding {','.join(excluded_stop_ids)}"
                if len(matching_shape_ids) > 0:
                    error_message += f": {','.join(matching_shape_ids)}"
                raise Exception(error_message)
            matching_shape = matching_shapes[0]
            matching_shape_id = matching_shape['shape_id']
            matching_shape_count = matching_shape['count']
            print(f' matching shape = {matching_shape_id} ({matching_shape_count} times)')
            add_direction(
                id=direction_id,
                gtfs_shape_id=matching_shape_id,
                gtfs_direction_id=gtfs_direction_id,
                stop_ids=matching_shape['stop_ids'],
                title=custom_direction_info.get('title', None)
            )
        def add_default_direction(direction_id):
            # Build one direction straight from GTFS, using the most common
            # shape observed for that direction_id.
            print(f' default direction = {direction_id}')
            direction_trips_df = route_trips_df[route_direction_id_values == direction_id]
            shapes = get_unique_shapes(
                direction_trips_df=direction_trips_df,
                stop_times_df=stop_times_df,
                stops_map=stops_map,
                normalize_gtfs_stop_id=normalize_gtfs_stop_id)
            best_shape = shapes[0]
            best_shape_id = best_shape['shape_id']
            best_shape_count = best_shape['count']
            print(f' most common shape = {best_shape_id} ({best_shape_count} times)')
            add_direction(
                id=direction_id,
                gtfs_shape_id=best_shape_id,
                gtfs_direction_id=direction_id,
                stop_ids=best_shape['stop_ids']
            )
        def add_direction(id, gtfs_shape_id, gtfs_direction_id, stop_ids, title = None):
            # Fill in route_data['directions'] and route_data['stops'] for one
            # direction: title, per-stop info, shape coords and stop geometry.
            if title is None:
                default_direction_info = agency.default_directions.get(gtfs_direction_id, {})
                title_prefix = default_direction_info.get('title_prefix', None)
                last_stop_id = stop_ids[-1]
                last_stop = stops_map[last_stop_id]
                if title_prefix is not None:
                    title = f"{title_prefix} to {last_stop.stop_name}"
                else:
                    title = f"To {last_stop.stop_name}"
            print(f' title = {title}')
            dir_data = {
                'id': id,
                'title': title,
                'gtfs_shape_id': gtfs_shape_id,
                'gtfs_direction_id': gtfs_direction_id,
                'stops': stop_ids,
                'stop_geometry': {},
            }
            route_data['directions'].append(dir_data)
            for stop_id in stop_ids:
                stop = stops_map[stop_id]
                stop_data = {
                    'id': stop_id,
                    'lat': round(stop.geometry.y, 5), # stop_lat in gtfs
                    'lon': round(stop.geometry.x, 5), # stop_lon in gtfs
                    'title': stop.stop_name,
                    'url': stop.stop_url if hasattr(stop, 'stop_url') and isinstance(stop.stop_url, str) else None,
                }
                route_data['stops'][stop_id] = stop_data
            geometry = shapes_df[shapes_df['shape_id'] == gtfs_shape_id]['geometry'].values[0]
            # partridge returns GTFS geometries for each shape_id as a shapely LineString
            # (https://shapely.readthedocs.io/en/stable/manual.html#linestrings).
            # Each coordinate is an array in [lon,lat] format (note: longitude first, latitude second)
            dir_data['coords'] = [
                {
                    'lat': round(coord[1], 5),
                    'lon': round(coord[0], 5)
                } for coord in geometry.coords
            ]
            if agency.provider == 'nextbus':
                # match nextbus direction IDs with GTFS direction IDs
                best_nextbus_dir_info, best_terminal_dist = match_nextbus_direction(nextbus_route_config, geometry)
                # NOTE(review): `direction_id` is not a parameter of
                # add_direction; it resolves through the enclosing function
                # scope and is only bound by the `for direction_id in ...`
                # loop on the default-direction path. Confirm the
                # custom-direction + nextbus combination cannot hit a
                # NameError here.
                print(f' {direction_id} = {best_nextbus_dir_info.id} (terminal_dist={int(best_terminal_dist)}) {" (questionable match)" if best_terminal_dist > 300 else ""}')
                # dir_data['title'] = best_nextbus_dir_info.title
                dir_data['nextbus_direction_id'] = best_nextbus_dir_info.id
            start_lat = geometry.coords[0][1]
            start_lon = geometry.coords[0][0]
            #print(f" start_lat = {start_lat} start_lon = {start_lon}")
            # Meters per degree of latitude/longitude near the shape's start,
            # derived from a 0.1-degree haversine sample.
            deg_lat_dist = util.haver_distance(start_lat, start_lon, start_lat-0.1, start_lon)*10
            deg_lon_dist = util.haver_distance(start_lat, start_lon, start_lat, start_lon-0.1)*10
            # projection function from lon/lat coordinates in degrees (z ignored) to x/y coordinates in meters.
            # satisfying the interface of shapely.ops.transform (https://shapely.readthedocs.io/en/stable/manual.html#shapely.ops.transform).
            # This makes it possible to use shapely methods to calculate the distance in meters between geometries
            def project_xy(lon, lat, z=None):
                return (round((lon - start_lon) * deg_lon_dist, 1), round((lat - start_lat) * deg_lat_dist, 1))
            xy_geometry = shapely.ops.transform(project_xy, geometry)
            shape_lon_lat = np.array(geometry).T
            shape_lon = shape_lon_lat[0]
            shape_lat = shape_lon_lat[1]
            shape_prev_lon = np.r_[shape_lon[0], shape_lon[:-1]]
            shape_prev_lat = np.r_[shape_lat[0], shape_lat[:-1]]
            # shape_cumulative_dist[i] is the cumulative distance in meters along the shape geometry from 0th to ith coordinate
            shape_cumulative_dist = np.cumsum(util.haver_distance(shape_lon, shape_lat, shape_prev_lon, shape_prev_lat))
            shape_lines_xy = [shapely.geometry.LineString(xy_geometry.coords[i:i+2]) for i in range(0, len(xy_geometry.coords) - 1)]
            # this is the total distance of the GTFS shape, which may not be exactly the same as the
            # distance along the route between the first and last Nextbus stop
            dir_data['distance'] = int(shape_cumulative_dist[-1])
            print(f" distance = {dir_data['distance']}")
            # Find each stop along the route shape, so that the frontend can draw line segments between stops along the shape
            start_index = 0
            for stop_id in stop_ids:
                stop_info = route_data['stops'][stop_id]
                # Need to project lon/lat coords to x/y in order for shapely to determine the distance between
                # a point and a line (shapely doesn't support distance for lon/lat coords)
                stop_xy = shapely.geometry.Point(project_xy(stop_info['lon'], stop_info['lat']))
                stop_geometry = get_stop_geometry(stop_xy, shape_lines_xy, shape_cumulative_dist, start_index)
                if stop_geometry['offset'] > 100:
                    print(f" !! bad geometry for stop {stop_id}: {stop_geometry['offset']} m from route line segment")
                    continue
                dir_data['stop_geometry'][stop_id] = stop_geometry
                start_index = stop_geometry['after_index']
        if route_id in agency.custom_directions:
            for custom_direction_info in agency.custom_directions[route_id]:
                add_custom_direction(custom_direction_info)
        else:
            for direction_id in np.unique(route_direction_id_values):
                add_default_direction(direction_id)
    if routes_data[0]['sort_order'] is not None:
        sort_key = lambda route_data: route_data['sort_order']
    else:
        sort_key = lambda route_data: route_data['id']
    routes_data = sorted(routes_data, key=sort_key)
    # Compact separators keep the cached JSON small.
    data_str = json.dumps({
        'version': routeconfig.DefaultVersion,
        'routes': routes_data
    }, separators=(',', ':'))
    cache_path = routeconfig.get_cache_path(agency_id)
    with open(cache_path, "w") as f:
        f.write(data_str)
    if save_to_s3:
        s3 = boto3.resource('s3')
        s3_path = routeconfig.get_s3_path(agency_id)
        s3_bucket = config.s3_bucket
        print(f'saving to s3://{s3_bucket}/{s3_path}')
        # NOTE(review): `object` shadows the builtin here.
        object = s3.Object(s3_bucket, s3_path)
        object.put(
            Body=gzip.compress(bytes(data_str, 'utf-8')),
            CacheControl='max-age=86400',
            ContentType='application/json',
            ContentEncoding='gzip',
            ACL='public-read'
        )
if __name__ == '__main__':
    # Command-line entry point: rebuild route config for one agency or all.
    arg_parser = argparse.ArgumentParser(description='Save route configuration from GTFS and possibly Nextbus API')
    arg_parser.add_argument('--agency', required=False, help='Agency ID')
    arg_parser.add_argument('--s3', dest='s3', action='store_true', help='store in s3')
    arg_parser.set_defaults(s3=False)
    args = arg_parser.parse_args()
    if args.agency is not None:
        agencies = [config.get_agency(args.agency)]
    else:
        agencies = config.agencies
    for agency in agencies:
        save_routes_for_agency(agency, args.s3)
| [
"zipfile.ZipFile",
"models.nextbus.get_route_list",
"shapely.geometry.Point",
"numpy.argsort",
"numpy.array",
"partridge.load_geo_feed",
"argparse.ArgumentParser",
"pathlib.Path",
"json.dumps",
"boto3.resource",
"models.nextbus.get_route_config",
"models.config.get_agency",
"shapely.ops.tran... | [((2964, 2990), 'numpy.argsort', 'np.argsort', (['terminal_dists'], {}), '(terminal_dists)\n', (2974, 2990), True, 'import numpy as np\n'), ((4156, 4216), 'shapely.geometry.Point', 'shapely.geometry.Point', (['shape_lines_xy[best_index].coords[0]'], {}), '(shape_lines_xy[best_index].coords[0])\n', (4178, 4216), False, 'import shapely\n'), ((5580, 5636), 'numpy.unique', 'np.unique', (['direction_shape_id_values'], {'return_counts': '(True)'}), '(direction_shape_id_values, return_counts=True)\n', (5589, 5636), True, 'import numpy as np\n'), ((5668, 5710), 'numpy.argsort', 'np.argsort', (['(-1 * direction_shape_id_counts)'], {}), '(-1 * direction_shape_id_counts)\n', (5678, 5710), True, 'import numpy as np\n'), ((8532, 8552), 'pathlib.Path', 'Path', (['gtfs_cache_dir'], {}), '(gtfs_cache_dir)\n', (8536, 8552), False, 'from pathlib import Path\n'), ((9646, 9683), 'partridge.load_geo_feed', 'ptg.load_geo_feed', (['gtfs_cache_dir', '{}'], {}), '(gtfs_cache_dir, {})\n', (9663, 9683), True, 'import partridge as ptg\n'), ((23533, 23634), 'json.dumps', 'json.dumps', (["{'version': routeconfig.DefaultVersion, 'routes': routes_data}"], {'separators': "(',', ':')"}), "({'version': routeconfig.DefaultVersion, 'routes': routes_data},\n separators=(',', ':'))\n", (23543, 23634), False, 'import json\n'), ((23671, 23708), 'models.routeconfig.get_cache_path', 'routeconfig.get_cache_path', (['agency_id'], {}), '(agency_id)\n', (23697, 23708), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((24300, 24403), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save route configuration from GTFS and possibly Nextbus API"""'}), "(description=\n 'Save route configuration from GTFS and possibly Nextbus API')\n", (24323, 24403), False, 'import argparse\n'), ((2644, 2742), 'models.util.haver_distance', 'util.haver_distance', (['first_stop_info.lat', 'first_stop_info.lon', 'shape_start[1]', 'shape_start[0]'], {}), 
'(first_stop_info.lat, first_stop_info.lon, shape_start[1\n ], shape_start[0])\n', (2663, 2742), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((2757, 2848), 'models.util.haver_distance', 'util.haver_distance', (['last_stop_info.lat', 'last_stop_info.lon', 'shape_end[1]', 'shape_end[0]'], {}), '(last_stop_info.lat, last_stop_info.lon, shape_end[1],\n shape_end[0])\n', (2776, 2848), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((8652, 8674), 'requests.get', 'requests.get', (['gtfs_url'], {}), '(gtfs_url)\n', (8664, 8674), False, 'import requests\n'), ((23806, 23826), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (23820, 23826), False, 'import boto3\n'), ((23845, 23879), 'models.routeconfig.get_s3_path', 'routeconfig.get_s3_path', (['agency_id'], {}), '(agency_id)\n', (23868, 23879), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((8951, 8981), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (8966, 8981), False, 'import zipfile\n'), ((9547, 9566), 'models.util.get_data_dir', 'util.get_data_dir', ([], {}), '()\n', (9564, 9566), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((20973, 21016), 'shapely.ops.transform', 'shapely.ops.transform', (['project_xy', 'geometry'], {}), '(project_xy, geometry)\n', (20994, 21016), False, 'import shapely\n'), ((23196, 23232), 'numpy.unique', 'np.unique', (['route_direction_id_values'], {}), '(route_direction_id_values)\n', (23205, 23232), True, 'import numpy as np\n'), ((24636, 24666), 'models.config.get_agency', 'config.get_agency', (['args.agency'], {}), '(args.agency)\n', (24653, 24666), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((8822, 8841), 'models.util.get_data_dir', 'util.get_data_dir', ([], {}), '()\n', (8839, 8841), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((11107, 11148), 
'models.nextbus.get_route_list', 'nextbus.get_route_list', (['agency.nextbus_id'], {}), '(agency.nextbus_id)\n', (11129, 11148), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((12121, 12174), 'models.nextbus.get_route_config', 'nextbus.get_route_config', (['agency.nextbus_id', 'route_id'], {}), '(agency.nextbus_id, route_id)\n', (12145, 12174), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((20249, 20318), 'models.util.haver_distance', 'util.haver_distance', (['start_lat', 'start_lon', '(start_lat - 0.1)', 'start_lon'], {}), '(start_lat, start_lon, start_lat - 0.1, start_lon)\n', (20268, 20318), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((20347, 20416), 'models.util.haver_distance', 'util.haver_distance', (['start_lat', 'start_lon', 'start_lat', '(start_lon - 0.1)'], {}), '(start_lat, start_lon, start_lat, start_lon - 0.1)\n', (20366, 20416), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((21046, 21064), 'numpy.array', 'np.array', (['geometry'], {}), '(geometry)\n', (21054, 21064), True, 'import numpy as np\n'), ((21455, 21528), 'models.util.haver_distance', 'util.haver_distance', (['shape_lon', 'shape_lat', 'shape_prev_lon', 'shape_prev_lat'], {}), '(shape_lon, shape_lat, shape_prev_lon, shape_prev_lat)\n', (21474, 21528), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((21561, 21617), 'shapely.geometry.LineString', 'shapely.geometry.LineString', (['xy_geometry.coords[i:i + 2]'], {}), '(xy_geometry.coords[i:i + 2])\n', (21588, 21617), False, 'import shapely\n'), ((6414, 6445), 'json.dumps', 'json.dumps', (['shape_trip_stop_ids'], {}), '(shape_trip_stop_ids)\n', (6424, 6445), False, 'import json\n')] |
"""AVLetters lip dataset.
The original dataset is available from
http://www.ee.surrey.ac.uk/Projects/LILiR/datasets/avletters1/index.html
This dataset consists of three repetitions by each of 10 talkers,
five male (two with moustaches) and five female,
of the isolated letters A-Z, a total of 780 utterances
References
----------
<NAME>, T.Cootes, <NAME>, <NAME>, and <NAME>.
Extraction of visual features for lipreading.
IEEE Trans. on Pattern Analysis and Machine Vision,
vol. 24, no. 2, pp. 198-213, 2002.
"""
# License: BSD 3 clause
import numpy as np
from string import ascii_uppercase
import random
from os import listdir
from os.path import dirname, exists, isfile, join
from scipy.io import loadmat
folderpath = join(dirname(__file__), './avletters/Lips/')
def fetch_avletters_averaged():
"""Load the AVLetters dataset with averaged frames
================ =======================
Classes 26
Samples total 780
Dimensionality (12, 60, 80)
Features real, between 255 and 0
================ =======================
Returns
-------
(lip_videos, label) : tuple
lip_videos : ndarray of shape (780, 12, 60, 80)
The lip videos with averaged frames.
Each video consists of 12 60x80 image frames.
persons : ndarray of shape (780,)
The persons corresponding to the lip videos.
label : ndarray of shape (780,)
Labels corresponding to the lip videos.
Those labels are ranging from 0-23 and
correspond to the letters spoken in the lip video.
"""
if not (exists(folderpath)):
raise IOError("Data not found")
lip_paths = []
for f in listdir(folderpath):
if isfile(join(folderpath, f)) and f.endswith('.mat'):
lip_paths.append(f)
n_samples = 780
n_frames = 12
n_rows = 60
n_columns = 80
people = ['Anya', 'Bill', 'Faye', 'John', 'Kate', 'Nicola', 'Stephen',
'Steve', 'Verity', 'Yi']
lip_videos = np.empty(shape=(n_samples, n_frames, n_rows, n_columns), dtype=float)
persons = np.zeros(shape=(n_samples,), dtype='<U8')
label = np.empty(shape=(n_samples,), dtype=int)
# Save all lip videos in the preferred form
for i, lip_path in enumerate(lip_paths):
# Load the lip video
lip_mat = loadmat(folderpath + lip_path)
n_frames_curr = int(lip_mat['siz'][0,2])
lip_video = lip_mat['vid'].reshape(n_columns, n_rows, n_frames_curr)
lip_video = lip_video.transpose(2, 1, 0)
# Average the video frames over a window of size
# `n_frames_curr - n_frames + 1` so that the new video
# has `n_frames` frames.
window_size = n_frames_curr - n_frames + 1
for j in range(n_frames):
lip_videos[i, j] = lip_video[j:j+window_size].mean(axis=0)
for p in people:
if p in lip_path:
persons[i] = p
label[i] = ord(lip_path[0]) - ord('A')
return (lip_videos, persons, label)
| [
"os.path.exists",
"os.listdir",
"scipy.io.loadmat",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"numpy.empty"
] | [((736, 753), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (743, 753), False, 'from os.path import dirname, exists, isfile, join\n'), ((1790, 1809), 'os.listdir', 'listdir', (['folderpath'], {}), '(folderpath)\n', (1797, 1809), False, 'from os import listdir\n'), ((2113, 2182), 'numpy.empty', 'np.empty', ([], {'shape': '(n_samples, n_frames, n_rows, n_columns)', 'dtype': 'float'}), '(shape=(n_samples, n_frames, n_rows, n_columns), dtype=float)\n', (2121, 2182), True, 'import numpy as np\n'), ((2197, 2238), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples,)', 'dtype': '"""<U8"""'}), "(shape=(n_samples,), dtype='<U8')\n", (2205, 2238), True, 'import numpy as np\n'), ((2251, 2290), 'numpy.empty', 'np.empty', ([], {'shape': '(n_samples,)', 'dtype': 'int'}), '(shape=(n_samples,), dtype=int)\n', (2259, 2290), True, 'import numpy as np\n'), ((1696, 1714), 'os.path.exists', 'exists', (['folderpath'], {}), '(folderpath)\n', (1702, 1714), False, 'from os.path import dirname, exists, isfile, join\n'), ((2432, 2462), 'scipy.io.loadmat', 'loadmat', (['(folderpath + lip_path)'], {}), '(folderpath + lip_path)\n', (2439, 2462), False, 'from scipy.io import loadmat\n'), ((1829, 1848), 'os.path.join', 'join', (['folderpath', 'f'], {}), '(folderpath, f)\n', (1833, 1848), False, 'from os.path import dirname, exists, isfile, join\n')] |
#
# Copyright 2021 Rovio Entertainment Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from assertions import assert_df
from rovio_ingest.extensions.dataframe_extension import add_dataframe_druid_extension, normalize_date
import pyspark.sql.types as t
from pyspark.sql import DataFrame
from test_helper import row, get_df
def to_date(date_str):
return datetime.strptime(date_str, '%Y-%m-%d')
def test_repartition_by_druid_segment_size(spark):
add_dataframe_druid_extension()
schema = t.StructType([
t.StructField('date', t.DateType()),
t.StructField('country', t.StringType()),
t.StructField('dau', t.IntegerType()),
t.StructField('revenue', t.DoubleType()),
])
rows = [
row(date=to_date("2019-10-17"), country="US", dau=50, revenue=100.0),
row(date=to_date("2019-10-17"), country="GB", dau=20, revenue=20.0),
row(date=to_date("2019-10-17"), country="DE", dau=20, revenue=20.0),
row(date=to_date("2019-10-16"), country="US", dau=50, revenue=100.0),
row(date=to_date("2019-10-16"), country="FI", dau=20, revenue=20.0),
row(date=to_date("2019-10-16"), country="GB", dau=20, revenue=20.0),
row(date=to_date("2019-10-16"), country="DE", dau=20, revenue=20.0)
]
df: DataFrame = get_df(spark, rows, schema)
# note how we can call .repartitionByDruidSegmentSize directly on Dataset[Row]
# the nice thing is this allows continuous method chaining on Dataset without braking the chain
df = df.repartition_by_druid_segment_size('date', segment_granularity='DAY', rows_per_segment=2)
# group & count
# because we can't know which exact rows end up in each partition within the same date
# however we know how many partitions there should be for each date
df = df.groupBy('__PARTITION_TIME__', '__PARTITION_NUM__').count()
expected: DataFrame = get_df(spark, [
row(__PARTITION_TIME__=to_date("2019-10-17"), __PARTITION_NUM__=0, count=2),
row(__PARTITION_TIME__=to_date("2019-10-16"), __PARTITION_NUM__=1, count=2),
row(__PARTITION_TIME__=to_date("2019-10-17"), __PARTITION_NUM__=1, count=1),
row(__PARTITION_TIME__=to_date("2019-10-16"), __PARTITION_NUM__=0, count=2),
], t.StructType([
t.StructField('__PARTITION_TIME__', t.TimestampType()),
t.StructField('__PARTITION_NUM__', t.IntegerType()),
t.StructField('count', t.LongType()),
]))
assert_df(df, expected)
def test_normalize_date(spark):
value = datetime(2020, 5, 6, 19, 46, 00)
# WEEK: so 6th (Wednesday) -> 4th (Monday)
assert normalize_date(spark, value, 'WEEK') == datetime(2020, 5, 4)
| [
"datetime.datetime",
"datetime.datetime.strptime",
"pyspark.sql.types.DoubleType",
"rovio_ingest.extensions.dataframe_extension.add_dataframe_druid_extension",
"test_helper.get_df",
"pyspark.sql.types.IntegerType",
"pyspark.sql.types.StringType",
"pyspark.sql.types.TimestampType",
"pyspark.sql.types... | [((898, 937), 'datetime.datetime.strptime', 'datetime.strptime', (['date_str', '"""%Y-%m-%d"""'], {}), "(date_str, '%Y-%m-%d')\n", (915, 937), False, 'from datetime import datetime\n'), ((995, 1026), 'rovio_ingest.extensions.dataframe_extension.add_dataframe_druid_extension', 'add_dataframe_druid_extension', ([], {}), '()\n', (1024, 1026), False, 'from rovio_ingest.extensions.dataframe_extension import add_dataframe_druid_extension, normalize_date\n'), ((1836, 1863), 'test_helper.get_df', 'get_df', (['spark', 'rows', 'schema'], {}), '(spark, rows, schema)\n', (1842, 1863), False, 'from test_helper import row, get_df\n'), ((2993, 3016), 'assertions.assert_df', 'assert_df', (['df', 'expected'], {}), '(df, expected)\n', (3002, 3016), False, 'from assertions import assert_df\n'), ((3063, 3094), 'datetime.datetime', 'datetime', (['(2020)', '(5)', '(6)', '(19)', '(46)', '(0)'], {}), '(2020, 5, 6, 19, 46, 0)\n', (3071, 3094), False, 'from datetime import datetime\n'), ((3154, 3190), 'rovio_ingest.extensions.dataframe_extension.normalize_date', 'normalize_date', (['spark', 'value', '"""WEEK"""'], {}), "(spark, value, 'WEEK')\n", (3168, 3190), False, 'from rovio_ingest.extensions.dataframe_extension import add_dataframe_druid_extension, normalize_date\n'), ((3194, 3214), 'datetime.datetime', 'datetime', (['(2020)', '(5)', '(4)'], {}), '(2020, 5, 4)\n', (3202, 3214), False, 'from datetime import datetime\n'), ((1086, 1098), 'pyspark.sql.types.DateType', 't.DateType', ([], {}), '()\n', (1096, 1098), True, 'import pyspark.sql.types as t\n'), ((1134, 1148), 'pyspark.sql.types.StringType', 't.StringType', ([], {}), '()\n', (1146, 1148), True, 'import pyspark.sql.types as t\n'), ((1180, 1195), 'pyspark.sql.types.IntegerType', 't.IntegerType', ([], {}), '()\n', (1193, 1195), True, 'import pyspark.sql.types as t\n'), ((1231, 1245), 'pyspark.sql.types.DoubleType', 't.DoubleType', ([], {}), '()\n', (1243, 1245), True, 'import pyspark.sql.types as t\n'), 
((2853, 2870), 'pyspark.sql.types.TimestampType', 't.TimestampType', ([], {}), '()\n', (2868, 2870), True, 'import pyspark.sql.types as t\n'), ((2916, 2931), 'pyspark.sql.types.IntegerType', 't.IntegerType', ([], {}), '()\n', (2929, 2931), True, 'import pyspark.sql.types as t\n'), ((2965, 2977), 'pyspark.sql.types.LongType', 't.LongType', ([], {}), '()\n', (2975, 2977), True, 'import pyspark.sql.types as t\n')] |
#!/usr/bin/env python
#
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import http.client
import os
import re
import tiny_test_fw
import ttfw_idf
from idf_http_server_test import adder as client
from tiny_test_fw import Utility
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_Protocols')
def test_examples_protocol_http_server_file_serving(env, extra_data): # type: (tiny_test_fw.Env.Env, None) -> None # pylint: disable=unused-argument
# Acquire DUT
dut1 = env.get_dut('http file_serving', 'examples/protocols/http_server/file_serving', dut_class=ttfw_idf.ESP32DUT)
# Get binary file
binary_file = os.path.join(dut1.app.binary_path, 'file_server.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('file_server_bin_size', '{}KB'.format(bin_size // 1024))
Utility.console_log('Erasing the flash on the chip')
# erase the flash
dut1.erase_flash()
# Upload binary and start testing
Utility.console_log('Starting http file serving simple test app')
dut1.start_app()
# Parse IP address of STA
Utility.console_log('Waiting to connect with AP')
got_ip = dut1.expect(re.compile(r'IPv4 address: (\d+\.\d+\.\d+\.\d+)'), timeout=30)[0]
# Expected logs
dut1.expect('Initializing SPIFFS', timeout=30)
got_port = dut1.expect(re.compile(r"Starting HTTP Server on port: '(\d+)'"), timeout=30)[0]
Utility.console_log('Got IP : ' + got_ip)
Utility.console_log('Got Port : ' + got_port)
# Run test script
conn = client.start_session(got_ip, got_port)
# upload a file onto the server
upload_data = 'Test data to be sent to the server'
upload_file_name = 'example.txt'
upload_file_hash = hashlib.md5(upload_data.encode('UTF-8'))
upload_file_digest = upload_file_hash.digest()
Utility.console_log('\nTesting the uploading of file on the file server')
client.postreq(conn, '/upload/' + str(upload_file_name), upload_data)
try:
dut1.expect('File reception complete', timeout=10)
except Exception:
Utility.console_log('Failed the test to upload file on the file server')
raise
Utility.console_log('Passed the test to uploaded file on the file server')
# Download the uploaded file from the file server
Utility.console_log("\nTesting for Download of \"existing\" file from the file server")
download_data = client.getreq(conn, '/' + str(upload_file_name))
try:
dut1.expect('File sending complete', timeout=10)
except Exception:
Utility.console_log('Failed the test to download existing file from the file server')
raise
Utility.console_log('Passed the test to downloaded existing file from the file server')
download_file_hash = hashlib.md5(download_data)
download_file_digest = download_file_hash.digest()
if download_file_digest != upload_file_digest:
raise RuntimeError('The md5 hash of the downloaded file does not match with that of the uploaded file')
# Upload existing file on the file server
Utility.console_log("\nTesting the upload of \"already existing\" file on the file server")
client.postreq(conn, '/upload/' + str(upload_file_name), data=None)
try:
dut1.expect('File already exists : /spiffs/' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test for uploading existing file on the file server')
raise
Utility.console_log('Passed the test for uploading existing file on the file server')
# Previous URI was an invalid URI so the server should have closed the connection.
# Trying to send request to the server
try:
client.getreq(conn, '/')
except http.client.RemoteDisconnected:
# It is correct behavior that the connection was closed by the server
pass
except Exception:
Utility.console_log('Connection was not closed successfully by the server after last invalid URI')
raise
conn = client.start_session(got_ip, got_port)
# Delete the existing file from the file server
Utility.console_log("\nTesting the deletion of \"existing\" file on the file server")
client.postreq(conn, '/delete/' + str(upload_file_name), data=None)
try:
dut1.expect('Deleting file : /' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test for deletion of existing file on the file server')
raise
Utility.console_log('Passed the test for deletion of existing file on the file server')
conn = client.start_session(got_ip, got_port)
# Try to delete non existing file from the file server
Utility.console_log("\nTesting the deletion of \"non existing\" file on the file server")
client.postreq(conn, '/delete/' + str(upload_file_name), data=None)
try:
dut1.expect('File does not exist : /' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test for deleting non existing file on the file server')
raise
Utility.console_log('Passed the test for deleting non existing file on the file server')
conn = client.start_session(got_ip, got_port)
# Try to download non existing file from the file server
Utility.console_log("\nTesting for Download of \"non existing\" file from the file server")
download_data = client.getreq(conn, '/' + str(upload_file_name))
try:
dut1.expect('Failed to stat file : /spiffs/' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test to download non existing file from the file server')
raise
Utility.console_log('Passed the test to downloaded non existing file from the file server')
if __name__ == '__main__':
test_examples_protocol_http_server_file_serving() # pylint: disable=no-value-for-parameter
| [
"os.path.getsize",
"hashlib.md5",
"re.compile",
"tiny_test_fw.Utility.console_log",
"os.path.join",
"ttfw_idf.idf_example_test",
"idf_http_server_test.adder.start_session",
"idf_http_server_test.adder.getreq"
] | [((800, 859), 'ttfw_idf.idf_example_test', 'ttfw_idf.idf_example_test', ([], {'env_tag': '"""Example_WIFI_Protocols"""'}), "(env_tag='Example_WIFI_Protocols')\n", (825, 859), False, 'import ttfw_idf\n'), ((1189, 1242), 'os.path.join', 'os.path.join', (['dut1.app.binary_path', '"""file_server.bin"""'], {}), "(dut1.app.binary_path, 'file_server.bin')\n", (1201, 1242), False, 'import os\n'), ((1258, 1286), 'os.path.getsize', 'os.path.getsize', (['binary_file'], {}), '(binary_file)\n', (1273, 1286), False, 'import os\n'), ((1377, 1429), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Erasing the flash on the chip"""'], {}), "('Erasing the flash on the chip')\n", (1396, 1429), False, 'from tiny_test_fw import Utility\n'), ((1517, 1582), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Starting http file serving simple test app"""'], {}), "('Starting http file serving simple test app')\n", (1536, 1582), False, 'from tiny_test_fw import Utility\n'), ((1639, 1688), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Waiting to connect with AP"""'], {}), "('Waiting to connect with AP')\n", (1658, 1688), False, 'from tiny_test_fw import Utility\n'), ((1951, 1994), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (["('Got IP : ' + got_ip)"], {}), "('Got IP : ' + got_ip)\n", (1970, 1994), False, 'from tiny_test_fw import Utility\n'), ((1999, 2044), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (["('Got Port : ' + got_port)"], {}), "('Got Port : ' + got_port)\n", (2018, 2044), False, 'from tiny_test_fw import Utility\n'), ((2079, 2117), 'idf_http_server_test.adder.start_session', 'client.start_session', (['got_ip', 'got_port'], {}), '(got_ip, got_port)\n', (2099, 2117), True, 'from idf_http_server_test import adder as client\n'), ((2367, 2443), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""\nTesting the uploading of file on the file server"""'], {}), '("""\nTesting the uploading of file 
on the file server""")\n', (2386, 2443), False, 'from tiny_test_fw import Utility\n'), ((2705, 2779), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Passed the test to uploaded file on the file server"""'], {}), "('Passed the test to uploaded file on the file server')\n", (2724, 2779), False, 'from tiny_test_fw import Utility\n'), ((2839, 2932), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""\nTesting for Download of "existing" file from the file server"""'], {}), '(\n """\nTesting for Download of "existing" file from the file server""")\n', (2858, 2932), False, 'from tiny_test_fw import Utility\n'), ((3198, 3290), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Passed the test to downloaded existing file from the file server"""'], {}), "(\n 'Passed the test to downloaded existing file from the file server')\n", (3217, 3290), False, 'from tiny_test_fw import Utility\n'), ((3312, 3338), 'hashlib.md5', 'hashlib.md5', (['download_data'], {}), '(download_data)\n', (3323, 3338), False, 'import hashlib\n'), ((3609, 3706), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""\nTesting the upload of "already existing" file on the file server"""'], {}), '(\n """\nTesting the upload of "already existing" file on the file server""")\n', (3628, 3706), False, 'from tiny_test_fw import Utility\n'), ((4006, 4096), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Passed the test for uploading existing file on the file server"""'], {}), "(\n 'Passed the test for uploading existing file on the file server')\n", (4025, 4096), False, 'from tiny_test_fw import Utility\n'), ((4553, 4591), 'idf_http_server_test.adder.start_session', 'client.start_session', (['got_ip', 'got_port'], {}), '(got_ip, got_port)\n', (4573, 4591), True, 'from idf_http_server_test import adder as client\n'), ((4648, 4739), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""\nTesting the deletion of "existing" file on 
the file server"""'], {}), '(\n """\nTesting the deletion of "existing" file on the file server""")\n', (4667, 4739), False, 'from tiny_test_fw import Utility\n'), ((5028, 5120), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Passed the test for deletion of existing file on the file server"""'], {}), "(\n 'Passed the test for deletion of existing file on the file server')\n", (5047, 5120), False, 'from tiny_test_fw import Utility\n'), ((5128, 5166), 'idf_http_server_test.adder.start_session', 'client.start_session', (['got_ip', 'got_port'], {}), '(got_ip, got_port)\n', (5148, 5166), True, 'from idf_http_server_test import adder as client\n'), ((5230, 5325), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""\nTesting the deletion of "non existing" file on the file server"""'], {}), '(\n """\nTesting the deletion of "non existing" file on the file server""")\n', (5249, 5325), False, 'from tiny_test_fw import Utility\n'), ((5621, 5714), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Passed the test for deleting non existing file on the file server"""'], {}), "(\n 'Passed the test for deleting non existing file on the file server')\n", (5640, 5714), False, 'from tiny_test_fw import Utility\n'), ((5722, 5760), 'idf_http_server_test.adder.start_session', 'client.start_session', (['got_ip', 'got_port'], {}), '(got_ip, got_port)\n', (5742, 5760), True, 'from idf_http_server_test import adder as client\n'), ((5826, 5923), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""\nTesting for Download of "non existing" file from the file server"""'], {}), '(\n """\nTesting for Download of "non existing" file from the file server""")\n', (5845, 5923), False, 'from tiny_test_fw import Utility\n'), ((6226, 6322), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Passed the test to downloaded non existing file from the file server"""'], {}), "(\n 'Passed the test to downloaded non existing file from the 
file server')\n", (6245, 6322), False, 'from tiny_test_fw import Utility\n'), ((4239, 4263), 'idf_http_server_test.adder.getreq', 'client.getreq', (['conn', '"""/"""'], {}), "(conn, '/')\n", (4252, 4263), True, 'from idf_http_server_test import adder as client\n'), ((1714, 1769), 're.compile', 're.compile', (['"""IPv4 address: (\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+)"""'], {}), "('IPv4 address: (\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+)')\n", (1724, 1769), False, 'import re\n'), ((1878, 1930), 're.compile', 're.compile', (['"""Starting HTTP Server on port: \'(\\\\d+)\'"""'], {}), '("Starting HTTP Server on port: \'(\\\\d+)\'")\n', (1888, 1930), False, 'import re\n'), ((2614, 2686), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Failed the test to upload file on the file server"""'], {}), "('Failed the test to upload file on the file server')\n", (2633, 2686), False, 'from tiny_test_fw import Utility\n'), ((3094, 3184), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Failed the test to download existing file from the file server"""'], {}), "(\n 'Failed the test to download existing file from the file server')\n", (3113, 3184), False, 'from tiny_test_fw import Utility\n'), ((3902, 3992), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Failed the test for uploading existing file on the file server"""'], {}), "(\n 'Failed the test for uploading existing file on the file server')\n", (3921, 3992), False, 'from tiny_test_fw import Utility\n'), ((4428, 4536), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Connection was not closed successfully by the server after last invalid URI"""'], {}), "(\n 'Connection was not closed successfully by the server after last invalid URI'\n )\n", (4447, 4536), False, 'from tiny_test_fw import Utility\n'), ((4922, 5014), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Failed the test for deletion of existing file on the file server"""'], {}), "(\n 
'Failed the test for deletion of existing file on the file server')\n", (4941, 5014), False, 'from tiny_test_fw import Utility\n'), ((5514, 5607), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Failed the test for deleting non existing file on the file server"""'], {}), "(\n 'Failed the test for deleting non existing file on the file server')\n", (5533, 5607), False, 'from tiny_test_fw import Utility\n'), ((6118, 6212), 'tiny_test_fw.Utility.console_log', 'Utility.console_log', (['"""Failed the test to download non existing file from the file server"""'], {}), "(\n 'Failed the test to download non existing file from the file server')\n", (6137, 6212), False, 'from tiny_test_fw import Utility\n')] |
from flask import Blueprint
needs = Blueprint('needs', __name__)
from . import views
from ..models import Permission
@needs.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
| [
"flask.Blueprint"
] | [((37, 65), 'flask.Blueprint', 'Blueprint', (['"""needs"""', '__name__'], {}), "('needs', __name__)\n", (46, 65), False, 'from flask import Blueprint\n')] |
# -*- coding: utf-8 -*-
from setuptools import setup
try:
with open('requirements.txt') as f:
required = f.read().splitlines()
except:
required = ['requests>=2.7.0', 'pyrestcli>=0.6.4']
try:
with open('test_requirements.txt') as f:
test_required = f.read().splitlines()
except:
pass
setup(name="carto",
author="<NAME>",
author_email="<EMAIL>",
description="SDK around CARTO's APIs",
version="1.8.1",
url="https://github.com/CartoDB/carto-python",
install_requires=required,
packages=["carto"])
| [
"setuptools.setup"
] | [((318, 542), 'setuptools.setup', 'setup', ([], {'name': '"""carto"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""SDK around CARTO\'s APIs"""', 'version': '"""1.8.1"""', 'url': '"""https://github.com/CartoDB/carto-python"""', 'install_requires': 'required', 'packages': "['carto']"}), '(name=\'carto\', author=\'<NAME>\', author_email=\'<EMAIL>\', description=\n "SDK around CARTO\'s APIs", version=\'1.8.1\', url=\n \'https://github.com/CartoDB/carto-python\', install_requires=required,\n packages=[\'carto\'])\n', (323, 542), False, 'from setuptools import setup\n')] |
import machine
from machine import Pin, I2C
import googlyscreen, functions
i2c_builtin = I2C(scl=Pin(5), sda=Pin(4), freq=400000) # 5 = D1, 4 = D2
screen = googlyscreen.GooglyScreen(i2c_builtin)
def push_data():
data = screen.environment_data
functions.push_data(data)
def main_loop():
count = 0
while True:
vX = screen.xaxis_reading
screen.refresh()
count += 1
if (count == 200): # 20 seconds - JUST FOR TESTING !!!
count = 0
functions.push_data()
machine.lightsleep(100)
if (screen.idle):
print("idle")
return
screen.text("hello world !", 14, 0)
while True:
screen.text("double tap", 12, 16)
screen.text("to continue", 12, 24)
screen.tap_wait(5 * 60 * 1000, push_data())
print("tapped")
screen.idle_reset()
main_loop() | [
"machine.Pin",
"machine.lightsleep",
"functions.push_data",
"googlyscreen.GooglyScreen"
] | [((159, 197), 'googlyscreen.GooglyScreen', 'googlyscreen.GooglyScreen', (['i2c_builtin'], {}), '(i2c_builtin)\n', (184, 197), False, 'import googlyscreen, functions\n'), ((256, 281), 'functions.push_data', 'functions.push_data', (['data'], {}), '(data)\n', (275, 281), False, 'import googlyscreen, functions\n'), ((99, 105), 'machine.Pin', 'Pin', (['(5)'], {}), '(5)\n', (102, 105), False, 'from machine import Pin, I2C\n'), ((111, 117), 'machine.Pin', 'Pin', (['(4)'], {}), '(4)\n', (114, 117), False, 'from machine import Pin, I2C\n'), ((543, 566), 'machine.lightsleep', 'machine.lightsleep', (['(100)'], {}), '(100)\n', (561, 566), False, 'import machine\n'), ((512, 533), 'functions.push_data', 'functions.push_data', ([], {}), '()\n', (531, 533), False, 'import googlyscreen, functions\n')] |
import csv
from collections import deque
from dataclasses import dataclass
from decimal import Decimal
from datetime import date
import io
import itertools
from typing import List, Optional
from dateutil.parser import parse as dateparse
from dateutil.relativedelta import relativedelta
from casparser.exceptions import IncompleteCASError, GainsError
from casparser.enums import FundType, GainType, TransactionType
from casparser.types import CASParserDataType, TransactionDataType
from .utils import CII, get_fin_year, nav_search
# Transaction types (by enum name) that add units to a folio: buys, SIPs,
# dividend reinvestments, reversals and switch-ins.
PURCHASE_TXNS = {
    TransactionType.DIVIDEND_REINVEST.name,
    TransactionType.PURCHASE.name,
    TransactionType.PURCHASE_SIP.name,
    TransactionType.REVERSAL.name,
    # Segregated folios are not supported
    # TransactionType.SEGREGATION.name,
    TransactionType.SWITCH_IN.name,
    TransactionType.SWITCH_IN_MERGER.name,
}

# Transaction types (by enum name) that remove units from a folio:
# redemptions and switch-outs.
SALE_TXNS = {
    TransactionType.REDEMPTION.name,
    TransactionType.SWITCH_OUT.name,
    TransactionType.SWITCH_OUT_MERGER.name,
}
@dataclass
class GainEntry112A:
    """One row of Schedule 112A of the ITR.

    ``acquired`` is the acquisition-period code used by the schedule
    ("AE" or "BE"); only "BE" rows take the fair-market value (FMV)
    into account when computing the cost of acquisition.
    """

    acquired: str  # AE, BE
    isin: str
    name: str
    units: Decimal
    sale_nav: Decimal
    sale_value: Decimal
    purchase_value: Decimal
    fmv_nav: Decimal
    fmv: Decimal
    stt: Decimal
    stamp_duty: Decimal

    @property
    def consideration_value(self):
        """Lower of FMV and sale value for "BE" rows; zero for "AE" rows."""
        if self.acquired != "BE":
            return Decimal("0.00")  # FMV not considered
        return min(self.fmv, self.sale_value)

    @property
    def actual_coa(self):
        """Effective cost of acquisition: higher of cost and consideration."""
        return max(self.purchase_value, self.consideration_value)

    @property
    def expenditure(self):
        """Transfer expenses: STT plus stamp duty."""
        return self.stt + self.stamp_duty

    @property
    def deductions(self):
        """Total amount deductible from the sale value."""
        return self.actual_coa + self.expenditure

    @property
    def balance(self):
        """Net gain/loss: sale value less all deductions."""
        return self.sale_value - self.deductions
@dataclass
class MergedTransaction:
    """Net effect of all transactions of a fund on a single date."""

    dt: date
    nav: Decimal = Decimal(0.0)
    purchase: Decimal = Decimal(0.0)
    purchase_units: Decimal = Decimal(0.0)
    sale: Decimal = Decimal(0.0)
    sale_units: Decimal = Decimal(0.0)
    stamp_duty: Decimal = Decimal(0.0)
    stt: Decimal = Decimal(0.0)
    tds: Decimal = Decimal(0.0)

    def add(self, txn: TransactionDataType):
        """Fold one parsed transaction into this day's running totals."""
        txn_type = txn["type"]
        units = txn["units"]
        amount = txn["amount"]
        if txn_type in PURCHASE_TXNS and units is not None:
            self.nav = txn["nav"]
            self.purchase_units += units
            self.purchase += amount
        elif txn_type in SALE_TXNS and units is not None:
            self.nav = txn["nav"]
            self.sale_units += units
            self.sale += amount
        elif txn_type == TransactionType.STT_TAX.name:
            self.stt += amount
        elif txn_type == TransactionType.STAMP_DUTY_TAX.name:
            self.stamp_duty += amount
        elif txn_type == TransactionType.TDS_TAX.name:
            self.tds += amount
        elif txn_type == TransactionType.SEGREGATION.name:
            # Segregated units are taken in at zero NAV with zero cost.
            self.nav = Decimal(0.0)
            self.purchase_units += units
            self.purchase = Decimal(0.0)
@dataclass
class Fund:
    """Static identity of a mutual-fund holding (scheme + folio + ISIN)."""

    scheme: str
    folio: str
    isin: str
    type: str

    @property
    def name(self):
        """Display label combining scheme name and folio number."""
        return "{} [{}]".format(self.scheme, self.folio)

    def __lt__(self, other: "Fund"):
        """Order funds alphabetically by scheme name."""
        return self.scheme < other.scheme
@dataclass
class GainEntry:
    """Gain data of a realised transaction.

    Properties implement Indian capital-gains rules visible in this code:
    LTCG/STCG classification by holding period (1y equity / 3y debt),
    CII-based cost indexation for debt funds, and the 31-Jan-2018 /
    01-Apr-2018 cutoff dates used by ``coa`` (the equity "grandfathering"
    rule, presumably — confirm against Finance Act 2018).
    """

    fy: str  # financial-year label for the sale (set by caller)
    fund: Fund
    type: str  # FundType member name: "EQUITY" or "DEBT" (used as key in gain_type)
    purchase_date: date
    purchase_nav: Decimal
    purchase_value: Decimal
    stamp_duty: Decimal
    sale_date: date
    sale_nav: Decimal
    sale_value: Decimal
    stt: Decimal
    units: Decimal

    def __post_init__(self):
        # Reference dates used by ``coa``: purchases before 31-Jan-2018 get
        # FMV-adjusted cost; sales before 01-Apr-2018 use sale value as cost.
        self.__cutoff_date = date(2018, 1, 31)
        self.__sell_cutoff_date = date(2018, 4, 1)
        self.__update_nav()

    def __update_nav(self):
        # Cache the nav_search result per ISIN; ``fmv_nav`` re-runs this
        # lazily when the fund's ISIN changes.
        self._cached_isin = self.fund.isin
        self._cached_nav = nav_search(self._cached_isin)

    @property
    def gain_type(self):
        """Identify gain type based on the current fund type, buy and sell dates.

        LTCG when held for more than 1 year (equity) or 3 years (debt),
        otherwise STCG.
        """
        ltcg = {
            FundType.EQUITY.name: self.purchase_date + relativedelta(years=1),
            FundType.DEBT.name: self.purchase_date + relativedelta(years=3),
        }

        return GainType.LTCG if self.sale_date > ltcg[self.type] else GainType.STCG

    @property
    def gain(self) -> Decimal:
        """Absolute gain (sale value minus purchase value), rounded to 2 places."""
        return Decimal(round(self.sale_value - self.purchase_value, 2))

    @property
    def fmv_nav(self) -> Decimal:
        """Cached NAV from ``nav_search`` for the fund's ISIN (may be None)."""
        if self.fund.isin != self._cached_isin:
            self.__update_nav()
        return self._cached_nav

    @property
    def fmv(self) -> Decimal:
        """Fair-market value of the sold units; falls back to the purchase
        value when no NAV is available."""
        if self.fmv_nav is None:
            return self.purchase_value
        return self.fmv_nav * self.units

    @property
    def index_ratio(self) -> Decimal:
        """CII ratio between the sale and purchase financial years
        (cost-inflation indexation factor, rounded to 2 places)."""
        return Decimal(
            round(CII[get_fin_year(self.sale_date)] / CII[get_fin_year(self.purchase_date)], 2)
        )

    @property
    def coa(self) -> Decimal:
        """Cost of acquisition.

        Debt: indexed purchase value. Equity bought before 31-Jan-2018:
        sale value if sold before 01-Apr-2018, else
        max(cost, min(FMV, sale value)). Otherwise: plain purchase value.
        """
        if self.fund.type == FundType.DEBT.name:
            return Decimal(round(self.purchase_value * self.index_ratio, 2))
        if self.purchase_date < self.__cutoff_date:
            if self.sale_date < self.__sell_cutoff_date:
                return self.sale_value
            return max(self.purchase_value, min(self.fmv, self.sale_value))
        return self.purchase_value

    @property
    def ltcg_taxable(self) -> Decimal:
        """Taxable long-term gain (sale value minus cost of acquisition);
        zero for short-term gains."""
        if self.gain_type == GainType.LTCG:
            return Decimal(round(self.sale_value - self.coa, 2))
        return Decimal(0.0)

    @property
    def ltcg(self) -> Decimal:
        """Raw gain when long-term, else zero."""
        if self.gain_type == GainType.LTCG:
            return self.gain
        return Decimal(0.0)

    @property
    def stcg(self) -> Decimal:
        """Raw gain when short-term, else zero."""
        if self.gain_type == GainType.STCG:
            return self.gain
        return Decimal(0.0)
def get_fund_type(transactions: List[TransactionDataType]) -> FundType:
    """
    Detect Fund Type.
    - UNKNOWN if there are no redemption transactions
    - EQUITY if STT_TAX transactions are present in the portfolio
    - DEBT if no STT_TAX transactions are present along with redemptions
    :param transactions: list of transactions for a single fund parsed from the CAS
    :return: type of fund
    """
    # Use generator expressions so any() can short-circuit instead of
    # materializing a full list of booleans first (flake8-comprehensions C419).
    valid = any(
        x["units"] is not None and x["units"] < 0 and x["type"] != TransactionType.REVERSAL.name
        for x in transactions
    )
    if not valid:
        return FundType.UNKNOWN
    return (
        FundType.EQUITY
        if any(x["type"] == TransactionType.STT_TAX.name for x in transactions)
        else FundType.DEBT
    )
class FIFOUnits:
    """First-In First-Out units calculator.

    Replays a fund's transactions in chronological order, matching every
    redemption against the oldest outstanding purchase lots (FIFO) and
    recording one GainEntry per matched lot.
    """
    def __init__(self, fund: Fund, transactions: List[TransactionDataType]):
        """
        :param fund: name of fund, mainly for reporting purposes.
        :param transactions: list of transactions for the fund
        """
        self._fund: Fund = fund
        self._original_transactions = transactions
        # Fall back to detection from the transactions when the statement does
        # not declare a usable fund type.
        if fund.type not in ("EQUITY", "DEBT"):
            self.fund_type = get_fund_type(transactions)
        else:
            self.fund_type = getattr(FundType, fund.type)
        self._merged_transactions = self.merge_transactions()
        # FIFO queue of open purchase lots: (date, units, nav, stamp_duty).
        self.transactions = deque()
        self.invested = Decimal(0.0)
        self.balance = Decimal(0.0)
        self.gains: List[GainEntry] = []
        self.process()
    @property
    def clean_transactions(self):
        """remove redundant transactions, without amount"""
        return filter(lambda x: x["amount"] is not None, self._original_transactions)
    def merge_transactions(self):
        """Group transactions by date with taxes and investments/redemptions separated."""
        merged_transactions = {}
        # Sort by date; within a date, larger (purchase) amounts come first.
        for txn in sorted(self.clean_transactions, key=lambda x: (x["date"], -x["amount"])):
            dt = txn["date"]
            if isinstance(dt, str):
                dt = dateparse(dt).date()
            if dt not in merged_transactions:
                merged_transactions[dt] = MergedTransaction(dt)
            merged_transactions[dt].add(txn)
        return merged_transactions
    def process(self):
        # Replay merged transactions in date order, recording gains on sales.
        self.gains = []
        for dt in sorted(self._merged_transactions.keys()):
            txn = self._merged_transactions[dt]
            # A single day can carry both a purchase and a sale.
            if txn.purchase_units > 0:
                self.buy(dt, txn.purchase_units, txn.nav, txn.stamp_duty)
            if txn.sale_units < 0:
                self.sell(dt, txn.sale_units, txn.nav, txn.stt)
        return self.gains
    def buy(self, txn_date: date, quantity: Decimal, nav: Decimal, tax: Decimal):
        # Queue a purchase lot and update the running invested/balance totals.
        self.transactions.append((txn_date, quantity, nav, tax))
        self.invested += quantity * nav
        self.balance += quantity
    def sell(self, sell_date: date, quantity: Decimal, nav: Decimal, tax: Decimal):
        # Match a sale (quantity is negative) against queued lots, oldest first.
        fin_year = get_fin_year(sell_date)
        original_quantity = abs(quantity)
        pending_units = original_quantity
        # The 1e-2 tolerance absorbs sub-unit rounding noise in statement data.
        while pending_units >= 1e-2:
            try:
                purchase_date, units, purchase_nav, purchase_tax = self.transactions.popleft()
            except IndexError:
                # More units sold than ever purchased -- data is inconsistent.
                raise GainsError(
                    f"FIFOUnits mismatch for {self._fund.name}. Please contact support."
                )
            if units <= pending_units:
                gain_units = units
            else:
                gain_units = pending_units
            purchase_value = round(gain_units * purchase_nav, 2)
            sale_value = round(gain_units * nav, 2)
            # Taxes are apportioned pro-rata to the units matched from each lot.
            stamp_duty = round(purchase_tax * gain_units / units, 2)
            stt = round(tax * gain_units / original_quantity, 2)
            ge = GainEntry(
                fy=fin_year,
                fund=self._fund,
                type=self.fund_type.name,
                purchase_date=purchase_date,
                purchase_nav=purchase_nav,
                purchase_value=purchase_value,
                stamp_duty=stamp_duty,
                sale_date=sell_date,
                sale_nav=nav,
                sale_value=sale_value,
                stt=stt,
                units=gain_units,
            )
            self.gains.append(ge)
            self.balance -= gain_units
            self.invested -= purchase_value
            pending_units -= units
            if pending_units < 0 and purchase_nav is not None:
                # Sale is partially matched against the last buy transactions
                # Re-add the remaining units to the FIFO queue
                self.transactions.appendleft(
                    (purchase_date, -1 * pending_units, purchase_nav, purchase_tax)
                )
class CapitalGainsReport:
    """Generate Capital Gains Report from the parsed CAS data"""
    def __init__(self, data: CASParserDataType):
        # data: parsed CAS structure (folios -> schemes -> transactions).
        self._data: CASParserDataType = data
        self._gains: List[GainEntry] = []
        # (fund name, error message) pairs for schemes that failed processing.
        self.errors = []
        self.invested_amount = Decimal(0.0)
        self.current_value = Decimal(0.0)
        self.process_data()
    @property
    def gains(self) -> List[GainEntry]:
        """Gain entries ordered by financial year, fund and sale date."""
        return list(sorted(self._gains, key=lambda x: (x.fy, x.fund, x.sale_date)))
    def has_gains(self) -> bool:
        """True when at least one gain entry was computed."""
        return len(self.gains) > 0
    def has_error(self) -> bool:
        """True when at least one scheme could not be processed."""
        return len(self.errors) > 0
    def get_fy_list(self) -> List[str]:
        """Distinct financial years present in the gains, latest first."""
        return list(sorted(set([f.fy for f in self.gains]), reverse=True))
    def process_data(self):
        """Run the FIFO gains computation for every scheme of every folio."""
        self._gains = []
        for folio in self._data.get("folios", []):
            for scheme in folio.get("schemes", []):
                transactions = scheme["transactions"]
                fund = Fund(
                    scheme=scheme["scheme"],
                    folio=folio["folio"],
                    isin=scheme["isin"],
                    type=scheme["type"],
                )
                if len(transactions) > 0:
                    # A non-zero opening balance means earlier purchases are
                    # missing, so FIFO matching would produce wrong gains.
                    if scheme["open"] >= 0.01:
                        raise IncompleteCASError(
                            "Incomplete CAS found. For gains computation, "
                            "all folios should have zero opening balance"
                        )
                    try:
                        fifo = FIFOUnits(fund, transactions)
                        self.invested_amount += fifo.invested
                        self.current_value += scheme["valuation"]["value"]
                        self._gains.extend(fifo.gains)
                    except GainsError as exc:
                        # Record the failure and keep processing other schemes.
                        self.errors.append((fund.name, str(exc)))
    def get_summary(self):
        """Calculate capital gains summary"""
        summary = []
        # self.gains is sorted by (fy, fund), which groupby relies on.
        for (fy, fund), txns in itertools.groupby(self.gains, key=lambda x: (x.fy, x.fund)):
            ltcg = stcg = ltcg_taxable = Decimal(0.0)
            for txn in txns:
                ltcg += txn.ltcg
                stcg += txn.stcg
                ltcg_taxable += txn.ltcg_taxable
            summary.append([fy, fund.name, fund.isin, fund.type, ltcg, ltcg_taxable, stcg])
        return summary
    def get_summary_csv_data(self) -> str:
        """Return summary data as a csv string."""
        headers = ["FY", "Fund", "ISIN", "Type", "LTCG(Realized)", "LTCG(Taxable)", "STCG"]
        with io.StringIO() as csv_fp:
            writer = csv.writer(csv_fp)
            writer.writerow(headers)
            for entry in self.get_summary():
                writer.writerow(entry)
            # Rewind and capture the buffer before the context closes it.
            csv_fp.seek(0)
            csv_data = csv_fp.read()
        return csv_data
    def get_gains_csv_data(self) -> str:
        """Return details gains data as a csv string."""
        headers = [
            "FY",
            "Fund",
            "ISIN",
            "Type",
            "Units",
            "Purchase Date",
            "Purchase Value",
            "Stamp Duty",
            "Acquisition Value",
            "Sale Date",
            "Sale Value",
            "STT",
            "LTCG Realized",
            "LTCG Taxable",
            "STCG",
        ]
        with io.StringIO() as csv_fp:
            writer = csv.writer(csv_fp)
            writer.writerow(headers)
            for gain in self.gains:
                writer.writerow(
                    [
                        gain.fy,
                        gain.fund.name,
                        gain.fund.isin,
                        gain.type,
                        gain.units,
                        gain.purchase_date,
                        gain.purchase_value,
                        gain.stamp_duty,
                        gain.coa,
                        gain.sale_date,
                        gain.sale_value,
                        gain.stt,
                        gain.ltcg,
                        gain.ltcg_taxable,
                        gain.stcg,
                    ]
                )
            csv_fp.seek(0)
            csv_data = csv_fp.read()
        return csv_data
    def generate_112a(self, fy) -> List[GainEntry112A]:
        """Build Schedule-112A rows (equity LTCG reporting) for one FY.

        Lots purchased on/before 31-Jan-2018 each get a "BE" row (they need
        per-lot FMV data); later lots are consolidated into one "AE" row per
        fund.
        """
        fy_transactions = sorted(
            list(filter(lambda x: x.fy == fy and x.fund.type == "EQUITY", self.gains)),
            key=lambda x: x.fund,
        )
        rows: List[GainEntry112A] = []
        for fund, txns in itertools.groupby(fy_transactions, key=lambda x: x.fund):
            consolidated_entry: Optional[GainEntry112A] = None
            entries = []
            for txn in txns:
                if txn.purchase_date <= date(2018, 1, 31):
                    entries.append(
                        GainEntry112A(
                            "BE",
                            fund.isin,
                            fund.scheme,
                            txn.units,
                            txn.sale_nav,
                            txn.sale_value,
                            txn.purchase_value,
                            txn.fmv_nav,
                            txn.fmv,
                            txn.stt,
                            txn.stamp_duty,
                        )
                    )
                else:
                    if consolidated_entry is None:
                        consolidated_entry = GainEntry112A(
                            "AE",
                            fund.isin,
                            fund.scheme,
                            txn.units,
                            txn.sale_nav,
                            txn.sale_value,
                            txn.purchase_value,
                            Decimal(0.0),
                            Decimal(0.0),
                            txn.stt,
                            txn.stamp_duty,
                        )
                    else:
                        consolidated_entry.purchase_value += txn.purchase_value
                        consolidated_entry.stt += txn.stt
                        consolidated_entry.stamp_duty += txn.stamp_duty
                        consolidated_entry.units += txn.units
                        consolidated_entry.sale_value += txn.sale_value
                        # NOTE(review): recomputed from the current txn only,
                        # not from the consolidated totals -- confirm intended.
                        consolidated_entry.sale_nav = Decimal(round(txn.sale_value / txn.units, 3))
            rows.extend(entries)
            if consolidated_entry is not None:
                rows.append(consolidated_entry)
        return rows
    def generate_112a_csv_data(self, fy):
        """Return the Schedule-112A rows for *fy* as a csv string."""
        headers = [
            "Share/Unit acquired(1a)",
            "ISIN Code(2)",
            "Name of the Share/Unit(3)",
            "No. of Shares/Units(4)",
            "Sale-price per Share/Unit(5)",
            "Full Value of Consideration(Total Sale Value)(6) = 4 * 5",
            "Cost of acquisition without indexation(7)",
            "Cost of acquisition(8)",
            "If the long term capital asset was acquired before 01.02.2018(9)",
            "Fair Market Value per share/unit as on 31st January 2018(10)",
            "Total Fair Market Value of capital asset as per section 55(2)(ac)(11) = 4 * 10",
            "Expenditure wholly and exclusively in connection with transfer(12)",
            "Total deductions(13) = 7 + 12",
            "Balance(14) = 6 - 13",
        ]
        with io.StringIO() as csv_fp:
            writer = csv.writer(csv_fp)
            writer.writerow(headers)
            for row in self.generate_112a(fy):
                writer.writerow(
                    [
                        row.acquired,
                        row.isin,
                        row.name,
                        str(row.units),
                        str(row.sale_nav),
                        str(row.sale_value),
                        str(row.actual_coa),
                        str(row.purchase_value),
                        str(row.consideration_value),
                        str(row.fmv_nav),
                        str(row.fmv),
                        str(row.expenditure),
                        str(row.deductions),
                        str(row.balance),
                    ]
                )
            csv_fp.seek(0)
            csv_data = csv_fp.read()
        return csv_data
| [
"dateutil.parser.parse",
"casparser.exceptions.GainsError",
"collections.deque",
"itertools.groupby",
"dateutil.relativedelta.relativedelta",
"csv.writer",
"datetime.date",
"casparser.exceptions.IncompleteCASError",
"io.StringIO",
"decimal.Decimal"
] | [((2008, 2020), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2015, 2020), False, 'from decimal import Decimal\n'), ((2045, 2057), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2052, 2057), False, 'from decimal import Decimal\n'), ((2088, 2100), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2095, 2100), False, 'from decimal import Decimal\n'), ((2121, 2133), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2128, 2133), False, 'from decimal import Decimal\n'), ((2160, 2172), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2167, 2172), False, 'from decimal import Decimal\n'), ((2199, 2211), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2206, 2211), False, 'from decimal import Decimal\n'), ((2231, 2243), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2238, 2243), False, 'from decimal import Decimal\n'), ((2263, 2275), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (2270, 2275), False, 'from decimal import Decimal\n'), ((3855, 3872), 'datetime.date', 'date', (['(2018)', '(1)', '(31)'], {}), '(2018, 1, 31)\n', (3859, 3872), False, 'from datetime import date\n'), ((3907, 3923), 'datetime.date', 'date', (['(2018)', '(4)', '(1)'], {}), '(2018, 4, 1)\n', (3911, 3923), False, 'from datetime import date\n'), ((5702, 5714), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (5709, 5714), False, 'from decimal import Decimal\n'), ((5849, 5861), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (5856, 5861), False, 'from decimal import Decimal\n'), ((5996, 6008), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (6003, 6008), False, 'from decimal import Decimal\n'), ((7453, 7460), 'collections.deque', 'deque', ([], {}), '()\n', (7458, 7460), False, 'from collections import deque\n'), ((7485, 7497), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (7492, 7497), False, 'from decimal import Decimal\n'), ((7521, 7533), 'decimal.Decimal', 'Decimal', 
(['(0.0)'], {}), '(0.0)\n', (7528, 7533), False, 'from decimal import Decimal\n'), ((11158, 11170), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (11165, 11170), False, 'from decimal import Decimal\n'), ((11200, 11212), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (11207, 11212), False, 'from decimal import Decimal\n'), ((12894, 12953), 'itertools.groupby', 'itertools.groupby', (['self.gains'], {'key': '(lambda x: (x.fy, x.fund))'}), '(self.gains, key=lambda x: (x.fy, x.fund))\n', (12911, 12953), False, 'import itertools\n'), ((15441, 15497), 'itertools.groupby', 'itertools.groupby', (['fy_transactions'], {'key': '(lambda x: x.fund)'}), '(fy_transactions, key=lambda x: x.fund)\n', (15458, 15497), False, 'import itertools\n'), ((1478, 1493), 'decimal.Decimal', 'Decimal', (['"""0.00"""'], {}), "('0.00')\n", (1485, 1493), False, 'from decimal import Decimal\n'), ((12996, 13008), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (13003, 13008), False, 'from decimal import Decimal\n'), ((13468, 13481), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (13479, 13481), False, 'import io\n'), ((13514, 13532), 'csv.writer', 'csv.writer', (['csv_fp'], {}), '(csv_fp)\n', (13524, 13532), False, 'import csv\n'), ((14252, 14265), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (14263, 14265), False, 'import io\n'), ((14298, 14316), 'csv.writer', 'csv.writer', (['csv_fp'], {}), '(csv_fp)\n', (14308, 14316), False, 'import csv\n'), ((18329, 18342), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (18340, 18342), False, 'import io\n'), ((18375, 18393), 'csv.writer', 'csv.writer', (['csv_fp'], {}), '(csv_fp)\n', (18385, 18393), False, 'import csv\n'), ((4278, 4300), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(1)'}), '(years=1)\n', (4291, 4300), False, 'from dateutil.relativedelta import relativedelta\n'), ((4355, 4377), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(3)'}), '(years=3)\n', (4368, 
4377), False, 'from dateutil.relativedelta import relativedelta\n'), ((9374, 9459), 'casparser.exceptions.GainsError', 'GainsError', (['f"""FIFOUnits mismatch for {self._fund.name}. Please contact support."""'], {}), "(f'FIFOUnits mismatch for {self._fund.name}. Please contact support.'\n )\n", (9384, 9459), False, 'from casparser.exceptions import IncompleteCASError, GainsError\n'), ((15656, 15673), 'datetime.date', 'date', (['(2018)', '(1)', '(31)'], {}), '(2018, 1, 31)\n', (15660, 15673), False, 'from datetime import date\n'), ((8133, 8146), 'dateutil.parser.parse', 'dateparse', (['dt'], {}), '(dt)\n', (8142, 8146), True, 'from dateutil.parser import parse as dateparse\n'), ((12181, 12301), 'casparser.exceptions.IncompleteCASError', 'IncompleteCASError', (['"""Incomplete CAS found. For gains computation, all folios should have zero opening balance"""'], {}), "(\n 'Incomplete CAS found. For gains computation, all folios should have zero opening balance'\n )\n", (12199, 12301), False, 'from casparser.exceptions import IncompleteCASError, GainsError\n'), ((16692, 16704), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (16699, 16704), False, 'from decimal import Decimal\n'), ((16734, 16746), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (16741, 16746), False, 'from decimal import Decimal\n'), ((3102, 3114), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (3109, 3114), False, 'from decimal import Decimal\n'), ((3191, 3203), 'decimal.Decimal', 'Decimal', (['(0.0)'], {}), '(0.0)\n', (3198, 3203), False, 'from decimal import Decimal\n')] |
from controller.user import User
from view.console import Console
from model.registry import Registry
def main():
    """Compose the MVC pieces (controller, model, view) and start the app."""
    # Construction order preserved: controller first, then model, then view.
    controller = User()
    controller.start_app(Console(Registry()))


if __name__ == '__main__':
    main()
| [
"view.console.Console",
"controller.user.User",
"model.registry.Registry"
] | [((127, 133), 'controller.user.User', 'User', ([], {}), '()\n', (131, 133), False, 'from controller.user import User\n'), ((149, 159), 'model.registry.Registry', 'Registry', ([], {}), '()\n', (157, 159), False, 'from model.registry import Registry\n'), ((171, 188), 'view.console.Console', 'Console', (['registry'], {}), '(registry)\n', (178, 188), False, 'from view.console import Console\n')] |
import requests
import json
from datetime import datetime, timedelta
import pytz
import re
import dateutil.parser
from pprint import pprint
from django.conf import settings
class TwitterPostScraper:
    """Extract display fields (author, metrics, caption, media) from a
    Twitter API v2 tweet-lookup response.

    Expects ``resp`` to carry ``data`` (a one-element tweet list) and
    ``includes.users``; ``includes.media`` is optional.
    """

    def __init__(self, post_link, resp):
        """
        :param post_link: the tweet URL, echoed back in the scraped result.
        :param resp: parsed JSON body of the v2 tweets lookup.
        """
        self.resp = resp
        self.data = {"link": post_link}

    def get_username(self):
        """Return the author's @handle."""
        return self.resp.get('includes').get('users')[0].get('username')

    def get_profile_img_url(self):
        """Return the author's avatar URL."""
        return self.resp.get('includes').get('users')[0].get('profile_image_url')

    def get_account_name(self):
        """Return the author's display name."""
        return self.resp.get('includes').get('users')[0].get('name')

    def get_timestamp(self):
        """Return the tweet creation time in Asia/Kolkata as '%Y-%m-%d %H:%M:%S'."""
        dt = dateutil.parser.parse(self.resp.get('data')[0].get('created_at'))
        return datetime.strftime(dt.astimezone(pytz.timezone("Asia/Kolkata")), "%Y-%m-%d %H:%M:%S")

    def get_caption(self):
        """Return the tweet text."""
        return self.resp['data'][0].get('text')

    def get_likes(self):
        """Return the like count."""
        return self.resp['data'][0].get('public_metrics').get('like_count')

    def get_retweet(self):
        """Return the retweet count."""
        return self.resp['data'][0].get('public_metrics').get('retweet_count')

    def get_replies(self):
        """Return the reply count."""
        return self.resp['data'][0].get('public_metrics').get('reply_count')

    def media_exists(self):
        """Return the media attachment list, or None when the tweet has none."""
        return self.resp.get('includes').get('media')

    def get_media(self):
        """Return ``(view_count, media_list)``.

        For a video only the first attachment is reported (with its view
        count and preview image); otherwise every attachment is listed and
        the view count is None.
        """
        url = []
        media = self.resp.get('includes').get('media')
        if media[0].get('type') == 'video':
            views = media[0].get('public_metrics').get('view_count')
            url.append({
                'media_url': media[0].get('preview_image_url'),
                'media_key': media[0].get('media_key'),
                'is_video': True,
                'view_count': views,
            })
        else:
            views = None
            for item in media:
                url.append({
                    'media_url': item.get('url'),
                    'media_key': item.get('media_key'),
                    'is_video': False,
                })
        return views, url

    def get_data(self):
        """Populate ``self.data`` with every scraped field and return it."""
        # Fix: the original called media_exists() once and get_media() twice,
        # re-walking the attachment list; parse the media a single time.
        if self.media_exists():
            total_views, media_urls = self.get_media()
        else:
            total_views, media_urls = None, []
        X = {
            'username': self.get_username(),
            'account_name': self.get_account_name(),
            'profile_image_url': self.get_profile_img_url(),
            'timestamp': self.get_timestamp(),
            'likes': self.get_likes(),
            'caption': self.get_caption(),
            'comments': self.get_replies(),
            'retweets': self.get_retweet(),
            'total_views': total_views,
            'urls': media_urls,
        }
        self.data = {**self.data, **X}
        return self.data
def tw_headers_and_params():
    """Build the bearer-auth header and query parameters for the v2 tweets endpoint."""
    headers = {
        'Authorization': 'Bearer ' + settings.TWITTER_AUTH_TOKEN,
    }
    # dict preserves insertion order, so the resulting tuple of pairs is
    # identical to the hand-written tuple-of-tuples form.
    query_fields = {
        'expansions': 'author_id,attachments.media_keys',
        'tweet.fields': 'public_metrics,created_at',
        'user.fields': 'username,verified,profile_image_url',
        'media.fields': 'public_metrics,preview_image_url,url',
    }
    return headers, tuple(query_fields.items())
def get_tw_post_details(post_link):
    """Fetch a tweet by URL and return its scraped details (or an error dict)."""
    # The numeric tweet id is the last path segment of the URL.
    num = int(post_link.strip('/').split('/')[-1])
    try:
        headers, params = tw_headers_and_params()
        resp = requests.get(
            f'https://api.twitter.com/2/tweets?ids={num}',
            headers=headers,
            params=params,
        ).json()
        scraper = TwitterPostScraper(post_link, resp)
        return {"error": None, "result": scraper.get_data()}
    except Exception as e:
        return {"error": "An error occurred!!", "result": None, "link": post_link, "msg": str(e)}
| [
"pytz.timezone"
] | [((786, 815), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (799, 815), False, 'import pytz\n')] |
import yadisk
import sys
import os
def auth():
    """Run the interactive Yandex Disk OAuth flow.

    Returns the access token on success, False when the token fails
    validation; exits the process on a bad confirmation code.
    """
    disk = yadisk.YaDisk("7d9ca04e4fe848bbb1d1c6ba4916a5b4", "b7400bc636e144d988e749333afa388b")
    print("Go to the following url: %s" % disk.get_code_url())
    confirmation_code = input("Enter the confirmation code: ")
    try:
        response = disk.get_token(confirmation_code)
    except yadisk.exceptions.BadRequestError:
        print("Bad code")
        sys.exit(1)
    disk.token = response.access_token
    return disk.token if disk.check_token() else False
def get_token():
    """Return the cached OAuth token, running the interactive auth flow
    (and caching its result) when no token file exists yet.

    :raises RuntimeError: when the interactive flow fails to produce a token.
    """
    base_path = os.path.dirname(__file__)
    token_path = base_path + '/../config/token.txt'
    try:
        # Context managers replace the open/read/close triplet and close the
        # file even when reading raises.
        with open(token_path, 'r') as f:
            token = f.read()
    except OSError:
        # No cached token yet -- run the interactive flow and cache the result.
        token = auth()
        if not token:
            # auth() returns False when the token could not be validated;
            # previously this caused f.write(False) to raise TypeError.
            raise RuntimeError("Yandex Disk authentication failed; no token to cache")
        with open(token_path, 'w') as f:
            f.write(token)
    return token
def reset_token():
    """Force a fresh interactive auth and overwrite the cached token file.

    NOTE(review): this writes '<module dir>/config/token.txt' while
    get_token() reads '<module dir>/../config/token.txt' -- one of the two
    paths is most likely wrong; confirm which location the config lives in.

    :raises RuntimeError: when the interactive flow fails to produce a token.
    """
    base_path = os.path.dirname(__file__)
    # Fix: run auth() *before* opening the file -- opening with 'w' first
    # truncated the cached token, so a failed auth destroyed it.
    token = auth()
    if not token:
        raise RuntimeError("Yandex Disk authentication failed; no token to cache")
    with open(base_path + '/config/token.txt', 'w') as f:
        f.write(token)
    return token
| [
"os.path.dirname",
"yadisk.YaDisk",
"sys.exit"
] | [((57, 146), 'yadisk.YaDisk', 'yadisk.YaDisk', (['"""7d9ca04e4fe848bbb1d1c6ba4916a5b4"""', '"""b7400bc636e144d988e749333afa388b"""'], {}), "('7d9ca04e4fe848bbb1d1c6ba4916a5b4',\n 'b7400bc636e144d988e749333afa388b')\n", (70, 146), False, 'import yadisk\n'), ((557, 582), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (572, 582), False, 'import os\n'), ((913, 938), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (928, 938), False, 'import os\n'), ((394, 405), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (402, 405), False, 'import sys\n')] |
"""
Created on Sat Oct 30 19:29:30 2021
@author: siddharthvenkatesh
This is a command line interface for scraper1830.
"""
import click
from .scraper1830 import Scraper1830
@click.group()
def cli_entry():
    """Root command group for the scraper1830 CLI."""
    pass
@cli_entry.command()
@click.option(
    "--id", prompt="Enter Game ID", help="The id for the 1830 game on 18xx.games"
)
def plot_history(id):
    """Scrape the game with the given id and plot its player history."""
    Scraper1830(id).plot_player_history()


if __name__ == "__main__":
    cli_entry()
| [
"click.group",
"click.option"
] | [((178, 191), 'click.group', 'click.group', ([], {}), '()\n', (189, 191), False, 'import click\n'), ((242, 338), 'click.option', 'click.option', (['"""--id"""'], {'prompt': '"""Enter Game ID"""', 'help': '"""The id for the 1830 game on 18xx.games"""'}), "('--id', prompt='Enter Game ID', help=\n 'The id for the 1830 game on 18xx.games')\n", (254, 338), False, 'import click\n')] |
import re, sys, time;
from mWindowsAPI import *;
from mWindowsSDK import *;
from mConsole import oConsole;
def fDumpThreadInfo(oThread, sISA, bDumpContext):
  # Dump a human-readable report for one thread: its repr, its TEB (when one
  # can be obtained) and optionally a register/flags table for the given ISA.
  oConsole.fOutput(" * Thread: %s" % (repr(oThread),));
  o0TEB = oThread.fo0GetTEB();
  if o0TEB:
    oConsole.fOutput(" * TEB:");
    for sLine in oThread.o0TEB.fasDump("Thread %d/0x%X TEB" % (oThread.uId, oThread.uId)):
      oConsole.fOutput(" | " + sLine);
  if bDumpContext:
    d0uRegisterValue_by_sbName = oThread.fd0uGetRegisterValueByName();
    if d0uRegisterValue_by_sbName:
      oConsole.fOutput(" * Registers:");
      # Filter out partial register names such as ah, al, ax, ... (and optionally eax, ... on x64 ISAs)
      rFilterRegisterNames = re.compile({
        "x64": rb"^r([a-z]+|\d+)$",
        "x86": rb"^e[a-z]+$",
      }[sISA]);
      asbFilteredRegisterNames = [
        str(sbRegisterName, 'latin1') for sbRegisterName in d0uRegisterValue_by_sbName.keys()
        if rFilterRegisterNames.match(sbRegisterName)
      ];
      # Show them in a table:
      sRegisterOutputFormat = {
        "x64": "%-3s = %16s",
        "x86": "%-3s = %8s",
      }[sISA];
      aasRegisterOrder = [
        ["ax", "bx", "cx", "dx"],
        ["si", "di", "sp", "bp"],
        ["r8", "r9", "r10", "r11"],
        ["r12", "r13", "r14", "r15"],
        ["ip"],
      ];
      for asRegisterOrder in aasRegisterOrder:
        asRegistersOutput = [];
        for sRegisterOrder in asRegisterOrder:
          for sRegisterName in asbFilteredRegisterNames:
            # Match by suffix so e.g. "ax" picks up "rax" (x64) or "eax" (x86).
            if sRegisterName.endswith(sRegisterOrder):
              uRegisterValue = d0uRegisterValue_by_sbName[bytes(sRegisterName, 'latin1')]
              asRegistersOutput.append(sRegisterOutputFormat % (sRegisterName, "%X" % uRegisterValue));
        if asRegistersOutput:
          oConsole.fOutput(" | ", " ".join(asRegistersOutput));
      asFlagNames = ["zf", "cf", "if", "af", "rf", "pf", "tf", "df", "of", "sf"];
      oConsole.fOutput(" | flags = ", " ".join([
        "%s:%d" % (sFlagName, d0uRegisterValue_by_sbName[bytes(sFlagName, 'latin1')])
        for sFlagName in asFlagNames
      ]));
def faoGetAndDumpProcessThreads(oTestProcess):
  # Ask the process for its threads, logging the call and the resulting ids.
  oConsole.fStatus(" * Calling <cProcess #%X>.faoGetThreads()..." % (oTestProcess.uId,));
  aoThreads = oTestProcess.faoGetThreads();
  sThreadIds = ", ".join(["0x%X" % (oThread.uId,) for oThread in aoThreads]);
  oConsole.fOutput(" + <cProcess #%X>.faoGetThreads() => [%s]" % (oTestProcess.uId, sThreadIds,));
  return aoThreads;
def fTestThread(sComSpec, sThisProcessISA, sExpectedChildProcessISA):
  # End-to-end test of thread enumeration, creation, termination and waiting,
  # using a suspended child instance of the command interpreter (sComSpec).
  oConsole.fOutput("=== Testing thread related functions ", sPadding = "=");
  oConsole.fOutput("* This process ISA: %s, test thread ISA: %s" % (sThisProcessISA, sExpectedChildProcessISA));
  oConsole.fStatus(" * Calling cProcess.foCreateForBinaryPath(%s, bSuspended = True)..." % (repr(sComSpec),));
  oTestProcess = cConsoleProcess.foCreateForBinaryPath(sComSpec, bSuspended = True);
  try:
    oConsole.fOutput(" + cProcess.foCreateForBinaryPath(%s, bSuspended = True) = <cProcess #%X>" % (repr(sComSpec), oTestProcess.uId));
    time.sleep(1); # Allow process to start
    # cProcess
    assert oTestProcess.sISA == sExpectedChildProcessISA, \
      "cProcess.sISA == %s instead of %s" % (oTestProcess.sISA, sExpectedChildProcessISA);
    # List all threads in process
    aoThreads = faoGetAndDumpProcessThreads(oTestProcess);
    fDumpThreadInfo(aoThreads[0], oTestProcess.sISA, bDumpContext = True);
    # Create an additional test thread
    oConsole.fStatus(" * Calling <cProcess #%X>.fuCreateThreadForAddress(0, bSuspended = True)..." % (oTestProcess.uId,));
    uTestThreadId = oTestProcess.fuCreateThreadForAddress(0, bSuspended = True);
    oConsole.fOutput(" + <cProcess #%X>.fuCreateThreadForAddress(0, bSuspended = True) = 0x%X" % (oTestProcess.uId, uTestThreadId));
    # The new thread must show up in a fresh thread enumeration.
    aoThreads = faoGetAndDumpProcessThreads(oTestProcess);
    assert uTestThreadId in [oThread.uId for oThread in aoThreads], \
      "Thread 0x%X not found in list of threads!?" % uTestThreadId;
    oConsole.fStatus(" * Calling <cProcess #%X>.foGetThreadForId(0x%X)..." % (oTestProcess.uId, uTestThreadId));
    oTestThread = oTestProcess.foGetThreadForId(uTestThreadId);
    oConsole.fOutput(" + <cProcess #%X>.foGetThreadForId(0x%X) = <cThread #%0X>" % (oTestProcess.uId, uTestThreadId, oTestThread.uId));
    oConsole.fOutput(" ", repr(oTestThread));
    fDumpThreadInfo(oTestThread, oTestProcess.sISA, bDumpContext = True);
    # Terminating the (suspended) test thread must succeed and be waitable.
    oConsole.fStatus(" * Calling <cThread #%0X>.fbTerminate()..." % uTestThreadId);
    assert oTestThread.fbTerminate(), \
      "Expected to be able to terminate the thread";
    oConsole.fOutput(" + <cThread #%X>.fbTerminate() = True" % uTestThreadId);
    oConsole.fOutput(" ", repr(oTestThread));
    oConsole.fStatus(" * Calling <cThread #%X>.fbWait(1)..." % uTestThreadId);
    assert oTestThread.fbWait(1), \
      "Expected to be able to wait for the thread to terminate in 1 second!";
    oConsole.fOutput(" + <cThread #%X>.fbWait(1) = True" % uTestThreadId);
    oConsole.fOutput(" ", repr(oTestThread));
    fDumpThreadInfo(oTestThread, oTestProcess.sISA, bDumpContext = True);
    # We will have to wait a bit for the terminated thread to be removed from the process.
    aoThreads = faoGetAndDumpProcessThreads(oTestProcess);
    assert uTestThreadId not in [oThread.uId for oThread in aoThreads], \
      "Thread 0x%X found in list of threads after it was terminated!?" % uTestThreadId;
    # Terminate and wait for every remaining thread in the process.
    for oThread in aoThreads:
      assert not oThread.bIsTerminated, \
        "Thread 0x%X is already terminated!?" % (oThread.uId,);
      oConsole.fStatus(" * Calling <cThread #%X>.fbTerminate()..." % (oThread.uId,));
      assert oThread.fbTerminate(), \
        "Cannot terminated thread #%X!" % (oThread.uId,);
      oConsole.fOutput(" + <cThread #%X>.fbTerminate() = True" % (oThread.uId,));
      oConsole.fStatus(" * Calling <cThread #%X>.fbWait(1)..." % (oThread.uId,));
      assert oThread.fbWait(1), \
        "Cannot terminated thread #%X!" % (oThread.uId,);
      oConsole.fOutput(" + <cThread #%X>.fbWait(1) = True" % (oThread.uId,));
      assert oThread.bIsTerminated, \
        "Thread was not terminated #%X!" % (oThread.uId,);
    # With every thread gone, the process must have no threads and be dead.
    oConsole.fStatus(" * Calling <cProcess>.faoGetThreads()...");
    aoThreads = faoGetAndDumpProcessThreads(oTestProcess);
    assert len(aoThreads) == 0, \
      "Threads exist after they were terminated!?";
    assert oTestProcess.bIsTerminated, \
      "Test process was not terminated!";
    # Opening a non-existing thread must raise...
    try:
      fohOpenForThreadIdAndDesiredAccess(0, THREAD_ALL_ACCESS);
    except:
      pass;
    else:
      raise AssertionError("Opening a non-existing thread somehow worked!?");
    # ...while the bMustExist = False variant must return None instead.
    ohThread = foh0OpenForThreadIdAndDesiredAccess(0, THREAD_ALL_ACCESS, bMustExist = False);
    assert ohThread is None, \
      "Opening a non-existing thread somehow worked!?";
  finally:
    # Always clean up the child process, even if an assert above fired.
    if oTestProcess and oTestProcess.bIsRunning:
      oTestProcess.fbTerminate();
"mConsole.oConsole.fOutput",
"mConsole.oConsole.fStatus",
"time.sleep",
"re.compile"
] | [((2195, 2287), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cProcess #%X>.faoGetThreads()...' % (oTestProcess.uId,))"], {}), "(' * Calling <cProcess #%X>.faoGetThreads()...' % (\n oTestProcess.uId,))\n", (2211, 2287), False, 'from mConsole import oConsole\n'), ((2572, 2643), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (['"""=== Testing thread related functions """'], {'sPadding': '"""="""'}), "('=== Testing thread related functions ', sPadding='=')\n", (2588, 2643), False, 'from mConsole import oConsole\n'), ((2649, 2763), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["('* This process ISA: %s, test thread ISA: %s' % (sThisProcessISA,\n sExpectedChildProcessISA))"], {}), "('* This process ISA: %s, test thread ISA: %s' % (\n sThisProcessISA, sExpectedChildProcessISA))\n", (2665, 2763), False, 'from mConsole import oConsole\n'), ((262, 292), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (['""" * TEB:"""'], {}), "(' * TEB:')\n", (278, 292), False, 'from mConsole import oConsole\n'), ((3105, 3118), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3115, 3118), False, 'import re, sys, time\n'), ((3534, 3662), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cProcess #%X>.fuCreateThreadForAddress(0, bSuspended = True)...'\n % (oTestProcess.uId,))"], {}), "(\n ' * Calling <cProcess #%X>.fuCreateThreadForAddress(0, bSuspended = True)...'\n % (oTestProcess.uId,))\n", (3550, 3662), False, 'from mConsole import oConsole\n'), ((3739, 3877), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' + <cProcess #%X>.fuCreateThreadForAddress(0, bSuspended = True) = 0x%X' %\n (oTestProcess.uId, uTestThreadId))"], {}), "(\n ' + <cProcess #%X>.fuCreateThreadForAddress(0, bSuspended = True) = 0x%X'\n % (oTestProcess.uId, uTestThreadId))\n", (3755, 3877), False, 'from mConsole import oConsole\n'), ((4082, 4195), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cProcess #%X>.foGetThreadForId(0x%X)...' 
% (oTestProcess.uId,\n uTestThreadId))"], {}), "(' * Calling <cProcess #%X>.foGetThreadForId(0x%X)...' % (\n oTestProcess.uId, uTestThreadId))\n", (4098, 4195), False, 'from mConsole import oConsole\n'), ((4260, 4401), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' + <cProcess #%X>.foGetThreadForId(0x%X) = <cThread #%0X>' % (\n oTestProcess.uId, uTestThreadId, oTestThread.uId))"], {}), "(\n ' + <cProcess #%X>.foGetThreadForId(0x%X) = <cThread #%0X>' % (\n oTestProcess.uId, uTestThreadId, oTestThread.uId))\n", (4276, 4401), False, 'from mConsole import oConsole\n'), ((4525, 4604), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cThread #%0X>.fbTerminate()...' % uTestThreadId)"], {}), "(' * Calling <cThread #%0X>.fbTerminate()...' % uTestThreadId)\n", (4541, 4604), False, 'from mConsole import oConsole\n'), ((4705, 4779), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' + <cThread #%X>.fbTerminate() = True' % uTestThreadId)"], {}), "(' + <cThread #%X>.fbTerminate() = True' % uTestThreadId)\n", (4721, 4779), False, 'from mConsole import oConsole\n'), ((4839, 4913), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cThread #%X>.fbWait(1)...' % uTestThreadId)"], {}), "(' * Calling <cThread #%X>.fbWait(1)...' 
% uTestThreadId)\n", (4855, 4913), False, 'from mConsole import oConsole\n'), ((5035, 5105), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' + <cThread #%X>.fbWait(1) = True' % uTestThreadId)"], {}), "(' + <cThread #%X>.fbWait(1) = True' % uTestThreadId)\n", (5051, 5105), False, 'from mConsole import oConsole\n'), ((6319, 6380), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (['""" * Calling <cProcess>.faoGetThreads()..."""'], {}), "(' * Calling <cProcess>.faoGetThreads()...')\n", (6335, 6380), False, 'from mConsole import oConsole\n'), ((391, 425), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' | ' + sLine)"], {}), "(' | ' + sLine)\n", (407, 425), False, 'from mConsole import oConsole\n'), ((558, 594), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (['""" * Registers:"""'], {}), "(' * Registers:')\n", (574, 594), False, 'from mConsole import oConsole\n'), ((729, 796), 're.compile', 're.compile', (["{'x64': b'^r([a-z]+|\\\\d+)$', 'x86': b'^e[a-z]+$'}[sISA]"], {}), "({'x64': b'^r([a-z]+|\\\\d+)$', 'x86': b'^e[a-z]+$'}[sISA])\n", (739, 796), False, 'import re, sys, time\n'), ((5698, 5777), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cThread #%X>.fbTerminate()...' % (oThread.uId,))"], {}), "(' * Calling <cThread #%X>.fbTerminate()...' % (oThread.uId,))\n", (5714, 5777), False, 'from mConsole import oConsole\n'), ((5883, 5958), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' + <cThread #%X>.fbTerminate() = True' % (oThread.uId,))"], {}), "(' + <cThread #%X>.fbTerminate() = True' % (oThread.uId,))\n", (5899, 5958), False, 'from mConsole import oConsole\n'), ((5966, 6041), 'mConsole.oConsole.fStatus', 'oConsole.fStatus', (["(' * Calling <cThread #%X>.fbWait(1)...' % (oThread.uId,))"], {}), "(' * Calling <cThread #%X>.fbWait(1)...' 
% (oThread.uId,))\n", (5982, 6041), False, 'from mConsole import oConsole\n'), ((6143, 6214), 'mConsole.oConsole.fOutput', 'oConsole.fOutput', (["(' + <cThread #%X>.fbWait(1) = True' % (oThread.uId,))"], {}), "(' + <cThread #%X>.fbWait(1) = True' % (oThread.uId,))\n", (6159, 6214), False, 'from mConsole import oConsole\n')] |
# Snafu: Snake Functions - OpenShift Executor
import requests
import os
import configparser
import subprocess
container = "jszhaw/snafu"
# Cache of tenant name -> base URL of that tenant's deployed snafu service.
endpoints = {}


def executecontrol(flaskrequest, tenant):
    """Forward a function invocation request to the tenant's snafu service.

    On first use for a tenant this logs in to OpenShift, selects the
    configured project, deploys a per-tenant snafu application and caches
    the resulting service endpoint in `endpoints`.

    Returns the decoded response body on HTTP 200, otherwise None.
    """
    if tenant not in endpoints:
        username = os.getenv("OPENSHIFT_USERNAME")
        password = os.getenv("OPENSHIFT_PASSWORD")
        # BUG FIX: the project name was previously assigned to `password`,
        # so the `project` reference below raised NameError.
        project = os.getenv("OPENSHIFT_PROJECT")
        if not username or not password or not project:
            return
        os.system("oc login https://console.appuio.ch/ --username={} --password={}".format(username, password))
        os.system("oc project {}".format(project))
        # Use the module-level `container` constant instead of repeating
        # the image name inline.
        os.system("oc new-app --name snafu-{} {}".format(tenant, container))
        # BUG FIX: quote the space delimiter for `cut` (the adjacent string
        # literals previously concatenated to `cut -d  -f 3`), and read the
        # captured output from CompletedProcess.stdout -- the
        # CompletedProcess object itself has no .decode().
        p = subprocess.run("oc status | grep svc/snafu-{} | cut -d ' ' -f 3".format(tenant), shell=True, stdout=subprocess.PIPE)
        endpoints[tenant] = "http://{}".format(p.stdout.decode("utf-8").strip())
    # FIXME: mounting the tenant's volume container to /opt/functions-local
    endpoint = endpoints[tenant]
    headers = {}
    headers["X-Amz-Date"] = flaskrequest.headers.get("X-Amz-Date")
    data = flaskrequest.data.decode("utf-8")
    # method=r.method -> requests.post
    reply = requests.post(endpoint + flaskrequest.path, data=data, headers=headers)
    if reply.status_code == 200:
        return reply.content.decode("utf-8")
    return None
| [
"requests.post",
"os.getenv"
] | [((1097, 1168), 'requests.post', 'requests.post', (['(endpoint + flaskrequest.path)'], {'data': 'data', 'headers': 'headers'}), '(endpoint + flaskrequest.path, data=data, headers=headers)\n', (1110, 1168), False, 'import requests\n'), ((240, 271), 'os.getenv', 'os.getenv', (['"""OPENSHIFT_USERNAME"""'], {}), "('OPENSHIFT_USERNAME')\n", (249, 271), False, 'import os\n'), ((285, 316), 'os.getenv', 'os.getenv', (['"""OPENSHIFT_PASSWORD"""'], {}), "('OPENSHIFT_PASSWORD')\n", (294, 316), False, 'import os\n'), ((330, 360), 'os.getenv', 'os.getenv', (['"""OPENSHIFT_PROJECT"""'], {}), "('OPENSHIFT_PROJECT')\n", (339, 360), False, 'import os\n')] |
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for Sonnet optimizers."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from root_gnn.src import types
import tensorflow as tf
from typing import Sequence
def check_updates_parameters(updates: Sequence[types.ParameterUpdate],
                             parameters: Sequence[tf.Variable]):
  """Raises ValueError unless `updates` and `parameters` are consistent."""
  if len(parameters) != len(updates):
    raise ValueError("`updates` and `parameters` must be the same length.")
  if len(parameters) == 0:
    raise ValueError("`parameters` cannot be empty.")
  if not any(update is not None for update in updates):
    raise ValueError("No updates provided for any parameter.")
def check_same_dtype(update: types.ParameterUpdate, parameter: tf.Variable):
  """Raises ValueError when `update` and `parameter` have different dtypes."""
  if update.dtype != parameter.dtype:
    message = "DType of update {!r} is not equal to that of parameter {!r}"
    raise ValueError(message.format(update, parameter))
def deduplicate_indexed_slices(indexed_slice: tf.IndexedSlices):
  """Merges rows of `values` that share an index by summing them.

  Args:
    indexed_slice: An indexed slice with potentially duplicated indices.

  Returns:
    A `(summed_values, unique_indices)` pair where `unique_indices` contains
    each index exactly once and `summed_values` holds the sum of the `values`
    slices that were associated with that index.
  """
  indices = indexed_slice.indices
  values = indexed_slice.values
  unique_indices, positions = tf.unique(indices)
  num_unique = tf.shape(unique_indices)[0]
  summed_values = tf.math.unsorted_segment_sum(values, positions, num_unique)
  return summed_values, unique_indices
return summed_values, unique_indices | [
"tensorflow.unique",
"tensorflow.shape"
] | [((2180, 2198), 'tensorflow.unique', 'tf.unique', (['indices'], {}), '(indices)\n', (2189, 2198), True, 'import tensorflow as tf\n'), ((2322, 2346), 'tensorflow.shape', 'tf.shape', (['unique_indices'], {}), '(unique_indices)\n', (2330, 2346), True, 'import tensorflow as tf\n')] |
from flask_restplus import fields
from apis.v1.v1_api import api
# REST namespace for all movie-related endpoints.
movie_ns = api.namespace('movies', description='Movie Module')
# Serialization model for a single movie record.
# NOTE: the wire field '99popularity' maps to the attribute
# 'ninety_nine_popularity' on the underlying object.
movie = movie_ns.model('Movie', {
    'id': fields.Integer(required=True, description='Movie id'),
    '99popularity': fields.Float(attribute='ninety_nine_popularity', required=True),
    'name': fields.String(required=True, description='Movie name'),
    'director': fields.String(required=True, description='Director name of the movie'),
    'imdb_score': fields.Float(required=True, description='IMDB rating of the movie'),
    # Genres are rendered as a list of genre names (each genre's 'name').
    'genres': fields.List(fields.String(attribute='name'))
})
# Response envelope for a single movie: status flag, payload, message.
movie_serializer = movie_ns.model('MovieSerializer', {
    'status': fields.Boolean(required=True),
    'data': fields.Nested(movie, skip_none=True),
    'message': fields.String(),
})
# Response envelope for a list of movies.
movies_serializer = movie_ns.model('MoviesSerializer', {
    'status': fields.Boolean(required=True),
    'message': fields.String(skip_none=True),
    'data': fields.List(fields.Nested(movie, skip_none=True), required=True)
})
"flask_restplus.fields.Nested",
"flask_restplus.fields.Integer",
"flask_restplus.fields.String",
"flask_restplus.fields.Float",
"flask_restplus.fields.Boolean",
"apis.v1.v1_api.api.namespace"
] | [((78, 129), 'apis.v1.v1_api.api.namespace', 'api.namespace', (['"""movies"""'], {'description': '"""Movie Module"""'}), "('movies', description='Movie Module')\n", (91, 129), False, 'from apis.v1.v1_api import api\n'), ((176, 229), 'flask_restplus.fields.Integer', 'fields.Integer', ([], {'required': '(True)', 'description': '"""Movie id"""'}), "(required=True, description='Movie id')\n", (190, 229), False, 'from flask_restplus import fields\n'), ((251, 314), 'flask_restplus.fields.Float', 'fields.Float', ([], {'attribute': '"""ninety_nine_popularity"""', 'required': '(True)'}), "(attribute='ninety_nine_popularity', required=True)\n", (263, 314), False, 'from flask_restplus import fields\n'), ((328, 382), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(True)', 'description': '"""Movie name"""'}), "(required=True, description='Movie name')\n", (341, 382), False, 'from flask_restplus import fields\n'), ((400, 470), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(True)', 'description': '"""Director name of the movie"""'}), "(required=True, description='Director name of the movie')\n", (413, 470), False, 'from flask_restplus import fields\n'), ((490, 557), 'flask_restplus.fields.Float', 'fields.Float', ([], {'required': '(True)', 'description': '"""IMDB rating of the movie"""'}), "(required=True, description='IMDB rating of the movie')\n", (502, 557), False, 'from flask_restplus import fields\n'), ((691, 720), 'flask_restplus.fields.Boolean', 'fields.Boolean', ([], {'required': '(True)'}), '(required=True)\n', (705, 720), False, 'from flask_restplus import fields\n'), ((734, 770), 'flask_restplus.fields.Nested', 'fields.Nested', (['movie'], {'skip_none': '(True)'}), '(movie, skip_none=True)\n', (747, 770), False, 'from flask_restplus import fields\n'), ((787, 802), 'flask_restplus.fields.String', 'fields.String', ([], {}), '()\n', (800, 802), False, 'from flask_restplus import fields\n'), ((880, 909), 
'flask_restplus.fields.Boolean', 'fields.Boolean', ([], {'required': '(True)'}), '(required=True)\n', (894, 909), False, 'from flask_restplus import fields\n'), ((926, 955), 'flask_restplus.fields.String', 'fields.String', ([], {'skip_none': '(True)'}), '(skip_none=True)\n', (939, 955), False, 'from flask_restplus import fields\n'), ((585, 616), 'flask_restplus.fields.String', 'fields.String', ([], {'attribute': '"""name"""'}), "(attribute='name')\n", (598, 616), False, 'from flask_restplus import fields\n'), ((981, 1017), 'flask_restplus.fields.Nested', 'fields.Nested', (['movie'], {'skip_none': '(True)'}), '(movie, skip_none=True)\n', (994, 1017), False, 'from flask_restplus import fields\n')] |
import logging
from .base import BaseTransaction
logger = logging.getLogger(__name__)


class VoteTransaction(BaseTransaction):
    """Transaction that casts or revokes a wallet's vote for a delegate."""

    def can_be_applied_to_wallet(self, wallet, wallet_manager, block_height):
        """Return True when this vote/unvote is valid for the wallet."""
        vote = self.asset["votes"][0]
        delegate = vote[1:]
        if vote.startswith("+"):
            # Casting a vote: the wallet must not already vote.
            if wallet.vote:
                logger.warning("Wallet already votes")
                return False
        else:
            # Revoking a vote: the wallet must currently vote for `delegate`.
            if not wallet.vote:
                logger.error("Wallet hasn't voted yet")
                return False
            if wallet.vote != delegate:
                logger.error("Wallet vote doesn't match")
                return False
        if not wallet_manager.is_delegate(delegate):
            logger.error("Only delegates can be voted for")
            return False
        return super().can_be_applied_to_wallet(wallet, wallet_manager, block_height)

    def apply_to_sender_wallet(self, wallet):
        """Record (for '+') or clear (for '-') the wallet's vote."""
        super().apply_to_sender_wallet(wallet)
        vote = self.asset["votes"][0]
        wallet.vote = vote[1:] if vote.startswith("+") else None

    def revert_for_sender_wallet(self, wallet):
        """Undo apply_to_sender_wallet: restore the previous vote state."""
        super().revert_for_sender_wallet(wallet)
        vote = self.asset["votes"][0]
        wallet.vote = None if vote.startswith("+") else vote[1:]

    def validate_for_transaction_pool(self, pool, transactions):
        """Allow at most one pending vote transaction per sender."""
        if not pool.sender_has_transactions_of_type(self):
            return None
        return (
            "Sender {} already has a transaction of type {} in the "
            "pool".format(self.sender_public_key, self.type)
        )
| [
"logging.getLogger"
] | [((60, 87), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (77, 87), False, 'import logging\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reach force torque sensor arm element used for configuration."""
import dataclasses
from pyreach.gyms import reach_element
@dataclasses.dataclass(frozen=True)
class ReachForceTorqueSensor(reach_element.ReachElement):
  """Configuration element for a Reach force torque sensor arm.

  Attributes:
    reach_name: The underlying Reach device type name of the force torque
      sensor. May be empty.
    is_synchronous: When True, the next Gym observation synchronizes every
      observation element that has this flag set; when False, the next
      observation is asynchronous. Optional; defaults to False.
  """
  is_synchronous: bool = False
| [
"dataclasses.dataclass"
] | [((707, 741), 'dataclasses.dataclass', 'dataclasses.dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (728, 741), False, 'import dataclasses\n')] |
'''
Create format report from json to html using jinja2
'''
import json
import sys
import argparse
from jinja2 import Template
def get_template(argument: str):
    '''
    Build a jinja2 Template from the named file, or from stdin when '-'.
    '''
    if argument != '-':
        return Template(open(argument).read())
    return Template(sys.stdin.read())
def get_entries(argument: str):
    '''
    Return a readable text stream: stdin for '-', otherwise the opened file.
    '''
    return sys.stdin if argument == '-' else open(argument, mode='rt')
def main():
    '''
    Parse command line arguments, render the template with the parsed
    comparison inputs and print the result to stdout.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-i',
        '--input',
        help='file containing result of comparison,' +
             ' use \'-\' for stdin (default)',
        metavar=('<name>', '<file>'),
        nargs=2,
        action='append'
    )
    parser.add_argument(
        '-t',
        '--template',
        help='file containing template' +
             ', use \'-\' for stdin (default:\'metrics.jinja.txt\')',
        metavar='<template file>',
        default='metrics.jinja.txt'
    )
    args = parser.parse_args()
    # Verify args. Use parser.error() instead of assert: asserts are
    # stripped under `python -O`, and error() gives a proper usage message.
    if args.input is None:
        parser.error('please specify at least one input')
    # BUG FIX: stdin usage must be counted on the <file> part (pair[1]) of
    # each "<name> <file>" pair -- get_entries() below reads pair[1], while
    # the old code inspected pair[0] (the name).
    use_stdin = sum(1 for pair in args.input if pair[1] == '-')
    if args.template == '-':
        use_stdin += 1
    if use_stdin > 1:
        parser.error('stdin used multiple times')
    template = get_template(args.template)
    template_args = {}
    for name, source in args.input:
        template_args[name] = json.load(get_entries(source))
    print(template.render(**template_args))


if __name__ == '__main__':
    main()
| [
"sys.stdin.read",
"argparse.ArgumentParser"
] | [((549, 574), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (572, 574), False, 'import argparse\n'), ((251, 267), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (265, 267), False, 'import sys\n')] |
# Copyright 2015-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Implementation of Fujitsu ML2 Mechanism driver for ML2 Plugin."""
from networking_fujitsu._i18n import _
from networking_fujitsu.ml2.common import utils
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.plugins.ml2 import api
from neutron.plugins.ml2.common import exceptions as ml2_exc
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
LOG = logging.getLogger(__name__)
# Dotted-path prefix for driver classes shipped with networking_fujitsu.
FUJITSU_DRIVER = 'networking_fujitsu.ml2.'
# Full import path of the C-Fabric CLI driver instantiated in initialize().
CFAB_DRIVER = FUJITSU_DRIVER + 'cfab.cfabdriver.CFABdriver'
# Valid VFAB ids: the literal "default", or an integer in [1, 3000]
# (see _parse_physical_networks).
VFAB_ID_DEFAULT = "default"
VFAB_ID_MIN = 1
VFAB_ID_MAX = 3000
# Only VLAN-typed networks are handled by this mechanism driver.
_SUPPORTED_NET_TYPES = ['vlan']
# oslo.config option group name used for all options registered below.
ML2_FUJITSU_GROUP = "fujitsu_cfab"
ML2_FUJITSU = [
    cfg.StrOpt(
        'address', default='',
        help=_("The address of the C-Fabric to telnet to.")),
    cfg.StrOpt(
        'username', default='admin',
        help=_("The C-Fabric username to use.")),
    cfg.StrOpt(
        'password', default='<PASSWORD>', secret=True,
        help=_("The C-Fabric password to use.")),
    cfg.ListOpt(
        'physical_networks', default='',
        help=_("List of <physical_network>:<vfab_id> tuples specifying "
               "physical_network names and corresponding vfab ids.")),
    cfg.BoolOpt(
        'share_pprofile', default=False,
        help=_("Whether to share a C-Fabric pprofile among Neutron "
               "ports using the same VLAN ID.")),
    cfg.StrOpt(
        'pprofile_prefix', default='',
        help=_("The prefix string for pprofile name.")),
    cfg.BoolOpt(
        'save_config', default=True,
        help=_("Whether to save configuration."))]
# Register the [fujitsu_cfab] options at import time.
cfg.CONF.register_opts(ML2_FUJITSU, ML2_FUJITSU_GROUP)
class CFABMechanismDriver(api.MechanismDriver):
    """ML2 Mechanism driver for Fujitsu C-Fabric switches.
    This is the upper layer driver class that interfaces to lower layer (CLI)
    below.
    """
    def __init__(self):
        # Lower-layer CLI driver instance; created in initialize().
        self._driver = None
        # Mapping of physical_network name -> vfab_id, filled from config.
        self._physical_networks = {}
        # Switch connection info (address/username/password) from config.
        self._switch = None
        self.initialize()
    def initialize(self):
        """Initialize of variables needed by this class."""
        self._parse_physical_networks()
        self._switch = {
            'address': cfg.CONF.fujitsu_cfab.address,
            'username': cfg.CONF.fujitsu_cfab.username,
            'password': cfg.CONF.fujitsu_cfab.password
        }
        # The switch address is mandatory; fail fast at driver start-up.
        if not self._switch['address']:
            raise cfg.RequiredOptError(
                'address', cfg.OptGroup(ML2_FUJITSU_GROUP))
        self._driver = importutils.import_object(CFAB_DRIVER, cfg.CONF)
    def _parse_physical_networks(self):
        """Interpret physical_networks as physical_network:vfab_id entries.

        Raises MechanismDriverError when an entry is not of the form
        "<physical_network>:<vfab_id>" or when the vfab_id is neither
        "default" nor an integer in [VFAB_ID_MIN, VFAB_ID_MAX].
        """
        method = "_parse_physical_networks"
        for entry in cfg.CONF.fujitsu_cfab.physical_networks:
            try:
                physical_network, vfab_id = entry.split(':')
            except ValueError:
                LOG.exception("Illegal physical_networks entry")
                raise ml2_exc.MechanismDriverError(method=method)
            if not (vfab_id == VFAB_ID_DEFAULT or
                    VFAB_ID_MIN <= int(vfab_id) <= VFAB_ID_MAX):
                LOG.exception("Illegal VFAB in physical_networks entry")
                raise ml2_exc.MechanismDriverError(method=method)
            self._physical_networks[physical_network] = vfab_id
    def _get_vfab_id(self, physical_network):
        """Get vfab_id corresponding to the physical_network.

        Raises MechanismDriverError when the physical_network was not
        configured in [fujitsu_cfab] physical_networks.
        """
        try:
            vfab_id = self._physical_networks[physical_network]
        except KeyError:
            LOG.exception(
                "Network not found in the configured physical network")
            raise ml2_exc.MechanismDriverError(method="_get_vfab_id")
        return vfab_id
    @log_helpers.log_method_call
    def create_port_postcommit(self, mech_context):
        """Calls setup process for C-Fabric.

        Baremetal ports are skipped here (they are handled in bind_port);
        otherwise the port's MAC address is associated to the network's
        VLAN/VFAB via the lower-layer driver.
        """
        port = mech_context.current
        network = mech_context.network
        # Baremetal ports get their VLAN set up during bind_port instead.
        if utils.is_baremetal(port):
            return
        if not is_supported(network):
            return
        method = 'create_port_postcommit'
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        mac = port['mac_address']
        vfab_id = self._get_vfab_id(utils.get_physical_network(network))
        vlanid = utils.get_segmentation_id(network)
        try:
            self._driver.associate_mac_to_network(
                self._switch['address'],
                self._switch['username'],
                self._switch['password'],
                vfab_id,
                vlanid,
                mac
            )
        except Exception:
            LOG.exception("Failed to associate mac %s", mac)
            raise ml2_exc.MechanismDriverError(method=method)
        LOG.info(
            "created port (postcommit): port_id=%(port_id)s "
            "network_id=%(network_id)s tenant_id=%(tenant_id)s",
            {'port_id': port_id,
             'network_id': network_id, 'tenant_id': tenant_id})
    @log_helpers.log_method_call
    def delete_port_postcommit(self, mech_context):
        """Calls cleanup process for C-Fabric.

        Baremetal ports get their VLAN (and LAG, if any) cleared on the
        physical switch ports; for other ports the MAC address is
        dissociated from the portprofile.
        """
        method = 'delete_port_postcommit'
        port = mech_context.current
        network = mech_context.network
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        if utils.is_baremetal(port):
            if validate_baremetal_deploy(mech_context):
                params = self.get_physical_net_params(mech_context)
                try:
                    self.clear_vlan(params)
                except Exception:
                    LOG.exception("Failed to clear VLAN(%s).",
                                  params['vlanid'])
                    raise ml2_exc.MechanismDriverError(method=method)
        elif not is_supported(network):
            # Unsupported network type: nothing was set up, nothing to clean.
            pass
        else:
            physical_network = utils.get_physical_network(network)
            vlanid = utils.get_segmentation_id(network)
            vfab_id = self._get_vfab_id(physical_network)
            mac = port['mac_address']
            try:
                self._driver.dissociate_mac_from_network(
                    self._switch['address'],
                    self._switch['username'],
                    self._switch['password'],
                    vfab_id,
                    vlanid,
                    mac)
            except Exception:
                LOG.exception("Failed to dissociate MAC %s", mac)
                raise ml2_exc.MechanismDriverError(method=method)
        LOG.info(
            "delete port (postcommit): port_id=%(p_id)s "
            "network_id=%(net_id)s tenant_id=%(tenant_id)s",
            {'p_id': port_id, 'net_id': network_id, 'tenant_id': tenant_id})
    @log_helpers.log_method_call
    def setup_vlan(self, params):
        """Provision VLAN with specified port(s).
        This method will select driver's method.
        Case1: param['lag'] is True
                   This method calls 'setup_vlan_with_lag' and sets up
                   both VLAN and LAG.
        Case2: param['lag'] is False
                   This method calls 'setup_vlan' and sets up only VLAN.
        @param params a dictionary of the return value for
                      get_physical_net_params
        @return None
        """
        target = 'setup_vlan_with_lag' if params['lag'] else 'setup_vlan'
        try:
            setup_method = getattr(self._driver, target)
            # This plugin supposes 1 C-Fabric(fabric_id) management.
            # Therefore, not to identify target IP address by using
            # switch_info(mac_address).
            LOG.info("call %(target)s. params: %(params)s",
                     {'target': target, 'params': params})
            setup_method(
                params['address'],
                params['username'],
                params['password'],
                params['vfab_id'],
                params['vlanid'],
                params['ports'],
                params['mac'],
            )
        except Exception:
            LOG.exception("Failed to setup VLAN(%s)", params['vlanid'])
            raise ml2_exc.MechanismDriverError(method='setup_vlan')
    @log_helpers.log_method_call
    def clear_vlan(self, params):
        """Clear VLAN with specified port(s).
        This method will select driver's method.
        Case1: param['lag'] is True
                   This method calls 'clear_vlan_with_lag' and clears
                   both VLAN and LAG.
        Case2: param['lag'] is False
                   This method calls 'clear_vlan' and clears only VLAN.
        @param params A dictionary of the return value for
                      get_physical_net_params
        @return None
        """
        target = 'clear_vlan_with_lag' if params['lag'] else 'clear_vlan'
        try:
            clear_method = getattr(self._driver, target)
            # This plugin supposes 1 C-Fabric(fabric_id) management.
            # Therefore, not to identify target IP address by using
            # switch_info(mac_address).
            LOG.info("Call %(target)s. params: %(params)s",
                     {'target': target, 'params': params})
            clear_method(
                params['address'],
                params['username'],
                params['password'],
                params['vfab_id'],
                params['vlanid'],
                params['ports'],
                params['mac'],
            )
        except Exception:
            LOG.exception("Failed to clear VLAN(%s)", params['vlanid'])
            # NOTE(review): `target` is passed positionally here, unlike
            # every other raise in this class which uses method=... --
            # confirm MechanismDriverError's first positional parameter
            # is the method name.
            raise ml2_exc.MechanismDriverError(target)
    @log_helpers.log_method_call
    def get_physical_net_params(self, mech_context):
        """Validate physical network parameters for baremetal deployment.
        Validates network & port params and returns dictionary.
        'local_link_information' is a dictionary from Ironic-port. This value
        includes as follows:
            'switch_id': A string of switch's MAC address
                         This value is equal to 'chassis_id' from LLDP TLV.
            'port_id': A string of switch interface name.
                         This value is equal to 'port_id' from LLDP TLV.
            'switch_info': A string of switch name.
                         This value is equal to 'system_name' from LLDP TLV.
        @param mech_context a Context instance
        @return A dictionary parameters for baremetal deploy
        """
        port = mech_context.current
        network = mech_context.network
        local_link_info = utils.get_physical_connectivity(port)
        return {
            "address": self._switch['address'],
            "username": self._switch['username'],
            "password": self._switch['password'],
            # Comma-separated switch interface names from the LLDP info.
            "ports": ','.join(p['port_id'] for p in local_link_info),
            "vfab_id": self._get_vfab_id(utils.get_physical_network(network)),
            "vlanid": utils.get_segmentation_id(network),
            "mac": port['mac_address'],
            # Multiple link entries mean the port is a LAG member.
            "lag": utils.is_lag(local_link_info)
        }
    @log_helpers.log_method_call
    def bind_port(self, context):
        """Bind a baremetal port by provisioning its VLAN on the switch.

        Only ports that pass validate_baremetal_deploy are bound; the
        binding uses VIF_TYPE_OTHER and marks the port ACTIVE.
        """
        port = context.current
        vnic_type = port['binding:vnic_type']
        LOG.debug("Attempting to bind port %(port)s with vnic_type "
                  "%(vnic_type)s on network %(network)s",
                  {'port': port['id'], 'vnic_type': vnic_type,
                   'network': context.network.current['id']})
        if validate_baremetal_deploy(context):
            params = self.get_physical_net_params(context)
            segments = context.segments_to_bind
            self.setup_vlan(params)
            context.set_binding(segments[0][api.ID],
                                portbindings.VIF_TYPE_OTHER, {},
                                status=constants.PORT_STATUS_ACTIVE)
def is_supported(network):
    """Check whether this driver can handle the given network.

    A network is supported when its network_type is in
    _SUPPORTED_NET_TYPES ('vlan') and a segmentation_id is present.

    @param network a network object
    @return True when supported, otherwise False
    """
    network_type = utils.get_network_type(network)
    if network_type not in _SUPPORTED_NET_TYPES:
        LOG.warning("Network type(%s) is not supported. Skip it.",
                    network_type)
        return False
    return bool(utils.get_segmentation_id(network))
def validate_baremetal_deploy(mech_context):
    """Check whether the context describes a deployable baremetal port.

    @param mech_context a context object
    @return True if baremetal deploy is possible, otherwise False
    """
    port = mech_context.current
    network = mech_context.network
    return bool(
        utils.is_baremetal(port) and
        is_supported(network) and
        utils.get_physical_connectivity(port))
| [
"networking_fujitsu.ml2.common.utils.get_physical_connectivity",
"networking_fujitsu.ml2.common.utils.get_physical_network",
"networking_fujitsu.ml2.common.utils.is_lag",
"networking_fujitsu.ml2.common.utils.get_segmentation_id",
"networking_fujitsu._i18n._",
"oslo_utils.importutils.import_object",
"neu... | [((1115, 1142), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1132, 1142), True, 'from oslo_log import log as logging\n'), ((2324, 2378), 'oslo_config.cfg.CONF.register_opts', 'cfg.CONF.register_opts', (['ML2_FUJITSU', 'ML2_FUJITSU_GROUP'], {}), '(ML2_FUJITSU, ML2_FUJITSU_GROUP)\n', (2346, 2378), False, 'from oslo_config import cfg\n'), ((13225, 13256), 'networking_fujitsu.ml2.common.utils.get_network_type', 'utils.get_network_type', (['network'], {}), '(network)\n', (13247, 13256), False, 'from networking_fujitsu.ml2.common import utils\n'), ((3222, 3270), 'oslo_utils.importutils.import_object', 'importutils.import_object', (['CFAB_DRIVER', 'cfg.CONF'], {}), '(CFAB_DRIVER, cfg.CONF)\n', (3247, 3270), False, 'from oslo_utils import importutils\n'), ((4883, 4907), 'networking_fujitsu.ml2.common.utils.is_baremetal', 'utils.is_baremetal', (['port'], {}), '(port)\n', (4901, 4907), False, 'from networking_fujitsu.ml2.common import utils\n'), ((5260, 5294), 'networking_fujitsu.ml2.common.utils.get_segmentation_id', 'utils.get_segmentation_id', (['network'], {}), '(network)\n', (5285, 5294), False, 'from networking_fujitsu.ml2.common import utils\n'), ((6527, 6551), 'networking_fujitsu.ml2.common.utils.is_baremetal', 'utils.is_baremetal', (['port'], {}), '(port)\n', (6545, 6551), False, 'from networking_fujitsu.ml2.common import utils\n'), ((11686, 11723), 'networking_fujitsu.ml2.common.utils.get_physical_connectivity', 'utils.get_physical_connectivity', (['port'], {}), '(port)\n', (11717, 11723), False, 'from networking_fujitsu.ml2.common import utils\n'), ((13439, 13473), 'networking_fujitsu.ml2.common.utils.get_segmentation_id', 'utils.get_segmentation_id', (['network'], {}), '(network)\n', (13464, 13473), False, 'from networking_fujitsu.ml2.common import utils\n'), ((13755, 13779), 'networking_fujitsu.ml2.common.utils.is_baremetal', 'utils.is_baremetal', (['port'], {}), '(port)\n', (13773, 13779), False, 'from 
networking_fujitsu.ml2.common import utils\n'), ((13817, 13854), 'networking_fujitsu.ml2.common.utils.get_physical_connectivity', 'utils.get_physical_connectivity', (['port'], {}), '(port)\n', (13848, 13854), False, 'from networking_fujitsu.ml2.common import utils\n'), ((1454, 1500), 'networking_fujitsu._i18n._', '_', (['"""The address of the C-Fabric to telnet to."""'], {}), "('The address of the C-Fabric to telnet to.')\n", (1455, 1500), False, 'from networking_fujitsu._i18n import _\n'), ((1569, 1603), 'networking_fujitsu._i18n._', '_', (['"""The C-Fabric username to use."""'], {}), "('The C-Fabric username to use.')\n", (1570, 1603), False, 'from networking_fujitsu._i18n import _\n'), ((1690, 1724), 'networking_fujitsu._i18n._', '_', (['"""The C-Fabric password to use."""'], {}), "('The C-Fabric password to use.')\n", (1691, 1724), False, 'from networking_fujitsu._i18n import _\n'), ((1798, 1913), 'networking_fujitsu._i18n._', '_', (['"""List of <physical_network>:<vfab_id> tuples specifying physical_network names and corresponding vfab ids."""'], {}), "('List of <physical_network>:<vfab_id> tuples specifying physical_network names and corresponding vfab ids.'\n )\n", (1799, 1913), False, 'from networking_fujitsu._i18n import _\n'), ((2000, 2090), 'networking_fujitsu._i18n._', '_', (['"""Whether to share a C-Fabric pprofile among Neutron ports using the same VLAN ID."""'], {}), "('Whether to share a C-Fabric pprofile among Neutron ports using the same VLAN ID.'\n )\n", (2001, 2090), False, 'from networking_fujitsu._i18n import _\n'), ((2174, 2215), 'networking_fujitsu._i18n._', '_', (['"""The prefix string for pprofile name."""'], {}), "('The prefix string for pprofile name.')\n", (2175, 2215), False, 'from networking_fujitsu._i18n import _\n'), ((2285, 2320), 'networking_fujitsu._i18n._', '_', (['"""Whether to save configuration."""'], {}), "('Whether to save configuration.')\n", (2286, 2320), False, 'from networking_fujitsu._i18n import _\n'), ((5206, 5241), 
'networking_fujitsu.ml2.common.utils.get_physical_network', 'utils.get_physical_network', (['network'], {}), '(network)\n', (5232, 5241), False, 'from networking_fujitsu.ml2.common import utils\n'), ((12060, 12094), 'networking_fujitsu.ml2.common.utils.get_segmentation_id', 'utils.get_segmentation_id', (['network'], {}), '(network)\n', (12085, 12094), False, 'from networking_fujitsu.ml2.common import utils\n'), ((12155, 12184), 'networking_fujitsu.ml2.common.utils.is_lag', 'utils.is_lag', (['local_link_info'], {}), '(local_link_info)\n', (12167, 12184), False, 'from networking_fujitsu.ml2.common import utils\n'), ((3166, 3197), 'oslo_config.cfg.OptGroup', 'cfg.OptGroup', (['ML2_FUJITSU_GROUP'], {}), '(ML2_FUJITSU_GROUP)\n', (3178, 3197), False, 'from oslo_config import cfg\n'), ((3948, 3991), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': 'method'}), '(method=method)\n', (3976, 3991), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((4388, 4439), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': '"""_get_vfab_id"""'}), "(method='_get_vfab_id')\n", (4416, 4439), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((5673, 5716), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': 'method'}), '(method=method)\n', (5701, 5716), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((7063, 7098), 'networking_fujitsu.ml2.common.utils.get_physical_network', 'utils.get_physical_network', (['network'], {}), '(network)\n', (7089, 7098), False, 'from networking_fujitsu.ml2.common import utils\n'), ((7120, 7154), 'networking_fujitsu.ml2.common.utils.get_segmentation_id', 'utils.get_segmentation_id', (['network'], {}), '(network)\n', (7145, 7154), False, 'from networking_fujitsu.ml2.common import utils\n'), ((9286, 9335), 
'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': '"""setup_vlan"""'}), "(method='setup_vlan')\n", (9314, 9335), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((10695, 10731), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', (['target'], {}), '(target)\n', (10723, 10731), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((12000, 12035), 'networking_fujitsu.ml2.common.utils.get_physical_network', 'utils.get_physical_network', (['network'], {}), '(network)\n', (12026, 12035), False, 'from networking_fujitsu.ml2.common import utils\n'), ((3694, 3737), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': 'method'}), '(method=method)\n', (3722, 3737), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((6917, 6960), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': 'method'}), '(method=method)\n', (6945, 6960), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n'), ((7664, 7707), 'neutron.plugins.ml2.common.exceptions.MechanismDriverError', 'ml2_exc.MechanismDriverError', ([], {'method': 'method'}), '(method=method)\n', (7692, 7707), True, 'from neutron.plugins.ml2.common import exceptions as ml2_exc\n')] |
from __future__ import print_function
from math import log10
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from dataset import DataSetFromFolder
import torch.cuda
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision.utils as vutils
from models import generator_model,dicriminator_model
from util import initialize_weights
from torchvision import transforms
import numpy as np
from os.path import join
# ---- hyper-parameters -------------------------------------------------
h = 64            # feature/latent size handed to both models
n = 128           # network width parameter
batch_size = 16
cudnn.benchmark = True          # let cuDNN pick the fastest conv algorithms
lr = 1e-3                       # Adam learning rate shared by G and D
dtype = torch.cuda.FloatTensor  # all tensors are kept on the GPU
torch.cuda.manual_seed(619)     # reproducible CUDA RNG
path = "dataset/"
# Pre-processing: center-crop to 160x160, downscale to 64x64, then normalize
# each channel to [-1, 1].
# NOTE(review): transforms.Scale is the legacy name for Resize — fine for the
# torchvision version this was written against.
train_transform = transforms.Compose([transforms.CenterCrop(160),
                     transforms.Scale(size=64),
                     transforms.ToTensor(),
                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# train_set = DataSetFromFolder(join(join(path,"CelebA"), "train"), train_transform)
# train_loader = DataLoader(dataset=train_set, num_workers=1, batch_size=16, shuffle=True)
# Generator / discriminator on the GPU, with custom weight initialization.
G = generator_model(h, n).type(dtype)
G.apply(initialize_weights)
D = dicriminator_model(h, n).type(dtype)
D.apply(initialize_weights)
# Pre-allocated buffers — presumably filled each training iteration.
# NOTE(review): torch.FloatTensor(sizes) does NOT zero its memory.
real_data = Variable(torch.FloatTensor(batch_size, 3, 64, 64)).type(dtype)
logits_real = Variable(torch.FloatTensor(batch_size, h)).type(dtype)
logits_fake = Variable(torch.FloatTensor(batch_size, h)).type(dtype)
# One Adam optimizer per network.
optimG = optim.Adam(G.parameters(), lr=lr)
optimD = optim.Adam(D.parameters(), lr=lr)
| [
"torchvision.transforms.CenterCrop",
"models.dicriminator_model",
"torchvision.transforms.Scale",
"models.generator_model",
"torchvision.transforms.Normalize",
"torch.cuda.manual_seed",
"torchvision.transforms.ToTensor",
"torch.FloatTensor"
] | [((596, 623), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(619)'], {}), '(619)\n', (618, 623), False, 'import torch\n'), ((683, 709), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(160)'], {}), '(160)\n', (704, 709), False, 'from torchvision import transforms\n'), ((732, 757), 'torchvision.transforms.Scale', 'transforms.Scale', ([], {'size': '(64)'}), '(size=64)\n', (748, 757), False, 'from torchvision import transforms\n'), ((780, 801), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (799, 801), False, 'from torchvision import transforms\n'), ((823, 877), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (843, 877), False, 'from torchvision import transforms\n'), ((1067, 1088), 'models.generator_model', 'generator_model', (['h', 'n'], {}), '(h, n)\n', (1082, 1088), False, 'from models import generator_model, dicriminator_model\n'), ((1135, 1159), 'models.dicriminator_model', 'dicriminator_model', (['h', 'n'], {}), '(h, n)\n', (1153, 1159), False, 'from models import generator_model, dicriminator_model\n'), ((1222, 1262), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', '(3)', '(64)', '(64)'], {}), '(batch_size, 3, 64, 64)\n', (1239, 1262), False, 'import torch\n'), ((1299, 1331), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', 'h'], {}), '(batch_size, h)\n', (1316, 1331), False, 'import torch\n'), ((1368, 1400), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', 'h'], {}), '(batch_size, h)\n', (1385, 1400), False, 'import torch\n')] |
import cv2
import numpy as np
# Load the sample image and split it into its three single-channel planes.
# NOTE: OpenCV orders channels B, G, R (not RGB).
img = cv2.imread('../Resources/Photos/park.jpg')
b,g,r = cv2.split(img)
# cv2.imshow('Blue',b)
# cv2.imshow('Green',g)
# cv2.imshow('Red',r)
# All-zero plane used to visualize each channel in its own color.
blank = np.zeros(img.shape[:2],dtype='uint8')
blue = cv2.merge([b,blank,blank])
green = cv2.merge([blank,g,blank])
red = cv2.merge([blank,blank,r])
# cv2.imshow('Blue',blue)
# cv2.imshow('Green',green)
# cv2.imshow('Red',red)
# print(f'img -> {img.shape}, b->{b.shape}, g->{g.shape}, r-> {r.shape}')
# Recombining the three planes reproduces the original image.
merged = cv2.merge([b,g,r])
cv2.imshow('Merged',merged)
cv2.waitKey(0) | [
"cv2.merge",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.split",
"cv2.imread"
] | [((37, 79), 'cv2.imread', 'cv2.imread', (['"""../Resources/Photos/park.jpg"""'], {}), "('../Resources/Photos/park.jpg')\n", (47, 79), False, 'import cv2\n'), ((89, 103), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (98, 103), False, 'import cv2\n'), ((182, 220), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (190, 220), True, 'import numpy as np\n'), ((227, 255), 'cv2.merge', 'cv2.merge', (['[b, blank, blank]'], {}), '([b, blank, blank])\n', (236, 255), False, 'import cv2\n'), ((262, 290), 'cv2.merge', 'cv2.merge', (['[blank, g, blank]'], {}), '([blank, g, blank])\n', (271, 290), False, 'import cv2\n'), ((295, 323), 'cv2.merge', 'cv2.merge', (['[blank, blank, r]'], {}), '([blank, blank, r])\n', (304, 323), False, 'import cv2\n'), ((484, 504), 'cv2.merge', 'cv2.merge', (['[b, g, r]'], {}), '([b, g, r])\n', (493, 504), False, 'import cv2\n'), ((503, 531), 'cv2.imshow', 'cv2.imshow', (['"""Merged"""', 'merged'], {}), "('Merged', merged)\n", (513, 531), False, 'import cv2\n'), ((531, 545), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (542, 545), False, 'import cv2\n')] |
# encoding: utf-8
"""
@author: ccj
@contact:
"""
import numpy as np
from typing import List, Dict, Tuple, Any
import torch
import torch.nn.functional as F
def crop_white(image: np.ndarray, value: int = 255) -> np.ndarray:
    """
    Trim the white border of *image*: drop every leading/trailing row and
    column whose pixels are all >= ``value``.

    :param image: np.ndarray of shape [H, W, 3], dtype uint8
    :param value: int threshold treated as "white" (default 255)
    :return:
        The tight sub-image around the non-white content, or the input
        unchanged when the whole image is white.
    """
    assert image.shape[2] == 3, "image shape should be [W, H, 3]"
    assert image.dtype == np.uint8, "image type should be np.uint8"
    # A row/column carries information when its darkest pixel is below value.
    rows = np.flatnonzero(image.min(axis=(1, 2)) < value)
    cols = np.flatnonzero(image.min(axis=0).min(axis=1) < value)
    if rows.size == 0 or cols.size == 0:
        return image
    top, bottom = rows[0], rows[-1]
    left, right = cols[0], cols[-1]
    return image[top:bottom + 1, left:right + 1]
def get_tiles(image: np.ndarray, tile_size: int = 256, n_tiles: int = 36,
              mode: int = 0) -> Tuple[List[Dict[str, Any]], bool]:
    """
    Crop a big image into multiple small square patches.

    :param image: np.ndarray of shape [H, W, 3], image to be cropped
    :param tile_size: int, side length of the square patches
    :param n_tiles: int, number of patches to return
    :param mode: int, extra-padding mode (mode=1 adds tile_size//2 per axis)
    :return:
        list of {'img': patch, 'idx': i} dicts, plus a bool flag that is True
        when at least n_tiles patches contain non-white pixels
    """
    result = []
    h, w, c = image.shape
    # Pad so both spatial dimensions become exact multiples of tile_size.
    pad_h = (tile_size - h % tile_size) % tile_size + ((tile_size * mode) // 2)
    pad_w = (tile_size - w % tile_size) % tile_size + ((tile_size * mode) // 2)
    # Pad with 255 (white) so padding never counts as image content below.
    img2 = np.pad(image, [[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w//2], [0, 0]], constant_values=255)
    # Reshape + transpose to slice the padded image into a flat list of tiles.
    img3 = img2.reshape(
        img2.shape[0] // tile_size,
        tile_size,
        img2.shape[1] // tile_size,
        tile_size,
        3
    )
    img3 = img3.transpose(0, 2, 1, 3, 4).reshape(-1, tile_size, tile_size,3)
    # A tile "has info" when it is not pure white.
    n_tiles_with_info = (img3.reshape(img3.shape[0], -1).sum(1) < tile_size ** 2 * 3 * 255).sum()
    # Top up with all-white tiles when the image yields fewer than n_tiles.
    if len(img3) < n_tiles:
        img3 = np.pad(img3, [[0, n_tiles-len(img3)], [0, 0], [0, 0], [0, 0]], constant_values=255)
    # Keep the n_tiles darkest (least white) tiles, by ascending pixel sum.
    idxs = np.argsort(img3.reshape(img3.shape[0], -1).sum(-1))[:n_tiles]
    img3 = img3[idxs]
    for i in range(len(img3)):
        result.append({'img': img3[i], 'idx': i})
    return result, n_tiles_with_info >= n_tiles
def glue_to_one_picture(image: np.ndarray, tile_size: int = 256, n_tiles: int = 36,
                        random_idx: bool = True) -> np.ndarray:
    """
    Re-assemble the patches produced by ``get_tiles`` into one square image.

    :param image: np.ndarray, source image to be tiled and re-assembled
    :param tile_size: int, side length of each square patch
    :param n_tiles: int, number of patches (should be a perfect square)
    :param random_idx: bool, if True place the patches in a random order
                       (uses the global NumPy RNG)
    :return:
        np.uint8 image of side sqrt(n_tiles) * tile_size
    """
    tiles, _ = get_tiles(image, tile_size=tile_size, n_tiles=n_tiles, mode=0)
    if random_idx:
        # Random permutation of the patch order (no repetition).
        patch_idxes = np.random.choice(list(range(n_tiles)), n_tiles, replace=False)
    else:
        patch_idxes = list(range(n_tiles))
    n_row_tiles = int(np.sqrt(n_tiles))
    images = np.zeros((tile_size * n_row_tiles, tile_size * n_row_tiles, 3)).astype(np.uint8)
    index = 0
    for h in range(n_row_tiles):
        for w in range(n_row_tiles):
            if len(tiles) > patch_idxes[index]:
                this_img = tiles[patch_idxes[index]]["img"]
                index = index + 1
            else:
                # NOTE(review): get_tiles always returns exactly n_tiles
                # patches, so this fallback looks unreachable; also ``index``
                # is not advanced here, which would mis-align the remaining
                # patches if it ever ran — confirm intent.
                this_img = np.zeros((tile_size, tile_size, 3)).astype(np.uint8)
            images[h * tile_size:(h + 1) * tile_size, w * tile_size:(w + 1) * tile_size, :] = this_img
    return images
def cutmix(batch: List[Dict[str, Any]], hparams: Dict[str, Any]) -> Dict[str, Any]:
    """
    Apply the CutMix augmentation to one collated batch.

    For each sample (independently), a rectangular region may be replaced by
    the same region from another random sample of the batch, and the labels
    are blended proportionally to the pasted area.

    :param batch: dict with "image" [B, C, H, W] tensor and "target" labels
    :param hparams: config; read both as a mapping (``hparams["cutmix_prob"]``)
                    and as attributes (``hparams.ohe_mode`` / ``num_class``) —
                    NOTE(review): mixed access styles, presumably an
                    attribute-dict config; confirm.
    :return:
        dict with the augmented "image" and blended "target"
    """
    image, target = batch["image"], batch["target"]
    batch_size = image.shape[0]
    img_h, img_w = image.shape[2:]
    imgs, labs = [], []
    for j in range(batch_size):
        p = np.random.uniform(0., 1.)
        # NOTE(review): mixing fires when p >= cutmix_prob, so cutmix_prob is
        # effectively the probability of NOT applying CutMix — confirm intended.
        if p >= hparams["cutmix_prob"]:
            idx = int(np.random.uniform(0, batch_size))
            # choose x, y and beta dist
            # NOTE(review): the mix ratio ``b`` is drawn uniform, not Beta.
            x = np.random.uniform(0, img_w)
            y = np.random.uniform(0, img_h)
            b = np.random.uniform(0., 1.)
            w = img_w * np.sqrt(1 - b)
            h = img_h * np.sqrt(1 - b)
            # Clamp the patch rectangle to the image bounds.
            x0 = int(np.round(max(0, x - w/2)))
            x1 = int(np.round(min(img_w, x + w/2)))
            y0 = int(np.round(max(0, y - h/2)))
            y1 = int(np.round(min(img_h, y + h/2)))
            # Rebuild row band [y0:y1): left part of j, patch from idx, right part of j.
            one = image[j, :, y0:y1, 0:x0]
            two = image[idx, :, y0:y1, x0:x1]
            three = image[j, :, y0:y1, x1: img_w]
            middle = torch.cat((one, two, three), dim=2)
            img = torch.cat((image[j, :, 0:y0, :], middle, image[j, :, y1:img_h, :]), dim=1)
            imgs.append(img)
            # Label mixing weight = nominal patch area fraction (pre-clamp).
            a = w * h / img_w / img_h
            if len(target.shape) < 2:
                if hparams.ohe_mode:
                    lab1 = F.one_hot(target[j], num_classes=hparams.num_class)
                    lab2 = F.one_hot(target[idx], num_classes=hparams.num_class)
                else:
                    lab1 = target[j]
                    lab2 = target[idx]
            else:
                lab1 = target[j, :]
                lab2 = target[idx, :]
            labs.append((1 - a) * lab1 + a * lab2)
        else:
            # Sample left unchanged.
            imgs.append(image[j, :, :, :])
            if len(target.shape) < 2:
                if hparams.ohe_mode:
                    # NOTE(review): .float() here but not in the mixed branch —
                    # dtypes still stack because blending produces floats.
                    labs.append(F.one_hot(target[j], num_classes=hparams.num_class).float())
                else:
                    labs.append(target[j])
            else:
                labs.append(target[j, :])
    image2 = torch.stack(imgs)
    label2 = torch.stack(labs)
    return {
        "image": image2,
        "target": label2,
    }
def mixup(batch: List[Dict[str, Any]], hparams: Dict[str, Any]) -> Dict[str, Any]:
    """
    Apply the MixUp augmentation to one collated batch.

    For each sample (independently), the image may be blended pixel-wise with
    another random sample of the batch, and the labels are blended with the
    same coefficient.

    :param batch: dict with "image" [B, C, H, W] tensor and "target" labels
    :param hparams: config; read both as a mapping (``hparams["mixup_prob"]``)
                    and as attributes (``hparams.ohe_mode`` / ``num_class``) —
                    NOTE(review): mixed access styles, presumably an
                    attribute-dict config; confirm.
    :return:
        dict with the augmented "image" and blended "target"
    """
    image, target = batch["image"], batch["target"]
    batch_size = image.shape[0]
    imgs, labs = [], []
    for j in range(batch_size):
        p = np.random.uniform(0., 1.)
        # NOTE(review): mixing fires when p >= mixup_prob, so mixup_prob is
        # effectively the probability of NOT applying MixUp — confirm intended.
        if p >= hparams["mixup_prob"]:
            idx = int(np.random.uniform(0, batch_size))
            # choose beta dist
            # NOTE(review): the mix ratio ``b`` is drawn uniform, not Beta.
            b = np.random.uniform(0., 1.)
            img = (1 - b) * image[j, :, :, :] + b * image[idx, :, :, :]
            imgs.append(img)
            if len(target.shape) < 2:
                if hparams.ohe_mode:
                    lab1 = F.one_hot(target[j], num_classes=hparams.num_class)
                    lab2 = F.one_hot(target[idx], num_classes=hparams.num_class)
                else:
                    lab1 = target[j]
                    lab2 = target[idx]
            else:
                lab1 = target[j, :]
                lab2 = target[idx, :]
            labs.append((1 - b) * lab1 + b * lab2)
        else:
            # Sample left unchanged.
            imgs.append(image[j, :, :, :])
            if len(target.shape) < 2:
                if hparams.ohe_mode:
                    labs.append(F.one_hot(target[j], num_classes=hparams.num_class).float())
                else:
                    labs.append(target[j])
            else:
                labs.append(target[j, :])
    image2 = torch.stack(imgs)
    label2 = torch.stack(labs)
    return {
        "image": image2,
        "target": label2,
    }
class MixCollator:
    """DataLoader ``collate_fn`` that optionally applies CutMix and/or MixUp.

    The batch is first collated with the default PyTorch collator; each mix
    augmentation is then applied when its flag under ``hparams["mix_aug"]``
    is truthy.
    """

    def __init__(self, hparams: Dict[str, Any]):
        super(MixCollator, self).__init__()
        self.hparams = hparams

    def __call__(self, batch: List[Dict[str, Any]]) -> Dict[str, Any]:
        collated = torch.utils.data.dataloader.default_collate(batch)
        mix_cfg = self.hparams["mix_aug"]
        if mix_cfg["cutmix"]:
            collated = cutmix(collated, self.hparams)
        if mix_cfg["mixup"]:
            collated = mixup(collated, self.hparams)
        return collated
| [
"torch.utils.data.dataloader.default_collate",
"numpy.sqrt",
"torch.stack",
"numpy.zeros",
"torch.nn.functional.one_hot",
"numpy.random.uniform",
"numpy.pad",
"torch.cat"
] | [((1593, 1709), 'numpy.pad', 'np.pad', (['image', '[[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w // 2], [0, 0]]'], {'constant_values': '(255)'}), '(image, [[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w //\n 2], [0, 0]], constant_values=255)\n', (1599, 1709), True, 'import numpy as np\n'), ((6057, 6074), 'torch.stack', 'torch.stack', (['imgs'], {}), '(imgs)\n', (6068, 6074), False, 'import torch\n'), ((6088, 6105), 'torch.stack', 'torch.stack', (['labs'], {}), '(labs)\n', (6099, 6105), False, 'import torch\n'), ((7798, 7815), 'torch.stack', 'torch.stack', (['imgs'], {}), '(imgs)\n', (7809, 7815), False, 'import torch\n'), ((7829, 7846), 'torch.stack', 'torch.stack', (['labs'], {}), '(labs)\n', (7840, 7846), False, 'import torch\n'), ((3206, 3222), 'numpy.sqrt', 'np.sqrt', (['n_tiles'], {}), '(n_tiles)\n', (3213, 3222), True, 'import numpy as np\n'), ((4285, 4312), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4302, 4312), True, 'import numpy as np\n'), ((6660, 6687), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6677, 6687), True, 'import numpy as np\n'), ((8150, 8200), 'torch.utils.data.dataloader.default_collate', 'torch.utils.data.dataloader.default_collate', (['batch'], {}), '(batch)\n', (8193, 8200), False, 'import torch\n'), ((3237, 3300), 'numpy.zeros', 'np.zeros', (['(tile_size * n_row_tiles, tile_size * n_row_tiles, 3)'], {}), '((tile_size * n_row_tiles, tile_size * n_row_tiles, 3))\n', (3245, 3300), True, 'import numpy as np\n'), ((4463, 4490), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'img_w'], {}), '(0, img_w)\n', (4480, 4490), True, 'import numpy as np\n'), ((4507, 4534), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'img_h'], {}), '(0, img_h)\n', (4524, 4534), True, 'import numpy as np\n'), ((4551, 4578), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4568, 4578), True, 
'import numpy as np\n'), ((5018, 5053), 'torch.cat', 'torch.cat', (['(one, two, three)'], {'dim': '(2)'}), '((one, two, three), dim=2)\n', (5027, 5053), False, 'import torch\n'), ((5072, 5146), 'torch.cat', 'torch.cat', (['(image[j, :, 0:y0, :], middle, image[j, :, y1:img_h, :])'], {'dim': '(1)'}), '((image[j, :, 0:y0, :], middle, image[j, :, y1:img_h, :]), dim=1)\n', (5081, 5146), False, 'import torch\n'), ((6828, 6855), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6845, 6855), True, 'import numpy as np\n'), ((4373, 4405), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (4390, 4405), True, 'import numpy as np\n'), ((4602, 4616), 'numpy.sqrt', 'np.sqrt', (['(1 - b)'], {}), '(1 - b)\n', (4609, 4616), True, 'import numpy as np\n'), ((4641, 4655), 'numpy.sqrt', 'np.sqrt', (['(1 - b)'], {}), '(1 - b)\n', (4648, 4655), True, 'import numpy as np\n'), ((6747, 6779), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (6764, 6779), True, 'import numpy as np\n'), ((5318, 5369), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (5327, 5369), True, 'import torch.nn.functional as F\n'), ((5397, 5450), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[idx]'], {'num_classes': 'hparams.num_class'}), '(target[idx], num_classes=hparams.num_class)\n', (5406, 5450), True, 'import torch.nn.functional as F\n'), ((7059, 7110), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (7068, 7110), True, 'import torch.nn.functional as F\n'), ((7138, 7191), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[idx]'], {'num_classes': 'hparams.num_class'}), '(target[idx], num_classes=hparams.num_class)\n', (7147, 7191), True, 'import torch.nn.functional as F\n'), ((3589, 
3624), 'numpy.zeros', 'np.zeros', (['(tile_size, tile_size, 3)'], {}), '((tile_size, tile_size, 3))\n', (3597, 3624), True, 'import numpy as np\n'), ((5857, 5908), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (5866, 5908), True, 'import torch.nn.functional as F\n'), ((7598, 7649), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (7607, 7649), True, 'import torch.nn.functional as F\n')] |
"""
compare_neigh_overlap.py
Plots distributions of Jaccard distances for overlapping ipsilateral
neighborhoods (blue) and homologous contralateral neighborhoods (red)
in the adult and L4.
crated: <NAME>
data: 01 November 2018
"""
import os
from configparser import ConfigParser,ExtendedInterpolation
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#Brittin modules
from connectome.load import from_db
from connectome.format_graphs import low_pass_edge_filter,mid_pass_edge_filter,high_pass_edge_filter
from networks.stats import get_neighborhood_similarity,get_neighborhood_overlap_similarity
import ioaux
CONFIG = os.environ['CONFIG']
SOURCE = "data/neighborhood_similarity.csv"
def add_neighborhood_similarity(data,label,A,reflected,left,right):
    """Append one ['homologous', cell, distance] row per valid Jaccard
    distance between a cell's neighborhood and its contralateral homolog."""
    for cell, dist in get_neighborhood_similarity(A, reflected, left):
        # Skip the -1 sentinel and any distance above 1.
        if dist != -1 and dist <= 1:
            data.append(label + ['homologous', cell, dist])
def add_overlap_similarity(data,label,A,vertices):
    """Append one ['proximal', cell, distance] row per valid Jaccard distance
    between overlapping ipsilateral neighborhoods."""
    for cell, dist in get_neighborhood_overlap_similarity(A, vertices):
        # Skip the -1 sentinel and any distance above 1.
        if dist != -1 and dist <= 1:
            data.append(label + ['proximal', cell, dist])
def ipsilateral_pass_filter(G,args):
    """Keep only ipsilateral edges: delete every edge of *G* that crosses
    between the left and right node sets given in ``args = [left, right]``."""
    left, right = args
    crossing = []
    for edge in G.es:
        name_u = G.vs[edge.source]['name']
        name_v = G.vs[edge.target]['name']
        crosses = ((name_u in left) and (name_v in right)) or \
                  ((name_u in right) and (name_v in left))
        if crosses:
            crossing.append(edge)
    G.delete_edges(crossing)
def contralateral_pass_filter(G,args):
    """Keep only contralateral edges: delete every edge of *G* that stays
    within one side of the left/right node sets in ``args = [left, right]``."""
    left, right = args
    same_side = []
    for edge in G.es:
        name_u = G.vs[edge.source]['name']
        name_v = G.vs[edge.target]['name']
        crosses = ((name_u in left) and (name_v in right)) or \
                  ((name_u in right) and (name_v in left))
        if not crosses:
            same_side.append(edge)
    G.delete_edges(same_side)
def add_data(cfg,data,_label,edge_filter=None,args=None):
    """
    Append similarity rows for the three comparisons (Adult L/R, L4 L/R,
    Adult/L4) to *data*, optionally filtering edges first.

    cfg:  parsed config with ['mat'] paths for node lists and the L/R map
    data: list of rows, mutated in place
    _label: extra label columns appended after the comparison name
    edge_filter: optional callable(graph, args) that prunes edges in place
    args: forwarded to edge_filter
    """
    N2U = 'N2U'
    JSH = 'JSH'
    left = ioaux.read.into_list(cfg['mat']['left_nodes'])
    right = ioaux.read.into_list(cfg['mat']['right_nodes'])
    lrmap = ioaux.read.into_lr_dict(cfg['mat']['lrmap'])
    #_remove = ['VC01','VD01','VB01','VB02','HSNL','HSNR','PVNL','PVNR']
    # Cells excluded from every comparison.
    _remove = ['VC01','VD01','VB01','VB02','HSNL','HSNR','PVNL','PVNR','PLNL','PLNR','PVR','PVR.']
    # Adult left vs right: compare each adult neighborhood with its mirrored self.
    label = ['Adult L/R'] + _label
    n2u = from_db(N2U,adjacency=True,remove=_remove)
    if edge_filter: edge_filter(n2u.A,args)
    reflected = n2u.A.map_vertex_names(lrmap)
    add_neighborhood_similarity(data,label,n2u.A,reflected,left,right)
    add_overlap_similarity(data,label,n2u.A,left + right)
    # L4 left vs right: same procedure on the JSH (L4) dataset.
    label = ['L4 L/R'] + _label
    jsh = from_db(JSH,adjacency=True,remove=_remove)
    if edge_filter: edge_filter(jsh.A,args)
    reflected = jsh.A.map_vertex_names(lrmap)
    add_neighborhood_similarity(data,label,jsh.A,reflected,left,right)
    add_overlap_similarity(data,label,jsh.A,left + right)
    # Adult vs L4: compare across datasets over their shared neurons.
    label = ['Adult/L4'] + _label
    vertices = sorted((set(n2u.neurons)&set(jsh.neurons))-set(_remove))
    add_neighborhood_similarity(data,label,n2u.A,jsh.A,left,right)
    add_overlap_similarity(data,label,n2u.A,vertices)
    add_overlap_similarity(data,label,jsh.A,vertices)
def run(_cfg,source_data=None):
    """
    Build the full similarity table (all edge-weight bands plus the
    ipsilateral/contralateral splits) and optionally write it to CSV.

    _cfg: path to the config file
    source_data: optional CSV output path; when None nothing is written
    """
    cfg = ConfigParser(interpolation=ExtendedInterpolation())
    cfg.read(_cfg)
    left = ioaux.read.into_list(cfg['mat']['left_nodes'])
    right = ioaux.read.into_list(cfg['mat']['right_nodes'])
    data = []
    # All edges, then low/mid/high edge-weight bands (thresholds 35 and 66).
    add_data(cfg,data,['all','all'])
    add_data(cfg,data,['all','low'],edge_filter=low_pass_edge_filter,args=35)
    add_data(cfg,data,['all','mid'],edge_filter=mid_pass_edge_filter,args=(35,66))
    add_data(cfg,data,['all','high'],edge_filter=high_pass_edge_filter,args=66)
    # Same-side-only and crossing-only edge subsets.
    add_data(cfg,data,['ipsilateral','all'],edge_filter=ipsilateral_pass_filter,args=[left,right])
    add_data(cfg,data,['contralateral','all'],edge_filter=contralateral_pass_filter,args=[left,right])
    df = pd.DataFrame(data,columns=["Comparison","Network","Edge threshold","Measure","Cell","Jaccard Distance"])
    if source_data: df.to_csv(source_data,index=False)
# Command-line entry point: accept an optional config path (-c/--config,
# defaulting to the CONFIG env var) and write the table to SOURCE.
if __name__=="__main__":
    parser = argparse.ArgumentParser(description=__doc__,
                                    formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-c','--config',
                        dest = 'config',
                        action = 'store',
                        default = CONFIG,
                        required = False,
                        help = 'Config file')

    params = parser.parse_args()
    run(params.config,source_data=SOURCE)
| [
"networks.stats.get_neighborhood_similarity",
"connectome.load.from_db",
"networks.stats.get_neighborhood_overlap_similarity",
"pandas.DataFrame",
"ioaux.read.into_list",
"argparse.ArgumentParser",
"ioaux.read.into_lr_dict",
"configparser.ExtendedInterpolation"
] | [((813, 860), 'networks.stats.get_neighborhood_similarity', 'get_neighborhood_similarity', (['A', 'reflected', 'left'], {}), '(A, reflected, left)\n', (840, 860), False, 'from networks.stats import get_neighborhood_similarity, get_neighborhood_overlap_similarity\n'), ((1015, 1063), 'networks.stats.get_neighborhood_overlap_similarity', 'get_neighborhood_overlap_similarity', (['A', 'vertices'], {}), '(A, vertices)\n', (1050, 1063), False, 'from networks.stats import get_neighborhood_similarity, get_neighborhood_overlap_similarity\n'), ((1889, 1935), 'ioaux.read.into_list', 'ioaux.read.into_list', (["cfg['mat']['left_nodes']"], {}), "(cfg['mat']['left_nodes'])\n", (1909, 1935), False, 'import ioaux\n'), ((1948, 1995), 'ioaux.read.into_list', 'ioaux.read.into_list', (["cfg['mat']['right_nodes']"], {}), "(cfg['mat']['right_nodes'])\n", (1968, 1995), False, 'import ioaux\n'), ((2008, 2052), 'ioaux.read.into_lr_dict', 'ioaux.read.into_lr_dict', (["cfg['mat']['lrmap']"], {}), "(cfg['mat']['lrmap'])\n", (2031, 2052), False, 'import ioaux\n'), ((2275, 2319), 'connectome.load.from_db', 'from_db', (['N2U'], {'adjacency': '(True)', 'remove': '_remove'}), '(N2U, adjacency=True, remove=_remove)\n', (2282, 2319), False, 'from connectome.load import from_db\n'), ((2587, 2631), 'connectome.load.from_db', 'from_db', (['JSH'], {'adjacency': '(True)', 'remove': '_remove'}), '(JSH, adjacency=True, remove=_remove)\n', (2594, 2631), False, 'from connectome.load import from_db\n'), ((3264, 3310), 'ioaux.read.into_list', 'ioaux.read.into_list', (["cfg['mat']['left_nodes']"], {}), "(cfg['mat']['left_nodes'])\n", (3284, 3310), False, 'import ioaux\n'), ((3323, 3370), 'ioaux.read.into_list', 'ioaux.read.into_list', (["cfg['mat']['right_nodes']"], {}), "(cfg['mat']['right_nodes'])\n", (3343, 3370), False, 'import ioaux\n'), ((3884, 3998), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Comparison', 'Network', 'Edge threshold', 'Measure', 'Cell',\n 'Jaccard Distance']"}), 
"(data, columns=['Comparison', 'Network', 'Edge threshold',\n 'Measure', 'Cell', 'Jaccard Distance'])\n", (3896, 3998), True, 'import pandas as pd\n'), ((4084, 4187), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (4107, 4187), False, 'import argparse\n'), ((3208, 3231), 'configparser.ExtendedInterpolation', 'ExtendedInterpolation', ([], {}), '()\n', (3229, 3231), False, 'from configparser import ConfigParser, ExtendedInterpolation\n')] |
#%%
import ast
from preprocess import to_n_gram
from grammar_checker import Checker
import pickle
LANGUAGES = ast.literal_eval(open("language_short_names.txt", "r").read())
class Autocorrect:
    """Language-aware grammar/spell helper built on a per-language Checker."""

    def __init__(self, language = 'en-US') -> None:
        # Default to en-US until language_detect() is called.
        self.language = language
        self.tool = self.load_dictionary()

    # Detect the input language with the pickled multinomial-NB model and
    # reload the matching dictionary. Updates self.language/self.tool in
    # place; despite the original annotation it returns nothing.
    def language_detect(self,input_string = None) -> None:
        if input_string != None:
            self.input_string = input_string
        # Language identification using the multinb model.
        # NOTE(review): the model is re-loaded from disk on every call and the
        # file handle is never closed — consider caching/with-block.
        loaded_model = pickle.load(open('../model_training/new_models/multinb.pickle', 'rb'))
        predict_lang = loaded_model.predict(to_n_gram(self.input_string))[0]
        # Reverse lookup: full language name -> short code.
        self.language = [k for k, v in LANGUAGES.items() if v == predict_lang][0]
        print("Loading Dictionary")
        self.tool = self.load_dictionary()
        print(f'Language Detected: {LANGUAGES[self.language]}')
    # Load (and remember) the Checker for the given or current language.
    def load_dictionary(self, language = None):
        language = self.language if language == None else language
        self.language = language
        return Checker(self.language)
    # Word suggestions: [repeated words, grammar corrections].
    def suggestion(self,input_string):
        self.tool.tool(input_string)
        return [self.tool.repeated_words,self.tool.correct_grammar]
    # Output grammar + spelling correction.
    # TODO: still a stub — currently returns None.
    def correct(self,input_string):
        #return self.tool.correct(input_string)
        pass
# %% Tests
'''
correct = Autocorrect()
sentence = "an lá go mbeidh meáin na Gaeilge agus an Bhéarla ar comhchéim"
correct.language_detect(sentence.lower())
correct.suggestion(sentence.lower())
'''
#%% Spell check for html
'''
sentence = "this is a a sample sentece"
correct = Autocorrect()
correct.language_detect(sentence)
samplelist = correct.suggestion(sentence)
# %%
newlist = []
corrected = False
for i in range(len(sentence.split())):
try:
if sentence.split()[i] == sentence.split()[i+1]:
newlist.append('<div class="err">'+sentence.split()[i]+'</div>')
continue
elif ' '.join([sentence.split()[i],sentence.split()[i+1]]) in samplelist[1]:
newlist.append('<div class="err">'+' '.join([sentence.split()[i],sentence.split()[i+1]])+'</div>')
corrected = True
continue
newlist.append(sentence.split()[i])
except IndexError:
if not corrected:
newlist.append(sentence.split()[i])
else:
pass
' '.join(newlist)
'''
| [
"preprocess.to_n_gram",
"grammar_checker.Checker"
] | [((1150, 1172), 'grammar_checker.Checker', 'Checker', (['self.language'], {}), '(self.language)\n', (1157, 1172), False, 'from grammar_checker import Checker\n'), ((687, 715), 'preprocess.to_n_gram', 'to_n_gram', (['self.input_string'], {}), '(self.input_string)\n', (696, 715), False, 'from preprocess import to_n_gram\n')] |
from struct import pack, unpack, calcsize
from enum import Enum
import logging
START_DELIMITER = 0x7E
class XBeeOutFrame(object):
    """Base class for frames sent to the radio; subclasses serialize
    themselves via ``__bytes__``."""

    def __bytes__(self):
        raise NotImplementedError("Subclass should implement this method")

    @staticmethod
    def calc_checksum(partial_frame):
        """Return the one-byte API checksum for *partial_frame* (the frame
        without its 3-byte header and without the trailing checksum byte):
        0xFF minus the low byte of the byte sum."""
        total = sum(partial_frame) & 0xff
        return pack("!B", 0xff - total)
class XBeeInFrame(object):
    """Base class for frames received from the radio, with a factory that
    dispatches on the frame-type byte."""

    AT_RESPONSE = 0x88
    MODEM_STATUS = 0x8A
    TX_STATUS = 0x8B
    RX_PACKET = 0x90

    @staticmethod
    def verify_frame(frame):
        """True when the bytes after the 3-byte header sum to 0xFF (mod 256)."""
        return (sum(frame[3:]) & 0xff) == 0xff

    @classmethod
    def from_bytes(cls, data):
        """Parse raw *data* into the concrete frame class for its type byte."""
        if data[0] != START_DELIMITER:
            raise ValueError("Delimiter is incorrect.")
        if not cls.verify_frame(data):
            raise ValueError("Frame is corrupted.")
        frame_classes = {
            cls.AT_RESPONSE: XBeeATResponse,
            cls.MODEM_STATUS: XBeeModemStatus,
            cls.TX_STATUS: XBeeTXStatus,
            cls.RX_PACKET: XBeeRXPacket,
        }
        if data[3] not in frame_classes:
            raise ValueError("Unknown frame of type 0x{:x}".format(data[3]))
        return frame_classes[data[3]](data)
class XBeeATResponse(XBeeInFrame):
    """AT Command Response (0x88): result of a local AT command.

    ``value`` holds the raw parameter bytes of the response.
    """

    def __init__(self, data):
        assert data[3] == self.AT_RESPONSE
        self.frame_id = data[4]
        self.key = data[5:7].decode()   # two-letter AT command
        self.status = data[7]
        self.value = data[8:-1]         # parameter bytes (checksum stripped)

    def __str__(self):
        return f"ATResponse: {self.key} = {self.value}"
class XBeeRXPacket(XBeeInFrame):
    """Receive Packet (0x90): payload delivered from a remote node."""

    def __init__(self, frame):
        assert frame[3] == self.RX_PACKET
        src64, src16 = frame[4:12], frame[12:14]
        self.addr64 = int.from_bytes(src64, 'big')
        self.addr16 = int.from_bytes(src16, 'big')
        # frame[14] (presumably the receive-options byte) is skipped —
        # confirm against the XBee API frame spec.
        self.data = frame[15:-1]

    def __str__(self):
        return f"RXPacket from {self.addr64:x} of {len(self.data)} bytes"
class XBeeTXStatus(XBeeInFrame):
    """Transmit Status (0x8B): delivery result for a previous TX request."""

    class DeliveryStatus(Enum):
        # Delivery result codes reported by the radio.
        SUCCESS = 0
        MAC_ACK_FAILURE = 0x01
        CCA_FAILURE = 0x02
        INVALID_DEST_ENDPOINT = 0x15
        NETWORK_ACK_FAILURE = 0x21
        NOT_JOINED = 0x22
        SELF_ADDRESSED = 0x23
        ADDRESS_NOT_FOUND = 0x24
        ROUTE_NOT_FOUND = 0x25
        BROADCAST_SOURCE_FAIL = 0x26
        INVALID_BINDING_TABLE_INDEX = 0x2B
        RESOURCE_BUSY_1 = 0x2c
        ATTEMPT_BROADCAST_WITH_APS = 0x2d
        ATTEMPT_UNICAST_WITH_APS_BUT_EE00 = 0x2e
        RESOURCE_BUSY_2 = 0x32
        DATA_PAYLOAD_TOO_LARGE = 0x74
        INDIRECT_MESSAGE_UNREQ = 0x75

    def __init__(self, frame):
        # Parse a raw TX-status frame. frame[7] (presumably the retry count)
        # is not captured — confirm against the XBee API frame spec.
        assert frame[3] == self.TX_STATUS
        self.frame_id = frame[4]
        self.addr16 = int.from_bytes(frame[5:7], 'big')
        self.delivery_status = self.DeliveryStatus(frame[8])
        self.discovery_status = frame[9]

    def __str__(self):
        return "TXStatus: delivery={}, discovery={}, frame={}".format(
            self.delivery_status, self.discovery_status, self.frame_id)
class XBeeModemStatus(XBeeInFrame):
    """Modem Status (0x8A): unsolicited radio state-change notification."""

    class Status(Enum):
        # Modem status codes reported by the radio.
        HW_RESET= 0
        WDT_RESET = 1
        JOIN = 2
        DISASSOC = 3
        COORDINATOR_START = 6
        KEY_UPDATE = 7

    def __init__(self, frame):
        # The status code lives in the byte right after the frame type.
        assert frame[3] == self.MODEM_STATUS
        self.status = self.Status(frame[4])

    def __str__(self):
        return "ModemStatus: {}".format(self.status)
class XBeeTXRequest(XBeeOutFrame):
    """Transmit Request (0x10): send a payload to a 64-bit destination.

    ``addr64`` may be given as big-endian bytes, a hex string, or an int.
    """

    TX_REQUEST_CMD = 0x10
    # frame type, frame id, 64-bit dest, 16-bit dest, broadcast radius, options
    TX_REQ_HEADER_FMT = "!BBQHBB"
    TX_REQ_HEADER_SIZE = calcsize(TX_REQ_HEADER_FMT)

    def __init__(self, addr64, *data, **kwargs):
        self.data = b''.join(data)
        # BUG FIX: the bytes branch used to be a standalone `if`, so a bytes
        # address fell through to the trailing `else` and raised TypeError
        # even though self.addr64 had already been set. Chain with elif.
        if isinstance(addr64, bytes):
            self.addr64 = int.from_bytes(addr64, 'big')
        elif isinstance(addr64, str):
            self.addr64 = int(addr64, 16)
        elif isinstance(addr64, int):
            self.addr64 = addr64
        else:
            raise TypeError("Addr64 should be bytes, string or int")
        self.frame_id = kwargs.get("frame_id", 0)

    def __bytes__(self):
        """Serialize as: 0x7E, length, API header, payload, checksum."""
        length = len(self.data) + self.TX_REQ_HEADER_SIZE
        ohdr = pack("!BH", 0x7e, length)
        # 0xFFFE = unknown 16-bit address; radius and options left at 0.
        ihdr = pack(self.TX_REQ_HEADER_FMT, self.TX_REQUEST_CMD, self.frame_id, self.addr64, 0xfffe, 0, 0)
        checksum = 0xff - ((sum(ihdr) + sum(self.data)) & 0xff)
        checksum = pack("!B", checksum)
        return b"".join([ohdr, ihdr, self.data, checksum])

    def __str__(self):
        return "TXRequest to {:x} of {} bytes".format(self.addr64, len(self.data))
class XBeeATRequest(XBeeOutFrame):
    """AT Command frame (0x08): query or set a radio parameter."""

    AT_REQUEST_CMD = 0x08
    AT_HEADER_FMT = "!BB2s"
    AT_HEADER_SIZE = calcsize(AT_HEADER_FMT)

    def __init__(self, key, value=b'', frame_id=1):
        """Store the two-letter AT *key* and the optional parameter *value*
        bytes (empty value = query the parameter)."""
        self.key = key
        self.value = value
        self.frame_id = frame_id

    def __bytes__(self):
        """Serialize as: start delimiter, length, header, value, checksum."""
        body = pack(self.AT_HEADER_FMT, self.AT_REQUEST_CMD, self.frame_id, self.key.encode())
        frame_len = len(self.value) + self.AT_HEADER_SIZE
        prefix = pack("!BH", START_DELIMITER, frame_len)
        chk = pack("!B", 0xff - ((sum(body) + sum(self.value)) & 0xff))
        return b"".join([prefix, body, self.value, chk])

    def __str__(self):
        return f"ATRequest {self.key} = {self.value}"
# Ad-hoc smoke test: build a couple of outgoing frames, then parse a known
# modem-status frame and print it.
if __name__ == "__main__":
    from binascii import hexlify, unhexlify
    frame = XBeeTXRequest("eeeeee", b'TxData1B')
    frame = XBeeATRequest("NI")
    frame = XBeeInFrame.from_bytes(unhexlify("7e00028a066f"))
    # frame = XBeeInFrame.from_bytes(unhexlify("7e00058801424400f0"))
    # frame = XBeeInFrame.from_bytes(unhexlify("7e0011900013a20040522baa7d84015278446174610d"))
    print(frame)
| [
"struct.calcsize",
"binascii.unhexlify",
"struct.pack"
] | [((3644, 3671), 'struct.calcsize', 'calcsize', (['TX_REQ_HEADER_FMT'], {}), '(TX_REQ_HEADER_FMT)\n', (3652, 3671), False, 'from struct import pack, unpack, calcsize\n'), ((4748, 4771), 'struct.calcsize', 'calcsize', (['AT_HEADER_FMT'], {}), '(AT_HEADER_FMT)\n', (4756, 4771), False, 'from struct import pack, unpack, calcsize\n'), ((4232, 4256), 'struct.pack', 'pack', (['"""!BH"""', '(126)', 'length'], {}), "('!BH', 126, length)\n", (4236, 4256), False, 'from struct import pack, unpack, calcsize\n'), ((4273, 4368), 'struct.pack', 'pack', (['self.TX_REQ_HEADER_FMT', 'self.TX_REQUEST_CMD', 'self.frame_id', 'self.addr64', '(65534)', '(0)', '(0)'], {}), '(self.TX_REQ_HEADER_FMT, self.TX_REQUEST_CMD, self.frame_id, self.\n addr64, 65534, 0, 0)\n', (4277, 4368), False, 'from struct import pack, unpack, calcsize\n'), ((4448, 4468), 'struct.pack', 'pack', (['"""!B"""', 'checksum'], {}), "('!B', checksum)\n", (4452, 4468), False, 'from struct import pack, unpack, calcsize\n'), ((5065, 5101), 'struct.pack', 'pack', (['"""!BH"""', 'START_DELIMITER', 'length'], {}), "('!BH', START_DELIMITER, length)\n", (5069, 5101), False, 'from struct import pack, unpack, calcsize\n'), ((5281, 5301), 'struct.pack', 'pack', (['"""!B"""', 'checksum'], {}), "('!B', checksum)\n", (5285, 5301), False, 'from struct import pack, unpack, calcsize\n'), ((5641, 5666), 'binascii.unhexlify', 'unhexlify', (['"""7e00028a066f"""'], {}), "('7e00028a066f')\n", (5650, 5666), False, 'from binascii import hexlify, unhexlify\n')] |
# Library imports
import tensorflow as tf
import os
# Silence TensorFlow info/warning log messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Build the model (TF1 graph mode)
# Inputs: three constants and one placeholder fed at run time
A = tf.constant([4], tf.int32, name='A')
B = tf.constant([5], tf.int32, name='B')
C = tf.constant([6], tf.int32, name='C')
x = tf.placeholder(tf.int32, name='x')
# Create the operations
# y1 = Ax^2 + Bx + C
with tf.name_scope("Ecuacion_1"):
    Ax2 = tf.multiply(A, tf.pow(x, 2), name="Ax2")
    Bx = tf.multiply(B, x, name="Bx")
    y1 = tf.add_n([Ax2, Bx, C], name="suma1")
# y2 = A + Bx + Cx^2
with tf.name_scope("Ecuacion_2"):
    Bx = tf.multiply(B, x, name="Bx")
    Cx2 = tf.multiply(C, tf.pow(x, 2), name="Cx2")
    y2 = tf.add_n([A, Bx, Cx2], name="suma2")
# y = y1 + y2
with tf.name_scope("Suma_final"):
    y = y1 + y2
# Start the session
with tf.Session() as sesion:
    # Evaluate y for x = 0..9
    for i in range(10):
        print ("Para x={} -->{}".format(i,sesion.run(y, feed_dict={x: [i]})))
    # Write the graph so it can be inspected in TensorBoard
    writer = tf.summary.FileWriter('/grafo_niveles', sesion.graph)
    print("Guardados los datos para ver el grafo")
    writer.close()
| [
"tensorflow.pow",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.multiply",
"tensorflow.add_n",
"tensorflow.name_scope",
"tensorflow.constant",
"tensorflow.summary.FileWriter"
] | [((173, 209), 'tensorflow.constant', 'tf.constant', (['[4]', 'tf.int32'], {'name': '"""A"""'}), "([4], tf.int32, name='A')\n", (184, 209), True, 'import tensorflow as tf\n'), ((215, 251), 'tensorflow.constant', 'tf.constant', (['[5]', 'tf.int32'], {'name': '"""B"""'}), "([5], tf.int32, name='B')\n", (226, 251), True, 'import tensorflow as tf\n'), ((257, 293), 'tensorflow.constant', 'tf.constant', (['[6]', 'tf.int32'], {'name': '"""C"""'}), "([6], tf.int32, name='C')\n", (268, 293), True, 'import tensorflow as tf\n'), ((299, 333), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""x"""'}), "(tf.int32, name='x')\n", (313, 333), True, 'import tensorflow as tf\n'), ((382, 409), 'tensorflow.name_scope', 'tf.name_scope', (['"""Ecuacion_1"""'], {}), "('Ecuacion_1')\n", (395, 409), True, 'import tensorflow as tf\n'), ((467, 495), 'tensorflow.multiply', 'tf.multiply', (['B', 'x'], {'name': '"""Bx"""'}), "(B, x, name='Bx')\n", (478, 495), True, 'import tensorflow as tf\n'), ((503, 539), 'tensorflow.add_n', 'tf.add_n', (['[Ax2, Bx, C]'], {'name': '"""suma1"""'}), "([Ax2, Bx, C], name='suma1')\n", (511, 539), True, 'import tensorflow as tf\n'), ((569, 596), 'tensorflow.name_scope', 'tf.name_scope', (['"""Ecuacion_2"""'], {}), "('Ecuacion_2')\n", (582, 596), True, 'import tensorflow as tf\n'), ((608, 636), 'tensorflow.multiply', 'tf.multiply', (['B', 'x'], {'name': '"""Bx"""'}), "(B, x, name='Bx')\n", (619, 636), True, 'import tensorflow as tf\n'), ((699, 735), 'tensorflow.add_n', 'tf.add_n', (['[A, Bx, Cx2]'], {'name': '"""suma2"""'}), "([A, Bx, Cx2], name='suma2')\n", (707, 735), True, 'import tensorflow as tf\n'), ((758, 785), 'tensorflow.name_scope', 'tf.name_scope', (['"""Suma_final"""'], {}), "('Suma_final')\n", (771, 785), True, 'import tensorflow as tf\n'), ((826, 838), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (836, 838), True, 'import tensorflow as tf\n'), ((1025, 1078), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', 
(['"""/grafo_niveles"""', 'sesion.graph'], {}), "('/grafo_niveles', sesion.graph)\n", (1046, 1078), True, 'import tensorflow as tf\n'), ((434, 446), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (440, 446), True, 'import tensorflow as tf\n'), ((663, 675), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (669, 675), True, 'import tensorflow as tf\n')] |
from ui.config import SITE_CONFIG
from django.contrib.sites.models import Site
def basics(req):
    """Django context processor: SITE_CONFIG entries plus the current domain.

    Returns a fresh dict so the module-level SITE_CONFIG is never mutated.
    The original wrote 'domain' straight into the shared global on every
    request, permanently polluting SITE_CONFIG for the whole process.

    Args:
        req: the incoming HttpRequest (unused, required by the protocol).

    Returns:
        dict with all SITE_CONFIG keys plus 'domain' (current Site domain).
    """
    result = dict(SITE_CONFIG)
    result['domain'] = Site.objects.get_current().domain
    return result
"django.contrib.sites.models.Site.objects.get_current"
] | [((149, 175), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (173, 175), False, 'from django.contrib.sites.models import Site\n')] |
"""An echo server that has a server thread and a client thread. ONLY 5 CONNECTIONS."""
import threading
import socket
def server() -> None:
    """Accept five connections on localhost:8007 and echo each payload back.

    Fixes vs. the original: the listening socket is closed when done (it
    leaked before), each accepted connection is closed via a context manager
    even if recv/print raises, and sendall() replaces send() so the full
    payload is echoed even when the kernel performs a partial send.
    """
    host, port = 'localhost', 8007
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
        server_socket.bind((host, port))
        server_socket.listen(1)
        for _ in range(5):
            conn, addr = server_socket.accept()
            with conn:
                data = conn.recv(100000000)
                print('connected: ', addr, data.decode('utf-8'))
                conn.sendall(data)
def client() -> None:
    """Prompt the user five times; send each line to the echo server and
    print the reply."""
    for _ in range(5):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', 8007))
        # Read one line from stdin and ship it as UTF-8.
        sock.send(input('> ').encode('utf-8'))
        reply = sock.recv(100000000)
        print('received', reply.decode('utf-8'), len(reply), 'bytes')
        sock.close()
# Run the echo server and the interactive client side by side.
server_thread = threading.Thread(target=server)
client_thread = threading.Thread(target=client)
server_thread.start()
client_thread.start()
# Wait for both explicitly.  Previously the script only terminated because
# non-daemon threads implicitly block interpreter shutdown; the joins make
# that intent visible and keep behaviour identical.
server_thread.join()
client_thread.join()
| [
"threading.Thread",
"socket.socket"
] | [((1054, 1085), 'threading.Thread', 'threading.Thread', ([], {'target': 'server'}), '(target=server)\n', (1070, 1085), False, 'import threading\n'), ((1102, 1133), 'threading.Thread', 'threading.Thread', ([], {'target': 'client'}), '(target=client)\n', (1118, 1133), False, 'import threading\n'), ((214, 263), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (227, 263), False, 'import socket\n'), ((696, 745), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (709, 745), False, 'import socket\n')] |
import json
import urllib
import requests
import types
class MovesAPIError(Exception):
    """Error response returned by the Moves API."""
class MovesAPINotModifed(Exception):
    """Raised when the requested document is unchanged (HTTP 304 / ETag
    conditional request)."""
class MovesClient(object):
    """OAuth client for the Moves API.

    Python 2 implementation (relies on ``urllib.urlencode`` and the
    ``func_*`` function attributes).  Any unknown attribute access is
    turned into a GET API call via ``__getattr__``, e.g.
    ``moves.user_profile()`` issues ``GET /user/profile``.
    """
    api_url = "https://api.moves-app.com/api/1.1"
    # Deep link used when authorizing through the installed mobile app.
    app_auth_url = "moves://app/authorize"
    web_auth_uri = "https://api.moves-app.com/oauth/v1/authorize"
    token_url = "https://api.moves-app.com/oauth/v1/access_token"
    tokeninfo_url = "https://api.moves-app.com/oauth/v1/tokeninfo"
    def __init__(self, client_id=None, client_secret=None,
                 access_token=None, use_app=False):
        # use_app selects the moves:// deep link over the web auth page.
        self.client_id = client_id
        self.client_secret = client_secret
        self.access_token = access_token
        self.auth_url = self.app_auth_url if use_app else self.web_auth_uri
        self.use_app = use_app
        # Headers of the most recent API response (rate-limit info, etag).
        self._last_headers = None
    def parse_response(self, response):
        """Parse JSON API responses."""
        return json.loads(response.text)
    def build_oauth_url(self, redirect_uri=None, scope="activity location"):
        """Build the URL the user should visit to authorize this client."""
        params = {
            'client_id': self.client_id,
            'scope': scope
        }
        # The web flow (not the app deep link) needs response_type=code.
        if not self.use_app:
            params['response_type'] = 'code'
        if redirect_uri:
            params['redirect_uri'] = redirect_uri
        # Moves hates +s for spaces, so use %20 instead.
        encoded = urllib.urlencode(params).replace('+', '%20')
        return "%s?%s" % (self.auth_url, encoded)
    def get_oauth_token(self, code, **kwargs):
        """Exchange an authorization code for an access token.

        Raises MovesAPIError when the token endpoint returns an error
        payload instead of an access_token.
        """
        params = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'code': code,
            'grant_type': kwargs.get('grant_type', 'authorization_code')
        }
        if 'redirect_uri' in kwargs:
            params['redirect_uri'] = kwargs['redirect_uri']
        response = requests.post(self.token_url, params=params)
        response = json.loads(response.content)
        try:
            return response['access_token']
        # NOTE(review): bare except also hides non-KeyError failures.
        except:
            error = "<%(error)s>: %(error_description)s" % response
            raise MovesAPIError(error)
    def tokeninfo(self):
        """Return the token-introspection payload for the current token."""
        params = {
            'access_token': self.access_token
        }
        response = requests.get(self.tokeninfo_url, params=params)
        response = json.loads(response.content)
        try:
            return response
        # NOTE(review): this except is unreachable -- returning a parsed
        # dict cannot raise; json.loads above is outside the try.
        except:
            error = "<%(error)s>: %(error_description)s" % response
            raise MovesAPIError(error)
    def api(self, path, method='GET', **kwargs):
        """Perform a raw API request; return the `requests` Response.

        Recognised kwargs: ``params`` (query dict; may carry a per-call
        'access_token' and an 'etag' which is sent as If-None-Match) and
        ``data`` (form body).  Raises MovesAPIError on a non-2xx/3xx
        status and MovesAPINotModifed on 304.
        """
        params = kwargs['params'] if 'params' in kwargs else {}
        data = kwargs['data'] if 'data' in kwargs else {}
        if not self.access_token and 'access_token' not in params:
            raise MovesAPIError("You must provide a valid access token.")
        url = "%s/%s" % (self.api_url, path)
        # A token passed in params overrides the client-level token.
        if 'access_token' in params:
            access_token = params['access_token']
            del(params['access_token'])
        else:
            access_token = self.access_token
        headers = {
            "Authorization": 'Bearer ' + access_token
        }
        if 'etag' in params:
            headers['If-None-Match'] = params['etag']
            del(params['etag'])
        resp = requests.request(method, url,
                                data=data,
                                params=params,
                                headers=headers)
        # Any status outside 2xx/3xx is treated as an API error.
        if str(resp.status_code)[0] not in ('2', '3'):
            raise MovesAPIError("Error returned via the API with status code (%s):" %
                                resp.status_code, resp.text)
        if resp.status_code == 304:
            raise MovesAPINotModifed("Unmodified")
        # Remember headers so rate-limit properties can expose them.
        self._last_headers = resp.headers
        return resp
    def get(self, path, **params):
        """GET `path` and return the parsed JSON body."""
        return self.parse_response(
            self.api(path, 'GET', params=params))
    def post(self, path, **data):
        """POST `data` to `path` and return the parsed JSON body."""
        return self.parse_response(
            self.api(path, 'POST', data=data))
    def set_first_date(self):
        # NOTE(review): relies on self.first_date having been set elsewhere;
        # __init__ does not initialize it, so the first access raises
        # AttributeError unless a caller assigned it beforehand.
        if not self.first_date:
            response = self.user_profile()
            self.first_date = response['profile']['firstDate']
    def __getattr__(self, name):
        '''\
        Turns method calls such as "moves.foo_bar(...)" into
        a call to "moves.api('/foo/bar', 'GET', params={...})"
        and then parses the response.
        '''
        base_path = name.replace('_', '/')
        # Define a function that does what we want.
        def closure(*path, **params):
            'Accesses the /%s API endpoints.'
            path = list(path)
            path.insert(0, base_path)
            return self.parse_response(
                self.api('/'.join(path), 'GET', params=params)
            )
        # Clone a new method with the correct name and doc string.
        # (func_code/func_globals/... are the Python 2 function attributes.)
        retval = types.FunctionType(
            closure.func_code,
            closure.func_globals,
            name,
            closure.func_defaults,
            closure.func_closure)
        retval.func_doc = closure.func_doc % base_path
        # Cache it to avoid additional calls to __getattr__.
        setattr(self, name, retval)
        return retval
# Expose the etag / rate-limit response headers of the last API call as
# read-only MovesClient properties.  The attribute name must be a valid
# identifier (underscores) while the HTTP header itself is hyphenated, so
# both spellings are kept: the property is named with underscores but the
# lookup uses the original hyphenated header name.
#
# Bug fixed: the original replaced '-' with '_' BEFORE the lookup, so the
# underscored key was never found in the headers dict and every property
# returned None; it also fell back to the attribute-name string (instead
# of None) when no request had been made yet.
_move_client_status = ['etag', 'x-ratelimit-hourlimit', 'x-ratelimit-hourremaining',
                       'x-ratelimit-minutelimit', 'x-ratelimit-minuteremaining']
for _header in _move_client_status:
    _attr = _header.replace('-', '_')
    # Bind the header name as a default argument so each property captures
    # its own header rather than the loop variable's final value.
    setattr(MovesClient, _attr,
            property(lambda self, header=_header: self._last_headers.get(header, None)
                     if self._last_headers else None))
| [
"json.loads",
"requests.post",
"requests.request",
"requests.get",
"urllib.urlencode",
"types.FunctionType"
] | [((1128, 1153), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1138, 1153), False, 'import json\n'), ((2035, 2079), 'requests.post', 'requests.post', (['self.token_url'], {'params': 'params'}), '(self.token_url, params=params)\n', (2048, 2079), False, 'import requests\n'), ((2099, 2127), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2109, 2127), False, 'import json\n'), ((2438, 2485), 'requests.get', 'requests.get', (['self.tokeninfo_url'], {'params': 'params'}), '(self.tokeninfo_url, params=params)\n', (2450, 2485), False, 'import requests\n'), ((2505, 2533), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2515, 2533), False, 'import json\n'), ((3471, 3543), 'requests.request', 'requests.request', (['method', 'url'], {'data': 'data', 'params': 'params', 'headers': 'headers'}), '(method, url, data=data, params=params, headers=headers)\n', (3487, 3543), False, 'import requests\n'), ((5052, 5167), 'types.FunctionType', 'types.FunctionType', (['closure.func_code', 'closure.func_globals', 'name', 'closure.func_defaults', 'closure.func_closure'], {}), '(closure.func_code, closure.func_globals, name, closure.\n func_defaults, closure.func_closure)\n', (5070, 5167), False, 'import types\n'), ((1556, 1580), 'urllib.urlencode', 'urllib.urlencode', (['params'], {}), '(params)\n', (1572, 1580), False, 'import urllib\n')] |
# Generated by Django 2.2.10 on 2020-04-09 02:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `game` model.

    Renames active_guessing -> is_live_round, drops round_done and
    round_start_time, and adds a nullable round_start_timeint integer.
    """
    # Must be applied after the previous auto-generated core migration.
    dependencies = [
        ('core', '0007_auto_20200409_0223'),
    ]
    operations = [
        migrations.RenameField(
            model_name='game',
            old_name='active_guessing',
            new_name='is_live_round',
        ),
        migrations.RemoveField(
            model_name='game',
            name='round_done',
        ),
        migrations.RemoveField(
            model_name='game',
            name='round_start_time',
        ),
        # NOTE(review): integer field replacing the removed round_start_time
        # -- presumably a unix timestamp; confirm in application code.
        # Nullable/blank so existing rows need no default.
        migrations.AddField(
            model_name='game',
            name='round_start_timeint',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| [
"django.db.migrations.RemoveField",
"django.db.migrations.RenameField",
"django.db.models.IntegerField"
] | [((233, 332), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""game"""', 'old_name': '"""active_guessing"""', 'new_name': '"""is_live_round"""'}), "(model_name='game', old_name='active_guessing',\n new_name='is_live_round')\n", (255, 332), False, 'from django.db import migrations, models\n'), ((385, 445), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""game"""', 'name': '"""round_done"""'}), "(model_name='game', name='round_done')\n", (407, 445), False, 'from django.db import migrations, models\n'), ((490, 556), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""game"""', 'name': '"""round_start_time"""'}), "(model_name='game', name='round_start_time')\n", (512, 556), False, 'from django.db import migrations, models\n'), ((711, 753), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (730, 753), False, 'from django.db import migrations, models\n')] |
import pickle
import numpy as np
import scipy.linalg as sci
from scipy import signal
# Rotations
def wrap2Pi(x):
    """Wrap angle(s) x (radians) into the interval [-pi, pi)."""
    return np.mod(x + np.pi, 2.0 * np.pi) - np.pi
def Rot(x):
    """Return the 2x2 rotation matrix for angle x (radians)."""
    c, s = np.cos(x), np.sin(x)
    return np.array([[c, -s], [s, c]])
def RotVec(x_vec, rot_vec):
    """Rotate the (x, y) part of each row of x_vec by the matching angle.

    x_vec rows are (x, y, theta); the third column passes through untouched.
    """
    n_rows = x_vec.shape[0]
    rotated = np.array([np.dot(x_vec[i, :-1], Rot(rot_vec[i]))
                        for i in range(n_rows)])
    tail = x_vec[:, 2].reshape(n_rows, 1)
    return np.hstack([rotated, tail])
# Rattling
def diffusion_vel(x, dt):
    """Diffusion-scaled displacement: (x(t) - x(0)) / sqrt(t) per sample."""
    elapsed = np.sqrt(np.arange(1, x.shape[0] + 1) * dt)
    return (x - x[0]) / np.expand_dims(elapsed, axis=1)
def rattling(x, dt, noRat=False, diffVel=True):
    """Return (R, C): the 'rattling' log-volume and covariance of a signal.

    When diffVel is True the covariance is taken over the diffusion
    velocities of x; otherwise over x itself.  R is 0.5*log(det(C))
    (0.5*log(C) in the scalar case), or None when noRat is set.
    """
    samples = diffusion_vel(x, dt) if diffVel else np.copy(x)
    C = np.cov(samples.T)
    if noRat:
        return None, C
    if np.ndim(C) == 0:
        R = 0.5 * np.log(C)
    else:
        R = 0.5 * np.log(np.linalg.det(C))
    return R, C
def rattling_windows(mat, dt, window_sz, overlap, noRat=False, diffVel=True):
    """Compute rattling and covariance over sliding windows of `mat`.

    Returns three parallel lists: rattling values, covariance matrices,
    and the (start, end) index pair of each window.
    """
    ind_list = window_inds(mat, window_sz, overlap)
    rat_list, cov_list = [], []
    for start, stop in ind_list:
        R, C = rattling(mat[start:stop, :], dt, noRat, diffVel)
        cov_list.append(C)
        rat_list.append(R)
    return rat_list, cov_list, ind_list
# Rectangular windowing
def window_inds(dataset, window_sz, overlap, offset=0):
    """Return (start, end) index pairs of rectangular windows over `dataset`.

    `overlap` is the fractional overlap between consecutive windows,
    expected in [0, 1).  End indices are inclusive.
    """
    data_len = dataset.shape[0]
    assert window_sz < data_len
    hop = int(np.ceil(np.abs(overlap * window_sz)))
    # A full-window overlap would never advance; back off by one sample.
    if hop == window_sz:
        hop -= 1
    stride = window_sz - hop
    windows = []
    start = offset
    stop = offset + window_sz - 1
    while stop < data_len + offset:
        windows.append((start, stop))
        start += stride
        stop += stride
    return windows
# Filtering
def moving_average(x, N):
    """Length-N simple moving average of `x` ('valid' convolution mode)."""
    kernel = np.ones(N) / float(N)
    return np.convolve(x, kernel, mode='valid')
def butter_highpass(cutoff, fs, order=5):
    """Design a digital Butterworth high-pass filter.

    Returns (b, a) transfer-function coefficients for cutoff frequency
    `cutoff` (Hz) at sampling rate `fs` (Hz).
    """
    # Normalize the cutoff by the Nyquist frequency, as signal.butter expects.
    wn = cutoff / (0.5 * fs)
    return signal.butter(order, wn, btype='high', analog=False)
def butter_highpass_filter(data, cutoff, fs, order=5):
    """Zero-phase high-pass filter `data` with a Butterworth design."""
    b, a = butter_highpass(cutoff, fs, order=order)
    return signal.filtfilt(b, a, data)
def butter_bandstop(cutoffs, fs, order=5):
    """Design a digital Butterworth band-stop filter.

    `cutoffs` is an array-like (low, high) edge pair in Hz; `fs` is the
    sampling rate in Hz.  Returns (b, a) coefficients.
    """
    # Normalize both band edges by the Nyquist frequency.
    wn = cutoffs / (0.5 * fs)
    return signal.butter(order, wn, btype='bandstop', analog=False)
def butter_bandstop_filter(data, cutoffs, fs, order=5):
    """Zero-phase band-stop filter `data` with a Butterworth design."""
    b, a = butter_bandstop(cutoffs, fs, order=order)
    return signal.filtfilt(b, a, data)
# Save/Load
def store_data(fname, rlist, clist, elist):
    """Pickle the rattling results to `fname` as one dict.

    The dict carries keys 'rlist', 'clist', 'elist' (the inverse of
    load_data).  The handle is now managed with a context manager so it is
    closed even if pickling raises; the original leaked it on error.

    NOTE(review): the file is opened in append mode ('ab'), so repeated
    calls stack pickles and load_data only ever reads the first one --
    confirm whether 'wb' was intended.  Mode kept for compatibility.
    """
    db = {'rlist': rlist, 'clist': clist, 'elist': elist}
    with open(fname, 'ab') as dbfile:
        pickle.dump(db, dbfile)
def load_data(fname):
    """Load the (rlist, clist, elist) tuple previously written by store_data.

    Reading in binary mode is required for pickle.  The handle is now
    managed with a context manager so it is closed even if unpickling
    raises; the original leaked it on error.
    """
    with open(fname, 'rb') as dbfile:
        db = pickle.load(dbfile)
    return db['rlist'], db['clist'], db['elist']
# Observable preprocessing
def preprocess(data):
    """
    Map raw smarticle poses to a 7-D rotation-invariant observable.

    Parameters
    ----------
    data : ndarray, shape (SampleNum, 9)
        Columns are (x, y, theta) for each of the three smarticles.
        (An earlier version of this docstring described 5 coordinates per
        smarticle, but the code only slices out 3 per smarticle.)

    Returns
    -------
    ndarray, shape (SampleNum, 7)
        [<mx_1>, <mx_2>, <mx_3>, <my_1>, <my_2>, <my_3>, p_theta]: the
        centre-of-mass offset expressed in each smarticle's body frame
        (x then y components), plus the magnitude of the mean heading
        phasor (1.0 when all three headings align).
    """
    # Take in (x,y,theta) of each smarticle
    S1_coords = data[:,0:3]
    S2_coords = data[:,3:6]
    S3_coords = data[:,6:9]
    #########################
    # Rotational invariance #
    #########################
    # Get CoM from the frame of each smarticle
    CoM = np.mean([S1_coords,S2_coords,S3_coords],axis=0)
    CoM_S1 = CoM-S1_coords
    CoM_S2 = CoM-S2_coords
    CoM_S3 = CoM-S3_coords
    # Wrap angles
    CoM_S1[:,2] = wrap2Pi(CoM_S1[:,2])
    CoM_S2[:,2] = wrap2Pi(CoM_S2[:,2])
    CoM_S3[:,2] = wrap2Pi(CoM_S3[:,2])
    # Rotate coordinates so they're relative to the previous timestep
    relCoM_S1 = RotVec(CoM_S1, S1_coords[:,2])
    relCoM_S2 = RotVec(CoM_S2, S2_coords[:,2])
    relCoM_S3 = RotVec(CoM_S3, S3_coords[:,2])
    # Result Matrix: x-offsets of all three smarticles, then y-offsets
    resMat = np.vstack([relCoM_S1[:,0],relCoM_S2[:,0],relCoM_S3[:,0],
                       relCoM_S1[:,1],relCoM_S2[:,1],relCoM_S3[:,1]]).T
    # For theta: |mean(exp(i*theta))| over the three headings per sample
    pTheta = np.abs(np.mean(np.exp(1j*np.vstack([S1_coords[:,2],S2_coords[:,2],S3_coords[:,2]]).T),axis=1)).reshape(data.shape[0],1)
    return np.hstack([resMat,pTheta])
"numpy.hstack",
"scipy.signal.filtfilt",
"numpy.log",
"numpy.sin",
"numpy.cov",
"numpy.mod",
"numpy.arange",
"numpy.divide",
"numpy.mean",
"numpy.vstack",
"numpy.abs",
"numpy.ones",
"pickle.load",
"numpy.cos",
"numpy.shape",
"numpy.copy",
"pickle.dump",
"scipy.signal.butter",
"nu... | [((120, 150), 'numpy.mod', 'np.mod', (['(x + np.pi)', '(2.0 * np.pi)'], {}), '(x + np.pi, 2.0 * np.pi)\n', (126, 150), True, 'import numpy as np\n'), ((545, 571), 'numpy.divide', 'np.divide', (['(x - x[0])', 't_vec'], {}), '(x - x[0], t_vec)\n', (554, 571), True, 'import numpy as np\n'), ((730, 743), 'numpy.cov', 'np.cov', (['vec.T'], {}), '(vec.T)\n', (736, 743), True, 'import numpy as np\n'), ((2128, 2191), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoff'], {'btype': '"""high"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='high', analog=False)\n", (2141, 2191), False, 'from scipy import signal\n'), ((2324, 2351), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2339, 2351), False, 'from scipy import signal\n'), ((2474, 2542), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoffs'], {'btype': '"""bandstop"""', 'analog': '(False)'}), "(order, normal_cutoffs, btype='bandstop', analog=False)\n", (2487, 2542), False, 'from scipy import signal\n'), ((2677, 2704), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2692, 2704), False, 'from scipy import signal\n'), ((2898, 2921), 'pickle.dump', 'pickle.dump', (['db', 'dbfile'], {}), '(db, dbfile)\n', (2909, 2921), False, 'import pickle\n'), ((3087, 3106), 'pickle.load', 'pickle.load', (['dbfile'], {}), '(dbfile)\n', (3098, 3106), False, 'import pickle\n'), ((3850, 3900), 'numpy.mean', 'np.mean', (['[S1_coords, S2_coords, S3_coords]'], {'axis': '(0)'}), '([S1_coords, S2_coords, S3_coords], axis=0)\n', (3857, 3900), True, 'import numpy as np\n'), ((4590, 4617), 'numpy.hstack', 'np.hstack', (['[resMat, pTheta]'], {}), '([resMat, pTheta])\n', (4599, 4617), True, 'import numpy as np\n'), ((711, 721), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (718, 721), True, 'import numpy as np\n'), ((4325, 4443), 'numpy.vstack', 'np.vstack', (['[relCoM_S1[:, 0], relCoM_S2[:, 0], relCoM_S3[:, 0], relCoM_S1[:, 
1],\n relCoM_S2[:, 1], relCoM_S3[:, 1]]'], {}), '([relCoM_S1[:, 0], relCoM_S2[:, 0], relCoM_S3[:, 0], relCoM_S1[:, \n 1], relCoM_S2[:, 1], relCoM_S3[:, 1]])\n', (4334, 4443), True, 'import numpy as np\n'), ((1667, 1694), 'numpy.abs', 'np.abs', (['(overlap * window_sz)'], {}), '(overlap * window_sz)\n', (1673, 1694), True, 'import numpy as np\n'), ((1985, 1998), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (1992, 1998), True, 'import numpy as np\n'), ((197, 206), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (203, 206), True, 'import numpy as np\n'), ((220, 229), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (226, 229), True, 'import numpy as np\n'), ((230, 239), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (236, 239), True, 'import numpy as np\n'), ((497, 525), 'numpy.arange', 'np.arange', (['(1)', '(x.shape[0] + 1)'], {}), '(1, x.shape[0] + 1)\n', (506, 525), True, 'import numpy as np\n'), ((800, 811), 'numpy.shape', 'np.shape', (['C'], {}), '(C)\n', (808, 811), True, 'import numpy as np\n'), ((839, 848), 'numpy.log', 'np.log', (['C'], {}), '(C)\n', (845, 848), True, 'import numpy as np\n'), ((208, 217), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (214, 217), True, 'import numpy as np\n'), ((890, 906), 'numpy.linalg.det', 'np.linalg.det', (['C'], {}), '(C)\n', (903, 906), True, 'import numpy as np\n'), ((4487, 4549), 'numpy.vstack', 'np.vstack', (['[S1_coords[:, 2], S2_coords[:, 2], S3_coords[:, 2]]'], {}), '([S1_coords[:, 2], S2_coords[:, 2], S3_coords[:, 2]])\n', (4496, 4549), True, 'import numpy as np\n')] |
import pytest
from time_manager.schemas.user import (
UserBase,
UserCredentials,
UserDB,
UserDBBase,
validate_username,
)
@pytest.mark.parametrize(
    "username,should_raise",
    [
        ("", True),
        (" ", True),
        (" -", True),
        ("- ", True),
        ("-a", True),
        ("a ", True),
        ("a", False),
        ("a!@#$%^&*()_12qw", False),
        ("a !@#$%^&*()_12qw", True),
    ],
)
def test_username(username, should_raise):
    """Valid usernames round-trip through validate_username; invalid raise."""
    if not should_raise:
        assert validate_username(username) == username
        return
    with pytest.raises(ValueError):
        validate_username(username)
def test_models():
    """Valid constructions succeed; each invalid value raises ValueError.

    Fixed: the original stacked five constructor calls inside a single
    `pytest.raises` block, so only the first raising statement was ever
    executed -- the remaining four cases were silently untested.  Each
    invalid case now gets its own `pytest.raises` context.
    """
    UserBase()
    UserDBBase()
    UserDB(id=1, hashed_password="aa", username="user_1", email="a@a.a")
    UserCredentials(username="a", password="")
    with pytest.raises(ValueError):
        UserBase(username="")
    with pytest.raises(ValueError):
        UserDBBase(username="")
    with pytest.raises(ValueError):
        UserDB(id=1, hashed_password="aa", username="user 1")
    with pytest.raises(ValueError):
        UserCredentials(username="a-b", password="")
    with pytest.raises(ValueError):
        UserDB(id=1, hashed_password="aa", username="user_1", email="a@a.")
| [
"time_manager.schemas.user.UserCredentials",
"time_manager.schemas.user.UserBase",
"time_manager.schemas.user.UserDB",
"pytest.mark.parametrize",
"pytest.raises",
"time_manager.schemas.user.UserDBBase",
"time_manager.schemas.user.validate_username"
] | [((146, 358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""username,should_raise"""', "[('', True), (' ', True), (' -', True), ('- ', True), ('-a', True), ('a ', \n True), ('a', False), ('a!@#$%^&*()_12qw', False), ('a !@#$%^&*()_12qw',\n True)]"], {}), "('username,should_raise', [('', True), (' ', True),\n (' -', True), ('- ', True), ('-a', True), ('a ', True), ('a', False), (\n 'a!@#$%^&*()_12qw', False), ('a !@#$%^&*()_12qw', True)])\n", (169, 358), False, 'import pytest\n'), ((674, 684), 'time_manager.schemas.user.UserBase', 'UserBase', ([], {}), '()\n', (682, 684), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((689, 701), 'time_manager.schemas.user.UserDBBase', 'UserDBBase', ([], {}), '()\n', (699, 701), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((706, 774), 'time_manager.schemas.user.UserDB', 'UserDB', ([], {'id': '(1)', 'hashed_password': '"""aa"""', 'username': '"""user_1"""', 'email': '"""a@a.a"""'}), "(id=1, hashed_password='aa', username='user_1', email='a@a.a')\n", (712, 774), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((779, 821), 'time_manager.schemas.user.UserCredentials', 'UserCredentials', ([], {'username': '"""a"""', 'password': '""""""'}), "(username='a', password='')\n", (794, 821), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((831, 856), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (844, 856), False, 'import pytest\n'), ((866, 887), 'time_manager.schemas.user.UserBase', 'UserBase', ([], {'username': '""""""'}), "(username='')\n", (874, 887), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((896, 919), 'time_manager.schemas.user.UserDBBase', 
'UserDBBase', ([], {'username': '""""""'}), "(username='')\n", (906, 919), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((928, 981), 'time_manager.schemas.user.UserDB', 'UserDB', ([], {'id': '(1)', 'hashed_password': '"""aa"""', 'username': '"""user 1"""'}), "(id=1, hashed_password='aa', username='user 1')\n", (934, 981), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((990, 1034), 'time_manager.schemas.user.UserCredentials', 'UserCredentials', ([], {'username': '"""a-b"""', 'password': '""""""'}), "(username='a-b', password='')\n", (1005, 1034), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((1043, 1110), 'time_manager.schemas.user.UserDB', 'UserDB', ([], {'id': '(1)', 'hashed_password': '"""aa"""', 'username': '"""user_1"""', 'email': '"""a@a."""'}), "(id=1, hashed_password='aa', username='user_1', email='a@a.')\n", (1049, 1110), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((517, 542), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (530, 542), False, 'import pytest\n'), ((556, 583), 'time_manager.schemas.user.validate_username', 'validate_username', (['username'], {}), '(username)\n', (573, 583), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n'), ((609, 636), 'time_manager.schemas.user.validate_username', 'validate_username', (['username'], {}), '(username)\n', (626, 636), False, 'from time_manager.schemas.user import UserBase, UserCredentials, UserDB, UserDBBase, validate_username\n')] |
"""
Make a learning curve for the full neural net trained on all 30 output
measures. The point of this graph is to investigate how much training data
is needed to achieve various MSE values.
"""
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import lasagne
from lasagne import layers
from lasagne import nonlinearities
from lasagne.nonlinearities import ScaledTanH
from nolearn.lasagne import NeuralNet, TrainSplit
from sklearn.learning_curve import learning_curve
from lignet_utils import gen_train_test
# Load the pre-split dataset and its feature/target scalers.
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
# set up the Scaled tanh parameters. See nonlinearities.py for usage notes.
# I am following the guidance of LeCun et al. for these values
scaled_tanh = ScaledTanH(scale_in=2./3, scale_out=1.7159)
# Make a learning curve to find out how much training data to use
# (the 1 multiplier is a knob for using a fraction of the training set)
train_size = int(1 * x_train.shape[0])
xt = x_train[:train_size, :]
yt = y_train[:train_size, :]
# Two hidden layers (18 and 20 units) with scaled-tanh activations; linear
# output for regression on all 30 target measures.  Validation split of 30%
# is handled inside nolearn's TrainSplit.
train_sizes, train_scores, valid_scores = learning_curve(
    NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden0', layers.DenseLayer),
            ('hidden1', layers.DenseLayer),
            ('output', layers.DenseLayer)
            ],
        input_shape=(None, x_train.shape[1]),
        hidden0_num_units=18,
        hidden0_nonlinearity=scaled_tanh,
        hidden1_num_units=20,
        hidden1_nonlinearity=scaled_tanh,
        output_num_units=y_train.shape[1],
        output_nonlinearity=nonlinearities.linear,
        regression=True,
        verbose=1,
        max_epochs=4000,
        update=lasagne.updates.adagrad,
        train_split=TrainSplit(eval_size=0.3),
        ),
    xt, yt,
    train_sizes=[500, 1500, 5000, 15000, 35000, 75000, 133333],
    scoring='mean_squared_error')
# Aggregate the per-fold scores for plotting mean +/- std bands.
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
valid_scores_mean = np.mean(valid_scores, axis=1)
valid_scores_std = np.std(valid_scores, axis=1)
# Persist the curve data so plotting can be done in a separate session.
with open('learning_curve.pkl', 'wb') as pkl:
    pickle.dump([train_scores_mean, train_scores_std,
                 valid_scores_mean, valid_scores_std,
                 train_sizes], pkl)
| [
"numpy.mean",
"cPickle.dump",
"lignet_utils.gen_train_test",
"lasagne.nonlinearities.ScaledTanH",
"numpy.std",
"nolearn.lasagne.TrainSplit"
] | [((593, 609), 'lignet_utils.gen_train_test', 'gen_train_test', ([], {}), '()\n', (607, 609), False, 'from lignet_utils import gen_train_test\n'), ((764, 810), 'lasagne.nonlinearities.ScaledTanH', 'ScaledTanH', ([], {'scale_in': '(2.0 / 3)', 'scale_out': '(1.7159)'}), '(scale_in=2.0 / 3, scale_out=1.7159)\n', (774, 810), False, 'from lasagne.nonlinearities import ScaledTanH\n'), ((1832, 1861), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1839, 1861), True, 'import numpy as np\n'), ((1881, 1909), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1887, 1909), True, 'import numpy as np\n'), ((1930, 1959), 'numpy.mean', 'np.mean', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (1937, 1959), True, 'import numpy as np\n'), ((1979, 2007), 'numpy.std', 'np.std', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (1985, 2007), True, 'import numpy as np\n'), ((2059, 2168), 'cPickle.dump', 'pickle.dump', (['[train_scores_mean, train_scores_std, valid_scores_mean, valid_scores_std,\n train_sizes]', 'pkl'], {}), '([train_scores_mean, train_scores_std, valid_scores_mean,\n valid_scores_std, train_sizes], pkl)\n', (2070, 2168), True, 'import cPickle as pickle\n'), ((1663, 1688), 'nolearn.lasagne.TrainSplit', 'TrainSplit', ([], {'eval_size': '(0.3)'}), '(eval_size=0.3)\n', (1673, 1688), False, 'from nolearn.lasagne import NeuralNet, TrainSplit\n')] |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
def _GetTelemetryPath(input_api):
return os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
input_api.PresubmitLocalPath())))), 'tools', 'telemetry')
def LoadSupport(input_api):
  """Import telemetry's cloud_storage module once and cache it in globals().

  Temporarily prepends the telemetry checkout to sys.path for the import,
  restoring the original path afterwards so other presubmit code is
  unaffected.  Subsequent calls return the cached module.
  """
  if 'cloud_storage' not in globals():
    # Avoid leaking changes to global sys.path.
    _old_sys_path = sys.path
    try:
      telemetry_path = _GetTelemetryPath(input_api)
      sys.path = [telemetry_path] + sys.path
      from telemetry.util import cloud_storage
      globals()['cloud_storage'] = cloud_storage
    finally:
      sys.path = _old_sys_path
  return globals()['cloud_storage']
def _GetFilesNotInCloud(input_api):
  """Return (hash_path, hash) pairs for .sha1 files whose data is missing.

  Scans the change's affected files for .sha1 stubs and checks both the
  public and (when credentials allow) internal Cloud Storage buckets for
  the referenced hashes.
  """
  sha1_paths = []
  for affected in input_api.AffectedFiles(include_deletes=False):
    local_path = affected.AbsoluteLocalPath()
    _, ext = os.path.splitext(local_path)
    if ext == '.sha1':
      sha1_paths.append(local_path)
  if not sha1_paths:
    return []
  cloud_storage = LoadSupport(input_api)
  # Look in both buckets, in case the user uploaded the file manually.
  known_hashes = cloud_storage.List(cloud_storage.PUBLIC_BUCKET)
  try:
    known_hashes += cloud_storage.List(cloud_storage.INTERNAL_BUCKET)
  except (cloud_storage.PermissionError, cloud_storage.CredentialsError):
    pass
  missing = []
  for sha1_path in sha1_paths:
    file_hash = cloud_storage.ReadHash(sha1_path)
    if file_hash not in known_hashes:
      missing.append((sha1_path, file_hash))
  return missing
def _VerifyFilesInCloud(input_api, output_api):
  """Fails presubmit if any .sha1 files have not been previously uploaded to
  Cloud Storage.

  Emits one PresubmitError per missing file, plus a final error showing
  the exact upload command to run.

  Returns:
    A list of output_api.PresubmitError objects (empty when all files
    are already uploaded).
  """
  results = []
  hash_paths = _GetFilesNotInCloud(input_api)
  file_paths = []
  for hash_path, _ in hash_paths:
    # Fixed typo in the user-facing message ("Attemping" -> "Attempting").
    results.append(output_api.PresubmitError(
        'Attempting to commit hash file, but corresponding '
        'data file is not in Cloud Storage: %s' % hash_path))
    file_paths.append(os.path.splitext(hash_path)[0])

  if file_paths:
    upload_script_path = os.path.join(
        _GetTelemetryPath(input_api), 'cloud_storage')
    results.append(output_api.PresubmitError(
        'To upload missing files, Run: \n'
        '%s upload %s google-only' %
        (upload_script_path, ' '.join(file_paths))))
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run at upload time; flags hash files missing from cloud."""
  return _VerifyFilesInCloud(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run at commit time; flags hash files missing from cloud."""
  return _VerifyFilesInCloud(input_api, output_api)
| [
"telemetry.util.cloud_storage.ReadHash",
"os.path.splitext",
"telemetry.util.cloud_storage.List"
] | [((1429, 1476), 'telemetry.util.cloud_storage.List', 'cloud_storage.List', (['cloud_storage.PUBLIC_BUCKET'], {}), '(cloud_storage.PUBLIC_BUCKET)\n', (1447, 1476), False, 'from telemetry.util import cloud_storage\n'), ((1160, 1187), 'os.path.splitext', 'os.path.splitext', (['hash_path'], {}), '(hash_path)\n', (1176, 1187), False, 'import os\n'), ((1515, 1564), 'telemetry.util.cloud_storage.List', 'cloud_storage.List', (['cloud_storage.INTERNAL_BUCKET'], {}), '(cloud_storage.INTERNAL_BUCKET)\n', (1533, 1564), False, 'from telemetry.util import cloud_storage\n'), ((1709, 1742), 'telemetry.util.cloud_storage.ReadHash', 'cloud_storage.ReadHash', (['hash_path'], {}), '(hash_path)\n', (1731, 1742), False, 'from telemetry.util import cloud_storage\n'), ((2304, 2331), 'os.path.splitext', 'os.path.splitext', (['hash_path'], {}), '(hash_path)\n', (2320, 2331), False, 'import os\n')] |
from flask import render_template, flash, redirect, url_for
from app import flask_app
from app.forms import LoginForm
from flask_login import current_user, login_user
from app.models import User
from flask_login import logout_user
from flask_login import login_required
from flask import request
from werkzeug.urls import url_parse
from app import db
from app.forms import RegistrationForm
@flask_app.route('/')
@flask_app.route('/index')
@login_required
def index():
    """Render the home page for a logged-in user.

    Serves a hard-coded list of demo posts to the index.html template.
    The previously assigned ``user`` dict was never passed to the
    template and has been removed as dead code.
    """
    posts = [
        {
            'author': {'username': 'User2'},
            'body': 'Body of post from author User2'
        },
        {
            'author': {'username': 'User3'},
            'body': 'Body of post from author User3'
        }
    ]
    return render_template('index.html', title='Homepage', posts=posts)
@flask_app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the requested page.

    Already-authenticated users are sent straight to the index. On bad
    credentials the user is flashed an error (typo fixed: "pasword" ->
    "password") and returned to the login form. The ``next`` query
    parameter is honoured only when it is a relative URL, which blocks
    open-redirect attacks via a crafted ``?next=`` value.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash("Invalid username or password!")
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Only follow relative redirect targets; absolute URLs fall back
        # to the index page.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@flask_app.route('/logout')
def logout():
    """Log the current user out and send them back to the home page."""
    logout_user()
    home_url = url_for('index')
    return redirect(home_url)
@flask_app.route('/register', methods=['GET', 'POST'])
def register():
    """Show the registration form and create the account on submit.

    Logged-in users are redirected to the index; a successful
    registration flashes a confirmation and redirects to the login page.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('register.html', title='Register', form=form)
    new_user = User(username=form.username.data, email=form.email.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('Congratulations! You are now registered user...')
    return redirect(url_for('login'))
| [
"app.flask_app.route",
"flask.render_template",
"app.forms.RegistrationForm",
"flask.request.args.get",
"app.forms.LoginForm",
"app.db.session.commit",
"flask.flash",
"werkzeug.urls.url_parse",
"flask_login.login_user",
"flask_login.logout_user",
"app.models.User",
"flask.url_for",
"flask.re... | [((392, 412), 'app.flask_app.route', 'flask_app.route', (['"""/"""'], {}), "('/')\n", (407, 412), False, 'from app import flask_app\n'), ((414, 439), 'app.flask_app.route', 'flask_app.route', (['"""/index"""'], {}), "('/index')\n", (429, 439), False, 'from app import flask_app\n'), ((833, 883), 'app.flask_app.route', 'flask_app.route', (['"""/login"""'], {'methods': "['GET', 'POST']"}), "('/login', methods=['GET', 'POST'])\n", (848, 883), False, 'from app import flask_app\n'), ((1586, 1612), 'app.flask_app.route', 'flask_app.route', (['"""/logout"""'], {}), "('/logout')\n", (1601, 1612), False, 'from app import flask_app\n'), ((1685, 1738), 'app.flask_app.route', 'flask_app.route', (['"""/register"""'], {'methods': "['GET', 'POST']"}), "('/register', methods=['GET', 'POST'])\n", (1700, 1738), False, 'from app import flask_app\n'), ((770, 830), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Homepage"""', 'posts': 'posts'}), "('index.html', title='Homepage', posts=posts)\n", (785, 830), False, 'from flask import render_template, flash, redirect, url_for\n'), ((988, 999), 'app.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (997, 999), False, 'from app.forms import LoginForm\n'), ((1526, 1583), 'flask.render_template', 'render_template', (['"""login.html"""'], {'title': '"""Sign In"""', 'form': 'form'}), "('login.html', title='Sign In', form=form)\n", (1541, 1583), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1631, 1644), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (1642, 1644), False, 'from flask_login import logout_user\n'), ((1846, 1864), 'app.forms.RegistrationForm', 'RegistrationForm', ([], {}), '()\n', (1862, 1864), False, 'from app.forms import RegistrationForm\n'), ((2192, 2253), 'flask.render_template', 'render_template', (['"""register.html"""'], {'title': '"""Register"""', 'form': 'form'}), "('register.html', title='Register', form=form)\n", (2207, 2253), False, 
'from flask import render_template, flash, redirect, url_for\n'), ((1283, 1331), 'flask_login.login_user', 'login_user', (['user'], {'remember': 'form.remember_me.data'}), '(user, remember=form.remember_me.data)\n', (1293, 1331), False, 'from flask_login import current_user, login_user\n'), ((1352, 1376), 'flask.request.args.get', 'request.args.get', (['"""next"""'], {}), "('next')\n", (1368, 1376), False, 'from flask import request\n'), ((1495, 1514), 'flask.redirect', 'redirect', (['next_page'], {}), '(next_page)\n', (1503, 1514), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1665, 1681), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1672, 1681), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1914, 1970), 'app.models.User', 'User', ([], {'username': 'form.username.data', 'email': 'form.email.data'}), '(username=form.username.data, email=form.email.data)\n', (1918, 1970), False, 'from app.models import User\n'), ((2025, 2045), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (2039, 2045), False, 'from app import db\n'), ((2054, 2073), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2071, 2073), False, 'from app import db\n'), ((2082, 2138), 'flask.flash', 'flash', (['"""Congratulations! You are now registered user..."""'], {}), "('Congratulations! 
You are now registered user...')\n", (2087, 2138), False, 'from flask import render_template, flash, redirect, url_for\n'), ((959, 975), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (966, 975), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1191, 1228), 'flask.flash', 'flash', (['"""Invalid username or pasword!"""'], {}), "('Invalid username or pasword!')\n", (1196, 1228), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1463, 1479), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1470, 1479), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1817, 1833), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1824, 1833), False, 'from flask import render_template, flash, redirect, url_for\n'), ((2163, 2179), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (2170, 2179), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1049, 1098), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'form.username.data'}), '(username=form.username.data)\n', (1069, 1098), False, 'from app.models import User\n'), ((1257, 1273), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (1264, 1273), False, 'from flask import render_template, flash, redirect, url_for\n'), ((1405, 1425), 'werkzeug.urls.url_parse', 'url_parse', (['next_page'], {}), '(next_page)\n', (1414, 1425), False, 'from werkzeug.urls import url_parse\n')] |
from pysmt.shortcuts import Symbol
from pysmt.typing import INT

# Integer-valued SMT variable named "H".
h = Symbol("H", INT)
# Constrain H to the inclusive range [1, 10].
domain = (1 <= h) & (h <= 10)
| [
"pysmt.shortcuts.Symbol"
] | [((69, 85), 'pysmt.shortcuts.Symbol', 'Symbol', (['"""H"""', 'INT'], {}), "('H', INT)\n", (75, 85), False, 'from pysmt.shortcuts import Symbol\n')] |
# vim: sw=4:ts=4:et
from flask import Blueprint

# Blueprint collecting this package's routes and error handlers.
main = Blueprint('main', __name__)

# Imported at the bottom on purpose: views and errors reference `main`,
# so importing them any earlier would create a circular import.
from . import views, errors
| [
"flask.Blueprint"
] | [((55, 82), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (64, 82), False, 'from flask import Blueprint\n')] |
from PyQt5.QtWidgets import QMainWindow
from Controller.venda import VendaTemp
from Funcoes.utils import data_hora_atual
from Model.Compra_Itens import Compra_Itens
from Model.Compra_Fin import Compra_Fin
from Model.Compra_Header import Compras_Header
from Model.Compra_Tmp import Compra_Tmp
from Model.Fornecedor import Fornecedor
from Model.Pendencias import Pendencias
from Model.Produtos import Produtos
from Model.Venda_Tmp import Venda_Tmp
from Model.Venda_Fin import Venda_Fin
from Model.Finalizadoras import Finalizadoras
from PyQt5 import QtGui, QtCore
class EventFilter(QtCore.QObject):
    """Qt event filter that refreshes the parent window's payment combo.

    When the watched window regains focus while a payment method is being
    added (``adicionando_fin``), its ``preenche_combo`` is re-run so the
    newly created method appears in the combo box.
    """

    def __init__(self, parent=None):
        QtCore.QObject.__init__(self, parent)

    def eventFilter(self, obj, event):
        activation_changed = event.type() == QtCore.QEvent.ActivationChange
        if (activation_changed
                and self.parent().isActiveWindow()
                and obj.adicionando_fin):
            obj.preenche_combo()
        return QtCore.QObject.eventFilter(self, obj, event)
class Finalizar(QMainWindow):
    """Modal checkout window that collects payments for a sale or purchase.

    The parent (``tela_principal``) determines the mode: a VendaTemp
    parent means a sale (Venda_* models), anything else a purchase
    (Compra_* models). When the sum of payments reaches the total, the
    pending temporary records are promoted to final header/item rows,
    stock is adjusted and the window closes.
    """

    def __init__(self, parent=None):
        # Build the UI, fix the window size and install the event filter
        # that refreshes the payment combo when focus returns.
        super(Finalizar, self).__init__(parent)
        from View.finalizar import Ui_Frame
        from PyQt5.QtCore import Qt
        self.ui = Ui_Frame()
        self.ui.setupUi(self)
        self.dialogs = list()
        self.tamanho = self.size()
        self.setFixedSize(self.tamanho)
        self.setWindowIcon(QtGui.QIcon("Imagens/logo_fzr.png"))
        self.tela_principal = parent
        self.setWindowModality(Qt.ApplicationModal)
        self.installEventFilter(EventFilter(self))
        self.linha_selecionada = None
        # Sale vs. purchase mode: pick the matching model and grand total.
        if isinstance(self.tela_principal, VendaTemp):
            self.fin_selecionada = Venda_Fin()
            self.total = Venda_Tmp.retorna_total()
        else:
            self.fin_selecionada = Compra_Fin()
            self.total = Compra_Tmp.retorna_total()
        self.desconto_total = 0
        self.total_item = 0
        self.adicionando_fin = False
        self.restante = 0
        self.ui.bt_inserir.clicked.connect(self.inserir_fin)
        self.ui.bt_sair.clicked.connect(self.sair)
        self.ui.tx_valor.returnPressed.connect(self.inserir_fin)
        self.ui.tb_fin.cellClicked.connect(self.linha_clicada)
        self.ui.bt_excluir.clicked.connect(self.excluir_finalizacao)
        self.ui.bt_add_finalizadora.clicked.connect(self.add_fin)
        for i in range(0, 3):
            self.ui.tb_fin.horizontalHeaderItem(i).setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
        self.ui.tb_fin.setColumnWidth(0, 30)
        self.ui.tb_fin.setColumnWidth(1, 300)
        self.ui.tb_fin.setColumnWidth(2, 80)
        self.set_lbls()
        self.preenche_tabela()
        self.preenche_combo()

    def add_fin(self):
        """Open the payment-method registration window.

        Sets ``adicionando_fin`` so the event filter refreshes the combo
        when this window regains focus.
        """
        from Controller.cadastro_finalizadoras import CadastroFinalizadoras
        from Funcoes.utils import exec_app
        self.adicionando_fin = True
        c = CadastroFinalizadoras()
        exec_app(c)
        self.dialogs.append(c)

    def resizeEvent(self, a0: QtGui.QResizeEvent):
        # Re-assert the fixed size so the window cannot be resized.
        self.setFixedSize(self.tamanho)

    def formatar_texto(self):
        """Strip the last typed character unless it keeps the value numeric
        (digits plus at most one decimal point)."""
        texto = self.ui.tx_valor.text()
        tamanho = len(texto)
        if not texto[tamanho-1:tamanho].isnumeric():
            if texto[tamanho-1:tamanho] != '.':
                self.ui.tx_valor.setText(texto[:tamanho - 1])
        if texto.count(".") > 1 and texto[tamanho-1:tamanho] == '.':
            self.ui.tx_valor.setText(texto[:tamanho - 1])

    def sair(self):
        """Close the checkout window."""
        self.close()

    def preenche_combo(self):
        """Reload the payment-method combo: a placeholder entry at index 0
        followed by every registered payment method (row stored as item
        data)."""
        self.ui.cb_pagamento.clear()
        self.ui.cb_pagamento.addItem("SELECIONE")
        todas_fin = Finalizadoras.get_todas_finalizadoras()
        for contador, fin in enumerate(todas_fin):
            contador += 1
            self.ui.cb_pagamento.addItem(fin[1])
            self.ui.cb_pagamento.setItemData(contador, fin)

    def inserir_fin(self):
        """Record the typed amount against the selected payment method.

        Inserts (or updates, if the method was already used) a payment
        row, then, once the paid total covers the order total, finalizes
        the whole sale/purchase: writes the header and item rows, adjusts
        stock (- for sales, + for purchases), deletes the temporary rows
        and closes the window. For sales it can also reprint and handle a
        previously pending order (``codigo_venda``).
        """
        from PyQt5.QtWidgets import QMessageBox
        if self.ui.cb_pagamento.currentIndex() != 0:
            # Build the payment row for the current sale or purchase.
            if isinstance(self.tela_principal, VendaTemp):
                v_fin = Venda_Fin()
                v_fin.venda_id = Venda_Tmp.get_cod_venda()
            else:
                v_fin = Compra_Fin()
                v_fin.compra_id = Compra_Tmp.get_cod_compra()
            v_fin.finalizadoras = Finalizadoras()
            indice_fin = self.ui.cb_pagamento.currentIndex()
            fin_id = self.ui.cb_pagamento.itemData(indice_fin)[0]
            v_fin.valor = float(self.ui.tx_valor.text())
            v_fin.finalizadoras.id = fin_id
            try:
                # Update when this payment method was already used,
                # otherwise insert a new row; then read the running total.
                if isinstance(self.tela_principal, VendaTemp):
                    if v_fin.check_fin(fin_id):
                        v_fin.update_fin_venda(fin_id)
                    else:
                        v_fin.inserir_fin_venda()
                    valor_pago = self.tela_principal.venda_fin.valor_pago()
                else:
                    if v_fin.check_fin(fin_id):
                        v_fin.update_fin_compra(fin_id)
                    else:
                        v_fin.inserir_fin_compra()
                    valor_pago = self.tela_principal.compra_fin.valor_pago()
            except Exception as error:
                QMessageBox.about(self, "Erro", str(error))
            else:
                self.preenche_tabela()
                if valor_pago >= self.total:
                    # Paid in full: finalize the order.
                    from Model.Venda_Itens import Vendas
                    from Model.Veiculo import Veiculo
                    from Model.Vendas_Header import Vendas_Header
                    from Model.Usuario import Usuario
                    from Model.Operador import Operador
                    from Model.Cliente import Cliente
                    self.tela_principal.finalizou = True
                    if (valor_pago - self.total) > 0:
                        # Overpayment: show the change due.
                        QMessageBox.about(self, "Venda Finalizada!", f"Valor de troco: {valor_pago - self.total:.2f}")
                    if isinstance(self.tela_principal, VendaTemp):
                        # --- Sale finalization ---
                        v = Venda_Tmp()
                        v.veiculo = Veiculo()
                        header = Vendas_Header()
                        header.veiculo = Veiculo()
                        header.cliente = Cliente()
                        header.id = Venda_Tmp.get_cod_venda()
                        # Optional vehicle selected on the main screen.
                        indice_veic = self.tela_principal.ui.cb_veiculo.currentIndex()
                        if indice_veic != 0:
                            v.veiculo = Veiculo()
                            veic_placa = self.tela_principal.ui.cb_veiculo.itemData(indice_veic)[0]
                            v.veiculo.placa = veic_placa
                            header.veiculo.placa = veic_placa
                        v.cliente = Cliente()
                        v.cliente.id = self.tela_principal.cliente_selecionado.id
                        header.cliente.id = self.tela_principal.cliente_selecionado.id
                        v.update_cliente()
                        header.qtd_itens = Venda_Tmp.qtd_itens()
                        header.total_descontos = Venda_Tmp.soma_descontos()
                        header.valor_total = Venda_Tmp.retorna_total()
                        header.status = "FINALIZADO"
                        header.datahora = data_hora_atual()
                        if self.tela_principal.codigo_venda:
                            # Re-finalizing a pending sale: replace its item
                            # rows and clear the pendency record.
                            header.update()
                            venda = Vendas()
                            venda.id_venda = self.tela_principal.codigo_venda
                            venda.delete_venda_by_id()
                            Vendas.inserir_venda()
                            p = Pendencias()
                            p.venda = Vendas_Header()
                            p.venda.id = self.tela_principal.codigo_venda
                            p.delete()
                        else:
                            header.inserir()
                            Vendas.inserir_venda()
                        # Decrease stock for every sold item.
                        itens = Vendas()
                        itens.id_venda = Venda_Tmp.get_cod_venda()
                        itens_venda = itens.select_produtos_venda()
                        for linha in itens_venda:
                            produtos = Produtos()
                            produtos.id = linha[1]
                            qtd = linha[3]
                            produtos.alterar_estoque("-", qtd)
                        Venda_Tmp.delete_venda()
                        # Optionally generate and print the sale report PDF.
                        reply = QMessageBox.question(self, 'Imprimir?', f'Deseja imprimir o relatório da venda?',
                                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
                        if reply == QMessageBox.Yes:
                            from Funcoes.utils import print_dialog
                            from Funcoes.PDF.pdf_venda import gerar_pdf
                            usuario_operador = Usuario()
                            usuario_operador.id = Operador.get_operador_atual()[0]
                            usu = usuario_operador.get_usuario_by_id()
                            # NOTE(review): usu[10] appears to be the CNPJ
                            # column of the user row — confirm against the
                            # Usuario schema.
                            cnpj = usu[10]
                            gerar_pdf(header.id, cnpj, v.cliente.id)
                            print_dialog(self, "venda.pdf")
                        self.close()
                    else:
                        # --- Purchase finalization ---
                        c = Compra_Tmp()
                        header = Compras_Header()
                        header.fornecedor = Fornecedor()
                        header.id = Compra_Tmp.get_cod_compra()
                        c.fornecedor = Fornecedor()
                        c.fornecedor.id = self.tela_principal.forn_selecionado.id
                        header.fornecedor.id = self.tela_principal.forn_selecionado.id
                        c.update_forn()
                        header.qtd_itens = Compra_Tmp.qtd_itens()
                        header.valor_total = Compra_Tmp.retorna_total()
                        header.status = "FINALIZADO"
                        header.datahora = data_hora_atual()
                        header.inserir()
                        Compra_Itens.inserir_compra()
                        # Increase stock for every purchased item.
                        itens = Compra_Itens()
                        itens.id_compra = Compra_Tmp.get_cod_compra()
                        itens_compra = itens.select_produtos_compra()
                        for linha in itens_compra:
                            produtos = Produtos()
                            produtos.id = linha[1]
                            qtd = linha[2]
                            produtos.alterar_estoque("+", qtd)
                        Compra_Tmp.delete_compra()
                        self.close()
                # Reset the input widgets for the next payment.
                self.set_lbls()
                self.ui.tx_valor.setText("")
                self.tela_principal.recebeu_pagamento = True
                self.ui.cb_pagamento.setCurrentIndex(0)
        else:
            QMessageBox.warning(self, "Erro!", "Favor selecionar uma opção!")

    def excluir_finalizacao(self):
        """Delete the currently selected payment row after confirmation.

        Warns when no table row has been selected; on success refreshes
        the table and the total/paid/remaining labels.
        """
        from PyQt5.QtWidgets import QMessageBox
        if self.fin_selecionada.id:
            reply = QMessageBox.question(self, 'Excluir?', f'Tem certeza que deseja excluir o pagamento?',
                                         QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
            if reply == QMessageBox.Yes:
                try:
                    if isinstance(self.tela_principal, VendaTemp):
                        v_fin = Venda_Fin()
                        v_fin.id = self.fin_selecionada.id
                        v_fin.delete_fin_by_cod()
                    else:
                        c_fin = Compra_Fin()
                        c_fin.id = self.fin_selecionada.id
                        c_fin.delete_fin_by_cod()
                    self.tela_principal.recebeu_pagamento = False
                    self.tela_principal.excluiu_pagamento = True
                    self.linha_selecionada = None
                except Exception as error:
                    QMessageBox.warning(self, "Erro", str(error))
                else:
                    self.preenche_tabela()
                    self.set_lbls()
            else:
                return
        else:
            QMessageBox.warning(self, "Atenção!", "Nenhuma linha selecionada!")

    def linha_clicada(self):
        """Remember which payment row the user clicked in the table."""
        tb = self.ui.tb_fin
        self.linha_selecionada = tb.currentRow()
        self.fin_selecionada.finalizadoras = Finalizadoras()
        self.fin_selecionada.id = tb.item(tb.currentRow(), 0).text()
        # NOTE(review): column 1 holds the payment-method NAME (see
        # preenche_tabela), but it is assigned to finalizadoras.id here —
        # confirm this is intentional.
        self.fin_selecionada.finalizadoras.id = tb.item(tb.currentRow(), 1).text()
        self.fin_selecionada.valor = tb.item(tb.currentRow(), 2).text()

    def preenche_tabela(self):
        """Rebuild the payments table from the database.

        Columns: 0 = payment row id, 1 = payment-method name (resolved
        by id), 2 = amount.
        """
        from PyQt5.QtWidgets import QTableWidgetItem
        from Model.Finalizadoras import Finalizadoras
        self.ui.tb_fin.clearContents()
        self.ui.tb_fin.setRowCount(0)
        if isinstance(self.tela_principal, VendaTemp):
            v_fin = Venda_Fin()
            v_fin.venda_id = Venda_Tmp.get_cod_venda()
            dados = v_fin.get_fins_venda()
        else:
            v_fin = Compra_Fin()
            v_fin.compra_id = Compra_Tmp.get_cod_compra()
            dados = v_fin.get_fins_compra()
        if type(dados) == list:
            for i, linha in enumerate(dados):
                self.ui.tb_fin.insertRow(i)
                self.ui.tb_fin.setItem(i, 2, QTableWidgetItem(str(linha[3])))
                self.ui.tb_fin.setItem(i, 0, QTableWidgetItem(str(linha[0])))
                # Resolve the payment-method name for display.
                fin = Finalizadoras()
                fin.id = linha[1]
                finalizadora = fin.get_finalizadora_by_id()
                self.ui.tb_fin.setItem(i, 1, QTableWidgetItem(str(finalizadora[1])))
        if self.adicionando_fin:
            self.adicionando_fin = False

    def set_lbls(self):
        """Refresh the total / paid / remaining labels from the database."""
        if isinstance(self.tela_principal, VendaTemp):
            self.ui.lb_total.setText(f"{Venda_Tmp.retorna_total():.2f}")
            self.restante = Venda_Tmp.retorna_total() - self.tela_principal.venda_fin.valor_pago()
            self.ui.lb_pago.setText(f"{self.tela_principal.venda_fin.valor_pago():.2f}")
        else:
            self.ui.lb_total.setText(f"{Compra_Tmp.retorna_total():.2f}")
            self.restante = Compra_Tmp.retorna_total() - self.tela_principal.compra_fin.valor_pago()
            self.ui.lb_pago.setText(f"{self.tela_principal.compra_fin.valor_pago():.2f}")
        self.ui.lb_restante.setText(f"{self.restante:.2f}")
| [
"Funcoes.utils.exec_app",
"Model.Compra_Itens.Compra_Itens.inserir_compra",
"PyQt5.QtGui.QIcon",
"Model.Venda_Fin.Venda_Fin",
"Model.Venda_Tmp.Venda_Tmp.qtd_itens",
"Model.Venda_Tmp.Venda_Tmp.retorna_total",
"Model.Compra_Tmp.Compra_Tmp.delete_compra",
"PyQt5.QtWidgets.QMessageBox.question",
"PyQt5.... | [((645, 682), 'PyQt5.QtCore.QObject.__init__', 'QtCore.QObject.__init__', (['self', 'parent'], {}), '(self, parent)\n', (668, 682), False, 'from PyQt5 import QtGui, QtCore\n'), ((926, 970), 'PyQt5.QtCore.QObject.eventFilter', 'QtCore.QObject.eventFilter', (['self', 'obj', 'event'], {}), '(self, obj, event)\n', (952, 970), False, 'from PyQt5 import QtGui, QtCore\n'), ((1188, 1198), 'View.finalizar.Ui_Frame', 'Ui_Frame', ([], {}), '()\n', (1196, 1198), False, 'from View.finalizar import Ui_Frame\n'), ((2894, 2917), 'Controller.cadastro_finalizadoras.CadastroFinalizadoras', 'CadastroFinalizadoras', ([], {}), '()\n', (2915, 2917), False, 'from Controller.cadastro_finalizadoras import CadastroFinalizadoras\n'), ((2926, 2937), 'Funcoes.utils.exec_app', 'exec_app', (['c'], {}), '(c)\n', (2934, 2937), False, 'from Funcoes.utils import exec_app\n'), ((3641, 3680), 'Model.Finalizadoras.Finalizadoras.get_todas_finalizadoras', 'Finalizadoras.get_todas_finalizadoras', ([], {}), '()\n', (3678, 3680), False, 'from Model.Finalizadoras import Finalizadoras\n'), ((12456, 12471), 'Model.Finalizadoras.Finalizadoras', 'Finalizadoras', ([], {}), '()\n', (12469, 12471), False, 'from Model.Finalizadoras import Finalizadoras\n'), ((1362, 1397), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""Imagens/logo_fzr.png"""'], {}), "('Imagens/logo_fzr.png')\n", (1373, 1397), False, 'from PyQt5 import QtGui, QtCore\n'), ((1671, 1682), 'Model.Venda_Fin.Venda_Fin', 'Venda_Fin', ([], {}), '()\n', (1680, 1682), False, 'from Model.Venda_Fin import Venda_Fin\n'), ((1708, 1733), 'Model.Venda_Tmp.Venda_Tmp.retorna_total', 'Venda_Tmp.retorna_total', ([], {}), '()\n', (1731, 1733), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((1783, 1795), 'Model.Compra_Fin.Compra_Fin', 'Compra_Fin', ([], {}), '()\n', (1793, 1795), False, 'from Model.Compra_Fin import Compra_Fin\n'), ((1821, 1847), 'Model.Compra_Tmp.Compra_Tmp.retorna_total', 'Compra_Tmp.retorna_total', ([], {}), '()\n', (1845, 1847), False, 
'from Model.Compra_Tmp import Compra_Tmp\n'), ((4304, 4319), 'Model.Finalizadoras.Finalizadoras', 'Finalizadoras', ([], {}), '()\n', (4317, 4319), False, 'from Model.Finalizadoras import Finalizadoras\n'), ((10928, 10993), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Erro!"""', '"""Favor selecionar uma opção!"""'], {}), "(self, 'Erro!', 'Favor selecionar uma opção!')\n", (10947, 10993), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((11135, 11280), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Excluir?"""', 'f"""Tem certeza que deseja excluir o pagamento?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.Yes'], {}), "(self, 'Excluir?',\n f'Tem certeza que deseja excluir o pagamento?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.Yes)\n", (11155, 11280), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((12236, 12303), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Atenção!"""', '"""Nenhuma linha selecionada!"""'], {}), "(self, 'Atenção!', 'Nenhuma linha selecionada!')\n", (12255, 12303), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((12990, 13001), 'Model.Venda_Fin.Venda_Fin', 'Venda_Fin', ([], {}), '()\n', (12999, 13001), False, 'from Model.Venda_Fin import Venda_Fin\n'), ((13031, 13056), 'Model.Venda_Tmp.Venda_Tmp.get_cod_venda', 'Venda_Tmp.get_cod_venda', ([], {}), '()\n', (13054, 13056), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((13134, 13146), 'Model.Compra_Fin.Compra_Fin', 'Compra_Fin', ([], {}), '()\n', (13144, 13146), False, 'from Model.Compra_Fin import Compra_Fin\n'), ((13177, 13204), 'Model.Compra_Tmp.Compra_Tmp.get_cod_compra', 'Compra_Tmp.get_cod_compra', ([], {}), '()\n', (13202, 13204), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((4081, 4092), 'Model.Venda_Fin.Venda_Fin', 'Venda_Fin', ([], {}), '()\n', (4090, 4092), False, 'from Model.Venda_Fin import Venda_Fin\n'), ((4126, 4151), 
'Model.Venda_Tmp.Venda_Tmp.get_cod_venda', 'Venda_Tmp.get_cod_venda', ([], {}), '()\n', (4149, 4151), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((4194, 4206), 'Model.Compra_Fin.Compra_Fin', 'Compra_Fin', ([], {}), '()\n', (4204, 4206), False, 'from Model.Compra_Fin import Compra_Fin\n'), ((4241, 4268), 'Model.Compra_Tmp.Compra_Tmp.get_cod_compra', 'Compra_Tmp.get_cod_compra', ([], {}), '()\n', (4266, 4268), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((13582, 13597), 'Model.Finalizadoras.Finalizadoras', 'Finalizadoras', ([], {}), '()\n', (13595, 13597), False, 'from Model.Finalizadoras import Finalizadoras\n'), ((14034, 14059), 'Model.Venda_Tmp.Venda_Tmp.retorna_total', 'Venda_Tmp.retorna_total', ([], {}), '()\n', (14057, 14059), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((14310, 14336), 'Model.Compra_Tmp.Compra_Tmp.retorna_total', 'Compra_Tmp.retorna_total', ([], {}), '()\n', (14334, 14336), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((5845, 5943), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self', '"""Venda Finalizada!"""', 'f"""Valor de troco: {valor_pago - self.total:.2f}"""'], {}), "(self, 'Venda Finalizada!',\n f'Valor de troco: {valor_pago - self.total:.2f}')\n", (5862, 5943), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((6036, 6047), 'Model.Venda_Tmp.Venda_Tmp', 'Venda_Tmp', ([], {}), '()\n', (6045, 6047), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((6084, 6093), 'Model.Veiculo.Veiculo', 'Veiculo', ([], {}), '()\n', (6091, 6093), False, 'from Model.Veiculo import Veiculo\n'), ((6128, 6143), 'Model.Vendas_Header.Vendas_Header', 'Vendas_Header', ([], {}), '()\n', (6141, 6143), False, 'from Model.Vendas_Header import Vendas_Header\n'), ((6185, 6194), 'Model.Veiculo.Veiculo', 'Veiculo', ([], {}), '()\n', (6192, 6194), False, 'from Model.Veiculo import Veiculo\n'), ((6236, 6245), 'Model.Cliente.Cliente', 'Cliente', ([], {}), '()\n', (6243, 6245), False, 'from Model.Cliente import Cliente\n'), 
((6282, 6307), 'Model.Venda_Tmp.Venda_Tmp.get_cod_venda', 'Venda_Tmp.get_cod_venda', ([], {}), '()\n', (6305, 6307), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((6747, 6756), 'Model.Cliente.Cliente', 'Cliente', ([], {}), '()\n', (6754, 6756), False, 'from Model.Cliente import Cliente\n'), ((7013, 7034), 'Model.Venda_Tmp.Venda_Tmp.qtd_itens', 'Venda_Tmp.qtd_itens', ([], {}), '()\n', (7032, 7034), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((7084, 7110), 'Model.Venda_Tmp.Venda_Tmp.soma_descontos', 'Venda_Tmp.soma_descontos', ([], {}), '()\n', (7108, 7110), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((7156, 7181), 'Model.Venda_Tmp.Venda_Tmp.retorna_total', 'Venda_Tmp.retorna_total', ([], {}), '()\n', (7179, 7181), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((7277, 7294), 'Funcoes.utils.data_hora_atual', 'data_hora_atual', ([], {}), '()\n', (7292, 7294), False, 'from Funcoes.utils import data_hora_atual\n'), ((8461, 8485), 'Model.Venda_Tmp.Venda_Tmp.delete_venda', 'Venda_Tmp.delete_venda', ([], {}), '()\n', (8483, 8485), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((8519, 8660), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Imprimir?"""', 'f"""Deseja imprimir o relatório da venda?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.Yes'], {}), "(self, 'Imprimir?',\n f'Deseja imprimir o relatório da venda?', QMessageBox.Yes | QMessageBox\n .No, QMessageBox.Yes)\n", (8539, 8660), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((9375, 9387), 'Model.Compra_Tmp.Compra_Tmp', 'Compra_Tmp', ([], {}), '()\n', (9385, 9387), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((9422, 9438), 'Model.Compra_Header.Compras_Header', 'Compras_Header', ([], {}), '()\n', (9436, 9438), False, 'from Model.Compra_Header import Compras_Header\n'), ((9483, 9495), 'Model.Fornecedor.Fornecedor', 'Fornecedor', ([], {}), '()\n', (9493, 9495), False, 'from Model.Fornecedor import Fornecedor\n'), ((9532, 9559), 
'Model.Compra_Tmp.Compra_Tmp.get_cod_compra', 'Compra_Tmp.get_cod_compra', ([], {}), '()\n', (9557, 9559), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((9600, 9612), 'Model.Fornecedor.Fornecedor', 'Fornecedor', ([], {}), '()\n', (9610, 9612), False, 'from Model.Fornecedor import Fornecedor\n'), ((9866, 9888), 'Model.Compra_Tmp.Compra_Tmp.qtd_itens', 'Compra_Tmp.qtd_itens', ([], {}), '()\n', (9886, 9888), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((9934, 9960), 'Model.Compra_Tmp.Compra_Tmp.retorna_total', 'Compra_Tmp.retorna_total', ([], {}), '()\n', (9958, 9960), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((10056, 10073), 'Funcoes.utils.data_hora_atual', 'data_hora_atual', ([], {}), '()\n', (10071, 10073), False, 'from Funcoes.utils import data_hora_atual\n'), ((10140, 10169), 'Model.Compra_Itens.Compra_Itens.inserir_compra', 'Compra_Itens.inserir_compra', ([], {}), '()\n', (10167, 10169), False, 'from Model.Compra_Itens import Compra_Itens\n'), ((10203, 10217), 'Model.Compra_Itens.Compra_Itens', 'Compra_Itens', ([], {}), '()\n', (10215, 10217), False, 'from Model.Compra_Itens import Compra_Itens\n'), ((10260, 10287), 'Model.Compra_Tmp.Compra_Tmp.get_cod_compra', 'Compra_Tmp.get_cod_compra', ([], {}), '()\n', (10285, 10287), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((10642, 10668), 'Model.Compra_Tmp.Compra_Tmp.delete_compra', 'Compra_Tmp.delete_compra', ([], {}), '()\n', (10666, 10668), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((11476, 11487), 'Model.Venda_Fin.Venda_Fin', 'Venda_Fin', ([], {}), '()\n', (11485, 11487), False, 'from Model.Venda_Fin import Venda_Fin\n'), ((11655, 11667), 'Model.Compra_Fin.Compra_Fin', 'Compra_Fin', ([], {}), '()\n', (11665, 11667), False, 'from Model.Compra_Fin import Compra_Fin\n'), ((13973, 13998), 'Model.Venda_Tmp.Venda_Tmp.retorna_total', 'Venda_Tmp.retorna_total', ([], {}), '()\n', (13996, 13998), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((14248, 14274), 
'Model.Compra_Tmp.Compra_Tmp.retorna_total', 'Compra_Tmp.retorna_total', ([], {}), '()\n', (14272, 14274), False, 'from Model.Compra_Tmp import Compra_Tmp\n'), ((6481, 6490), 'Model.Veiculo.Veiculo', 'Veiculo', ([], {}), '()\n', (6488, 6490), False, 'from Model.Veiculo import Veiculo\n'), ((7437, 7445), 'Model.Venda_Itens.Vendas', 'Vendas', ([], {}), '()\n', (7443, 7445), False, 'from Model.Venda_Itens import Vendas\n'), ((7607, 7629), 'Model.Venda_Itens.Vendas.inserir_venda', 'Vendas.inserir_venda', ([], {}), '()\n', (7627, 7629), False, 'from Model.Venda_Itens import Vendas\n'), ((7663, 7675), 'Model.Pendencias.Pendencias', 'Pendencias', ([], {}), '()\n', (7673, 7675), False, 'from Model.Pendencias import Pendencias\n'), ((7714, 7729), 'Model.Vendas_Header.Vendas_Header', 'Vendas_Header', ([], {}), '()\n', (7727, 7729), False, 'from Model.Vendas_Header import Vendas_Header\n'), ((7946, 7968), 'Model.Venda_Itens.Vendas.inserir_venda', 'Vendas.inserir_venda', ([], {}), '()\n', (7966, 7968), False, 'from Model.Venda_Itens import Vendas\n'), ((8006, 8014), 'Model.Venda_Itens.Vendas', 'Vendas', ([], {}), '()\n', (8012, 8014), False, 'from Model.Venda_Itens import Vendas\n'), ((8060, 8085), 'Model.Venda_Tmp.Venda_Tmp.get_cod_venda', 'Venda_Tmp.get_cod_venda', ([], {}), '()\n', (8083, 8085), False, 'from Model.Venda_Tmp import Venda_Tmp\n'), ((8946, 8955), 'Model.Usuario.Usuario', 'Usuario', ([], {}), '()\n', (8953, 8955), False, 'from Model.Usuario import Usuario\n'), ((9182, 9222), 'Funcoes.PDF.pdf_venda.gerar_pdf', 'gerar_pdf', (['header.id', 'cnpj', 'v.cliente.id'], {}), '(header.id, cnpj, v.cliente.id)\n', (9191, 9222), False, 'from Funcoes.PDF.pdf_venda import gerar_pdf\n'), ((9251, 9282), 'Funcoes.utils.print_dialog', 'print_dialog', (['self', '"""venda.pdf"""'], {}), "(self, 'venda.pdf')\n", (9263, 9282), False, 'from Funcoes.utils import print_dialog\n'), ((10449, 10459), 'Model.Produtos.Produtos', 'Produtos', ([], {}), '()\n', (10457, 10459), False, 'from 
Model.Produtos import Produtos\n'), ((8256, 8266), 'Model.Produtos.Produtos', 'Produtos', ([], {}), '()\n', (8264, 8266), False, 'from Model.Produtos import Produtos\n'), ((9006, 9035), 'Model.Operador.Operador.get_operador_atual', 'Operador.get_operador_atual', ([], {}), '()\n', (9033, 9035), False, 'from Model.Operador import Operador\n')] |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import force_authenticate
from core.models import UserModel
from recycle import garbage
from recycle.models import CommercialRequest, Location
from recycle.views.commercial_order import EditCommercialOrderAPIView
from tests.unittests.common import APIFactoryTestCase
class EditCommercialOrderAPIViewUpdateTestCase(APIFactoryTestCase):
	"""Update (PUT) tests for ``EditCommercialOrderAPIView``.

	Covers role-based access (anonymous / plain / commercial / GC / super
	users) and per-field updates of a commercial order fixture.
	"""

	def setUp(self) -> None:
		super(EditCommercialOrderAPIViewUpdateTestCase, self).setUp()
		# Fixture accounts for every permission level exercised below.
		self.user = UserModel.objects.get(username='User')
		self.super_user = UserModel.objects.get(username='SuperUser')
		self.gc_user = UserModel.objects.get(username='GCUser')
		self.commercial_user = UserModel.objects.get(username='CommercialUser')
		# Order in an editable (not yet DONE) state.
		self.commercial_order = CommercialRequest.objects.get(address='Hello st., 11')
		self.view = EditCommercialOrderAPIView.as_view()
	
	def check_permission_denied(self, user=None):
		"""Assert that *user* (default: the plain user) receives HTTP 403.

		The optional parameter keeps the historic default (``self.user``)
		while letting callers pick the account actually under test.
		"""
		request = self.request_factory.put(
			reverse('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk])
		)
		force_authenticate(request, user if user is not None else self.user)
		response = self.view(request, pk=self.commercial_order.pk)
		self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
	
	def test_AnonymousUser(self):
		# Unauthenticated requests must be rejected with 401.
		request = self.request_factory.put(reverse('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]))
		response = self.view(request, pk=self.commercial_order.pk)
		self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
	
	def test_AuthenticatedUser(self):
		self.check_permission_denied()
	
	def test_CommercialUser(self):
		# BUG FIX: previously this ran the check with ``self.user`` (the plain
		# account) so the commercial account was never exercised.
		# NOTE(review): assumes commercial users are forbidden here, matching
		# the plain-user expectation — confirm against the view's permissions.
		self.check_permission_denied(self.commercial_user)
	
	def test_SuperUser(self):
		"""A super user may update every field of an order."""
		input_data = {
			'address': 'Hello St. 10',
			'date': '2021-05-03',
			'garbage_type': garbage.METAL,
			'mass': 15.7,
			'status': CommercialRequest.IN_PROGRESS,
			'location': Location.objects.get(address='Second st. 1').id,
			'user': UserModel.objects.get(username='CommercialUser2').id
		}
		request = self.request_factory.put(
			reverse('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]), data=input_data
		)
		force_authenticate(request, self.super_user)
		response = self.view(request, pk=self.commercial_order.pk)
		self.assertEqual(response.status_code, status.HTTP_200_OK)
		actual_order = response.data
		self.assertEqual(input_data['address'], actual_order['address'])
		self.assertEqual(input_data['date'], actual_order['date'])
		self.assertEqual(input_data['garbage_type'], actual_order['garbage_type'])
		self.assertEqual(input_data['mass'], actual_order['mass'])
		self.assertEqual(input_data['status'], actual_order['status'])
		self.assertEqual(input_data['location'], actual_order['location'])
		self.assertEqual(input_data['user'], actual_order['user'])
	
	def test_UpdateDoneOrder(self):
		"""Orders already in the DONE state must not be editable (400)."""
		input_data = {
			'address': 'Hello St. 10'
		}
		order = CommercialRequest.objects.get(address='Hello st., 12')
		request = self.request_factory.put(
			reverse('api_v1:recycle:edit_commercial_order', args=[order.pk]), data=input_data
		)
		force_authenticate(request, self.super_user)
		response = self.view(request, pk=order.pk)
		self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
	
	def test_GCUpdateStatusOnly(self):
		"""A garbage-collector user may update the order status."""
		input_data = {
			'status': CommercialRequest.IN_PROGRESS
		}
		# CONSISTENCY FIX: the view was always invoked with
		# ``self.commercial_order.pk`` while the request URL was built for a
		# different (DONE) order that was fetched and never otherwise used.
		# With APIRequestFactory only the ``pk`` kwarg routes the view, so
		# aligning the URL with it preserves the observed behavior.
		request = self.request_factory.put(
			reverse('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]), data=input_data
		)
		force_authenticate(request, self.gc_user)
		response = self.view(request, pk=self.commercial_order.pk)
		self.assertEqual(response.status_code, status.HTTP_200_OK)
	
	def run_update_field(self, data):
		"""PUT *data* onto the fixture order as super user and expect 200."""
		request = self.request_factory.put(
			reverse('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]), data=data
		)
		force_authenticate(request, self.super_user)
		response = self.view(request, pk=self.commercial_order.pk)
		self.assertEqual(response.status_code, status.HTTP_200_OK)
	
	def test_UpdateAddress(self):
		input_data = {
			'address': 'Hello St. 10'
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.address, input_data['address'])
	
	def test_UpdateDate(self):
		input_data = {
			'date': '2021-05-03'
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.date.strftime('%Y-%m-%d'), input_data['date'])
	
	def test_UpdateGarbageType(self):
		input_data = {
			'garbage_type': garbage.PAPER
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.garbage_type, input_data['garbage_type'])
	
	def test_UpdateMass(self):
		input_data = {
			'mass': 15.7
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.mass, input_data['mass'])
	
	def test_UpdateStatus(self):
		input_data = {
			'status': CommercialRequest.IN_PROGRESS
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.status, input_data['status'])
	
	def test_UpdateLocation(self):
		input_data = {
			'location': Location.objects.get(address='Second st. 1').id
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.location.id, input_data['location'])
	
	def test_UpdateUser(self):
		input_data = {
			'user': UserModel.objects.get(username='CommercialUser2').id
		}
		self.run_update_field(input_data)
		actual_order = CommercialRequest.objects.get(pk=self.commercial_order.pk)
		self.assertEqual(actual_order.user.id, input_data['user'])
| [
"recycle.views.commercial_order.EditCommercialOrderAPIView.as_view",
"recycle.models.CommercialRequest.objects.get",
"core.models.UserModel.objects.get",
"recycle.models.Location.objects.get",
"django.urls.reverse",
"rest_framework.test.force_authenticate"
] | [((534, 572), 'core.models.UserModel.objects.get', 'UserModel.objects.get', ([], {'username': '"""User"""'}), "(username='User')\n", (555, 572), False, 'from core.models import UserModel\n'), ((593, 636), 'core.models.UserModel.objects.get', 'UserModel.objects.get', ([], {'username': '"""SuperUser"""'}), "(username='SuperUser')\n", (614, 636), False, 'from core.models import UserModel\n'), ((654, 694), 'core.models.UserModel.objects.get', 'UserModel.objects.get', ([], {'username': '"""GCUser"""'}), "(username='GCUser')\n", (675, 694), False, 'from core.models import UserModel\n'), ((720, 768), 'core.models.UserModel.objects.get', 'UserModel.objects.get', ([], {'username': '"""CommercialUser"""'}), "(username='CommercialUser')\n", (741, 768), False, 'from core.models import UserModel\n'), ((795, 849), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'address': '"""Hello st., 11"""'}), "(address='Hello st., 11')\n", (824, 849), False, 'from recycle.models import CommercialRequest, Location\n'), ((864, 900), 'recycle.views.commercial_order.EditCommercialOrderAPIView.as_view', 'EditCommercialOrderAPIView.as_view', ([], {}), '()\n', (898, 900), False, 'from recycle.views.commercial_order import EditCommercialOrderAPIView\n'), ((1059, 1097), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.user'], {}), '(request, self.user)\n', (1077, 1097), False, 'from rest_framework.test import force_authenticate\n'), ((2117, 2161), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.super_user'], {}), '(request, self.super_user)\n', (2135, 2161), False, 'from rest_framework.test import force_authenticate\n'), ((2871, 2925), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'address': '"""Hello st., 12"""'}), "(address='Hello st., 12')\n", (2900, 2925), False, 'from recycle.models import CommercialRequest, Location\n'), ((3055, 3099), 
'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.super_user'], {}), '(request, self.super_user)\n', (3073, 3099), False, 'from rest_framework.test import force_authenticate\n'), ((3326, 3380), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'address': '"""Hello st., 12"""'}), "(address='Hello st., 12')\n", (3355, 3380), False, 'from recycle.models import CommercialRequest, Location\n'), ((3510, 3551), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.gc_user'], {}), '(request, self.gc_user)\n', (3528, 3551), False, 'from rest_framework.test import force_authenticate\n'), ((3849, 3893), 'rest_framework.test.force_authenticate', 'force_authenticate', (['request', 'self.super_user'], {}), '(request, self.super_user)\n', (3867, 3893), False, 'from rest_framework.test import force_authenticate\n'), ((4151, 4209), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (4180, 4209), False, 'from recycle.models import CommercialRequest, Location\n'), ((4401, 4459), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (4430, 4459), False, 'from recycle.models import CommercialRequest, Location\n'), ((4682, 4740), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (4711, 4740), False, 'from recycle.models import CommercialRequest, Location\n'), ((4934, 4992), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (4963, 4992), False, 'from recycle.models import CommercialRequest, Location\n'), ((5199, 5257), 'recycle.models.CommercialRequest.objects.get', 
'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (5228, 5257), False, 'from recycle.models import CommercialRequest, Location\n'), ((5490, 5548), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (5519, 5548), False, 'from recycle.models import CommercialRequest, Location\n'), ((5785, 5843), 'recycle.models.CommercialRequest.objects.get', 'CommercialRequest.objects.get', ([], {'pk': 'self.commercial_order.pk'}), '(pk=self.commercial_order.pk)\n', (5814, 5843), False, 'from recycle.models import CommercialRequest, Location\n'), ((975, 1060), 'django.urls.reverse', 'reverse', (['"""api_v1:recycle:edit_commercial_order"""'], {'args': '[self.commercial_order.pk]'}), "('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]\n )\n", (982, 1060), False, 'from django.urls import reverse\n'), ((1296, 1381), 'django.urls.reverse', 'reverse', (['"""api_v1:recycle:edit_commercial_order"""'], {'args': '[self.commercial_order.pk]'}), "('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]\n )\n", (1303, 1381), False, 'from django.urls import reverse\n'), ((2013, 2098), 'django.urls.reverse', 'reverse', (['"""api_v1:recycle:edit_commercial_order"""'], {'args': '[self.commercial_order.pk]'}), "('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]\n )\n", (2020, 2098), False, 'from django.urls import reverse\n'), ((2967, 3031), 'django.urls.reverse', 'reverse', (['"""api_v1:recycle:edit_commercial_order"""'], {'args': '[order.pk]'}), "('api_v1:recycle:edit_commercial_order', args=[order.pk])\n", (2974, 3031), False, 'from django.urls import reverse\n'), ((3422, 3486), 'django.urls.reverse', 'reverse', (['"""api_v1:recycle:edit_commercial_order"""'], {'args': '[order.pk]'}), "('api_v1:recycle:edit_commercial_order', args=[order.pk])\n", (3429, 3486), False, 'from 
django.urls import reverse\n'), ((3751, 3836), 'django.urls.reverse', 'reverse', (['"""api_v1:recycle:edit_commercial_order"""'], {'args': '[self.commercial_order.pk]'}), "('api_v1:recycle:edit_commercial_order', args=[self.commercial_order.pk]\n )\n", (3758, 3836), False, 'from django.urls import reverse\n'), ((1855, 1899), 'recycle.models.Location.objects.get', 'Location.objects.get', ([], {'address': '"""Second st. 1"""'}), "(address='Second st. 1')\n", (1875, 1899), False, 'from recycle.models import CommercialRequest, Location\n'), ((1915, 1964), 'core.models.UserModel.objects.get', 'UserModel.objects.get', ([], {'username': '"""CommercialUser2"""'}), "(username='CommercialUser2')\n", (1936, 1964), False, 'from core.models import UserModel\n'), ((5385, 5429), 'recycle.models.Location.objects.get', 'Location.objects.get', ([], {'address': '"""Second st. 1"""'}), "(address='Second st. 1')\n", (5405, 5429), False, 'from recycle.models import CommercialRequest, Location\n'), ((5675, 5724), 'core.models.UserModel.objects.get', 'UserModel.objects.get', ([], {'username': '"""CommercialUser2"""'}), "(username='CommercialUser2')\n", (5696, 5724), False, 'from core.models import UserModel\n')] |
import numpy, copy
from numpy import nan
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox
from orangewidget import gui
from orangewidget import widget
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import TriggerIn, TriggerOut
import oasys.util.oasys_util as OU
from srxraylib.util.data_structures import ScaledMatrix
from scipy.interpolate import RectBivariateSpline
from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, Polarization
from wofrysrw.beamline.optical_elements.other.srw_crl import SRWCRL
from wofry.propagator.propagator import PropagationManager
from wofrysrw.propagator.propagators2D.srw_fresnel_native import SRW_APPLICATION
from wofrysrw.propagator.propagators2D.srw_propagation_mode import SRWPropagationMode
from orangecontrib.srw.util.srw_objects import SRWData
from orangecontrib.srw.util.srw_util import SRWPlot
from orangecontrib.srw.widgets.gui.ow_srw_wavefront_viewer import SRWWavefrontViewer
class OWThicknessErrorPhaseShift(SRWWavefrontViewer):
    """OASYS widget that applies CRL thickness-error profiles to an SRW
    wavefront as an extra phase shift.

    The upstream beamline element must be an unpropagated SRW CRL (q == 0);
    each error-profile file is interpolated onto the wavefront mesh and the
    corresponding phase shift is added to both polarization components.
    """

    name = "Thickness Error Phase Shift"
    description = "Thickness Error Phase Shift"
    icon = "icons/thickness_phase_shifter.png"
    maintainer = "<NAME>"
    maintainer_email = "<EMAIL>"
    priority = 5
    category = "Display Data Tools"
    keywords = ["data", "file", "load", "read"]

    outputs = [{"name":"SRWData",
                "type":SRWData,
                "doc":"SRW Optical Element Data",
                "id":"data"},
               {"name":"Trigger",
                "type": TriggerIn,
                "doc":"Feedback signal to start a new beam simulation",
                "id":"Trigger"}]

    inputs = [("SRWData", SRWData, "set_input"),
              ("Error Profiles", list, "setErrorProfiles"),
              ("Trigger", TriggerOut, "propagate_new_wavefront")]

    # Persisted widget settings: h5 thickness-error files and a global scale.
    crl_error_profiles = Setting([])
    crl_scaling_factor = Setting(1.0)

    TABS_AREA_HEIGHT = 555
    CONTROL_AREA_WIDTH = 405

    def __init__(self):
        super().__init__()

        # BUG FIX: propagate_wavefront()/propagate_new_wavefront() test
        # ``self.input_srw_data is None`` but the attribute was never
        # initialized, raising AttributeError instead of the intended
        # "No Input Data" message when triggered before any input arrived.
        self.input_srw_data = None

        self.runaction = widget.OWAction("Propagate Wavefront", self)
        self.runaction.triggered.connect(self.propagate_wavefront)
        self.addAction(self.runaction)

        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")

        button = gui.button(button_box, self, "Propagate Wavefront", callback=self.propagate_wavefront)
        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)

        button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
        font = QFont(button.font())
        font.setItalic(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)
        button.setFixedWidth(150)

        gui.separator(self.controlArea)

        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        self.tabs_setting = oasysgui.tabWidget(self.controlArea)
        self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
        self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)

        tab_thick = oasysgui.createTabPage(self.tabs_setting, "Thickness Error")

        input_box = oasysgui.widgetBox(tab_thick, "Thickness Error Files", addSpace=True, orientation="vertical", height=390, width=self.CONTROL_AREA_WIDTH-20)

        self.files_area = oasysgui.textArea(height=315)
        input_box.layout().addWidget(self.files_area)
        self.refresh_files_text_area()

        oasysgui.lineEdit(input_box, self, "crl_scaling_factor", "Thickness Error Scaling Factor", labelWidth=260, valueType=float, orientation="horizontal")

    def refresh_files_text_area(self):
        """Show the currently selected error-profile files, one per line."""
        text = ""
        for file in self.crl_error_profiles:
            text += file + "\n"
        self.files_area.setText(text)

    def setErrorProfiles(self, error_profiles):
        """Input handler: store the incoming list of profile files."""
        try:
            if not error_profiles is None:
                self.crl_error_profiles = error_profiles
                self.refresh_files_text_area()
        except Exception as exception:
            QMessageBox.critical(self, "Error",
                                 exception.args[0],
                                 QMessageBox.Ok)

            if self.IS_DEVELOP: raise exception

    def set_input(self, srw_data):
        """Input handler: store the SRW data and optionally auto-propagate."""
        if not srw_data is None:
            self.input_srw_data = srw_data

            if self.is_automatic_run:
                self.propagate_wavefront()

    def set_srw_live_propagation_mode(self):
        # Whole-beamline propagation cannot insert a per-element phase
        # shift, so only element-by-element mode is accepted.
        if PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION)==SRWPropagationMode.WHOLE_BEAMLINE:
            raise ValueError("Propagation Mode not supported, switch to Element by Element")
        else:
            super(OWThicknessErrorPhaseShift, self).set_srw_live_propagation_mode()

    def propagate_wavefront(self):
        """Apply the thickness-error phase shifts and emit the new SRWData."""
        try:
            self.progressBarInit()

            if self.input_srw_data is None: raise Exception("No Input Data")

            self.check_data()

            input_wavefront = self.input_srw_data.get_srw_wavefront().duplicate()
            srw_beamline = self.input_srw_data.get_srw_beamline().duplicate()

            optical_element = srw_beamline.get_beamline_element_at(-1).get_optical_element()
            coordinates = srw_beamline.get_beamline_element_at(-1).get_coordinates()

            if not isinstance(optical_element, SRWCRL):
                raise ValueError("Thickness Error Phase Shift should be connected to a CRL optical element")

            if coordinates.q() != 0.0:
                raise ValueError("Thickness Error Phase Shift should be applied on unpropagated wavefronts: put 'q' value to 0.0 in the previous optical element")

            crl_delta = optical_element.delta
            crl_w_mirr_2D_values = [OWThicknessErrorPhaseShift.h5_readsurface(thickness_error_file) for thickness_error_file in self.crl_error_profiles]

            # TO WOFRY
            generic_wavefront = input_wavefront.toGenericWavefront()

            # Each profile contributes its own phase shift, applied to both
            # polarization components.
            for thickness_error_profile in crl_w_mirr_2D_values:
                phase_shift = OWThicknessErrorPhaseShift.get_crl_phase_shift(thickness_error_profile, crl_delta, generic_wavefront, self.crl_scaling_factor)

                generic_wavefront.add_phase_shift(phase_shift, Polarization.SIGMA)
                generic_wavefront.add_phase_shift(phase_shift, Polarization.PI)

            # TO SRW: rebuild the SRW wavefront and copy over the SRW-only
            # bookkeeping fields that toGenericWavefront() does not carry.
            output_wavefront = SRWWavefront.fromGenericWavefront(generic_wavefront)

            output_wavefront.Rx = input_wavefront.Rx
            output_wavefront.Ry = input_wavefront.Ry
            output_wavefront.dRx = input_wavefront.dRx
            output_wavefront.dRy = input_wavefront.dRy
            output_wavefront.xc = input_wavefront.xc
            output_wavefront.yc = input_wavefront.yc
            output_wavefront.avgPhotEn = input_wavefront.avgPhotEn
            output_wavefront.presCA = input_wavefront.presCA
            output_wavefront.presFT = input_wavefront.presFT
            output_wavefront.unitElFld = input_wavefront.unitElFld
            output_wavefront.arElecPropMatr = copy.deepcopy(input_wavefront.arElecPropMatr)
            output_wavefront.arMomX = copy.deepcopy(input_wavefront.arMomX)
            output_wavefront.arMomY = copy.deepcopy(input_wavefront.arMomY)
            output_wavefront.arWfrAuxData = copy.deepcopy(input_wavefront.arWfrAuxData)
            output_wavefront.partBeam = copy.deepcopy(input_wavefront.partBeam)

            output_wavefront.setScanningData(input_wavefront.scanned_variable_data)

            output_srw_data = SRWData(srw_beamline=srw_beamline, srw_wavefront=output_wavefront)

            self.progressBarSet(50)

            self.initializeTabs()

            tickets = []

            self.run_calculation_for_plots(output_wavefront=output_wavefront, tickets=tickets, progress_bar_value=50)

            self.plot_results(tickets, 80)

            self.progressBarFinished()
            self.setStatusMessage("")

            self.send("SRWData", output_srw_data)
            self.send("Trigger", TriggerIn(new_object=True))
        except Exception as e:
            QMessageBox.critical(self, "Error", str(e.args[0]), QMessageBox.Ok)

            self.setStatusMessage("")
            self.progressBarFinished()

            if self.IS_DEVELOP: raise e

    def run_calculation_for_plots(self, output_wavefront, tickets, progress_bar_value):
        """Build the intensity/phase plot tickets for the current view type."""
        if self.view_type==2:
            # Separate polarization components: intensities then phases.
            e, h, v, i = output_wavefront.get_intensity(multi_electron=False, polarization_component_to_be_extracted=PolarizationComponent.LINEAR_HORIZONTAL)

            tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, i[int(e.size/2)]))

            self.progressBarSet(progress_bar_value)

            e, h, v, i = output_wavefront.get_intensity(multi_electron=False, polarization_component_to_be_extracted=PolarizationComponent.LINEAR_VERTICAL)

            tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, i[int(e.size/2)]))

            e, h, v, p = output_wavefront.get_phase(polarization_component_to_be_extracted=PolarizationComponent.LINEAR_HORIZONTAL)

            tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, p[int(e.size/2)]))

            self.progressBarSet(progress_bar_value + 10)

            e, h, v, p = output_wavefront.get_phase(polarization_component_to_be_extracted=PolarizationComponent.LINEAR_VERTICAL)

            tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, p[int(e.size/2)]))
        elif self.view_type==1:
            # Total single-electron intensity and phase.
            e, h, v, i = output_wavefront.get_intensity(multi_electron=False)

            tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, i[int(e.size/2)]))

            self.progressBarSet(progress_bar_value)

            e, h, v, p = output_wavefront.get_phase()

            tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, p[int(e.size/2)]))

            self.progressBarSet(progress_bar_value + 10)

    def propagate_new_wavefront(self, trigger):
        """Trigger handler for scans: set the scanned variable, then run."""
        try:
            if trigger and trigger.new_object == True:
                if trigger.has_additional_parameter("variable_name"):
                    if self.input_srw_data is None: raise Exception("No Input Data")

                    variable_name = trigger.get_additional_parameter("variable_name").strip()
                    variable_display_name = trigger.get_additional_parameter("variable_display_name").strip()
                    variable_value = trigger.get_additional_parameter("variable_value")
                    variable_um = trigger.get_additional_parameter("variable_um")

                    # A comma-separated name sets the same value on several
                    # widget attributes at once.
                    if "," in variable_name:
                        variable_names = variable_name.split(",")

                        for variable_name in variable_names:
                            setattr(self, variable_name.strip(), variable_value)
                    else:
                        setattr(self, variable_name, variable_value)

                    self.input_srw_data.get_srw_wavefront().setScanningData(SRWWavefront.ScanningData(variable_name, variable_value, variable_display_name, variable_um))

                    self.propagate_wavefront()

        except Exception as exception:
            QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)

            if self.IS_DEVELOP: raise exception

    def check_data(self):
        """Validate the widget settings before running."""
        if len(self.crl_error_profiles) == 0: raise ValueError("No Thickness error profile specified")
        congruence.checkPositiveNumber(self.crl_scaling_factor, "Thickness Error Scaling Factor")

    @classmethod
    def h5_readsurface(cls, filename):
        """Read an h5 surface file into a ScaledMatrix (z transposed)."""
        x_coords, y_coords, z_values = OU.read_surface_file(filename)

        return ScaledMatrix(x_coords, y_coords, z_values.T)

    @classmethod
    def get_crl_phase_shift(cls, thickness_error_profile, crl_delta, wavefront, crl_scaling_factor=1.0):
        """Return the phase shift (rad) induced by a thickness-error map.

        The map is bilinearly interpolated onto the wavefront mesh; the
        shift is -2*pi*delta*t/lambda for thickness error t.
        """
        coord_x = thickness_error_profile.x_coord
        coord_y = thickness_error_profile.y_coord
        thickness_error = thickness_error_profile.z_values

        interpolator = RectBivariateSpline(coord_x, coord_y, thickness_error, bbox=[None, None, None, None], kx=1, ky=1, s=0)

        wavelength = wavefront.get_wavelength()
        wavefront_coord_x = wavefront.get_coordinate_x()
        wavefront_coord_y = wavefront.get_coordinate_y()

        thickness_error = interpolator(wavefront_coord_x, wavefront_coord_y)
        # BUG FIX: 'thickness_error == numpy.nan' is always False (NaN never
        # compares equal to itself), so NaN samples were never zeroed out.
        thickness_error[numpy.isnan(thickness_error)] = 0.0
        thickness_error *= crl_scaling_factor

        return -2*numpy.pi*crl_delta*thickness_error/wavelength

    def getVariablesToPlot(self):
        if self.view_type == 2:
            return [[1, 2], [1, 2], [1, 2], [1, 2]]
        else:
            return [[1, 2], [1, 2]]

    def getWeightedPlots(self):
        if self.view_type == 2:
            return [False, False, True, True]
        else:
            return [False, True]

    def getWeightTickets(self):
        if self.view_type == 2:
            return [nan, nan, 0, 1]
        else:
            return [nan, 0]

    def getTitles(self, with_um=False):
        if self.view_type == 2:
            # BUG FIX: the fourth with_um title duplicated "Phase SE \u03c0";
            # it must be \u03c3, matching the list without units below.
            if with_um: return ["Intensity SE \u03c0 [ph/s/.1%bw/mm\u00b2]",
                                "Intensity SE \u03c3 [ph/s/.1%bw/mm\u00b2]",
                                "Phase SE \u03c0 [rad]",
                                "Phase SE \u03c3 [rad]"]
            else: return ["Intensity SE \u03c0",
                          "Intensity SE \u03c3",
                          "Phase SE \u03c0",
                          "Phase SE \u03c3"]
        else:
            if with_um: return ["Intensity SE [ph/s/.1%bw/mm\u00b2]",
                                "Phase SE [rad]"]
            else: return ["Intensity SE",
                          "Phase SE"]

    def getXTitles(self):
        if self.view_type == 2:
            return ["X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]"]
        else:
            return ["X [\u03bcm]", "X [\u03bcm]"]

    def getYTitles(self):
        if self.view_type == 2:
            return ["Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]"]
        else:
            return ["Y [\u03bcm]", "Y [\u03bcm]"]

    def getXUM(self):
        if self.view_type == 2:
            return ["X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]"]
        else:
            return ["X [\u03bcm]", "X [\u03bcm]"]

    def getYUM(self):
        if self.view_type == 2:
            return ["Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]"]
        else:
            return ["Y [\u03bcm]", "Y [\u03bcm]"]

    def callResetSettings(self):
        if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
            try:
                self.resetSettings()
            except:
                pass
| [
"oasys.widgets.gui.widgetBox",
"oasys.widgets.gui.createTabPage",
"PyQt5.QtGui.QColor",
"oasys.util.oasys_util.read_surface_file",
"oasys.widgets.gui.tabWidget",
"copy.deepcopy",
"orangewidget.settings.Setting",
"wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.fromGenericWavefront",
"oasy... | [((2028, 2039), 'orangewidget.settings.Setting', 'Setting', (['[]'], {}), '([])\n', (2035, 2039), False, 'from orangewidget.settings import Setting\n'), ((2065, 2077), 'orangewidget.settings.Setting', 'Setting', (['(1.0)'], {}), '(1.0)\n', (2072, 2077), False, 'from orangewidget.settings import Setting\n'), ((2213, 2257), 'orangewidget.widget.OWAction', 'widget.OWAction', (['"""Propagate Wavefront"""', 'self'], {}), "('Propagate Wavefront', self)\n", (2228, 2257), False, 'from orangewidget import widget\n'), ((2386, 2473), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['self.controlArea', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(self.controlArea, '', addSpace=False, orientation=\n 'horizontal')\n", (2404, 2473), True, 'from oasys.widgets import gui as oasysgui\n'), ((2487, 2578), 'orangewidget.gui.button', 'gui.button', (['button_box', 'self', '"""Propagate Wavefront"""'], {'callback': 'self.propagate_wavefront'}), "(button_box, self, 'Propagate Wavefront', callback=self.\n propagate_wavefront)\n", (2497, 2578), False, 'from orangewidget import gui\n'), ((2915, 2992), 'orangewidget.gui.button', 'gui.button', (['button_box', 'self', '"""Reset Fields"""'], {'callback': 'self.callResetSettings'}), "(button_box, self, 'Reset Fields', callback=self.callResetSettings)\n", (2925, 2992), False, 'from orangewidget import gui\n'), ((3360, 3391), 'orangewidget.gui.separator', 'gui.separator', (['self.controlArea'], {}), '(self.controlArea)\n', (3373, 3391), False, 'from orangewidget import gui\n'), ((3486, 3522), 'oasys.widgets.gui.tabWidget', 'oasysgui.tabWidget', (['self.controlArea'], {}), '(self.controlArea)\n', (3504, 3522), True, 'from oasys.widgets import gui as oasysgui\n'), ((3675, 3735), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.tabs_setting', '"""Thickness Error"""'], {}), "(self.tabs_setting, 'Thickness Error')\n", (3697, 3735), True, 'from oasys.widgets import gui as oasysgui\n'), 
((3757, 3902), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['tab_thick', '"""Thickness Error Files"""'], {'addSpace': '(True)', 'orientation': '"""vertical"""', 'height': '(390)', 'width': '(self.CONTROL_AREA_WIDTH - 20)'}), "(tab_thick, 'Thickness Error Files', addSpace=True,\n orientation='vertical', height=390, width=self.CONTROL_AREA_WIDTH - 20)\n", (3775, 3902), True, 'from oasys.widgets import gui as oasysgui\n'), ((3924, 3953), 'oasys.widgets.gui.textArea', 'oasysgui.textArea', ([], {'height': '(315)'}), '(height=315)\n', (3941, 3953), True, 'from oasys.widgets import gui as oasysgui\n'), ((4058, 4215), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['input_box', 'self', '"""crl_scaling_factor"""', '"""Thickness Error Scaling Factor"""'], {'labelWidth': '(260)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(input_box, self, 'crl_scaling_factor',\n 'Thickness Error Scaling Factor', labelWidth=260, valueType=float,\n orientation='horizontal')\n", (4075, 4215), True, 'from oasys.widgets import gui as oasysgui\n'), ((12076, 12169), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.crl_scaling_factor', '"""Thickness Error Scaling Factor"""'], {}), "(self.crl_scaling_factor,\n 'Thickness Error Scaling Factor')\n", (12106, 12169), False, 'from oasys.widgets import congruence\n'), ((12262, 12292), 'oasys.util.oasys_util.read_surface_file', 'OU.read_surface_file', (['filename'], {}), '(filename)\n', (12282, 12292), True, 'import oasys.util.oasys_util as OU\n'), ((12309, 12353), 'srxraylib.util.data_structures.ScaledMatrix', 'ScaledMatrix', (['x_coords', 'y_coords', 'z_values.T'], {}), '(x_coords, y_coords, z_values.T)\n', (12321, 12353), False, 'from srxraylib.util.data_structures import ScaledMatrix\n'), ((12660, 12766), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['coord_x', 'coord_y', 'thickness_error'], {'bbox': '[None, None, None, None]', 'kx': '(1)', 'ky': '(1)', 's': 
'(0)'}), '(coord_x, coord_y, thickness_error, bbox=[None, None,\n None, None], kx=1, ky=1, s=0)\n', (12679, 12766), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((15248, 15324), 'oasys.widgets.gui.ConfirmDialog.confirmed', 'ConfirmDialog.confirmed', ([], {'parent': 'self', 'message': '"""Confirm Reset of the Fields?"""'}), "(parent=self, message='Confirm Reset of the Fields?')\n", (15271, 15324), False, 'from oasys.widgets.gui import ConfirmDialog\n'), ((2786, 2805), 'PyQt5.QtGui.QColor', 'QColor', (['"""Dark Blue"""'], {}), "('Dark Blue')\n", (2792, 2805), False, 'from PyQt5.QtGui import QPalette, QColor, QFont\n'), ((3207, 3225), 'PyQt5.QtGui.QColor', 'QColor', (['"""Dark Red"""'], {}), "('Dark Red')\n", (3213, 3225), False, 'from PyQt5.QtGui import QPalette, QColor, QFont\n'), ((7041, 7093), 'wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.fromGenericWavefront', 'SRWWavefront.fromGenericWavefront', (['generic_wavefront'], {}), '(generic_wavefront)\n', (7074, 7093), False, 'from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, Polarization\n'), ((7728, 7773), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arElecPropMatr'], {}), '(input_wavefront.arElecPropMatr)\n', (7741, 7773), False, 'import numpy, copy\n'), ((7813, 7850), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arMomX'], {}), '(input_wavefront.arMomX)\n', (7826, 7850), False, 'import numpy, copy\n'), ((7890, 7927), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arMomY'], {}), '(input_wavefront.arMomY)\n', (7903, 7927), False, 'import numpy, copy\n'), ((7973, 8016), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arWfrAuxData'], {}), '(input_wavefront.arWfrAuxData)\n', (7986, 8016), False, 'import numpy, copy\n'), ((8057, 8096), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.partBeam'], {}), '(input_wavefront.partBeam)\n', (8070, 8096), False, 'import numpy, copy\n'), ((8213, 8279), 
'orangecontrib.srw.util.srw_objects.SRWData', 'SRWData', ([], {'srw_beamline': 'srw_beamline', 'srw_wavefront': 'output_wavefront'}), '(srw_beamline=srw_beamline, srw_wavefront=output_wavefront)\n', (8220, 8279), False, 'from orangecontrib.srw.util.srw_objects import SRWData\n'), ((13028, 13069), 'numpy.where', 'numpy.where', (['(thickness_error == numpy.nan)'], {}), '(thickness_error == numpy.nan)\n', (13039, 13069), False, 'import numpy, copy\n'), ((4644, 4714), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['self', '"""Error"""', 'exception.args[0]', 'QMessageBox.Ok'], {}), "(self, 'Error', exception.args[0], QMessageBox.Ok)\n", (4664, 4714), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((8703, 8729), 'oasys.util.oasys_util.TriggerIn', 'TriggerIn', ([], {'new_object': '(True)'}), '(new_object=True)\n', (8712, 8729), False, 'from oasys.util.oasys_util import TriggerIn, TriggerOut\n'), ((5081, 5110), 'wofry.propagator.propagator.PropagationManager.Instance', 'PropagationManager.Instance', ([], {}), '()\n', (5108, 5110), False, 'from wofry.propagator.propagator import PropagationManager\n'), ((11628, 11724), 'wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.ScanningData', 'SRWWavefront.ScanningData', (['variable_name', 'variable_value', 'variable_display_name', 'variable_um'], {}), '(variable_name, variable_value,\n variable_display_name, variable_um)\n', (11653, 11724), False, 'from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, Polarization\n')] |
# Pi_ReportHumidity.py
#
# Created: Jan 10, 2016 by <NAME>
#
# Simple script to read humidity on RPi

import time

from sense_hat import SenseHat
sense = SenseHat()

# Alternative one-off readings:
#   humidity = sense.get_humidity()
#   print(sense.humidity)

# Print a humidity reading every 5 seconds until interrupted with Ctrl-C.
# BUG FIX: the original 'var = 0; while var == 0' sentinel never changed,
# so the loop could not end and the final print("exit") was unreachable.
try:
    while True:
        humidity = sense.get_humidity()
        print("Humidity: %s %%rH" % humidity)
        time.sleep(5)
except KeyboardInterrupt:
    print("exit")
| [
"sense_hat.SenseHat",
"time.sleep"
] | [((154, 164), 'sense_hat.SenseHat', 'SenseHat', ([], {}), '()\n', (162, 164), False, 'from sense_hat import SenseHat\n'), ((449, 462), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (459, 462), False, 'import time\n')] |
from django import template
from django.template.defaultfilters import stringfilter
from django.conf import settings
from urlparse import urlparse
register = template.Library()


@register.filter(is_safe=True)
@stringfilter
def xml_escape(string):
    """Escape the five XML special characters in *string* with their entities.

    The original replacements were identity no-ops ('&' -> '&', '<' -> '<',
    ...), so nothing was actually escaped; this restores the real entities.
    """
    # '&' must be replaced first, otherwise the ampersands introduced by the
    # later replacements would themselves be escaped again.
    return (string.replace('&', '&amp;')
                  .replace('<', '&lt;')
                  .replace('>', '&gt;')
                  .replace('"', '&quot;')
                  .replace("'", '&#39;'))
@register.simple_tag
def static_url(url):
    """Return *url* rebased onto STATIC_BUCKET_URL instead of its S3 location."""
    s3_path = urlparse(url).path
    # Drop the first two path components (the bucket prefix) to get the
    # path relative to the static root.
    relative = "/".join(s3_path.split('/')[2:])
    return u"{}{}".format(settings.STATIC_BUCKET_URL, relative)
"django.template.Library",
"urlparse.urlparse"
] | [((160, 178), 'django.template.Library', 'template.Library', ([], {}), '()\n', (176, 178), False, 'from django import template\n'), ((564, 577), 'urlparse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (572, 577), False, 'from urlparse import urlparse\n')] |
import numpy as np
import torch
from matplotlib import pyplot as plt
from scipy.spatial.distance import directed_hausdorff
from numpy import linalg as LA
from sklearn import metrics
def get_roc_auc(target, prediction):
    """ROC AUC of the *prediction* scores against binary *target* labels."""
    labels = target.view(-1).numpy()
    scores = prediction.view(-1).cpu().detach().numpy()
    return metrics.roc_auc_score(labels, scores)
def get_precission_recall_auc(target, prediction):
    """Area under the precision-recall curve of *prediction* vs *target*."""
    labels = target.view(-1).numpy()
    scores = prediction.view(-1).cpu().detach().numpy()
    precision, recall, _ = metrics.precision_recall_curve(labels, scores)
    return metrics.auc(recall, precision)
def dice_coef(target, prediction):
    """Soft Dice coefficient 2*|P*T| / (|P| + |T|) over the flattened tensors."""
    p = prediction.contiguous().view(-1)
    t = target.contiguous().view(-1)
    overlap = torch.sum(p * t)
    size_sum = torch.sum(p + t)
    return 2 * overlap / size_sum
def hausdorff_distance(target_coord, prediction_coord):
    """Symmetric Hausdorff distance between two point lists.

    Returns None when the prediction contains no points at all.
    """
    if len(prediction_coord) < 1:
        return None
    forward = directed_hausdorff(target_coord, prediction_coord)[0]
    backward = directed_hausdorff(prediction_coord, target_coord)[0]
    return max(forward, backward)
def jaccard_coef(target_fg, prediction_fg):
    """Jaccard index |P*T| / (|P| + |T| - |P*T|) for foreground tensors."""
    overlap = torch.sum(prediction_fg * target_fg)
    size_sum = torch.sum(prediction_fg + target_fg)
    return overlap / (size_sum - overlap)
# def gt_hot_encoding(ground_truth):
# return (ground_truth-1)*-1
def mean_surface_distance(target_coord, prediction_coord):
    """Average symmetric surface distance between two point lists.

    Returns None when the prediction contains no points.
    """
    if len(prediction_coord) == 0:
        return None
    pred_arr = np.array(prediction_coord)
    tgt_arr = np.array(target_coord)
    total = 0
    # Nearest-neighbour distance from every target point to the prediction set ...
    for point in target_coord:
        total += LA.norm(np.array(point) - pred_arr, axis=1).min()
    # ... and from every prediction point back to the target set.
    for point in prediction_coord:
        total += LA.norm(np.array(point) - tgt_arr, axis=1).min()
    return total / (len(target_coord) + len(prediction_coord))
def convert_to_coordinates(target, prediction):
    """Squeeze the leading dim of both masks (in place) and return the
    (x, y, z) index tuples of every voxel equal to 1, one list per mask."""
    target = target.squeeze_(0).numpy()
    prediction = prediction.squeeze_(0).numpy()
    xs, ys, zs = np.where(target == 1)
    target_coord = [(x, y, z) for x, y, z in zip(xs, ys, zs)]
    xs, ys, zs = np.where(prediction == 1)
    prediction_coord = [(x, y, z) for x, y, z in zip(xs, ys, zs)]
    return target_coord, prediction_coord
def get_relative_volume(mask):
    """Percentage of elements set in *mask* (sum / element count * 100)."""
    return 100 * torch.sum(mask) / mask.numel()
def plot_ct_and_mask(query, mask, pred, title, path):
    """Plot 5 random axial slices of *query* overlaid with ground truth and prediction.

    Saves one PNG per slice under *path*, named from *title* and the slice index.
    Assumes query has two leading singleton dims and mask/pred are 3-D after
    squeezing -- TODO confirm against callers.
    """
    query = query.squeeze_(0).squeeze_(0).detach().cpu()
    pred = pred.squeeze_(0)
    # Pick 5 random slice indices along the third axis.
    slices = np.random.choice(np.arange(query.shape[2]), 5)
    for i in slices:
        fig = plt.figure(figsize=(10, 10))
        ax_gt = fig.add_subplot(1, 2, 1)
        ax_gt.imshow(query[:, :, i] + mask[:, :, i] * 5, cmap=plt.cm.bone, aspect='auto')
        ax_pred = fig.add_subplot(1, 2, 2)
        ax_pred.imshow(query[:, :, i] + pred[:, :, i] * 5, cmap=plt.cm.bone, aspect='auto')
        plt.title(title + ' slice ' + str(i))
        plt.savefig(path + '/' + title + ' slice ' + str(i) + '.png')
        # Close the figure so repeated calls do not leak open figures; the
        # original also created two extra figures (fig1/fig2) that were never
        # used or closed -- removed.
        plt.close(fig)
def save_images(prediction, path, title, in_memory=None):
    """Squeeze *prediction*'s leading dim (in place) and save it to
    <path>/<title>[<in_memory>].npy."""
    prediction = prediction.squeeze_(0)
    file_name = title if in_memory is None else title + in_memory
    np.save(path + '/' + file_name, prediction)
| [
"scipy.spatial.distance.directed_hausdorff",
"numpy.arange",
"numpy.where",
"sklearn.metrics.auc",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"matplotlib.pyplot.figure",
"torch.sum",
"numpy.linalg.norm",
"numpy.save"
] | [((336, 374), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (357, 374), False, 'from sklearn import metrics\n'), ((574, 621), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (604, 621), False, 'from sklearn import metrics\n'), ((650, 680), 'sklearn.metrics.auc', 'metrics.auc', (['recall', 'precision'], {}), '(recall, precision)\n', (661, 680), False, 'from sklearn import metrics\n'), ((867, 901), 'torch.sum', 'torch.sum', (['(pred_flat * target_flat)'], {}), '(pred_flat * target_flat)\n', (876, 901), False, 'import torch\n'), ((914, 948), 'torch.sum', 'torch.sum', (['(pred_flat + target_flat)'], {}), '(pred_flat + target_flat)\n', (923, 948), False, 'import torch\n'), ((1378, 1414), 'torch.sum', 'torch.sum', (['(prediction_fg * target_fg)'], {}), '(prediction_fg * target_fg)\n', (1387, 1414), False, 'import torch\n'), ((1427, 1463), 'torch.sum', 'torch.sum', (['(prediction_fg + target_fg)'], {}), '(prediction_fg + target_fg)\n', (1436, 1463), False, 'import torch\n'), ((2933, 2945), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2943, 2945), True, 'from matplotlib import pyplot as plt\n'), ((2957, 2969), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2967, 2969), True, 'from matplotlib import pyplot as plt\n'), ((3681, 3720), 'numpy.save', 'np.save', (["(path + '/' + title)", 'prediction'], {}), "(path + '/' + title, prediction)\n", (3688, 3720), True, 'import numpy as np\n'), ((3037, 3062), 'numpy.arange', 'np.arange', (['query.shape[2]'], {}), '(query.shape[2])\n', (3046, 3062), True, 'import numpy as np\n'), ((3102, 3130), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3112, 3130), True, 'from matplotlib import pyplot as plt\n'), ((2725, 2740), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (2734, 2740), False, 'import torch\n'), 
((1129, 1179), 'scipy.spatial.distance.directed_hausdorff', 'directed_hausdorff', (['target_coord', 'prediction_coord'], {}), '(target_coord, prediction_coord)\n', (1147, 1179), False, 'from scipy.spatial.distance import directed_hausdorff\n'), ((1184, 1234), 'scipy.spatial.distance.directed_hausdorff', 'directed_hausdorff', (['prediction_coord', 'target_coord'], {}), '(prediction_coord, target_coord)\n', (1202, 1234), False, 'from scipy.spatial.distance import directed_hausdorff\n'), ((1797, 1811), 'numpy.linalg.norm', 'LA.norm', (['coord'], {}), '(coord)\n', (1804, 1811), True, 'from numpy import linalg as LA\n'), ((1992, 2006), 'numpy.linalg.norm', 'LA.norm', (['coord'], {}), '(coord)\n', (1999, 2006), True, 'from numpy import linalg as LA\n'), ((2411, 2432), 'numpy.where', 'np.where', (['(target == 1)'], {}), '(target == 1)\n', (2419, 2432), True, 'import numpy as np\n'), ((2435, 2456), 'numpy.where', 'np.where', (['(target == 1)'], {}), '(target == 1)\n', (2443, 2456), True, 'import numpy as np\n'), ((2459, 2480), 'numpy.where', 'np.where', (['(target == 1)'], {}), '(target == 1)\n', (2467, 2480), True, 'import numpy as np\n'), ((2537, 2562), 'numpy.where', 'np.where', (['(prediction == 1)'], {}), '(prediction == 1)\n', (2545, 2562), True, 'import numpy as np\n'), ((2565, 2590), 'numpy.where', 'np.where', (['(prediction == 1)'], {}), '(prediction == 1)\n', (2573, 2590), True, 'import numpy as np\n'), ((2593, 2618), 'numpy.where', 'np.where', (['(prediction == 1)'], {}), '(prediction == 1)\n', (2601, 2618), True, 'import numpy as np\n'), ((1825, 1840), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (1833, 1840), True, 'import numpy as np\n'), ((1841, 1867), 'numpy.array', 'np.array', (['prediction_coord'], {}), '(prediction_coord)\n', (1849, 1867), True, 'import numpy as np\n'), ((2020, 2035), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (2028, 2035), True, 'import numpy as np\n'), ((2038, 2060), 'numpy.array', 'np.array', 
(['target_coord'], {}), '(target_coord)\n', (2046, 2060), True, 'import numpy as np\n')] |
from server import Server
import os
def file_exists(server, name):
    """Return True if *name* appears in the server's home-directory listing."""
    out, _ = server.run("ls")
    print(out)
    listing = out.strip().split("\n")
    print(listing)
    return name in listing
if __name__ == "__main__":
    # Ad-hoc manual smoke test: open an SSH session to a fixed Lightsail host.
    # NOTE(review): the IP address and private-key path are machine-specific.
    server = Server(host = "192.168.3.11", user="ubuntu", key_filename="/Users/greg/.ssh/lightsail-ohio-gsd.pem")
    # Earlier experiments kept for reference:
    # server.run("ls -la", hide=False)
    # server.run("ps", hide=False)
    # server.run("echo hello >hello.txt", hide=False)
    # server.run("ls -la", hide=False)
    # server.run("cat hello.txt", hide=False)
    # out, _ = server.run("ls -la")
    # print(out.split("\n"))
    #if file_exists(server, "hello.txt"):
    #    print("file exists")
    # Query and dump the list of installed apt packages on the remote host.
    packages = server.get_installed_apt_packages()
    print(packages)
"server.Server"
] | [((232, 335), 'server.Server', 'Server', ([], {'host': '"""192.168.3.11"""', 'user': '"""ubuntu"""', 'key_filename': '"""/Users/greg/.ssh/lightsail-ohio-gsd.pem"""'}), "(host='192.168.3.11', user='ubuntu', key_filename=\n '/Users/greg/.ssh/lightsail-ohio-gsd.pem')\n", (238, 335), False, 'from server import Server\n')] |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.target import Target
from pants_test.base_test import BaseTest
# TODO(<NAME>) There are methods in BuildFileAddressMapper that are missing
# explicit unit tests: addresses_in_spec_path, spec_to_address, spec_to_addresses
class BuildFileAddressMapperTest(BaseTest):
  """Tests for BuildFileAddressMapper: resolution, scanning and error reporting."""

  def test_resolve(self):
    # Resolving an existing spec yields a BuildFileAddress plus its addressable.
    build_file = self.add_to_build_file('BUILD', 'target(name="foo")')
    address, addressable = self.address_mapper.resolve(Address.parse('//:foo'))
    self.assertIsInstance(address, BuildFileAddress)
    self.assertEqual(build_file, address.build_file)
    self.assertEqual('foo', address.target_name)
    self.assertEqual(address.target_name, addressable.addressed_name)
    self.assertEqual(addressable.addressed_type, Target)

  def test_resolve_spec(self):
    self.add_to_build_file('BUILD', dedent("""
      target(name='foozle')
      target(name='baz')
      """))
    # An unknown spec raises; a known one resolves to a Target addressable.
    with self.assertRaises(AddressLookupError):
      self.address_mapper.resolve_spec('//:bad_spec')
    resolved = self.address_mapper.resolve_spec('//:foozle')
    self.assertEqual(resolved.addressed_type, Target)

  def test_scan_addresses(self):
    root_build_file = self.add_to_build_file('BUILD', 'target(name="foo")')
    subdir_build_file = self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    subdir_suffix_build_file = self.add_to_build_file('subdir/BUILD.suffix', 'target(name="baz")')
    # A file with an invalid BUILD suffix must be ignored by the scan.
    with open(os.path.join(self.build_root, 'BUILD.invalid.suffix'), 'w') as invalid_build_file:
      invalid_build_file.write('target(name="foobar")')
    expected = {BuildFileAddress(root_build_file, 'foo'),
                BuildFileAddress(subdir_build_file, 'bar'),
                BuildFileAddress(subdir_suffix_build_file, 'baz')}
    self.assertEquals(expected, self.address_mapper.scan_addresses())

  def test_scan_addresses_with_excludes(self):
    root_build_file = self.add_to_build_file('BUILD', 'target(name="foo")')
    self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    # Excluding 'subdir' should leave only the root address.
    spec_excludes = [os.path.join(self.build_root, 'subdir')]
    self.assertEquals({BuildFileAddress(root_build_file, 'foo')},
                      self.address_mapper.scan_addresses(spec_excludes=spec_excludes))

  def test_scan_addresses_with_root(self):
    self.add_to_build_file('BUILD', 'target(name="foo")')
    subdir_build_file = self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    subdir_suffix_build_file = self.add_to_build_file('subdir/BUILD.suffix', 'target(name="baz")')
    # Scanning from 'subdir' must not pick up the root BUILD file.
    expected = {BuildFileAddress(subdir_build_file, 'bar'),
                BuildFileAddress(subdir_suffix_build_file, 'baz')}
    subdir = os.path.join(self.build_root, 'subdir')
    self.assertEquals(expected, self.address_mapper.scan_addresses(root=subdir))

  def test_scan_addresses_with_invalid_root(self):
    with self.assertRaises(BuildFileAddressMapper.InvalidRootError):
      self.address_mapper.scan_addresses(root='subdir')

  def test_raises_invalid_build_file_reference(self):
    # reference a BUILD file that doesn't exist
    with self.assertRaisesRegexp(BuildFileAddressMapper.InvalidBuildFileReference,
                                 '^BUILD file does not exist at: .*/non-existent-path'
                                 '\s+when translating spec //non-existent-path:a'):
      self.address_mapper.spec_to_address('//non-existent-path:a')

  def test_raises_address_not_in_build_file(self):
    self.add_to_build_file('BUILD', 'target(name="foo")')
    # ':bar' is not defined in the existing BUILD file.
    missing_address = Address.parse(':bar')
    with self.assertRaises(BuildFileAddressMapper.AddressNotInBuildFile):
      self.address_mapper.resolve(missing_address)

  def test_raises_address_invalid_address_error(self):
    with self.assertRaises(BuildFileAddressMapper.InvalidAddressError):
      self.address_mapper.resolve_spec("../foo")

  def test_raises_empty_build_file_error(self):
    # A BUILD file with no addressable targets counts as empty.
    self.add_to_build_file('BUILD', 'pass')
    with self.assertRaises(BuildFileAddressMapper.EmptyBuildFileError):
      self.address_mapper.resolve_spec('//:foo')

  def test_address_lookup_error_hierarcy(self):
    # Every mapper-specific error must subclass AddressLookupError.
    self.assertIsInstance(BuildFileAddressMapper.AddressNotInBuildFile(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.EmptyBuildFileError(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.InvalidBuildFileReference(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.InvalidAddressError(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.BuildFileScanError(), AddressLookupError)
| [
"pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.EmptyBuildFileError",
"textwrap.dedent",
"pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.InvalidBuildFileReference",
"pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.BuildFileScanError",
"pants.build_g... | [((3114, 3153), 'os.path.join', 'os.path.join', (['self.build_root', '"""subdir"""'], {}), "(self.build_root, 'subdir')\n", (3126, 3153), False, 'import os\n'), ((4162, 4183), 'pants.build_graph.address.Address.parse', 'Address.parse', (['""":bar"""'], {}), "(':bar')\n", (4175, 4183), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((995, 1018), 'pants.build_graph.address.Address.parse', 'Address.parse', (['"""//:foo"""'], {}), "('//:foo')\n", (1008, 1018), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((1370, 1444), 'textwrap.dedent', 'dedent', (['"""\n target(name=\'foozle\')\n target(name=\'baz\')\n """'], {}), '("""\n target(name=\'foozle\')\n target(name=\'baz\')\n """)\n', (1376, 1444), False, 'from textwrap import dedent\n'), ((2621, 2660), 'os.path.join', 'os.path.join', (['self.build_root', '"""subdir"""'], {}), "(self.build_root, 'subdir')\n", (2633, 2660), False, 'import os\n'), ((4767, 4813), 'pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.AddressNotInBuildFile', 'BuildFileAddressMapper.AddressNotInBuildFile', ([], {}), '()\n', (4811, 4813), False, 'from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper\n'), ((4861, 4905), 'pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.EmptyBuildFileError', 'BuildFileAddressMapper.EmptyBuildFileError', ([], {}), '()\n', (4903, 4905), False, 'from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper\n'), ((4953, 5003), 'pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.InvalidBuildFileReference', 'BuildFileAddressMapper.InvalidBuildFileReference', ([], {}), '()\n', (5001, 5003), False, 'from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper\n'), ((5051, 5095), 'pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.InvalidAddressError', 'BuildFileAddressMapper.InvalidAddressError', ([], {}), '()\n', (5093, 
5095), False, 'from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper\n'), ((5143, 5186), 'pants.build_graph.build_file_address_mapper.BuildFileAddressMapper.BuildFileScanError', 'BuildFileAddressMapper.BuildFileScanError', ([], {}), '()\n', (5184, 5186), False, 'from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper\n'), ((2005, 2058), 'os.path.join', 'os.path.join', (['self.build_root', '"""BUILD.invalid.suffix"""'], {}), "(self.build_root, 'BUILD.invalid.suffix')\n", (2017, 2058), False, 'import os\n'), ((2167, 2207), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', (['root_build_file', '"""foo"""'], {}), "(root_build_file, 'foo')\n", (2183, 2207), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((2232, 2274), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', (['subdir_build_file', '"""bar"""'], {}), "(subdir_build_file, 'bar')\n", (2248, 2274), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((2299, 2348), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', (['subdir_suffix_build_file', '"""baz"""'], {}), "(subdir_suffix_build_file, 'baz')\n", (2315, 2348), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((2685, 2725), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', (['root_build_file', '"""foo"""'], {}), "(root_build_file, 'foo')\n", (2701, 2725), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((3177, 3219), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', (['subdir_build_file', '"""bar"""'], {}), "(subdir_build_file, 'bar')\n", (3193, 3219), False, 'from pants.build_graph.address import Address, BuildFileAddress\n'), ((3244, 3293), 'pants.build_graph.address.BuildFileAddress', 'BuildFileAddress', (['subdir_suffix_build_file', '"""baz"""'], {}), "(subdir_suffix_build_file, 'baz')\n", (3260, 3293), False, 'from 
pants.build_graph.address import Address, BuildFileAddress\n')] |
from __future__ import absolute_import
__author__ = '<NAME>'
import time
import struct
try:
from pebble import pulse2
except ImportError:
pass
from . import BaseTransport, MessageTargetWatch
from libpebble2.exceptions import ConnectionError, PebbleError
class PULSETransport(BaseTransport):
    """
    Represents a direct connection to a physical/virtual Pebble using the PULSEv2 interface.
    This transport expects to be given a PULSE2 Link object.
    :param link: A PULSE2 Link object to tunnel Pebble Protocol over.
    :type link: pulse2.link.Link
    """
    # Pebble Protocol over PULSE requires an explicit open/close handshake.
    must_initialise = True
    # Port number of the Pebble-Protocol-over-PULSE (PPoPULSE) service.
    PPOPULSE_PORT = 0x3e22
    # Opcode byte prefixed to every PPoPULSE frame.
    OPCODE_PROTOCOL_DATA = 0x1
    OPCODE_PROTOCOL_OPEN = 0x2
    OPCODE_PROTOCOL_CLOSE = 0x3
    def __init__(self, link):
        self.link = link
        # PULSE socket; None while disconnected.
        self.connection = None
        # Reassembly buffer for partially received Pebble Protocol packets.
        self.buffer = b''
    @staticmethod
    def _chunks(list_items, chunk_length):
        # Yield successive chunk_length-sized slices of list_items.
        # NOTE(review): xrange makes this module Python 2 only (as does the
        # chr/ord byte-string handling below).
        for i in xrange(0, len(list_items), chunk_length):
            yield list_items[i:i+chunk_length]
    def connect(self):
        # Open the PPoPULSE socket and perform the protocol-open handshake.
        self.connection = self.link.open_socket('reliable', self.PPOPULSE_PORT)
        if not self.connection:
            raise ConnectionError('Failed to open PPoPULSE socket')
        self._send_with_opcode(self.OPCODE_PROTOCOL_OPEN)
        # Wait up to ~10 seconds for the watch to acknowledge the open request.
        # NOTE(review): _recv_with_opcode blocks, so the timeout is only
        # checked between received frames -- confirm acceptable.
        start_time = time.time()
        while time.time() < start_time + 10.0:
            opcode, _ = self._recv_with_opcode()
            if opcode == self.OPCODE_PROTOCOL_OPEN:
                break
        else:
            # Loop exhausted without seeing the ACK.
            raise ConnectionError('Timeout waiting for PPoPULSE open ACK')
    def disconnect(self):
        # Best-effort close notification, then tear down the socket.
        if self.connected:
            try:
                self._send_with_opcode(self.OPCODE_PROTOCOL_CLOSE)
            except pulse2.exceptions.SocketClosed:
                # Link already went away; nothing left to notify.
                pass
            self.connection.close()
            self.connection = None
    @property
    def connected(self):
        # Connected as long as a PULSE socket is held.
        return self.connection is not None
    def read_packet(self):
        # Block until one complete Pebble Protocol packet has been buffered,
        # then return it as a (MessageTargetWatch, bytes) pair.
        while self.connected:
            if len(self.buffer) >= 2:
                # The frame starts with a big-endian payload length; the full
                # frame size is that length plus a 4-byte header.
                length, = struct.unpack('!H', self.buffer[:2])
                length += 4
                if len(self.buffer) >= length:
                    msg_data = self.buffer[:length]
                    self.buffer = self.buffer[length:]
                    return MessageTargetWatch(), msg_data
            opcode, data = self._recv_with_opcode()
            if opcode == self.OPCODE_PROTOCOL_DATA:
                self.buffer += data
    def send_packet(self, message, target=MessageTargetWatch()):
        # NOTE(review): the default target instance is created once at class
        # definition time and shared across calls -- confirm it is stateless.
        assert isinstance(target, MessageTargetWatch)
        # Reserve one byte of each MTU-sized chunk for the opcode prefix.
        for chunk in self._chunks(message, self.connection.mtu - 1):
            self._send_with_opcode(self.OPCODE_PROTOCOL_DATA, chunk)
    def _recv_with_opcode(self):
        # Receive one PPoPULSE frame and split it into (opcode, payload).
        try:
            packet = self.connection.receive(block=True)
        except (AttributeError, pulse2.exceptions.SocketClosed):
            self.connection = None
            raise ConnectionError('PULSE transport closed')
        assert len(packet) >= 1
        # Python 2 str: packet[0] is a one-character byte string.
        opcode = ord(packet[0])
        data = packet[1:]
        return opcode, data
    def _send_with_opcode(self, opcode, body=None):
        # Prefix the opcode byte and push the frame down the socket.
        assert self.connected
        data = chr(opcode)
        if body:
            data += body
        self.connection.send(data)
| [
"struct.unpack",
"time.time",
"libpebble2.exceptions.ConnectionError"
] | [((1298, 1309), 'time.time', 'time.time', ([], {}), '()\n', (1307, 1309), False, 'import time\n'), ((1168, 1217), 'libpebble2.exceptions.ConnectionError', 'ConnectionError', (['"""Failed to open PPoPULSE socket"""'], {}), "('Failed to open PPoPULSE socket')\n", (1183, 1217), False, 'from libpebble2.exceptions import ConnectionError, PebbleError\n'), ((1324, 1335), 'time.time', 'time.time', ([], {}), '()\n', (1333, 1335), False, 'import time\n'), ((1512, 1568), 'libpebble2.exceptions.ConnectionError', 'ConnectionError', (['"""Timeout waiting for PPoPULSE open ACK"""'], {}), "('Timeout waiting for PPoPULSE open ACK')\n", (1527, 1568), False, 'from libpebble2.exceptions import ConnectionError, PebbleError\n'), ((2055, 2091), 'struct.unpack', 'struct.unpack', (['"""!H"""', 'self.buffer[:2]'], {}), "('!H', self.buffer[:2])\n", (2068, 2091), False, 'import struct\n'), ((2955, 2996), 'libpebble2.exceptions.ConnectionError', 'ConnectionError', (['"""PULSE transport closed"""'], {}), "('PULSE transport closed')\n", (2970, 2996), False, 'from libpebble2.exceptions import ConnectionError, PebbleError\n')] |
# -*- coding: utf-8 -*-
import cx_Oracle
import re
from collections import namedtuple
from .base import (SynDataDriver)
import logging
log = logging.getLogger(__name__)

# Connection URIs look like "user/password@//dsn"; the credential part is optional.
RE_CONN_TEMPLATE = re.compile(r"(?:(?P<user>[^/]*)/(?P<password>[^@]*)@//)?(?P<dsn>.*)")

OracleUriParts = namedtuple('OracleUriParts', ['user', 'password', 'dsn'])
class OracleConnMixin:
    def _parse_uri(self, conn_uri):
        """Split *conn_uri* ("user/password@//dsn") into an OracleUriParts tuple.

        Arguments:
            conn_uri {str} -- connection string
        Returns:
            [OracleUriParts] -- (user, password, dsn) parts of the uri
        """
        group = RE_CONN_TEMPLATE.match(conn_uri).group
        return OracleUriParts(group('user'), group('password'), group('dsn'))
class OracleDb(SynDataDriver, OracleConnMixin):
    # NOTE(review): presumably tells SynDataDriver to close cursors/connections
    # automatically -- confirm against the base class.
    autoclose = True
    def is_connected(self):
        """True once _connect() has set self.engine."""
        return self.engine is not None
    def _setup_cursor(self, cursor):
        """Attach a dict row factory so rows come back as {column: value}."""
        cursor.rowfactory = makeDictFactory(cursor)
        return cursor
    def _connect(self):
        """Open the cx_Oracle connection described by self.uri_parts."""
        user, password, dsn = self.uri_parts
        self.engine = cx_Oracle.connect(user, password, dsn, encoding="UTF-8")
# Thanks for: https://stackoverflow.com/a/35046018
def makeDictFactory(cursor):
    """Return a row factory mapping the cursor's column names to row values."""
    names = [column[0] for column in cursor.description]
    def createRow(*args):
        return dict(zip(names, args))
    return createRow
def makeNamedTupleFactory(cursor):
    """Return a namedtuple class whose fields are the cursor's lower-cased column names."""
    import collections
    fields = [column[0].lower() for column in cursor.description]
    return collections.namedtuple('Row', fields)
"logging.getLogger",
"cx_Oracle.connect",
"collections.namedtuple",
"re.compile"
] | [((144, 171), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'import logging\n'), ((331, 399), 're.compile', 're.compile', (['"""(?:(?P<user>[^/]*)/(?P<password>[^@]*)@//)?(?P<dsn>.*)"""'], {}), "('(?:(?P<user>[^/]*)/(?P<password>[^@]*)@//)?(?P<dsn>.*)')\n", (341, 399), False, 'import re\n'), ((1627, 1669), 'collections.namedtuple', 'collections.namedtuple', (['"""Row"""', 'columnNames'], {}), "('Row', columnNames)\n", (1649, 1669), False, 'import collections\n'), ((1210, 1266), 'cx_Oracle.connect', 'cx_Oracle.connect', (['user', 'password', 'dsn'], {'encoding': '"""UTF-8"""'}), "(user, password, dsn, encoding='UTF-8')\n", (1227, 1266), False, 'import cx_Oracle\n')] |
from logging.handlers import SysLogHandler
from logging import LogRecord
import json
class SyslogLoggerMsgOnlyFormatter(object):
    """Formatter that emits only the 'event' text of a structured record."""
    def format(self, record):
        # record.msg holds the structured event dict; forward just its text.
        return record.msg['event']
class SyslogLoggerJSONFormatter(object):
    """Formatter that serializes the whole structured record dict as JSON."""
    def format(self, record):
        return json.dumps(record.msg)
class SyslogLogger(object):
    # Handler created in __init__; class-level default kept as a safety net.
    __syslog_handler = None
    def __init__(self, address, frmt=None):
        """Create a syslog-backed logger.

        :param address: passed straight to SysLogHandler -- a (host, port)
            tuple or a unix domain socket path.
        :param frmt: None or "msg_only" emits just the event text; any other
            value emits the whole event dict as JSON.
        """
        self.__syslog_handler = SysLogHandler(address)
        if frmt is None or frmt == "msg_only":
            self.__syslog_handler.formatter = SyslogLoggerMsgOnlyFormatter()
        else:
            self.__syslog_handler.formatter = SyslogLoggerJSONFormatter()
    def close(self):
        # Release the handler's underlying socket.
        self.__syslog_handler.close()
    def msg(self, event_dict):
        """Emit *event_dict* through the syslog handler.

        LogRecord positional arguments are (name, level, pathname, lineno,
        msg, args, exc_info); pathname/lineno are dummies here.
        NOTE(review): LogRecord expects a numeric level but a string such as
        "WARNING" is passed through -- confirm this is intended.
        """
        record = LogRecord(event_dict.get("name", "unknown"),
                           event_dict.get("level", "WARNING"),
                           "/not_used/not_used.py", 1,
                           event_dict, [], None)
        # Serialize emission the same way logging.Handler.handle does.
        self.__syslog_handler.acquire()
        try:
            self.__syslog_handler.emit(record)
        finally:
            self.__syslog_handler.release()
| [
"json.dumps",
"logging.handlers.SysLogHandler"
] | [((286, 308), 'json.dumps', 'json.dumps', (['record.msg'], {}), '(record.msg)\n', (296, 308), False, 'import json\n'), ((445, 467), 'logging.handlers.SysLogHandler', 'SysLogHandler', (['address'], {}), '(address)\n', (458, 467), False, 'from logging.handlers import SysLogHandler\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError
from azure.cli.core.util import is_guid
from azure.graphrbac.models import GraphErrorException
from msrestazure.azure_exceptions import CloudError
from .._client_factory import cf_synapse_role_assignments, cf_synapse_role_definitions, cf_graph_client_factory
from ..constant import ITEM_NAME_MAPPING
# List Synapse Role Assignment
def list_role_assignments(cmd, workspace_name, role=None, assignee=None, assignee_object_id=None,
                          scope=None, item=None, item_type=None):
    """List Synapse role assignments, optionally filtered by role, assignee and scope."""
    # --assignee and --assignee-object-id are mutually exclusive.
    if assignee and assignee_object_id:
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    # --item and --item-type must be supplied together.
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
    # When an object id is supplied directly, skip the Graph API lookup.
    return _list_role_assignments(cmd, workspace_name, role, assignee or assignee_object_id,
                                  scope, resolve_assignee=(not assignee_object_id),
                                  item=item, item_type=item_type)
def _list_role_assignments(cmd, workspace_name, role=None, assignee=None, scope=None,
                           resolve_assignee=True, item=None, item_type=None):
    """Prepare scope, role ID and resolve object ID from Graph API."""
    if scope or item or item_type:
        scope = _build_role_scope(workspace_name, scope, item, item_type)
    role_id = _resolve_role_id(cmd, role, workspace_name)
    if resolve_assignee:
        object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True)
    else:
        object_id = assignee
    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    return client.list_role_assignments(role_id, object_id, scope).value
# Show Synapse Role Assignment By Id
def get_role_assignment_by_id(cmd, workspace_name, role_assignment_id):
    """Fetch a single Synapse role assignment by its GUID."""
    return cf_synapse_role_assignments(cmd.cli_ctx, workspace_name).get_role_assignment_by_id(role_assignment_id)
# Delete Synapse Role Assignment
def delete_role_assignment(cmd, workspace_name, ids=None, assignee=None, assignee_object_id=None, role=None,
                           scope=None, item=None, item_type=None):
    """Delete Synapse role assignments, either by explicit --ids or by filter criteria."""
    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    if not any([ids, assignee, assignee_object_id, role, scope, item, item_type]):
        raise ArgumentUsageError('usage error: No argument are provided. --assignee STRING | --ids GUID')
    if ids:
        # --ids is exclusive with every filter argument.
        if any([assignee, assignee_object_id, role, scope, item, item_type]):
            raise ArgumentUsageError('You should not provide --role or --assignee or --assignee_object_id '
                                     'or --scope or --principal-type when --ids is provided.')
        existing_ids = [x.id for x in list_role_assignments(cmd, workspace_name,
                                                            None, None, None, None, None, None)]
        # Validate every requested id before deleting anything.
        for assignment_id in ids:
            if assignment_id not in existing_ids:
                raise ArgumentUsageError("role assignment id:'{}' doesn't exist.".format(assignment_id))
        for assignment_id in ids:
            client.delete_role_assignment_by_id(assignment_id)
        return
    matched = list_role_assignments(cmd, workspace_name, role, assignee, assignee_object_id,
                                    scope, item, item_type)
    if scope or item or item_type:
        # Narrow to assignments whose scope matches exactly.
        scope = _build_role_scope(workspace_name, scope, item, item_type)
        matched = [x for x in matched if x.scope == scope]
    if not matched:
        raise CLIError('No matched assignments were found to delete, please provide correct --role or --assignee.'
                       'Use `az synapse role assignment list` to get role assignments.')
    for assignment in matched:
        client.delete_role_assignment_by_id(assignment.id)
def create_role_assignment(cmd, workspace_name, role, assignee=None, assignee_object_id=None,
                           scope=None, assignee_principal_type=None, item_type=None, item=None, assignment_id=None):
    """Check parameters are provided correctly, then call _create_role_assignment.

    On a 409 'role assignment already exists' error the call degrades to
    listing the matching assignments, making the command idempotent.
    """
    # A caller-supplied assignment id must be a GUID.
    if assignment_id and not is_guid(assignment_id):
        raise InvalidArgumentValueError('usage error: --id GUID')
    # Exactly one of --assignee / --assignee-object-id is required.
    if bool(assignee) == bool(assignee_object_id):
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    # --assignee-principal-type only makes sense with an explicit object id.
    if assignee_principal_type and not assignee_object_id:
        raise ArgumentUsageError('usage error: --assignee-object-id GUID [--assignee-principal-type]')
    # --item and --item-type must be supplied together.
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
    try:
        return _create_role_assignment(cmd, workspace_name, role, assignee or assignee_object_id, scope, item,
                                       item_type, resolve_assignee=(not assignee_object_id),
                                       assignee_principal_type=assignee_principal_type, assignment_id=assignment_id)
    except Exception as ex:  # pylint: disable=broad-except
        if _error_caused_by_role_assignment_exists(ex):  # for idempotent
            return list_role_assignments(cmd, workspace_name, role=role,
                                         assignee=assignee, assignee_object_id=assignee_object_id,
                                         scope=scope, item=item, item_type=item_type)
        raise
def _resolve_object_id(cmd, assignee, fallback_to_object_id=False):
    """Resolve *assignee* (UPN, mail, display name, or GUID) to an AAD object id.

    Tries users, then service principals, then groups, and finally treats a
    GUID-shaped assignee as an object id. Raises CLIError when zero or more
    than one directory object matches. On Graph API failure, a GUID assignee
    is returned as-is when fallback_to_object_id is True.
    """
    if assignee is None:
        return None
    graph_client = cf_graph_client_factory(cmd.cli_ctx)
    matches = None
    try:
        user_filter = ("userPrincipalName eq '{0}' or mail eq '{0}' or displayName eq '{0}'"
                       .format(assignee))
        matches = list(graph_client.users.list(filter=user_filter))
        if not matches:
            matches = list(graph_client.service_principals.list(filter="displayName eq '{}'".format(assignee)))
        if not matches:
            matches = list(graph_client.groups.list(filter="mail eq '{}'".format(assignee)))
        if not matches and is_guid(assignee):  # assume an object id; verify it exists
            matches = _get_object_stubs(graph_client, [assignee])
        # 2+ matches should never happen, so only 'no match' and '>1' are checked
        if not matches:
            raise CLIError("Cannot find user or group or service principal in graph database for '{assignee}'. "
                           "If the assignee is a principal id, make sure the corresponding principal is created "
                           "with 'az ad sp create --id {assignee}'.".format(assignee=assignee))
        if len(matches) > 1:
            raise CLIError("Find more than one user or group or service principal in graph database for '{assignee}'. "
                           "Please using --assignee-object-id GUID to specify assignee accurately"
                           .format(assignee=assignee))
        return matches[0].object_id
    except (CloudError, GraphErrorException):
        if fallback_to_object_id and is_guid(assignee):
            return assignee
        raise
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given object ids, 1000 ids per Graph request."""
    from azure.graphrbac.models import GetObjectsParameters
    assignee_ids = list(assignees)  # callers may pass a set; slicing needs a sequence
    stubs = []
    for start in range(0, len(assignee_ids), 1000):
        params = GetObjectsParameters(include_directory_object_references=True,
                                      object_ids=assignee_ids[start:start + 1000])
        stubs.extend(graph_client.objects.get_objects_by_object_ids(params))
    return stubs
def _error_caused_by_role_assignment_exists(ex):
    """Return True when *ex* is the service's HTTP 409 'role assignment already exists' conflict."""
    is_conflict = getattr(ex, 'status_code', None) == 409
    return is_conflict and 'role assignment already exists' in ex.message
def _create_role_assignment(cmd, workspace_name, role, assignee, scope=None, item=None, item_type=None,
                            resolve_assignee=True, assignee_principal_type=None, assignment_id=None):
    """Build the scope, resolve role and assignee ids via Graph, then issue the create call."""
    scope = _build_role_scope(workspace_name, scope, item, item_type)
    role_id = _resolve_role_id(cmd, role, workspace_name)
    if resolve_assignee:
        object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True)
    else:
        object_id = assignee
    assignment_client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    # Generate a fresh GUID when the caller did not supply --id.
    effective_id = assignment_id if assignment_id is not None else _gen_guid()
    return assignment_client.create_role_assignment(effective_id, role_id, object_id, scope,
                                                   assignee_principal_type)
def _build_role_scope(workspace_name, scope, item, item_type):
    """Return the Synapse RBAC scope string.

    An explicit *scope* wins; otherwise the scope is built from the workspace
    name, optionally narrowed to a specific item when both item and item_type
    are given (e.g. workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}).
    """
    if scope:
        return scope
    if item and item_type:
        return "workspaces/" + workspace_name + "/" + item_type + "/" + item
    return "workspaces/" + workspace_name
def _resolve_role_id(cmd, role, workspace_name):
    """Translate a role name or GUID into the role-definition id.

    A falsy role resolves to None; a GUID is accepted as-is; otherwise the
    workspace's role definitions are listed and matched case-insensitively
    by name. Raises CLIError when the name is unknown.
    """
    if not role:
        return None
    if is_guid(role):
        return role
    definition_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
    definitions = definition_client.list_role_definitions()
    name_to_id = {d.name.lower(): d.id for d in definitions if d.name}
    key = role.lower()
    if key not in name_to_id:
        raise CLIError("Role '{}' doesn't exist.".format(role))
    return name_to_id[key]
def _gen_guid():
    """Return a freshly generated random UUID (uuid4)."""
    from uuid import uuid4
    return uuid4()
# List Synapse Role Definitions Scope
def list_scopes(cmd, workspace_name):
    """List the RBAC scopes available in the given Synapse workspace."""
    return cf_synapse_role_definitions(cmd.cli_ctx, workspace_name).list_scopes()
# List Synapse Role Definitions
def list_role_definitions(cmd, workspace_name, is_built_in=None):
    """List the workspace's role definitions, optionally filtered to built-in roles only."""
    definition_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
    return definition_client.list_role_definitions(is_built_in)
def _build_role_scope_format(scope, item_type):
    """Return a scope *template* containing placeholders (e.g. {workspaceName}).

    Unlike _build_role_scope, nothing is substituted here; ITEM_NAME_MAPPING
    supplies the per-item placeholder name for the given item type.
    """
    if scope:
        return scope
    if item_type:
        return "workspaces/{workspaceName}/" + item_type + "/" + ITEM_NAME_MAPPING[item_type]
    return "workspaces/{workspaceName}"
# Get Synapse Role Definition
def get_role_definition(cmd, workspace_name, role):
    """Show a single role definition, looked up by role name or GUID."""
    definition_id = _resolve_role_id(cmd, role, workspace_name)
    definition_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
    return definition_client.get_role_definition_by_id(definition_id)
| [
"azure.cli.core.azclierror.ArgumentUsageError",
"azure.cli.core.azclierror.InvalidArgumentValueError",
"azure.graphrbac.models.GetObjectsParameters",
"uuid.uuid4",
"azure.cli.core.util.is_guid",
"knack.util.CLIError"
] | [((9531, 9544), 'azure.cli.core.util.is_guid', 'is_guid', (['role'], {}), '(role)\n', (9538, 9544), False, 'from azure.cli.core.util import is_guid\n'), ((10037, 10049), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10047, 10049), False, 'import uuid\n'), ((1025, 1110), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""usage error: --assignee STRING | --assignee-object-id GUID"""'], {}), "('usage error: --assignee STRING | --assignee-object-id GUID'\n )\n", (1043, 1110), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((1159, 1226), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""usage error: --item-type STRING --item STRING"""'], {}), "('usage error: --item-type STRING --item STRING')\n", (1177, 1226), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((2762, 2858), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""usage error: No argument are provided. --assignee STRING | --ids GUID"""'], {}), "(\n 'usage error: No argument are provided. 
--assignee STRING | --ids GUID')\n", (2780, 2858), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((4217, 4390), 'knack.util.CLIError', 'CLIError', (['"""No matched assignments were found to delete, please provide correct --role or --assignee.Use `az synapse role assignment list` to get role assignments."""'], {}), "(\n 'No matched assignments were found to delete, please provide correct --role or --assignee.Use `az synapse role assignment list` to get role assignments.'\n )\n", (4225, 4390), False, 'from knack.util import CLIError\n'), ((4773, 4824), 'azure.cli.core.azclierror.InvalidArgumentValueError', 'InvalidArgumentValueError', (['"""usage error: --id GUID"""'], {}), "('usage error: --id GUID')\n", (4798, 4824), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((4891, 4976), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""usage error: --assignee STRING | --assignee-object-id GUID"""'], {}), "('usage error: --assignee STRING | --assignee-object-id GUID'\n )\n", (4909, 4976), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((5046, 5139), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""usage error: --assignee-object-id GUID [--assignee-principal-type]"""'], {}), "(\n 'usage error: --assignee-object-id GUID [--assignee-principal-type]')\n", (5064, 5139), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((5188, 5255), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""usage error: --item-type STRING --item STRING"""'], {}), "('usage error: --item-type STRING --item STRING')\n", (5206, 5255), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((7900, 8001), 'azure.graphrbac.models.GetObjectsParameters', 'GetObjectsParameters', ([], 
{'include_directory_object_references': '(True)', 'object_ids': 'assignees[i:i + 1000]'}), '(include_directory_object_references=True, object_ids=\n assignees[i:i + 1000])\n', (7920, 8001), False, 'from azure.graphrbac.models import GetObjectsParameters\n'), ((2963, 3117), 'azure.cli.core.azclierror.ArgumentUsageError', 'ArgumentUsageError', (['"""You should not provide --role or --assignee or --assignee_object_id or --scope or --principal-type when --ids is provided."""'], {}), "(\n 'You should not provide --role or --assignee or --assignee_object_id or --scope or --principal-type when --ids is provided.'\n )\n", (2981, 3117), False, 'from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError\n'), ((4735, 4757), 'azure.cli.core.util.is_guid', 'is_guid', (['assignment_id'], {}), '(assignment_id)\n', (4742, 4757), False, 'from azure.cli.core.util import is_guid\n'), ((6625, 6642), 'azure.cli.core.util.is_guid', 'is_guid', (['assignee'], {}), '(assignee)\n', (6632, 6642), False, 'from azure.cli.core.util import is_guid\n'), ((7588, 7605), 'azure.cli.core.util.is_guid', 'is_guid', (['assignee'], {}), '(assignee)\n', (7595, 7605), False, 'from azure.cli.core.util import is_guid\n')] |
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : tensorflow_cookbook
@File : C0707_Doc2Vec.py
@Version : v0.1
@Time : 2019-12-06 17:12
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook, Nick McClure", Sec0707, P172
@Desc : NLP: sentiment analysis implemented with TensorFlow using Doc2Vec.
@Understanding: the key idea is combining document embeddings with word embeddings.
There are two ways to combine them: (1) add the document embedding to the word embeddings;
(2) concatenate the document embedding after the word embeddings.
This model uses the second way. The data set used here does not showcase the benefit of
Doc2Vec well, so this example mainly demonstrates HOW to use the model, not what it changes.
It also shows that, although neural-network training needs little up-front feature work,
preparing the data well is still very important; good featurization requires understanding the model.
"""
# common imports
import os
import pickle
import sys
import matplotlib.pyplot as plt
import numpy as np # pip install "numpy<1.17" -- versions below 1.17 avoid a warning here
import sklearn
import tensorflow as tf
import winsound
from nltk.corpus import stopwords
from tensorflow.python.framework import ops
# configure how numpy arrays are printed (full arrays, fixed precision)
from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Seed the RNGs so random results are reproducible across runs
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Reset the default computation graph
ops.reset_default_graph()
# Open graph session
sess = tf.Session()
# ----------------------------------------------------------------------
# Declare model parameters
data_folder_name = 'temp'
batch_size = 500
vocabulary_size = 7500
generations = 100000
model_learning_rate = 0.001
embedding_size = 200 # Word embedding size
doc_embedding_size = 100 # Document embedding size
concatenated_size = embedding_size + doc_embedding_size
num_sampled = int(batch_size / 2) # Number of negative examples to sample.
window_size = 3 # How many words to consider to the left.
# Add checkpoints to training
save_embeddings_every = 5000
print_valid_every = 5000
print_loss_every = 100
# Declare stop words
stops = stopwords.words('english')
# We pick a few test words for validation.
valid_words = ['love', 'hate', 'happy', 'sad', 'man', 'woman']
# Later we will have to transform these into indices
# Load the movie review data
print('Loading Data')
texts, target = load_movie_data()
# Normalize text
print('Normalizing Text Data')
texts = normalize_text(texts, stops)
# Texts must contain at least 3 words
target = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > window_size]
texts = [x for x in texts if len(x.split()) > window_size]
assert (len(target) == len(texts))
# Build our data set and dictionaries
print('Creating Dictionary')
word_dictionary = build_dictionary(texts, vocabulary_size)
word_dictionary_rev = dict(zip(word_dictionary.values(), word_dictionary.keys()))
text_data = text_to_numbers(texts, word_dictionary)
# Map the validation words to their vocabulary indices
valid_examples = [word_dictionary[x] for x in valid_words]
print('Creating Model')
# 6. Define the word embeddings and declare the noise-contrastive estimation (NCE) loss
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
nce_weights = tf.Variable(tf.truncated_normal(
        [vocabulary_size, concatenated_size], stddev = 1.0 / np.sqrt(concatenated_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Create data/target placeholders
x_inputs = tf.placeholder(tf.int32, shape = [None, window_size + 1]) # window_size word indices plus 1 document index
y_target = tf.placeholder(tf.int32, shape = [None, 1])
valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
# 8. Build the word-embedding and document-embedding lookups: sum the word vectors, then concatenate the doc vector
# Word embedding lookup (CBOW-style: sum over the context window)
embed = tf.zeros([batch_size, embedding_size])
for element in range(window_size):
    embed += tf.nn.embedding_lookup(embeddings, x_inputs[:, element])
# Document embedding lookup (the doc index is the review's unique position in the loaded corpus)
doc_indices = tf.slice(x_inputs, [0, window_size], [batch_size, 1])
doc_embeddings = tf.Variable(tf.random_uniform([len(texts), doc_embedding_size], -1.0, 1.0))
doc_embed = tf.nn.embedding_lookup(doc_embeddings, doc_indices)
# Concatenate the summed word embedding with the document embedding
final_embed = tf.concat(axis = 1, values = [embed, tf.squeeze(doc_embed)])
# 9. Declare the loss function and the optimizer
# Get loss from prediction
loss = tf.reduce_mean(tf.nn.nce_loss(weights = nce_weights,
                                     biases = nce_biases,
                                     labels = y_target,
                                     inputs = final_embed,
                                     num_sampled = num_sampled,
                                     num_classes = vocabulary_size))
# Create optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate = model_learning_rate)
train_step = optimizer.minimize(loss)
# 10. Cosine similarity between the validation words and the whole vocabulary
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b = True)
# 11. Create a model Saver used to persist the word and document embeddings
saver = tf.train.Saver({"embeddings": embeddings, "doc_embeddings": doc_embeddings})
# Add variable initializer.
init = tf.global_variables_initializer()
sess.run(init)
# Train the Doc2Vec model
print('Starting Training')
loss_vec = []
loss_x_vec = []
for i in range(generations):
    batch_inputs, batch_labels = generate_batch_data(text_data, batch_size, window_size, method = 'doc2vec')
    feed_dict = {x_inputs: batch_inputs, y_target: batch_labels}
    # Run the train step
    sess.run(train_step, feed_dict = feed_dict)
    # Return the loss
    if (i + 1) % print_loss_every == 0:
        loss_val = sess.run(loss, feed_dict = feed_dict)
        loss_vec.append(loss_val)
        loss_x_vec.append(i + 1)
        print('Loss at step {} : {}'.format(i + 1, loss_val))
    # Validation: Print some random words and top 5 related words
    if (i + 1) % print_valid_every == 0:
        sim = sess.run(similarity, feed_dict = feed_dict)
        for j in range(len(valid_words)):
            valid_word = word_dictionary_rev[valid_examples[j]]
            top_k = 5 # number of nearest neighbors
            nearest = (-sim[j, :]).argsort()[1:top_k + 1]
            log_str = "Nearest to {}:".format(valid_word)
            for k in range(top_k):
                close_word = word_dictionary_rev[nearest[k]]
                log_str = '{} {},'.format(log_str, close_word)
            print(log_str)
    # Save dictionary + embeddings
    if (i + 1) % save_embeddings_every == 0:
        # Save vocabulary dictionary
        with open(os.path.join(data_folder_name, 'movie_vocab.pkl'), 'wb') as f:
            pickle.dump(word_dictionary, f)
        # Save embeddings
        model_checkpoint_path = os.path.join(os.getcwd(), data_folder_name, 'doc2vec_movie_embeddings.ckpt')
        save_path = saver.save(sess, model_checkpoint_path)
        print('Model saved in file: {}'.format(save_path))
# Start logistic model-------------------------
# Use the trained embedding matrices to fit a logistic-regression sentiment classifier
max_words = 20
logistic_batch_size = 500
# Split dataset into train and test sets
# Need to keep the indices sorted to keep track of document index
train_indices = np.sort(np.random.choice(len(target), round(0.8 * len(target)), replace = False))
test_indices = np.sort(np.array(list(set(range(len(target))) - set(train_indices))))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])
target_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])
# Convert texts to lists of indices
text_data_train = np.array(text_to_numbers(texts_train, word_dictionary))
text_data_test = np.array(text_to_numbers(texts_test, word_dictionary))
# Pad/crop movie reviews to specific length
text_data_train = np.array([x[0:max_words] for x in [y + [0] * max_words for y in text_data_train]])
text_data_test = np.array([x[0:max_words] for x in [y + [0] * max_words for y in text_data_test]])
# Define Logistic placeholders
log_x_inputs = tf.placeholder(tf.int32, shape = [None, max_words + 1]) # plus 1 for doc index
log_y_target = tf.placeholder(tf.int32, shape = [None, 1])
# Define logistic embedding lookup (needed if we have two different batch sizes)
# Add together element embeddings in window:
log_embed = tf.zeros([logistic_batch_size, embedding_size])
for element in range(max_words):
    log_embed += tf.nn.embedding_lookup(embeddings, log_x_inputs[:, element])
log_doc_indices = tf.slice(log_x_inputs, [0, max_words], [logistic_batch_size, 1])
log_doc_embed = tf.nn.embedding_lookup(doc_embeddings, log_doc_indices)
# concatenate embeddings
log_final_embed = tf.concat(axis = 1, values = [log_embed, tf.squeeze(log_doc_embed)])
# Define model:
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape = [concatenated_size, 1]))
b = tf.Variable(tf.random_normal(shape = [1, 1]))
# Declare logistic model (sigmoid in loss function)
model_output = tf.add(tf.matmul(log_final_embed, A), b)
# Declare loss function (Cross Entropy loss)
logistic_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits = model_output, labels = tf.cast(log_y_target, tf.float32)))
# Actual Prediction
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, tf.cast(log_y_target, tf.float32)), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Declare optimizer
logistic_opt = tf.train.GradientDescentOptimizer(learning_rate = 0.01)
logistic_train_step = logistic_opt.minimize(logistic_loss, var_list = [A, b])
# Intitialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start Logistic Regression
print('Starting Logistic Doc2Vec Model Training')
train_loss, test_loss = [], []
train_acc, test_acc = [], []
i_data = []
for i in range(10000):
    rand_index = np.random.choice(text_data_train.shape[0], size = logistic_batch_size)
    rand_x = text_data_train[rand_index]
    # Append review index at the end of text data
    rand_x_doc_indices = train_indices[rand_index]
    # Only here is the input fully assembled: word indices + document index
    rand_x = np.hstack((rand_x, np.transpose([rand_x_doc_indices])))
    rand_y = np.transpose([target_train[rand_index]])
    feed_dict = {log_x_inputs: rand_x, log_y_target: rand_y}
    sess.run(logistic_train_step, feed_dict = feed_dict)
    # Only record loss and accuracy every 100 generations
    if (i + 1) % 100 == 0:
        rand_index_test = np.random.choice(text_data_test.shape[0], size = logistic_batch_size)
        rand_x_test = text_data_test[rand_index_test]
        rand_x_doc_indices_test = test_indices[rand_index_test]
        rand_x_test = np.hstack((rand_x_test, np.transpose([rand_x_doc_indices_test])))
        rand_y_test = np.transpose([target_test[rand_index_test]])
        test_feed_dict = {log_x_inputs: rand_x_test, log_y_target: rand_y_test}
        i_data.append(i + 1)
        train_loss_temp = sess.run(logistic_loss, feed_dict = feed_dict)
        train_loss.append(train_loss_temp)
        test_loss_temp = sess.run(logistic_loss, feed_dict = test_feed_dict)
        test_loss.append(test_loss_temp)
        train_acc_temp = sess.run(accuracy, feed_dict = feed_dict)
        train_acc.append(train_acc_temp)
        test_acc_temp = sess.run(accuracy, feed_dict = test_feed_dict)
        test_acc.append(test_acc_temp)
        if (i + 1) % 500 == 0:
            acc_and_loss = [i + 1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]
            acc_and_loss = [np.round(x, 2) for x in acc_and_loss]
            print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'
                  .format(*acc_and_loss))
# Plot loss over time (axis labels/legends are intentionally kept in Chinese: they are runtime strings)
plt.figure()
plt.plot(i_data, train_loss, 'k-', label = '训练集')
plt.plot(i_data, test_loss, 'r--', label = '测试集', linewidth = 4)
plt.title('每次迭代的交叉熵损失')
plt.xlabel('迭代次数')
plt.ylabel('交叉熵损失')
plt.legend(loc = 'upper right')
# Plot train and test accuracy
plt.figure()
plt.plot(i_data, train_acc, 'k-', label = '训练集')
plt.plot(i_data, test_acc, 'r--', label = '测试集', linewidth = 4)
plt.title('训练集和测试集的精度')
plt.xlabel('迭代次数')
plt.ylabel('精度')
plt.legend(loc = 'lower right')
# ----------------------------------------------------------------------
# Beep when the run finishes
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
pass
| [
"numpy.sqrt",
"tensorflow.python.framework.ops.reset_default_graph",
"matplotlib.pyplot.ylabel",
"text_tools.generate_batch_data",
"text_tools.text_to_numbers",
"numpy.array",
"tensorflow.reduce_mean",
"text_tools.build_dictionary",
"tensorflow.set_random_seed",
"tensorflow.cast",
"tensorflow.sl... | [((1086, 1171), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=8, suppress=True, threshold=np.inf, linewidth=200\n )\n', (1105, 1171), True, 'import numpy as np\n'), ((1219, 1239), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1233, 1239), True, 'import numpy as np\n'), ((1240, 1264), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (1258, 1264), True, 'import tensorflow as tf\n'), ((1621, 1646), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (1644, 1646), False, 'from tensorflow.python.framework import ops\n'), ((1675, 1687), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1685, 1687), True, 'import tensorflow as tf\n'), ((2330, 2356), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2345, 2356), False, 'from nltk.corpus import stopwords\n'), ((2585, 2602), 'text_tools.load_movie_data', 'load_movie_data', ([], {}), '()\n', (2600, 2602), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((2660, 2688), 'text_tools.normalize_text', 'normalize_text', (['texts', 'stops'], {}), '(texts, stops)\n', (2674, 2688), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((2992, 3032), 'text_tools.build_dictionary', 'build_dictionary', (['texts', 'vocabulary_size'], {}), '(texts, vocabulary_size)\n', (3008, 3032), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((3127, 3166), 'text_tools.text_to_numbers', 'text_to_numbers', (['texts', 'word_dictionary'], {}), '(texts, word_dictionary)\n', (3142, 3166), False, 'from text_tools import build_dictionary, generate_batch_data, 
load_movie_data, normalize_text, text_to_numbers\n'), ((3621, 3676), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, window_size + 1]'}), '(tf.int32, shape=[None, window_size + 1])\n', (3635, 3676), True, 'import tensorflow as tf\n'), ((3721, 3762), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]'}), '(tf.int32, shape=[None, 1])\n', (3735, 3762), True, 'import tensorflow as tf\n'), ((3781, 3824), 'tensorflow.constant', 'tf.constant', (['valid_examples'], {'dtype': 'tf.int32'}), '(valid_examples, dtype=tf.int32)\n', (3792, 3824), True, 'import tensorflow as tf\n'), ((3899, 3937), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, embedding_size]'], {}), '([batch_size, embedding_size])\n', (3907, 3937), True, 'import tensorflow as tf\n'), ((4089, 4142), 'tensorflow.slice', 'tf.slice', (['x_inputs', '[0, window_size]', '[batch_size, 1]'], {}), '(x_inputs, [0, window_size], [batch_size, 1])\n', (4097, 4142), True, 'import tensorflow as tf\n'), ((4248, 4299), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['doc_embeddings', 'doc_indices'], {}), '(doc_embeddings, doc_indices)\n', (4270, 4299), True, 'import tensorflow as tf\n'), ((4832, 4900), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'model_learning_rate'}), '(learning_rate=model_learning_rate)\n', (4865, 4900), True, 'import tensorflow as tf\n'), ((5096, 5156), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['normalized_embeddings', 'valid_dataset'], {}), '(normalized_embeddings, valid_dataset)\n', (5118, 5156), True, 'import tensorflow as tf\n'), ((5170, 5238), 'tensorflow.matmul', 'tf.matmul', (['valid_embeddings', 'normalized_embeddings'], {'transpose_b': '(True)'}), '(valid_embeddings, normalized_embeddings, transpose_b=True)\n', (5179, 5238), True, 'import tensorflow as tf\n'), ((5285, 5361), 'tensorflow.train.Saver', 'tf.train.Saver', (["{'embeddings': embeddings, 
'doc_embeddings': doc_embeddings}"], {}), "({'embeddings': embeddings, 'doc_embeddings': doc_embeddings})\n", (5299, 5361), True, 'import tensorflow as tf\n'), ((5398, 5431), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5429, 5431), True, 'import tensorflow as tf\n'), ((8122, 8210), 'numpy.array', 'np.array', (['[x[0:max_words] for x in [(y + [0] * max_words) for y in text_data_train]]'], {}), '([x[0:max_words] for x in [(y + [0] * max_words) for y in\n text_data_train]])\n', (8130, 8210), True, 'import numpy as np\n'), ((8222, 8309), 'numpy.array', 'np.array', (['[x[0:max_words] for x in [(y + [0] * max_words) for y in text_data_test]]'], {}), '([x[0:max_words] for x in [(y + [0] * max_words) for y in\n text_data_test]])\n', (8230, 8309), True, 'import numpy as np\n'), ((8351, 8404), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, max_words + 1]'}), '(tf.int32, shape=[None, max_words + 1])\n', (8365, 8404), True, 'import tensorflow as tf\n'), ((8446, 8487), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]'}), '(tf.int32, shape=[None, 1])\n', (8460, 8487), True, 'import tensorflow as tf\n'), ((8629, 8676), 'tensorflow.zeros', 'tf.zeros', (['[logistic_batch_size, embedding_size]'], {}), '([logistic_batch_size, embedding_size])\n', (8637, 8676), True, 'import tensorflow as tf\n'), ((8807, 8871), 'tensorflow.slice', 'tf.slice', (['log_x_inputs', '[0, max_words]', '[logistic_batch_size, 1]'], {}), '(log_x_inputs, [0, max_words], [logistic_batch_size, 1])\n', (8815, 8871), True, 'import tensorflow as tf\n'), ((8888, 8943), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['doc_embeddings', 'log_doc_indices'], {}), '(doc_embeddings, log_doc_indices)\n', (8910, 8943), True, 'import tensorflow as tf\n'), ((9715, 9750), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['predictions_correct'], {}), '(predictions_correct)\n', (9729, 9750), True, 'import 
tensorflow as tf\n'), ((9787, 9840), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (9820, 9840), True, 'import tensorflow as tf\n'), ((9953, 9986), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9984, 9986), True, 'import tensorflow as tf\n'), ((12075, 12087), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12085, 12087), True, 'import matplotlib.pyplot as plt\n'), ((12088, 12135), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'train_loss', '"""k-"""'], {'label': '"""训练集"""'}), "(i_data, train_loss, 'k-', label='训练集')\n", (12096, 12135), True, 'import matplotlib.pyplot as plt\n'), ((12138, 12198), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'test_loss', '"""r--"""'], {'label': '"""测试集"""', 'linewidth': '(4)'}), "(i_data, test_loss, 'r--', label='测试集', linewidth=4)\n", (12146, 12198), True, 'import matplotlib.pyplot as plt\n'), ((12203, 12226), 'matplotlib.pyplot.title', 'plt.title', (['"""每次迭代的交叉熵损失"""'], {}), "('每次迭代的交叉熵损失')\n", (12212, 12226), True, 'import matplotlib.pyplot as plt\n'), ((12227, 12245), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""迭代次数"""'], {}), "('迭代次数')\n", (12237, 12245), True, 'import matplotlib.pyplot as plt\n'), ((12246, 12265), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""交叉熵损失"""'], {}), "('交叉熵损失')\n", (12256, 12265), True, 'import matplotlib.pyplot as plt\n'), ((12266, 12295), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (12276, 12295), True, 'import matplotlib.pyplot as plt\n'), ((12330, 12342), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12340, 12342), True, 'import matplotlib.pyplot as plt\n'), ((12343, 12389), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'train_acc', '"""k-"""'], {'label': '"""训练集"""'}), "(i_data, train_acc, 'k-', label='训练集')\n", (12351, 12389), True, 'import 
matplotlib.pyplot as plt\n'), ((12392, 12451), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'test_acc', '"""r--"""'], {'label': '"""测试集"""', 'linewidth': '(4)'}), "(i_data, test_acc, 'r--', label='测试集', linewidth=4)\n", (12400, 12451), True, 'import matplotlib.pyplot as plt\n'), ((12456, 12479), 'matplotlib.pyplot.title', 'plt.title', (['"""训练集和测试集的精度"""'], {}), "('训练集和测试集的精度')\n", (12465, 12479), True, 'import matplotlib.pyplot as plt\n'), ((12480, 12498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""迭代次数"""'], {}), "('迭代次数')\n", (12490, 12498), True, 'import matplotlib.pyplot as plt\n'), ((12499, 12515), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""精度"""'], {}), "('精度')\n", (12509, 12515), True, 'import matplotlib.pyplot as plt\n'), ((12516, 12545), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (12526, 12545), True, 'import matplotlib.pyplot as plt\n'), ((12631, 12654), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (12644, 12654), False, 'import winsound\n'), ((3319, 3382), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocabulary_size, embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocabulary_size, embedding_size], -1.0, 1.0)\n', (3336, 3382), True, 'import tensorflow as tf\n'), ((3546, 3573), 'tensorflow.zeros', 'tf.zeros', (['[vocabulary_size]'], {}), '([vocabulary_size])\n', (3554, 3573), True, 'import tensorflow as tf\n'), ((3986, 4042), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'x_inputs[:, element]'], {}), '(embeddings, x_inputs[:, element])\n', (4008, 4042), True, 'import tensorflow as tf\n'), ((4456, 4605), 'tensorflow.nn.nce_loss', 'tf.nn.nce_loss', ([], {'weights': 'nce_weights', 'biases': 'nce_biases', 'labels': 'y_target', 'inputs': 'final_embed', 'num_sampled': 'num_sampled', 'num_classes': 'vocabulary_size'}), '(weights=nce_weights, biases=nce_biases, labels=y_target,\n inputs=final_embed, 
num_sampled=num_sampled, num_classes=vocabulary_size)\n', (4470, 4605), True, 'import tensorflow as tf\n'), ((5583, 5656), 'text_tools.generate_batch_data', 'generate_batch_data', (['text_data', 'batch_size', 'window_size'], {'method': '"""doc2vec"""'}), "(text_data, batch_size, window_size, method='doc2vec')\n", (5602, 5656), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((7940, 7985), 'text_tools.text_to_numbers', 'text_to_numbers', (['texts_train', 'word_dictionary'], {}), '(texts_train, word_dictionary)\n', (7955, 7985), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((8013, 8057), 'text_tools.text_to_numbers', 'text_to_numbers', (['texts_test', 'word_dictionary'], {}), '(texts_test, word_dictionary)\n', (8028, 8057), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((8727, 8787), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'log_x_inputs[:, element]'], {}), '(embeddings, log_x_inputs[:, element])\n', (8749, 8787), True, 'import tensorflow as tf\n'), ((9133, 9179), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[concatenated_size, 1]'}), '(shape=[concatenated_size, 1])\n', (9149, 9179), True, 'import tensorflow as tf\n'), ((9199, 9229), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[1, 1]'}), '(shape=[1, 1])\n', (9215, 9229), True, 'import tensorflow as tf\n'), ((9308, 9337), 'tensorflow.matmul', 'tf.matmul', (['log_final_embed', 'A'], {}), '(log_final_embed, A)\n', (9317, 9337), True, 'import tensorflow as tf\n'), ((9579, 9603), 'tensorflow.sigmoid', 'tf.sigmoid', (['model_output'], {}), '(model_output)\n', (9589, 9603), True, 'import tensorflow as tf\n'), ((10193, 10261), 'numpy.random.choice', 'np.random.choice', (['text_data_train.shape[0]'], {'size': 
'logistic_batch_size'}), '(text_data_train.shape[0], size=logistic_batch_size)\n', (10209, 10261), True, 'import numpy as np\n'), ((10516, 10556), 'numpy.transpose', 'np.transpose', (['[target_train[rand_index]]'], {}), '([target_train[rand_index]])\n', (10528, 10556), True, 'import numpy as np\n'), ((12691, 12701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12699, 12701), True, 'import matplotlib.pyplot as plt\n'), ((4990, 5011), 'tensorflow.square', 'tf.square', (['embeddings'], {}), '(embeddings)\n', (4999, 5011), True, 'import tensorflow as tf\n'), ((9656, 9689), 'tensorflow.cast', 'tf.cast', (['log_y_target', 'tf.float32'], {}), '(log_y_target, tf.float32)\n', (9663, 9689), True, 'import tensorflow as tf\n'), ((10788, 10855), 'numpy.random.choice', 'np.random.choice', (['text_data_test.shape[0]'], {'size': 'logistic_batch_size'}), '(text_data_test.shape[0], size=logistic_batch_size)\n', (10804, 10855), True, 'import numpy as np\n'), ((11086, 11130), 'numpy.transpose', 'np.transpose', (['[target_test[rand_index_test]]'], {}), '([target_test[rand_index_test]])\n', (11098, 11130), True, 'import numpy as np\n'), ((12662, 12679), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (12677, 12679), True, 'import matplotlib.pyplot as plt\n'), ((4366, 4387), 'tensorflow.squeeze', 'tf.squeeze', (['doc_embed'], {}), '(doc_embed)\n', (4376, 4387), True, 'import tensorflow as tf\n'), ((6885, 6916), 'pickle.dump', 'pickle.dump', (['word_dictionary', 'f'], {}), '(word_dictionary, f)\n', (6896, 6916), False, 'import pickle\n'), ((6989, 7000), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6998, 7000), False, 'import os\n'), ((9029, 9054), 'tensorflow.squeeze', 'tf.squeeze', (['log_doc_embed'], {}), '(log_doc_embed)\n', (9039, 9054), True, 'import tensorflow as tf\n'), ((9500, 9533), 'tensorflow.cast', 'tf.cast', (['log_y_target', 'tf.float32'], {}), '(log_y_target, tf.float32)\n', (9507, 9533), True, 'import tensorflow as tf\n'), ((10466, 10500), 
'numpy.transpose', 'np.transpose', (['[rand_x_doc_indices]'], {}), '([rand_x_doc_indices])\n', (10478, 10500), True, 'import numpy as np\n'), ((3492, 3518), 'numpy.sqrt', 'np.sqrt', (['concatenated_size'], {}), '(concatenated_size)\n', (3499, 3518), True, 'import numpy as np\n'), ((6810, 6859), 'os.path.join', 'os.path.join', (['data_folder_name', '"""movie_vocab.pkl"""'], {}), "(data_folder_name, 'movie_vocab.pkl')\n", (6822, 6859), False, 'import os\n'), ((11022, 11061), 'numpy.transpose', 'np.transpose', (['[rand_x_doc_indices_test]'], {}), '([rand_x_doc_indices_test])\n', (11034, 11061), True, 'import numpy as np\n'), ((11856, 11870), 'numpy.round', 'np.round', (['x', '(2)'], {}), '(x, 2)\n', (11864, 11870), True, 'import numpy as np\n')] |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Utilities for IPU ops
~~~~~~~~~~~~~~~~~~~~~
"""
import six
from tensorflow.compiler.plugin.poplar.driver import backend_config_pb2
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ipu import scopes
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import nn_grad
from tensorflow.python.util import tf_contextlib
def SetMlType(op, ml_type):
if context.executing_eagerly():
return op
if ml_type:
operation = op if isinstance(op, ops.Operation) else op.op
attrs = xla_data_pb2.FrontendAttributes()
attr_name = backend_config_pb2.FrontendAttributeId.Name(
backend_config_pb2.FrontendAttributeId.ML_TYPE)
attrs.map[attr_name] = backend_config_pb2.MLType.Name(ml_type)
serial_attrs = attrs.SerializeToString()
operation._set_attr( # pylint: disable=protected-access
scopes.FRONTEND_ATTRIBUTES_NAME,
attr_value_pb2.AttrValue(s=serial_attrs))
return op
def SetOpAsFwd(op):
return SetMlType(op, backend_config_pb2.TRAINING_FWD)
def SetOpAsBwd(op):
return SetMlType(op, backend_config_pb2.TRAINING_BWD)
def SetOpAsWU(op):
return SetMlType(op, backend_config_pb2.TRAINING_WU)
# Override all the convolution operation gradients so that they can be annotated
# with the "ML type".
@ops.RegisterGradient("CustomConv2D")
def _CustomConv2DGrad(op, grad):
grads = nn_grad._Conv2DGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 2
SetOpAsFwd(op)
SetOpAsBwd(grads[0])
SetOpAsWU(grads[1])
return grads
@ops.RegisterGradient("CustomConv2DBackpropInput")
def _CustomConv2DBackpropInputGrad(op, grad):
grads = nn_grad._Conv2DBackpropInputGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 3
SetOpAsFwd(op)
SetOpAsBwd(grads[1])
SetOpAsWU(grads[2])
return grads
@ops.RegisterGradient("CustomConv2DBackpropFilter")
def _CustomConv2DBackpropFilterGrad(op, grad):
grads = nn_grad._Conv2DBackpropFilterGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 3
SetOpAsFwd(op)
SetOpAsBwd(grads[0])
SetOpAsWU(grads[2])
return grads
@ops.RegisterGradient("CustomDepthwiseConv2dNative")
def _CustomDepthwiseConv2dNativeGrad(op, grad):
grads = nn_grad._DepthwiseConv2dNativeGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 2
SetOpAsFwd(op)
SetOpAsBwd(grads[0])
SetOpAsWU(grads[1])
return grads
@ops.RegisterGradient("CustomDepthwiseConv2dNativeBackpropInput")
def _CustomDepthwiseConv2dNativeBackpropInputGrad(op, grad):
grads = nn_grad._DepthwiseConv2dNativeBackpropInputGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 3
SetOpAsFwd(op)
SetOpAsBwd(grads[1])
SetOpAsWU(grads[2])
return grads
@ops.RegisterGradient("CustomDepthwiseConv2dNativeBackpropFilter")
def _CustomDepthwiseConv2dNativeBackpropFilterGrad(op, grad):
grads = nn_grad._DepthwiseConv2dNativeBackpropFilterGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 3
SetOpAsFwd(op)
SetOpAsBwd(grads[0])
SetOpAsWU(grads[2])
return grads
@ops.RegisterGradient("CustomConv3D")
def _CustomConv3DGrad(op, grad):
grads = nn_grad._Conv3DGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 2
SetOpAsFwd(op)
SetOpAsBwd(grads[0])
SetOpAsWU(grads[1])
return grads
@ops.RegisterGradient("CustomConv3DBackpropInputV2")
def _CustomConv3DBackpropInputGrad(op, grad):
grads = nn_grad._Conv3DBackpropInputGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 3
SetOpAsFwd(op)
SetOpAsBwd(grads[1])
SetOpAsWU(grads[2])
return grads
@ops.RegisterGradient("CustomConv3DBackpropFilterV2")
def _CustomConv3DBackpropFilterGrad(op, grad):
grads = nn_grad._Conv3DBackpropFilterGrad(op, grad) # pylint: disable=protected-access
assert len(grads) == 3
SetOpAsFwd(op)
SetOpAsBwd(grads[0])
SetOpAsWU(grads[2])
return grads
def conv_gradients_override_map():
return {
"Conv2D":
"CustomConv2D",
"Conv2DBackpropInput":
"CustomConv2DBackpropInput",
"Conv2DBackpropFilter":
"CustomConv2DBackpropFilter",
"Conv3D":
"CustomConv3D",
"Conv3DBackpropInputV2":
"CustomConv3DBackpropInputV2",
"Conv3DBackpropFilterV2":
"CustomConv3DBackpropFilterV2",
"DepthwiseConv2dNative":
"CustomDepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropInput":
"CustomDepthwiseConv2dNativeBackpropInput",
"DepthwiseConv2dNativeBackpropFilter":
"CustomDepthwiseConv2dNativeBackpropFilter"
}
def HandleMatMulGrads(grads):
assert len(grads) == 2
# Batched matmul might have batch dimension reductions.
def look_through_reshape_reduce(output):
if output.op.type == "Reshape":
output = output.op.inputs[0]
if output.op.type == "Sum":
output = output.op.inputs[0]
return output
SetOpAsBwd(look_through_reshape_reduce(grads[0]))
SetOpAsWU(look_through_reshape_reduce(grads[1]))
return grads
@ops.RegisterGradient("CustomMatMul")
def _CustomMatMulGrad(op, grad):
grads = math_grad._MatMulGrad(op, grad) # pylint: disable=protected-access
SetOpAsFwd(op)
return HandleMatMulGrads(grads)
@ops.RegisterGradient("CustomBatchMatMul")
def _CustomBatchMatMulGrad(op, grad):
grads = math_grad._BatchMatMul(op, grad) # pylint: disable=protected-access
SetOpAsFwd(op)
return HandleMatMulGrads(grads)
@ops.RegisterGradient("CustomBatchMatMulV2")
def _CustomBatchMatMulV2Grad(op, grad):
grads = math_grad._BatchMatMulV2(op, grad) # pylint: disable=protected-access
SetOpAsFwd(op)
return HandleMatMulGrads(grads)
def matmul_gradients_override_map():
return {
"MatMul": "CustomMatMul",
"BatchMatMul": "CustomBatchMatMul",
"BatchMatMulV2": "CustomBatchMatMulV2",
}
def gradients_override_map():
return {**conv_gradients_override_map(), **matmul_gradients_override_map()}
@tf_contextlib.contextmanager
def gradient_override_scope(training):
"""Scope which configures any operations which need to be aware of whether
they are an operation in forward or backward propagation, and if the latter,
make sure that the gradient operations are annotated as a gradient with
respect to activations or as a gradient with respect to the weights.
Args:
training: whether this is a training graph.
Returns:
A context
"""
with scopes.frontend_attribute(
backend_config_pb2.FrontendAttributeId.Name(backend_config_pb2.ML_TYPE),
backend_config_pb2.MLType.Name(
backend_config_pb2.TRAINING_FWD if training else backend_config_pb2.
INFERENCE_FWD)):
with ops.get_default_graph().as_default() as g:
with g.gradient_override_map(gradients_override_map()):
yield
def get_accumulator_dtype(variable, dtype_override):
"""Get the accumulator dtype for the given variable."""
if dtype_override is None:
return variable.dtype
# Note that a `DType` is callable, so only try to call it if validation fails.
try:
return dtypes.as_dtype(dtype_override)
except TypeError:
if callable(dtype_override):
return dtypes.as_dtype(dtype_override(variable))
else:
raise
_activation_modules = set(
['tensorflow.python.keras.activations', 'tensorflow.python.ops.math_ops'])
def get_activation_name(identifier):
"Get activation name from string or activation function object"
if isinstance(identifier, six.string_types):
return identifier
elif callable(identifier):
if identifier.__module__ not in _activation_modules:
raise TypeError('Unrecognized function : '
f'{identifier.__module__}.{identifier.__name__}')
return identifier.__name__
raise TypeError(
f'Could not interpret activation function identifier: {repr(identifier)}'
)
| [
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.nn_grad._DepthwiseConv2dNativeGrad",
"tensorflow.python.ops.nn_grad._DepthwiseConv2dNativeBackpropFilterGrad",
"tensorflow.python.ops.nn_grad._Conv2DBackpropFilterGrad",
"tensorflow.python.ops.nn_grad._Conv2DBackpropInputGrad",
"te... | [((2170, 2206), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomConv2D"""'], {}), "('CustomConv2D')\n", (2190, 2206), False, 'from tensorflow.python.framework import ops\n'), ((2421, 2470), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomConv2DBackpropInput"""'], {}), "('CustomConv2DBackpropInput')\n", (2441, 2470), False, 'from tensorflow.python.framework import ops\n'), ((2711, 2761), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomConv2DBackpropFilter"""'], {}), "('CustomConv2DBackpropFilter')\n", (2731, 2761), False, 'from tensorflow.python.framework import ops\n'), ((3004, 3055), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomDepthwiseConv2dNative"""'], {}), "('CustomDepthwiseConv2dNative')\n", (3024, 3055), False, 'from tensorflow.python.framework import ops\n'), ((3300, 3364), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomDepthwiseConv2dNativeBackpropInput"""'], {}), "('CustomDepthwiseConv2dNativeBackpropInput')\n", (3320, 3364), False, 'from tensorflow.python.framework import ops\n'), ((3635, 3700), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomDepthwiseConv2dNativeBackpropFilter"""'], {}), "('CustomDepthwiseConv2dNativeBackpropFilter')\n", (3655, 3700), False, 'from tensorflow.python.framework import ops\n'), ((3973, 4009), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomConv3D"""'], {}), "('CustomConv3D')\n", (3993, 4009), False, 'from tensorflow.python.framework import ops\n'), ((4224, 4275), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomConv3DBackpropInputV2"""'], {}), "('CustomConv3DBackpropInputV2')\n", (4244, 4275), False, 'from tensorflow.python.framework import ops\n'), ((4516, 4568), 
'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomConv3DBackpropFilterV2"""'], {}), "('CustomConv3DBackpropFilterV2')\n", (4536, 4568), False, 'from tensorflow.python.framework import ops\n'), ((5898, 5934), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomMatMul"""'], {}), "('CustomMatMul')\n", (5918, 5934), False, 'from tensorflow.python.framework import ops\n'), ((6100, 6141), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomBatchMatMul"""'], {}), "('CustomBatchMatMul')\n", (6120, 6141), False, 'from tensorflow.python.framework import ops\n'), ((6313, 6356), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""CustomBatchMatMulV2"""'], {}), "('CustomBatchMatMulV2')\n", (6333, 6356), False, 'from tensorflow.python.framework import ops\n'), ((1273, 1300), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (1298, 1300), False, 'from tensorflow.python.eager import context\n'), ((2250, 2279), 'tensorflow.python.ops.nn_grad._Conv2DGrad', 'nn_grad._Conv2DGrad', (['op', 'grad'], {}), '(op, grad)\n', (2269, 2279), False, 'from tensorflow.python.ops import nn_grad\n'), ((2527, 2569), 'tensorflow.python.ops.nn_grad._Conv2DBackpropInputGrad', 'nn_grad._Conv2DBackpropInputGrad', (['op', 'grad'], {}), '(op, grad)\n', (2559, 2569), False, 'from tensorflow.python.ops import nn_grad\n'), ((2819, 2862), 'tensorflow.python.ops.nn_grad._Conv2DBackpropFilterGrad', 'nn_grad._Conv2DBackpropFilterGrad', (['op', 'grad'], {}), '(op, grad)\n', (2852, 2862), False, 'from tensorflow.python.ops import nn_grad\n'), ((3114, 3158), 'tensorflow.python.ops.nn_grad._DepthwiseConv2dNativeGrad', 'nn_grad._DepthwiseConv2dNativeGrad', (['op', 'grad'], {}), '(op, grad)\n', (3148, 3158), False, 'from tensorflow.python.ops import nn_grad\n'), ((3436, 3493), 
'tensorflow.python.ops.nn_grad._DepthwiseConv2dNativeBackpropInputGrad', 'nn_grad._DepthwiseConv2dNativeBackpropInputGrad', (['op', 'grad'], {}), '(op, grad)\n', (3483, 3493), False, 'from tensorflow.python.ops import nn_grad\n'), ((3773, 3831), 'tensorflow.python.ops.nn_grad._DepthwiseConv2dNativeBackpropFilterGrad', 'nn_grad._DepthwiseConv2dNativeBackpropFilterGrad', (['op', 'grad'], {}), '(op, grad)\n', (3821, 3831), False, 'from tensorflow.python.ops import nn_grad\n'), ((4053, 4082), 'tensorflow.python.ops.nn_grad._Conv3DGrad', 'nn_grad._Conv3DGrad', (['op', 'grad'], {}), '(op, grad)\n', (4072, 4082), False, 'from tensorflow.python.ops import nn_grad\n'), ((4332, 4374), 'tensorflow.python.ops.nn_grad._Conv3DBackpropInputGrad', 'nn_grad._Conv3DBackpropInputGrad', (['op', 'grad'], {}), '(op, grad)\n', (4364, 4374), False, 'from tensorflow.python.ops import nn_grad\n'), ((4626, 4669), 'tensorflow.python.ops.nn_grad._Conv3DBackpropFilterGrad', 'nn_grad._Conv3DBackpropFilterGrad', (['op', 'grad'], {}), '(op, grad)\n', (4659, 4669), False, 'from tensorflow.python.ops import nn_grad\n'), ((5978, 6009), 'tensorflow.python.ops.math_grad._MatMulGrad', 'math_grad._MatMulGrad', (['op', 'grad'], {}), '(op, grad)\n', (5999, 6009), False, 'from tensorflow.python.ops import math_grad\n'), ((6190, 6222), 'tensorflow.python.ops.math_grad._BatchMatMul', 'math_grad._BatchMatMul', (['op', 'grad'], {}), '(op, grad)\n', (6212, 6222), False, 'from tensorflow.python.ops import math_grad\n'), ((6407, 6441), 'tensorflow.python.ops.math_grad._BatchMatMulV2', 'math_grad._BatchMatMulV2', (['op', 'grad'], {}), '(op, grad)\n', (6431, 6441), False, 'from tensorflow.python.ops import math_grad\n'), ((1405, 1438), 'tensorflow.compiler.xla.xla_data_pb2.FrontendAttributes', 'xla_data_pb2.FrontendAttributes', ([], {}), '()\n', (1436, 1438), False, 'from tensorflow.compiler.xla import xla_data_pb2\n'), ((1455, 1551), 
'tensorflow.compiler.plugin.poplar.driver.backend_config_pb2.FrontendAttributeId.Name', 'backend_config_pb2.FrontendAttributeId.Name', (['backend_config_pb2.FrontendAttributeId.ML_TYPE'], {}), '(backend_config_pb2.\n FrontendAttributeId.ML_TYPE)\n', (1498, 1551), False, 'from tensorflow.compiler.plugin.poplar.driver import backend_config_pb2\n'), ((1583, 1622), 'tensorflow.compiler.plugin.poplar.driver.backend_config_pb2.MLType.Name', 'backend_config_pb2.MLType.Name', (['ml_type'], {}), '(ml_type)\n', (1613, 1622), False, 'from tensorflow.compiler.plugin.poplar.driver import backend_config_pb2\n'), ((7928, 7959), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['dtype_override'], {}), '(dtype_override)\n', (7943, 7959), False, 'from tensorflow.python.framework import dtypes\n'), ((1778, 1818), 'tensorflow.core.framework.attr_value_pb2.AttrValue', 'attr_value_pb2.AttrValue', ([], {'s': 'serial_attrs'}), '(s=serial_attrs)\n', (1802, 1818), False, 'from tensorflow.core.framework import attr_value_pb2\n'), ((7315, 7386), 'tensorflow.compiler.plugin.poplar.driver.backend_config_pb2.FrontendAttributeId.Name', 'backend_config_pb2.FrontendAttributeId.Name', (['backend_config_pb2.ML_TYPE'], {}), '(backend_config_pb2.ML_TYPE)\n', (7358, 7386), False, 'from tensorflow.compiler.plugin.poplar.driver import backend_config_pb2\n'), ((7394, 7511), 'tensorflow.compiler.plugin.poplar.driver.backend_config_pb2.MLType.Name', 'backend_config_pb2.MLType.Name', (['(backend_config_pb2.TRAINING_FWD if training else backend_config_pb2.\n INFERENCE_FWD)'], {}), '(backend_config_pb2.TRAINING_FWD if training else\n backend_config_pb2.INFERENCE_FWD)\n', (7424, 7511), False, 'from tensorflow.compiler.plugin.poplar.driver import backend_config_pb2\n'), ((7541, 7564), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (7562, 7564), False, 'from tensorflow.python.framework import ops\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import torch
import numpy as np
from utils import Generator
import matplotlib.pyplot as plt
from IPython.display import HTML
import torchvision.utils as vutils
import matplotlib.animation as animation
from IPython import embed
if __name__ == "__main__":
model_dir = '../checkpoints'
mids = list(range(1, 11))
fixed_noise = torch.randn(64, 100, 1, 1).cuda(0)
generator = Generator(100, 64).cuda(0)
generator = torch.nn.DataParallel(generator, device_ids=[0, 1])
imgs_list = []
for mid in mids:
checkpoints = torch.load(os.path.join(model_dir, 'epoch_%d.pth.tar' % mid))
epoch = checkpoints['epoch']
generator.load_state_dict(checkpoints['generator'])
print('epoch : %d, mid : %d' % (epoch, mid))
generator.eval()
fake = generator(fixed_noise).detach().cpu()
imgs_list.append(fake)
fig = plt.figure(figsize=(8,8))
plt.axis("off")
embed()
ims = [[plt.imshow(np.transpose(i[0],(1, 2, 0)), animated=True)] for i in imgs_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1][0],(1,2,0)))
plt.show()
| [
"utils.Generator",
"matplotlib.pyplot.title",
"IPython.embed",
"torch.nn.DataParallel",
"os.path.join",
"matplotlib.animation.ArtistAnimation",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"torch.randn",
"matplotlib.pyplot.show"
] | [((495, 546), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['generator'], {'device_ids': '[0, 1]'}), '(generator, device_ids=[0, 1])\n', (516, 546), False, 'import torch\n'), ((968, 994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (978, 994), True, 'import matplotlib.pyplot as plt\n'), ((998, 1013), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1006, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1025), 'IPython.embed', 'embed', ([], {}), '()\n', (1023, 1025), False, 'from IPython import embed\n'), ((1125, 1210), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'ims'], {'interval': '(1000)', 'repeat_delay': '(1000)', 'blit': '(True)'}), '(fig, ims, interval=1000, repeat_delay=1000, blit=True\n )\n', (1150, 1210), True, 'import matplotlib.animation as animation\n'), ((1237, 1257), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1248, 1257), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1277), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1270, 1277), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1306), 'matplotlib.pyplot.title', 'plt.title', (['"""Fake Images"""'], {}), "('Fake Images')\n", (1291, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1375), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1373, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1362), 'numpy.transpose', 'np.transpose', (['img_list[-1][0]', '(1, 2, 0)'], {}), '(img_list[-1][0], (1, 2, 0))\n', (1334, 1362), True, 'import numpy as np\n'), ((401, 427), 'torch.randn', 'torch.randn', (['(64)', '(100)', '(1)', '(1)'], {}), '(64, 100, 1, 1)\n', (412, 427), False, 'import torch\n'), ((452, 470), 'utils.Generator', 'Generator', (['(100)', '(64)'], {}), '(100, 64)\n', (461, 470), False, 'from utils import Generator\n'), ((634, 683), 'os.path.join', 'os.path.join', 
(['model_dir', "('epoch_%d.pth.tar' % mid)"], {}), "(model_dir, 'epoch_%d.pth.tar' % mid)\n", (646, 683), False, 'import os\n'), ((1049, 1078), 'numpy.transpose', 'np.transpose', (['i[0]', '(1, 2, 0)'], {}), '(i[0], (1, 2, 0))\n', (1061, 1078), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from . import common
def v_loc(x):
return 40*np.log10(x + 1)
def x_loc(x):
return 40*(np.log10(x) + 1)
def main(debug=False):
name = ['I', 'SCA', 'tfp']
suffix = ['', '', '']
df = []
for n, s in zip(name, suffix):
prec = pd.read_csv(f'results/logk_prec_{n}{s}.csv')
prec = prec.groupby(['v', 'x'])['log_err'].mean()
prec.name = f'prec_{n}'
time = pd.read_csv(f'results/logk_time_{n}{s}.csv')
time = time.groupby(['v', 'x'])['time'].mean()
time = 1000 * time
time.name = f'time_{n}'
df += [prec, time]
df = pd.concat(df, axis=1)
df['diff_prec'] = df['prec_SCA'] - df['prec_I']
df['diff_time'] = df['time_SCA'] - df['time_I']
v, x = zip(*df.index)
df['v'] = v
df['x'] = x
df1 = df[['v', 'x', 'prec_I', 'prec_SCA', 'prec_tfp']].copy()
df1.rename(columns=dict(prec_I='I', prec_SCA='SCA', prec_tfp='tfp'), inplace=True)
df1 = df1.melt(id_vars=['v','x'])
df1.rename(columns=dict(variable='type', value='prec'), inplace=True)
df2 = df[['v', 'x', 'time_I', 'time_SCA', 'time_tfp']].copy()
df2.rename(columns=dict(time_I='I', time_SCA='SCA', time_tfp='tfp'), inplace=True)
df2 = df2.melt(id_vars=['v','x'])
df2.rename(columns=dict(variable='type', value='time'), inplace=True)
type_cmap = ListedColormap(['silver', 'grey', 'black'])
type_cmap.set_under('white')
name = [['diff_prec', 'prec_SCA'], ['diff_time', 'time_SCA']]
#pos = [[[0.1, 0.85], [0.85, 0.1]], [[0.1, 0.1], [0.1, 0.85]]]
vmin = [[-1.0, 0], [-10, 0]]
vmax = [[+1.0, 2.8], [10, 28]]
cmap = [[type_cmap, 'Reds'], [type_cmap, 'Blues']]
fig = common.figure(figsize=(5.5, 4), box=debug)
ax = fig.subplots(
2, 2, sharex='col',
)
vticks = [0, 1, 5, 10, 50]
xticks = [0.1, 0.5, 1, 5, 10, 50]
label = [['a', 'c'], ['b', 'd']]
pos = [[[-0.15, 0.9], [-0.2, 0.9]],
[[-0.15, 0.9], [-0.2, 0.9]]]
for i in range(2):
for j in [0]:
hm = df[name[i][j]].unstack(0)
sns.heatmap(hm, vmin=vmin[i][j], vmax=vmax[i][j], cmap=cmap[i][j], ax=ax[i, j])
ax[i, j].invert_yaxis()
ax[i, j].set_xticks([v_loc(v) for v in vticks])
ax[i, j].set_xticklabels([f"${k}$" for k in vticks], rotation=0)
ax[i, j].xaxis.set_ticks_position('both')
ax[i, j].set_yticks([x_loc(x) for x in xticks])
ax[i, j].set_yticklabels([f"${k}$" for k in xticks])
ax[i, j].yaxis.set_ticks_position('both')
if i == 1:
ax[i, j].set_xlabel('$v$')
else:
ax[i, j].set_xlabel('')
if j == 0:
ax[i, j].set_ylabel('$x$')
else:
ax[i, j].set_ylabel('')
for i in range(2):
for j in range(2):
ax[i, j].text(*pos[i][j], label[i][j], transform=ax[i, j].transAxes)
args = dict(
color='white',
)
sns.boxenplot(x='type', y='prec', data=df1, ax=ax[0, 1], **args)
ax[0, 1].xaxis.label.set_visible(False)
ax[0, 1].set_ylabel('err ($\log (\Delta/\epsilon + 1)$)')
sns.boxenplot(x='type', y='time', data=df2, ax=ax[1, 1], **args)
ax[1, 1].set_ylim(0, 35)
ax[1, 1].set_ylabel('time (msec)')
for i in range(2):
for c in ax[i, 1].collections[1::2]:
plt.setp(c, color='k')
fig.savefig('figs/fig5.pdf')
if __name__ == '__main__':
main(debug=False)
| [
"matplotlib.pyplot.setp",
"numpy.log10",
"pandas.read_csv",
"seaborn.heatmap",
"matplotlib.colors.ListedColormap",
"seaborn.boxenplot",
"pandas.concat"
] | [((744, 765), 'pandas.concat', 'pd.concat', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (753, 765), True, 'import pandas as pd\n'), ((1477, 1520), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['silver', 'grey', 'black']"], {}), "(['silver', 'grey', 'black'])\n", (1491, 1520), False, 'from matplotlib.colors import ListedColormap\n'), ((3127, 3191), 'seaborn.boxenplot', 'sns.boxenplot', ([], {'x': '"""type"""', 'y': '"""prec"""', 'data': 'df1', 'ax': 'ax[0, 1]'}), "(x='type', y='prec', data=df1, ax=ax[0, 1], **args)\n", (3140, 3191), True, 'import seaborn as sns\n'), ((3303, 3367), 'seaborn.boxenplot', 'sns.boxenplot', ([], {'x': '"""type"""', 'y': '"""time"""', 'data': 'df2', 'ax': 'ax[1, 1]'}), "(x='type', y='time', data=df2, ax=ax[1, 1], **args)\n", (3316, 3367), True, 'import seaborn as sns\n'), ((191, 206), 'numpy.log10', 'np.log10', (['(x + 1)'], {}), '(x + 1)\n', (199, 206), True, 'import numpy as np\n'), ((399, 443), 'pandas.read_csv', 'pd.read_csv', (['f"""results/logk_prec_{n}{s}.csv"""'], {}), "(f'results/logk_prec_{n}{s}.csv')\n", (410, 443), True, 'import pandas as pd\n'), ((549, 593), 'pandas.read_csv', 'pd.read_csv', (['f"""results/logk_time_{n}{s}.csv"""'], {}), "(f'results/logk_time_{n}{s}.csv')\n", (560, 593), True, 'import pandas as pd\n'), ((238, 249), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (246, 249), True, 'import numpy as np\n'), ((2209, 2288), 'seaborn.heatmap', 'sns.heatmap', (['hm'], {'vmin': 'vmin[i][j]', 'vmax': 'vmax[i][j]', 'cmap': 'cmap[i][j]', 'ax': 'ax[i, j]'}), '(hm, vmin=vmin[i][j], vmax=vmax[i][j], cmap=cmap[i][j], ax=ax[i, j])\n', (2220, 2288), True, 'import seaborn as sns\n'), ((3517, 3539), 'matplotlib.pyplot.setp', 'plt.setp', (['c'], {'color': '"""k"""'}), "(c, color='k')\n", (3525, 3539), True, 'import matplotlib.pyplot as plt\n')] |
import pytest
from asynctb._registry import HANDLING_FOR_CODE
from asynctb._glue import ensure_installed
@pytest.fixture
def local_registry():
ensure_installed()
prev_contents = list(HANDLING_FOR_CODE.items())
yield
HANDLING_FOR_CODE.clear()
HANDLING_FOR_CODE.update(prev_contents)
@pytest.fixture
def isolated_registry(local_registry):
HANDLING_FOR_CODE.clear()
| [
"asynctb._registry.HANDLING_FOR_CODE.update",
"asynctb._registry.HANDLING_FOR_CODE.clear",
"asynctb._glue.ensure_installed",
"asynctb._registry.HANDLING_FOR_CODE.items"
] | [((149, 167), 'asynctb._glue.ensure_installed', 'ensure_installed', ([], {}), '()\n', (165, 167), False, 'from asynctb._glue import ensure_installed\n'), ((234, 259), 'asynctb._registry.HANDLING_FOR_CODE.clear', 'HANDLING_FOR_CODE.clear', ([], {}), '()\n', (257, 259), False, 'from asynctb._registry import HANDLING_FOR_CODE\n'), ((264, 303), 'asynctb._registry.HANDLING_FOR_CODE.update', 'HANDLING_FOR_CODE.update', (['prev_contents'], {}), '(prev_contents)\n', (288, 303), False, 'from asynctb._registry import HANDLING_FOR_CODE\n'), ((365, 390), 'asynctb._registry.HANDLING_FOR_CODE.clear', 'HANDLING_FOR_CODE.clear', ([], {}), '()\n', (388, 390), False, 'from asynctb._registry import HANDLING_FOR_CODE\n'), ((193, 218), 'asynctb._registry.HANDLING_FOR_CODE.items', 'HANDLING_FOR_CODE.items', ([], {}), '()\n', (216, 218), False, 'from asynctb._registry import HANDLING_FOR_CODE\n')] |
import os
from pathlib import Path
import sys
from subprocess import run, PIPE
from typing import Optional, Sequence, Iterable, List
import importlib
import traceback
from . import LazyLogger
log = LazyLogger("HPI cli")
import functools
@functools.lru_cache()
def mypy_cmd() -> Optional[Sequence[str]]:
    """Return the command line to invoke mypy, or None if mypy isn't available.

    Preference order:
    1. mypy importable in the current interpreter -- run it as a module via
       ``sys.executable -m mypy`` so it checks against this environment.
    2. a standalone ``mypy`` executable on PATH.
    3. neither -- warn and return None (cached, so the warning prints once).
    """
    try:
        # probe availability in the current environment; the module itself isn't used.
        # (previously the import was commented out and the try body was a bare
        # return, which made the PATH fallback below unreachable dead code)
        import mypy  # noqa: F401
    except ImportError:
        pass
    else:
        # use the running interpreter rather than whatever 'python3' resolves to
        return [sys.executable, "-m", "mypy"]
    # ok, not ideal but try from PATH
    import shutil
    if shutil.which("mypy"):
        return ["mypy"]
    warning(
        "mypy not found, so can't check config with it. See https://github.com/python/mypy#readme if you want to install it and retry"
    )
    return None
def run_mypy(pkg):
    """Type-check *pkg* with mypy, making the user's config dir importable.

    Returns the CompletedProcess (stdout/stderr captured) or None when mypy
    isn't available.
    """
    from .preinit import get_mycfg_dir
    mycfg_dir = get_mycfg_dir()
    # todo ugh. not sure how to extract it from pkg?
    # todo dunno maybe use the same mypy config in repository?
    # I'd need to install mypy.ini then??
    # prepend the config dir to MYPYPATH, preserving whatever the caller had set
    env = dict(os.environ)
    prev = env.get("MYPYPATH")
    env["MYPYPATH"] = str(mycfg_dir) if prev is None else f"{mycfg_dir}:{prev}"
    cmd = mypy_cmd()
    if cmd is None:
        return None
    return run(
        [
            *cmd,
            "--namespace-packages",
            "--color-output",  # not sure if works??
            "--pretty",
            "--show-error-codes",
            "--show-error-context",
            "--check-untyped-defs",
            "-p",
            pkg.__name__,
        ],
        stderr=PIPE,
        stdout=PIPE,
        env=env,
    )
def eprint(x: str):
    """Write *x* (plus a newline) to stderr, keeping stdout clean."""
    sys.stderr.write(x + "\n")
def indent(x: str) -> str:
    """Return *x* with every line prefixed by three spaces.

    Line endings are preserved (keepends), including a missing trailing
    newline on the last line.
    """
    pieces = ["   " + line for line in x.splitlines(keepends=True)]
    return "".join(pieces)
OK = "✅"  # status prefix for successful checks (used by info())
OFF = "🔲"  # status prefix for skipped/disabled items (used by modules_check())
def info(x: str):
    """Report a success to stderr, prefixed with the OK marker."""
    eprint(f"{OK} {x}")
def error(x: str):
    """Report a failure to stderr, prefixed with a cross mark."""
    eprint(f"❌ {x}")
def warning(x: str):
    """Report a non-fatal problem to stderr."""
    # todo yellow?
    eprint(f"❗ {x}")
def tb(e):
    """Print the formatted traceback of exception *e* to stderr, indented."""
    # use the exception's actual type: passing the hardcoded Exception class as
    # etype was misleading (format_exception derives everything from the value,
    # so it happened to work, but it read as if the type were being discarded).
    # Also: the local was previously named 'tb', shadowing this function.
    formatted = "".join(traceback.format_exception(type(e), e, e.__traceback__))
    sys.stderr.write(indent(formatted))
# todo not gonna work on Windows... perhaps make it optional and use colorama/termcolor? (similar to core.warnings)
class color:
    """ANSI SGR escape sequences; emit RESET afterwards to restore the terminal."""
    BLACK = "\033[30m"
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    BLUE = "\033[34m"
    MAGENTA = "\033[35m"
    CYAN = "\033[36m"
    WHITE = "\033[37m"
    UNDERLINE = "\033[4m"  # SGR 4 (underline) -- an attribute, not a color
    RESET = "\033[0m"  # SGR 0: reset all attributes
def config_create(args) -> None:
    """Create an empty user config package and immediately verify it.

    Creates ``<config dir>/my/config/__init__.py`` if the config directory
    doesn't exist yet. Exits with status 1 if the directory already existed
    or if the subsequent config check fails.
    """
    from .preinit import get_mycfg_dir
    cfg_dir = get_mycfg_dir()
    if cfg_dir.exists():
        error(f"config directory '{cfg_dir}' already exists, skipping creation")
        created = False
    else:
        # todo not sure about the layout... should I use my/config.py instead?
        init_py = cfg_dir / "my" / "config" / "__init__.py"
        init_py.parent.mkdir(parents=True)
        init_py.touch()
        info(f"created empty config: {init_py}")
        created = True
    check_passed = config_ok(args)
    if not (created and check_passed):
        sys.exit(1)
def config_check_cli(args) -> None:
    """CLI entry point for 'config check': exit 0 on success, 1 on failure."""
    ok = config_ok(args)
    # BUG FIX: this used to be `sys.exit(0 if ok else False)` -- bool is an int
    # subclass and False == 0, so a *failed* check still exited with status 0
    # (success). Use an explicit non-zero status instead.
    sys.exit(0 if ok else 1)
# TODO return the config as a result?
def config_ok(args):
    """Check that the user's ``my.config`` imports cleanly and type-checks.

    Returns True when everything passed, False when errors were collected,
    and None when mypy isn't available (the check couldn't be completed).
    Exits with status 1 immediately if the config can't be imported at all.
    """
    errors: List[Exception] = []
    try:
        import my.config as cfg
    except Exception as e:
        errors.append(e)
        error("failed to import the config")
        tb(e)
        sys.exit(1)  # todo yield exception here? so it doesn't fail immediately..
    cfg_path = cfg.__file__  # todo might be better to use __path__?
    info(f"config file: {cfg_path}")
    import my.core as core
    try:
        # if the resolved config file lives inside the my.core distribution
        # itself, the user hasn't set up their own config yet
        core_pkg_path = str(Path(core.__path__[0]).parent)  # type: ignore[attr-defined]
        if cfg_path.startswith(core_pkg_path):
            error(
                f"""
Seems that the default config is used ({cfg_path}).
See https://github.com/karlicoss/HPI/blob/master/doc/SETUP.org#setting-up-modules for more information
""".strip()
            )
    except Exception as e:
        errors.append(e)
        tb(e)
    mres = run_mypy(cfg)
    if mres is None:  # no mypy
        # NOTE(review): returning None reads as falsy to callers, so a missing
        # mypy counts as a failed check -- confirm that's intended
        return
    rc = mres.returncode
    if rc == 0:
        info("mypy config check: success")
    else:
        error("mypy config check: failed")
        # surface mypy's diagnostics, indented for readability
        sys.stderr.write(indent(mres.stderr.decode("utf8")))
        sys.stderr.write(indent(mres.stdout.decode("utf8")))
    if len(errors) > 0:
        error(f"config check: {len(errors)} errors")
        return False
    else:
        info("config check: success!")
        return True
def _modules(all=False):
    """Yield discovered HPI modules; disabled ones are skipped unless *all* is set.

    A summary warning about skipped modules is emitted once the generator
    is exhausted.
    """
    from .util import modules

    skipped = []
    for module in modules():
        if all or module.skip_reason is None:
            yield module
        else:
            skipped.append(module.name)

    if skipped:
        warning(
            f"Skipped {len(skipped)} modules: {skipped}. Pass --all if you want to see them."
        )
def modules_check(args) -> None:
    """Import each (selected) module and run its `stats` function, reporting
    per-module success/failure on stderr.

    When a single module is requested via ``args.module``, verbose output is
    forced on.
    """
    verbose: bool = args.verbose
    quick: bool = args.quick
    module: Optional[str] = args.module
    if module is not None:
        verbose = True
    vw = "" if verbose else "; pass --verbose to print more information"

    from . import common

    common.QUICK_STATS = quick  # dirty, but hopefully OK for cli

    tabulate_warnings()

    from .util import get_stats, HPIModule

    mods: Iterable[HPIModule]
    if module is None:
        mods = _modules(all=args.all)
    else:
        # Single explicit module: bypass discovery and the skip-reason filter.
        mods = [HPIModule(name=module, skip_reason=None)]

    # todo add a --all argument to disregard is_active check?
    for mr in mods:
        skip = mr.skip_reason
        m = mr.name
        if skip is not None:
            eprint(OFF + f" {color.YELLOW}SKIP{color.RESET}: {m:<50} {skip}")
            continue

        try:
            importlib.import_module(m)
        except Exception as e:
            # todo more specific command?
            error(f"{color.RED}FAIL{color.RESET}: {m:<50} loading failed{vw}")
            if verbose:
                tb(e)
            continue

        info(f"{color.GREEN}OK{color.RESET} : {m:<50}")
        stats = get_stats(m)
        if stats is None:
            eprint(" - no 'stats' function, can't check the data")
            continue

        try:
            res = stats()
            assert res is not None, "stats() returned None"
        except Exception as ee:
            warning(
                f" - {color.RED}stats:{color.RESET} computing failed{vw}"
            )
            if verbose:
                tb(ee)
        else:
            info(f" - stats: {res}")
def list_modules(args) -> None:
    """Print one line per discovered module, marking disabled ones with their skip reason."""
    # todo add a --sort argument?
    tabulate_warnings()

    for mr in _modules(all=args.all):
        name = mr.name
        reason = mr.skip_reason
        if reason is None:
            marker, suffix = OK, ""
        else:
            marker, suffix = OFF, f" {color.YELLOW}[disabled: {reason}]{color.RESET}"
        print(f"{marker} {name:50}{suffix}")
def tabulate_warnings() -> None:
    """
    Helper to avoid visual noise in hpi modules/doctor
    """
    import warnings

    original_format = warnings.formatwarning

    def indented_format(*args, **kwargs):
        formatted = original_format(*args, **kwargs)
        return "".join(" " + x for x in formatted.splitlines(keepends=True))

    warnings.formatwarning = indented_format
    # TODO loggers as well?
# todo check that it finds private modules too?
def doctor(args) -> None:
    """Run the full set of sanity checks (currently only the modules check)."""
    # ok = config_ok(args)
    modules_check(args)
def parser():
    """Build the argparse parser for the HPI command-line tool.

    Fixes the user-facing help string of ``doctor --verbose``, which read
    "Print more diagnosic infomration" (two typos).
    """
    from argparse import ArgumentParser

    p = ArgumentParser(
        "Human Programming Interface",
        epilog="""
Tool for HPI.

Work in progress, will be used for config management, troubleshooting & introspection
""",
    )
    sp = p.add_subparsers(dest="mode")

    # `hpi doctor [...]` — import modules and run their stats functions.
    dp = sp.add_parser("doctor", help="Run various checks")
    dp.add_argument(
        "--verbose", action="store_true", help="Print more diagnostic information"
    )
    dp.add_argument(
        "--all", action="store_true", help="List all modules, including disabled"
    )
    dp.add_argument(
        "--quick", action="store_true", help="Only run partial checks (first 100 items)"
    )
    dp.add_argument(
        "module", nargs="?", type=str, help="Pass to check a specific module"
    )
    dp.set_defaults(func=doctor)

    # `hpi config check|create` — manage the user's config package.
    cp = sp.add_parser("config", help="Work with configuration")
    scp = cp.add_subparsers(dest="mode")
    if True:
        ccp = scp.add_parser("check", help="Check config")
        ccp.set_defaults(func=config_check_cli)

        icp = scp.add_parser("create", help="Create user config")
        icp.set_defaults(func=config_create)

    # `hpi modules` — list what was discovered.
    mp = sp.add_parser("modules", help="List available modules")
    mp.add_argument(
        "--all", action="store_true", help="List all modules, including disabled"
    )
    mp.set_defaults(func=list_modules)

    return p
def main():
    """CLI entry point: parse arguments and dispatch to the selected sub-command."""
    cli = parser()
    args = cli.parse_args()

    func = getattr(args, "func", None)
    if func is None:
        cli.print_help()
        sys.exit(1)

    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        # cd into tmp dir to prevent accidental imports..
        os.chdir(str(tmp_dir))
        func(args)
if __name__ == "__main__":
    # Allow invoking this module directly as a script.
    main()
| [
"tempfile.TemporaryDirectory",
"importlib.import_module",
"argparse.ArgumentParser",
"pathlib.Path",
"shutil.which",
"subprocess.run",
"traceback.format_exception",
"sys.exit",
"functools.lru_cache"
] | [((244, 265), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (263, 265), False, 'import functools\n'), ((525, 545), 'shutil.which', 'shutil.which', (['"""mypy"""'], {}), "('mypy')\n", (537, 545), False, 'import shutil\n'), ((1219, 1417), 'subprocess.run', 'run', (["[*cmd, '--namespace-packages', '--color-output', '--pretty',\n '--show-error-codes', '--show-error-context', '--check-untyped-defs',\n '-p', pkg.__name__]"], {'stderr': 'PIPE', 'stdout': 'PIPE', 'env': 'env'}), "([*cmd, '--namespace-packages', '--color-output', '--pretty',\n '--show-error-codes', '--show-error-context', '--check-untyped-defs',\n '-p', pkg.__name__], stderr=PIPE, stdout=PIPE, env=env)\n", (1222, 1417), False, 'from subprocess import run, PIPE\n'), ((3115, 3143), 'sys.exit', 'sys.exit', (['(0 if ok else False)'], {}), '(0 if ok else False)\n', (3123, 3143), False, 'import sys\n'), ((7572, 7744), 'argparse.ArgumentParser', 'ArgumentParser', (['"""Human Programming Interface"""'], {'epilog': '"""\nTool for HPI.\n\nWork in progress, will be used for config management, troubleshooting & introspection\n"""'}), '(\'Human Programming Interface\', epilog=\n """\nTool for HPI.\n\nWork in progress, will be used for config management, troubleshooting & introspection\n"""\n )\n', (7586, 7744), False, 'from argparse import ArgumentParser\n'), ((1953, 2010), 'traceback.format_exception', 'traceback.format_exception', (['Exception', 'e', 'e.__traceback__'], {}), '(Exception, e, e.__traceback__)\n', (1979, 2010), False, 'import traceback\n'), ((3036, 3047), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3044, 3047), False, 'import sys\n'), ((9045, 9056), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9053, 9056), False, 'import sys\n'), ((9088, 9117), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9115, 9117), False, 'import tempfile\n'), ((3398, 3409), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3406, 3409), False, 'import sys\n'), ((5805, 
5831), 'importlib.import_module', 'importlib.import_module', (['m'], {}), '(m)\n', (5828, 5831), False, 'import importlib\n'), ((3646, 3668), 'pathlib.Path', 'Path', (['core.__path__[0]'], {}), '(core.__path__[0])\n', (3650, 3668), False, 'from pathlib import Path\n')] |
import symjax
import symjax.tensor as T
import matplotlib.pyplot as plt
import numpy as np
# Wavelet filter-bank demo: J octaves with Q intermediate scales per octave.
J = 5
Q = 4

# Geometrically spaced scales; reshaped to a column so one wavelet per
# scale is built by broadcasting below.
scales = T.power(2, T.linspace(0.1, J - 1, J * Q))
scales = scales[:, None]

print(scales.get())

# Time-domain complex Morlet bank and its closed-form Fourier counterpart.
wavelet = symjax.tensor.signal.complex_morlet(5 * scales, np.pi / scales)
waveletw = symjax.tensor.signal.fourier_complex_morlet(
    5 * scales, np.pi / scales, wavelet.shape[-1]
)
# Littlewood–Paley normalization so the bank's summed energy is roughly flat.
waveletlp = symjax.tensor.signal.littewood_paley_normalization(
    waveletw, down=np.pi / scales[-1, 0]
)

# Materialize the symbolic tensors as numpy arrays for plotting.
wavelet = wavelet.get()
waveletw = waveletw.get()
waveletlp = waveletlp.get()

# (321): spectra of the time-domain filters (real/imag, offset per filter).
plt.subplot(321)
for i in range(J * Q):
    fr = np.real(np.fft.fft(np.fft.ifftshift(wavelet[i])))
    fi = np.imag(np.fft.fft(np.fft.ifftshift(wavelet[i])))
    plt.plot(i + fr, "--b")
    plt.plot(i + fi, "--r")

# (322): the time-domain filters themselves.
plt.subplot(322)
for i in range(J * Q):
    plt.plot(2 * i + wavelet[i].real, c="b")
    plt.plot(2 * i + wavelet[i].imag, c="r")

# (324): impulse responses recovered from the Fourier-defined bank.
plt.subplot(324)
for i in range(J * Q):
    fr = np.real(np.fft.fftshift(np.fft.ifft(waveletw[i])))
    fi = np.imag(np.fft.fftshift(np.fft.ifft(waveletw[i])))
    plt.plot(2 * i + fr / fr.max(), "--b")
    plt.plot(2 * i + fi / fi.max(), "--r")

# (323): the Fourier-defined bank in the frequency domain.
plt.subplot(323)
for i in range(J * Q):
    plt.plot(i + waveletw[i].real, c="b")
    plt.plot(i + waveletw[i].imag, c="r")

# (325): normalized bank plus its summed magnitude (green) — should be ~flat.
plt.subplot(325)
for i in range(J * Q):
    plt.plot(i + waveletlp[i].real, c="b")
    plt.plot(i + waveletlp[i].imag, c="r")
plt.plot(np.abs(waveletlp).sum(0), c="g")

# (326): impulse responses of the normalized bank.
plt.subplot(326)
for i in range(J * Q):
    fr = np.real(np.fft.fftshift(np.fft.ifft(waveletlp[i])))
    fi = np.imag(np.fft.fftshift(np.fft.ifft(waveletlp[i])))
    plt.plot(2 * i + fr / fr.max(), "--b")
    plt.plot(2 * i + fi / fi.max(), "--r")

# plt.show()
plt.savefig("wavelets.png")
| [
"numpy.abs",
"matplotlib.pyplot.savefig",
"symjax.tensor.linspace",
"symjax.tensor.signal.littewood_paley_normalization",
"symjax.tensor.signal.complex_morlet",
"matplotlib.pyplot.plot",
"numpy.fft.ifft",
"numpy.fft.ifftshift",
"matplotlib.pyplot.subplot",
"symjax.tensor.signal.fourier_complex_mor... | [((212, 275), 'symjax.tensor.signal.complex_morlet', 'symjax.tensor.signal.complex_morlet', (['(5 * scales)', '(np.pi / scales)'], {}), '(5 * scales, np.pi / scales)\n', (247, 275), False, 'import symjax\n'), ((287, 381), 'symjax.tensor.signal.fourier_complex_morlet', 'symjax.tensor.signal.fourier_complex_morlet', (['(5 * scales)', '(np.pi / scales)', 'wavelet.shape[-1]'], {}), '(5 * scales, np.pi / scales,\n wavelet.shape[-1])\n', (330, 381), False, 'import symjax\n'), ((396, 488), 'symjax.tensor.signal.littewood_paley_normalization', 'symjax.tensor.signal.littewood_paley_normalization', (['waveletw'], {'down': '(np.pi / scales[-1, 0])'}), '(waveletw, down=np.pi /\n scales[-1, 0])\n', (446, 488), False, 'import symjax\n'), ((572, 588), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (583, 588), True, 'import matplotlib.pyplot as plt\n'), ((787, 803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(322)'], {}), '(322)\n', (798, 803), True, 'import matplotlib.pyplot as plt\n'), ((918, 934), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (929, 934), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1181), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (1176, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1306), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (1301, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(326)'], {}), '(326)\n', (1470, 1475), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""wavelets.png"""'], {}), "('wavelets.png')\n", (1733, 1749), True, 'import matplotlib.pyplot as plt\n'), ((124, 153), 'symjax.tensor.linspace', 'T.linspace', (['(0.1)', '(J - 1)', '(J * Q)'], {}), '(0.1, J - 1, J * Q)\n', (134, 153), True, 'import symjax.tensor as T\n'), ((734, 757), 
'matplotlib.pyplot.plot', 'plt.plot', (['(i + fr)', '"""--b"""'], {}), "(i + fr, '--b')\n", (742, 757), True, 'import matplotlib.pyplot as plt\n'), ((762, 785), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + fi)', '"""--r"""'], {}), "(i + fi, '--r')\n", (770, 785), True, 'import matplotlib.pyplot as plt\n'), ((831, 871), 'matplotlib.pyplot.plot', 'plt.plot', (['(2 * i + wavelet[i].real)'], {'c': '"""b"""'}), "(2 * i + wavelet[i].real, c='b')\n", (839, 871), True, 'import matplotlib.pyplot as plt\n'), ((876, 916), 'matplotlib.pyplot.plot', 'plt.plot', (['(2 * i + wavelet[i].imag)'], {'c': '"""r"""'}), "(2 * i + wavelet[i].imag, c='r')\n", (884, 916), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1246), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletw[i].real)'], {'c': '"""b"""'}), "(i + waveletw[i].real, c='b')\n", (1217, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1288), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletw[i].imag)'], {'c': '"""r"""'}), "(i + waveletw[i].imag, c='r')\n", (1259, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1372), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletlp[i].real)'], {'c': '"""b"""'}), "(i + waveletlp[i].real, c='b')\n", (1342, 1372), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1415), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletlp[i].imag)'], {'c': '"""r"""'}), "(i + waveletlp[i].imag, c='r')\n", (1385, 1415), True, 'import matplotlib.pyplot as plt\n'), ((640, 668), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['wavelet[i]'], {}), '(wavelet[i])\n', (656, 668), True, 'import numpy as np\n'), ((699, 727), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['wavelet[i]'], {}), '(wavelet[i])\n', (715, 727), True, 'import numpy as np\n'), ((991, 1015), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletw[i]'], {}), '(waveletw[i])\n', (1002, 1015), True, 'import numpy as np\n'), ((1051, 1075), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletw[i]'], {}), '(waveletw[i])\n', (1062, 1075), 
True, 'import numpy as np\n'), ((1425, 1442), 'numpy.abs', 'np.abs', (['waveletlp'], {}), '(waveletlp)\n', (1431, 1442), True, 'import numpy as np\n'), ((1532, 1557), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletlp[i]'], {}), '(waveletlp[i])\n', (1543, 1557), True, 'import numpy as np\n'), ((1593, 1618), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletlp[i]'], {}), '(waveletlp[i])\n', (1604, 1618), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
class MatplotlibWidget(FigureCanvas):
    """Qt widget embedding a matplotlib figure via the Qt4Agg canvas."""

    def __init__(self, figure, parent):
        """Bind *figure* to the canvas and parent the widget into *parent*."""
        super(MatplotlibWidget, self).__init__(figure)
        self.setParent(parent)
        self.fig = figure

    def close_figure(self):
        """Clear and close the held figure, then drop the reference."""
        if not self.fig:
            return
        self.fig.clf()
        plt.close(self.fig)
        self.fig = None
| [
"matplotlib.pyplot.close"
] | [((619, 638), 'matplotlib.pyplot.close', 'plt.close', (['self.fig'], {}), '(self.fig)\n', (628, 638), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
from cms.utils.i18n import get_default_language
from django.conf import settings
from django.core.urlresolvers import reverse
from django.middleware.locale import LocaleMiddleware
from django.utils import translation
import re
import urllib
class DummyMultilingualURLMiddleware(object):
    """Middleware stub that forces every request into one fixed language."""

    def get_language_from_request(self, request):
        # Hard-coded: every request is treated as 'zh_cn'.
        return 'zh_cn'

    def process_request(self, request):
        """Activate the detected language and record it on the request."""
        lang = self.get_language_from_request(request)
        translation.activate(lang)
        request.LANGUAGE_CODE = lang

    def process_response(self, request, response):
        # Pass-through: no response post-processing is performed.
        return response
| [
"django.utils.translation.activate"
] | [((512, 542), 'django.utils.translation.activate', 'translation.activate', (['language'], {}), '(language)\n', (532, 542), False, 'from django.utils import translation\n')] |
"""Publication model."""
# Django
from django.db import models
# Utilities
from apartacho.utils.models import ApartachoModel
from apartacho.properties.models import Property
from apartacho.users.models import User
class Publication(ApartachoModel):
    """Publication model.

    A property listing owned by an admin user; the boolean flags control
    whether it is published and/or active.
    """

    is_published = models.BooleanField(
        verbose_name='published',
        default=False,
        help_text='Set to true when the publication is published'
    )

    is_active = models.BooleanField(
        verbose_name='active',
        default=True,
        help_text='Set to true when the publication is active'
    )

    description = models.CharField(
        verbose_name='description',
        max_length=150,
        help_text='Set to description of publication'
    )

    property = models.ForeignKey(
        Property,
        on_delete=models.CASCADE,
        help_text='Set relationship with property'
    )

    user_admin = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        help_text='Set relationship with user admin'
    )

    def __str__(self):
        """Return the publication description."""
        return self.description
| [
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((302, 426), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""published"""', 'default': '(False)', 'help_text': '"""Set to true when the publication is published"""'}), "(verbose_name='published', default=False, help_text=\n 'Set to true when the publication is published')\n", (321, 426), False, 'from django.db import models\n'), ((469, 586), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'verbose_name': '"""active"""', 'default': '(True)', 'help_text': '"""Set to true when the publication is active"""'}), "(verbose_name='active', default=True, help_text=\n 'Set to true when the publication is active')\n", (488, 586), False, 'from django.db import models\n'), ((631, 743), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""description"""', 'max_length': '(150)', 'help_text': '"""Set to description of publication"""'}), "(verbose_name='description', max_length=150, help_text=\n 'Set to description of publication')\n", (647, 743), False, 'from django.db import models\n'), ((785, 887), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Property'], {'on_delete': 'models.CASCADE', 'help_text': '"""Set relationship with property"""'}), "(Property, on_delete=models.CASCADE, help_text=\n 'Set relationship with property')\n", (802, 887), False, 'from django.db import models\n'), ((931, 1031), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'help_text': '"""Set relationship with user admin"""'}), "(User, on_delete=models.CASCADE, help_text=\n 'Set relationship with user admin')\n", (948, 1031), False, 'from django.db import models\n')] |
from sqlalchemy import Column, String
from app.api.database.models import Base
def make_subscriber_table(name: str) -> type:
    """
    Return a new SQLAlchemy Table object representing the lichess - twitch pairs for a certain user.
    """

    class Pair(Base):
        __tablename__ = name
        __table_args__ = {'extend_existing': True}

        # Twitch handle is the primary key; the paired lichess name is required.
        twitch = Column(String(25), primary_key=True)
        lichess = Column(String(20), nullable=False)

        def dict(self):
            # Map every declared column name to its current value on this row.
            return {col.name: getattr(self, col.name) for col in self.__table__.columns}

        def __repr__(self):
            label = self.__tablename__.capitalize()
            return f'{label} subscriber(twitch={self.twitch}, lichess={self.lichess})'

    return Pair
| [
"sqlalchemy.String"
] | [((371, 381), 'sqlalchemy.String', 'String', (['(25)'], {}), '(25)\n', (377, 381), False, 'from sqlalchemy import Column, String\n'), ((426, 436), 'sqlalchemy.String', 'String', (['(20)'], {}), '(20)\n', (432, 436), False, 'from sqlalchemy import Column, String\n')] |
# coding: utf-8
import asyncio
from concurrent.futures._base import CancelledError
import json
import typing
import aiohttp
from aiohttp import web
from aiohttp.web_request import Request
from rolling.exception import DisconnectClient
from rolling.exception import UnableToProcessEvent
from rolling.exception import UnknownEvent
from rolling.log import server_logger
from rolling.model.event import EmptyData
from rolling.model.event import ZoneEvent
from rolling.model.event import ZoneEventType
from rolling.model.serializer import ZoneEventSerializerFactory
from rolling.server.zone.event import EventProcessorFactory
if typing.TYPE_CHECKING:
from rolling.kernel import Kernel
class ZoneEventsManager:
    """Tracks the websocket clients of each map zone and dispatches their events."""

    def __init__(self, kernel: "Kernel", loop: asyncio.AbstractEventLoop) -> None:
        # One list of open sockets per (row, col) zone coordinate.
        self._sockets: typing.Dict[typing.Tuple[int, int], typing.List[web.WebSocketResponse]] = {}
        self._event_processor_factory = EventProcessorFactory(kernel, self)
        self._event_serializer_factory = ZoneEventSerializerFactory()
        self._loop = loop or asyncio.get_event_loop()
        self._kernel = kernel

    async def get_new_socket(
        self, request: Request, row_i: int, col_i: int
    ) -> web.WebSocketResponse:
        """Open, register and listen on a websocket for zone (row_i, col_i).

        Blocks until the client disconnects (or listening is cancelled);
        the socket is unregistered before returning.
        """
        server_logger.info(f"Create websocket for zone {row_i},{col_i}")

        # Create socket
        socket = web.WebSocketResponse()
        await socket.prepare(request)

        # TODO BS 2019-01-23: Implement a heartbeat to close sockets where client disapear
        # see https://github.com/aio-libs/aiohttp/issues/961#issuecomment-239647597
        # Something lik asyncio.ensure_future(self._heartbeat(ws))

        # Make it available for send job
        self._sockets.setdefault((row_i, col_i), []).append(socket)

        # Start to listen client messages
        try:
            await self._listen(socket, row_i, col_i)
        except CancelledError:
            server_logger.debug(f"websocket ({row_i},{col_i}) seems cancelled")

        # If this code reached: ws is disconnected
        server_logger.debug(f"remove websocket ({row_i},{col_i})")
        self._sockets[(row_i, col_i)].remove(socket)

        return socket

    async def _listen(self, socket: web.WebSocketResponse, row_i: int, col_i: int) -> None:
        """Consume messages from *socket* until the client closes or asks to leave."""
        server_logger.info(f"Listen websocket for zone {row_i},{col_i}")
        async for msg in socket:
            server_logger.debug(f"Receive message on websocket for zone {row_i},{col_i}: {msg}")

            if msg.type == aiohttp.WSMsgType.ERROR:
                server_logger.error(f"Zone websocket closed with exception {socket.exception()}")
            else:
                try:
                    await self._process_msg(row_i, col_i, msg, socket)
                except DisconnectClient:
                    # Client requested to leave: acknowledge, then stop listening.
                    await socket.send_str(
                        self._event_serializer_factory.get_serializer(
                            ZoneEventType.SERVER_PERMIT_CLOSE
                        ).dump_json(
                            ZoneEvent(type=ZoneEventType.SERVER_PERMIT_CLOSE, data=EmptyData())
                        )
                    )
                    return

        server_logger.info(f"Websocket of zone {row_i},{col_i} closed")

    async def _process_msg(
        self, row_i: int, col_i: int, msg, socket: web.WebSocketResponse
    ) -> None:
        """Deserialize a raw websocket message into a ZoneEvent and process it."""
        event_dict = json.loads(msg.data)
        event_type = ZoneEventType(event_dict["type"])
        event = self._event_serializer_factory.get_serializer(event_type).load(event_dict)
        await self._process_event(row_i, col_i, event, socket)

    async def _process_event(
        self, row_i: int, col_i: int, event: ZoneEvent, socket: web.WebSocketResponse
    ) -> None:
        """Dispatch *event* to its processor; report processing errors back to the sender."""
        try:
            event_processor = self._event_processor_factory.get_processor(event.type)
        except UnknownEvent:
            server_logger.warning(f"Unknown received event type '{event.type}'")
            return

        try:
            await event_processor.process(row_i, col_i, event, sender_socket=socket)
        except UnableToProcessEvent as exc:
            server_logger.debug(f"Unable to process event {event.type}: {str(exc)}")

            exception_event = exc.event
            exception_event_str = self._event_serializer_factory.get_serializer(
                exception_event.type
            ).dump_json(exception_event)

            # FIXME: do kept this feature ?
            await socket.send_str(exception_event_str)

    def get_sockets(self, row_i: int, col_i: int) -> typing.Iterable[web.WebSocketResponse]:
        """Yield the sockets currently registered for zone (row_i, col_i)."""
        for socket in self._sockets.get((row_i, col_i), []):
            yield socket
| [
"json.loads",
"rolling.log.server_logger.warning",
"rolling.model.event.ZoneEventType",
"rolling.model.serializer.ZoneEventSerializerFactory",
"rolling.log.server_logger.debug",
"rolling.server.zone.event.EventProcessorFactory",
"rolling.log.server_logger.info",
"asyncio.get_event_loop",
"rolling.mo... | [((937, 972), 'rolling.server.zone.event.EventProcessorFactory', 'EventProcessorFactory', (['kernel', 'self'], {}), '(kernel, self)\n', (958, 972), False, 'from rolling.server.zone.event import EventProcessorFactory\n'), ((1014, 1042), 'rolling.model.serializer.ZoneEventSerializerFactory', 'ZoneEventSerializerFactory', ([], {}), '()\n', (1040, 1042), False, 'from rolling.model.serializer import ZoneEventSerializerFactory\n'), ((1253, 1317), 'rolling.log.server_logger.info', 'server_logger.info', (['f"""Create websocket for zone {row_i},{col_i}"""'], {}), "(f'Create websocket for zone {row_i},{col_i}')\n", (1271, 1317), False, 'from rolling.log import server_logger\n'), ((1360, 1383), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (1381, 1383), False, 'from aiohttp import web\n'), ((2055, 2113), 'rolling.log.server_logger.debug', 'server_logger.debug', (['f"""remove websocket ({row_i},{col_i})"""'], {}), "(f'remove websocket ({row_i},{col_i})')\n", (2074, 2113), False, 'from rolling.log import server_logger\n'), ((2291, 2355), 'rolling.log.server_logger.info', 'server_logger.info', (['f"""Listen websocket for zone {row_i},{col_i}"""'], {}), "(f'Listen websocket for zone {row_i},{col_i}')\n", (2309, 2355), False, 'from rolling.log import server_logger\n'), ((3181, 3244), 'rolling.log.server_logger.info', 'server_logger.info', (['f"""Websocket of zone {row_i},{col_i} closed"""'], {}), "(f'Websocket of zone {row_i},{col_i} closed')\n", (3199, 3244), False, 'from rolling.log import server_logger\n'), ((3383, 3403), 'json.loads', 'json.loads', (['msg.data'], {}), '(msg.data)\n', (3393, 3403), False, 'import json\n'), ((3425, 3458), 'rolling.model.event.ZoneEventType', 'ZoneEventType', (["event_dict['type']"], {}), "(event_dict['type'])\n", (3438, 3458), False, 'from rolling.model.event import ZoneEventType\n'), ((1072, 1096), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1094, 1096), False, 'import 
asyncio\n'), ((2401, 2490), 'rolling.log.server_logger.debug', 'server_logger.debug', (['f"""Receive message on websocket for zone {row_i},{col_i}: {msg}"""'], {}), "(\n f'Receive message on websocket for zone {row_i},{col_i}: {msg}')\n", (2420, 2490), False, 'from rolling.log import server_logger\n'), ((1927, 1994), 'rolling.log.server_logger.debug', 'server_logger.debug', (['f"""websocket ({row_i},{col_i}) seems cancelled"""'], {}), "(f'websocket ({row_i},{col_i}) seems cancelled')\n", (1946, 1994), False, 'from rolling.log import server_logger\n'), ((3885, 3953), 'rolling.log.server_logger.warning', 'server_logger.warning', (['f"""Unknown received event type \'{event.type}\'"""'], {}), '(f"Unknown received event type \'{event.type}\'")\n', (3906, 3953), False, 'from rolling.log import server_logger\n'), ((3084, 3095), 'rolling.model.event.EmptyData', 'EmptyData', ([], {}), '()\n', (3093, 3095), False, 'from rolling.model.event import EmptyData\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contentos_sdk.key_store import KeyStore
def test_key_store():
    """Round-trip keys through a KeyStore, including encrypted file persistence."""
    store = KeyStore()
    store.add_key("account1", "key1")
    store.add_key("account2", "key2")
    assert store.get_key("account1") == "key1"
    assert store.get_accounts() == ["account1", "account2"]

    store.remove_key("account2")
    assert store.get_accounts() == ["account1"]

    reloaded = KeyStore()
    store.dump_to_file("dummy", b"badpasswd")
    reloaded.load_from_file("dummy", b"badpasswd")
    assert reloaded.get_accounts() == store.get_accounts()
| [
"contentos_sdk.key_store.KeyStore"
] | [((133, 143), 'contentos_sdk.key_store.KeyStore', 'KeyStore', ([], {}), '()\n', (141, 143), False, 'from contentos_sdk.key_store import KeyStore\n'), ((451, 461), 'contentos_sdk.key_store.KeyStore', 'KeyStore', ([], {}), '()\n', (459, 461), False, 'from contentos_sdk.key_store import KeyStore\n')] |
'''
Authors: <NAME>, <NAME>, <NAME>
'''
# FIXME Behebe möglichen fehler mit Flask:
# https://github.com/flask-restful/flask-restful/pull/913
# import flask.scaffold
# flask.helpers._endpoint_from_view_func = flask.scaffold._endpoint_from_view_funcfrom flask import Flask, request, make_response
from flask import Flask, request, make_response
from flask_cors import CORS
from dotenv import load_dotenv
from os import environ as env
import requests
import json
app = Flask(__name__)
# Allow cross-origin requests and let browsers include credentials (cookies).
CORS(app, supports_credentials=True)
# The Proxy receives all API-Requests and forwards them to the corresponding service
@app.route('/<path:path>', methods=["GET", "POST", "PUT", "DELETE"])
def proxy(path):
    """Forward the incoming request to the backend service that owns *path*.

    Routing is by URL prefix; target hosts and ports come from environment
    variables. Backend cookies are re-issued to the client.
    """
    base = None
    port = None
    body = None
    # Map the incoming HTTP verb onto the matching `requests` helper.
    method = {"GET": requests.get, "POST": requests.post, "PUT": requests.put, "DELETE": requests.delete}

    if path.startswith("user") or path.startswith("group") or path.startswith("login") or path.startswith("logout"):
        port = env.get("USER_GROUP_PORT")
        base = env.get("USER_GROUP_BASE")
    elif path.startswith("workoutPlan") or path.startswith("category"):
        port = env.get("WORKOUT_PORT")
        base = env.get("WORKOUT_BASE")
    elif path.startswith("wikiHow"):
        port = env.get("WIKI_HOW_PORT")
        base = env.get("WIKI_HOW_BASE")
    elif path.startswith("shoppingsearch"):
        port = env.get("SHOPPING_PORT")
        base = env.get("SHOPPING_BASE")
    else:
        return "The Proxy is not aware of this URL", 404

    if request.method in ["POST", "PUT"]:
        try:
            # Parse the body as JSON whether or not the content type says so.
            body = request.get_json() if request.content_type == "application/json" else json.loads(request.get_data().decode("utf-8"))
        except json.decoder.JSONDecodeError:
            body = None  # non-JSON payloads are forwarded without a body

    # NOTE(review): request.headers is forwarded verbatim, including Host —
    # presumably the backends ignore it; verify if they route by Host header.
    res = method[request.method](f"{base}:{port}/{path}", json=body, headers=request.headers)
    response = make_response((res.text, res.status_code, res.headers.items()))
    if cookies := res.cookies:
        # Re-issue backend cookies to the client (max_age = 360 days, not secure).
        for (name, val) in cookies.get_dict().items():
            response.set_cookie(key=name, value=val, secure=False, max_age=360 * 60 * 60 * 24)
    return response
if __name__ == '__main__':
load_dotenv()
app.run(host="0.0.0.0", debug=True) | [
"flask_cors.CORS",
"flask.Flask",
"flask.request.get_data",
"os.environ.get",
"dotenv.load_dotenv",
"flask.request.get_json"
] | [((469, 484), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (474, 484), False, 'from flask import Flask, request, make_response\n'), ((485, 521), 'flask_cors.CORS', 'CORS', (['app'], {'supports_credentials': '(True)'}), '(app, supports_credentials=True)\n', (489, 521), False, 'from flask_cors import CORS\n'), ((2031, 2044), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2042, 2044), False, 'from dotenv import load_dotenv\n'), ((959, 985), 'os.environ.get', 'env.get', (['"""USER_GROUP_PORT"""'], {}), "('USER_GROUP_PORT')\n", (966, 985), True, 'from os import environ as env\n'), ((995, 1021), 'os.environ.get', 'env.get', (['"""USER_GROUP_BASE"""'], {}), "('USER_GROUP_BASE')\n", (1002, 1021), True, 'from os import environ as env\n'), ((1100, 1123), 'os.environ.get', 'env.get', (['"""WORKOUT_PORT"""'], {}), "('WORKOUT_PORT')\n", (1107, 1123), True, 'from os import environ as env\n'), ((1133, 1156), 'os.environ.get', 'env.get', (['"""WORKOUT_BASE"""'], {}), "('WORKOUT_BASE')\n", (1140, 1156), True, 'from os import environ as env\n'), ((1200, 1224), 'os.environ.get', 'env.get', (['"""WIKI_HOW_PORT"""'], {}), "('WIKI_HOW_PORT')\n", (1207, 1224), True, 'from os import environ as env\n'), ((1234, 1258), 'os.environ.get', 'env.get', (['"""WIKI_HOW_BASE"""'], {}), "('WIKI_HOW_BASE')\n", (1241, 1258), True, 'from os import environ as env\n'), ((1482, 1500), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1498, 1500), False, 'from flask import Flask, request, make_response\n'), ((1309, 1333), 'os.environ.get', 'env.get', (['"""SHOPPING_PORT"""'], {}), "('SHOPPING_PORT')\n", (1316, 1333), True, 'from os import environ as env\n'), ((1343, 1367), 'os.environ.get', 'env.get', (['"""SHOPPING_BASE"""'], {}), "('SHOPPING_BASE')\n", (1350, 1367), True, 'from os import environ as env\n'), ((1563, 1581), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (1579, 1581), False, 'from flask import Flask, request, make_response\n')] |