hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f722693b45716fdf18743cfb8e07e1164ef19d6a | 56,161 | py | Python | simba/features_scripts/extract_features_16bp.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 1 | 2021-12-15T07:30:33.000Z | 2021-12-15T07:30:33.000Z | simba/features_scripts/extract_features_16bp.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | null | null | null | simba/features_scripts/extract_features_16bp.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 1 | 2021-11-14T09:15:30.000Z | 2021-11-14T09:15:30.000Z | from __future__ import division
import os, glob
import pandas as pd
import math
import numpy as np
from scipy.spatial import ConvexHull
import scipy
from configparser import ConfigParser, NoOptionError, NoSectionError
from numba import jit
from simba.rw_dfs import *
import re
def extract_features_wotarget_16(inifile):
config = ConfigParser()
configFile = str(inifile)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
csv_dir_in, csv_dir_out = os.path.join(projectPath, 'csv', 'outlier_corrected_movement_location'), os.path.join(projectPath,'csv', 'features_extracted')
vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
vidinfDf = pd.read_csv(vidInfPath)
#change videos name to str
vidinfDf.Video = vidinfDf.Video.astype('str')
def count_values_in_range(series, values_in_range_min, values_in_range_max):
    """Count how many entries of *series* lie inside the closed interval
    [values_in_range_min, values_in_range_max] (both endpoints included)."""
    in_range_mask = series.between(left=values_in_range_min, right=values_in_range_max)
    return in_range_mask.sum()
def angle3pt(ax, ay, bx, by, cx, cy):
    """Angle in degrees at vertex (bx, by): the difference between the
    directions of ray b->c and ray b->a, normalized into [0, 360)."""
    raw = math.degrees(math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))
    if raw < 0:
        raw += 360
    return raw
@jit(nopython=True, cache=True)
def EuclidianDistCald(bp1xVals, bp1yVals, bp2xVals, bp2yVals, currPixPerMM):
    """Element-wise euclidean distance between two body-part coordinate
    arrays, converted from pixels to millimetres via currPixPerMM."""
    dx = bp1xVals - bp2xVals
    dy = bp1yVals - bp2yVals
    return np.sqrt(dx ** 2 + dy ** 2) / currPixPerMM
# Rolling-window setup: each value below is a divisor of fps, so a window of
# int(fps / value) frames spans roughly 1/value seconds of video.
roll_windows, loopy = [], 0
roll_windows_values = [2, 5, 6, 7.5, 15]
#REMOVE WINDOWS THAT ARE TOO SMALL
# Clamp divisors to the slowest video's fps so every int(fps / value)
# window is at least one frame wide.
minimum_fps = vidinfDf['fps'].min()
for win in range(len(roll_windows_values)):
    if minimum_fps < roll_windows_values[win]:
        roll_windows_values[win] = minimum_fps
    else:
        pass
# NOTE(review): set() removes duplicates created by the clamping above, but it
# also discards the original ordering of the window values; downstream column
# names built from roll_windows_values[i] therefore depend on set iteration
# order -- confirm this is intended.
roll_windows_values = list(set(roll_windows_values))
########### FIND CSV FILES ###########
print(csv_dir_in)
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
print('Extracting features from ' + str(len(filesFound)) + ' files...')
########### CREATE PD FOR RAW DATA AND PD FOR MOVEMENT BETWEEN FRAMES ###########
# Per-video feature-extraction loop.
for currentFile in filesFound:
    # Per-frame hull statistics accumulated by the iterrows() loop further down.
    M1_hull_large_euclidean_list, M1_hull_small_euclidean_list, M1_hull_mean_euclidean_list, M1_hull_sum_euclidean_list, M2_hull_large_euclidean_list, M2_hull_small_euclidean_list, M2_hull_mean_euclidean_list, M2_hull_sum_euclidean_list = [], [], [], [], [], [], [], []
    currVidName = os.path.basename(currentFile).replace('.' +wfileType, '')
    # get current pixels/mm
    currVideoSettings = vidinfDf.loc[vidinfDf['Video'] == currVidName]
    try:
        currPixPerMM = float(currVideoSettings['pixels/mm'])
    except TypeError:
        # NOTE(review): after this message currPixPerMM is undefined (or stale
        # from the previous video) and the code below still runs -- confirm
        # whether this video should be skipped instead.
        print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
    fps = float(currVideoSettings['fps'])
    print('Processing ' + '"' + str(currVidName) + '".' + ' Fps: ' + str(fps) + ". mm/ppx: " + str(currPixPerMM))
    # NOTE(review): roll_windows is never cleared between videos; since it is
    # only ever read via indices 0..len(roll_windows_values)-1, every video
    # after the first keeps using the first video's window sizes -- verify.
    for i in range(len(roll_windows_values)):
        roll_windows.append(int(fps / roll_windows_values[i]))
    loopy += 1
    # Expected column layout: 8 body parts x (x, y, likelihood) per animal,
    # two animals = 48 columns.
    columnHeaders = ["Ear_left_1_x", "Ear_left_1_y", "Ear_left_1_p", "Ear_right_1_x", "Ear_right_1_y",
                     "Ear_right_1_p", "Nose_1_x", "Nose_1_y", "Nose_1_p", "Center_1_x", "Center_1_y", "Center_1_p",
                     "Lat_left_1_x", "Lat_left_1_y",
                     "Lat_left_1_p", "Lat_right_1_x", "Lat_right_1_y", "Lat_right_1_p", "Tail_base_1_x",
                     "Tail_base_1_y", "Tail_base_1_p", "Tail_end_1_x", "Tail_end_1_y", "Tail_end_1_p",
                     "Ear_left_2_x",
                     "Ear_left_2_y", "Ear_left_2_p", "Ear_right_2_x", "Ear_right_2_y", "Ear_right_2_p",
                     "Nose_2_x", "Nose_2_y", "Nose_2_p", "Center_2_x", "Center_2_y", "Center_2_p", "Lat_left_2_x",
                     "Lat_left_2_y",
                     "Lat_left_2_p", "Lat_right_2_x", "Lat_right_2_y", "Lat_right_2_p", "Tail_base_2_x",
                     "Tail_base_2_y", "Tail_base_2_p", "Tail_end_2_x", "Tail_end_2_y", "Tail_end_2_p"]
    csv_df = read_df(currentFile, wfileType)
    try:
        # Drop the tracking tool's 'scorer' column into the index if present.
        csv_df = csv_df.set_index('scorer')
    except KeyError:
        pass
    csv_df.columns = columnHeaders
    csv_df = csv_df.fillna(0)
    #csv_df = csv_df.drop(csv_df.index[[0]])
    csv_df = csv_df.apply(pd.to_numeric)
    csv_df = csv_df.reset_index()
    csv_df = csv_df.reset_index(drop=True)
print('Evaluating convex hulls...')
########### MOUSE AREAS ###########################################
# Per-frame convex-hull area spanned by 7 body parts (tail end excluded)
# for each animal, computed row-by-row via DataFrame.apply.
try:
    csv_df['Mouse_1_poly_area'] = csv_df.apply(lambda x: ConvexHull(np.array(
        [[x['Ear_left_1_x'], x["Ear_left_1_y"]],
         [x['Ear_right_1_x'], x["Ear_right_1_y"]],
         [x['Nose_1_x'], x["Nose_1_y"]],
         [x['Lat_left_1_x'], x["Lat_left_1_y"]], \
         [x['Lat_right_1_x'], x["Lat_right_1_y"]],
         [x['Tail_base_1_x'], x["Tail_base_1_y"]],
         [x['Center_1_x'], x["Center_1_y"]]])).area, axis=1)
except scipy.spatial.qhull.QhullError as e:
    # Degenerate frames (e.g. collinear or coincident points) make Qhull fail.
    print(e)
    print('ERROR: For more information, go to https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md#i-get-a-qhull-eg-qh6154-or-6013-error-when-extracting-the-features')
# NOTE(review): if the QhullError above fired, 'Mouse_1_poly_area' was never
# created and this eval raises. Also note the asymmetry: only Mouse 1's area is
# divided by pixels/mm here (area actually scales with pixels/mm squared),
# while Mouse 2's area below stays in pixel units -- confirm intended.
csv_df['Mouse_1_poly_area'] = csv_df.eval('Mouse_1_poly_area / @currPixPerMM')
try:
    csv_df['Mouse_2_poly_area'] = csv_df.apply(lambda x: ConvexHull(np.array(
        [[x['Ear_left_2_x'], x["Ear_left_2_y"]],
         [x['Ear_right_2_x'], x["Ear_right_2_y"]],
         [x['Nose_2_x'], x["Nose_2_y"]],
         [x['Lat_left_2_x'], x["Lat_left_2_y"]], \
         [x['Lat_right_2_x'], x["Lat_right_2_y"]],
         [x['Tail_base_2_x'], x["Tail_base_2_y"]],
         [x['Center_2_x'], x["Center_2_y"]]])).area, axis=1)
except scipy.spatial.qhull.QhullError as e:
    print(e)
    print('ERROR: For more information, check https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md#i-get-a-qhull-eg-qh6154-or-6013-error-when-extracting-the-features')
########### CREATE SHIFTED DATAFRAME FOR DISTANCE CALCULATIONS ###########################################
csv_df_shifted = csv_df.shift(periods=1)
csv_df_shifted = csv_df_shifted.rename(
columns={'Ear_left_1_x': 'Ear_left_1_x_shifted', 'Ear_left_1_y': 'Ear_left_1_y_shifted',
'Ear_left_1_p': 'Ear_left_1_p_shifted', 'Ear_right_1_x': 'Ear_right_1_x_shifted', \
'Ear_right_1_y': 'Ear_right_1_y_shifted', 'Ear_right_1_p': 'Ear_right_1_p_shifted',
'Nose_1_x': 'Nose_1_x_shifted', 'Nose_1_y': 'Nose_1_y_shifted', \
'Nose_1_p': 'Nose_1_p_shifted', 'Center_1_x': 'Center_1_x_shifted',
'Center_1_y': 'Center_1_y_shifted', 'Center_1_p': 'Center_1_p_shifted', 'Lat_left_1_x': \
'Lat_left_1_x_shifted', 'Lat_left_1_y': 'Lat_left_1_y_shifted',
'Lat_left_1_p': 'Lat_left_1_p_shifted', 'Lat_right_1_x': 'Lat_right_1_x_shifted',
'Lat_right_1_y': 'Lat_right_1_y_shifted', \
'Lat_right_1_p': 'Lat_right_1_p_shifted', 'Tail_base_1_x': 'Tail_base_1_x_shifted',
'Tail_base_1_y': 'Tail_base_1_y_shifted', \
'Tail_base_1_p': 'Tail_base_1_p_shifted', 'Tail_end_1_x': 'Tail_end_1_x_shifted',
'Tail_end_1_y': 'Tail_end_1_y_shifted', 'Tail_end_1_p': 'Tail_end_1_p_shifted',
'Ear_left_2_x': 'Ear_left_2_x_shifted', 'Ear_left_2_y': 'Ear_left_2_y_shifted',
'Ear_left_2_p': 'Ear_left_2_p_shifted', 'Ear_right_2_x': 'Ear_right_2_x_shifted', \
'Ear_right_2_y': 'Ear_right_2_y_shifted', 'Ear_right_2_p': 'Ear_right_2_p_shifted',
'Nose_2_x': 'Nose_2_x_shifted', 'Nose_2_y': 'Nose_2_y_shifted', \
'Nose_2_p': 'Nose_2_p_shifted', 'Center_2_x': 'Center_2_x_shifted',
'Center_2_y': 'Center_2_y_shifted', 'Center_2_p': 'Center_2_p_shifted', 'Lat_left_2_x': \
'Lat_left_2_x_shifted', 'Lat_left_2_y': 'Lat_left_2_y_shifted',
'Lat_left_2_p': 'Lat_left_2_p_shifted', 'Lat_right_2_x': 'Lat_right_2_x_shifted',
'Lat_right_2_y': 'Lat_right_2_y_shifted', \
'Lat_right_2_p': 'Lat_right_2_p_shifted', 'Tail_base_2_x': 'Tail_base_2_x_shifted',
'Tail_base_2_y': 'Tail_base_2_y_shifted', \
'Tail_base_2_p': 'Tail_base_2_p_shifted', 'Tail_end_2_x': 'Tail_end_2_x_shifted',
'Tail_end_2_y': 'Tail_end_2_y_shifted', 'Tail_end_2_p': 'Tail_end_2_p_shifted',
'Mouse_1_poly_area': 'Mouse_1_poly_area_shifted',
'Mouse_2_poly_area': 'Mouse_2_poly_area_shifted'})
csv_df_combined = pd.concat([csv_df, csv_df_shifted], axis=1, join='inner')
csv_df_combined = csv_df_combined.fillna(0)
csv_df_combined = csv_df_combined.reset_index(drop=True)
print('Calculating euclidean distances...')
########### EUCLIDEAN DISTANCES ###########################################
csv_df['Mouse_1_nose_to_tail'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values, csv_df['Tail_base_1_x'].values, csv_df['Tail_base_1_y'].values, currPixPerMM)
csv_df['Mouse_2_nose_to_tail'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values, csv_df['Tail_base_2_x'].values, csv_df['Tail_base_2_y'].values, currPixPerMM)
csv_df['Mouse_1_width'] = EuclidianDistCald(csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_width'] = EuclidianDistCald(csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Ear_distance'] = EuclidianDistCald(csv_df['Ear_left_1_x'].values, csv_df['Ear_left_1_y'].values, csv_df['Ear_right_1_x'].values, csv_df['Ear_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Ear_distance'] = EuclidianDistCald(csv_df['Ear_left_2_x'].values, csv_df['Ear_left_2_y'].values, csv_df['Ear_right_2_x'].values, csv_df['Ear_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_centroid'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values, csv_df['Center_1_x'].values, csv_df['Center_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_centroid'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Center_2_x'].values, csv_df['Center_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_lateral_left'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_lateral_left'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_lateral_right'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_lateral_right'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Centroid_to_lateral_left'] = EuclidianDistCald(csv_df['Center_1_x'].values, csv_df['Center_1_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Centroid_to_lateral_left'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Centroid_to_lateral_right'] = EuclidianDistCald(csv_df['Center_1_x'].values, csv_df['Center_1_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Centroid_to_lateral_right'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Centroid_distance'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Center_1_x'].values, csv_df['Center_1_y'].values, currPixPerMM)
csv_df['Nose_to_nose_distance'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_lat_left'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_lat_right'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_lat_left'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_lat_right'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_tail_base'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Tail_base_2_x'].values, csv_df['Tail_base_2_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_tail_base'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Tail_base_1_x'].values, csv_df['Tail_base_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_centroid'] = EuclidianDistCald(csv_df_combined['Center_1_x_shifted'].values, csv_df_combined['Center_1_y_shifted'].values,csv_df_combined['Center_1_x'].values, csv_df_combined['Center_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_centroid'] = EuclidianDistCald(csv_df_combined['Center_2_x_shifted'].values, csv_df_combined['Center_2_y_shifted'].values,csv_df_combined['Center_2_x'].values, csv_df_combined['Center_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_nose'] = EuclidianDistCald(csv_df_combined['Nose_1_x_shifted'].values, csv_df_combined['Nose_1_y_shifted'].values,csv_df_combined['Nose_1_x'].values, csv_df_combined['Nose_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_nose'] = EuclidianDistCald(csv_df_combined['Nose_2_x_shifted'].values, csv_df_combined['Nose_2_y_shifted'].values,csv_df_combined['Nose_2_x'].values, csv_df_combined['Nose_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_tail_base'] = EuclidianDistCald(csv_df_combined['Tail_base_1_x_shifted'].values, csv_df_combined['Tail_base_1_y_shifted'].values,csv_df_combined['Tail_base_1_x'].values, csv_df_combined['Tail_base_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_tail_base'] = EuclidianDistCald(csv_df_combined['Tail_base_2_x_shifted'].values, csv_df_combined['Tail_base_2_y_shifted'].values,csv_df_combined['Tail_base_2_x'].values, csv_df_combined['Tail_base_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_tail_end'] = EuclidianDistCald(csv_df_combined['Tail_end_1_x_shifted'].values, csv_df_combined['Tail_end_1_y_shifted'].values,csv_df_combined['Tail_end_1_x'].values, csv_df_combined['Tail_end_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_tail_end'] = EuclidianDistCald(csv_df_combined['Tail_end_2_x_shifted'].values, csv_df_combined['Tail_end_2_y_shifted'].values,csv_df_combined['Tail_end_2_x'].values, csv_df_combined['Tail_end_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_left_ear'] = EuclidianDistCald(csv_df_combined['Ear_left_1_x_shifted'].values, csv_df_combined['Ear_left_1_y_shifted'].values,csv_df_combined['Ear_left_1_x'].values, csv_df_combined['Ear_left_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_left_ear'] = EuclidianDistCald(csv_df_combined['Ear_left_2_x_shifted'].values, csv_df_combined['Ear_left_2_y_shifted'].values,csv_df_combined['Ear_left_2_x'].values, csv_df_combined['Ear_left_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_right_ear'] = EuclidianDistCald(csv_df_combined['Ear_right_1_x_shifted'].values, csv_df_combined['Ear_right_1_y_shifted'].values,csv_df_combined['Ear_right_1_x'].values, csv_df_combined['Ear_right_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_right_ear'] = EuclidianDistCald(csv_df_combined['Ear_right_2_x_shifted'].values, csv_df_combined['Ear_right_2_y_shifted'].values,csv_df_combined['Ear_right_2_x'].values, csv_df_combined['Ear_right_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_lateral_left'] = EuclidianDistCald(csv_df_combined['Lat_left_1_x_shifted'].values, csv_df_combined['Lat_left_1_y_shifted'].values,csv_df_combined['Lat_left_1_x'].values, csv_df_combined['Lat_left_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_lateral_left'] = EuclidianDistCald(csv_df_combined['Lat_left_2_x_shifted'].values, csv_df_combined['Lat_left_2_y_shifted'].values,csv_df_combined['Lat_left_2_x'].values, csv_df_combined['Lat_left_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_lateral_right'] = EuclidianDistCald(csv_df_combined['Lat_right_1_x_shifted'].values, csv_df_combined['Lat_right_1_y_shifted'].values,csv_df_combined['Lat_right_1_x'].values, csv_df_combined['Lat_right_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_lateral_right'] = EuclidianDistCald(csv_df_combined['Lat_right_2_x_shifted'].values, csv_df_combined['Lat_right_2_y_shifted'].values,csv_df_combined['Lat_right_2_x'].values, csv_df_combined['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_polygon_size_change'] = pd.eval("csv_df_combined.Mouse_1_poly_area_shifted - csv_df_combined.Mouse_1_poly_area")
csv_df['Mouse_2_polygon_size_change'] = pd.eval("csv_df_combined.Mouse_2_poly_area_shifted - csv_df_combined.Mouse_2_poly_area")
print('Calculating hull variables...')
########### HULL - EUCLIDEAN DISTANCES ###########################################
# For every frame, compute all pairwise euclidean distances among the 7 tracked
# points of each animal and keep the max / min / mean / sum as "hull" features.
# NOTE(review): the `!= 0` filter below drops the cdist self-distance diagonal,
# but it also drops any pair of body parts that share identical coordinates in
# a frame; if ALL points coincide the filtered array is empty and np.amax
# raises -- confirm upstream data cannot produce that.
for index, row in csv_df.iterrows():
    M1_np_array = np.array(
        [[row['Ear_left_1_x'], row["Ear_left_1_y"]], [row['Ear_right_1_x'], row["Ear_right_1_y"]],
         [row['Nose_1_x'], row["Nose_1_y"]], [row['Center_1_x'], row["Center_1_y"]],
         [row['Lat_left_1_x'], row["Lat_left_1_y"]], [row['Lat_right_1_x'], row["Lat_right_1_y"]],
         [row['Tail_base_1_x'], row["Tail_base_1_y"]]]).astype(int)
    M2_np_array = np.array(
        [[row['Ear_left_2_x'], row["Ear_left_2_y"]], [row['Ear_right_2_x'], row["Ear_right_2_y"]],
         [row['Nose_2_x'], row["Nose_2_y"]], [row['Center_2_x'], row["Center_2_y"]],
         [row['Lat_left_2_x'], row["Lat_left_2_y"]], [row['Lat_right_2_x'], row["Lat_right_2_y"]],
         [row['Tail_base_2_x'], row["Tail_base_2_y"]]]).astype(int)
    M1_dist_euclidean = scipy.spatial.distance.cdist(M1_np_array, M1_np_array, metric='euclidean')
    M1_dist_euclidean = M1_dist_euclidean[M1_dist_euclidean != 0]
    M1_hull_large_euclidean = np.amax(M1_dist_euclidean)
    M1_hull_small_euclidean = np.min(M1_dist_euclidean)
    M1_hull_mean_euclidean = np.mean(M1_dist_euclidean)
    M1_hull_sum_euclidean = np.sum(M1_dist_euclidean)
    M1_hull_large_euclidean_list.append(M1_hull_large_euclidean)
    M1_hull_small_euclidean_list.append(M1_hull_small_euclidean)
    M1_hull_mean_euclidean_list.append(M1_hull_mean_euclidean)
    M1_hull_sum_euclidean_list.append(M1_hull_sum_euclidean)
    M2_dist_euclidean = scipy.spatial.distance.cdist(M2_np_array, M2_np_array, metric='euclidean')
    M2_dist_euclidean = M2_dist_euclidean[M2_dist_euclidean != 0]
    M2_hull_large_euclidean = np.amax(M2_dist_euclidean)
    M2_hull_small_euclidean = np.min(M2_dist_euclidean)
    M2_hull_mean_euclidean = np.mean(M2_dist_euclidean)
    M2_hull_sum_euclidean = np.sum(M2_dist_euclidean)
    M2_hull_large_euclidean_list.append(M2_hull_large_euclidean)
    M2_hull_small_euclidean_list.append(M2_hull_small_euclidean)
    M2_hull_mean_euclidean_list.append(M2_hull_mean_euclidean)
    M2_hull_sum_euclidean_list.append(M2_hull_sum_euclidean)
# Convert the accumulated per-frame pixel statistics to millimetres.
csv_df['M1_largest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_large_euclidean_list))
csv_df['M1_smallest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_small_euclidean_list))
csv_df['M1_mean_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_mean_euclidean_list))
csv_df['M1_sum_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_sum_euclidean_list))
csv_df['M2_largest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_large_euclidean_list))
csv_df['M2_smallest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_small_euclidean_list))
csv_df['M2_mean_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_mean_euclidean_list))
csv_df['M2_sum_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_sum_euclidean_list))
csv_df['Sum_euclidean_distance_hull_M1_M2'] = (csv_df['M1_sum_euclidean_distance_hull'] + csv_df['M2_sum_euclidean_distance_hull'])
########### COLLAPSED MEASURES ###########################################
# Frame-wise sums of the individual movement features computed above.
csv_df['Total_movement_centroids'] = csv_df.eval("Movement_mouse_1_centroid + Movement_mouse_2_centroid")
csv_df['Total_movement_tail_ends'] = csv_df.eval('Movement_mouse_1_tail_end + Movement_mouse_2_tail_end')
# Per-animal totals over all tracked body parts except the centroid.
csv_df['Total_movement_all_bodyparts_M1'] = csv_df.eval('Movement_mouse_1_nose + Movement_mouse_1_tail_end + Movement_mouse_1_tail_base + Movement_mouse_1_left_ear + Movement_mouse_1_right_ear + Movement_mouse_1_lateral_left + Movement_mouse_1_lateral_right')
csv_df['Total_movement_all_bodyparts_M2'] = csv_df.eval('Movement_mouse_2_nose + Movement_mouse_2_tail_end + Movement_mouse_2_tail_base + Movement_mouse_2_left_ear + Movement_mouse_2_right_ear + Movement_mouse_2_lateral_left + Movement_mouse_2_lateral_right')
csv_df['Total_movement_all_bodyparts_both_mice'] = csv_df.eval('Total_movement_all_bodyparts_M1 + Total_movement_all_bodyparts_M2')
########### CALC ROLLING WINDOWS MEDIANS AND MEANS ###########################################
print('Calculating rolling windows: medians, medians, and sums...')
for i in range(len(roll_windows_values)):
currentColName = 'Sum_euclid_distances_hull_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Sum_euclid_distances_hull_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Sum_euclid_distances_hull_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Movement_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Movement_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Distance_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Centroid_distance'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Centroid_distance'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Distance_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Centroid_distance'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_width_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Mouse1_width_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Mouse1_width_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_width_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Mouse2_width_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Mouse2_width_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_mean_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_mean_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_mean_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_mean_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_smallest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_smallest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_smallest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_largest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_largest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_largest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_largest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_all_bodyparts_both_mice_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Total_movement_all_bodyparts_both_mice_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_centroids_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Total_movement_centroids_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Total_movement_centroids_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_base_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_base_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_base_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_base_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_base_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_base_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Centroid_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Centroid_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Centroid_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Centroid_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Centroid_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Centroid_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_end_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_end_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_end_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_end_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_end_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_end_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Nose_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Nose_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Nose_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Nose_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Nose_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Nose_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).sum()
########### BODY PARTS RELATIVE TO EACH OTHER ##################
csv_df['Tail_end_relative_to_tail_base_centroid_nose'] = csv_df['Movement_mouse_1_tail_end'] - (
csv_df['Movement_mouse_1_tail_base'] + csv_df['Movement_mouse_1_centroid'] + csv_df[
'Movement_mouse_1_nose'])
for i in range(len(roll_windows_values)):
currentColName_M1 = 'Tail_end_relative_to_tail_base_centroid_nose_M1_' + str(roll_windows_values[i])
tail_end_col_name = 'Tail_end_movement_M1_mean_' + str(roll_windows_values[i])
tail_base_col_name = 'Tail_base_movement_M1_mean_' + str(roll_windows_values[i])
centroid_col_name = 'Centroid_movement_M1_mean_' + str(roll_windows_values[i])
nose_col_name = 'Nose_movement_M1_mean_' + str(roll_windows_values[i])
currentColName_M2 = 'Tail_end_relative_to_tail_base_centroid_nose_M2_mean_' + str(roll_windows_values[i])
tail_end_col_name_M2 = 'Tail_end_movement_M2_mean_' + str(roll_windows_values[i])
tail_base_col_name_M2 = 'Tail_base_movement_M2_mean_' + str(roll_windows_values[i])
centroid_col_name_M2 = 'Centroid_movement_M2_mean_' + str(roll_windows_values[i])
nose_col_name_M2 = 'Nose_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName_M1] = csv_df[tail_end_col_name] - (
csv_df[tail_base_col_name] + csv_df[centroid_col_name] + csv_df[nose_col_name])
csv_df[currentColName_M2] = csv_df[tail_end_col_name_M2] - (
csv_df[tail_base_col_name_M2] + csv_df[centroid_col_name_M2] + csv_df[nose_col_name_M2])
        ########### ANGLES ###########################################
        # Body "bend" angle at the centroid: nose -> center -> tail-base, in
        # degrees (angle3pt wraps negative results into positive degrees).
        # Row-wise apply is slow but straightforward.
        print('Calculating angles...')
        csv_df['Mouse_1_angle'] = csv_df.apply(
            lambda x: angle3pt(x['Nose_1_x'], x['Nose_1_y'], x['Center_1_x'], x['Center_1_y'], x['Tail_base_1_x'],
                               x['Tail_base_1_y']), axis=1)
        csv_df['Mouse_2_angle'] = csv_df.apply(
            lambda x: angle3pt(x['Nose_2_x'], x['Nose_2_y'], x['Center_2_x'], x['Center_2_y'], x['Tail_base_2_x'],
                               x['Tail_base_2_y']), axis=1)
        csv_df['Total_angle_both_mice'] = csv_df['Mouse_1_angle'] + csv_df['Mouse_2_angle']
        # Rolling sum of the combined angle over each smoothing window.
        for i in range(len(roll_windows_values)):
            currentColName = 'Total_angle_both_mice_' + str(roll_windows_values[i])
            csv_df[currentColName] = csv_df['Total_angle_both_mice'].rolling(roll_windows[i], min_periods=1).sum()
########### DEVIATIONS ###########################################
print('Calculating deviations...')
csv_df['Total_movement_all_bodyparts_both_mice_deviation'] = csv_df.eval('Total_movement_all_bodyparts_both_mice.mean() - Total_movement_all_bodyparts_both_mice')
csv_df['Sum_euclid_distances_hull_deviation'] = csv_df.eval('Sum_euclidean_distance_hull_M1_M2.mean() - Sum_euclidean_distance_hull_M1_M2')
csv_df['M1_smallest_euclid_distances_hull_deviation'] = csv_df.eval('M1_smallest_euclidean_distance_hull.mean() - M1_smallest_euclidean_distance_hull')
csv_df['M1_largest_euclid_distances_hull_deviation'] = csv_df.eval('M1_largest_euclidean_distance_hull.mean() - M1_largest_euclidean_distance_hull')
csv_df['M1_mean_euclid_distances_hull_deviation'] = csv_df.eval('M1_mean_euclidean_distance_hull.mean() - M1_mean_euclidean_distance_hull')
csv_df['Centroid_distance_deviation'] = csv_df.eval('Centroid_distance.mean() - Centroid_distance')
csv_df['Total_angle_both_mice_deviation'] = csv_df.eval('Total_angle_both_mice - Total_angle_both_mice')
csv_df['Movement_mouse_1_deviation_centroid'] = csv_df.eval('Movement_mouse_1_centroid.mean() - Movement_mouse_1_centroid')
csv_df['Movement_mouse_2_deviation_centroid'] = csv_df.eval('Movement_mouse_2_centroid.mean() - Movement_mouse_2_centroid')
csv_df['Mouse_1_polygon_deviation'] = csv_df.eval('Mouse_1_poly_area.mean() - Mouse_1_poly_area')
csv_df['Mouse_2_polygon_deviation'] = csv_df.eval('Mouse_2_poly_area.mean() - Mouse_2_poly_area')
for i in roll_windows_values:
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(i)
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Sum_euclid_distances_hull_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Total_angle_both_mice_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
########### PERCENTILE RANK ###########################################
print('Calculating percentile ranks...')
csv_df['Movement_percentile_rank'] = csv_df['Total_movement_centroids'].rank(pct=True)
csv_df['Distance_percentile_rank'] = csv_df['Centroid_distance'].rank(pct=True)
csv_df['Movement_mouse_1_percentile_rank'] = csv_df['Movement_mouse_1_centroid'].rank(pct=True)
csv_df['Movement_mouse_2_percentile_rank'] = csv_df['Movement_mouse_1_centroid'].rank(pct=True)
csv_df['Movement_mouse_1_deviation_percentile_rank'] = csv_df['Movement_mouse_1_deviation_centroid'].rank(
pct=True)
csv_df['Movement_mouse_2_deviation_percentile_rank'] = csv_df['Movement_mouse_2_deviation_centroid'].rank(
pct=True)
csv_df['Centroid_distance_percentile_rank'] = csv_df['Centroid_distance'].rank(pct=True)
csv_df['Centroid_distance_deviation_percentile_rank'] = csv_df['Centroid_distance_deviation'].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Sum_euclid_distances_hull_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
        ########### CALCULATE STRAIGHTNESS OF POLYLINE PATH: tortuosity ###########################################
        print('Calculating path tortuosities...')
        # Build overlapping 3-frame windows of each centroid coordinate with a
        # zero-copy stride trick: row i of each strided array holds frames
        # [i, i+1, i+2]. The strided arrays therefore have len(csv_df) - 2 rows.
        # NOTE(review): the sliding loop below advances `start`/`end` up to
        # len(csv_df) + window, so trailing slices shrink and eventually go
        # empty — confirm this end-of-video behavior is intended.
        as_strided = np.lib.stride_tricks.as_strided
        win_size = 3
        centroidList_Mouse1_x = as_strided(csv_df.Center_1_x, (len(csv_df) - (win_size - 1), win_size),
                                           (csv_df.Center_1_x.values.strides * 2))
        centroidList_Mouse1_y = as_strided(csv_df.Center_1_y, (len(csv_df) - (win_size - 1), win_size),
                                           (csv_df.Center_1_y.values.strides * 2))
        centroidList_Mouse2_x = as_strided(csv_df.Center_2_x, (len(csv_df) - (win_size - 1), win_size),
                                           (csv_df.Center_2_x.values.strides * 2))
        centroidList_Mouse2_y = as_strided(csv_df.Center_2_y, (len(csv_df) - (win_size - 1), win_size),
                                           (csv_df.Center_2_y.values.strides * 2))
        # For each rolling window size: slide a window of 3-frame triplets over
        # the video, sum the turning angles within the window, and scale.
        for k in range(len(roll_windows_values)):
            start = 0
            end = start + int(roll_windows_values[k])
            tortuosity_M1 = []
            tortuosity_M2 = []
            for y in range(len(csv_df)):
                tortuosity_List_M1 = []
                tortuosity_List_M2 = []
                CurrCentroidList_Mouse1_x = centroidList_Mouse1_x[start:end]
                CurrCentroidList_Mouse1_y = centroidList_Mouse1_y[start:end]
                CurrCentroidList_Mouse2_x = centroidList_Mouse2_x[start:end]
                CurrCentroidList_Mouse2_y = centroidList_Mouse2_y[start:end]
                for i in range(len(CurrCentroidList_Mouse1_x)):
                    # Turning angle across three consecutive centroid positions.
                    currMovementAngle_mouse1 = (
                        angle3pt(CurrCentroidList_Mouse1_x[i][0], CurrCentroidList_Mouse1_y[i][0],
                                 CurrCentroidList_Mouse1_x[i][1], CurrCentroidList_Mouse1_y[i][1],
                                 CurrCentroidList_Mouse1_x[i][2], CurrCentroidList_Mouse1_y[i][2]))
                    currMovementAngle_mouse2 = (
                        angle3pt(CurrCentroidList_Mouse2_x[i][0], CurrCentroidList_Mouse2_y[i][0],
                                 CurrCentroidList_Mouse2_x[i][1], CurrCentroidList_Mouse2_y[i][1],
                                 CurrCentroidList_Mouse2_x[i][2], CurrCentroidList_Mouse2_y[i][2]))
                    tortuosity_List_M1.append(currMovementAngle_mouse1)
                    tortuosity_List_M2.append(currMovementAngle_mouse2)
                # NOTE(review): angle3pt returns degrees but the sum is divided
                # by 2*pi (radians-per-turn); the unit mix looks odd but is
                # preserved as-is since features only need to be consistent.
                tortuosity_M1.append(sum(tortuosity_List_M1) / (2 * math.pi))
                tortuosity_M2.append(sum(tortuosity_List_M2) / (2 * math.pi))
                start += 1
                end += 1
            currentColName1 = str('Tortuosity_Mouse1_') + str(roll_windows_values[k])
            #currentColName2 = str('Tortuosity_Mouse2_') + str(roll_windows_values[k])
            csv_df[currentColName1] = tortuosity_M1
            #csv_df[currentColName2] = tortuosity_M2
########### CALC THE NUMBER OF LOW PROBABILITY DETECTIONS & TOTAL PROBABILITY VALUE FOR ROW###########################################
print('Calculating pose probability scores...')
csv_df['Sum_probabilities'] = csv_df.eval('Ear_left_1_p + Ear_right_1_p + Nose_1_p + Center_1_p + Lat_left_1_p + Lat_right_1_p + Tail_base_1_p + Tail_end_1_p + Ear_left_2_p + Ear_right_2_p + Nose_2_p + Center_2_p + Lat_left_2_p + Lat_right_2_p + Tail_base_2_p + Tail_end_2_p')
csv_df['Sum_probabilities_deviation'] = csv_df.eval('Sum_probabilities.mean() - Sum_probabilities')
csv_df['Sum_probabilities_deviation_percentile_rank'] = csv_df['Sum_probabilities_deviation'].rank(pct=True)
csv_df['Sum_probabilities_percentile_rank'] = csv_df['Sum_probabilities_deviation_percentile_rank'].rank(pct=True)
csv_df_probability = csv_df.filter(
['Ear_left_1_p', 'Ear_right_1_p', 'Nose_1_p', 'Center_1_p', 'Lat_left_1_p', 'Lat_right_1_p',
'Tail_base_1_p', 'Tail_end_1_p', 'Ear_left_2_p', 'Ear_right_2_p', 'Nose_2_p', 'Center_2_p', 'Lat_left_2_p',
'Lat_right_2_p', 'Tail_base_2_p', 'Tail_end_2_p'])
values_in_range_min, values_in_range_max = 0.0, 0.1
csv_df["Low_prob_detections_0.1"] = csv_df_probability.apply(func=lambda row: count_values_in_range(row, values_in_range_min, values_in_range_max), axis=1)
values_in_range_min, values_in_range_max = 0.000000000, 0.5
csv_df["Low_prob_detections_0.5"] = csv_df_probability.apply(
func=lambda row: count_values_in_range(row, values_in_range_min, values_in_range_max), axis=1)
values_in_range_min, values_in_range_max = 0.000000000, 0.75
csv_df["Low_prob_detections_0.75"] = csv_df_probability.apply(
func=lambda row: count_values_in_range(row, values_in_range_min, values_in_range_max), axis=1)
########### DROP COORDINATE COLUMNS ###########################################
csv_df = csv_df.reset_index(drop=True)
csv_df = csv_df.fillna(0)
csv_df = csv_df.drop(columns=['index'], axis=1, errors='ignore')
fileName = os.path.basename(currentFile)
saveFN = os.path.join(csv_dir_out, fileName)
save_df(csv_df, wfileType, saveFN)
print('Feature extraction complete for ' + '"' + str(currVidName) + '".')
    print('All feature extraction complete.')
import os, glob
import pandas as pd
import math
import numpy as np
from scipy.spatial import ConvexHull
import scipy
from configparser import ConfigParser, NoOptionError, NoSectionError
from numba import jit
from simba.rw_dfs import *
import re
def extract_features_wotarget_16(inifile):
config = ConfigParser()
configFile = str(inifile)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
csv_dir_in, csv_dir_out = os.path.join(projectPath, 'csv', 'outlier_corrected_movement_location'), os.path.join(projectPath,'csv', 'features_extracted')
vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
vidinfDf = pd.read_csv(vidInfPath)
vidinfDf.Video = vidinfDf.Video.astype('str')
def count_values_in_range(series, values_in_range_min, values_in_range_max):
return series.between(left=values_in_range_min, right=values_in_range_max).sum()
def angle3pt(ax, ay, bx, by, cx, cy):
ang = math.degrees(
math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))
return ang + 360 if ang < 0 else ang
@jit(nopython=True, cache=True)
def EuclidianDistCald(bp1xVals, bp1yVals, bp2xVals, bp2yVals, currPixPerMM):
series = (np.sqrt((bp1xVals - bp2xVals) ** 2 + (bp1yVals - bp2yVals) ** 2)) / currPixPerMM
return series
roll_windows, loopy = [], 0
roll_windows_values = [2, 5, 6, 7.5, 15]
minimum_fps = vidinfDf['fps'].min()
for win in range(len(roll_windows_values)):
if minimum_fps < roll_windows_values[win]:
roll_windows_values[win] = minimum_fps
else:
pass
roll_windows_values = list(set(roll_windows_values))
1_x", "Ear_left_1_y", "Ear_left_1_p", "Ear_right_1_x", "Ear_right_1_y",
"Ear_right_1_p", "Nose_1_x", "Nose_1_y", "Nose_1_p", "Center_1_x", "Center_1_y", "Center_1_p",
"Lat_left_1_x", "Lat_left_1_y",
"Lat_left_1_p", "Lat_right_1_x", "Lat_right_1_y", "Lat_right_1_p", "Tail_base_1_x",
"Tail_base_1_y", "Tail_base_1_p", "Tail_end_1_x", "Tail_end_1_y", "Tail_end_1_p",
"Ear_left_2_x",
"Ear_left_2_y", "Ear_left_2_p", "Ear_right_2_x", "Ear_right_2_y", "Ear_right_2_p",
"Nose_2_x", "Nose_2_y", "Nose_2_p", "Center_2_x", "Center_2_y", "Center_2_p", "Lat_left_2_x",
"Lat_left_2_y",
"Lat_left_2_p", "Lat_right_2_x", "Lat_right_2_y", "Lat_right_2_p", "Tail_base_2_x",
"Tail_base_2_y", "Tail_base_2_p", "Tail_end_2_x", "Tail_end_2_y", "Tail_end_2_p"]
csv_df = read_df(currentFile, wfileType)
try:
csv_df = csv_df.set_index('scorer')
except KeyError:
pass
csv_df.columns = columnHeaders
csv_df = csv_df.fillna(0)
csv_df = csv_df.apply(pd.to_numeric)
csv_df = csv_df.reset_index()
csv_df = csv_df.reset_index(drop=True)
print('Evaluating convex hulls...')
-or-6013-error-when-extracting-the-features')
_2_y': 'Lat_left_2_y_shifted',
'Lat_left_2_p': 'Lat_left_2_p_shifted', 'Lat_right_2_x': 'Lat_right_2_x_shifted',
'Lat_right_2_y': 'Lat_right_2_y_shifted', \
'Lat_right_2_p': 'Lat_right_2_p_shifted', 'Tail_base_2_x': 'Tail_base_2_x_shifted',
'Tail_base_2_y': 'Tail_base_2_y_shifted', \
'Tail_base_2_p': 'Tail_base_2_p_shifted', 'Tail_end_2_x': 'Tail_end_2_x_shifted',
'Tail_end_2_y': 'Tail_end_2_y_shifted', 'Tail_end_2_p': 'Tail_end_2_p_shifted',
'Mouse_1_poly_area': 'Mouse_1_poly_area_shifted',
'Mouse_2_poly_area': 'Mouse_2_poly_area_shifted'})
csv_df_combined = pd.concat([csv_df, csv_df_shifted], axis=1, join='inner')
csv_df_combined = csv_df_combined.fillna(0)
csv_df_combined = csv_df_combined.reset_index(drop=True)
print('Calculating euclidean distances...')
alues, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_lateral_left'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_lateral_right'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_lateral_right'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Centroid_to_lateral_left'] = EuclidianDistCald(csv_df['Center_1_x'].values, csv_df['Center_1_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Centroid_to_lateral_left'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Centroid_to_lateral_right'] = EuclidianDistCald(csv_df['Center_1_x'].values, csv_df['Center_1_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Centroid_to_lateral_right'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Centroid_distance'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Center_1_x'].values, csv_df['Center_1_y'].values, currPixPerMM)
csv_df['Nose_to_nose_distance'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_lat_left'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_lat_right'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_lat_left'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_lat_right'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_tail_base'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Tail_base_2_x'].values, csv_df['Tail_base_2_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_tail_base'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Tail_base_1_x'].values, csv_df['Tail_base_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_centroid'] = EuclidianDistCald(csv_df_combined['Center_1_x_shifted'].values, csv_df_combined['Center_1_y_shifted'].values,csv_df_combined['Center_1_x'].values, csv_df_combined['Center_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_centroid'] = EuclidianDistCald(csv_df_combined['Center_2_x_shifted'].values, csv_df_combined['Center_2_y_shifted'].values,csv_df_combined['Center_2_x'].values, csv_df_combined['Center_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_nose'] = EuclidianDistCald(csv_df_combined['Nose_1_x_shifted'].values, csv_df_combined['Nose_1_y_shifted'].values,csv_df_combined['Nose_1_x'].values, csv_df_combined['Nose_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_nose'] = EuclidianDistCald(csv_df_combined['Nose_2_x_shifted'].values, csv_df_combined['Nose_2_y_shifted'].values,csv_df_combined['Nose_2_x'].values, csv_df_combined['Nose_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_tail_base'] = EuclidianDistCald(csv_df_combined['Tail_base_1_x_shifted'].values, csv_df_combined['Tail_base_1_y_shifted'].values,csv_df_combined['Tail_base_1_x'].values, csv_df_combined['Tail_base_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_tail_base'] = EuclidianDistCald(csv_df_combined['Tail_base_2_x_shifted'].values, csv_df_combined['Tail_base_2_y_shifted'].values,csv_df_combined['Tail_base_2_x'].values, csv_df_combined['Tail_base_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_tail_end'] = EuclidianDistCald(csv_df_combined['Tail_end_1_x_shifted'].values, csv_df_combined['Tail_end_1_y_shifted'].values,csv_df_combined['Tail_end_1_x'].values, csv_df_combined['Tail_end_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_tail_end'] = EuclidianDistCald(csv_df_combined['Tail_end_2_x_shifted'].values, csv_df_combined['Tail_end_2_y_shifted'].values,csv_df_combined['Tail_end_2_x'].values, csv_df_combined['Tail_end_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_left_ear'] = EuclidianDistCald(csv_df_combined['Ear_left_1_x_shifted'].values, csv_df_combined['Ear_left_1_y_shifted'].values,csv_df_combined['Ear_left_1_x'].values, csv_df_combined['Ear_left_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_left_ear'] = EuclidianDistCald(csv_df_combined['Ear_left_2_x_shifted'].values, csv_df_combined['Ear_left_2_y_shifted'].values,csv_df_combined['Ear_left_2_x'].values, csv_df_combined['Ear_left_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_right_ear'] = EuclidianDistCald(csv_df_combined['Ear_right_1_x_shifted'].values, csv_df_combined['Ear_right_1_y_shifted'].values,csv_df_combined['Ear_right_1_x'].values, csv_df_combined['Ear_right_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_right_ear'] = EuclidianDistCald(csv_df_combined['Ear_right_2_x_shifted'].values, csv_df_combined['Ear_right_2_y_shifted'].values,csv_df_combined['Ear_right_2_x'].values, csv_df_combined['Ear_right_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_lateral_left'] = EuclidianDistCald(csv_df_combined['Lat_left_1_x_shifted'].values, csv_df_combined['Lat_left_1_y_shifted'].values,csv_df_combined['Lat_left_1_x'].values, csv_df_combined['Lat_left_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_lateral_left'] = EuclidianDistCald(csv_df_combined['Lat_left_2_x_shifted'].values, csv_df_combined['Lat_left_2_y_shifted'].values,csv_df_combined['Lat_left_2_x'].values, csv_df_combined['Lat_left_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_lateral_right'] = EuclidianDistCald(csv_df_combined['Lat_right_1_x_shifted'].values, csv_df_combined['Lat_right_1_y_shifted'].values,csv_df_combined['Lat_right_1_x'].values, csv_df_combined['Lat_right_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_lateral_right'] = EuclidianDistCald(csv_df_combined['Lat_right_2_x_shifted'].values, csv_df_combined['Lat_right_2_y_shifted'].values,csv_df_combined['Lat_right_2_x'].values, csv_df_combined['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_polygon_size_change'] = pd.eval("csv_df_combined.Mouse_1_poly_area_shifted - csv_df_combined.Mouse_1_poly_area")
csv_df['Mouse_2_polygon_size_change'] = pd.eval("csv_df_combined.Mouse_2_poly_area_shifted - csv_df_combined.Mouse_2_poly_area")
print('Calculating hull variables...')
= M2_dist_euclidean[M2_dist_euclidean != 0]
M2_hull_large_euclidean = np.amax(M2_dist_euclidean)
M2_hull_small_euclidean = np.min(M2_dist_euclidean)
M2_hull_mean_euclidean = np.mean(M2_dist_euclidean)
M2_hull_sum_euclidean = np.sum(M2_dist_euclidean)
M2_hull_large_euclidean_list.append(M2_hull_large_euclidean)
M2_hull_small_euclidean_list.append(M2_hull_small_euclidean)
M2_hull_mean_euclidean_list.append(M2_hull_mean_euclidean)
M2_hull_sum_euclidean_list.append(M2_hull_sum_euclidean)
csv_df['M1_largest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_large_euclidean_list))
csv_df['M1_smallest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_small_euclidean_list))
csv_df['M1_mean_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_mean_euclidean_list))
csv_df['M1_sum_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_sum_euclidean_list))
csv_df['M2_largest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_large_euclidean_list))
csv_df['M2_smallest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_small_euclidean_list))
csv_df['M2_mean_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_mean_euclidean_list))
csv_df['M2_sum_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_sum_euclidean_list))
csv_df['Sum_euclidean_distance_hull_M1_M2'] = (csv_df['M1_sum_euclidean_distance_hull'] + csv_df['M2_sum_euclidean_distance_hull'])
g(roll_windows[i], min_periods=1).mean()
currentColName = 'Mouse1_width_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_width_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Mouse2_width_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Mouse2_width_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_mean_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_mean_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_mean_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_mean_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_smallest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_smallest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_smallest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_largest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_largest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_largest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_largest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_all_bodyparts_both_mice_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Total_movement_all_bodyparts_both_mice_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_centroids_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Total_movement_centroids_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Total_movement_centroids_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_base_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_base_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_base_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_base_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_base_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_base_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Centroid_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Centroid_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Centroid_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Centroid_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Centroid_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Centroid_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_end_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_end_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_end_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_end_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_end_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_end_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Nose_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Nose_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Nose_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Nose_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Nose_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Nose_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).sum()
currentColName_M2 = 'Tail_end_relative_to_tail_base_centroid_nose_M2_mean_' + str(roll_windows_values[i])
tail_end_col_name_M2 = 'Tail_end_movement_M2_mean_' + str(roll_windows_values[i])
tail_base_col_name_M2 = 'Tail_base_movement_M2_mean_' + str(roll_windows_values[i])
centroid_col_name_M2 = 'Centroid_movement_M2_mean_' + str(roll_windows_values[i])
nose_col_name_M2 = 'Nose_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName_M1] = csv_df[tail_end_col_name] - (
csv_df[tail_base_col_name] + csv_df[centroid_col_name] + csv_df[nose_col_name])
csv_df[currentColName_M2] = csv_df[tail_end_col_name_M2] - (
csv_df[tail_base_col_name_M2] + csv_df[centroid_col_name_M2] + csv_df[nose_col_name_M2])
e = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Total_angle_both_mice_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
entColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
idList_Mouse2_x[i][0], CurrCentroidList_Mouse2_y[i][0],
CurrCentroidList_Mouse2_x[i][1], CurrCentroidList_Mouse2_y[i][1],
CurrCentroidList_Mouse2_x[i][2], CurrCentroidList_Mouse2_y[i][2]))
tortuosity_List_M1.append(currMovementAngle_mouse1)
tortuosity_List_M2.append(currMovementAngle_mouse2)
tortuosity_M1.append(sum(tortuosity_List_M1) / (2 * math.pi))
tortuosity_M2.append(sum(tortuosity_List_M2) / (2 * math.pi))
start += 1
end += 1
currentColName1 = str('Tortuosity_Mouse1_') + str(roll_windows_values[k])
csv_df[currentColName1] = tortuosity_M1
| true | true |
f7226947df59ea580f5caef7fa0aece55a646706 | 3,660 | py | Python | ros/src/twist_controller/twist_controller.py | uppala75/System-Integration | f8086104858386732f0c9977d6a4c552c22f0e81 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | uppala75/System-Integration | f8086104858386732f0c9977d6a4c552c22f0e81 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | uppala75/System-Integration | f8086104858386732f0c9977d6a4c552c22f0e81 | [
"MIT"
] | null | null | null | import rospy
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
"""
This file contains a stub of the Controller class. You can use this class to implement vehicle control.
For example, the control method can take twist data as input and return throttle, brake, and steering values.
Within this class, you can import and use the provided pid.py and lowpass.py if needed for acceleration.
And yaw_controller.py for steering.
Note that it is not required for you to use these, and you are free to write and import other controllers.
"""
GAS_DENSITY = 2.858  # fuel density -- presumably kg per US gallon (for fuel mass); TODO confirm
ONE_MPH = 0.44704  # metres per second in one mile per hour
class Controller(object):
    """Drive-by-wire controller producing (throttle, brake, steering).

    Glues together the provided helpers: a PID controller for throttle,
    a low-pass filter to smooth the measured velocity, and a
    YawController for steering.  The sample time is derived from
    consecutive calls, which the DBW node issues at 50 Hz.
    """

    def __init__(self, **ros_param):
        # Vehicle/actuation parameters handed over by the DBW node.
        self.vehicle_mass = ros_param['vehicle_mass']
        self.brake_deadband = ros_param['brake_deadband']
        self.decel_limit = ros_param['decel_limit']
        self.accel_limit = ros_param['accel_limit']
        self.wheel_radius = ros_param['wheel_radius']

        self.last_time = rospy.get_time()

        # Smooths the noisy velocity signal: val = w * cur + (1 - w) * prev.
        self.vel_lpf = LowPassFilter(0.5, .02)

        # The yaw controller requires a (small) non-zero minimum speed.
        self.steer_controller = YawController(0.1, **ros_param)

        self.throttle_lpf = LowPassFilter(0.05, 0.02)

        # Throttle PID: gains (kp, ki, kd), output clamped to [0, accel_limit].
        self.throttle_controller = PID(0.5, 0.005, 0.1, 0., self.accel_limit)

    def control(self, target_linear_velocity, target_angular_velocity,
                cur_linear_velocity, dbw_status):
        """Compute one actuation triple; all zeros while DBW is disengaged."""
        if not dbw_status:
            # Driver took over: clear the PID state so the integral term
            # does not wind up, and command nothing.
            self.throttle_controller.reset()
            return 0., 0., 0.

        cur_linear_velocity = self.vel_lpf.filt(cur_linear_velocity)

        steering = self.steer_controller.get_steering(
            target_linear_velocity, target_angular_velocity, cur_linear_velocity)

        # Throttle from the PID over the velocity error and elapsed time.
        vel_err = target_linear_velocity - cur_linear_velocity
        now = rospy.get_time()
        throttle = self.throttle_controller.step(vel_err, now - self.last_time)
        self.last_time = now

        brake = 0
        if target_linear_velocity == 0. and cur_linear_velocity < 0.1:
            # Vehicle should be stationary: hold it with a constant torque
            # (e.g. while waiting at a red traffic light).
            throttle = 0
            brake = 400  # N*m
        elif throttle < .1 and vel_err < 0.:
            throttle = 0.
            decel = max(vel_err, self.decel_limit)  # note: this value is < 0
            if abs(decel) > self.brake_deadband:
                brake = abs(decel) * self.vehicle_mass * self.wheel_radius
            else:
                # Inside the deadband no braking is needed; releasing the
                # pedal lets friction slow the car down.
                brake = 0

        return throttle, brake, steering

    def reset(self):
        """Reset the throttle PID state."""
        self.throttle_controller.reset()
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
    """Drive-by-wire controller.

    Combines a PID (throttle), a low-pass filter (velocity smoothing)
    and a yaw controller (steering) to turn target/current twist values
    into throttle, brake and steering commands.
    """
    def __init__(self, **ros_param):
        # Vehicle/actuation parameters supplied by the DBW node.
        self.vehicle_mass = ros_param['vehicle_mass']
        self.brake_deadband = ros_param['brake_deadband']
        self.decel_limit = ros_param['decel_limit']
        self.accel_limit = ros_param['accel_limit']
        self.wheel_radius = ros_param['wheel_radius']
        self.last_time = rospy.get_time()
        # Low-pass filter to smooth the noisy measured velocity.
        self.vel_lpf = LowPassFilter(0.5, .02)
        min_speed = 0.1  # minimum speed required by the yaw controller
        self.steer_controller = YawController(min_speed, **ros_param)
        self.throttle_lpf = LowPassFilter(0.05, 0.02)  # NOTE(review): created but never used below
        # Throttle PID gains; output clamped to [acc_min, acc_max].
        kp = 0.5
        ki = 0.005
        kd = 0.1
        acc_min = 0.
        acc_max = self.accel_limit
        self.throttle_controller = PID(kp, ki, kd, acc_min, acc_max)
    def control(self, target_linear_velocity, target_angular_velocity,
                cur_linear_velocity, dbw_status):
        """Return (throttle, brake, steering) for one control cycle."""
        if not dbw_status:
            # Manual driving: reset the PID so its integral term does not
            # wind up, and command nothing.
            self.throttle_controller.reset()
            return 0., 0., 0.
        cur_linear_velocity = self.vel_lpf.filt(cur_linear_velocity)
        steering = self.steer_controller.get_steering(target_linear_velocity,
                                                      target_angular_velocity,
                                                      cur_linear_velocity)
        # PID throttle over the velocity error and elapsed wall time.
        vel_err = target_linear_velocity - cur_linear_velocity
        current_time = rospy.get_time()
        sample_time = current_time - self.last_time
        self.last_time = current_time
        throttle = self.throttle_controller.step(vel_err, sample_time)
        brake = 0
        if target_linear_velocity == 0. and cur_linear_velocity < 0.1:
            # Hold the stopped car in place with a constant torque (N*m).
            throttle = 0
            brake = 400
        elif throttle < .1 and vel_err < 0.:
            throttle = 0.
            decel_velocity = max(vel_err, self.decel_limit)  # note: value is < 0
            # Within the brake deadband just coast: the car will decelerate
            # by friction once the pedal is released.
            if abs(decel_velocity) > self.brake_deadband:
                brake = abs(decel_velocity) * self.vehicle_mass * self.wheel_radius
            else:
                brake = 0
        return throttle, brake, steering
    def reset(self):
        """Reset the throttle PID state."""
        self.throttle_controller.reset()
f7226a323835515ea6c0252f49673228d15dc7f9 | 2,030 | py | Python | lab19/thomas.py | Desnord/lab-mc102 | 470e5d942cf57305d8ba0b272f4c5f9aad4de11f | [
"Apache-2.0"
] | null | null | null | lab19/thomas.py | Desnord/lab-mc102 | 470e5d942cf57305d8ba0b272f4c5f9aad4de11f | [
"Apache-2.0"
] | null | null | null | lab19/thomas.py | Desnord/lab-mc102 | 470e5d942cf57305d8ba0b272f4c5f9aad4de11f | [
"Apache-2.0"
] | null | null | null | # Thomas (Desnord)
# O objetivo desta tarefa é fazer um programa
# que use recursividade e que, dada a matriz
# que descreve a hierarquia de uma empresa,
# encontre a cadeia hierárquica relativa a
# um determinado funcionário.
#entrada:
# A primeira linha contém dois inteiros: n,
# o número de funcionários entre 3 e 30, e k,
# o identificador numérico do funcionário sobre
# o qual deseja-se conhecer a cadeira hierárquica.
# A seguir tem-se n linhas que correspondem as
# linhas da matriz que descrevem a hierarquia
# da empresa.
#saída:
# Na saída devem ser impressos os números
# que identificam todos os funcionários
# que estejam na cadeia hierárquica do
# funcionário k, começando pelo próprio,
# e então imprimindo, em ordem crescente
# por identificador, os outros funcionários.
'''------------------------------------------'''
#lendo entradas
nk = input().split()
n = int(nk[0])
k = int(nk[1])
matriz = []
for i in range(n):
linha = input().split()
matriz.append(linha)
resultado = []
resultado.append(k)
# Recursive helper that collects the chain of command.
def cadeiahier(mat, res):
    """Depth-first expansion of the hierarchy chain.

    ``mat`` is the 0/1 adjacency matrix (rows of strings); ``res`` holds
    the employee ids collected so far and is mutated in place.  The node
    being expanded is the last id in ``res`` when the call starts.
    """
    current = res[len(res) - 1]
    row = mat[current]
    for superior, flag in enumerate(row):
        if int(flag) == 1:
            res.append(superior)
            res = cadeiahier(mat, res)
    return res
# Sorting helper (same name and contract as the original cocktail sort).
def ckts(res):
    """Sort ``res`` ascending in place and return the same list.

    Replaces the original hand-rolled O(n^2) cocktail-shaker sort with
    the built-in Timsort (O(n log n)); the contract is unchanged: ``res``
    is mutated in place and returned so calls can be chained.
    """
    res.sort()
    return res
# Build the hierarchy chain starting from employee k.
resultado = cadeiahier(matriz,resultado)
# Format the required output: k itself first, then the other ids of the
# chain in ascending order (or just k when the chain is only himself).
resultado.remove(k)
if(len(resultado) != 0):
    resultado = ckts(resultado)
    resstr = ' '.join(map(str, resultado))
    print(str(k) +' '+ str(resstr))
else:
    print(k)
| 24.166667 | 51 | 0.622167 |
nk = input().split()
n = int(nk[0])
k = int(nk[1])
matriz = []
for i in range(n):
linha = input().split()
matriz.append(linha)
resultado = []
resultado.append(k)
def cadeiahier(mat, res):
aux = res[:]
for i in range(len(mat[aux[len(aux)-1]])):
if(int(mat[aux[len(aux)-1]][i]) == 1):
res.append(i)
res = cadeiahier(mat, res)
return res
def ckts(res):
for w in range(len(res)-1, 0, -1):
trocou = False
for i in range(w, 0, -1):
if (res[i] < res[i-1]):
res[i], res[i-1] = res[i-1], res[i]
trocou = True
for i in range(w):
if (res[i] > res[i+1]):
res[i], res[i+1] = res[i+1], res[i]
trocou = True
if not trocou:
return res
return res
resultado = cadeiahier(matriz,resultado)
resultado.remove(k)
if(len(resultado) != 0):
resultado = ckts(resultado)
resstr = ' '.join(map(str, resultado))
print(str(k) +' '+ str(resstr))
else:
print(k)
| true | true |
f7226a34f9a9cf0625f7c12efdf4417b6efcc66d | 113 | py | Python | sciapp/object/__init__.py | Pad0y/imagepy | 23f41b64ade02f94b566b0d23a4b6459c1a1578d | [
"BSD-4-Clause"
] | null | null | null | sciapp/object/__init__.py | Pad0y/imagepy | 23f41b64ade02f94b566b0d23a4b6459c1a1578d | [
"BSD-4-Clause"
] | null | null | null | sciapp/object/__init__.py | Pad0y/imagepy | 23f41b64ade02f94b566b0d23a4b6459c1a1578d | [
"BSD-4-Clause"
] | null | null | null | from .shape import *
from .image import Image
from .table import Table
from .roi import *
from .surface import *
| 18.833333 | 24 | 0.752212 | from .shape import *
from .image import Image
from .table import Table
from .roi import *
from .surface import *
| true | true |
f7226af5637df4ce18dc689955e470c333471afd | 559 | py | Python | prettyqt/quick/quickitemgrabresult.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | prettyqt/quick/quickitemgrabresult.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | prettyqt/quick/quickitemgrabresult.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | from __future__ import annotations
from prettyqt import core, gui
from prettyqt.qt import QtQuick
QtQuick.QQuickItemGrabResult.__bases__ = (core.Object,)
class QuickItemGrabResult:
    """Convenience wrapper around ``QtQuick.QQuickItemGrabResult``.

    Attribute access that the wrapper does not define itself is
    forwarded to the wrapped Qt object, so this can be used anywhere
    the raw grab result is expected.
    """

    def __init__(self, item: QtQuick.QQuickItemGrabResult):
        self.item = item

    def __getattr__(self, name):
        # Only invoked for attributes *not* found on the wrapper itself.
        return getattr(self.item, name)

    def get_image(self) -> gui.Image:
        """Return the grabbed frame wrapped as a prettyqt ``gui.Image``."""
        grabbed = self.image()
        return gui.Image(grabbed)

    def get_url(self) -> core.Url:
        """Return the source URL wrapped as a prettyqt ``core.Url``."""
        return core.Url(self.url())
# if __name__ == "__main__":
# item = QuickItemGrabResult()
| 21.5 | 59 | 0.68873 | from __future__ import annotations
from prettyqt import core, gui
from prettyqt.qt import QtQuick
QtQuick.QQuickItemGrabResult.__bases__ = (core.Object,)
class QuickItemGrabResult:
def __init__(self, item: QtQuick.QQuickItemGrabResult):
self.item = item
def __getattr__(self, val):
return getattr(self.item, val)
def get_image(self) -> gui.Image:
return gui.Image(self.image())
def get_url(self) -> core.Url:
return core.Url(self.url())
| true | true |
f7226c42efe353297973425885128f29c9a1a476 | 2,182 | py | Python | posts/forms/admin.py | ollkostin/vas3k.club | 083af267499fe755ccd717712bc29c92fc78ef0a | [
"MIT"
] | 496 | 2020-04-24T04:20:32.000Z | 2022-03-31T21:55:57.000Z | posts/forms/admin.py | YAR-SEN/vas3k.club | 0571d726f1e24b2574457a659c9be48c4984b3ac | [
"MIT"
] | 642 | 2020-04-24T11:54:13.000Z | 2022-03-26T15:41:06.000Z | posts/forms/admin.py | YAR-SEN/vas3k.club | 0571d726f1e24b2574457a659c9be48c4984b3ac | [
"MIT"
] | 243 | 2020-04-24T11:49:11.000Z | 2022-03-24T18:38:48.000Z | from django import forms
from common.data.labels import LABELS
from posts.models.post import Post
class PostCuratorForm(forms.Form):
    """Moderation actions available to curators for a single post.

    Every field is optional; the handling view applies only the actions
    that were actually selected.  Labels are user-facing Russian strings.
    """
    change_type = forms.ChoiceField(
        label="Сменить тип поста",
        choices=[(None, "---")] + Post.TYPES,
        required=False,
    )
    new_label = forms.ChoiceField(
        label="Выдать лейбл",
        choices=[(None, "---")] + [(key, value.get("title")) for key, value in LABELS.items()],
        required=False,
    )
    remove_label = forms.BooleanField(
        # Bug fix: corrected the user-facing label typo
        # "текуший" -> "текущий".
        label="Удалить текущий лейбл",
        required=False
    )
    add_pin = forms.BooleanField(
        label="Запинить",
        required=False
    )
    pin_days = forms.IntegerField(
        label="На сколько дней пин?",
        initial=1,
        required=False
    )
    remove_pin = forms.BooleanField(
        label="Отпинить обратно",
        required=False
    )
    move_up = forms.BooleanField(
        label="Подбросить на главной",
        required=False
    )
    move_down = forms.BooleanField(
        label="Опустить на главной",
        required=False
    )
    shadow_ban = forms.BooleanField(
        label="Шадоу бан (редко!)",
        required=False,
    )
    hide_from_feeds = forms.BooleanField(
        label="Скрыть с главной",
        required=False,
    )
class PostAdminForm(PostCuratorForm):
    """Admin moderation form: all curator actions plus admin-only ones
    (closing comments, transferring ownership, refreshing linked posts)."""
    close_comments = forms.BooleanField(
        label="Закрыть комменты",
        required=False,
    )
    transfer_ownership = forms.CharField(
        # Free-form field; presumably expects a username -- confirm in
        # the view that processes this form.
        label="Передать владение постом другому юзернейму",
        required=False,
    )
    refresh_linked = forms.BooleanField(
        label="Обновить связанные посты",
        required=False,
    )
class PostAnnounceForm(forms.Form):
    """Compose an announcement for a post: text plus an optional image."""
    text = forms.CharField(
        label="Текст анонса",
        required=True,
        max_length=500000,
        widget=forms.Textarea(
            attrs={
                # Mirror the server-side max_length on the client widget.
                "maxlength": 500000,
            }
        ),
    )
    image = forms.CharField(
        # Free-form value; presumably an image URL -- confirm in the view.
        label="Картинка",
        required=False,
    )
    with_image = forms.BooleanField(
        label="Постим с картинкой?",
        required=False,
        initial=True,
    )
| 22.040404 | 95 | 0.589826 | from django import forms
from common.data.labels import LABELS
from posts.models.post import Post
class PostCuratorForm(forms.Form):
change_type = forms.ChoiceField(
label="Сменить тип поста",
choices=[(None, "---")] + Post.TYPES,
required=False,
)
new_label = forms.ChoiceField(
label="Выдать лейбл",
choices=[(None, "---")] + [(key, value.get("title")) for key, value in LABELS.items()],
required=False,
)
remove_label = forms.BooleanField(
label="Удалить текуший лейбл",
required=False
)
add_pin = forms.BooleanField(
label="Запинить",
required=False
)
pin_days = forms.IntegerField(
label="На сколько дней пин?",
initial=1,
required=False
)
remove_pin = forms.BooleanField(
label="Отпинить обратно",
required=False
)
move_up = forms.BooleanField(
label="Подбросить на главной",
required=False
)
move_down = forms.BooleanField(
label="Опустить на главной",
required=False
)
shadow_ban = forms.BooleanField(
label="Шадоу бан (редко!)",
required=False,
)
hide_from_feeds = forms.BooleanField(
label="Скрыть с главной",
required=False,
)
class PostAdminForm(PostCuratorForm):
close_comments = forms.BooleanField(
label="Закрыть комменты",
required=False,
)
transfer_ownership = forms.CharField(
label="Передать владение постом другому юзернейму",
required=False,
)
refresh_linked = forms.BooleanField(
label="Обновить связанные посты",
required=False,
)
class PostAnnounceForm(forms.Form):
text = forms.CharField(
label="Текст анонса",
required=True,
max_length=500000,
widget=forms.Textarea(
attrs={
"maxlength": 500000,
}
),
)
image = forms.CharField(
label="Картинка",
required=False,
)
with_image = forms.BooleanField(
label="Постим с картинкой?",
required=False,
initial=True,
)
| true | true |
f7226d7a5f96836dbd7186da5616d1c93a8446a6 | 3,039 | py | Python | setup.py | TidyData/Orion | 3fd9fe64ce3eead87ea5fb8491d2af29c9d8b09f | [
"MIT"
] | 3 | 2021-05-16T09:02:38.000Z | 2021-07-04T11:03:54.000Z | setup.py | Jaehoon9201/Orion | 1b63b3032d0df84443d83a19713bbf8838121c93 | [
"MIT"
] | null | null | null | setup.py | Jaehoon9201/Orion | 1b63b3032d0df84443d83a19713bbf8838121c93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
try:
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
except IOError:
readme = ''
try:
with open('HISTORY.md', encoding='utf-8') as history_file:
history = history_file.read()
except IOError:
history = ''
# Runtime dependencies.  Each entry must be a valid PEP 508 requirement
# string; multiple version clauses must be comma-separated.
install_requires = [
    's3fs>=0.2.2,<0.5',
    'baytune>=0.2.3,<0.3',
    'mlblocks>=0.3.0,<0.4',
    'mlprimitives>=0.2.2,<0.3',
    'mongoengine>=0.16.3,<0.17',
    'numpy>=1.15.4,<1.17',
    'pandas>=0.23.4,<0.25',
    'pymongo>=3.7.2,<4',
    'scikit-learn>=0.20.1,<0.21',
    'tabulate>=0.8.3,<0.9',
    'Keras>=2.1.6,<2.4',
    'numba>=0.48,<0.52',
    'pyts>=0.9,<0.11',
    'azure-cognitiveservices-anomalydetector>=0.3,<0.4',
    # Bug fix: the two clauses were fused ('>=1.3.6<1.4'), which is not
    # a valid PEP 508 version specifier.
    'xlsxwriter>=1.3.6,<1.4',
    # fix conflict
    'h5py<2.11.0,>=2.10.0',
]
setup_requires = [
'pytest-runner>=2.11.1',
]
tests_require = [
'pytest>=3.4.2',
'pytest-cov>=2.6.0',
'rundoc>=0.4.3,<0.5',
]
development_requires = [
# general
'pip>=9.0.1',
'bumpversion>=0.5.3,<0.6',
'watchdog>=0.8.3,<0.11',
# docs
'm2r2>=0.2.5,<0.3',
'nbsphinx>=0.5.0,<0.7',
'Sphinx>=3,<3.3',
'pydata-sphinx-theme',
'autodocsumm>=0.1.10,<1',
'ipython>=6.5,<7.5',
# style check
'flake8>=3.7.7,<4',
'isort>=4.3.4,<5',
# fix style issues
'autoflake>=1.2,<2',
'autopep8>=1.4.3,<2',
# distribute on PyPI
'twine>=1.10.0,<4',
'wheel>=0.30.0',
# Advanced testing
'coverage>=4.5.1,<6',
'tox>=2.9.1,<4',
# Documentation style
'pydocstyle==3.0.0,<4',
]
setup(
author="MIT Data To AI Lab",
author_email='dailabmit@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Orion is a machine learning library built for data generated by satellites.",
entry_points={
'console_scripts': [
'orion=orion.cli:main'
],
'mlblocks': [
'primitives=orion:MLBLOCKS_PRIMITIVES',
'pipelines=orion:MLBLOCKS_PIPELINES'
],
},
extras_require={
'test': tests_require,
'dev': development_requires + tests_require,
},
include_package_data=True,
install_requires=install_requires,
keywords='orion',
license="MIT license",
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
name='orion-ml',
packages=find_packages(include=['orion', 'orion.*']),
python_requires='>=3.6,<3.8',
setup_requires=setup_requires,
test_suite='tests',
tests_require=tests_require,
url='https://github.com/D3-AI/Orion',
version='0.1.8.dev0',
zip_safe=False,
)
| 24.119048 | 94 | 0.571241 |
from setuptools import setup, find_packages
try:
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
except IOError:
readme = ''
try:
with open('HISTORY.md', encoding='utf-8') as history_file:
history = history_file.read()
except IOError:
history = ''
# Runtime dependencies (PEP 508 requirement strings; version clauses
# must be comma-separated).
install_requires = [
    's3fs>=0.2.2,<0.5',
    'baytune>=0.2.3,<0.3',
    'mlblocks>=0.3.0,<0.4',
    'mlprimitives>=0.2.2,<0.3',
    'mongoengine>=0.16.3,<0.17',
    'numpy>=1.15.4,<1.17',
    'pandas>=0.23.4,<0.25',
    'pymongo>=3.7.2,<4',
    'scikit-learn>=0.20.1,<0.21',
    'tabulate>=0.8.3,<0.9',
    'Keras>=2.1.6,<2.4',
    'numba>=0.48,<0.52',
    'pyts>=0.9,<0.11',
    'azure-cognitiveservices-anomalydetector>=0.3,<0.4',
    # Bug fix: clauses were fused ('>=1.3.6<1.4'); not valid PEP 508.
    'xlsxwriter>=1.3.6,<1.4',
    'h5py<2.11.0,>=2.10.0',
]
setup_requires = [
'pytest-runner>=2.11.1',
]
tests_require = [
'pytest>=3.4.2',
'pytest-cov>=2.6.0',
'rundoc>=0.4.3,<0.5',
]
development_requires = [
'pip>=9.0.1',
'bumpversion>=0.5.3,<0.6',
'watchdog>=0.8.3,<0.11',
'm2r2>=0.2.5,<0.3',
'nbsphinx>=0.5.0,<0.7',
'Sphinx>=3,<3.3',
'pydata-sphinx-theme',
'autodocsumm>=0.1.10,<1',
'ipython>=6.5,<7.5',
'flake8>=3.7.7,<4',
'isort>=4.3.4,<5',
'autoflake>=1.2,<2',
'autopep8>=1.4.3,<2',
'twine>=1.10.0,<4',
'wheel>=0.30.0',
'coverage>=4.5.1,<6',
'tox>=2.9.1,<4',
'pydocstyle==3.0.0,<4',
]
setup(
author="MIT Data To AI Lab",
author_email='dailabmit@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Orion is a machine learning library built for data generated by satellites.",
entry_points={
'console_scripts': [
'orion=orion.cli:main'
],
'mlblocks': [
'primitives=orion:MLBLOCKS_PRIMITIVES',
'pipelines=orion:MLBLOCKS_PIPELINES'
],
},
extras_require={
'test': tests_require,
'dev': development_requires + tests_require,
},
include_package_data=True,
install_requires=install_requires,
keywords='orion',
license="MIT license",
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
name='orion-ml',
packages=find_packages(include=['orion', 'orion.*']),
python_requires='>=3.6,<3.8',
setup_requires=setup_requires,
test_suite='tests',
tests_require=tests_require,
url='https://github.com/D3-AI/Orion',
version='0.1.8.dev0',
zip_safe=False,
)
| true | true |
f7226f93d7e8e6860afd6ccd7b534a92597eec05 | 3,275 | py | Python | mediagrains/patterngenerators/video/lumasteps.py | bbc/rd-apmm-python-lib-mediagrains | 84c9de511cc53418c277867eaf143f2cc8730d02 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2018-03-26T23:49:34.000Z | 2021-12-23T10:06:09.000Z | mediagrains/patterngenerators/video/lumasteps.py | bbc/rd-apmm-python-lib-mediagrains | 84c9de511cc53418c277867eaf143f2cc8730d02 | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2018-03-21T16:45:10.000Z | 2022-03-28T13:27:34.000Z | mediagrains/patterngenerators/video/lumasteps.py | bbc/rd-apmm-python-lib-mediagrains | 84c9de511cc53418c277867eaf143f2cc8730d02 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fractions import Fraction
from .still import StillPatternGenerator
from .constants import pixel_ranges
from ...grain_constructors import VideoGrain
from ...cogenums import CogFrameFormat, CogFrameLayout
__all__ = ["LumaSteps"]
class LumaSteps(StillPatternGenerator):
    """Still test pattern of luma steps.

    Builds a single video grain whose luma rises in 8 bands across the
    frame width while both chroma planes are held at the format's
    neutral chroma value, then hands the grain to the
    StillPatternGenerator base class.
    """
    def __init__(self, src_id, flow_id, width, height,
                 rate=Fraction(25, 1),
                 cog_frame_format=CogFrameFormat.U8_444):
        if cog_frame_format not in pixel_ranges:
            raise ValueError("Not a supported format for this generator")

        # Per-format parameters from pixel_ranges: bytes per sample,
        # luma (offset, range) and the neutral chroma value.
        _bpp = pixel_ranges[cog_frame_format][0]
        _offset = pixel_ranges[cog_frame_format][1][0]
        _range = pixel_ranges[cog_frame_format][1][1]
        _steps = 8
        _chromaval = pixel_ranges[cog_frame_format][2][0]

        vg = VideoGrain(src_id, flow_id,
                        rate=rate,
                        cog_frame_format=cog_frame_format,
                        cog_frame_layout=CogFrameLayout.FULL_FRAME,
                        width=width,
                        height=height)

        # Precompute one staircase line; 16-bit samples (_bpp == 2) are
        # written low byte first (little-endian).
        line = bytearray(width*_bpp)
        for x in range(0, width):
            pos = x//(width//_steps)
            if _bpp == 1:
                line[x] = (_offset + ((pos * _range)//_steps)) & 0xFF
            elif _bpp == 2:
                line[2*x + 0] = (_offset + ((pos * _range)//_steps)) & 0xFF
                line[2*x + 1] = ((_offset + ((pos * _range)//_steps)) >> 8) & 0xFF

        # Copy the precomputed line into every row of the luma plane,
        # honouring the plane's stride.
        for y in range(0, height):
            vg.data[
                vg.components[0].offset +
                y*vg.components[0].stride:vg.components[0].offset +
                y*vg.components[0].stride +
                vg.components[0].width*_bpp
            ] = line

        # Fill both chroma planes (components 1 and 2) with the neutral
        # chroma value; again low byte first for 16-bit formats.
        if _bpp == 1:
            for y in range(0, vg.components[1].height):
                u = vg.components[1].offset + y*vg.components[1].stride
                v = vg.components[2].offset + y*vg.components[2].stride
                for x in range(0, vg.components[1].width):
                    vg.data[u + x] = _chromaval
                    vg.data[v + x] = _chromaval
        else:
            for y in range(0, vg.components[1].height):
                u = vg.components[1].offset + y*vg.components[1].stride
                v = vg.components[2].offset + y*vg.components[2].stride
                for x in range(0, vg.components[1].width):
                    vg.data[u + 2*x + 0] = _chromaval & 0xFF
                    vg.data[u + 2*x + 1] = (_chromaval >> 8) & 0xFF
                    vg.data[v + 2*x + 0] = _chromaval & 0xFF
                    vg.data[v + 2*x + 1] = (_chromaval >> 8) & 0xFF

        super().__init__(vg)
| 39.939024 | 82 | 0.574351 |
from fractions import Fraction
from .still import StillPatternGenerator
from .constants import pixel_ranges
from ...grain_constructors import VideoGrain
from ...cogenums import CogFrameFormat, CogFrameLayout
__all__ = ["LumaSteps"]
class LumaSteps(StillPatternGenerator):
def __init__(self, src_id, flow_id, width, height,
rate=Fraction(25, 1),
cog_frame_format=CogFrameFormat.U8_444):
if cog_frame_format not in pixel_ranges:
raise ValueError("Not a supported format for this generator")
_bpp = pixel_ranges[cog_frame_format][0]
_offset = pixel_ranges[cog_frame_format][1][0]
_range = pixel_ranges[cog_frame_format][1][1]
_steps = 8
_chromaval = pixel_ranges[cog_frame_format][2][0]
vg = VideoGrain(src_id, flow_id,
rate=rate,
cog_frame_format=cog_frame_format,
cog_frame_layout=CogFrameLayout.FULL_FRAME,
width=width,
height=height)
line = bytearray(width*_bpp)
for x in range(0, width):
pos = x//(width//_steps)
if _bpp == 1:
line[x] = (_offset + ((pos * _range)//_steps)) & 0xFF
elif _bpp == 2:
line[2*x + 0] = (_offset + ((pos * _range)//_steps)) & 0xFF
line[2*x + 1] = ((_offset + ((pos * _range)//_steps)) >> 8) & 0xFF
for y in range(0, height):
vg.data[
vg.components[0].offset +
y*vg.components[0].stride:vg.components[0].offset +
y*vg.components[0].stride +
vg.components[0].width*_bpp
] = line
if _bpp == 1:
for y in range(0, vg.components[1].height):
u = vg.components[1].offset + y*vg.components[1].stride
v = vg.components[2].offset + y*vg.components[2].stride
for x in range(0, vg.components[1].width):
vg.data[u + x] = _chromaval
vg.data[v + x] = _chromaval
else:
for y in range(0, vg.components[1].height):
u = vg.components[1].offset + y*vg.components[1].stride
v = vg.components[2].offset + y*vg.components[2].stride
for x in range(0, vg.components[1].width):
vg.data[u + 2*x + 0] = _chromaval & 0xFF
vg.data[u + 2*x + 1] = (_chromaval >> 8) & 0xFF
vg.data[v + 2*x + 0] = _chromaval & 0xFF
vg.data[v + 2*x + 1] = (_chromaval >> 8) & 0xFF
super().__init__(vg)
| true | true |
f7227065422b043e93fa32ab873710d3b7762048 | 1,105 | py | Python | fmriprep/workflows/fieldmap/__init__.py | hstojic/fmriprep | c92bf833fecf645a2fbf3943486c665a9ebc54f7 | [
"BSD-3-Clause"
] | 1 | 2018-03-14T03:10:47.000Z | 2018-03-14T03:10:47.000Z | fmriprep/workflows/fieldmap/__init__.py | hstojic/fmriprep | c92bf833fecf645a2fbf3943486c665a9ebc54f7 | [
"BSD-3-Clause"
] | null | null | null | fmriprep/workflows/fieldmap/__init__.py | hstojic/fmriprep | c92bf833fecf645a2fbf3943486c665a9ebc54f7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
.. _sdc_estimation :
Fieldmap estimation and unwarping workflows
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: fmriprep.workflows.fieldmap.base
:members:
:undoc-members:
:show-inheritance:
.. automodule:: fmriprep.workflows.fieldmap.fmap
:members:
:undoc-members:
:show-inheritance:
.. automodule:: fmriprep.workflows.fieldmap.phdiff
:members:
:undoc-members:
:show-inheritance:
.. automodule:: fmriprep.workflows.fieldmap.pepolar
:members:
:undoc-members:
:show-inheritance:
.. automodule:: fmriprep.workflows.fieldmap.syn
:members:
:undoc-members:
:show-inheritance:
.. automodule:: fmriprep.workflows.fieldmap.unwarp
:members:
:undoc-members:
:show-inheritance:
"""
from .base import init_sdc_wf
from .unwarp import init_sdc_unwarp_wf, init_fmap_unwarp_report_wf
from .pepolar import init_pepolar_unwarp_wf
from .syn import init_syn_sdc_wf
| 22.1 | 73 | 0.676018 |
from .base import init_sdc_wf
from .unwarp import init_sdc_unwarp_wf, init_fmap_unwarp_report_wf
from .pepolar import init_pepolar_unwarp_wf
from .syn import init_syn_sdc_wf
| true | true |
f72270d6b9a6b50871d1e93310ec499628718357 | 1,055 | py | Python | setup.py | ohahlev/ahlev-django-auth-rename | fd40aec14df290f59a3c393d4eb926343b76121b | [
"BSD-3-Clause"
] | null | null | null | setup.py | ohahlev/ahlev-django-auth-rename | fd40aec14df290f59a3c393d4eb926343b76121b | [
"BSD-3-Clause"
] | null | null | null | setup.py | ohahlev/ahlev-django-auth-rename | fd40aec14df290f59a3c393d4eb926343b76121b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import auth_rename
setup(
name='ahlev-django-auth-rename',
version=auth_rename.__version__,
description='to rename authentication and authorization',
long_description='to rename authentication and authorization',
long_description_content_type='text/x-rst',
author='ahlev',
author_email='ohahlev@gmail.com',
include_package_data=True,
url='https://github.com/ohahlev/ahlev-django-auth-rename/tree/%s' % auth_rename.__version__,
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe=False,
)
# Usage of setup.py:
# $> python setup.py register # registering package on PYPI
# $> python setup.py build sdist upload # build, make source dist and upload to PYPI
| 35.166667 | 96 | 0.681517 |
from setuptools import setup, find_packages
import auth_rename
setup(
name='ahlev-django-auth-rename',
version=auth_rename.__version__,
description='to rename authentication and authorization',
long_description='to rename authentication and authorization',
long_description_content_type='text/x-rst',
author='ahlev',
author_email='ohahlev@gmail.com',
include_package_data=True,
url='https://github.com/ohahlev/ahlev-django-auth-rename/tree/%s' % auth_rename.__version__,
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe=False,
)
| true | true |
f722712c2f0aef5079706dcb4b272105c1569f03 | 629 | py | Python | paur/find/find.py | tonybenoy/paur | 398b244b95946f8fc4a8817a084f6edf5205e5a4 | [
"BSD-3-Clause"
] | null | null | null | paur/find/find.py | tonybenoy/paur | 398b244b95946f8fc4a8817a084f6edf5205e5a4 | [
"BSD-3-Clause"
] | 1 | 2020-07-26T12:37:26.000Z | 2020-07-26T22:25:10.000Z | paur/find/find.py | tonybenoy/paur | 398b244b95946f8fc4a8817a084f6edf5205e5a4 | [
"BSD-3-Clause"
] | null | null | null | import typer
from find.utils import find_package
from paur.paur import console
findcmd = typer.Typer()
@findcmd.command("find")
def find(
packages: str,
in_aur: bool = typer.Option(False, help="Search only in AUR."),
in_repo: bool = typer.Option(False, help="Search only in official repo."),
) -> None:
if not (in_repo and in_aur):
in_aur, in_repo = True, True
packages_list = packages.split()
for pkg in packages_list:
package = find_package(pkg_name=pkg, aor=in_repo, aur=in_aur)
if not package:
console.print("Package ", pkg, "not", "found!", style="bold red")
| 27.347826 | 78 | 0.661367 | import typer
from find.utils import find_package
from paur.paur import console
findcmd = typer.Typer()
@findcmd.command("find")
def find(
packages: str,
in_aur: bool = typer.Option(False, help="Search only in AUR."),
in_repo: bool = typer.Option(False, help="Search only in official repo."),
) -> None:
if not (in_repo and in_aur):
in_aur, in_repo = True, True
packages_list = packages.split()
for pkg in packages_list:
package = find_package(pkg_name=pkg, aor=in_repo, aur=in_aur)
if not package:
console.print("Package ", pkg, "not", "found!", style="bold red")
| true | true |
f722713e279eaaf12f9403baa402b9aea5845cd7 | 63,779 | py | Python | ckan/tests/logic/action/test_get.py | manishjakhodecdn/ckan-custom | 03d0a150fe550841ae43ee1c1bf9ce1d5bbaab0c | [
"Apache-2.0"
] | null | null | null | ckan/tests/logic/action/test_get.py | manishjakhodecdn/ckan-custom | 03d0a150fe550841ae43ee1c1bf9ce1d5bbaab0c | [
"Apache-2.0"
] | null | null | null | ckan/tests/logic/action/test_get.py | manishjakhodecdn/ckan-custom | 03d0a150fe550841ae43ee1c1bf9ce1d5bbaab0c | [
"Apache-2.0"
] | null | null | null | import nose.tools
import ckan.logic as logic
import ckan.plugins as p
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.logic.schema as schema
eq = nose.tools.eq_
class TestPackageShow(helpers.FunctionalTestBase):
    """Tests for the ``package_show`` action."""

    def test_package_show(self):
        created = factories.Dataset()

        shown = helpers.call_action('package_show', id=created['id'])

        eq(shown['name'], created['name'])
        # Spot-check keys the read schema must always include.
        absent = set(('title', 'groups')) - set(shown.keys())
        assert not absent, absent

    def test_package_show_with_custom_schema(self):
        created = factories.Dataset()
        from ckan.logic.schema import default_show_package_schema
        show_schema = default_show_package_schema()

        # Validator that injects a constant value for its key.
        def add_new_field(key, data, errors, context):
            data[key] = 'foo'

        show_schema['new_field'] = [add_new_field]

        shown = helpers.call_action('package_show', id=created['id'],
                                    context={'schema': show_schema})

        eq(shown['new_field'], 'foo')
class TestGroupList(helpers.FunctionalTestBase):
    """Tests for the ``group_list`` action: basic listing, filtering by
    group type, sorting, and the ``all_fields`` / ``include_*`` options."""

    def test_group_list(self):
        group1 = factories.Group()
        group2 = factories.Group()

        group_list = helpers.call_action('group_list')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_in_presence_of_organizations(self):
        '''
        Getting the group_list should only return groups of type 'group' (not
        organizations).
        '''
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Organization()
        factories.Organization()

        group_list = helpers.call_action('group_list')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_in_presence_of_custom_group_types(self):
        '''Getting the group_list shouldn't return custom group types.'''
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Group(type='custom')

        group_list = helpers.call_action('group_list')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_return_custom_group(self):
        '''
        Getting the group_list with a type defined should only return
        groups of that type.
        '''
        group1 = factories.Group(type='custom')
        group2 = factories.Group(type='custom')
        factories.Group()
        factories.Group()

        group_list = helpers.call_action('group_list', type='custom')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_sort_by_package_count(self):
        factories.Group(name='aa')
        factories.Group(name='bb')
        factories.Dataset(groups=[{'name': 'aa'}, {'name': 'bb'}])
        factories.Dataset(groups=[{'name': 'bb'}])

        group_list = helpers.call_action('group_list', sort='package_count')
        # NOTE(review): sorting both sides means the descending order
        # implied by sort='package_count' is not actually verified here.
        eq(sorted(group_list), sorted(['bb', 'aa']))

    def test_group_list_sort_by_package_count_ascending(self):
        factories.Group(name='aa')
        factories.Group(name='bb')
        factories.Dataset(groups=[{'name': 'aa'}, {'name': 'bb'}])
        factories.Dataset(groups=[{'name': 'aa'}])

        group_list = helpers.call_action('group_list',
                                         sort='package_count asc')

        # 'aa' has two datasets, 'bb' has one, so ascending gives bb, aa.
        eq(group_list, ['bb', 'aa'])

    def assert_equals_expected(self, expected_dict, result_dict):
        # Helper (not a test): assert result_dict has exactly the keys
        # of expected_dict with equal values, reporting any extras.
        superfluous_keys = set(result_dict) - set(expected_dict)
        assert not superfluous_keys, 'Did not expect key: %s' % \
            ' '.join(('%s=%s' % (k, result_dict[k]) for k in superfluous_keys))
        for key in expected_dict:
            assert expected_dict[key] == result_dict[key], \
                '%s=%s should be %s' % \
                (key, result_dict[key], expected_dict[key])

    def test_group_list_all_fields(self):
        group = factories.Group()

        group_list = helpers.call_action('group_list', all_fields=True)

        # dict(...items()[:]) is a Python 2 style shallow copy of the
        # factory's group dict.
        expected_group = dict(group.items()[:])
        for field in ('users', 'tags', 'extras', 'groups'):
            if field in group_list[0]:
                del group_list[0][field]
                del expected_group[field]

        assert group_list[0] == expected_group
        # Without the include_* options none of the related objects
        # should appear in the listing.
        assert 'extras' not in group_list[0]
        assert 'tags' not in group_list[0]
        assert 'groups' not in group_list[0]
        assert 'users' not in group_list[0]
        assert 'datasets' not in group_list[0]

    def test_group_list_extras_returned(self):
        group = factories.Group(extras=[{'key': 'key1', 'value': 'val1'}])

        group_list = helpers.call_action('group_list', all_fields=True,
                                         include_extras=True)

        eq(group_list[0]['extras'], group['extras'])
        eq(group_list[0]['extras'][0]['key'], 'key1')

    # NB there is no test_group_list_tags_returned because tags are not in the
    # group_create schema (yet)

    def test_group_list_groups_returned(self):
        parent_group = factories.Group(tags=[{'name': 'river'}])
        child_group = factories.Group(groups=[{'name': parent_group['name']}],
                                      tags=[{'name': 'river'}])
        group_list = helpers.call_action('group_list', all_fields=True,
                                         include_groups=True)
        # NOTE(review): this first assignment is immediately overwritten
        # by both branches below, and parent_group_returned is never
        # used afterwards.
        child_group_returned = group_list[0]
        if group_list[0]['name'] == child_group['name']:
            child_group_returned, parent_group_returned = group_list
        else:
            child_group_returned, parent_group_returned = group_list[::-1]
        expected_parent_group = dict(parent_group.items()[:])

        eq([g['name'] for g in child_group_returned['groups']], [expected_parent_group['name']])
class TestGroupShow(helpers.FunctionalTestBase):
def test_group_show(self):
group = factories.Group(user=factories.User())
group_dict = helpers.call_action('group_show', id=group['id'],
include_datasets=True)
group_dict.pop('packages', None)
eq(group_dict, group)
def test_group_show_error_not_found(self):
nose.tools.assert_raises(
logic.NotFound,
helpers.call_action, 'group_show', id='does_not_exist')
def test_group_show_error_for_organization(self):
org = factories.Organization()
nose.tools.assert_raises(
logic.NotFound,
helpers.call_action, 'group_show', id=org['id'])
def test_group_show_packages_returned(self):
user_name = helpers.call_action('get_site_user')['name']
group = factories.Group(user=factories.User())
datasets = [
{'name': 'dataset_1', 'groups': [{'name': group['name']}]},
{'name': 'dataset_2', 'groups': [{'name': group['name']}]},
]
for dataset in datasets:
helpers.call_action('package_create',
context={'user': user_name},
**dataset)
group_dict = helpers.call_action('group_show', id=group['id'],
include_datasets=True)
assert len(group_dict['packages']) == 2
assert group_dict['package_count'] == 2
def test_group_show_packages_returned_for_view(self):
user_name = helpers.call_action('get_site_user')['name']
group = factories.Group(user=factories.User())
datasets = [
{'name': 'dataset_1', 'groups': [{'name': group['name']}]},
{'name': 'dataset_2', 'groups': [{'name': group['name']}]},
]
for dataset in datasets:
helpers.call_action('package_create',
context={'user': user_name},
**dataset)
group_dict = helpers.call_action('group_show', id=group['id'],
include_datasets=True,
context={'for_view': True})
assert len(group_dict['packages']) == 2
assert group_dict['package_count'] == 2
def test_group_show_no_packages_returned(self):
user_name = helpers.call_action('get_site_user')['name']
group = factories.Group(user=factories.User())
datasets = [
{'name': 'dataset_1', 'groups': [{'name': group['name']}]},
{'name': 'dataset_2', 'groups': [{'name': group['name']}]},
]
for dataset in datasets:
helpers.call_action('package_create',
context={'user': user_name},
**dataset)
group_dict = helpers.call_action('group_show', id=group['id'],
include_datasets=False)
assert 'packages' not in group_dict
assert group_dict['package_count'] == 2
    def test_group_show_does_not_show_private_datasets(self):
        '''group_show() should never show private datasets.
        If a dataset is a private member of an organization and also happens to
        be a member of a group, group_show() should not return the dataset as
        part of the group dict, even if the user calling group_show() is a
        member or admin of the group or the organization or is a sysadmin.
        '''
        org_member = factories.User()
        org = factories.Organization(user=org_member)
        private_dataset = factories.Dataset(user=org_member,
                                            owner_org=org['name'], private=True)
        group = factories.Group()
        # Add the private dataset to the group.
        helpers.call_action('member_create', id=group['id'],
                            object=private_dataset['id'], object_type='package',
                            capacity='public')
        # Create a member user and an admin user of the group.
        group_member = factories.User()
        helpers.call_action('member_create', id=group['id'],
                            object=group_member['id'], object_type='user',
                            capacity='member')
        group_admin = factories.User()
        helpers.call_action('member_create', id=group['id'],
                            object=group_admin['id'], object_type='user',
                            capacity='admin')
        # Create a user who isn't a member of any group or organization.
        non_member = factories.User()
        sysadmin = factories.Sysadmin()
        # None of the users should see the dataset when they call group_show().
        # This deliberately includes the sysadmin and the anonymous (None)
        # caller.
        for user in (org_member, group_member, group_admin, non_member,
                     sysadmin, None):
            if user is None:
                context = None  # No user logged-in.
            else:
                context = {'user': user['name']}
            group = helpers.call_action('group_show', id=group['id'],
                                        include_datasets=True, context=context)
            assert private_dataset['id'] not in [dataset['id'] for dataset
                                                 in group['packages']], (
                "group_show() should never show private datasets")
class TestOrganizationList(helpers.FunctionalTestBase):

    def test_organization_list(self):
        '''organization_list returns the names of all organizations.'''
        orgs = [factories.Organization(), factories.Organization()]
        org_list = helpers.call_action('organization_list')
        eq(sorted(org_list), sorted([org['name'] for org in orgs]))

    def test_organization_list_in_presence_of_groups(self):
        '''
        Getting the organization_list only returns organization group
        types.
        '''
        orgs = [factories.Organization(), factories.Organization()]
        # Plain groups must not show up in the organization list.
        factories.Group()
        factories.Group()
        org_list = helpers.call_action('organization_list')
        eq(sorted(org_list), sorted([org['name'] for org in orgs]))

    def test_organization_list_in_presence_of_custom_group_types(self):
        '''
        Getting the organization_list only returns organization group
        types.
        '''
        orgs = [factories.Organization(), factories.Organization()]
        # Custom-typed groups must not show up in the organization list.
        factories.Group(type="custom")
        factories.Group(type="custom")
        org_list = helpers.call_action('organization_list')
        eq(sorted(org_list), sorted([org['name'] for org in orgs]))
class TestOrganizationShow(helpers.FunctionalTestBase):

    def test_organization_show(self):
        '''organization_show returns the organization's dict unchanged.'''
        org = factories.Organization()
        org_dict = helpers.call_action('organization_show', id=org['id'],
                                       include_datasets=True)
        # The (empty) package list is not part of the factory's dict.
        org_dict.pop('packages', None)
        eq(org_dict, org)

    def test_organization_show_error_not_found(self):
        '''organization_show raises NotFound for an unknown id.'''
        with nose.tools.assert_raises(logic.NotFound):
            helpers.call_action('organization_show', id='does_not_exist')

    def test_organization_show_error_for_group(self):
        '''organization_show raises NotFound when given a group's id.'''
        group = factories.Group()
        with nose.tools.assert_raises(logic.NotFound):
            helpers.call_action('organization_show', id=group['id'])

    def test_organization_show_packages_returned(self):
        '''include_datasets returns the organization's datasets.'''
        site_user = helpers.call_action('get_site_user')['name']
        org = factories.Organization()
        for name in ('dataset_1', 'dataset_2'):
            helpers.call_action('package_create',
                                context={'user': site_user},
                                name=name,
                                owner_org=org['name'])
        org_dict = helpers.call_action('organization_show', id=org['id'],
                                       include_datasets=True)
        eq(len(org_dict['packages']), 2)
        eq(org_dict['package_count'], 2)

    def test_organization_show_private_packages_not_returned(self):
        '''Private datasets are excluded from organization_show.'''
        site_user = helpers.call_action('get_site_user')['name']
        org = factories.Organization()
        datasets = [
            {'name': 'dataset_1', 'owner_org': org['name']},
            {'name': 'dataset_2', 'owner_org': org['name'], 'private': True},
        ]
        for dataset in datasets:
            helpers.call_action('package_create',
                                context={'user': site_user},
                                **dataset)
        org_dict = helpers.call_action('organization_show', id=org['id'],
                                       include_datasets=True)
        # Only the public dataset is listed, and the count matches.
        eq(len(org_dict['packages']), 1)
        eq(org_dict['packages'][0]['name'], 'dataset_1')
        eq(org_dict['package_count'], 1)
class TestUserList(helpers.FunctionalTestBase):

    def test_user_list_default_values(self):
        '''user_list returns each user's public fields and nothing
        sensitive.'''
        user = factories.User()

        got_users = helpers.call_action('user_list')

        eq(len(got_users), 1)
        got_user = got_users[0]
        # Public fields mirror the factory-created user.
        for field in ('id', 'name', 'fullname', 'display_name', 'created',
                      'about', 'sysadmin'):
            eq(got_user[field], user[field])
        eq(got_user['number_of_edits'], 0)
        eq(got_user['number_created_packages'], 0)
        # Sensitive / optional fields are never exposed by default.
        for field in ('password', 'reset_key', 'apikey', 'email', 'datasets'):
            assert field not in got_user

    def test_user_list_edits(self):
        '''Edit and creation counts reflect the user's activity.'''
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset['title'] = 'Edited title'
        helpers.call_action('package_update',
                            context={'user': user['name']},
                            **dataset)

        got_users = helpers.call_action('user_list')

        eq(len(got_users), 1)
        got_user = got_users[0]
        eq(got_user['number_created_packages'], 1)
        # One revision for the create plus one for the update.
        eq(got_user['number_of_edits'], 2)

    def test_user_list_excludes_deleted_users(self):
        '''Deleted users are omitted from user_list.'''
        user = factories.User()
        factories.User(state='deleted')

        got_users = helpers.call_action('user_list')

        eq(len(got_users), 1)
        eq(got_users[0]['name'], user['name'])
class TestUserShow(helpers.FunctionalTestBase):
    def test_user_show_default_values(self):
        '''user_show returns the user's public fields and no sensitive ones.'''
        user = factories.User()
        got_user = helpers.call_action('user_show', id=user['id'])
        assert got_user['id'] == user['id']
        assert got_user['name'] == user['name']
        assert got_user['fullname'] == user['fullname']
        assert got_user['display_name'] == user['display_name']
        assert got_user['created'] == user['created']
        assert got_user['about'] == user['about']
        assert got_user['sysadmin'] == user['sysadmin']
        assert got_user['number_of_edits'] == 0
        assert got_user['number_created_packages'] == 0
        # Sensitive / optional fields must never leak by default.
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
        assert 'apikey' not in got_user
        assert 'email' not in got_user
        assert 'datasets' not in got_user
    def test_user_show_keep_email(self):
        '''The keep_email context flag exposes the email but nothing else.'''
        user = factories.User()
        got_user = helpers.call_action('user_show',
                                       context={'keep_email': True},
                                       id=user['id'])
        assert got_user['email'] == user['email']
        assert 'apikey' not in got_user
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
    def test_user_show_keep_apikey(self):
        '''The keep_apikey context flag exposes the apikey but nothing else.'''
        user = factories.User()
        got_user = helpers.call_action('user_show',
                                       context={'keep_apikey': True},
                                       id=user['id'])
        assert 'email' not in got_user
        assert got_user['apikey'] == user['apikey']
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
    def test_user_show_for_myself(self):
        '''A user viewing themselves sees their own email and apikey.'''
        user = factories.User()
        got_user = helpers.call_action('user_show',
                                       context={'user': user['name']},
                                       id=user['id'])
        assert got_user['email'] == user['email']
        assert got_user['apikey'] == user['apikey']
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
    def test_user_show_sysadmin_values(self):
        '''A sysadmin sees another user's email and apikey.'''
        user = factories.User()
        sysadmin = factories.User(sysadmin=True)
        got_user = helpers.call_action('user_show',
                                       context={'user': sysadmin['name']},
                                       id=user['id'])
        assert got_user['email'] == user['email']
        assert got_user['apikey'] == user['apikey']
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
    def test_user_show_include_datasets(self):
        '''include_datasets lists the user's (public, active) datasets.'''
        user = factories.User()
        dataset = factories.Dataset(user=user)
        got_user = helpers.call_action('user_show',
                                       include_datasets=True,
                                       id=user['id'])
        assert len(got_user['datasets']) == 1
        assert got_user['datasets'][0]['name'] == dataset['name']
    def test_user_show_include_datasets_excludes_draft_and_private(self):
        '''For an anonymous viewer, deleted/draft/private datasets are hidden
        and not counted.'''
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        got_user = helpers.call_action('user_show',
                                       include_datasets=True,
                                       id=user['id'])
        assert len(got_user['datasets']) == 1
        assert got_user['datasets'][0]['name'] == dataset['name']
        assert got_user['number_created_packages'] == 1
    def test_user_show_include_datasets_includes_draft_myself(self):
        # a user viewing his own user should see the draft and private datasets
        # (but never the deleted one)
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        dataset_deleted = factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        got_user = helpers.call_action('user_show',
                                       context={'user': user['name']},
                                       include_datasets=True,
                                       id=user['id'])
        eq(len(got_user['datasets']), 3)
        datasets_got = set([user_['name'] for user_ in got_user['datasets']])
        assert dataset_deleted['name'] not in datasets_got
        eq(got_user['number_created_packages'], 3)
    def test_user_show_include_datasets_includes_draft_sysadmin(self):
        # sysadmin should see the draft and private datasets
        # (but never the deleted one)
        user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        dataset_deleted = factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        got_user = helpers.call_action('user_show',
                                       context={'user': sysadmin['name']},
                                       include_datasets=True,
                                       id=user['id'])
        eq(len(got_user['datasets']), 3)
        datasets_got = set([user_['name'] for user_ in got_user['datasets']])
        assert dataset_deleted['name'] not in datasets_got
        eq(got_user['number_created_packages'], 3)
class TestRelatedList(helpers.FunctionalTestBase):
    def test_related_list_with_no_params(self):
        '''
        Test related_list with no parameters and default sort
        '''
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')
        related_list = helpers.call_action('related_list')
        assert len(related_list) == 2
        assert related1 in related_list
        assert related2 in related_list
    def test_related_list_type_filter(self):
        '''
        Test related_list with type filter
        '''
        user = factories.User()
        # related1 is a fixture that the type filter must exclude.
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')
        related_list = helpers.call_action('related_list',
                                           type_filter='application')
        assert ([related2] == related_list)
    def test_related_list_sorted(self):
        '''
        Test related_list with sort parameter
        '''
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')
        # created_desc: the most recently created item comes first.
        related_list = helpers.call_action('related_list', sort='created_desc')
        assert ([related2, related1] == related_list)
    def test_related_list_invalid_sort_parameter(self):
        '''
        Test related_list with invalid value for sort parameter
        '''
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')
        # An unrecognised sort value falls back to the default ordering.
        related_list = helpers.call_action('related_list', sort='invalid')
        assert ([related1, related2] == related_list)
    def test_related_list_featured(self):
        '''
        Test related_list with the featured filter
        '''
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')
        related_list = helpers.call_action('related_list', featured=True)
        assert ([related1] == related_list)
    # TODO: Create related items associated with a dataset and test
    # related_list with them
class TestCurrentPackageList(helpers.FunctionalTestBase):

    def test_current_package_list(self):
        '''
        Test current_package_list_with_resources with no parameters
        '''
        user = factories.User()
        # Two datasets created purely as fixtures; their dicts are not needed.
        factories.Dataset(user=user)
        factories.Dataset(user=user)
        current_package_list = helpers. \
            call_action('current_package_list_with_resources')
        eq(len(current_package_list), 2)

    def test_current_package_list_limit_param(self):
        '''
        Test current_package_list_with_resources with limit parameter
        '''
        user = factories.User()
        factories.Dataset(user=user)
        dataset2 = factories.Dataset(user=user)
        current_package_list = helpers. \
            call_action('current_package_list_with_resources', limit=1)
        eq(len(current_package_list), 1)
        # With limit=1 only the most recently created dataset is returned.
        eq(current_package_list[0]['name'], dataset2['name'])

    def test_current_package_list_offset_param(self):
        '''
        Test current_package_list_with_resources with offset parameter
        '''
        user = factories.User()
        dataset1 = factories.Dataset(user=user)
        factories.Dataset(user=user)
        current_package_list = helpers. \
            call_action('current_package_list_with_resources', offset=1)
        eq(len(current_package_list), 1)
        # offset=1 skips the most recently created dataset.
        eq(current_package_list[0]['name'], dataset1['name'])

    def test_current_package_list_private_datasets_anonoymous_user(self):
        '''
        Test current_package_list_with_resources with an anonymous user and
        a private dataset
        '''
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, owner_org=org['name'], private=True)
        factories.Dataset(user=user)
        current_package_list = helpers. \
            call_action('current_package_list_with_resources', context={})
        # The private dataset is hidden from the anonymous caller.
        eq(len(current_package_list), 1)

    def test_current_package_list_private_datasets_sysadmin_user(self):
        '''
        Test current_package_list_with_resources with a sysadmin user and a
        private dataset
        '''
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, owner_org=org['name'], private=True)
        factories.Dataset(user=user)
        sysadmin = factories.Sysadmin()
        current_package_list = helpers. \
            call_action('current_package_list_with_resources', context={'user':
                        sysadmin['name']})
        # Sysadmins see the private dataset as well.
        eq(len(current_package_list), 2)
class TestPackageAutocomplete(helpers.FunctionalTestBase):

    def test_package_autocomplete_does_not_return_private_datasets(self):
        '''package_autocomplete should only match public datasets.'''
        user = factories.User()
        org = factories.Organization(user=user)
        # One public and one private dataset, both matching the query.
        factories.Dataset(user=user, owner_org=org['name'],
                          title='Some public stuff')
        factories.Dataset(user=user, owner_org=org['name'],
                          private=True, title='Some private stuff')
        package_list = helpers.call_action('package_autocomplete',
                                           q='some')
        # Only the public dataset is returned.
        eq(len(package_list), 1)
class TestPackageSearch(helpers.FunctionalTestBase):
    def test_package_search_on_resource_name(self):
        '''
        package_search() should allow searching on resource name field.
        '''
        resource_name = 'resource_abc'
        # The Resource factory creates a parent dataset too; the search
        # matches that dataset via its resource's name.
        package = factories.Resource(name=resource_name)
        search_result = helpers.call_action('package_search', q='resource_abc')
        eq(search_result['results'][0]['resources'][0]['name'], resource_name)
    def test_package_search_excludes_private_and_drafts(self):
        '''
        package_search() with no options should not return private and draft
        datasets.
        '''
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        results = helpers.call_action('package_search')['results']
        eq(len(results), 1)
        eq(results[0]['name'], dataset['name'])
    def test_package_search_with_fq_excludes_private(self):
        '''
        package_search() with fq capacity:private should not return private
        and draft datasets.
        '''
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        fq = "capacity:private"
        # Only the plain public dataset comes back despite the fq.
        results = helpers.call_action('package_search', fq=fq)['results']
        eq(len(results), 1)
        eq(results[0]['name'], dataset['name'])
    def test_package_search_with_fq_excludes_drafts(self):
        '''
        An anon user can't use fq drafts to get draft datasets. Nothing is
        returned.
        '''
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        factories.Dataset(user=user, state='draft', name="draft-dataset")
        factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        fq = "state:draft"
        results = helpers.call_action('package_search', fq=fq)['results']
        eq(len(results), 0)
    def test_package_search_with_include_drafts_option_excludes_drafts_for_anon_user(self):
        '''
        An anon user can't use include_drafts to get draft datasets.
        '''
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        draft_dataset = factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        results = helpers.call_action('package_search', include_drafts=True)['results']
        eq(len(results), 1)
        nose.tools.assert_not_equals(results[0]['name'], draft_dataset['name'])
        nose.tools.assert_equal(results[0]['name'], dataset['name'])
    def test_package_search_with_include_drafts_option_includes_drafts_for_sysadmin(self):
        '''
        A sysadmin can use the include_drafts option to get draft datasets for
        all users.
        '''
        user = factories.User()
        other_user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        draft_dataset = factories.Dataset(user=user, state='draft')
        other_draft_dataset = factories.Dataset(user=other_user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        results = helpers.call_action('package_search', include_drafts=True,
                                      context={'user': sysadmin['name']})['results']
        eq(len(results), 3)
        names = [r['name'] for r in results]
        nose.tools.assert_true(draft_dataset['name'] in names)
        nose.tools.assert_true(other_draft_dataset['name'] in names)
        nose.tools.assert_true(dataset['name'] in names)
    def test_package_search_with_include_drafts_false_option_doesnot_include_drafts_for_sysadmin(self):
        '''
        A sysadmin with include_drafts option set to `False` will not get
        drafts returned in results.
        '''
        user = factories.User()
        other_user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        draft_dataset = factories.Dataset(user=user, state='draft')
        other_draft_dataset = factories.Dataset(user=other_user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])
        results = helpers.call_action('package_search', include_drafts=False,
                                      context={'user': sysadmin['name']})['results']
        eq(len(results), 1)
        names = [r['name'] for r in results]
        nose.tools.assert_true(draft_dataset['name'] not in names)
        nose.tools.assert_true(other_draft_dataset['name'] not in names)
        nose.tools.assert_true(dataset['name'] in names)
    def test_package_search_with_include_drafts_option_includes_drafts_for_user(self):
        '''
        The include_drafts option will include draft datasets for the
        authorized user, but not drafts for other users.
        '''
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user, name="dataset")
        other_dataset = factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        draft_dataset = factories.Dataset(user=user, state='draft', name="draft-dataset")
        other_draft_dataset = factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        results = helpers.call_action('package_search', include_drafts=True,
                                      context={'user': user['name']})['results']
        eq(len(results), 3)
        names = [r['name'] for r in results]
        nose.tools.assert_true(draft_dataset['name'] in names)
        nose.tools.assert_true(other_draft_dataset['name'] not in names)
        nose.tools.assert_true(dataset['name'] in names)
        nose.tools.assert_true(other_dataset['name'] in names)
    def test_package_search_with_fq_for_create_user_id_will_include_datasets_for_other_users(self):
        '''
        A normal user can use the fq creator_user_id to get active datasets
        (but not draft) for another user.
        '''
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user, name="dataset")
        other_dataset = factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        draft_dataset = factories.Dataset(user=user, state='draft', name="draft-dataset")
        other_draft_dataset = factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        fq = "creator_user_id:{0}".format(other_user['id'])
        results = helpers.call_action('package_search', fq=fq,
                                      context={'user': user['name']})['results']
        eq(len(results), 1)
        names = [r['name'] for r in results]
        nose.tools.assert_true(draft_dataset['name'] not in names)
        nose.tools.assert_true(other_draft_dataset['name'] not in names)
        nose.tools.assert_true(dataset['name'] not in names)
        nose.tools.assert_true(other_dataset['name'] in names)
    def test_package_search_with_fq_for_create_user_id_will_not_include_drafts_for_other_users(self):
        '''
        A normal user can't use fq creator_user_id and drafts to get draft
        datasets for another user.
        '''
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        factories.Dataset(user=user, state='draft', name="draft-dataset")
        factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        fq = "(creator_user_id:{0} AND +state:draft)".format(other_user['id'])
        results = helpers.call_action('package_search', fq=fq,
                                      context={'user': user['name']})['results']
        eq(len(results), 0)
    def test_package_search_with_fq_for_creator_user_id_and_drafts_and_include_drafts_option_will_not_include_drafts_for_other_user(self):
        '''
        A normal user can't use fq creator_user_id and drafts and the
        include_drafts option to get draft datasets for another user.
        '''
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        factories.Dataset(user=user, state='draft', name="draft-dataset")
        factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        fq = "(creator_user_id:{0} AND +state:draft)".format(other_user['id'])
        results = helpers.call_action('package_search', fq=fq, include_drafts=True,
                                      context={'user': user['name']})['results']
        eq(len(results), 0)
    def test_package_search_with_fq_for_creator_user_id_and_include_drafts_option_will_not_include_drafts_for_other_user(self):
        '''
        A normal user can't use fq creator_user_id and the include_drafts
        option to get draft datasets for another user.
        '''
        user = factories.User()
        other_user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user, name="dataset")
        other_dataset = factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        factories.Dataset(user=user, state='draft', name="draft-dataset")
        other_draft_dataset = factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        fq = "creator_user_id:{0}".format(other_user['id'])
        results = helpers.call_action('package_search', fq=fq, include_drafts=True,
                                      context={'user': user['name']})['results']
        names = [r['name'] for r in results]
        eq(len(results), 1)
        nose.tools.assert_true(other_dataset['name'] in names)
        nose.tools.assert_true(other_draft_dataset['name'] not in names)
    def test_package_search_with_fq_for_create_user_id_will_include_drafts_for_other_users_for_sysadmin(self):
        '''
        Sysadmins can use fq to get draft datasets for another user.
        '''
        user = factories.User()
        sysadmin = factories.Sysadmin()
        other_user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user, name="dataset")
        factories.Dataset(user=other_user, name="other-dataset")
        factories.Dataset(user=user, state='deleted', name="deleted-dataset")
        draft_dataset = factories.Dataset(user=user, state='draft', name="draft-dataset")
        factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
        factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
        fq = "(creator_user_id:{0} AND +state:draft)".format(user['id'])
        results = helpers.call_action('package_search', fq=fq,
                                      context={'user': sysadmin['name']})['results']
        names = [r['name'] for r in results]
        eq(len(results), 1)
        nose.tools.assert_true(dataset['name'] not in names)
        nose.tools.assert_true(draft_dataset['name'] in names)
    def test_package_search_private_with_ignore_capacity_check(self):
        '''
        package_search() can return private datasets when
        `ignore_capacity_check` present in context.
        '''
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        private_dataset = factories.Dataset(user=user, private=True, owner_org=org['name'])
        fq = '+capacity:"private"'
        results = helpers.call_action('package_search', fq=fq,
                                      context={'ignore_capacity_check': True})['results']
        eq(len(results), 1)
        eq(results[0]['name'], private_dataset['name'])
class TestBadLimitQueryParameters(helpers.FunctionalTestBase):
    '''test class for #1258 non-int query parameters cause 500 errors
    Test that validation errors are raised when calling actions with
    bad parameters.
    '''

    def test_activity_list_actions(self):
        actions = [
            'user_activity_list',
            'package_activity_list',
            'group_activity_list',
            'organization_activity_list',
            'recently_changed_packages_activity_list',
            'user_activity_list_html',
            'package_activity_list_html',
            'group_activity_list_html',
            'organization_activity_list_html',
            'recently_changed_packages_activity_list_html',
            'current_package_list_with_resources',
        ]
        # Both malformed and negative limit/offset must be rejected with a
        # ValidationError rather than a 500.
        bad_params = (('not_an_int', 'not_an_int'), (-1, -1))
        for action in actions:
            for limit, offset in bad_params:
                with nose.tools.assert_raises(logic.ValidationError):
                    helpers.call_action(action, id='test_user',
                                        limit=limit, offset=offset)

    def test_package_search_facet_field_is_json(self):
        kwargs = {'facet.field': 'notjson'}
        with nose.tools.assert_raises(logic.ValidationError):
            helpers.call_action('package_search', **kwargs)
class TestOrganizationListForUser(helpers.FunctionalTestBase):
'''Functional tests for the organization_list_for_user() action function.'''
def test_when_user_is_not_a_member_of_any_organizations(self):
"""
When the user isn't a member of any organizations (in any capacity)
organization_list_for_user() should return an empty list.
"""
user = factories.User()
context = {'user': user['name']}
# Create an organization so we can test that it does not get returned.
factories.Organization()
organizations = helpers.call_action('organization_list_for_user',
context=context)
assert organizations == []
def test_when_user_is_an_admin_of_one_organization(self):
"""
When the user is an admin of one organization
organization_list_for_user() should return a list of just that one
organization.
"""
user = factories.User()
context = {'user': user['name']}
organization = factories.Organization()
# Create a second organization just so we can test that it does not get
# returned.
factories.Organization()
helpers.call_action('member_create', id=organization['id'],
object=user['id'], object_type='user',
capacity='admin')
organizations = helpers.call_action('organization_list_for_user',
context=context)
assert len(organizations) == 1
assert organizations[0]['id'] == organization['id']
def test_when_user_is_an_admin_of_three_organizations(self):
"""
When the user is an admin of three organizations
organization_list_for_user() should return a list of all three
organizations.
"""
user = factories.User()
context = {'user': user['name']}
organization_1 = factories.Organization()
organization_2 = factories.Organization()
organization_3 = factories.Organization()
# Create a second organization just so we can test that it does not get
# returned.
factories.Organization()
# Make the user an admin of all three organizations:
for organization in (organization_1, organization_2, organization_3):
helpers.call_action('member_create', id=organization['id'],
object=user['id'], object_type='user',
capacity='admin')
organizations = helpers.call_action('organization_list_for_user',
context=context)
assert len(organizations) == 3
ids = [organization['id'] for organization in organizations]
for organization in (organization_1, organization_2, organization_3):
assert organization['id'] in ids
    def test_when_permissions_extend_to_sub_organizations(self):
        """
        When the user is an admin of a parent organization,
        organization_list_for_user() should also return its
        sub-organizations - admin permissions cascade down the hierarchy.
        """
        user = factories.User()
        context = {'user': user['name']}
        user['capacity'] = 'admin'
        top_organization = factories.Organization(users=[user])
        middle_organization = factories.Organization(users=[user])
        bottom_organization = factories.Organization()
        # Create another organization just so we can test that it does not get
        # returned.
        factories.Organization()
        # Build the hierarchy: top -> middle -> bottom.
        helpers.call_action('member_create',
                            id=bottom_organization['id'],
                            object=middle_organization['id'],
                            object_type='group', capacity='parent')
        helpers.call_action('member_create',
                            id=middle_organization['id'],
                            object=top_organization['id'],
                            object_type='group', capacity='parent')
        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)
        assert len(organizations) == 3
        # bottom is returned even though the user is only an admin of its
        # ancestors.
        org_ids = set(org['id'] for org in organizations)
        assert bottom_organization['id'] in org_ids
def test_does_not_return_members(self):
"""
By default organization_list_for_user() should not return organizations
that the user is just a member (not an admin) of.
"""
user = factories.User()
context = {'user': user['name']}
organization = factories.Organization()
helpers.call_action('member_create', id=organization['id'],
object=user['id'], object_type='user',
capacity='member')
organizations = helpers.call_action('organization_list_for_user',
context=context)
assert organizations == []
def test_does_not_return_editors(self):
"""
By default organization_list_for_user() should not return organizations
that the user is just an editor (not an admin) of.
"""
user = factories.User()
context = {'user': user['name']}
organization = factories.Organization()
helpers.call_action('member_create', id=organization['id'],
object=user['id'], object_type='user',
capacity='editor')
organizations = helpers.call_action('organization_list_for_user',
context=context)
assert organizations == []
    def test_editor_permission(self):
        """
        organization_list_for_user() should return organizations that the user
        is an editor of if passed a permission that belongs to the editor role.
        """
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='editor')
        # 'create_dataset' is a permission held by the editor role.
        organizations = helpers.call_action('organization_list_for_user',
                                            permission='create_dataset',
                                            context=context)
        assert [org['id'] for org in organizations] == [organization['id']]
    def test_member_permission(self):
        """
        organization_list_for_user() should return organizations that the user
        is a member of if passed a permission that belongs to the member role.
        """
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='member')
        # 'read' is a permission held by the plain member role.
        organizations = helpers.call_action('organization_list_for_user',
                                            permission='read',
                                            context=context)
        assert [org['id'] for org in organizations] == [organization['id']]
    def test_invalid_permission(self):
        '''
        organization_list_for_user() should return an empty list if passed a
        non-existent or invalid permission.

        Note that we test this with a user who is an editor of one organization.
        If the user was an admin of the organization then it would return that
        organization - admins have all permissions, including permissions that
        don't exist.
        '''
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()
        factories.Organization()
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='editor')
        # A spread of nonsense values, including non-string types.
        for permission in ('', ' ', 'foo', 27.3, 5, True, False, None):
            organizations = helpers.call_action('organization_list_for_user',
                                                permission=permission,
                                                context=context)
            assert organizations == []
def test_that_it_does_not_return_groups(self):
"""
organization_list_for_user() should not return groups that the user is
a member, editor or admin of.
"""
user = factories.User()
context = {'user': user['name']}
group_1 = factories.Group()
group_2 = factories.Group()
group_3 = factories.Group()
helpers.call_action('member_create', id=group_1['id'],
object=user['id'], object_type='user',
capacity='member')
helpers.call_action('member_create', id=group_2['id'],
object=user['id'], object_type='user',
capacity='editor')
helpers.call_action('member_create', id=group_3['id'],
object=user['id'], object_type='user',
capacity='admin')
organizations = helpers.call_action('organization_list_for_user',
context=context)
assert organizations == []
    def test_that_it_does_not_return_previous_memberships(self):
        """
        organization_list_for_user() should not return organizations that the
        user was previously an admin of but has since been removed from.
        """
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()
        # Make the user an admin of the organization.
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='admin')
        # Remove the user from the organization.
        helpers.call_action('member_delete', id=organization['id'],
                            object=user['id'], object_type='user')
        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)
        assert organizations == []
    def test_when_user_is_sysadmin(self):
        """
        When the user is a sysadmin organization_list_for_user() should just
        return all organizations, even if the user is not a member of them.
        """
        user = factories.Sysadmin()
        context = {'user': user['name']}
        organization = factories.Organization()
        # The sysadmin was never added as a member of the organization.
        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)
        assert [org['id'] for org in organizations] == [organization['id']]
    def test_that_it_does_not_return_deleted_organizations(self):
        """
        organization_list_for_user() should not return deleted organizations
        that the user was an admin of.
        """
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()
        # Make the user an admin of the organization.
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='admin')
        # Delete the organization.
        helpers.call_action('organization_delete', id=organization['id'])
        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)
        assert organizations == []
    def test_with_no_authorized_user(self):
        """
        organization_list_for_user() should return an empty list if there's no
        authorized user. Users who aren't logged-in don't have any permissions.
        """
        # Create an organization so we can test that it doesn't get returned.
        organization = factories.Organization()
        # No context is passed, so there is no logged-in user.
        organizations = helpers.call_action('organization_list_for_user')
        assert organizations == []
class TestShowResourceView(object):
    """Tests for resource_view_show, which needs the image_view plugin."""

    @classmethod
    def setup_class(cls):
        # image_view provides the 'image_view' view type created below.
        if not p.plugin_loaded('image_view'):
            p.load('image_view')

        helpers.reset_db()

    @classmethod
    def teardown_class(cls):
        p.unload('image_view')

    def test_resource_view_show(self):
        resource = factories.Resource()
        resource_view = {'resource_id': resource['id'],
                         'view_type': u'image_view',
                         'title': u'View',
                         'description': u'A nice view',
                         'image_url': 'url'}
        new_view = helpers.call_action('resource_view_create', **resource_view)

        result = helpers.call_action('resource_view_show', id=new_view['id'])

        # 'id' and 'package_id' are generated server-side, so drop them
        # before comparing against the input dict.
        result.pop('id')
        result.pop('package_id')
        assert result == resource_view

    def test_resource_view_show_id_missing(self):
        # Calling without an id should fail validation.
        nose.tools.assert_raises(
            logic.ValidationError,
            helpers.call_action, 'resource_view_show')

    def test_resource_view_show_id_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'resource_view_show', id='does_not_exist')
class TestGetHelpShow(object):
    """Tests for help_show, which returns an action function's docstring."""

    def test_help_show_basic(self):
        function_name = 'package_search'

        result = helpers.call_action('help_show', name=function_name)

        function = logic.get_action(function_name)
        eq(result, function.__doc__)

    def test_help_show_no_docstring(self):
        function_name = 'package_search'
        function = logic.get_action(function_name)
        actual_docstring = function.__doc__

        # Temporarily remove the docstring to simulate an undocumented action.
        function.__doc__ = None
        try:
            result = helpers.call_action('help_show', name=function_name)
        finally:
            # Restore the docstring even if help_show raises, so a failure
            # here cannot leak a None docstring into later tests.
            function.__doc__ = actual_docstring

        eq(result, None)

    def test_help_show_not_found(self):
        function_name = 'unknown_action'

        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'help_show', name=function_name)
class TestConfigOptionShow(helpers.FunctionalTestBase):
    """Tests for config_option_show (runtime-editable config options)."""

    @helpers.change_config('ckan.site_title', 'My Test CKAN')
    def test_config_option_show_in_config_not_in_db(self):
        '''config_option_show returns value from config when value is not in
        system_info table.'''
        title = helpers.call_action('config_option_show',
                                    key='ckan.site_title')
        nose.tools.assert_equal(title, 'My Test CKAN')

    @helpers.change_config('ckan.site_title', 'My Test CKAN')
    def test_config_option_show_in_config_and_in_db(self):
        '''config_option_show returns value from db when value is in both
        config and system_info table.'''
        # The db value is written via config_option_update and should win.
        params = {'ckan.site_title': 'Test site title'}
        helpers.call_action('config_option_update', **params)

        title = helpers.call_action('config_option_show',
                                    key='ckan.site_title')
        nose.tools.assert_equal(title, 'Test site title')

    @helpers.change_config('ckan.not.editable', 'My non editable option')
    def test_config_option_show_not_whitelisted_key(self):
        '''config_option_show raises exception if key is not a whitelisted
        config option.'''
        nose.tools.assert_raises(logic.ValidationError, helpers.call_action,
                                 'config_option_show', key='ckan.not.editable')
class TestConfigOptionList(object):
    """Tests for config_option_list."""

    def test_config_option_list(self):
        '''config_option_list returns whitelisted config option keys'''
        keys = helpers.call_action('config_option_list')
        # The whitelist is defined by the update-configuration schema.
        schema_keys = schema.update_configuration_schema().keys()

        nose.tools.assert_equal(keys, schema_keys)
def remove_pseudo_users(user_list):
    """Strip the 'logged_in' and 'visitor' pseudo users from user_list.

    The list is filtered in place so callers holding a reference see the
    change.
    """
    kept = []
    for entry in user_list:
        if entry['name'] not in ('logged_in', 'visitor'):
            kept.append(entry)
    user_list[:] = kept
class TestTagShow(helpers.FunctionalTestBase):
    """Tests for tag_show, covering free, flexible and vocabulary tags."""

    def test_tag_show_for_free_tag(self):
        dataset = factories.Dataset(tags=[{'name': 'acid-rain'}])
        tag_in_dataset = dataset['tags'][0]

        tag_shown = helpers.call_action('tag_show', id='acid-rain')

        eq(tag_shown['name'], 'acid-rain')
        eq(tag_shown['display_name'], 'acid-rain')
        eq(tag_shown['id'], tag_in_dataset['id'])
        eq(tag_shown['vocabulary_id'], None)
        # Datasets are only included when include_datasets=True is passed.
        assert 'packages' not in tag_shown

    def test_tag_show_with_datasets(self):
        dataset = factories.Dataset(tags=[{'name': 'acid-rain'}])

        tag_shown = helpers.call_action('tag_show', id='acid-rain',
                                        include_datasets=True)

        eq([d['name'] for d in tag_shown['packages']], [dataset['name']])

    def test_tag_show_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'tag_show', id='does-not-exist')

    def test_tag_show_for_flexible_tag(self):
        # A 'flexible' tag is one with spaces, some punctuation
        # and foreign characters in its name
        dataset = factories.Dataset(tags=[{'name': u'Flexible. \u30a1'}])

        tag_shown = helpers.call_action('tag_show', id=u'Flexible. \u30a1',
                                        include_datasets=True)

        eq(tag_shown['name'], u'Flexible. \u30a1')
        eq(tag_shown['display_name'], u'Flexible. \u30a1')
        eq([d['name'] for d in tag_shown['packages']], [dataset['name']])

    def test_tag_show_for_vocab_tag(self):
        vocab = factories.Vocabulary(
            tags=[dict(name='acid-rain')])
        dataset = factories.Dataset(tags=vocab['tags'])
        tag_in_dataset = dataset['tags'][0]

        # A vocabulary tag must be looked up with its vocabulary_id.
        tag_shown = helpers.call_action('tag_show', id='acid-rain',
                                        vocabulary_id=vocab['id'],
                                        include_datasets=True)

        eq(tag_shown['name'], 'acid-rain')
        eq(tag_shown['display_name'], 'acid-rain')
        eq(tag_shown['id'], tag_in_dataset['id'])
        eq(tag_shown['vocabulary_id'], vocab['id'])
        eq([d['name'] for d in tag_shown['packages']], [dataset['name']])
class TestTagList(helpers.FunctionalTestBase):
    """Tests for tag_list."""

    def test_tag_list(self):
        factories.Dataset(tags=[{'name': 'acid-rain'},
                                {'name': 'pollution'}])
        factories.Dataset(tags=[{'name': 'pollution'}])

        tag_list = helpers.call_action('tag_list')

        eq(set(tag_list), set(('acid-rain', 'pollution')))

    def test_tag_list_all_fields(self):
        factories.Dataset(tags=[{'name': 'acid-rain'}])

        tag_list = helpers.call_action('tag_list', all_fields=True)

        eq(tag_list[0]['name'], 'acid-rain')
        eq(tag_list[0]['display_name'], 'acid-rain')
        # Check the tag dict itself: the previous assertion tested
        # `'packages' not in tag_list`, i.e. membership in the *list*,
        # which was vacuously true.
        assert 'packages' not in tag_list[0]

    def test_tag_list_with_flexible_tag(self):
        # A 'flexible' tag is one with spaces, punctuation (apart from commas)
        # and foreign characters in its name
        flexible_tag = u'Flexible. \u30a1'
        factories.Dataset(tags=[{'name': flexible_tag}])

        tag_list = helpers.call_action('tag_list', all_fields=True)

        eq(tag_list[0]['name'], flexible_tag)

    def test_tag_list_with_vocab(self):
        vocab = factories.Vocabulary(
            tags=[dict(name='acid-rain'),
                  dict(name='pollution')])

        tag_list = helpers.call_action('tag_list', vocabulary_id=vocab['id'])

        eq(set(tag_list), set(('acid-rain', 'pollution')))

    def test_tag_list_vocab_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'tag_list', vocabulary_id='does-not-exist')
import nose.tools
import ckan.logic as logic
import ckan.plugins as p
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.logic.schema as schema
eq = nose.tools.eq_
class TestPackageShow(helpers.FunctionalTestBase):
    """Tests for the package_show action."""

    def test_package_show(self):
        """package_show returns the dataset with its standard keys."""
        dataset1 = factories.Dataset()

        dataset2 = helpers.call_action('package_show', id=dataset1['id'])

        eq(dataset2['name'], dataset1['name'])
        missing_keys = set(('title', 'groups')) - set(dataset2.keys())
        assert not missing_keys, missing_keys

    def test_package_show_with_custom_schema(self):
        """A 'schema' in the context replaces the default show schema."""
        dataset1 = factories.Dataset()
        from ckan.logic.schema import default_show_package_schema
        custom_schema = default_show_package_schema()

        # Validator that injects a constant value for the new key.
        def foo(key, data, errors, context):
            data[key] = 'foo'

        custom_schema['new_field'] = [foo]

        dataset2 = helpers.call_action('package_show', id=dataset1['id'],
                                       context={'schema': custom_schema})

        eq(dataset2['new_field'], 'foo')
class TestGroupList(helpers.FunctionalTestBase):
    """Tests for the group_list action."""

    def test_group_list(self):
        group1 = factories.Group()
        group2 = factories.Group()

        group_list = helpers.call_action('group_list')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_in_presence_of_organizations(self):
        """Organizations must not appear in the group list."""
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Organization()
        factories.Organization()

        group_list = helpers.call_action('group_list')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_in_presence_of_custom_group_types(self):
        """Groups of custom types are excluded by default."""
        group1 = factories.Group()
        group2 = factories.Group()
        factories.Group(type='custom')

        group_list = helpers.call_action('group_list')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_return_custom_group(self):
        """Passing type='custom' returns only groups of that type."""
        group1 = factories.Group(type='custom')
        group2 = factories.Group(type='custom')
        factories.Group()
        factories.Group()

        group_list = helpers.call_action('group_list', type='custom')

        assert (sorted(group_list) ==
                sorted([g['name'] for g in [group1, group2]]))

    def test_group_list_sort_by_package_count(self):
        factories.Group(name='aa')
        factories.Group(name='bb')
        factories.Dataset(groups=[{'name': 'aa'}, {'name': 'bb'}])
        factories.Dataset(groups=[{'name': 'bb'}])

        group_list = helpers.call_action('group_list', sort='package_count')
        eq(sorted(group_list), sorted(['bb', 'aa']))

    def test_group_list_sort_by_package_count_ascending(self):
        factories.Group(name='aa')
        factories.Group(name='bb')
        factories.Dataset(groups=[{'name': 'aa'}, {'name': 'bb'}])
        factories.Dataset(groups=[{'name': 'aa'}])

        # 'aa' has two datasets, 'bb' has one, so ascending puts 'bb' first.
        group_list = helpers.call_action('group_list',
                                         sort='package_count asc')

        eq(group_list, ['bb', 'aa'])

    def assert_equals_expected(self, expected_dict, result_dict):
        # Helper: result_dict must contain exactly the keys/values of
        # expected_dict - no extras, no mismatches.
        superfluous_keys = set(result_dict) - set(expected_dict)
        assert not superfluous_keys, 'Did not expect key: %s' % \
            ' '.join(('%s=%s' % (k, result_dict[k]) for k in superfluous_keys))
        for key in expected_dict:
            assert expected_dict[key] == result_dict[key], \
                '%s=%s should be %s' % \
                (key, result_dict[key], expected_dict[key])

    def test_group_list_all_fields(self):
        group = factories.Group()

        group_list = helpers.call_action('group_list', all_fields=True)

        expected_group = dict(group.items()[:])
        # Membership/relationship fields are only present when their
        # include_* flag is passed, so drop them from both sides.
        for field in ('users', 'tags', 'extras', 'groups'):
            if field in group_list[0]:
                del group_list[0][field]
                del expected_group[field]

        assert group_list[0] == expected_group
        assert 'extras' not in group_list[0]
        assert 'tags' not in group_list[0]
        assert 'groups' not in group_list[0]
        assert 'users' not in group_list[0]
        assert 'datasets' not in group_list[0]

    def test_group_list_extras_returned(self):
        group = factories.Group(extras=[{'key': 'key1', 'value': 'val1'}])

        group_list = helpers.call_action('group_list', all_fields=True,
                                         include_extras=True)

        eq(group_list[0]['extras'], group['extras'])
        eq(group_list[0]['extras'][0]['key'], 'key1')

    def test_group_list_groups_returned(self):
        parent_group = factories.Group(tags=[{'name': 'river'}])
        child_group = factories.Group(groups=[{'name': parent_group['name']}],
                                      tags=[{'name': 'river'}])

        group_list = helpers.call_action('group_list', all_fields=True,
                                         include_groups=True)

        # The result order is not guaranteed, so work out which entry is
        # the child before asserting on its parent list.
        child_group_returned = group_list[0]
        if group_list[0]['name'] == child_group['name']:
            child_group_returned, parent_group_returned = group_list
        else:
            child_group_returned, parent_group_returned = group_list[::-1]
        expected_parent_group = dict(parent_group.items()[:])

        eq([g['name'] for g in child_group_returned['groups']], [expected_parent_group['name']])
class TestGroupShow(helpers.FunctionalTestBase):
    """Tests for the group_show action."""

    def test_group_show(self):
        group = factories.Group(user=factories.User())

        group_dict = helpers.call_action('group_show', id=group['id'],
                                         include_datasets=True)

        # 'packages' is added by include_datasets; drop it so the rest can be
        # compared against the factory dict.
        group_dict.pop('packages', None)
        eq(group_dict, group)

    def test_group_show_error_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'group_show', id='does_not_exist')

    def test_group_show_error_for_organization(self):
        """group_show must not resolve an organization's id."""
        org = factories.Organization()

        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'group_show', id=org['id'])

    def test_group_show_packages_returned(self):
        user_name = helpers.call_action('get_site_user')['name']

        group = factories.Group(user=factories.User())

        datasets = [
            {'name': 'dataset_1', 'groups': [{'name': group['name']}]},
            {'name': 'dataset_2', 'groups': [{'name': group['name']}]},
        ]

        for dataset in datasets:
            helpers.call_action('package_create',
                                context={'user': user_name},
                                **dataset)

        group_dict = helpers.call_action('group_show', id=group['id'],
                                         include_datasets=True)

        assert len(group_dict['packages']) == 2
        assert group_dict['package_count'] == 2

    def test_group_show_packages_returned_for_view(self):
        """Same as above but with the 'for_view' rendering context."""
        user_name = helpers.call_action('get_site_user')['name']

        group = factories.Group(user=factories.User())

        datasets = [
            {'name': 'dataset_1', 'groups': [{'name': group['name']}]},
            {'name': 'dataset_2', 'groups': [{'name': group['name']}]},
        ]

        for dataset in datasets:
            helpers.call_action('package_create',
                                context={'user': user_name},
                                **dataset)

        group_dict = helpers.call_action('group_show', id=group['id'],
                                         include_datasets=True,
                                         context={'for_view': True})

        assert len(group_dict['packages']) == 2
        assert group_dict['package_count'] == 2

    def test_group_show_no_packages_returned(self):
        user_name = helpers.call_action('get_site_user')['name']

        group = factories.Group(user=factories.User())

        datasets = [
            {'name': 'dataset_1', 'groups': [{'name': group['name']}]},
            {'name': 'dataset_2', 'groups': [{'name': group['name']}]},
        ]

        for dataset in datasets:
            helpers.call_action('package_create',
                                context={'user': user_name},
                                **dataset)

        group_dict = helpers.call_action('group_show', id=group['id'],
                                         include_datasets=False)

        # No 'packages' key, but the count is still reported.
        assert 'packages' not in group_dict
        assert group_dict['package_count'] == 2

    def test_group_show_does_not_show_private_datasets(self):
        """group_show() and resulting group dict should not show private
        datasets, for any kind of user."""
        org_member = factories.User()
        org = factories.Organization(user=org_member)
        private_dataset = factories.Dataset(user=org_member,
                                            owner_org=org['name'], private=True)

        # Add the private dataset to a group.
        group = factories.Group()
        helpers.call_action('member_create', id=group['id'],
                            object=private_dataset['id'], object_type='package',
                            capacity='public')

        group_member = factories.User()
        helpers.call_action('member_create', id=group['id'],
                            object=group_member['id'], object_type='user',
                            capacity='member')

        group_admin = factories.User()
        helpers.call_action('member_create', id=group['id'],
                            object=group_admin['id'], object_type='user',
                            capacity='admin')

        non_member = factories.User()

        sysadmin = factories.Sysadmin()

        # None of the users should see the dataset when they call group_show().
        for user in (org_member, group_member, group_admin, non_member,
                     sysadmin, None):

            if user is None:
                context = None  # No user logged-in.
            else:
                context = {'user': user['name']}

            group = helpers.call_action('group_show', id=group['id'],
                                        include_datasets=True, context=context)

            assert private_dataset['id'] not in [dataset['id'] for dataset
                                                 in group['packages']], (
                "group_show() should never show private datasets")
class TestOrganizationList(helpers.FunctionalTestBase):
    """Tests for the organization_list action."""

    def _sorted_org_names(self):
        # Helper: the current organization_list result, sorted for comparison.
        return sorted(helpers.call_action('organization_list'))

    def test_organization_list(self):
        expected = sorted([factories.Organization()['name'],
                           factories.Organization()['name']])

        assert self._sorted_org_names() == expected

    def test_organization_list_in_presence_of_groups(self):
        """Plain groups must not appear in the organization list."""
        expected = sorted([factories.Organization()['name'],
                           factories.Organization()['name']])
        factories.Group()
        factories.Group()

        assert self._sorted_org_names() == expected

    def test_organization_list_in_presence_of_custom_group_types(self):
        """Custom-typed groups must not appear in the organization list."""
        expected = sorted([factories.Organization()['name'],
                           factories.Organization()['name']])
        factories.Group(type="custom")
        factories.Group(type="custom")

        assert self._sorted_org_names() == expected
class TestOrganizationShow(helpers.FunctionalTestBase):
    """Tests for the organization_show action."""

    def test_organization_show(self):
        org = factories.Organization()

        org_dict = helpers.call_action('organization_show', id=org['id'],
                                       include_datasets=True)

        # 'packages' is added by include_datasets; drop it so the rest can be
        # compared against the factory dict.
        org_dict.pop('packages', None)
        eq(org_dict, org)

    def test_organization_show_error_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'organization_show', id='does_not_exist')

    def test_organization_show_error_for_group(self):
        """organization_show must not resolve a plain group's id."""
        group = factories.Group()

        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'organization_show', id=group['id'])

    def test_organization_show_packages_returned(self):
        user_name = helpers.call_action('get_site_user')['name']

        org = factories.Organization()

        datasets = [
            {'name': 'dataset_1', 'owner_org': org['name']},
            {'name': 'dataset_2', 'owner_org': org['name']},
        ]

        for dataset in datasets:
            helpers.call_action('package_create',
                                context={'user': user_name},
                                **dataset)

        org_dict = helpers.call_action('organization_show', id=org['id'],
                                       include_datasets=True)

        assert len(org_dict['packages']) == 2
        assert org_dict['package_count'] == 2

    def test_organization_show_private_packages_not_returned(self):
        user_name = helpers.call_action('get_site_user')['name']

        org = factories.Organization()

        datasets = [
            {'name': 'dataset_1', 'owner_org': org['name']},
            {'name': 'dataset_2', 'owner_org': org['name'], 'private': True},
        ]

        for dataset in datasets:
            helpers.call_action('package_create',
                                context={'user': user_name},
                                **dataset)

        org_dict = helpers.call_action('organization_show', id=org['id'],
                                       include_datasets=True)

        # Only the public dataset is listed or counted.
        assert len(org_dict['packages']) == 1
        assert org_dict['packages'][0]['name'] == 'dataset_1'
        assert org_dict['package_count'] == 1
class TestUserList(helpers.FunctionalTestBase):
    """Tests for the user_list action."""

    def test_user_list_default_values(self):
        user = factories.User()

        got_users = helpers.call_action('user_list')

        assert len(got_users) == 1
        got_user = got_users[0]
        assert got_user['id'] == user['id']
        assert got_user['name'] == user['name']
        assert got_user['fullname'] == user['fullname']
        assert got_user['display_name'] == user['display_name']
        assert got_user['created'] == user['created']
        assert got_user['about'] == user['about']
        assert got_user['sysadmin'] == user['sysadmin']
        assert got_user['number_of_edits'] == 0
        assert got_user['number_created_packages'] == 0
        # Sensitive fields must never be exposed by the list action.
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
        assert 'apikey' not in got_user
        assert 'email' not in got_user
        assert 'datasets' not in got_user

    def test_user_list_edits(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset['title'] = 'Edited title'
        helpers.call_action('package_update',
                            context={'user': user['name']},
                            **dataset)

        got_users = helpers.call_action('user_list')

        assert len(got_users) == 1
        got_user = got_users[0]
        assert got_user['number_created_packages'] == 1
        # One edit for the create plus one for the update.
        assert got_user['number_of_edits'] == 2

    def test_user_list_excludes_deleted_users(self):
        user = factories.User()
        factories.User(state='deleted')

        got_users = helpers.call_action('user_list')

        assert len(got_users) == 1
        assert got_users[0]['name'] == user['name']
class TestUserShow(helpers.FunctionalTestBase):
    """Tests for user_show, including which fields each viewer may see."""

    def test_user_show_default_values(self):
        user = factories.User()

        got_user = helpers.call_action('user_show', id=user['id'])

        assert got_user['id'] == user['id']
        assert got_user['name'] == user['name']
        assert got_user['fullname'] == user['fullname']
        assert got_user['display_name'] == user['display_name']
        assert got_user['created'] == user['created']
        assert got_user['about'] == user['about']
        assert got_user['sysadmin'] == user['sysadmin']
        assert got_user['number_of_edits'] == 0
        assert got_user['number_created_packages'] == 0
        # Sensitive fields are hidden from anonymous callers.
        assert 'password' not in got_user
        assert 'reset_key' not in got_user
        assert 'apikey' not in got_user
        assert 'email' not in got_user
        assert 'datasets' not in got_user

    def test_user_show_keep_email(self):
        user = factories.User()

        # 'keep_email' in the context exposes only the email field.
        got_user = helpers.call_action('user_show',
                                       context={'keep_email': True},
                                       id=user['id'])

        assert got_user['email'] == user['email']
        assert 'apikey' not in got_user
        assert 'password' not in got_user
        assert 'reset_key' not in got_user

    def test_user_show_keep_apikey(self):
        user = factories.User()

        # 'keep_apikey' in the context exposes only the apikey field.
        got_user = helpers.call_action('user_show',
                                       context={'keep_apikey': True},
                                       id=user['id'])

        assert 'email' not in got_user
        assert got_user['apikey'] == user['apikey']
        assert 'password' not in got_user
        assert 'reset_key' not in got_user

    def test_user_show_for_myself(self):
        user = factories.User()

        # Viewing your own profile reveals email and apikey.
        got_user = helpers.call_action('user_show',
                                       context={'user': user['name']},
                                       id=user['id'])

        assert got_user['email'] == user['email']
        assert got_user['apikey'] == user['apikey']
        assert 'password' not in got_user
        assert 'reset_key' not in got_user

    def test_user_show_sysadmin_values(self):
        user = factories.User()

        sysadmin = factories.User(sysadmin=True)

        # A sysadmin sees another user's email and apikey too.
        got_user = helpers.call_action('user_show',
                                       context={'user': sysadmin['name']},
                                       id=user['id'])

        assert got_user['email'] == user['email']
        assert got_user['apikey'] == user['apikey']
        assert 'password' not in got_user
        assert 'reset_key' not in got_user

    def test_user_show_include_datasets(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)

        got_user = helpers.call_action('user_show',
                                       include_datasets=True,
                                       id=user['id'])

        assert len(got_user['datasets']) == 1
        assert got_user['datasets'][0]['name'] == dataset['name']

    def test_user_show_include_datasets_excludes_draft_and_private(self):
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])

        got_user = helpers.call_action('user_show',
                                       include_datasets=True,
                                       id=user['id'])

        # Anonymous view: only the active public dataset appears.
        assert len(got_user['datasets']) == 1
        assert got_user['datasets'][0]['name'] == dataset['name']
        assert got_user['number_created_packages'] == 1

    def test_user_show_include_datasets_includes_draft_myself(self):
        # a user viewing his own user should see the draft and private datasets
        user = factories.User()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        dataset_deleted = factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])

        got_user = helpers.call_action('user_show',
                                       context={'user': user['name']},
                                       include_datasets=True,
                                       id=user['id'])

        # public + draft + private are visible, but never the deleted one.
        eq(len(got_user['datasets']), 3)
        datasets_got = set([user_['name'] for user_ in got_user['datasets']])
        assert dataset_deleted['name'] not in datasets_got
        eq(got_user['number_created_packages'], 3)

    def test_user_show_include_datasets_includes_draft_sysadmin(self):
        # sysadmin should see the draft and private datasets
        user = factories.User()
        sysadmin = factories.Sysadmin()
        org = factories.Organization(user=user)
        factories.Dataset(user=user)
        dataset_deleted = factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])

        got_user = helpers.call_action('user_show',
                                       context={'user': sysadmin['name']},
                                       include_datasets=True,
                                       id=user['id'])

        eq(len(got_user['datasets']), 3)
        datasets_got = set([user_['name'] for user_ in got_user['datasets']])
        assert dataset_deleted['name'] not in datasets_got
        eq(got_user['number_created_packages'], 3)
class TestRelatedList(helpers.FunctionalTestBase):
    """Tests for the related_list action (related items / apps & ideas)."""

    def test_related_list_with_no_params(self):
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')

        related_list = helpers.call_action('related_list')

        assert len(related_list) == 2
        assert related1 in related_list
        assert related2 in related_list

    def test_related_list_type_filter(self):
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')

        related_list = helpers.call_action('related_list',
                                           type_filter='application')

        assert ([related2] == related_list)

    def test_related_list_sorted(self):
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')

        # created_desc: newest first.
        related_list = helpers.call_action('related_list', sort='created_desc')

        assert ([related2, related1] == related_list)

    def test_related_list_invalid_sort_parameter(self):
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')

        # An unrecognised sort value falls back to the default ordering.
        related_list = helpers.call_action('related_list', sort='invalid')

        assert ([related1, related2] == related_list)

    def test_related_list_featured(self):
        user = factories.User()
        related1 = factories.Related(user=user, featured=True)
        related2 = factories.Related(user=user, type='application')

        related_list = helpers.call_action('related_list', featured=True)

        assert ([related1] == related_list)
    # TODO: Create related items associated with a dataset and test
    # related_list with them
class TestCurrentPackageList(helpers.FunctionalTestBase):
    """Tests for current_package_list_with_resources."""

    def test_current_package_list(self):
        user = factories.User()
        dataset1 = factories.Dataset(user=user)
        dataset2 = factories.Dataset(user=user)

        current_package_list = helpers. \
            call_action('current_package_list_with_resources')

        eq(len(current_package_list), 2)

    def test_current_package_list_limit_param(self):
        user = factories.User()
        dataset1 = factories.Dataset(user=user)
        dataset2 = factories.Dataset(user=user)

        current_package_list = helpers. \
            call_action('current_package_list_with_resources', limit=1)

        # Most recently modified first, so limit=1 returns dataset2.
        eq(len(current_package_list), 1)
        eq(current_package_list[0]['name'], dataset2['name'])

    def test_current_package_list_offset_param(self):
        user = factories.User()
        dataset1 = factories.Dataset(user=user)
        dataset2 = factories.Dataset(user=user)

        current_package_list = helpers. \
            call_action('current_package_list_with_resources', offset=1)

        # Skipping the newest leaves only dataset1.
        eq(len(current_package_list), 1)
        eq(current_package_list[0]['name'], dataset1['name'])

    def test_current_package_list_private_datasets_anonoymous_user(self):
        user = factories.User()
        org = factories.Organization(user=user)
        dataset1 = factories.Dataset(user=user, owner_org=org['name'],
                                     private=True)
        dataset2 = factories.Dataset(user=user)

        # Empty context = anonymous caller; private dataset is hidden.
        current_package_list = helpers. \
            call_action('current_package_list_with_resources', context={})

        eq(len(current_package_list), 1)

    def test_current_package_list_private_datasets_sysadmin_user(self):
        user = factories.User()
        org = factories.Organization(user=user)
        dataset1 = factories.Dataset(user=user, owner_org=org['name'],
                                     private=True)
        dataset2 = factories.Dataset(user=user)

        sysadmin = factories.Sysadmin()
        # Sysadmins also see the private dataset.
        current_package_list = helpers. \
            call_action('current_package_list_with_resources', context={'user':
                        sysadmin['name']})

        eq(len(current_package_list), 2)
class TestPackageAutocomplete(helpers.FunctionalTestBase):
    """Tests for the package_autocomplete action."""

    def test_package_autocomplete_does_not_return_private_datasets(self):
        """Private datasets must be excluded from autocomplete matches."""
        creator = factories.User()
        org = factories.Organization(user=creator)
        # One public and one private dataset, both matching the query 'some'.
        factories.Dataset(user=creator, owner_org=org['name'],
                          title='Some public stuff')
        factories.Dataset(user=creator, owner_org=org['name'],
                          private=True, title='Some private stuff')

        matches = helpers.call_action('package_autocomplete', q='some')

        eq(len(matches), 1)
class TestPackageSearch(helpers.FunctionalTestBase):
def test_package_search_on_resource_name(self):
resource_name = 'resource_abc'
package = factories.Resource(name=resource_name)
search_result = helpers.call_action('package_search', q='resource_abc')
eq(search_result['results'][0]['resources'][0]['name'], resource_name)
    def test_package_search_excludes_private_and_drafts(self):
        """An anonymous search finds only the active public dataset."""
        user = factories.User()
        org = factories.Organization(user=user)
        dataset = factories.Dataset(user=user)
        factories.Dataset(user=user, state='deleted')
        factories.Dataset(user=user, state='draft')
        factories.Dataset(user=user, private=True, owner_org=org['name'])

        results = helpers.call_action('package_search')['results']

        eq(len(results), 1)
        eq(results[0]['name'], dataset['name'])
def test_package_search_with_fq_excludes_private(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state='deleted')
factories.Dataset(user=user, state='draft')
factories.Dataset(user=user, private=True, owner_org=org['name'])
fq = "capacity:private"
results = helpers.call_action('package_search', fq=fq)['results']
eq(len(results), 1)
eq(results[0]['name'], dataset['name'])
def test_package_search_with_fq_excludes_drafts(self):
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
factories.Dataset(user=user, state='draft', name="draft-dataset")
factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
fq = "state:draft"
results = helpers.call_action('package_search', fq=fq)['results']
eq(len(results), 0)
def test_package_search_with_include_drafts_option_excludes_drafts_for_anon_user(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state='deleted')
draft_dataset = factories.Dataset(user=user, state='draft')
factories.Dataset(user=user, private=True, owner_org=org['name'])
results = helpers.call_action('package_search', include_drafts=True)['results']
eq(len(results), 1)
nose.tools.assert_not_equals(results[0]['name'], draft_dataset['name'])
nose.tools.assert_equal(results[0]['name'], dataset['name'])
def test_package_search_with_include_drafts_option_includes_drafts_for_sysadmin(self):
user = factories.User()
other_user = factories.User()
sysadmin = factories.Sysadmin()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state='deleted')
draft_dataset = factories.Dataset(user=user, state='draft')
other_draft_dataset = factories.Dataset(user=other_user, state='draft')
factories.Dataset(user=user, private=True, owner_org=org['name'])
results = helpers.call_action('package_search', include_drafts=True,
context={'user': sysadmin['name']})['results']
eq(len(results), 3)
names = [r['name'] for r in results]
nose.tools.assert_true(draft_dataset['name'] in names)
nose.tools.assert_true(other_draft_dataset['name'] in names)
nose.tools.assert_true(dataset['name'] in names)
def test_package_search_with_include_drafts_false_option_doesnot_include_drafts_for_sysadmin(self):
user = factories.User()
other_user = factories.User()
sysadmin = factories.Sysadmin()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state='deleted')
draft_dataset = factories.Dataset(user=user, state='draft')
other_draft_dataset = factories.Dataset(user=other_user, state='draft')
factories.Dataset(user=user, private=True, owner_org=org['name'])
results = helpers.call_action('package_search', include_drafts=False,
context={'user': sysadmin['name']})['results']
eq(len(results), 1)
names = [r['name'] for r in results]
nose.tools.assert_true(draft_dataset['name'] not in names)
nose.tools.assert_true(other_draft_dataset['name'] not in names)
nose.tools.assert_true(dataset['name'] in names)
def test_package_search_with_include_drafts_option_includes_drafts_for_user(self):
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user, name="dataset")
other_dataset = factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
draft_dataset = factories.Dataset(user=user, state='draft', name="draft-dataset")
other_draft_dataset = factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
results = helpers.call_action('package_search', include_drafts=True,
context={'user': user['name']})['results']
eq(len(results), 3)
names = [r['name'] for r in results]
nose.tools.assert_true(draft_dataset['name'] in names)
nose.tools.assert_true(other_draft_dataset['name'] not in names)
nose.tools.assert_true(dataset['name'] in names)
nose.tools.assert_true(other_dataset['name'] in names)
def test_package_search_with_fq_for_create_user_id_will_include_datasets_for_other_users(self):
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user, name="dataset")
other_dataset = factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
draft_dataset = factories.Dataset(user=user, state='draft', name="draft-dataset")
other_draft_dataset = factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
fq = "creator_user_id:{0}".format(other_user['id'])
results = helpers.call_action('package_search', fq=fq,
context={'user': user['name']})['results']
eq(len(results), 1)
names = [r['name'] for r in results]
nose.tools.assert_true(draft_dataset['name'] not in names)
nose.tools.assert_true(other_draft_dataset['name'] not in names)
nose.tools.assert_true(dataset['name'] not in names)
nose.tools.assert_true(other_dataset['name'] in names)
def test_package_search_with_fq_for_create_user_id_will_not_include_drafts_for_other_users(self):
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
factories.Dataset(user=user, state='draft', name="draft-dataset")
factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
fq = "(creator_user_id:{0} AND +state:draft)".format(other_user['id'])
results = helpers.call_action('package_search', fq=fq,
context={'user': user['name']})['results']
eq(len(results), 0)
def test_package_search_with_fq_for_creator_user_id_and_drafts_and_include_drafts_option_will_not_include_drafts_for_other_user(self):
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
factories.Dataset(user=user, state='draft', name="draft-dataset")
factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
fq = "(creator_user_id:{0} AND +state:draft)".format(other_user['id'])
results = helpers.call_action('package_search', fq=fq, include_drafts=True,
context={'user': user['name']})['results']
eq(len(results), 0)
def test_package_search_with_fq_for_creator_user_id_and_include_drafts_option_will_not_include_drafts_for_other_user(self):
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
other_dataset = factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
factories.Dataset(user=user, state='draft', name="draft-dataset")
other_draft_dataset = factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
fq = "creator_user_id:{0}".format(other_user['id'])
results = helpers.call_action('package_search', fq=fq, include_drafts=True,
context={'user': user['name']})['results']
names = [r['name'] for r in results]
eq(len(results), 1)
nose.tools.assert_true(other_dataset['name'] in names)
nose.tools.assert_true(other_draft_dataset['name'] not in names)
def test_package_search_with_fq_for_create_user_id_will_include_drafts_for_other_users_for_sysadmin(self):
user = factories.User()
sysadmin = factories.Sysadmin()
other_user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state='deleted', name="deleted-dataset")
draft_dataset = factories.Dataset(user=user, state='draft', name="draft-dataset")
factories.Dataset(user=other_user, state='draft', name="other-draft-dataset")
factories.Dataset(user=user, private=True, owner_org=org['name'], name="private-dataset")
fq = "(creator_user_id:{0} AND +state:draft)".format(user['id'])
results = helpers.call_action('package_search', fq=fq,
context={'user': sysadmin['name']})['results']
names = [r['name'] for r in results]
eq(len(results), 1)
nose.tools.assert_true(dataset['name'] not in names)
nose.tools.assert_true(draft_dataset['name'] in names)
def test_package_search_private_with_ignore_capacity_check(self):
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user)
factories.Dataset(user=user, state='deleted')
factories.Dataset(user=user, state='draft')
private_dataset = factories.Dataset(user=user, private=True, owner_org=org['name'])
fq = '+capacity:"private"'
results = helpers.call_action('package_search', fq=fq,
context={'ignore_capacity_check': True})['results']
eq(len(results), 1)
eq(results[0]['name'], private_dataset['name'])
class TestBadLimitQueryParameters(helpers.FunctionalTestBase):
    """Actions taking ``limit``/``offset`` must reject invalid values."""

    def test_activity_list_actions(self):
        actions = [
            'user_activity_list',
            'package_activity_list',
            'group_activity_list',
            'organization_activity_list',
            'recently_changed_packages_activity_list',
            'user_activity_list_html',
            'package_activity_list_html',
            'group_activity_list_html',
            'organization_activity_list_html',
            'recently_changed_packages_activity_list_html',
            'current_package_list_with_resources',
        ]
        # Non-integer and negative paging values are both invalid.
        bad_paging_values = (
            {'limit': 'not_an_int', 'offset': 'not_an_int'},
            {'limit': -1, 'offset': -1},
        )
        for action in actions:
            for params in bad_paging_values:
                nose.tools.assert_raises(
                    logic.ValidationError, helpers.call_action, action,
                    id='test_user', **params)

    def test_package_search_facet_field_is_json(self):
        # facet.field must be a JSON-encoded list, not a bare string.
        bad_kwargs = {'facet.field': 'notjson'}
        nose.tools.assert_raises(
            logic.ValidationError, helpers.call_action, 'package_search',
            **bad_kwargs)
class TestOrganizationListForUser(helpers.FunctionalTestBase):
    """Tests for the ``organization_list_for_user`` action.

    The action should return only organizations the user administers
    (or has the requested ``permission`` in), never groups, deleted
    organizations, past memberships or other users' organizations.
    """

    def test_when_user_is_not_a_member_of_any_organizations(self):
        user = factories.User()
        context = {'user': user['name']}

        # Create an organization so we can test that it does not get returned.
        factories.Organization()

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert organizations == []

    def test_when_user_is_an_admin_of_one_organization(self):
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        # Create a second organization just so we can test that it does not get
        # returned.
        factories.Organization()

        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='admin')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert len(organizations) == 1
        assert organizations[0]['id'] == organization['id']

    def test_when_user_is_an_admin_of_three_organizations(self):
        user = factories.User()
        context = {'user': user['name']}
        organization_1 = factories.Organization()
        organization_2 = factories.Organization()
        organization_3 = factories.Organization()

        # Create a second organization just so we can test that it does not get
        # returned.
        factories.Organization()

        # Make the user an admin of all three organizations:
        for organization in (organization_1, organization_2, organization_3):
            helpers.call_action('member_create', id=organization['id'],
                                object=user['id'], object_type='user',
                                capacity='admin')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert len(organizations) == 3
        ids = [organization['id'] for organization in organizations]
        for organization in (organization_1, organization_2, organization_3):
            assert organization['id'] in ids

    def test_when_permissions_extend_to_sub_organizations(self):
        # Admin rights on a parent organization cascade down the
        # organization hierarchy to its children.
        user = factories.User()
        context = {'user': user['name']}
        user['capacity'] = 'admin'
        top_organization = factories.Organization(users=[user])
        middle_organization = factories.Organization(users=[user])
        bottom_organization = factories.Organization()

        # Create another organization just so we can test that it does not get
        # returned.
        factories.Organization()

        # Build the hierarchy: bottom -> middle -> top.
        helpers.call_action('member_create',
                            id=bottom_organization['id'],
                            object=middle_organization['id'],
                            object_type='group', capacity='parent')
        helpers.call_action('member_create',
                            id=middle_organization['id'],
                            object=top_organization['id'],
                            object_type='group', capacity='parent')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert len(organizations) == 3
        org_ids = set(org['id'] for org in organizations)
        assert bottom_organization['id'] in org_ids

    def test_does_not_return_members(self):
        # Plain 'member' capacity is not enough for the default
        # (edit) permission.
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='member')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert organizations == []

    def test_does_not_return_editors(self):
        # 'editor' capacity is also insufficient for the default permission.
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='editor')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert organizations == []

    def test_editor_permission(self):
        # Editors do have the 'create_dataset' permission.
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='editor')

        organizations = helpers.call_action('organization_list_for_user',
                                            permission='create_dataset',
                                            context=context)

        assert [org['id'] for org in organizations] == [organization['id']]

    def test_member_permission(self):
        # Members do have the 'read' permission.
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='member')

        organizations = helpers.call_action('organization_list_for_user',
                                            permission='read',
                                            context=context)

        assert [org['id'] for org in organizations] == [organization['id']]

    def test_invalid_permission(self):
        # Any unknown or non-string permission yields no organizations.
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()
        factories.Organization()
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='editor')

        for permission in ('', ' ', 'foo', 27.3, 5, True, False, None):
            organizations = helpers.call_action('organization_list_for_user',
                                                permission=permission,
                                                context=context)

            assert organizations == []

    def test_that_it_does_not_return_groups(self):
        # Group memberships (of any capacity) are not organizations.
        user = factories.User()
        context = {'user': user['name']}
        group_1 = factories.Group()
        group_2 = factories.Group()
        group_3 = factories.Group()

        helpers.call_action('member_create', id=group_1['id'],
                            object=user['id'], object_type='user',
                            capacity='member')
        helpers.call_action('member_create', id=group_2['id'],
                            object=user['id'], object_type='user',
                            capacity='editor')
        helpers.call_action('member_create', id=group_3['id'],
                            object=user['id'], object_type='user',
                            capacity='admin')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert organizations == []

    def test_that_it_does_not_return_previous_memberships(self):
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        # Make the user an admin of the organization.
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='admin')

        # Remove the user from the organization.
        helpers.call_action('member_delete', id=organization['id'],
                            object=user['id'], object_type='user')

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert organizations == []

    def test_when_user_is_sysadmin(self):
        # Sysadmins see every organization without explicit membership.
        user = factories.Sysadmin()
        context = {'user': user['name']}
        organization = factories.Organization()

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert [org['id'] for org in organizations] == [organization['id']]

    def test_that_it_does_not_return_deleted_organizations(self):
        user = factories.User()
        context = {'user': user['name']}
        organization = factories.Organization()

        # Make the user an admin of the organization.
        helpers.call_action('member_create', id=organization['id'],
                            object=user['id'], object_type='user',
                            capacity='admin')

        # Delete the organization.
        helpers.call_action('organization_delete', id=organization['id'])

        organizations = helpers.call_action('organization_list_for_user',
                                            context=context)

        assert organizations == []

    def test_with_no_authorized_user(self):
        # Create an organization so we can test that it doesn't get returned.
        organization = factories.Organization()

        organizations = helpers.call_action('organization_list_for_user')

        assert organizations == []
class TestShowResourceView(object):
    """Tests for ``resource_view_show`` (needs the image_view plugin)."""

    @classmethod
    def setup_class(cls):
        # The 'image_view' view type used below is provided by a plugin.
        if not p.plugin_loaded('image_view'):
            p.load('image_view')

        helpers.reset_db()

    @classmethod
    def teardown_class(cls):
        p.unload('image_view')

    def test_resource_view_show(self):
        resource = factories.Resource()
        view_dict = {'resource_id': resource['id'],
                     'view_type': u'image_view',
                     'title': u'View',
                     'description': u'A nice view',
                     'image_url': 'url'}

        created = helpers.call_action('resource_view_create', **view_dict)
        shown = helpers.call_action('resource_view_show', id=created['id'])

        # The action adds 'id' and 'package_id'; the rest must round-trip.
        shown.pop('id')
        shown.pop('package_id')
        assert shown == view_dict

    def test_resource_view_show_id_missing(self):
        nose.tools.assert_raises(
            logic.ValidationError,
            helpers.call_action, 'resource_view_show')

    def test_resource_view_show_id_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'resource_view_show', id='does_not_exist')
class TestGetHelpShow(object):
    """Tests for the ``help_show`` action."""

    def test_help_show_basic(self):
        """help_show returns the action function's docstring."""
        function_name = 'package_search'

        result = helpers.call_action('help_show', name=function_name)

        function = logic.get_action(function_name)
        eq(result, function.__doc__)

    def test_help_show_no_docstring(self):
        """help_show returns None for an action without a docstring."""
        function_name = 'package_search'
        function = logic.get_action(function_name)
        actual_docstring = function.__doc__
        function.__doc__ = None
        # Restore the docstring even if the call raises, so this
        # monkeypatch cannot leak into other tests.
        try:
            result = helpers.call_action('help_show', name=function_name)
        finally:
            function.__doc__ = actual_docstring
        eq(result, None)

    def test_help_show_not_found(self):
        """help_show raises NotFound for an unknown action name."""
        function_name = 'unknown_action'
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'help_show', name=function_name)
class TestConfigOptionShow(helpers.FunctionalTestBase):
    """Tests for the ``config_option_show`` action."""

    @helpers.change_config('ckan.site_title', 'My Test CKAN')
    def test_config_option_show_in_config_not_in_db(self):
        # With nothing stored in the DB, the value comes from the config.
        site_title = helpers.call_action('config_option_show',
                                         key='ckan.site_title')
        nose.tools.assert_equal(site_title, 'My Test CKAN')

    @helpers.change_config('ckan.site_title', 'My Test CKAN')
    def test_config_option_show_in_config_and_in_db(self):
        # A DB-stored value takes precedence over the config file value.
        helpers.call_action('config_option_update',
                            **{'ckan.site_title': 'Test site title'})

        site_title = helpers.call_action('config_option_show',
                                         key='ckan.site_title')
        nose.tools.assert_equal(site_title, 'Test site title')

    @helpers.change_config('ckan.not.editable', 'My non editable option')
    def test_config_option_show_not_whitelisted_key(self):
        # Keys outside the editable whitelist are rejected.
        nose.tools.assert_raises(logic.ValidationError, helpers.call_action,
                                 'config_option_show', key='ckan.not.editable')
class TestConfigOptionList(object):
    """Tests for the ``config_option_list`` action."""

    def test_config_option_list(self):
        # The listed keys must match the updatable-configuration schema.
        listed_keys = helpers.call_action('config_option_list')

        expected_keys = schema.update_configuration_schema().keys()
        nose.tools.assert_equal(listed_keys, expected_keys)
def remove_pseudo_users(user_list):
    """Strip CKAN's pseudo users from ``user_list`` in place.

    Removes every entry whose ``'name'`` is ``'logged_in'`` or
    ``'visitor'``; the list object itself is mutated and nothing is
    returned.
    """
    kept = []
    for user in user_list:
        if user['name'] == 'logged_in' or user['name'] == 'visitor':
            continue
        kept.append(user)
    user_list[:] = kept
class TestTagShow(helpers.FunctionalTestBase):
    """Tests for ``tag_show`` covering free, flexible and vocabulary tags."""

    def test_tag_show_for_free_tag(self):
        ds = factories.Dataset(tags=[{'name': 'acid-rain'}])
        expected_tag = ds['tags'][0]

        tag_shown = helpers.call_action('tag_show', id='acid-rain')

        eq(tag_shown['name'], 'acid-rain')
        eq(tag_shown['display_name'], 'acid-rain')
        eq(tag_shown['id'], expected_tag['id'])
        eq(tag_shown['vocabulary_id'], None)
        assert 'packages' not in tag_shown

    def test_tag_show_with_datasets(self):
        ds = factories.Dataset(tags=[{'name': 'acid-rain'}])

        tag_shown = helpers.call_action('tag_show', id='acid-rain',
                                        include_datasets=True)

        eq([d['name'] for d in tag_shown['packages']], [ds['name']])

    def test_tag_show_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'tag_show', id='does-not-exist')

    def test_tag_show_for_flexible_tag(self):
        # Tag names may contain spaces, punctuation and non-ASCII chars.
        ds = factories.Dataset(tags=[{'name': u'Flexible. \u30a1'}])

        tag_shown = helpers.call_action('tag_show', id=u'Flexible. \u30a1',
                                        include_datasets=True)

        eq(tag_shown['name'], u'Flexible. \u30a1')
        eq(tag_shown['display_name'], u'Flexible. \u30a1')
        eq([d['name'] for d in tag_shown['packages']], [ds['name']])

    def test_tag_show_for_vocab_tag(self):
        vocab = factories.Vocabulary(
            tags=[dict(name='acid-rain')])
        ds = factories.Dataset(tags=vocab['tags'])
        expected_tag = ds['tags'][0]

        tag_shown = helpers.call_action('tag_show', id='acid-rain',
                                        vocabulary_id=vocab['id'],
                                        include_datasets=True)

        eq(tag_shown['name'], 'acid-rain')
        eq(tag_shown['display_name'], 'acid-rain')
        eq(tag_shown['id'], expected_tag['id'])
        eq(tag_shown['vocabulary_id'], vocab['id'])
        eq([d['name'] for d in tag_shown['packages']], [ds['name']])
class TestTagList(helpers.FunctionalTestBase):
    """Tests for the ``tag_list`` action."""

    def test_tag_list(self):
        factories.Dataset(tags=[{'name': 'acid-rain'},
                                {'name': 'pollution'}])
        factories.Dataset(tags=[{'name': 'pollution'}])

        tags = helpers.call_action('tag_list')

        # Duplicates across datasets collapse to a single entry.
        eq(set(tags), set(('acid-rain', 'pollution')))

    def test_tag_list_all_fields(self):
        factories.Dataset(tags=[{'name': 'acid-rain'}])

        tags = helpers.call_action('tag_list', all_fields=True)

        eq(tags[0]['name'], 'acid-rain')
        eq(tags[0]['display_name'], 'acid-rain')
        assert 'packages' not in tags

    def test_tag_list_with_flexible_tag(self):
        # A tag with spaces, punctuation and non-ASCII characters.
        flexible_tag = u'Flexible. \u30a1'
        factories.Dataset(tags=[{'name': flexible_tag}])

        tags = helpers.call_action('tag_list', all_fields=True)

        eq(tags[0]['name'], flexible_tag)

    def test_tag_list_with_vocab(self):
        vocab = factories.Vocabulary(
            tags=[dict(name='acid-rain'),
                  dict(name='pollution')])

        tags = helpers.call_action('tag_list', vocabulary_id=vocab['id'])

        eq(set(tags), set(('acid-rain', 'pollution')))

    def test_tag_list_vocab_not_found(self):
        nose.tools.assert_raises(
            logic.NotFound,
            helpers.call_action, 'tag_list', vocabulary_id='does-not-exist')
| true | true |
f72272527531c2472e42c8f3296ae27996a41b23 | 27,634 | py | Python | src/sage/categories/lie_algebras.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | src/sage/categories/lie_algebras.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | src/sage/categories/lie_algebras.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | r"""
Lie Algebras
AUTHORS:
- Travis Scrimshaw (07-15-2013): Initial implementation
"""
#*****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.abstract_method import abstract_method
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.lazy_import import LazyImport
from sage.categories.category import JoinCategory, Category
from sage.categories.category_types import Category_over_base_ring
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.modules import Modules
from sage.categories.sets_cat import Sets
from sage.categories.homset import Hom
from sage.categories.morphism import Morphism
from sage.structure.element import coerce_binop
class LieAlgebras(Category_over_base_ring):
"""
The category of Lie algebras.
EXAMPLES::
sage: C = LieAlgebras(QQ); C
Category of Lie algebras over Rational Field
sage: sorted(C.super_categories(), key=str)
[Category of vector spaces over Rational Field]
We construct a typical parent in this category, and do some
computations with it::
sage: A = C.example(); A
An example of a Lie algebra: the Lie algebra from the associative
algebra Symmetric group algebra of order 3 over Rational Field
generated by ([2, 1, 3], [2, 3, 1])
sage: A.category()
Category of Lie algebras over Rational Field
sage: A.base_ring()
Rational Field
sage: a,b = A.lie_algebra_generators()
sage: a.bracket(b)
-[1, 3, 2] + [3, 2, 1]
sage: b.bracket(2*a + b)
2*[1, 3, 2] - 2*[3, 2, 1]
sage: A.bracket(a, b)
-[1, 3, 2] + [3, 2, 1]
Please see the source code of `A` (with ``A??``) for how to
implement other Lie algebras.
TESTS::
sage: C = LieAlgebras(QQ)
sage: TestSuite(C).run()
sage: TestSuite(C.example()).run()
.. TODO::
Many of these tests should use Lie algebras that are not the minimal
example and need to be added after :trac:`16820` (and :trac:`16823`).
"""
@cached_method
def super_categories(self):
    """
    EXAMPLES::

        sage: LieAlgebras(QQ).super_categories()
        [Category of vector spaces over Rational Field]
    """
    # Deliberately not derived from (magmatic) algebras: ``*`` would
    # then be the Lie bracket, and axioms such as Associative and
    # Unital do not make sense for Lie algebras.
    R = self.base_ring()
    return [Modules(R)]
# TODO: Find some way to do this without copying most of the logic.
def _repr_object_names(self):
    r"""
    Return the name of the objects of this category.

    .. SEEALSO:: :meth:`Category._repr_object_names`

    EXAMPLES::

        sage: LieAlgebras(QQ)._repr_object_names()
        'Lie algebras over Rational Field'
        sage: LieAlgebras(Fields())._repr_object_names()
        'Lie algebras over fields'
        sage: from sage.categories.category import JoinCategory
        sage: from sage.categories.category_with_axiom import Blahs
        sage: LieAlgebras(JoinCategory((Blahs().Flying(), Fields())))
        Category of Lie algebras over (flying unital blahs and fields)
    """
    base = self.base()
    # A non-category base (e.g. a concrete ring) prints as itself.
    if not isinstance(base, Category):
        return "Lie algebras over {}".format(base)
    # A join category prints as a parenthesized conjunction of its parts.
    if isinstance(base, JoinCategory):
        joined = ' and '.join(C._repr_object_names()
                              for C in base.super_categories())
        return "Lie algebras over ({})".format(joined)
    return "Lie algebras over {}".format(base._repr_object_names())
def example(self, gens=None):
    """
    Return an example of a Lie algebra as per
    :meth:`Category.example <sage.categories.category.Category.example>`.

    EXAMPLES::

        sage: LieAlgebras(QQ).example()
        An example of a Lie algebra: the Lie algebra from the associative algebra
        Symmetric group algebra of order 3 over Rational Field
        generated by ([2, 1, 3], [2, 3, 1])

    Another set of generators can be specified as an optional argument::

        sage: F.<x,y,z> = FreeAlgebra(QQ)
        sage: LieAlgebras(QQ).example(F.gens())
        An example of a Lie algebra: the Lie algebra from the associative algebra
        Free Algebra on 3 generators (x, y, z) over Rational Field
        generated by (x, y, z)
    """
    from sage.categories.examples.lie_algebras import Example
    if gens is not None:
        return Example(gens)
    # Default: generators of the symmetric group algebra S_3 over QQ.
    from sage.combinat.symmetric_group_algebra import SymmetricGroupAlgebra
    from sage.rings.all import QQ
    return Example(SymmetricGroupAlgebra(QQ, 3).algebra_generators())
# Subcategory implementing the ``WithBasis`` axiom; resolved lazily
# on first access via LazyImport.
WithBasis = LazyImport('sage.categories.lie_algebras_with_basis',
                       'LieAlgebrasWithBasis', as_name='WithBasis')
class FiniteDimensional(CategoryWithAxiom_over_base_ring):
    """The category of finite-dimensional Lie algebras."""

    # Lazily-resolved subcategory for the ``WithBasis`` axiom.
    WithBasis = LazyImport('sage.categories.finite_dimensional_lie_algebras_with_basis',
                           'FiniteDimensionalLieAlgebrasWithBasis', as_name='WithBasis')

    def extra_super_categories(self):
        """
        Implements the fact that a finite dimensional Lie algebra over
        a finite ring is finite.

        EXAMPLES::

            sage: LieAlgebras(IntegerModRing(4)).FiniteDimensional().extra_super_categories()
            [Category of finite sets]
            sage: LieAlgebras(ZZ).FiniteDimensional().extra_super_categories()
            []
            sage: LieAlgebras(GF(5)).FiniteDimensional().is_subcategory(Sets().Finite())
            True
            sage: LieAlgebras(ZZ).FiniteDimensional().is_subcategory(Sets().Finite())
            False
            sage: LieAlgebras(GF(5)).WithBasis().FiniteDimensional().is_subcategory(Sets().Finite())
            True
        """
        finite_sets = Sets().Finite()
        if self.base_ring() in finite_sets:
            return [finite_sets]
        return []
class ParentMethods:
#@abstract_method
#def lie_algebra_generators(self):
# """
# Return the generators of ``self`` as a Lie algebra.
# """
# TODO: Move this to LieAlgebraElement, cythonize, and use more standard
# coercion framework test (i.e., have_same_parent)
def bracket(self, lhs, rhs):
    """
    Return the Lie bracket ``[lhs, rhs]`` after coercing ``lhs`` and
    ``rhs`` into elements of ``self``.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).example()
        sage: x,y = L.lie_algebra_generators()
        sage: L.bracket(x, x + y)
        -[1, 3, 2] + [3, 2, 1]
        sage: L.bracket(x, 0)
        0
        sage: L.bracket(0, x)
        0
    """
    # Coerce both operands into this Lie algebra, then delegate to the
    # element-level bracket.
    left = self(lhs)
    right = self(rhs)
    return left._bracket_(right)
# Do not override this. Instead implement :meth:`_construct_UEA`;
# then, :meth:`lift` and :meth:`universal_enveloping_algebra`
# will automatically setup the coercion.
def universal_enveloping_algebra(self):
    """
    Return the universal enveloping algebra of ``self``.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: L.universal_enveloping_algebra()
        Noncommutative Multivariate Polynomial Ring in b0, b1, b2
         over Rational Field, nc-relations: {}

    ::

        sage: L = LieAlgebra(QQ, 3, 'x', abelian=True)
        sage: L.universal_enveloping_algebra()
        Multivariate Polynomial Ring in x0, x1, x2 over Rational Field

    .. SEEALSO::

        :meth:`lift`
    """
    # The UEA is the codomain of the canonical lift morphism.
    lift_morphism = self.lift
    return lift_morphism.codomain()
# Optional hook: concrete Lie algebras implement this to build their
# universal enveloping algebra; ``universal_enveloping_algebra`` and
# ``lift`` build on it.
@abstract_method(optional=True)
def _construct_UEA(self):
    """
    Return the universal enveloping algebra of ``self``.

    Unlike :meth:`universal_enveloping_algebra`, this method does not
    (usually) construct the canonical lift morphism from ``self``
    to the universal enveloping algebra (let alone register it
    as a coercion).

    One should implement this method and the ``lift`` method for
    the element class to construct the morphism the universal
    enveloping algebra.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: L._construct_UEA()
        Noncommutative Multivariate Polynomial Ring in b0, b1, b2
        over Rational Field, nc-relations: {}

    ::

        sage: L = LieAlgebra(QQ, 3, 'x', abelian=True)
        sage: L.universal_enveloping_algebra()  # indirect doctest
        Multivariate Polynomial Ring in x0, x1, x2 over Rational Field
    """
# Optional hook: implement together with ``to_vector``/``from_vector``
# to give the Lie algebra linear-algebra support (see docstring).
@abstract_method(optional=True)
def module(self):
    r"""
    Return an `R`-module which is isomorphic to the
    underlying `R`-module of ``self``.

    The rationale behind this method is to enable linear
    algebraic functionality on ``self`` (such as
    computing the span of a list of vectors in ``self``)
    via an isomorphism from ``self`` to an `R`-module
    (typically, although not always, an `R`-module of
    the form `R^n` for an `n \in \NN`) on which such
    functionality already exists. For this method to be
    of any use, it should return an `R`-module which has
    linear algebraic functionality that ``self`` does
    not have.

    For instance, if ``self`` has ordered basis
    `(e, f, h)`, then ``self.module()`` will be the
    `R`-module `R^3`, and the elements `e`, `f` and
    `h` of ``self`` will correspond to the basis
    vectors `(1, 0, 0)`, `(0, 1, 0)` and `(0, 0, 1)`
    of ``self.module()``.

    This method :meth:`module` needs to be set whenever
    a finite-dimensional Lie algebra with basis is
    intended to support linear algebra (which is, e.g.,
    used in the computation of centralizers and lower
    central series). One then needs to also implement
    the `R`-module isomorphism from ``self`` to
    ``self.module()`` in both directions; that is,
    implement:

    * a ``to_vector`` ElementMethod which sends every
      element of ``self`` to the corresponding element of
      ``self.module()``;

    * a ``from_vector`` ParentMethod which sends every
      element of ``self.module()`` to an element
      of ``self``.

    The ``from_vector`` method will automatically serve
    as an element constructor of ``self`` (that is,
    ``self(v)`` for any ``v`` in ``self.module()`` will
    return ``self.from_vector(v)``).

    .. TODO::

        Ensure that this is actually so.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: L.module()
        Vector space of dimension 3 over Rational Field
    """
@abstract_method(optional=True)
def from_vector(self, v):
    """
    Return the element of ``self`` corresponding to the
    vector ``v`` in ``self.module()``.

    Implement this if you implement :meth:`module`; see the
    documentation of the latter for how this is to be done.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: u = L.from_vector(vector(QQ, (1, 0, 0))); u
        (1, 0, 0)
        sage: parent(u) is L
        True
    """
@lazy_attribute
def lift(self):
    r"""
    Construct the lift morphism from ``self`` to the universal
    enveloping algebra of ``self`` (the latter is implemented
    as :meth:`universal_enveloping_algebra`).

    This is a Lie algebra homomorphism. It is injective if
    ``self`` is a free module over its base ring, or if the
    base ring is a `\QQ`-algebra.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: a, b, c = L.lie_algebra_generators()
        sage: lifted = L.lift(2*a + b - c); lifted
        2*b0 + b1 - b2
        sage: lifted.parent() is L.universal_enveloping_algebra()
        True
    """
    # The UEA comes from the (user-implemented) _construct_UEA hook.
    M = LiftMorphism(self, self._construct_UEA())
    # Registering the morphism as a coercion lets mixed arithmetic
    # between the Lie algebra and its UEA work transparently.
    M.register_as_coercion()
    return M
def subalgebra(self, gens, names=None, index_set=None, category=None):
    r"""
    Return the subalgebra of ``self`` generated by ``gens``.

    This is currently a stub: generic subalgebras are not yet
    implemented (see ticket #17416), so every call raises a
    ``NotImplementedError``.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: a, b, c = L.lie_algebra_generators()
        sage: L.subalgebra([2*a - c, b + c])
        An example of a finite dimensional Lie algebra with basis:
         the 2-dimensional abelian Lie algebra over Rational Field with
         basis matrix:
        [   1    0 -1/2]
        [   0    1    1]

    ::

        sage: L = LieAlgebras(QQ).example()
        sage: x,y = L.lie_algebra_generators()
        sage: L.subalgebra([x + y])
        Traceback (most recent call last):
        ...
        NotImplementedError: subalgebras not yet implemented: see #17416
    """
    # Intended implementation once #17416 lands:
    #   from sage.algebras.lie_algebras.subalgebra import LieSubalgebra
    #   return LieSubalgebra(gens, names, index_set, category)
    raise NotImplementedError("subalgebras not yet implemented: see #17416")
def ideal(self, gens, names=None, index_set=None, category=None):
    r"""
    Return the ideal of ``self`` generated by ``gens``.

    This is currently a stub: generic ideals are not yet
    implemented (see ticket #16824), so every call raises a
    ``NotImplementedError``.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: a, b, c = L.lie_algebra_generators()
        sage: L.ideal([2*a - c, b + c])
        An example of a finite dimensional Lie algebra with basis:
         the 2-dimensional abelian Lie algebra over Rational Field with
         basis matrix:
        [   1    0 -1/2]
        [   0    1    1]

    ::

        sage: L = LieAlgebras(QQ).example()
        sage: x,y = L.lie_algebra_generators()
        sage: L.ideal([x + y])
        Traceback (most recent call last):
        ...
        NotImplementedError: ideals not yet implemented: see #16824
    """
    # Intended implementation once #16824 lands:
    #   from sage.algebras.lie_algebras.ideal import LieIdeal
    #   return LieIdeal(gens, names, index_set, category)
    raise NotImplementedError("ideals not yet implemented: see #16824")
def is_ideal(self, A):
    """
    Return whether ``self`` is an ideal of ``A``.

    Only the trivial case ``A == self`` is currently decidable;
    every other input raises ``NotImplementedError`` (see #16824).

    EXAMPLES::

        sage: L = LieAlgebras(QQ).example()
        sage: L.is_ideal(L)
        True
    """
    # A Lie algebra is always an ideal of itself.
    if A == self:
        return True
    # The general test needs the LieIdeal machinery from #16824
    # (eventually: ``isinstance(self, LieIdeal) and self._ambient is A``).
    raise NotImplementedError("ideals not yet implemented: see #16824")
@abstract_method(optional=True)
def killing_form(self, x, y):
    """
    Return the Killing form of ``x`` and ``y``.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: a, b, c = L.lie_algebra_generators()
        sage: L.killing_form(a, b+c)
        0
    """
def is_abelian(self):
    r"""
    Return ``True`` if this Lie algebra is abelian.

    A Lie algebra `\mathfrak{g}` is abelian if `[x, y] = 0` for all
    `x, y \in \mathfrak{g}`.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).example()
        sage: L.is_abelian()
        False
        sage: R = QQ['x,y']
        sage: L = LieAlgebras(QQ).example(R.gens())
        sage: L.is_abelian()
        True

    ::

        sage: L.<x> = LieAlgebra(QQ,1) # todo: not implemented - #16823
        sage: L.is_abelian() # todo: not implemented - #16823
        True
        sage: L.<x,y> = LieAlgebra(QQ,2) # todo: not implemented - #16823
        sage: L.is_abelian() # todo: not implemented - #16823
        False
    """
    gens = self.lie_algebra_generators()
    if gens not in FiniteEnumeratedSets():
        raise NotImplementedError("infinite number of generators")
    zero = self.zero()
    # It suffices to check the bracket on a generating set.
    return all(g._bracket_(h) == zero for g in gens for h in gens)
def is_commutative(self):
    """
    Return whether ``self`` is commutative.

    For a Lie algebra this is equivalent to being abelian.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).example()
        sage: L.is_commutative()
        False

    ::

        sage: L.<x> = LieAlgebra(QQ, 1) # todo: not implemented - #16823
        sage: L.is_commutative() # todo: not implemented - #16823
        True
    """
    # Commutativity and abelianness coincide for Lie algebras.
    return self.is_abelian()
@abstract_method(optional=True)
def is_solvable(self):
    """
    Return whether ``self`` is a solvable Lie algebra.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: L.is_solvable()
        True
    """
@abstract_method(optional=True)
def is_nilpotent(self):
    """
    Return whether ``self`` is a nilpotent Lie algebra.

    EXAMPLES::

        sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
        sage: L.is_nilpotent()
        True
    """
def _test_jacobi_identity(self, **options):
"""
Test that the Jacobi identity is satisfied on (not
necessarily all) elements of this set.
INPUT:
- ``options`` -- any keyword arguments accepted by :meth:`_tester`.
EXAMPLES:
By default, this method runs the tests only on the
elements returned by ``self.some_elements()``::
sage: L = LieAlgebras(QQ).example()
sage: L._test_jacobi_identity()
However, the elements tested can be customized with the
``elements`` keyword argument::
sage: L = LieAlgebras(QQ).example()
sage: x,y = L.lie_algebra_generators()
sage: L._test_jacobi_identity(elements=[x+y, x, 2*y, x.bracket(y)])
See the documentation for :class:`TestSuite` for more information.
"""
tester = self._tester(**options)
elts = tester.some_elements()
jacobi = lambda x, y, z: self.bracket(x, self.bracket(y, z)) + \
self.bracket(y, self.bracket(z, x)) + \
self.bracket(z, self.bracket(x, y))
zero = self.zero()
for x in elts:
for y in elts:
if x == y:
continue
for z in elts:
tester.assertTrue(jacobi(x, y, z) == zero)
def _test_antisymmetry(self, **options):
"""
Test that the antisymmetry axiom is satisfied on (not
necessarily all) elements of this set.
INPUT:
- ``options`` -- any keyword arguments accepted by :meth:`_tester`.
EXAMPLES:
By default, this method runs the tests only on the
elements returned by ``self.some_elements()``::
sage: L = LieAlgebras(QQ).example()
sage: L._test_antisymmetry()
However, the elements tested can be customized with the
``elements`` keyword argument::
sage: L = LieAlgebras(QQ).example()
sage: x,y = L.lie_algebra_generators()
sage: L._test_antisymmetry(elements=[x+y, x, 2*y, x.bracket(y)])
See the documentation for :class:`TestSuite` for more information.
"""
tester = self._tester(**options)
elts = tester.some_elements()
zero = self.zero()
for x in elts:
tester.assertTrue(self.bracket(x, x) == zero)
def _test_distributivity(self, **options):
    r"""
    Test the distributivity of the Lie bracket `[,]` over `+` on (not
    necessarily all) elements of this set.

    INPUT:

    - ``options`` -- any keyword arguments accepted by :meth:`_tester`

    TESTS::

        sage: L = LieAlgebras(QQ).example()
        sage: L._test_distributivity()

    EXAMPLES:

    By default, this method runs the tests only on the
    elements returned by ``self.some_elements()``::

        sage: L = LieAlgebra(QQ, 3, 'x,y,z', representation="polynomial")
        sage: L.some_elements()
        [x + y + z]
        sage: L._test_distributivity()

    However, the elements tested can be customized with the
    ``elements`` keyword argument::

        sage: L = LieAlgebra(QQ, cartan_type=['A', 2]) # todo: not implemented - #16821
        sage: h1 = L.gen(0) # todo: not implemented - #16821
        sage: h2 = L.gen(1) # todo: not implemented - #16821
        sage: e2 = L.gen(3) # todo: not implemented - #16821
        sage: L._test_distributivity(elements=[h1, h2, e2]) # todo: not implemented - #16821

    See the documentation for :class:`TestSuite` for more information.
    """
    from sage.misc.misc import some_tuples
    tester = self._tester(**options)
    elts = tester.some_elements()
    for x, y, z in some_tuples(elts, 3, tester._max_runs):
        # [x, y + z] == [x, y] + [x, z]  (left distributivity)
        tester.assertTrue(self.bracket(x, (y + z))
                          == self.bracket(x, y) + self.bracket(x, z))
        # [x + y, z] == [x, z] + [y, z]  (right distributivity)
        tester.assertTrue(self.bracket((x + y), z)
                          == self.bracket(x, z) + self.bracket(y, z))
class ElementMethods:
    @coerce_binop
    def bracket(self, rhs):
        """
        Return the Lie bracket ``[self, rhs]``.

        The operands are coerced into a common parent by
        :func:`coerce_binop` before ``_bracket_`` is called.

        EXAMPLES::

            sage: L = LieAlgebras(QQ).example()
            sage: x,y = L.lie_algebra_generators()
            sage: x.bracket(y)
            -[1, 3, 2] + [3, 2, 1]
            sage: x.bracket(0)
            0
        """
        return self._bracket_(rhs)

    # Implement this method to define the Lie bracket. You do not
    # need to deal with the coercions here.
    @abstract_method
    def _bracket_(self, y):
        """
        Return the Lie bracket ``[self, y]``, where ``y`` is an
        element of the same Lie algebra as ``self``.

        EXAMPLES::

            sage: L = LieAlgebras(QQ).example()
            sage: x,y = L.lie_algebra_generators()
            sage: x._bracket_(y)
            -[1, 3, 2] + [3, 2, 1]
            sage: y._bracket_(x)
            [1, 3, 2] - [3, 2, 1]
            sage: x._bracket_(x)
            0
        """

    @abstract_method(optional=True)
    def to_vector(self):
        """
        Return the vector in ``g.module()`` corresponding to the
        element ``self`` of ``g`` (where ``g`` is the parent of
        ``self``).

        Implement this if you implement ``g.module()``.
        See :meth:`LieAlgebras.module` for how this is to be done.

        EXAMPLES::

            sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
            sage: u = L((1, 0, 0)).to_vector(); u
            (1, 0, 0)
            sage: parent(u)
            Vector space of dimension 3 over Rational Field
        """

    @abstract_method(optional=True)
    def lift(self):
        """
        Return the image of ``self`` under the canonical lift from the Lie
        algebra to its universal enveloping algebra.

        EXAMPLES::

            sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
            sage: a, b, c = L.lie_algebra_generators()
            sage: elt = 3*a + b - c
            sage: elt.lift()
            3*b0 + b1 - b2

        ::

            sage: L.<x,y> = LieAlgebra(QQ, abelian=True)
            sage: x.lift()
            x
        """

    def killing_form(self, x):
        """
        Return the Killing form of ``self`` and ``x``.

        This delegates to the parent's ``killing_form`` method.

        EXAMPLES::

            sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
            sage: a, b, c = L.lie_algebra_generators()
            sage: a.killing_form(b)
            0
        """
        return self.parent().killing_form(self, x)
class LiftMorphism(Morphism):
    """
    The natural lifting morphism from a Lie algebra to its
    enveloping algebra.
    """
    def __init__(self, domain, codomain):
        """
        Initialize ``self``.

        INPUT:

        - ``domain`` -- the Lie algebra being lifted
        - ``codomain`` -- its universal enveloping algebra

        EXAMPLES::

            sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
            sage: f = L.lift

        We skip the category test since this is currently not an element of
        a homspace::

            sage: TestSuite(f).run(skip="_test_category")
        """
        Morphism.__init__(self, Hom(domain, codomain))

    def _call_(self, x):
        """
        Lift ``x`` to the universal enveloping algebra.

        The actual work is delegated to the element's ``lift`` method.

        EXAMPLES::

            sage: L = LieAlgebras(QQ).FiniteDimensional().WithBasis().example()
            sage: a, b, c = L.lie_algebra_generators()
            sage: L.lift(3*a + b - c)
            3*b0 + b1 - b2
        """
        return x.lift()
| 36.360526 | 104 | 0.536404 |
from sage.misc.abstract_method import abstract_method
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.lazy_import import LazyImport
from sage.categories.category import JoinCategory, Category
from sage.categories.category_types import Category_over_base_ring
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.modules import Modules
from sage.categories.sets_cat import Sets
from sage.categories.homset import Hom
from sage.categories.morphism import Morphism
from sage.structure.element import coerce_binop
class LieAlgebras(Category_over_base_ring):
@cached_method
def super_categories(self):
# to be our Lie bracket
# Also this doesn't inherit the ability to add axioms like Associative
return [Modules(self.base_ring())]
def _repr_object_names(self):
base = self.base()
if isinstance(base, Category):
if isinstance(base, JoinCategory):
name = '('+' and '.join(C._repr_object_names() for C in base.super_categories())+')'
else:
name = base._repr_object_names()
else:
name = base
return "Lie algebras over {}".format(name)
def example(self, gens=None):
if gens is None:
from sage.combinat.symmetric_group_algebra import SymmetricGroupAlgebra
from sage.rings.all import QQ
gens = SymmetricGroupAlgebra(QQ, 3).algebra_generators()
from sage.categories.examples.lie_algebras import Example
return Example(gens)
WithBasis = LazyImport('sage.categories.lie_algebras_with_basis',
'LieAlgebrasWithBasis', as_name='WithBasis')
class FiniteDimensional(CategoryWithAxiom_over_base_ring):
WithBasis = LazyImport('sage.categories.finite_dimensional_lie_algebras_with_basis',
'FiniteDimensionalLieAlgebrasWithBasis', as_name='WithBasis')
def extra_super_categories(self):
if self.base_ring() in Sets().Finite():
return [Sets().Finite()]
return []
class ParentMethods:
# Return the generators of ``self`` as a Lie algebra.
# """
def bracket(self, lhs, rhs):
return self(lhs)._bracket_(self(rhs))
def universal_enveloping_algebra(self):
return self.lift.codomain()
@abstract_method(optional=True)
def _construct_UEA(self):
@abstract_method(optional=True)
def module(self):
@abstract_method(optional=True)
def from_vector(self, v):
@lazy_attribute
def lift(self):
M = LiftMorphism(self, self._construct_UEA())
M.register_as_coercion()
return M
def subalgebra(self, gens, names=None, index_set=None, category=None):
raise NotImplementedError("subalgebras not yet implemented: see #17416")
def ideal(self, gens, names=None, index_set=None, category=None):
raise NotImplementedError("ideals not yet implemented: see #16824")
def is_ideal(self, A):
if A == self:
return True
raise NotImplementedError("ideals not yet implemented: see #16824")
@abstract_method(optional=True)
def killing_form(self, x, y):
def is_abelian(self):
G = self.lie_algebra_generators()
if G not in FiniteEnumeratedSets():
raise NotImplementedError("infinite number of generators")
zero = self.zero()
return all(x._bracket_(y) == zero for x in G for y in G)
def is_commutative(self):
return self.is_abelian()
@abstract_method(optional=True)
def is_solvable(self):
@abstract_method(optional=True)
def is_nilpotent(self):
def _test_jacobi_identity(self, **options):
tester = self._tester(**options)
elts = tester.some_elements()
jacobi = lambda x, y, z: self.bracket(x, self.bracket(y, z)) + \
self.bracket(y, self.bracket(z, x)) + \
self.bracket(z, self.bracket(x, y))
zero = self.zero()
for x in elts:
for y in elts:
if x == y:
continue
for z in elts:
tester.assertTrue(jacobi(x, y, z) == zero)
def _test_antisymmetry(self, **options):
tester = self._tester(**options)
elts = tester.some_elements()
zero = self.zero()
for x in elts:
tester.assertTrue(self.bracket(x, x) == zero)
def _test_distributivity(self, **options):
tester = self._tester(**options)
S = tester.some_elements()
from sage.misc.misc import some_tuples
for x,y,z in some_tuples(S, 3, tester._max_runs):
tester.assertTrue(self.bracket(x, (y + z))
== self.bracket(x, y) + self.bracket(x, z))
tester.assertTrue(self.bracket((x + y), z)
== self.bracket(x, z) + self.bracket(y, z))
class ElementMethods:
@coerce_binop
def bracket(self, rhs):
return self._bracket_(rhs)
@abstract_method
def _bracket_(self, y):
@abstract_method(optional=True)
def to_vector(self):
@abstract_method(optional=True)
def lift(self):
def killing_form(self, x):
return self.parent().killing_form(self, x)
class LiftMorphism(Morphism):
def __init__(self, domain, codomain):
Morphism.__init__(self, Hom(domain, codomain))
def _call_(self, x):
return x.lift()
| true | true |
f7227294e6071e2f3b324c9a105808e6b102856c | 106 | py | Python | django_ymap/__init__.py | Syndicat111/django-simple-yandex-map | 4f351b2de9e05790483a6a068df30ddd74b58c4e | [
"MIT"
] | 13 | 2015-04-17T21:46:17.000Z | 2021-02-16T16:51:33.000Z | django_ymap/__init__.py | Syndicat111/django-simple-yandex-map | 4f351b2de9e05790483a6a068df30ddd74b58c4e | [
"MIT"
] | 3 | 2016-07-22T06:23:13.000Z | 2018-08-24T04:36:18.000Z | django_ymap/__init__.py | Syndicat111/django-simple-yandex-map | 4f351b2de9e05790483a6a068df30ddd74b58c4e | [
"MIT"
] | 11 | 2015-09-21T09:26:37.000Z | 2020-04-26T19:42:33.000Z | # coding: utf-8
# Legacy AppConfig pointer: tells Django which AppConfig class to use when
# only the bare app module is listed in INSTALLED_APPS.
# NOTE(review): ``default_app_config`` is deprecated since Django 3.2 and
# removed in Django 4.1 — confirm the supported Django range for this package.
default_app_config = "django_ymap.apps.SimpleYandexMap"
# Re-export the AppConfig so it is importable from the package root.
from .apps import SimpleYandexMap
| 26.5 | 55 | 0.820755 |
default_app_config = "django_ymap.apps.SimpleYandexMap"
from .apps import SimpleYandexMap
| true | true |
f722742646422e65c474d75a884f5c5b160341a1 | 6,394 | py | Python | variants/PointWall.py | christsa/hide-rl | 47dc3dfd93b817831473c07137a6a6e7f2eda549 | [
"Apache-2.0"
] | 3 | 2021-09-17T15:16:17.000Z | 2021-12-15T14:24:39.000Z | variants/PointWall.py | christsa/hide-rl | 47dc3dfd93b817831473c07137a6a6e7f2eda549 | [
"Apache-2.0"
] | null | null | null | variants/PointWall.py | christsa/hide-rl | 47dc3dfd93b817831473c07137a6a6e7f2eda549 | [
"Apache-2.0"
] | null | null | null | """
This file provides the template for designing the agent and environment. The below hyperparameters must be assigned to a value for the algorithm to work properly.
"""
import numpy as np
# from environment import Environment
from environment_adapter import EnvironmentAdapter
from agent import Agent
from collections import OrderedDict
def design_agent_and_env(FLAGS):
    """
    Build the HAC agent and the Point/Wall maze environment.

    The function mutates ``FLAGS`` in place (hierarchy depth and the
    per-level subgoal horizon), constructs the environment adapter,
    assembles the agent hyper-parameters, and returns ``(agent, env)``.
    """
    # --- Hierarchy layout -------------------------------------------------
    FLAGS.layers = 3        # number of levels in the agent hierarchy
    FLAGS.time_scale = 10   # max sequence length each policy specializes in

    # --- Goal / subgoal spaces -------------------------------------------
    # Both the end goal and the subgoal are the agent's (x, y) position,
    # bounded by the maze extent of +/- 14 units.
    if FLAGS.normalization:
        bound = 14.0
        normalization_dict = {
            'lows': OrderedDict(((0, -bound), (1, -bound))),
            'highs': OrderedDict(((0, bound), (1, bound))),
            'end_goal_dims': [0, 1],
            'subgoal_dims': [0, 1],
        }
    else:
        normalization_dict = None

    def project_state_to_end_goal(sim, state):
        # The end goal is the (x, y) position: the first two state dims.
        return state[..., :2]

    def project_state_to_subgoal(sim, state):
        # Subgoals live in the same (x, y) space as end goals.
        return state[..., :2]

    env = EnvironmentAdapter(
        "maze",
        "Point",
        "WallDict",
        project_state_to_end_goal,
        project_state_to_subgoal,
        FLAGS.show,
        normalization_dict,
        featurize_image=FLAGS.featurize_image,
        seed=FLAGS.seed,
    )

    # --- Agent hyper-parameters ------------------------------------------
    num_subgoal_dims = len(env.subgoal_thresholds)
    agent_params = {
        # Fraction of subgoal actions that are subgoal-tested.
        "subgoal_test_perc": 0.3,
        # Q-target for a missed subgoal (discount 0 => target == penalty).
        "subgoal_penalty": -FLAGS.time_scale,
        # Gaussian exploration noise, as a fraction of each action range.
        "atomic_noise": [0.1] * 2,
        "subgoal_noise": [0.01] * num_subgoal_dims,
        "oracle_noise": [0.01] * num_subgoal_dims,
        "vpn_noise": [0.05] * num_subgoal_dims,
        # Replay-buffer size in episodes, per hierarchy level.
        "episodes_to_store": 500,
        # Exploration episodes between the 100-episode test phases.
        "num_exploration_episodes": 100,
        "num_batches": 250,
    }

    agent = Agent(FLAGS, env, agent_params)
    return agent, env
| 52.842975 | 506 | 0.700344 |
import numpy as np
from environment_adapter import EnvironmentAdapter
from agent import Agent
from collections import OrderedDict
def design_agent_and_env(FLAGS):
FLAGS.layers = 3
FLAGS.time_scale = 10
model_name = "ant_reacher.xml"
normalization_dict = None if not FLAGS.normalization else {
'lows': OrderedDict((
(0, -14.),
(1, -14.),
)),
'highs': OrderedDict((
(0, 14.),
(1, 14.),
)),
'end_goal_dims': [0,1],
'subgoal_dims': [0,1],
}
project_state_to_end_goal = lambda sim, state: state[..., :2]
project_state_to_subgoal = lambda sim, state: state[..., :2]
env = EnvironmentAdapter("maze", "Point", "WallDict", project_state_to_end_goal, project_state_to_subgoal, FLAGS.show, normalization_dict, featurize_image=FLAGS.featurize_image, seed=FLAGS.seed)
agent_params = {}
agent_params["subgoal_test_perc"] = 0.3
agent_params["subgoal_penalty"] = -FLAGS.time_scale
agent_params["atomic_noise"] = [0.1 for i in range(2)]
agent_params["subgoal_noise"] = [0.01 for i in range(len(env.subgoal_thresholds))]
agent_params["oracle_noise"] = [0.01 for i in range(len(env.subgoal_thresholds))]
agent_params["vpn_noise"] = [0.05 for i in range(len(env.subgoal_thresholds))]
agent_params["episodes_to_store"] = 500
agent_params["num_exploration_episodes"] = 100
agent_params["num_batches"] = 250
agent = Agent(FLAGS,env,agent_params)
return agent, env
| true | true |
f72274be6eca348123ebbb2cfa1c699b6a3445cf | 3,235 | py | Python | rl_6_nimmt/play.py | johannbrehmer/rl-6-nimmt | 8bc504e0372bb4bc99a3d69e77418991092ffdac | [
"MIT"
] | 3 | 2021-04-21T07:41:45.000Z | 2022-02-12T23:43:44.000Z | rl_6_nimmt/play.py | johannbrehmer/rl-6-nimmt | 8bc504e0372bb4bc99a3d69e77418991092ffdac | [
"MIT"
] | null | null | null | rl_6_nimmt/play.py | johannbrehmer/rl-6-nimmt | 8bc504e0372bb4bc99a3d69e77418991092ffdac | [
"MIT"
] | 3 | 2021-04-20T04:28:58.000Z | 2021-12-31T13:06:51.000Z | import torch
import numpy as np
import logging
from .env import SechsNimmtEnv
logger = logging.getLogger(__name__)
class GameSession:
    """
    Runs a sequence of 6-nimmt! games between a fixed set of agents.

    The per-game total scores (negative Hornochsen) are accumulated in
    ``self.results``; ``self.game`` counts finished games and is passed
    to the agents as the episode number.
    """

    def __init__(self, *agents, device=torch.device("cpu"), dtype=torch.float):
        """ Initializes a game session, which consists of an arbitrary number of games between the given agents """
        self.device = device
        self.dtype = dtype
        # Move every agent's parameters to the session device / dtype.
        self.agents = [agent.to(self.device, self.dtype) for agent in agents]
        self.num_agents = len(agents)
        self.env = SechsNimmtEnv(self.num_agents)
        self.results = []  # List of total scores (negative Hornochsen) for each game
        self.game = 0
        self._set_env_player_names()

    def play_game(self, render=False):
        """ Play one game, i.e. until one player hits 66 Hornochsen or whatever it is """
        states, all_legal_actions = self.env.reset()
        states = self._tensorize(states)
        done = False
        # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24 (AttributeError); the builtin ``int`` is the replacement.
        rewards = np.zeros(self.num_agents, dtype=int)
        scores = np.zeros(self.num_agents, dtype=int)

        if render:
            self.env.render()

        while not done:
            # Agent turns: every agent picks one action for this round.
            actions, agent_infos = [], []
            for agent, state, legal_actions in zip(self.agents, states, all_legal_actions):
                action, agent_info = agent(state, legal_actions=legal_actions)
                actions.append(int(action))
                agent_infos.append(agent_info)
                # TODO: gently enforce legality of actions by giving a negative reward and asking again

            # Environment steps
            (next_states, next_all_legal_actions), next_rewards, done, info = self.env.step(actions)
            next_states = self._tensorize(next_states)

            if render:
                self.env.render()

            # Learning: each agent observes its own transition.
            for agent, action, state, next_state, reward, next_reward, agent_info, legal_actions, next_legal_actions, in zip(
                self.agents, actions, states, next_states, rewards, next_rewards, agent_infos, all_legal_actions, next_all_legal_actions
            ):
                agent.learn(
                    state=state,
                    legal_actions=legal_actions.copy(),
                    reward=reward,
                    action=action,
                    done=done,
                    next_state=next_state,
                    next_legal_actions=next_legal_actions.copy(),
                    next_reward=next_reward,
                    num_episode=self.game,
                    episode_end=done,
                    **agent_info,
                )

            scores += np.array(next_rewards)
            states = next_states
            all_legal_actions = next_all_legal_actions
            rewards = next_rewards

        self.results.append(scores)
        self.game += 1

    def _tensorize(self, inputs):
        """Converts each entry of ``inputs`` to a tensor on the session device/dtype."""
        # Loop variable renamed: ``input`` shadowed the builtin of the same name.
        return [torch.tensor(entry).to(self.device, self.dtype) for entry in inputs]

    def _set_env_player_names(self):
        """Forwards human-readable agent names to the environment (used for rendering)."""
        names = []
        for agent in self.agents:
            # Prefer an explicit ``__name__`` attribute; otherwise the class name.
            # BUGFIX: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
            try:
                names.append(agent.__name__)
            except AttributeError:
                names.append(type(agent).__name__)
        self.env._player_names = names
| 36.761364 | 136 | 0.591654 | import torch
import numpy as np
import logging
from .env import SechsNimmtEnv
logger = logging.getLogger(__name__)
class GameSession:
    """Plays repeated games of Sechs Nimmt between a fixed set of agents.

    Final per-agent scores of every completed game are appended to
    ``self.results``; ``self.game`` counts finished games and is passed to the
    agents as the episode number.
    """

    def __init__(self, *agents, device=torch.device("cpu"), dtype=torch.float):
        # Move every agent to the session's device/dtype so inputs and
        # parameters always agree.
        self.device = device
        self.dtype = dtype
        self.agents = [agent.to(self.device, self.dtype) for agent in agents]
        self.num_agents = len(agents)
        self.env = SechsNimmtEnv(self.num_agents)
        self.results = []  # one array of final scores per completed game
        self.game = 0  # number of games played so far (episode counter)
        self._set_env_player_names()

    def play_game(self, render=False):
        """Plays one full game, letting every agent learn from each transition.

        Parameters
        ----------
        render : bool
            When True, render the environment after reset and after each step.
        """
        states, all_legal_actions = self.env.reset()
        states = self._tensorize(states)
        done = False
        # NOTE: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the supported spelling and is equivalent here.
        rewards = np.zeros(self.num_agents, dtype=int)
        scores = np.zeros(self.num_agents, dtype=int)
        if render:
            self.env.render()

        while not done:
            # Collect one action (plus agent-specific extras) per agent.
            actions, agent_infos = [], []
            for agent, state, legal_actions in zip(self.agents, states, all_legal_actions):
                action, agent_info = agent(state, legal_actions=legal_actions)
                actions.append(int(action))
                agent_infos.append(agent_info)

            (next_states, next_all_legal_actions), next_rewards, done, info = self.env.step(actions)
            next_states = self._tensorize(next_states)
            if render:
                self.env.render()

            # Let every agent learn from its own transition. Legal-action
            # lists are copied so agents cannot mutate shared state.
            for agent, action, state, next_state, reward, next_reward, agent_info, legal_actions, next_legal_actions, in zip(
                self.agents, actions, states, next_states, rewards, next_rewards, agent_infos, all_legal_actions, next_all_legal_actions
            ):
                agent.learn(
                    state=state,
                    legal_actions=legal_actions.copy(),
                    reward=reward,
                    action=action,
                    done=done,
                    next_state=next_state,
                    next_legal_actions=next_legal_actions.copy(),
                    next_reward=next_reward,
                    num_episode=self.game,
                    episode_end=done,
                    **agent_info,
                )

            scores += np.array(next_rewards)
            states = next_states
            all_legal_actions = next_all_legal_actions
            rewards = next_rewards

        self.results.append(scores)
        self.game += 1

    def _tensorize(self, inputs):
        # Convert each per-agent observation to a tensor on the session's
        # device and dtype.
        return [torch.tensor(input).to(self.device, self.dtype) for input in inputs]

    def _set_env_player_names(self):
        # Expose human-readable agent names to the environment (presumably
        # used for rendering). Only an attribute lookup can fail here, so
        # catch AttributeError rather than swallowing everything.
        names = []
        for agent in self.agents:
            try:
                names.append(agent.__name__)
            except AttributeError:
                names.append(type(agent).__name__)
        self.env._player_names = names
| true | true |
f72274cbfa432897ee252cd78be990ef01cbf7ec | 21,593 | py | Python | virt/ansible-2.3.0/lib/python2.7/site-packages/ansible/modules/packaging/language/pip.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | null | null | null | virt/ansible-2.3.0/lib/python2.7/site-packages/ansible/modules/packaging/language/pip.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | null | null | null | virt/ansible-2.3.0/lib/python2.7/site-packages/ansible/modules/packaging/language/pip.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: pip
short_description: Manages Python library dependencies.
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
or C(requirements)."
version_added: "0.7"
options:
name:
description:
- The name of a Python library to install or the url of the remote package.
- As of 2.2 you can supply a list of names.
required: false
default: null
version:
description:
- The version number to install of the Python library specified in the I(name) parameter
required: false
default: null
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the 'executable' parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional virtualenv_site_packages, virtualenv_command,
and virtualenv_python options affect the creation of the virtualenv.
required: false
default: null
virtualenv_site_packages:
version_added: "1.0"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command or a pathname to the command to create the virtual
environment with. For example C(pyvenv), C(virtualenv),
C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
required: false
default: virtualenv
virtualenv_python:
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
For example C(python3.5), C(python2.7). When not specified, the
Python version used to run the ansible module is used.
required: false
default: null
state:
description:
- The state of module
- The 'forcereinstall' option is only available in Ansible 2.1 and above.
required: false
default: present
choices: [ "present", "absent", "latest", "forcereinstall" ]
extra_args:
description:
- Extra arguments passed to pip.
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable flag for versioning URLs.
required: false
default: yes
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
version_added: "1.3"
required: false
default: null
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run pip for a specific version of Python installed in the system. For
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
By default, it will take the appropriate version for the python interpreter
use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., 0077) and you want to pip install
packages which are to be used by all users. Note that this requires you
to specify desired umask mode in octal, with a leading 0 (e.g., 0077).
version_added: "2.1"
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- By default, this module will use the appropriate version of pip for the
interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
- pip:
name: bottle
# Install (Bottle) python package on version 0.11.
- pip:
name: bottle
version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.
- pip:
name: git+http://myrepo/app/MyApp
editable: false
# Install (MyApp) from local tarball
- pip:
name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
# Install specified python requirements.
- pip:
requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
- pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
- pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.
- pip:
name: bottle
executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
- pip:
name: bottle
state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
- pip:
name: bottle
umask: 0022
become: True
'''
import tempfile
import re
import os
import sys
from ansible.module_utils.basic import AnsibleModule, is_executable
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
def _get_cmd_options(module, cmd):
thiscmd = cmd + " --help"
rc, stdout, stderr = module.run_command(thiscmd)
if rc != 0:
module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
words = stdout.strip().split()
cmd_options = [x for x in words if x.startswith('--')]
return cmd_options
def _get_full_name(name, version=None):
if version is None:
resp = name
else:
resp = name + '==' + version
return resp
def _get_packages(module, pip, chdir):
'''Return results of pip command to get packages.'''
# Try 'pip list' command first.
command = '%s list' % pip
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (pip version too old) then use 'pip freeze'.
if rc != 0:
command = '%s freeze' % pip
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
return (command, out, err)
def _is_present(name, version, installed_pkgs, pkg_command):
'''Return whether or not package is installed.'''
for pkg in installed_pkgs:
# Package listing will be different depending on which pip
# command was used ('pip list' vs. 'pip freeze').
if 'list' in pkg_command:
pkg = pkg.replace('(', '').replace(')', '')
if ',' in pkg:
pkg_name, pkg_version, _ = pkg.replace(',', '').split(' ')
else:
pkg_name, pkg_version = pkg.split(' ')
elif 'freeze' in pkg_command:
if '==' in pkg:
pkg_name, pkg_version = pkg.split('==')
else:
continue
else:
continue
if pkg_name == name and (version is None or version == pkg_version):
return True
return False
def _get_pip(module, env=None, executable=None):
    """Locate the pip executable to use and return its path.

    Resolution order: an explicit ``executable`` (absolute path wins outright,
    a bare basename becomes the only search candidate); otherwise a pip
    matching the running interpreter found via ``module.get_bin_path``; or,
    when ``env`` is given, a pip inside the virtualenv's ``bin/`` directory.
    Fails the module run when no candidate is found.
    """
    # Older pip only installed under the "/usr/bin/pip" name. Many Linux
    # distros install it there.
    # By default, we try to use pip required for the current python
    # interpreter, so people can use pip to install modules dependencies
    candidate_pip_basenames = ('pip2', 'pip')
    if PY3:
        # pip under python3 installs the "/usr/bin/pip3" name
        candidate_pip_basenames = ('pip3',)
    pip = None
    if executable is not None:
        executable = os.path.expanduser(executable)
        if os.path.isabs(executable):
            pip = executable
        else:
            # If you define your own executable that executable should be the only candidate.
            # As noted in the docs, executable doesn't work with virtualenvs.
            candidate_pip_basenames = (executable,)
    if pip is None:
        if env is None:
            opt_dirs = []
            for basename in candidate_pip_basenames:
                pip = module.get_bin_path(basename, False, opt_dirs)
                if pip is not None:
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find any of %s to use. pip'
                                 ' needs to be installed.' % ', '.join(candidate_pip_basenames))
        else:
            # If we're using a virtualenv we must use the pip from the
            # virtualenv
            venv_dir = os.path.join(env, 'bin')
            # Also accept a plain 'pip' inside the venv, not just pipN.
            candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
            for basename in candidate_pip_basenames:
                candidate = os.path.join(venv_dir, basename)
                if os.path.exists(candidate) and is_executable(candidate):
                    pip = candidate
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find pip in the virtualenv,'
                                 ' %s, under any of these names: %s. Make sure pip is'
                                 ' present in the virtualenv.' % (env,
                                 ', '.join(candidate_pip_basenames)))
    return pip
def _fail(module, cmd, out, err):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, env=None):
    """This is only needed for special packages which do not show up in pip freeze

    pip and setuptools fall into this category.

    :returns: a string ``'<package>==<version>'`` if the package is
        installed.  None if the package is not installed (or no python
        interpreter could be found to query it).
    """
    if env:
        # Prefer the virtualenv's interpreter when one is in use.
        opt_dirs = ['%s/bin' % env]
    else:
        opt_dirs = []
    python_bin = module.get_bin_path('python', False, opt_dirs)
    if python_bin is None:
        formatted_dep = None
    else:
        # Run a one-liner inside the target interpreter to read the version.
        rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
        if rc:
            # Non-zero exit: the package is not importable there.
            formatted_dep = None
        else:
            formatted_dep = '%s==%s' % (package, out.strip())
    return formatted_dep
def main():
    """Entry point: install, remove or upgrade Python packages with pip,
    optionally inside a (freshly created) virtualenv."""
    # Map the module's `state` choices onto the pip sub-command implementing them.
    state_map = dict(
        present='install',
        absent='uninstall -y',
        latest='install -U',
        forcereinstall='install -U --force-reinstall',
    )

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=state_map.keys()),
            name=dict(type='list'),
            version=dict(type='str'),
            requirements=dict(),
            virtualenv=dict(type='path'),
            virtualenv_site_packages=dict(default=False, type='bool'),
            virtualenv_command=dict(default='virtualenv', type='path'),
            virtualenv_python=dict(type='str'),
            use_mirrors=dict(default=True, type='bool'),
            extra_args=dict(),
            editable=dict(default=True, type='bool'),
            chdir=dict(type='path'),
            executable=dict(),
            umask=dict(),
        ),
        required_one_of=[['name', 'requirements']],
        mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
        supports_check_mode=True
    )

    state = module.params['state']
    name = module.params['name']
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']
    umask = module.params['umask']

    # umask arrives as a string; it must parse as an octal literal.
    if umask and not isinstance(umask, int):
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg="umask must be an octal integer",
                             details=to_native(sys.exc_info()[1]))

    old_umask = None
    if umask is not None:
        old_umask = os.umask(umask)
    try:
        if state == 'latest' and version is not None:
            module.fail_json(msg='version is incompatible with state=latest')

        if chdir is None:
            # this is done to avoid permissions issues with privilege escalation and virtualenvs
            chdir = tempfile.gettempdir()

        err = ''
        out = ''

        env = module.params['virtualenv']

        if env:
            if not os.path.exists(os.path.join(env, 'bin', 'activate')):
                # The virtualenv does not exist yet and must be created.
                if module.check_mode:
                    module.exit_json(changed=True)

                cmd = module.params['virtualenv_command']
                if os.path.basename(cmd) == cmd:
                    cmd = module.get_bin_path(cmd, True)

                if module.params['virtualenv_site_packages']:
                    cmd += ' --system-site-packages'
                else:
                    cmd_opts = _get_cmd_options(module, cmd)
                    if '--no-site-packages' in cmd_opts:
                        cmd += ' --no-site-packages'

                if virtualenv_python:
                    cmd += ' -p%s' % virtualenv_python
                elif PY3:
                    # Ubuntu currently has a patch making virtualenv always
                    # try to use python2. Since Ubuntu16 works without
                    # python2 installed, this is a problem. This code mimics
                    # the upstream behaviour of using the python which invoked
                    # virtualenv to determine which python is used inside of
                    # the virtualenv (when none are specified).
                    cmd += ' -p%s' % sys.executable

                cmd = "%s %s" % (cmd, env)
                rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
                out += out_venv
                err += err_venv
                if rc != 0:
                    _fail(module, cmd, out, err)

        pip = _get_pip(module, env, module.params['executable'])

        cmd = '%s %s' % (pip, state_map[state])

        # If there's a virtualenv we want things we install to be able to use other
        # installations that exist as binaries within this virtualenv. Example: we
        # install cython and then gevent -- gevent needs to use the cython binary,
        # not just a python package that will be found by calling the right python.
        # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
        # in run_command by setting path_prefix here.
        path_prefix = None
        if env:
            path_prefix = "/".join(pip.split('/')[:-1])

        # Automatically apply -e option to extra_args when source is a VCS url. VCS
        # includes those beginning with svn+, git+, hg+ or bzr+
        has_vcs = False
        if name:
            for pkg in name:
                if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)):
                    has_vcs = True
                    break

        if has_vcs and module.params['editable']:
            args_list = []  # used if extra_args is not used at all
            if extra_args:
                args_list = extra_args.split(' ')
            if '-e' not in args_list:
                args_list.append('-e')
                # Ok, we will reconstruct the option string
                extra_args = ' '.join(args_list)

        if extra_args:
            cmd += ' %s' % extra_args
        if name:
            for pkg in name:
                cmd += ' %s' % _get_full_name(pkg, version)
        else:
            if requirements:
                cmd += ' -r %s' % requirements

        if module.check_mode:
            # In check mode we only report whether a change *would* occur.
            if extra_args or requirements or state == 'latest' or not name:
                module.exit_json(changed=True)

            elif has_vcs:
                module.exit_json(changed=True)

            pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
            out += out_pip
            err += err_pip

            changed = False
            if name:
                pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]

                if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
                    # Older versions of pip (pre-1.3) do not have pip list.
                    # pip freeze does not list setuptools or pip in its output
                    # So we need to get those via a specialcase
                    for pkg in ('setuptools', 'pip'):
                        if pkg in name:
                            formatted_dep = _get_package_info(module, pkg, env)
                            if formatted_dep is not None:
                                pkg_list.append(formatted_dep)
                                out += '%s\n' % formatted_dep

                for pkg in name:
                    is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
                    if (state == 'present' and not is_present) or (state == 'absent' and is_present):
                        changed = True
                        break
            module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)

        # Snapshot the package list beforehand when pip's own output cannot
        # tell us whether anything changed (requirements files, VCS installs).
        if requirements or has_vcs:
            _, out_freeze_before, _ = _get_packages(module, pip, chdir)
        else:
            out_freeze_before = None

        rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
        out += out_pip
        err += err_pip
        if rc == 1 and state == 'absent' and \
                ('not installed' in out_pip or 'not installed' in err_pip):
            pass  # rc is 1 when attempting to uninstall non-installed package
        elif rc != 0:
            _fail(module, cmd, out, err)

        if state == 'absent':
            changed = 'Successfully uninstalled' in out_pip
        else:
            # BUGFIX: the original nested a second, identical
            # `if out_freeze_before is None` check inside the else branch,
            # making the freeze-comparison path unreachable dead code at one
            # nesting level; a single check is equivalent and correct.
            if out_freeze_before is None:
                changed = 'Successfully installed' in out_pip
            else:
                _, out_freeze_after, _ = _get_packages(module, pip, chdir)
                changed = out_freeze_before != out_freeze_after

        module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
                         state=state, requirements=requirements, virtualenv=env,
                         stdout=out, stderr=err)
    finally:
        # Always restore the caller's umask, even on failure paths.
        if old_umask is not None:
            os.umask(old_umask)
if __name__ == '__main__':
main()
| 36.660441 | 142 | 0.608253 |
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: pip
short_description: Manages Python library dependencies.
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
or C(requirements)."
version_added: "0.7"
options:
name:
description:
- The name of a Python library to install or the url of the remote package.
- As of 2.2 you can supply a list of names.
required: false
default: null
version:
description:
- The version number to install of the Python library specified in the I(name) parameter
required: false
default: null
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the 'executable' parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional virtualenv_site_packages, virtualenv_command,
and virtualenv_python options affect the creation of the virtualenv.
required: false
default: null
virtualenv_site_packages:
version_added: "1.0"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command or a pathname to the command to create the virtual
environment with. For example C(pyvenv), C(virtualenv),
C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
required: false
default: virtualenv
virtualenv_python:
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
For example C(python3.5), C(python2.7). When not specified, the
Python version used to run the ansible module is used.
required: false
default: null
state:
description:
- The state of module
- The 'forcereinstall' option is only available in Ansible 2.1 and above.
required: false
default: present
choices: [ "present", "absent", "latest", "forcereinstall" ]
extra_args:
description:
- Extra arguments passed to pip.
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable flag for versioning URLs.
required: false
default: yes
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
version_added: "1.3"
required: false
default: null
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run pip for a specific version of Python installed in the system. For
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
By default, it will take the appropriate version for the python interpreter
use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., 0077) and you want to pip install
packages which are to be used by all users. Note that this requires you
to specify desired umask mode in octal, with a leading 0 (e.g., 0077).
version_added: "2.1"
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- By default, this module will use the appropriate version of pip for the
interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
- pip:
name: bottle
# Install (Bottle) python package on version 0.11.
- pip:
name: bottle
version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.
- pip:
name: git+http://myrepo/app/MyApp
editable: false
# Install (MyApp) from local tarball
- pip:
name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
# Install specified python requirements.
- pip:
requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
- pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
- pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.
- pip:
name: bottle
executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
- pip:
name: bottle
state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
- pip:
name: bottle
umask: 0022
become: True
'''
import tempfile
import re
import os
import sys
from ansible.module_utils.basic import AnsibleModule, is_executable
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
def _get_cmd_options(module, cmd):
    """Return the long options (``--foo``) advertised by ``cmd --help``."""
    thiscmd = cmd + " --help"
    rc, stdout, stderr = module.run_command(thiscmd)
    if rc != 0:
        module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))

    # Every whitespace-separated token starting with '--' is a long option.
    words = stdout.strip().split()
    cmd_options = [x for x in words if x.startswith('--')]
    return cmd_options
def _get_full_name(name, version=None):
    """Return a pip requirement specifier, pinning ``version`` when given."""
    if version is None:
        resp = name
    else:
        resp = name + '==' + version
    return resp
def _get_packages(module, pip, chdir):
    """Return ``(command, stdout, stderr)`` of a pip package listing.

    Tries ``pip list`` first (with a forced C locale so output is parseable)
    and falls back to ``pip freeze`` for pip releases too old to have the
    ``list`` subcommand; fails the module when both commands fail.
    """
    command = '%s list' % pip
    lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
    if rc != 0:
        # 'pip list' unavailable (old pip) -> fall back to 'pip freeze'.
        command = '%s freeze' % pip
        rc, out, err = module.run_command(command, cwd=chdir)
        if rc != 0:
            _fail(module, command, out, err)
    return (command, out, err)
def _is_present(name, version, installed_pkgs, pkg_command):
    """Return True when ``name`` (optionally at ``version``) is installed.

    ``installed_pkgs`` are output lines of the command named in
    ``pkg_command``; parsing depends on whether 'pip list' or 'pip freeze'
    produced them.
    """
    for pkg in installed_pkgs:
        if 'list' in pkg_command:
            # 'pip list' prints "pkg (1.0)" or "pkg (1.0, /path)".
            pkg = pkg.replace('(', '').replace(')', '')
            if ',' in pkg:
                pkg_name, pkg_version, _ = pkg.replace(',', '').split(' ')
            else:
                pkg_name, pkg_version = pkg.split(' ')
        elif 'freeze' in pkg_command:
            # 'pip freeze' prints "pkg==1.0"; skip other lines (e.g. VCS).
            if '==' in pkg:
                pkg_name, pkg_version = pkg.split('==')
            else:
                continue
        else:
            continue
        # No requested version means any installed version matches.
        if pkg_name == name and (version is None or version == pkg_version):
            return True
    return False
def _get_pip(module, env=None, executable=None):
    """Locate the pip executable to use and return its path.

    Resolution order: an explicit ``executable`` (absolute path wins
    outright, a bare basename becomes the only search candidate); otherwise
    a pip matching the running interpreter from ``$PATH``; or, when ``env``
    is given, a pip inside the virtualenv's ``bin/`` directory.  Fails the
    module run when no candidate is found.
    """
    # Candidates matching the interpreter running this module.
    candidate_pip_basenames = ('pip2', 'pip')
    if PY3:
        candidate_pip_basenames = ('pip3',)
    pip = None
    if executable is not None:
        executable = os.path.expanduser(executable)
        if os.path.isabs(executable):
            pip = executable
        else:
            # A bare basename becomes the only candidate to search for.
            candidate_pip_basenames = (executable,)
    if pip is None:
        if env is None:
            opt_dirs = []
            for basename in candidate_pip_basenames:
                pip = module.get_bin_path(basename, False, opt_dirs)
                if pip is not None:
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find any of %s to use. pip'
                                 ' needs to be installed.' % ', '.join(candidate_pip_basenames))
        else:
            # If we're using a virtualenv we must use the pip from the
            # virtualenv's bin/ directory; also accept a plain 'pip' there.
            venv_dir = os.path.join(env, 'bin')
            candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
            for basename in candidate_pip_basenames:
                candidate = os.path.join(venv_dir, basename)
                if os.path.exists(candidate) and is_executable(candidate):
                    pip = candidate
                    break
            else:
                # For-else: no candidate inside the virtualenv was usable.
                module.fail_json(msg='Unable to find pip in the virtualenv,'
                                 ' %s, under any of these names: %s. Make sure pip is'
                                 ' present in the virtualenv.' % (env,
                                 ', '.join(candidate_pip_basenames)))
    return pip
def _fail(module, cmd, out, err):
    """Abort the module run, reporting the failing command and its output."""
    msg = ''
    if out:
        msg += "stdout: %s" % (out, )
    if err:
        msg += "\n:stderr: %s" % (err, )
    module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, env=None):
    """Query the version of a package that does not show up in ``pip freeze``
    (pip and setuptools fall into this category).

    :returns: ``'<package>==<version>'`` if the package is installed, or
        None when it is not installed or no python interpreter was found.
    """
    if env:
        # Prefer the virtualenv's interpreter when one is in use.
        opt_dirs = ['%s/bin' % env]
    else:
        opt_dirs = []
    python_bin = module.get_bin_path('python', False, opt_dirs)
    if python_bin is None:
        formatted_dep = None
    else:
        # Run a one-liner inside the target interpreter to read the version.
        rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
        if rc:
            # Non-zero exit: the package is not importable there.
            formatted_dep = None
        else:
            formatted_dep = '%s==%s' % (package, out.strip())
    return formatted_dep
def main():
    """Entry point: install, remove or upgrade Python packages with pip,
    optionally inside a (freshly created) virtualenv."""
    # Map the module's `state` choices onto the pip sub-command implementing them.
    state_map = dict(
        present='install',
        absent='uninstall -y',
        latest='install -U',
        forcereinstall='install -U --force-reinstall',
    )

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=state_map.keys()),
            name=dict(type='list'),
            version=dict(type='str'),
            requirements=dict(),
            virtualenv=dict(type='path'),
            virtualenv_site_packages=dict(default=False, type='bool'),
            virtualenv_command=dict(default='virtualenv', type='path'),
            virtualenv_python=dict(type='str'),
            use_mirrors=dict(default=True, type='bool'),
            extra_args=dict(),
            editable=dict(default=True, type='bool'),
            chdir=dict(type='path'),
            executable=dict(),
            umask=dict(),
        ),
        required_one_of=[['name', 'requirements']],
        mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
        supports_check_mode=True
    )

    state = module.params['state']
    name = module.params['name']
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']
    umask = module.params['umask']

    # umask arrives as a string; it must parse as an octal literal.
    if umask and not isinstance(umask, int):
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg="umask must be an octal integer",
                             details=to_native(sys.exc_info()[1]))

    old_umask = None
    if umask is not None:
        old_umask = os.umask(umask)
    try:
        if state == 'latest' and version is not None:
            module.fail_json(msg='version is incompatible with state=latest')

        if chdir is None:
            # Avoid permissions issues with privilege escalation and virtualenvs.
            chdir = tempfile.gettempdir()

        err = ''
        out = ''

        env = module.params['virtualenv']

        if env:
            if not os.path.exists(os.path.join(env, 'bin', 'activate')):
                # The virtualenv does not exist yet and must be created.
                if module.check_mode:
                    module.exit_json(changed=True)

                cmd = module.params['virtualenv_command']
                if os.path.basename(cmd) == cmd:
                    cmd = module.get_bin_path(cmd, True)

                if module.params['virtualenv_site_packages']:
                    cmd += ' --system-site-packages'
                else:
                    cmd_opts = _get_cmd_options(module, cmd)
                    if '--no-site-packages' in cmd_opts:
                        cmd += ' --no-site-packages'

                if virtualenv_python:
                    cmd += ' -p%s' % virtualenv_python
                elif PY3:
                    # Mimic upstream virtualenv: default to the python that
                    # invoked this module when none is specified.
                    cmd += ' -p%s' % sys.executable

                cmd = "%s %s" % (cmd, env)
                rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
                out += out_venv
                err += err_venv
                if rc != 0:
                    _fail(module, cmd, out, err)

        pip = _get_pip(module, env, module.params['executable'])

        cmd = '%s %s' % (pip, state_map[state])

        # Inside a virtualenv, prepend its bin/ to PATH so freshly installed
        # console scripts (e.g. cython) are usable by later installs.
        path_prefix = None
        if env:
            path_prefix = "/".join(pip.split('/')[:-1])

        # Detect VCS URLs (svn+, git+, hg+, bzr+) so '-e' can be applied.
        has_vcs = False
        if name:
            for pkg in name:
                if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)):
                    has_vcs = True
                    break

        if has_vcs and module.params['editable']:
            args_list = []  # used if extra_args is not used at all
            if extra_args:
                args_list = extra_args.split(' ')
            if '-e' not in args_list:
                args_list.append('-e')
                # Reconstruct the option string with '-e' included.
                extra_args = ' '.join(args_list)

        if extra_args:
            cmd += ' %s' % extra_args
        if name:
            for pkg in name:
                cmd += ' %s' % _get_full_name(pkg, version)
        else:
            if requirements:
                cmd += ' -r %s' % requirements

        if module.check_mode:
            # In check mode we only report whether a change *would* occur.
            if extra_args or requirements or state == 'latest' or not name:
                module.exit_json(changed=True)

            elif has_vcs:
                module.exit_json(changed=True)

            pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
            out += out_pip
            err += err_pip

            changed = False
            if name:
                pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]

                if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
                    # pip freeze does not list setuptools or pip, so those
                    # must be queried via a special case.
                    for pkg in ('setuptools', 'pip'):
                        if pkg in name:
                            formatted_dep = _get_package_info(module, pkg, env)
                            if formatted_dep is not None:
                                pkg_list.append(formatted_dep)
                                out += '%s\n' % formatted_dep

                for pkg in name:
                    is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
                    if (state == 'present' and not is_present) or (state == 'absent' and is_present):
                        changed = True
                        break
            module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)

        # Snapshot the package list beforehand when pip's own output cannot
        # tell us whether anything changed (requirements files, VCS installs).
        if requirements or has_vcs:
            _, out_freeze_before, _ = _get_packages(module, pip, chdir)
        else:
            out_freeze_before = None

        rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
        out += out_pip
        err += err_pip
        if rc == 1 and state == 'absent' and \
                ('not installed' in out_pip or 'not installed' in err_pip):
            pass  # rc is 1 when attempting to uninstall non-installed package
        elif rc != 0:
            _fail(module, cmd, out, err)

        if state == 'absent':
            changed = 'Successfully uninstalled' in out_pip
        else:
            # BUGFIX: the original nested a second, identical
            # `if out_freeze_before is None` check inside the else branch,
            # making the freeze-comparison path dead at one nesting level;
            # a single check is equivalent and correct.
            if out_freeze_before is None:
                changed = 'Successfully installed' in out_pip
            else:
                _, out_freeze_after, _ = _get_packages(module, pip, chdir)
                changed = out_freeze_before != out_freeze_after

        module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
                         state=state, requirements=requirements, virtualenv=env,
                         stdout=out, stderr=err)
    finally:
        # Always restore the caller's umask, even on failure paths.
        if old_umask is not None:
            os.umask(old_umask)
if __name__ == '__main__':
main()
| true | true |
f7227586a1a0c54933e22c5615c1e4a40039c1df | 2,001 | py | Python | nix_review/tests/test_rev.py | zimbatm/nix-review | 33c31f06b4e8e74e1b48e8df8cb8083041ec14ea | [
"MIT"
] | null | null | null | nix_review/tests/test_rev.py | zimbatm/nix-review | 33c31f06b4e8e74e1b48e8df8cb8083041ec14ea | [
"MIT"
] | null | null | null | nix_review/tests/test_rev.py | zimbatm/nix-review | 33c31f06b4e8e74e1b48e8df8cb8083041ec14ea | [
"MIT"
] | null | null | null | import unittest
from typing import Any, List, Tuple
from unittest.mock import MagicMock, patch
from io import StringIO
from nix_review.cli import main
from .cli_mocks import (
CliTestCase,
Mock,
MockCompletedProcess,
build_cmds,
read_asset,
IgnoreArgument,
)
def rev_command_cmds() -> List[Tuple[Any, Any]]:
    """Expected (command, mocked result) pairs for `nix-review rev HEAD`."""
    cmds: List[Tuple[Any, Any]] = []
    # resolve HEAD to a commit hash
    cmds.append((["git", "rev-parse", "--verify", "HEAD"],
                 MockCompletedProcess(stdout=b"hash1\n")))
    # fetch upstream master into the nix-review ref
    cmds.append(([
        "git",
        "-c",
        "fetch.prune=false",
        "fetch",
        "--force",
        "https://github.com/NixOS/nixpkgs",
        "master:refs/nix-review/0",
    ], MockCompletedProcess()))
    # resolve the fetched ref
    cmds.append((["git", "rev-parse", "--verify", "refs/nix-review/0"],
                 MockCompletedProcess(stdout=b"hash1\n")))
    # worktree checkout of the commit (path is generated, so ignored)
    cmds.append((["git", "worktree", "add", IgnoreArgument, "hash1"],
                 MockCompletedProcess()))
    # package listing before the merge
    cmds.append((IgnoreArgument,
                 MockCompletedProcess(stdout=StringIO("<items></items>"))))
    # merge the commit under review
    cmds.append((["git", "merge", "--no-commit", "hash1"],
                 MockCompletedProcess()))
    # package listing after the merge
    cmds.append((IgnoreArgument,
                 MockCompletedProcess(
                     stdout=StringIO(read_asset("package_list_after.txt")))))
    return cmds
class RevCommand(CliTestCase):
    """End-to-end test of the `rev` subcommand with mocked subprocess calls."""

    @patch("subprocess.run")
    @patch("subprocess.Popen")
    def test_rev_command(self, mock_popen: MagicMock, mock_run: MagicMock) -> None:
        # Both subprocess.run and Popen (as context manager) are fed from the
        # same ordered mock command sequence.
        side_effects = Mock(self, rev_command_cmds() + build_cmds)
        mock_run.side_effect = side_effects
        mock_popen.return_value.__enter__.side_effect = side_effects
        cli_args = [
            "rev",
            "--build-args",
            '--builders "ssh://joerg@10.243.29.170 aarch64-linux"',
            "HEAD",
        ]
        main("nix-review", cli_args)
# Allow running this test module directly; stop at the first failure.
if __name__ == "__main__":
    unittest.main(failfast=True)
| 27.410959 | 88 | 0.538231 | import unittest
from typing import Any, List, Tuple
from unittest.mock import MagicMock, patch
from io import StringIO
from nix_review.cli import main
from .cli_mocks import (
CliTestCase,
Mock,
MockCompletedProcess,
build_cmds,
read_asset,
IgnoreArgument,
)
def rev_command_cmds() -> List[Tuple[Any, Any]]:
return [
(
["git", "rev-parse", "--verify", "HEAD"],
MockCompletedProcess(stdout=b"hash1\n"),
),
(
[
"git",
"-c",
"fetch.prune=false",
"fetch",
"--force",
"https://github.com/NixOS/nixpkgs",
"master:refs/nix-review/0",
],
MockCompletedProcess(),
),
(
["git", "rev-parse", "--verify", "refs/nix-review/0"],
MockCompletedProcess(stdout=b"hash1\n"),
),
(["git", "worktree", "add", IgnoreArgument, "hash1"], MockCompletedProcess()),
(IgnoreArgument, MockCompletedProcess(stdout=StringIO("<items></items>"))),
(["git", "merge", "--no-commit", "hash1"], MockCompletedProcess()),
(
IgnoreArgument,
MockCompletedProcess(stdout=StringIO(read_asset("package_list_after.txt"))),
),
]
class RevCommand(CliTestCase):
@patch("subprocess.run")
@patch("subprocess.Popen")
def test_rev_command(self, mock_popen: MagicMock, mock_run: MagicMock) -> None:
effects = Mock(self, rev_command_cmds() + build_cmds)
mock_run.side_effect = effects
popen_instance = mock_popen.return_value
popen_instance.__enter__.side_effect = effects
main(
"nix-review",
[
"rev",
"--build-args",
'--builders "ssh://joerg@10.243.29.170 aarch64-linux"',
"HEAD",
],
)
if __name__ == "__main__":
unittest.main(failfast=True)
| true | true |
f72276336ffbfb188fb8f2fe5c2ebb405446b09d | 11,465 | py | Python | agents/policy_gradient/modules/generalized_onpolicy_loss.py | mrbermell/seed_rl | 9562e178fb8c16d2551d9e5d59594a7f908655dd | [
"Apache-2.0"
] | 733 | 2019-10-14T11:38:22.000Z | 2022-03-24T14:55:50.000Z | agents/policy_gradient/modules/generalized_onpolicy_loss.py | mrbermell/seed_rl | 9562e178fb8c16d2551d9e5d59594a7f908655dd | [
"Apache-2.0"
] | 76 | 2019-10-30T14:18:17.000Z | 2021-12-10T11:52:15.000Z | agents/policy_gradient/modules/generalized_onpolicy_loss.py | mrbermell/seed_rl | 9562e178fb8c16d2551d9e5d59594a7f908655dd | [
"Apache-2.0"
] | 141 | 2019-10-14T11:38:25.000Z | 2022-02-27T10:36:56.000Z | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a generalized onpolicy loss."""
import abc
import inspect
import gin
from seed_rl.agents.policy_gradient.modules import logging_module
import tensorflow as tf
@gin.configurable
class GeneralizedOnPolicyLoss(tf.Module, logging_module.LoggingModule):
  """TensorFlow module implementing the generalized onpolicy loss.

  The total loss is:
    policy_loss + baseline_cost * value_loss + regularization_loss,
  computed from on-policy unrolls produced by the actors.
  """

  def __init__(self, agent, reward_normalizer, parametric_action_distribution,
               advantage_estimator, policy_loss, discount_factor,
               regularizer=None, max_abs_reward=None,
               handle_abandoned_episodes_properly=True,
               huber_delta=None, value_ppo_style_clip_eps=None,
               baseline_cost=1., include_regularization_in_returns=False,
               frame_skip=1, reward_scaling=1.0):
    """Creates a GeneralizedOnPolicyLoss.

    Args:
      agent: Agent network; called on (prev_actions, env_outputs) unrolls and
        expected to return outputs with `policy_logits` and `baseline`.
      reward_normalizer: Optional normalizer for targets/advantages; may be
        None, in which case raw values are used.
      parametric_action_distribution: Maps policy logits to an action
        distribution (must provide `log_prob`).
      advantage_estimator: Callable computing value targets and advantages.
      policy_loss: Advantage-based policy loss (see `PolicyLoss`).
      discount_factor: Per-environment-frame discount factor.
      regularizer: Optional `RegularizationLoss`; None disables it.
      max_abs_reward: If set, rewards are clipped to [-max, max].
      handle_abandoned_episodes_properly: If False, abandoned episodes are
        treated like ordinary terminations.
      huber_delta: If set, a Huber loss with this delta is used for the
        value function instead of mean squared error.
      value_ppo_style_clip_eps: If set, the value prediction is PPO-style
        clipped around the behaviour prediction with this epsilon.
      baseline_cost: Weight of the value loss in the total loss.
      include_regularization_in_returns: If True, per-step regularization is
        added to the rewards (and hence the returns) instead of the loss.
      frame_skip: Environment frames per agent step; the discount factor is
        raised to this power.
      reward_scaling: Scalar multiplier applied to rewards.
    """
    self._agent = agent
    self._reward_normalizer = reward_normalizer
    self._parametric_action_distribution = parametric_action_distribution
    self._advantage_estimator = advantage_estimator
    self._policy_loss = policy_loss
    self._regularizer = regularizer
    self._max_abs_reward = max_abs_reward
    self._reward_scaling = reward_scaling
    self._baseline_cost = baseline_cost
    # Provided here so that it is shared.
    self._discount_factor = discount_factor
    self._frame_skip = frame_skip
    self._handle_abandoned_episodes_properly = handle_abandoned_episodes_properly
    self._value_ppo_style_clip_eps = value_ppo_style_clip_eps
    self._include_regularization_in_returns = include_regularization_in_returns
    # Per-element (unreduced) value loss; reduction happens in __call__.
    if huber_delta is not None:
      self.v_loss_fn = tf.keras.losses.Huber(
          delta=huber_delta, reduction=tf.keras.losses.Reduction.NONE)
    else:
      self.v_loss_fn = tf.keras.losses.MeanSquaredError(
          reduction=tf.keras.losses.Reduction.NONE)

  def init(self):
    # Initialize any submodule that exposes a zero-argument init().
    for module in self.submodules:
      if hasattr(module, 'init'):
        if not inspect.signature(module.init).parameters:
          module.init()

  def compute_advantages(self, agent_state, prev_actions, env_outputs,
                         agent_outputs, return_learner_outputs=False):
    """Computes (normalized) value targets and advantages for an unroll.

    Returns (normalized_targets, normalized_advantages) and, if
    `return_learner_outputs` is True, also the learner network outputs.
    """
    # Extract rewards and done information.
    rewards, done, _, abandoned, _ = tf.nest.map_structure(lambda t: t[1:],
                                                           env_outputs)
    if self._max_abs_reward is not None:
      rewards = tf.clip_by_value(rewards, -self._max_abs_reward,
                                 self._max_abs_reward)
    rewards *= self._reward_scaling
    # Compute the outputs of the neural networks on the learner.
    learner_outputs, _ = self._agent((prev_actions, env_outputs),
                                     agent_state,
                                     unroll=True,
                                     is_training=True)
    # At this point, we have unroll length + 1 steps. The last step is only
    # used as bootstrap value, so it's removed.
    agent_outputs = tf.nest.map_structure(lambda t: t[:-1], agent_outputs)
    learner_v = learner_outputs.baseline  # current value function
    learner_outputs = tf.nest.map_structure(lambda t: t[:-1], learner_outputs)
    target_action_log_probs = self._parametric_action_distribution(
        learner_outputs.policy_logits).log_prob(agent_outputs.action)
    behaviour_action_log_probs = self._parametric_action_distribution(
        agent_outputs.policy_logits).log_prob(agent_outputs.action)
    # Compute the advantages.
    if self._reward_normalizer:
      corrected_predictions = self._reward_normalizer.correct_prediction(
          learner_v)
      unnormalized_predictions = self._reward_normalizer.unnormalize_prediction(
          corrected_predictions)
    else:
      corrected_predictions = learner_v
      unnormalized_predictions = learner_v
    if not self._handle_abandoned_episodes_properly:
      # Treat abandoned episodes as regular terminations.
      abandoned = tf.zeros_like(abandoned)
    done_terminated = tf.logical_and(done, ~abandoned)
    done_abandoned = tf.logical_and(done, abandoned)
    if self._include_regularization_in_returns and self._regularizer:
      # Fold per-step regularization into the rewards (and thus returns).
      additional_rewards, _ = self._regularizer(
          self._parametric_action_distribution,
          learner_outputs.policy_logits,
          agent_outputs.policy_logits,
          agent_outputs.action, with_logging=False)
      assert rewards.shape == additional_rewards.shape
      rewards += additional_rewards
    # tf.math.pow does not work on TPU so we compute it manually.
    adjusted_discount_factor = 1.
    for _ in range(self._frame_skip):
      adjusted_discount_factor *= self._discount_factor
    vs, advantages = self._advantage_estimator(
        unnormalized_predictions,
        rewards, done_terminated,
        done_abandoned,
        adjusted_discount_factor,
        target_action_log_probs,
        behaviour_action_log_probs)
    if self._reward_normalizer:
      normalized_targets = self._reward_normalizer.normalize_target(vs)
      normalized_advantages = self._reward_normalizer.normalize_advantage(
          advantages)
      self._reward_normalizer.update_normalization_statistics(vs)
    else:
      normalized_targets = vs
      normalized_advantages = advantages
    outputs = (normalized_targets, normalized_advantages)
    if return_learner_outputs:
      outputs += (learner_outputs,)
    return outputs

  def __call__(self, agent_state, prev_actions, env_outputs, agent_outputs,
               normalized_targets=None, normalized_advantages=None):
    """Computes the loss.

    If targets/advantages are not provided they are computed here (and the
    bootstrap timestep is stripped from the inputs). Returns a scalar:
    policy_loss + baseline_cost * value_loss + regularization_loss.
    """
    if normalized_targets is None:
      normalized_targets, normalized_advantages, learner_outputs = \
        self.compute_advantages(
            agent_state, prev_actions, env_outputs, agent_outputs,
            return_learner_outputs=True)
      # The last timestep is only used for computing advantages so we
      # remove it here.
      agent_state, prev_actions, env_outputs, agent_outputs = \
        tf.nest.map_structure(
            lambda t: t[:-1],
            (agent_state, prev_actions, env_outputs, agent_outputs))
    else:  # Advantages are already precomputed.
      learner_outputs, _ = self._agent((prev_actions, env_outputs),
                                       agent_state,
                                       unroll=True,
                                       is_training=True)
    target_action_log_probs = self._parametric_action_distribution(
        learner_outputs.policy_logits).log_prob(agent_outputs.action)
    behaviour_action_log_probs = self._parametric_action_distribution(
        agent_outputs.policy_logits).log_prob(agent_outputs.action)
    # Compute the advantages.
    if self._reward_normalizer:
      corrected_predictions = self._reward_normalizer.correct_prediction(
          learner_outputs.baseline)
      old_corrected_predictions = self._reward_normalizer.correct_prediction(
          agent_outputs.baseline)
    else:
      corrected_predictions = learner_outputs.baseline
      old_corrected_predictions = agent_outputs.baseline
    # Compute the advantage-based loss.
    policy_loss = tf.reduce_mean(
        self._policy_loss(
            normalized_advantages,
            target_action_log_probs,
            behaviour_action_log_probs,
            actions=agent_outputs.action,
            target_logits=learner_outputs.policy_logits,
            behaviour_logits=agent_outputs.policy_logits,
            parametric_action_distribution=self._parametric_action_distribution)
    )
    # Value function loss
    v_error = normalized_targets - corrected_predictions
    self.log('GeneralizedOnPolicyLoss/V_error', v_error)
    self.log('GeneralizedOnPolicyLoss/abs_V_error', tf.abs(v_error))
    self.log('GeneralizedOnPolicyLoss/corrected_predictions',
             corrected_predictions)
    # Huber loss reduces the last dimension so we add a dummy one here.
    normalized_targets = normalized_targets[..., tf.newaxis]
    corrected_predictions = corrected_predictions[..., tf.newaxis]
    v_loss = self.v_loss_fn(normalized_targets, corrected_predictions)
    # PPO-style value loss clipping: take the element-wise max of the
    # unclipped and clipped losses.
    if self._value_ppo_style_clip_eps is not None:
      old_corrected_predictions = old_corrected_predictions[..., tf.newaxis]
      clipped_corrected_predictions = tf.clip_by_value(
          corrected_predictions,
          old_corrected_predictions - self._value_ppo_style_clip_eps,
          old_corrected_predictions + self._value_ppo_style_clip_eps)
      clipped_v_loss = self.v_loss_fn(normalized_targets,
                                      clipped_corrected_predictions)
      v_loss = tf.maximum(v_loss, clipped_v_loss)
    v_loss = tf.reduce_mean(v_loss)
    # Compute the regularization loss.
    if self._regularizer:
      per_step_regularization, regularization_loss = self._regularizer(
          self._parametric_action_distribution,
          learner_outputs.policy_logits,
          agent_outputs.policy_logits,
          agent_outputs.action)
      if not self._include_regularization_in_returns:
        regularization_loss += tf.reduce_mean(per_step_regularization)
    else:
      regularization_loss = 0.
    total_loss = policy_loss + self._baseline_cost*v_loss + regularization_loss
    return total_loss
class PolicyLoss(tf.Module, metaclass=abc.ABCMeta):
  """Abstract base class for policy losses."""

  @abc.abstractmethod
  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs):
    r"""Computes policy loss.

    Args:
      advantages: A float32 tensor of shape [T, B] of advantages.
      target_action_log_probs: A float32 tensor of shape [T, B] with
        log-probabilities of taking the action by the current policy.
      behaviour_action_log_probs: A float32 tensor of shape [T, B] with
        log-probabilities of taking the action by the behavioural policy.

    Returns:
      A float32 tensor of shape [T, B] with the policy loss.
    """
    raise NotImplementedError('`__call__()` is not implemented!')
class RegularizationLoss(tf.Module, metaclass=abc.ABCMeta):
  """Abstract base class for regularization losses."""

  @abc.abstractmethod
  def __call__(self, parametric_action_distribution, target_action_logits,
               behaviour_action_logits, actions):
    r"""Computes regularization loss.

    Args:
      parametric_action_distribution: Parametric action distribution.
      target_action_logits: A float32 tensor of shape [T, B, A] with
        the logits of the target policy.
      behaviour_action_logits: A float32 tensor of shape [T, B, A] with
        the logits of the behavioural policy.
      actions: A float32 tensor of shape [T, B, A] with the actions taken by
        the behaviour policy.

    Returns:
      A float32 tensor of shape [T, B] with the regularization loss.
    """
    raise NotImplementedError('`__call__()` is not implemented!')
| 42.150735 | 81 | 0.710772 |
import abc
import inspect
import gin
from seed_rl.agents.policy_gradient.modules import logging_module
import tensorflow as tf
@gin.configurable
class GeneralizedOnPolicyLoss(tf.Module, logging_module.LoggingModule):
def __init__(self, agent, reward_normalizer, parametric_action_distribution,
advantage_estimator, policy_loss, discount_factor,
regularizer=None, max_abs_reward=None,
handle_abandoned_episodes_properly=True,
huber_delta=None, value_ppo_style_clip_eps=None,
baseline_cost=1., include_regularization_in_returns=False,
frame_skip=1, reward_scaling=1.0):
self._agent = agent
self._reward_normalizer = reward_normalizer
self._parametric_action_distribution = parametric_action_distribution
self._advantage_estimator = advantage_estimator
self._policy_loss = policy_loss
self._regularizer = regularizer
self._max_abs_reward = max_abs_reward
self._reward_scaling = reward_scaling
self._baseline_cost = baseline_cost
self._discount_factor = discount_factor
self._frame_skip = frame_skip
self._handle_abandoned_episodes_properly = handle_abandoned_episodes_properly
self._value_ppo_style_clip_eps = value_ppo_style_clip_eps
self._include_regularization_in_returns = include_regularization_in_returns
if huber_delta is not None:
self.v_loss_fn = tf.keras.losses.Huber(
delta=huber_delta, reduction=tf.keras.losses.Reduction.NONE)
else:
self.v_loss_fn = tf.keras.losses.MeanSquaredError(
reduction=tf.keras.losses.Reduction.NONE)
def init(self):
for module in self.submodules:
if hasattr(module, 'init'):
if not inspect.signature(module.init).parameters:
module.init()
def compute_advantages(self, agent_state, prev_actions, env_outputs,
agent_outputs, return_learner_outputs=False):
rewards, done, _, abandoned, _ = tf.nest.map_structure(lambda t: t[1:],
env_outputs)
if self._max_abs_reward is not None:
rewards = tf.clip_by_value(rewards, -self._max_abs_reward,
self._max_abs_reward)
rewards *= self._reward_scaling
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
agent_outputs = tf.nest.map_structure(lambda t: t[:-1], agent_outputs)
learner_v = learner_outputs.baseline # current value function
learner_outputs = tf.nest.map_structure(lambda t: t[:-1], learner_outputs)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_v)
unnormalized_predictions = self._reward_normalizer.unnormalize_prediction(
corrected_predictions)
else:
corrected_predictions = learner_v
unnormalized_predictions = learner_v
if not self._handle_abandoned_episodes_properly:
abandoned = tf.zeros_like(abandoned)
done_terminated = tf.logical_and(done, ~abandoned)
done_abandoned = tf.logical_and(done, abandoned)
if self._include_regularization_in_returns and self._regularizer:
additional_rewards, _ = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action, with_logging=False)
assert rewards.shape == additional_rewards.shape
rewards += additional_rewards
# tf.math.pow does not work on TPU so we compute it manually.
adjusted_discount_factor = 1.
for _ in range(self._frame_skip):
adjusted_discount_factor *= self._discount_factor
vs, advantages = self._advantage_estimator(
unnormalized_predictions,
rewards, done_terminated,
done_abandoned,
adjusted_discount_factor,
target_action_log_probs,
behaviour_action_log_probs)
if self._reward_normalizer:
normalized_targets = self._reward_normalizer.normalize_target(vs)
normalized_advantages = self._reward_normalizer.normalize_advantage(
advantages)
self._reward_normalizer.update_normalization_statistics(vs)
else:
normalized_targets = vs
normalized_advantages = advantages
outputs = (normalized_targets, normalized_advantages)
if return_learner_outputs:
outputs += (learner_outputs,)
return outputs
def __call__(self, agent_state, prev_actions, env_outputs, agent_outputs,
normalized_targets=None, normalized_advantages=None):
if normalized_targets is None:
normalized_targets, normalized_advantages, learner_outputs = \
self.compute_advantages(
agent_state, prev_actions, env_outputs, agent_outputs,
return_learner_outputs=True)
# The last timestep is only used for computing advantages so we
# remove it here.
agent_state, prev_actions, env_outputs, agent_outputs = \
tf.nest.map_structure(
lambda t: t[:-1],
(agent_state, prev_actions, env_outputs, agent_outputs))
else: # Advantages are already precomputed.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_outputs.baseline)
old_corrected_predictions = self._reward_normalizer.correct_prediction(
agent_outputs.baseline)
else:
corrected_predictions = learner_outputs.baseline
old_corrected_predictions = agent_outputs.baseline
# Compute the advantage-based loss.
policy_loss = tf.reduce_mean(
self._policy_loss(
normalized_advantages,
target_action_log_probs,
behaviour_action_log_probs,
actions=agent_outputs.action,
target_logits=learner_outputs.policy_logits,
behaviour_logits=agent_outputs.policy_logits,
parametric_action_distribution=self._parametric_action_distribution)
)
# Value function loss
v_error = normalized_targets - corrected_predictions
self.log('GeneralizedOnPolicyLoss/V_error', v_error)
self.log('GeneralizedOnPolicyLoss/abs_V_error', tf.abs(v_error))
self.log('GeneralizedOnPolicyLoss/corrected_predictions',
corrected_predictions)
# Huber loss reduces the last dimension so we add a dummy one here.
normalized_targets = normalized_targets[..., tf.newaxis]
corrected_predictions = corrected_predictions[..., tf.newaxis]
v_loss = self.v_loss_fn(normalized_targets, corrected_predictions)
# PPO-style value loss clipping
if self._value_ppo_style_clip_eps is not None:
old_corrected_predictions = old_corrected_predictions[..., tf.newaxis]
clipped_corrected_predictions = tf.clip_by_value(
corrected_predictions,
old_corrected_predictions - self._value_ppo_style_clip_eps,
old_corrected_predictions + self._value_ppo_style_clip_eps)
clipped_v_loss = self.v_loss_fn(normalized_targets,
clipped_corrected_predictions)
v_loss = tf.maximum(v_loss, clipped_v_loss)
v_loss = tf.reduce_mean(v_loss)
# Compute the regularization loss.
if self._regularizer:
per_step_regularization, regularization_loss = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action)
if not self._include_regularization_in_returns:
regularization_loss += tf.reduce_mean(per_step_regularization)
else:
regularization_loss = 0.
total_loss = policy_loss + self._baseline_cost*v_loss + regularization_loss
return total_loss
class PolicyLoss(tf.Module, metaclass=abc.ABCMeta):
@abc.abstractmethod
def __call__(self, advantages, target_action_log_probs,
behaviour_action_log_probs):
raise NotImplementedError('`__call__()` is not implemented!')
class RegularizationLoss(tf.Module, metaclass=abc.ABCMeta):
@abc.abstractmethod
def __call__(self, parametric_action_distribution, target_action_logits,
behaviour_action_logits, actions):
raise NotImplementedError('`__call__()` is not implemented!')
| true | true |
f7227651dfe93209009ef0e269f56a6d756e56dc | 851 | py | Python | PotatoTube2.0.py | mrthundergod/PotatoTube | f932ecde0818eb3106fcf1ffbd1eeeea57d6358e | [
"MIT"
] | null | null | null | PotatoTube2.0.py | mrthundergod/PotatoTube | f932ecde0818eb3106fcf1ffbd1eeeea57d6358e | [
"MIT"
] | null | null | null | PotatoTube2.0.py | mrthundergod/PotatoTube | f932ecde0818eb3106fcf1ffbd1eeeea57d6358e | [
"MIT"
] | null | null | null | import youtube_dl,re, os, tkinter
def getUrlWindow(data=None):
    """Read the clipboard and return its contents if it is a YouTube URL.

    Parameters
    ----------
    data : ignored
        Kept for backward compatibility; the clipboard value always
        overwrites it.

    Returns
    -------
    str or None
        The clipboard text when it starts with a YouTube URL, else None.
    """
    root = tkinter.Tk()
    root.withdraw()
    try:
        data = root.clipboard_get()
    except tkinter.TclError:
        # Clipboard is empty or holds non-text data; original code crashed
        # here, killing the polling loop.
        return None
    finally:
        # Destroy the hidden window; the original leaked one Tk instance
        # per call.
        root.destroy()
    # re.escape: '.' is a regex wildcard, so the bare URL would also match
    # e.g. 'https://wwwXyoutubeYcom/'.
    if re.match(re.escape('https://www.youtube.com/'), data) is not None:
        print('Downloading as MP3')
        return data
    return None
def sendtoYDL(data):
    """Download the given YouTube URL as a 320 kbps MP3.

    Files are written to ~/Downloads/PotatoTube/.

    Parameters
    ----------
    data : str
        YouTube video URL to download.
    """
    # The original derived the user directory from the first 12 characters
    # of os.getcwd(), which only works when the script happens to be started
    # from a path whose 12-char prefix is the home directory;
    # expanduser('~') is correct everywhere. Trailing '' keeps the path
    # ending in a separator, as youtube_dl's outtmpl expects here.
    out_dir = os.path.join(os.path.expanduser('~'),
                           'Downloads', 'PotatoTube', '')
    ydl_opts = {
        'outtmpl': out_dir,
        'format': 'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '320',
        }],
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([data])
if __name__ == '__main__':
    # Fixes from review: 'Print' was a NameError, '/n' was a literal
    # slash-n, and the loop busy-spun and re-downloaded the same clipboard
    # URL forever. Remember the last URL and poll once per second.
    import time
    print("Welcome to PotatoTube!\nWaiting for URL...")
    last_url = None
    while True:
        data = getUrlWindow()
        if data is None or data == last_url:
            time.sleep(1)
            continue
        sendtoYDL(data)
        last_url = data
        print("Downloaded. Waiting for URL....")
| 35.458333 | 115 | 0.615746 | import youtube_dl,re, os, tkinter
def getUrlWindow(data=None):
root=tkinter.Tk()
root.withdraw()
data = root.clipboard_get()
if re.match('https://www.youtube.com/',data) != None:
print('Downloading as MP3')
return data
else: return None
def sendtoYDL(data):
ydl_opts = { 'outtmpl':os.getcwd()[:12].replace('\\',"/")+'Downloads/PotatoTube/','format': 'bestaudio/best',
'postprocessors':[{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '320'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl: ydl.download([data])
if __name__ == '__main__':
Print("Welcome to PotatoTube! /n Waiting for URL...")
while True:
data= getUrlWindow()
if data==None: continue
sendtoYDL(data)
print("Downloaded. Waiting for URL....")
| true | true |
f72276b0a1729560fa2fd6f7f6080d763f70ca6b | 16,829 | py | Python | SIPSim/IsoIncorp.py | arischwartz/test | 87a8306a294f59b0eef992529ce900cea876c605 | [
"MIT"
] | 2 | 2019-03-15T09:46:48.000Z | 2019-06-05T18:16:39.000Z | SIPSim/IsoIncorp.py | arischwartz/test | 87a8306a294f59b0eef992529ce900cea876c605 | [
"MIT"
] | 1 | 2020-11-01T23:18:10.000Z | 2020-11-01T23:18:10.000Z | SIPSim/IsoIncorp.py | arischwartz/test | 87a8306a294f59b0eef992529ce900cea876c605 | [
"MIT"
] | null | null | null | # import
## batteries
import os, sys
import re
from functools import partial
import types
import logging
import tempfile
import shutil
import glob
from collections import defaultdict
from random import shuffle
from StringIO import StringIO
## 3rd party
import numpy as np
import pandas as pd
from SIPSim_pymix import mixture
import scipy.stats as stats
import dill
from pathos.multiprocessing import ProcessingPool
## application
import Utils
from CommTable import CommTable
from IsoIncorpCython import add_incorp
from Config import Config
# logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
def main(args):
    """Main function for performing isotope incorporation simulation.

    Workflow: load config / community tables and the BD-value KDEs, then,
    per library, re-sample each taxon's KDE with an isotope-induced BD
    shift, recording per-taxon shift stats, and write out the new KDEs.

    Parameters
    ----------
    args : dict
        docopt-style argument dict; see ``isotope_incorp`` subcommand.
    """
    # loading input
    ## config file
    config = Config.load_config(args['<config_file>'],
                                phylo=args['--phylo'])
    ## comm (optional)
    if args['--comm'] is not None:
        comm = CommTable.from_csv(args['--comm'], sep='\t')
    else:
        comm = None
    ## taxa list (optional); empty per-library lists trigger random
    ## selection below via _taxon_incorp_list()
    if args['--taxa'] is not None:
        taxa_incorp_list = _load_taxa_incorp_list(args['--taxa'], config)
    else:
        taxa_incorp_list = {k:[] for k in config.keys()}
    ## kde_bd
    sys.stderr.write('Loading KDE object...\n')
    KDEs = Utils.load_kde(args['<BD_KDE>'])
    # which depth of KDE object
    KDE_type = Utils.KDE_type(KDEs)
    # adding comm info to KDEs
    ## type-4 KDEs are per-library files; comm info is added later,
    ## per library, inside _get_KDEs_for_libID()
    if KDE_type < 4:
        _add_comm_to_kde(KDEs, comm)
    # temporary directory for BD shift stats (combined at the end)
    tmpdirpath = tempfile.mkdtemp()
    # creating kde of BD distributions with BD shift from isotope
    KDEs_iso = dict()
    for libID in config.keys():
        sys.stderr.write('Processing library: {}\n'.format(libID))
        KDE = _get_KDEs_for_libID(KDEs, KDE_type, libID, comm)
        # if needed: making a list of taxa that can incorporate
        ## (unique to this library)
        ## TODO: abundance cutoff:
        ### taxa must have abundance > threshold to incorporate
        ## TODO: abundance weighting with less incorp for less taxa
        try:
            incorp_list_len = len(taxa_incorp_list[libID])
        except KeyError:
            msg = 'Library "{}" not found'.format(libID)
            raise KeyError, msg
        if incorp_list_len == 0:
            taxa_incorp_list[libID] = _taxon_incorp_list(libID, config, KDE)
        # setting params for parallelized function
        pfunc = partial(_make_kde,
                        config = config,
                        libID = libID,
                        stat_dir = tmpdirpath,
                        n = args['-n'],
                        taxa_incorp_list = taxa_incorp_list[libID],
                        isotope = args['--isotope'],
                        bw_method = args['--bw'])
        # parallel by taxon (--debug runs serially in-process)
        pool = ProcessingPool(nodes=int(args['--np']))
        if args['--debug']:
            tmp = map(pfunc, KDE.items())
        else:
            tmp = pool.map(pfunc, KDE.items())
        KDE = None  # release this library's input KDEs
        # storing dict of KDEs
        ## '-o none' keeps KDEs in memory; otherwise each library is written
        ## to disk and only the reference returned by write_lib_kde is kept
        if args['-o'].lower() == 'none':
            KDEs_iso[libID] = {taxon:kde for taxon,kde in tmp}
        else:
            KDEs_iso[libID] = Utils.write_lib_kde({taxon:kde for taxon,kde in tmp},
                                                  args['-o'],
                                                  libID)
        tmp = None
    # combine stats
    _write_stats(args['--shift'], tmpdirpath)
    shutil.rmtree(tmpdirpath)
    # writing pickled BD-KDE with BD shift from isotope incorp
    if args['-o'].lower() == 'none':
        dill.dump(KDEs_iso, sys.stdout)
    else:
        with open(args['-o'], 'wb') as outFH:
            dill.dump(KDEs_iso, outFH)
def _get_KDEs_for_libID(KDEs, KDE_type, libID, comm=None):
    """Parse out dict of KDE objects for just libID.
    Parsing depends on the KDE type.

    Parameters
    ----------
    KDEs : KDE object
        Layout depends on KDE_type: 1 = [(taxon, kde), ...];
        2 = {taxon: kde}; 3 = {libID: {taxon: kde}};
        4 = {libID: kde_file_path}
    KDE_type : int
        object depth as determined by Utils.KDE_type()
    libID : str
        library ID
    comm : community table object, optional
        comm info is only attached here for type 4; types < 4 have it
        added upstream in main()

    Returns
    -------
    dict -- {taxon_name : kde-info} for this library
    """
    if KDE_type == 1:
        KDE = {t:k for t,k in KDEs}
    elif KDE_type == 2:
        # already a flat {taxon: kde} dict (no per-library structure)
        KDE = KDEs
    elif KDE_type == 3:
        try:
            KDE = KDEs[libID]
        except KeyError:
            ## kde library not found, duplicating KDE
            msg = 'WARNING: config library {} not found in KDEs.' + \
                  'Using a different KDE object\n'
            sys.stderr.write(msg.format(libID))
            KDE = KDEs[KDEs.keys()[0]]
    elif KDE_type == 4:
        # per-library KDEs stored on disk; load this library's file
        try:
            KDE = Utils.load_kde(KDEs[libID])
        except KeyError:
            ## kde library not found, duplicating KDE
            msg = 'WARNING: config library {} not found in KDEs.' + \
                  'Using a different KDE object\n'
            sys.stderr.write(msg.format(libID))
            KDE = Utils.load_kde(KDEs[KDEs.keys()[0]])
        # adding comm info to KDEs
        _add_comm_to_kde(KDE, comm)
    else:
        raise ValueError, 'KDE object type not recognized'
    return KDE
def _make_kde(x, libID, config, taxa_incorp_list,
              isotope='13C', n=10000, bw_method=None, stat_dir=None):
    """Make a new KDE of the BD value distribution which includes the
    BD shift due to isotope incorporation.

    Parameters
    ----------
    x : list
        [taxon_name, dict -- {kde:abundance}]
    libID : str
        library ID
    config : config object
    taxa_incorp_list : list
        names of taxa that can incorporate isotope
    isotope : str, optional
        isotope that is incorporated
    n : int
        number of Monte Carlo samples to use for estimating BD+isotope_BD
        distribution
    bw_method : str or function
        bandwidth scalar or function passed to scipy.stats.gaussian_kde().
    stat_dir : str
        directory path for writing BD shift stats. Nothing written if None

    Returns
    -------
    tuple -- (taxon_name, KDE*)
       *Note: KDE object may be None
    """
    taxon_name,x = x
    # bw_method may arrive as a numeric string (from the CLI); use the float
    # value if it parses, otherwise pass it through unchanged
    try:
        bw_method = float(bw_method)
    except (TypeError, ValueError) as e:
        pass
    # status
    sys.stderr.write('Processing: {}\n'.format(taxon_name))
    # unpack
    n = int(n)
    kde = x['kde']
    if kde is None:
        return (taxon_name, None)
    # can taxon incorporate any isotope?
    ## if not, return 'raw' BD KDE (and record an all-zero BD shift row)
    if taxon_name not in taxa_incorp_list:
        shift_stats = [libID, taxon_name] + [float(0)] * 6
        _write_tmp_stats(shift_stats, stat_dir)
        return (taxon_name, kde)
    # taxon abundance for library
    ## retry with the 'lib'/'Library' prefix stripped, since abundance
    ## tables may key libraries by bare number
    ## NOTE(review): taxon_abund is never used below in this function
    try:
        taxon_abund = x['abundances'][libID]
    except KeyError:
        tmp = re.sub('[Ll]ib(rary)* *','', libID)
        try:
            taxon_abund = x['abundances'][tmp]
        except KeyError:
            taxon_abund = None
    # max incorporation for isotope
    maxIsotopeBD = isotopeMaxBD(isotope)
    # making a mixture model object lib:taxon
    ## mix_model => distribution of % incorporation for taxon population
    mix_model = make_incorp_model(taxon_name, libID, config)
    # BD shift stats: Monte Carlo sample BD values, apply incorporation
    BDs = kde.resample(n)[0]
    BDs_wIncorp = add_incorp(np.copy(BDs), mix_model, maxIsotopeBD)
    shift_stats = _calc_BD_shift_stats(libID, taxon_name, BDs, BDs_wIncorp)
    _write_tmp_stats(shift_stats, stat_dir)
    # making KDE of BD + BD_isotope_incorp
    kdeBD = stats.gaussian_kde(BDs_wIncorp, bw_method=bw_method)
    # return
    return (taxon_name, kdeBD)
def _calc_BD_shift_stats(libID, taxon_name, BDs, BDs_wIncorp):
    """Summarize the BD shift caused by isotope incorporation.

    Parameters
    ----------
    libID : str
        library ID (first element of the returned row)
    taxon_name : str
        taxon name (second element of the returned row)
    BDs : np.ndarray
        buoyant density values without incorporation
    BDs_wIncorp : np.ndarray
        buoyant density values with incorporation (same shape as BDs)

    Returns
    -------
    list -- [libID, taxon, min, q25, mean, median, q75, max] of the shift
    """
    shift = BDs_wIncorp - BDs
    summary = [np.min(shift),
               np.percentile(shift, 25),
               np.mean(shift),
               np.median(shift),
               np.percentile(shift, 75),
               np.max(shift)]
    return [libID, taxon_name] + summary
def __add_comm_to_kdes(taxon_name, kde, comm, libIDs):
    """Bundle a taxon's KDE with its per-library abundances.

    Missing abundance data is recorded as 0 (with a warning on stderr).
    """
    entry = {'kde':kde, 'abundances':{}}
    for lib in libIDs:
        abunds = comm.get_taxonAbund(taxon_name, libID=lib)
        if len(abunds) > 0:
            entry['abundances'][lib] = abunds[0]
        else:
            warning = 'WARNING; no abundance data for: lib={}, taxon={}\n'
            sys.stderr.write(warning.format(lib, taxon_name))
            entry['abundances'][lib] = 0
    return entry
def _add_comm_to_kde(KDEs, comm):
    """Add community (abundance) data for each taxon to each KDE.

    'abundances' will be an empty dict if comm is not provided.
    In-place edit of KDEs: each taxon's value becomes
    {'kde': <kde>, 'abundances': {libID: abundance}}.

    Parameters
    ----------
    KDEs : dict
        {taxon: kde} or nested per-library {libID: {taxon: kde}}
    comm : gradient community object or None
    """
    try:
        libIDs = comm.get_unique_libIDs()
    except AttributeError:
        # comm is None => no abundance info available
        libIDs = []
    for x,y in KDEs.items():
        # nested (per-library) values have .items(); flat values raise
        # AttributeError and are wrapped directly
        try:
            d = {}
            for xx,yy in y.items():
                d[xx] = __add_comm_to_kdes(xx, yy, comm, libIDs)
            KDEs[x] = d
        except AttributeError:
            KDEs[x] = __add_comm_to_kdes(x, y, comm, libIDs)
def _write_tmp_stats(stats, dirpath):
    """Write one row of BD-shift stats to a uniquely-named temp file.

    The files are later combined by _write_stats().

    Parameters
    ----------
    stats : list
        row values; converted to strings and tab-joined
    dirpath : str or None
        stats directory; None disables writing
    """
    if dirpath is None:
        return 0
    # NamedTemporaryFile is used only to generate a unique basename; the
    # handle itself is discarded (and its file auto-deleted on GC)
    outfn = tempfile.NamedTemporaryFile()
    outfn = os.path.split(outfn.name)[1]
    outfn = os.path.join(dirpath, outfn + '_stats.txt')
    stats = [str(x) for x in stats]
    # NOTE(review): 'wb' with a str payload is Python-2 style; under
    # Python 3 this would need bytes (or mode 'w')
    with open(outfn, 'wb') as outfh:
        outfh.write('\t'.join(stats) + '\n')
def _write_stats(outfn, tmpdirpath):
    """Combine the per-taxon temp stat files into one sorted table.

    Parameters
    ----------
    outfn : str
        output table file path (tab-delimited)
    tmpdirpath : str
        directory containing the '*_stats.txt' files written by
        _write_tmp_stats(); nothing is written if no files are found
    """
    tmpfiles = glob.glob(os.path.join(tmpdirpath, '*_stats.txt'))
    if len(tmpfiles) == 0:
        return 0
    header = ['library', 'taxon', 'min', 'q25', 'mean', 'median',
              'q75', 'max']
    df = []
    for F in tmpfiles:
        with open(F, 'rb') as infh:
            for line in infh:
                line = line.rstrip().split('\t')
                df.append(line)
    df = pd.DataFrame(df, columns=header)
    df = df.sort_values(by=['library','taxon'])
    df.to_csv(outfn, sep='\t', index=False)
    sys.stderr.write('File written: {}\n'.format(outfn))
def _taxon_incorp_list(libID, config, KDE_BD):
    """Randomly select which taxa incorporate isotope for a library.

    The fraction of taxa selected is the library's
    `max_perc_taxa_incorp` config value (default: 100%).

    Parameters
    ----------
    libID : str
        library ID used to look up the config value
    config : config object
        must provide get_max_perc_taxa_incorp(libID); a KeyError from it
        means "not configured" and defaults to 100%
    KDE_BD : dict
        {taxon_name : kde-info}; only the keys are used

    Returns
    -------
    list -- taxon names allowed to incorporate isotope
    """
    try:
        max_perc_taxa_incorp = config.get_max_perc_taxa_incorp(libID)
    except KeyError:
        max_perc_taxa_incorp = 100.0
    max_perc_taxa_incorp /= 100.0
    # randomized list of taxa
    # list() is required: under Python 3, dict.keys() returns a view that
    # random.shuffle() cannot shuffle in place
    taxon_names = list(KDE_BD.keys())
    shuffle(taxon_names)
    # subset of taxa that incorporate any isotope
    n_incorp = int(round(len(taxon_names) * max_perc_taxa_incorp, 0))
    return taxon_names[:n_incorp]
def make_incorp_model(taxon_name, libID, config):
    """Setting isotope incorporation based on the interPopDist
    function for each intraPop parameter.

    Parameters
    ----------
    taxon_name : str
        taxon name string
    libID : str
        library ID string
    config : config object

    Returns
    -------
    mixture model object (mixture class)

    Raises
    ------
    KeyError : if a 'distribution' key is missing or the named
        distribution is not supported.
    """
    psblDists = {'normal' : mixture.NormalDistribution,
                 'uniform' : mixture.UniformDistribution}
    # creating individual distribution functions
    libSect = config.get_libSection(libID)
    intraPopDist_IDs = []
    intraPopDist_funcs = []
    weights = []
    for (intraPopDistID, intraPopDist) in config.iter_configSections(libSect):
        # name of standard distribution
        try:
            distID = intraPopDist['distribution']
        except KeyError:
            msg = 'Cannot find "distribution" key for "{}"'
            # raise E(...) call form works on both Python 2 and 3
            raise KeyError(msg.format(intraPopDistID))
        intraPopDist_IDs.append(distID)
        # getting mixture model weight for this distribution
        weight = float(intraPopDist.get('weight', 0))
        weights.append(weight)
        # selecting intra-pop param values from inter-pop dists
        params = dict()
        for (paramID, param) in config.iter_configSections(intraPopDist):
            params[paramID] = _select_intrapop_param_value(param,
                                                           taxon_name)
        # checking start-end parameters (if present)
        _start_lt_end(params)
        # making intra-pop dist function (a standard distribution)
        try:
            dist_func = psblDists[distID](**params)
        except KeyError:
            raise KeyError('Distribution "{}" not supported'.format(distID))
        intraPopDist_funcs.append(dist_func)
    # making sure weights add up to 1
    weights = Config._fill_in_weights(weights)
    assert len(weights) == len(intraPopDist_IDs), \
        'number_of_distributions != number_of_weights'
    assert len(intraPopDist_IDs) == len(intraPopDist_funcs), \
        'number_of_distributions != number_of_dist_functions'
    # making incorporation mixture model
    return mixture.MixtureModel(len(intraPopDist_IDs),
                                weights,
                                intraPopDist_funcs)
def _select_intrapop_param_value(interPopDist, taxon_name, maxtries=1000):
"""Select the intra-population parameter value
based on the inter-population distribution function.
Values are % isotope incorporation, so acceptable
range is 0-100 (will try 'maxtries' times to select value in range).
Parameters
----------
interPopDist : dict
{'interPopDist':{'function':interPopdist_function}}
taxon_name : str
name of taxon
maxtries : int
number of tries to get a parameter values >0
Returns
-------
float : intra-pop param value
"""
# getting inter-pop dist function
try:
interPopDist_func = interPopDist['interPopDist']['function']
except KeyError:
raise KeyError, 'Cannot find inter-pop dist function'
# sampling from function to get parameter for intra-pop distribution
tries = 0
while True:
tries += 1
try:
# if Brownian Motion evolution
paramVal = interPopDist_func.sample(taxon_name)
except TypeError:
paramVal = interPopDist_func.sample()
try:
paramVal = paramVal[0]
except TypeError:
pass
# values must be >= 0 and <= 100 to end loop
if paramVal >= 0 and paramVal <= 100:
break
# exceeding maxtries?
if tries >= maxtries:
err = 'Taxon: {}'.format(taxon_name)
msg = 'Exceeded maxtries to get parameter in range: 0-100'
sys.exit(': '.join([err, msg]))
return paramVal
def _start_lt_end(params):
"""Check that 'start' param is < 'end' param
if both params are found in the provided dict.
In-place edit of params.
Parameters
----------
params : dict
{param_ID:param_value}
"""
if ('start' in params) & ('end' in params):
try:
startVal = float(params['start'])
endVal = float(params['end'])
except TypeError:
return None
if startVal > endVal:
params['start'] = endVal
params['end'] = startVal
elif startVal == endVal:
# start-end cannot ==
if endVal >= 100:
params['start'] = startVal - 1e-10
else:
params['end'] = endVal + 1e-10
def isotopeMaxBD(isotope):
    """Return the theoretical maximum BD shift of an isotope
    (i.e., at 100% incorporation).

    Parameters
    ----------
    isotope : str
        Isotope name, case-insensitive (e.g. '13C', '15N').

    Returns
    -------
    float : max BD shift

    Raises
    ------
    KeyError : if the isotope is not supported.
    """
    shifts = {'13C': 0.036,
              '15N': 0.016}
    key = isotope.upper()
    if key not in shifts:
        raise KeyError('Isotope "{}" not supported.'.format(isotope))
    return shifts[key]
def _load_taxa_incorp_list(inFile, config):
"""Loading list of taxa that incorporate isotope.
Parameters
----------
inFile : str
File name of taxon list
config : config object
Returns
-------
{library:[taxon1, ...]}
"""
taxa = {}
with open(inFile, 'rb') as inFH:
for line in inFH:
line = line.rstrip().split('\t')
# if 1 column, using config-defined libraries
if len(line) == 1:
line = [[x,line[0]] for x in config.keys()]
else:
line = [line]
for x in line:
try:
taxa[x[0]].append(x[1])
except KeyError:
taxa[x[0]] = [x[1]]
return taxa
| 31.280669 | 83 | 0.576743 |
sys
import re
from functools import partial
import types
import logging
import tempfile
import shutil
import glob
from collections import defaultdict
from random import shuffle
from StringIO import StringIO
py as np
import pandas as pd
from SIPSim_pymix import mixture
import scipy.stats as stats
import dill
from pathos.multiprocessing import ProcessingPool
from CommTable import CommTable
from IsoIncorpCython import add_incorp
from Config import Config
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
def main(args):
"""Main function for performing isotope incorporation simulation.
Parameters
----------
args : dict
See ``isotope_incorp`` subcommand.
"""
Config.load_config(args['<config_file>'],
phylo=args['--phylo'])
omm'] is not None:
comm = CommTable.from_csv(args['--comm'], sep='\t')
else:
comm = None
] is not None:
taxa_incorp_list = _load_taxa_incorp_list(args['--taxa'], config)
else:
taxa_incorp_list = {k:[] for k in config.keys()}
.stderr.write('Loading KDE object...\n')
KDEs = Utils.load_kde(args['<BD_KDE>'])
KDE_type = Utils.KDE_type(KDEs)
if KDE_type < 4:
_add_comm_to_kde(KDEs, comm)
tmpdirpath = tempfile.mkdtemp()
KDEs_iso = dict()
for libID in config.keys():
sys.stderr.write('Processing library: {}\n'.format(libID))
KDE = _get_KDEs_for_libID(KDEs, KDE_type, libID, comm)
if incorp_list_len == 0:
taxa_incorp_list[libID] = _taxon_incorp_list(libID, config, KDE)
pfunc = partial(_make_kde,
config = config,
libID = libID,
stat_dir = tmpdirpath,
n = args['-n'],
taxa_incorp_list = taxa_incorp_list[libID],
isotope = args['--isotope'],
bw_method = args['--bw'])
pool = ProcessingPool(nodes=int(args['--np']))
if args['--debug']:
tmp = map(pfunc, KDE.items())
else:
tmp = pool.map(pfunc, KDE.items())
KDE = None
if args['-o'].lower() == 'none':
KDEs_iso[libID] = {taxon:kde for taxon,kde in tmp}
else:
KDEs_iso[libID] = Utils.write_lib_kde({taxon:kde for taxon,kde in tmp},
args['-o'],
libID)
tmp = None
_write_stats(args['--shift'], tmpdirpath)
shutil.rmtree(tmpdirpath)
if args['-o'].lower() == 'none':
dill.dump(KDEs_iso, sys.stdout)
else:
with open(args['-o'], 'wb') as outFH:
dill.dump(KDEs_iso, outFH)
def _get_KDEs_for_libID(KDEs, KDE_type, libID, comm=None):
"""Parse out dict of KDE objects for just libID.
Parsing depends on the KDE type
"""
if KDE_type == 1:
KDE = {t:k for t,k in KDEs}
elif KDE_type == 2:
KDE = KDEs
elif KDE_type == 3:
try:
KDE = KDEs[libID]
except KeyError:
ary {} not found in KDEs.' + \
'Using a different KDE object\n'
sys.stderr.write(msg.format(libID))
KDE = KDEs[KDEs.keys()[0]]
elif KDE_type == 4:
try:
KDE = Utils.load_kde(KDEs[libID])
except KeyError:
ary {} not found in KDEs.' + \
'Using a different KDE object\n'
sys.stderr.write(msg.format(libID))
KDE = Utils.load_kde(KDEs[KDEs.keys()[0]])
_add_comm_to_kde(KDE, comm)
else:
raise ValueError, 'KDE object type not recognized'
return KDE
def _make_kde(x, libID, config, taxa_incorp_list,
              isotope='13C', n=10000, bw_method=None, stat_dir=None):
    """Make a new KDE of BD values that includes the BD shift
    caused by isotope incorporation.

    Parameters
    ----------
    x : list
        [taxon_name, {'kde': kde, 'abundances': {libID: abund}}]
    libID : str
        Library ID.
    config : config object
    taxa_incorp_list : list
        Names of taxa that can incorporate isotope.
    isotope : str, optional
        Isotope that is incorporated.
    n : int
        Number of Monte Carlo samples for estimating the BD+isotope_BD
        distribution.
    bw_method : str, float, or function
        Bandwidth scalar or function passed to scipy.stats.gaussian_kde().
    stat_dir : str
        Directory path for writing BD-shift stats (None => not written).

    Returns
    -------
    tuple : (taxon_name, KDE or None)
    """
    taxon_name, x = x
    # bw_method may arrive as a numeric string; leave other values as-is
    try:
        bw_method = float(bw_method)
    except (TypeError, ValueError):
        pass
    sys.stderr.write('Processing: {}\n'.format(taxon_name))
    n = int(n)
    kde = x['kde']
    if kde is None:
        return (taxon_name, None)
    # non-incorporators: record a zero BD shift and return the KDE unchanged
    # NOTE(review): this condition was left-truncated in this copy of the
    # source; reconstructed as a membership test -- confirm against upstream
    if taxon_name not in taxa_incorp_list:
        shift_stats = [libID, taxon_name] + [float(0)] * 6
        _write_tmp_stats(shift_stats, stat_dir)
        return (taxon_name, kde)
    # taxon abundance for this library (the library ID may carry or lack a
    # 'lib'/'library' prefix depending on the input table)
    try:
        taxon_abund = x['abundances'][libID]
    except KeyError:
        tmp = re.sub('[Ll]ib(rary)* *', '', libID)
        try:
            taxon_abund = x['abundances'][tmp]
        except KeyError:
            taxon_abund = None
    # NOTE(review): taxon_abund appears unused below; lines may have been
    # dropped from this copy of the source
    # NOTE(review): the mix_model assignment was lost in this copy of the
    # source; reconstructed from the make_incorp_model() helper
    mix_model = make_incorp_model(taxon_name, libID, config)
    maxIsotopeBD = isotopeMaxBD(isotope)
    BDs = kde.resample(n)[0]
    BDs_wIncorp = add_incorp(np.copy(BDs), mix_model, maxIsotopeBD)
    shift_stats = _calc_BD_shift_stats(libID, taxon_name, BDs, BDs_wIncorp)
    _write_tmp_stats(shift_stats, stat_dir)
    kdeBD = stats.gaussian_kde(BDs_wIncorp, bw_method=bw_method)
    return (taxon_name, kdeBD)
def _calc_BD_shift_stats(libID, taxon_name, BDs, BDs_wIncorp):
BD_shift = BDs_wIncorp - BDs
BDs = None
shift_stats = [libID, taxon_name,
np.min(BD_shift),
np.percentile(BD_shift, 25),
np.mean(BD_shift),
np.median(BD_shift),
np.percentile(BD_shift, 75),
np.max(BD_shift)]
BD_shift = None
return shift_stats
def __add_comm_to_kdes(taxon_name, kde, comm, libIDs):
d = {'kde':kde, 'abundances':{}}
for libID in libIDs:
abund = comm.get_taxonAbund(taxon_name, libID=libID)
try:
d['abundances'][libID] = abund[0]
except IndexError:
msg = 'WARNING; no abundance data for: lib={}, taxon={}\n'
sys.stderr.write(msg.format(libID, taxon_name))
d['abundances'][libID] = 0
return d
def _add_comm_to_kde(KDEs, comm):
    """Adding comm data for each taxon to each KDE.

    'abundances' will be an empty dict if comm is not provided.
    In-place edit of KDE_BD {taxon_name:{kde|abundances}}

    Parameters
    ----------
    KDE_BD : KDE object
    comm : gradient community object
    """
    # comm=None (or an object without library IDs) => no abundances to add
    try:
        libIDs = comm.get_unique_libIDs()
    except AttributeError:
        libIDs = []
    for x,y in KDEs.items():
        try:
            # multi-level KDE object: one KDE per sub-entry
            d = {}
            for xx,yy in y.items():
                d[xx] = __add_comm_to_kdes(xx, yy, comm, libIDs)
            KDEs[x] = d
        except AttributeError:
            # flat KDE object: a single KDE for this taxon
            KDEs[x] = __add_comm_to_kdes(x, y, comm, libIDs)
def _write_tmp_stats(stats, dirpath):
if dirpath is None:
return 0
outfn = tempfile.NamedTemporaryFile()
outfn = os.path.split(outfn.name)[1]
outfn = os.path.join(dirpath, outfn + '_stats.txt')
stats = [str(x) for x in stats]
with open(outfn, 'wb') as outfh:
outfh.write('\t'.join(stats) + '\n')
def _write_stats(outfn, tmpdirpath):
tmpfiles = glob.glob(os.path.join(tmpdirpath, '*_stats.txt'))
if len(tmpfiles) == 0:
return 0
header = ['library', 'taxon', 'min', 'q25', 'mean', 'median',
'q75', 'max']
df = []
for F in tmpfiles:
with open(F, 'rb') as infh:
for line in infh:
line = line.rstrip().split('\t')
df.append(line)
df = pd.DataFrame(df, columns=header)
df = df.sort_values(by=['library','taxon'])
df.to_csv(outfn, sep='\t', index=False)
sys.stderr.write('File written: {}\n'.format(outfn))
def _taxon_incorp_list(libID, config, KDE_BD):
"""Make a list of taxa that incorporated isotope.
"""
try:
max_perc_taxa_incorp = config.get_max_perc_taxa_incorp(libID)
except KeyError:
max_perc_taxa_incorp = 100.0
max_perc_taxa_incorp /= 100.0
taxon_names = KDE_BD.keys()
shuffle(taxon_names)
n_incorp = int(round(len(taxon_names) * max_perc_taxa_incorp, 0))
return taxon_names[:n_incorp]
def make_incorp_model(taxon_name, libID, config):
    """Set isotope incorporation based on the interPopDist
    function for each intraPop parameter.

    Parameters
    ----------
    taxon_name : str
        taxon name string
    libID : str
        library ID string
    config : config object

    Returns
    -------
    mixture model object (mixture class)

    Raises
    ------
    KeyError : if a 'distribution' key is missing or the named
        distribution is not supported.
    """
    psblDists = {'normal' : mixture.NormalDistribution,
                 'uniform' : mixture.UniformDistribution}
    # creating individual distribution functions
    libSect = config.get_libSection(libID)
    intraPopDist_IDs = []
    intraPopDist_funcs = []
    weights = []
    for (intraPopDistID, intraPopDist) in config.iter_configSections(libSect):
        # name of standard distribution
        try:
            distID = intraPopDist['distribution']
        except KeyError:
            msg = 'Cannot find "distribution" key for "{}"'
            # raise E(...) call form works on both Python 2 and 3
            raise KeyError(msg.format(intraPopDistID))
        intraPopDist_IDs.append(distID)
        # getting mixture model weight for this distribution
        weight = float(intraPopDist.get('weight', 0))
        weights.append(weight)
        # selecting intra-pop param values from inter-pop dists
        params = dict()
        for (paramID, param) in config.iter_configSections(intraPopDist):
            params[paramID] = _select_intrapop_param_value(param,
                                                           taxon_name)
        # checking start-end parameters (if present)
        _start_lt_end(params)
        # making intra-pop dist function (a standard distribution)
        try:
            dist_func = psblDists[distID](**params)
        except KeyError:
            raise KeyError('Distribution "{}" not supported'.format(distID))
        intraPopDist_funcs.append(dist_func)
    # making sure weights add up to 1
    weights = Config._fill_in_weights(weights)
    assert len(weights) == len(intraPopDist_IDs), \
        'number_of_distributions != number_of_weights'
    assert len(intraPopDist_IDs) == len(intraPopDist_funcs), \
        'number_of_distributions != number_of_dist_functions'
    # making incorporation mixture model
    return mixture.MixtureModel(len(intraPopDist_IDs),
                                weights,
                                intraPopDist_funcs)
def _select_intrapop_param_value(interPopDist, taxon_name, maxtries=1000):
"""Select the intra-population parameter value
based on the inter-population distribution function.
Values are % isotope incorporation, so acceptable
range is 0-100 (will try 'maxtries' times to select value in range).
Parameters
----------
interPopDist : dict
{'interPopDist':{'function':interPopdist_function}}
taxon_name : str
name of taxon
maxtries : int
number of tries to get a parameter values >0
Returns
-------
float : intra-pop param value
"""
try:
interPopDist_func = interPopDist['interPopDist']['function']
except KeyError:
raise KeyError, 'Cannot find inter-pop dist function'
tries = 0
while True:
tries += 1
try:
paramVal = interPopDist_func.sample(taxon_name)
except TypeError:
paramVal = interPopDist_func.sample()
try:
paramVal = paramVal[0]
except TypeError:
pass
if paramVal >= 0 and paramVal <= 100:
break
if tries >= maxtries:
err = 'Taxon: {}'.format(taxon_name)
msg = 'Exceeded maxtries to get parameter in range: 0-100'
sys.exit(': '.join([err, msg]))
return paramVal
def _start_lt_end(params):
"""Check that 'start' param is < 'end' param
if both params are found in the provided dict.
In-place edit of params.
Parameters
----------
params : dict
{param_ID:param_value}
"""
if ('start' in params) & ('end' in params):
try:
startVal = float(params['start'])
endVal = float(params['end'])
except TypeError:
return None
if startVal > endVal:
params['start'] = endVal
params['end'] = startVal
elif startVal == endVal:
if endVal >= 100:
params['start'] = startVal - 1e-10
else:
params['end'] = endVal + 1e-10
def isotopeMaxBD(isotope):
    """Return the theoretical maximum BD shift of an isotope
    (i.e., at 100% incorporation).

    Parameters
    ----------
    isotope : str
        Isotope name, case-insensitive (e.g. '13C', '15N').

    Returns
    -------
    float : max BD shift

    Raises
    ------
    KeyError : if the isotope is not supported.
    """
    shifts = {'13C': 0.036,
              '15N': 0.016}
    key = isotope.upper()
    if key not in shifts:
        raise KeyError('Isotope "{}" not supported.'.format(isotope))
    return shifts[key]
def _load_taxa_incorp_list(inFile, config):
"""Loading list of taxa that incorporate isotope.
Parameters
----------
inFile : str
File name of taxon list
config : config object
Returns
-------
{library:[taxon1, ...]}
"""
taxa = {}
with open(inFile, 'rb') as inFH:
for line in inFH:
line = line.rstrip().split('\t')
if len(line) == 1:
line = [[x,line[0]] for x in config.keys()]
else:
line = [line]
for x in line:
try:
taxa[x[0]].append(x[1])
except KeyError:
taxa[x[0]] = [x[1]]
return taxa
| false | true |
f72276e645f03d22eb9c7347b11c45627ef65da4 | 38,018 | py | Python | dis_sdk_python/dependency/google/protobuf/descriptor_pool.py | leishanlin/huaweicloud-sdk-python-dis | 900317432b9e9b3fea331d2cb9aa402594f9992b | [
"Apache-2.0"
] | null | null | null | dis_sdk_python/dependency/google/protobuf/descriptor_pool.py | leishanlin/huaweicloud-sdk-python-dis | 900317432b9e9b3fea331d2cb9aa402594f9992b | [
"Apache-2.0"
] | null | null | null | dis_sdk_python/dependency/google/protobuf/descriptor_pool.py | leishanlin/huaweicloud-sdk-python-dis | 900317432b9e9b3fea331d2cb9aa402594f9992b | [
"Apache-2.0"
] | null | null | null | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides DescriptorPool to use as a container for proto2 descriptors.
The DescriptorPool is used in conjection with a DescriptorDatabase to maintain
a collection of protocol buffer descriptors for use when dynamically creating
message types at runtime.
For most applications protocol buffers should be used via modules generated by
the protocol buffer compiler tool. This should only be used when the type of
protocol buffers used in an application or library cannot be predetermined.
Below is a straightforward example on how to use this class:
pool = DescriptorPool()
file_descriptor_protos = [ ... ]
for file_descriptor_proto in file_descriptor_protos:
pool.Add(file_descriptor_proto)
my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType')
The message descriptor can be used in conjunction with the message_factory
module in order to create a protocol buffer class that can be encoded and
decoded.
If you want to get a Python class for the specified proto, use the
helper functions inside google.protobuf.message_factory
directly instead of this class.
"""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import collections
import warnings
from dis_sdk_python.dependency.google.protobuf import descriptor
from dis_sdk_python.dependency.google.protobuf import descriptor_database
from dis_sdk_python.dependency.google.protobuf import text_encoding
_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access
def _NormalizeFullyQualifiedName(name):
"""Remove leading period from fully-qualified type name.
Due to b/13860351 in descriptor_database.py, types in the root namespace are
generated with a leading period. This function removes that prefix.
Args:
name: A str, the fully-qualified symbol name.
Returns:
A str, the normalized fully-qualified symbol name.
"""
return name.lstrip('.')
def _OptionsOrNone(descriptor_proto):
"""Returns the value of the field `options`, or None if it is not set."""
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL)
class DescriptorPool(object):
  """A collection of protobufs dynamically constructed by descriptor protos."""
  if _USE_C_DESCRIPTORS:
    def __new__(cls, descriptor_db=None):
      # When the C++ descriptor implementation is enabled, construction is
      # delegated entirely to the C-extension DescriptorPool; the Python
      # method definitions below are then never used.
      # pylint: disable=protected-access
      return descriptor._message.DescriptorPool(descriptor_db)
  def __init__(self, descriptor_db=None):
    """Initializes a Pool of proto buffs.

    The descriptor_db argument to the constructor is provided to allow
    specialized file descriptor proto lookup code to be triggered on demand. An
    example would be an implementation which will read and compile a file
    specified in a call to FindFileByName() and not require the call to Add()
    at all. Results from this database will be cached internally here as well.

    Args:
      descriptor_db: A secondary source of file descriptors.
    """
    self._internal_db = descriptor_database.DescriptorDatabase()
    self._descriptor_db = descriptor_db
    # Caches keyed by fully-qualified symbol name (str -> descriptor object).
    self._descriptors = {}
    self._enum_descriptors = {}
    self._service_descriptors = {}
    # Cache keyed by file name (str -> FileDescriptor).
    self._file_descriptors = {}
    # File-scope extensions, keyed by extension full name.
    self._toplevel_extensions = {}
    # TODO(jieluo): Remove _file_desc_by_toplevel_extension after
    # maybe year 2020 for compatibility issue (with 3.4.1 only).
    self._file_desc_by_toplevel_extension = {}
    # We store extensions in two two-level mappings: The first key is the
    # descriptor of the message being extended, the second key is the extension
    # full name or its tag number.
    self._extensions_by_name = collections.defaultdict(dict)
    self._extensions_by_number = collections.defaultdict(dict)
def _CheckConflictRegister(self, desc):
"""Check if the descriptor name conflicts with another of the same name.
Args:
desc: Descriptor of a message, enum, service or extension.
"""
desc_name = desc.full_name
for register, descriptor_type in [
(self._descriptors, descriptor.Descriptor),
(self._enum_descriptors, descriptor.EnumDescriptor),
(self._service_descriptors, descriptor.ServiceDescriptor),
(self._toplevel_extensions, descriptor.FieldDescriptor)]:
if desc_name in register:
file_name = register[desc_name].file.name
if not isinstance(desc, descriptor_type) or (
file_name != desc.file.name):
warn_msg = ('Conflict register for file "' + desc.file.name +
'": ' + desc_name +
' is already defined in file "' +
file_name + '"')
warnings.warn(warn_msg, RuntimeWarning)
return
  def Add(self, file_desc_proto):
    """Adds the FileDescriptorProto and its types to this pool.

    Args:
      file_desc_proto: The FileDescriptorProto to add.
    """
    # Stored in the internal DB only; conversion to a FileDescriptor is
    # deferred until the file (or one of its symbols) is first looked up.
    self._internal_db.Add(file_desc_proto)
  def AddSerializedFile(self, serialized_file_desc_proto):
    """Adds the FileDescriptorProto and its types to this pool.

    Args:
      serialized_file_desc_proto: A bytes string, serialization of the
        FileDescriptorProto to add.
    """
    # pylint: disable=g-import-not-at-top
    from dis_sdk_python.dependency.google.protobuf import descriptor_pb2
    # Deserialize, then delegate to Add() for the actual registration.
    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
        serialized_file_desc_proto)
    self.Add(file_desc_proto)
def AddDescriptor(self, desc):
"""Adds a Descriptor to the pool, non-recursively.
If the Descriptor contains nested messages or enums, the caller must
explicitly register them. This method also registers the FileDescriptor
associated with the message.
Args:
desc: A Descriptor.
"""
if not isinstance(desc, descriptor.Descriptor):
raise TypeError('Expected instance of descriptor.Descriptor.')
self._CheckConflictRegister(desc)
self._descriptors[desc.full_name] = desc
self._AddFileDescriptor(desc.file)
def AddEnumDescriptor(self, enum_desc):
"""Adds an EnumDescriptor to the pool.
This method also registers the FileDescriptor associated with the enum.
Args:
enum_desc: An EnumDescriptor.
"""
if not isinstance(enum_desc, descriptor.EnumDescriptor):
raise TypeError('Expected instance of descriptor.EnumDescriptor.')
self._CheckConflictRegister(enum_desc)
self._enum_descriptors[enum_desc.full_name] = enum_desc
self._AddFileDescriptor(enum_desc.file)
def AddServiceDescriptor(self, service_desc):
"""Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
"""
if not isinstance(service_desc, descriptor.ServiceDescriptor):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.')
self._CheckConflictRegister(service_desc)
self._service_descriptors[service_desc.full_name] = service_desc
  def AddExtensionDescriptor(self, extension):
    """Adds a FieldDescriptor describing an extension to the pool.

    Args:
      extension: A FieldDescriptor.

    Raises:
      AssertionError: when another extension with the same number extends the
        same message.
      TypeError: when the specified extension is not a
        descriptor.FieldDescriptor.
    """
    if not (isinstance(extension, descriptor.FieldDescriptor) and
            extension.is_extension):
      raise TypeError('Expected an extension descriptor.')
    if extension.extension_scope is None:
      # File-scope extension: also index it by full name so that
      # FindExtensionByName() can resolve it without the descriptor DB.
      self._CheckConflictRegister(extension)
      self._toplevel_extensions[extension.full_name] = extension
    try:
      existing_desc = self._extensions_by_number[
          extension.containing_type][extension.number]
    except KeyError:
      pass
    else:
      # Re-adding the identical descriptor object is a no-op; a different
      # descriptor claiming the same field number is a hard error.
      if extension is not existing_desc:
        raise AssertionError(
            'Extensions "%s" and "%s" both try to extend message type "%s" '
            'with field number %d.' %
            (extension.full_name, existing_desc.full_name,
             extension.containing_type.full_name, extension.number))
    self._extensions_by_number[extension.containing_type][
        extension.number] = extension
    self._extensions_by_name[extension.containing_type][
        extension.full_name] = extension
    # Also register MessageSet extensions with the type name.
    if _IsMessageSetExtension(extension):
      self._extensions_by_name[extension.containing_type][
          extension.message_type.full_name] = extension
def AddFileDescriptor(self, file_desc):
"""Adds a FileDescriptor to the pool, non-recursively.
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor.
"""
self._AddFileDescriptor(file_desc)
# TODO(jieluo): This is a temporary solution for FieldDescriptor.file.
# FieldDescriptor.file is added in code gen. Remove this solution after
# maybe 2020 for compatibility reason (with 3.4.1 only).
for extension in list(file_desc.extensions_by_name.values()):
self._file_desc_by_toplevel_extension[
extension.full_name] = file_desc
def _AddFileDescriptor(self, file_desc):
"""Adds a FileDescriptor to the pool, non-recursively.
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor.
"""
if not isinstance(file_desc, descriptor.FileDescriptor):
raise TypeError('Expected instance of descriptor.FileDescriptor.')
self._file_descriptors[file_desc.name] = file_desc
def FindFileByName(self, file_name):
"""Gets a FileDescriptor by file name.
Args:
file_name: The path to the file to get a descriptor for.
Returns:
A FileDescriptor for the named file.
Raises:
KeyError: if the file cannot be found in the pool.
"""
try:
return self._file_descriptors[file_name]
except KeyError:
pass
try:
file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file named %s' % file_name)
return self._ConvertFileProtoToFileDescriptor(file_proto)
  def FindFileContainingSymbol(self, symbol):
    """Gets the FileDescriptor for the file containing the specified symbol.

    Args:
      symbol: The name of the symbol to search for.

    Returns:
      A FileDescriptor that contains the specified symbol.

    Raises:
      KeyError: if the file cannot be found in the pool.
    """
    symbol = _NormalizeFullyQualifiedName(symbol)
    # Each lookup below falls through to the next on KeyError: message,
    # enum and service caches first, then the descriptor database(s), then
    # top-level extensions, and finally extensions nested in a message.
    try:
      return self._descriptors[symbol].file
    except KeyError:
      pass
    try:
      return self._enum_descriptors[symbol].file
    except KeyError:
      pass
    try:
      return self._service_descriptors[symbol].file
    except KeyError:
      pass
    try:
      return self._FindFileContainingSymbolInDb(symbol)
    except KeyError:
      pass
    try:
      return self._file_desc_by_toplevel_extension[symbol]
    except KeyError:
      pass
    # Try nested extensions inside a message.
    message_name, _, extension_name = symbol.rpartition('.')
    try:
      message = self.FindMessageTypeByName(message_name)
      assert message.extensions_by_name[extension_name]
      return message.file
    except KeyError:
      raise KeyError('Cannot find a file containing %s' % symbol)
def FindMessageTypeByName(self, full_name):
"""Loads the named descriptor from the pool.
Args:
full_name: The full name of the descriptor to load.
Returns:
The descriptor for the named type.
Raises:
KeyError: if the message cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._descriptors[full_name]
def FindEnumTypeByName(self, full_name):
"""Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type.
Raises:
KeyError: if the enum cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._enum_descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._enum_descriptors[full_name]
def FindFieldByName(self, full_name):
"""Loads the named field descriptor from the pool.
Args:
full_name: The full name of the field descriptor to load.
Returns:
The field descriptor for the named field.
Raises:
KeyError: if the field cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, field_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.fields_by_name[field_name]
def FindOneofByName(self, full_name):
"""Loads the named oneof descriptor from the pool.
Args:
full_name: The full name of the oneof descriptor to load.
Returns:
The oneof descriptor for the named oneof.
Raises:
KeyError: if the oneof cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, oneof_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.oneofs_by_name[oneof_name]
  def FindExtensionByName(self, full_name):
    """Loads the named extension descriptor from the pool.

    Args:
      full_name: The full name of the extension descriptor to load.

    Returns:
      A FieldDescriptor, describing the named extension.

    Raises:
      KeyError: if the extension cannot be found in the pool.
    """
    full_name = _NormalizeFullyQualifiedName(full_name)
    try:
      # The proto compiler does not give any link between the FileDescriptor
      # and top-level extensions unless the FileDescriptorProto is added to
      # the DescriptorDatabase, but this can impact memory usage.
      # So we registered these extensions by name explicitly.
      return self._toplevel_extensions[full_name]
    except KeyError:
      # Fall through to the scope-based lookup below.
      pass
    message_name, _, extension_name = full_name.rpartition('.')
    try:
      # Most extensions are nested inside a message.
      scope = self.FindMessageTypeByName(message_name)
    except KeyError:
      # Some extensions are defined at file scope.
      scope = self._FindFileContainingSymbolInDb(full_name)
    return scope.extensions_by_name[extension_name]
def FindExtensionByNumber(self, message_descriptor, number):
"""Gets the extension of the specified message with the specified number.
Extensions have to be registered to this pool by calling
AddExtensionDescriptor.
Args:
message_descriptor: descriptor of the extended message.
number: integer, number of the extension field.
Returns:
A FieldDescriptor describing the extension.
Raises:
KeyError: when no extension with the given number is known for the
specified message.
"""
return self._extensions_by_number[message_descriptor][number]
def FindAllExtensions(self, message_descriptor):
"""Gets all the known extension of a given message.
Extensions have to be registered to this pool by calling
AddExtensionDescriptor.
Args:
message_descriptor: descriptor of the extended message.
Returns:
A list of FieldDescriptor describing the extensions.
"""
return list(self._extensions_by_number[message_descriptor].values())
def FindServiceByName(self, full_name):
  """Loads the named service descriptor from the pool.

  Args:
    full_name: The full name of the service descriptor to load.

  Returns:
    The service descriptor for the named service.

  Raises:
    KeyError: if the service cannot be found in the pool.
  """
  name = _NormalizeFullyQualifiedName(full_name)
  try:
    return self._service_descriptors[name]
  except KeyError:
    # Not cached yet: loading the defining file from the descriptor DB
    # registers the service as a side effect.
    self._FindFileContainingSymbolInDb(name)
  return self._service_descriptors[name]
def _FindFileContainingSymbolInDb(self, symbol):
"""Finds the file in descriptor DB containing the specified symbol.
Args:
symbol: The name of the symbol to search for.
Returns:
A FileDescriptor that contains the specified symbol.
Raises:
KeyError: if the file cannot be found in the descriptor database.
"""
try:
file_proto = self._internal_db.FindFileContainingSymbol(symbol)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file containing %s' % symbol)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def _ConvertFileProtoToFileDescriptor(self, file_proto):
  """Creates a FileDescriptor from a proto or returns a cached copy.

  This method also has the side effect of loading all the symbols found in
  the file into the appropriate dictionaries in the pool.

  Args:
    file_proto: The proto to convert.

  Returns:
    A FileDescriptor matching the passed in proto.
  """
  if file_proto.name not in self._file_descriptors:
    built_deps = list(self._GetDeps(file_proto.dependency))
    direct_deps = [self.FindFileByName(n) for n in file_proto.dependency]
    public_deps = [direct_deps[i] for i in file_proto.public_dependency]
    file_descriptor = descriptor.FileDescriptor(
        pool=self,
        name=file_proto.name,
        package=file_proto.package,
        syntax=file_proto.syntax,
        options=_OptionsOrNone(file_proto),
        serialized_pb=file_proto.SerializeToString(),
        dependencies=direct_deps,
        public_dependencies=public_deps)
    scope = {}

    # This loop extracts all the message and enum types from all the
    # dependencies of the file_proto. This is necessary to create the
    # scope of available message types when defining the passed in
    # file proto.
    for dependency in built_deps:
      scope.update(self._ExtractSymbols(
          list(dependency.message_types_by_name.values())))
      scope.update((_PrefixWithDot(enum.full_name), enum)
                   for enum in list(dependency.enum_types_by_name.values()))

    for message_type in file_proto.message_type:
      message_desc = self._ConvertMessageDescriptor(
          message_type, file_proto.package, file_descriptor, scope,
          file_proto.syntax)
      file_descriptor.message_types_by_name[message_desc.name] = (
          message_desc)

    for enum_type in file_proto.enum_type:
      file_descriptor.enum_types_by_name[enum_type.name] = (
          self._ConvertEnumDescriptor(enum_type, file_proto.package,
                                      file_descriptor, None, scope))

    for index, extension_proto in enumerate(file_proto.extension):
      extension_desc = self._MakeFieldDescriptor(
          extension_proto, file_proto.package, index, file_descriptor,
          is_extension=True)
      extension_desc.containing_type = self._GetTypeFromScope(
          file_descriptor.package, extension_proto.extendee, scope)
      self._SetFieldType(extension_proto, extension_desc,
                         file_descriptor.package, scope)
      file_descriptor.extensions_by_name[extension_desc.name] = (
          extension_desc)

    # Field types can only be resolved after every message in the file has
    # been converted, since fields may reference types declared later.
    for desc_proto in file_proto.message_type:
      self._SetAllFieldTypes(file_proto.package, desc_proto, scope)

    if file_proto.package:
      desc_proto_prefix = _PrefixWithDot(file_proto.package)
    else:
      desc_proto_prefix = ''

    # Re-fetch each message from the scope so the descriptor object that was
    # registered there is the one exposed on the file.
    for desc_proto in file_proto.message_type:
      desc = self._GetTypeFromScope(
          desc_proto_prefix, desc_proto.name, scope)
      file_descriptor.message_types_by_name[desc_proto.name] = desc

    for index, service_proto in enumerate(file_proto.service):
      file_descriptor.services_by_name[service_proto.name] = (
          self._MakeServiceDescriptor(service_proto, index, scope,
                                      file_proto.package, file_descriptor))

    self.Add(file_proto)
    self._file_descriptors[file_proto.name] = file_descriptor

  return self._file_descriptors[file_proto.name]
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
                              scope=None, syntax=None):
  """Adds the proto to the pool in the specified package.

  Args:
    desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
    package: The package the proto should be located in.
    file_desc: The file containing this message.
    scope: Dict mapping short and full symbols to message and enum types.
    syntax: string indicating syntax of the file ("proto2" or "proto3")

  Returns:
    The added descriptor.
  """
  if package:
    desc_name = '.'.join((package, desc_proto.name))
  else:
    desc_name = desc_proto.name

  if file_desc is None:
    file_name = None
  else:
    file_name = file_desc.name

  if scope is None:
    scope = {}

  # Convert children first so they exist (and are in `scope`) before the
  # containing Descriptor below is constructed.
  nested = [
      self._ConvertMessageDescriptor(
          nested, desc_name, file_desc, scope, syntax)
      for nested in desc_proto.nested_type]
  enums = [
      self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope)
      for enum in desc_proto.enum_type]
  fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc)
            for index, field in enumerate(desc_proto.field)]
  extensions = [
      self._MakeFieldDescriptor(extension, desc_name, index, file_desc,
                                is_extension=True)
      for index, extension in enumerate(desc_proto.extension)]
  oneofs = [
      descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)),
                                 index, None, [], desc.options)
      for index, desc in enumerate(desc_proto.oneof_decl)]
  extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
  if extension_ranges:
    is_extendable = True
  else:
    is_extendable = False
  desc = descriptor.Descriptor(
      name=desc_proto.name,
      full_name=desc_name,
      filename=file_name,
      containing_type=None,
      fields=fields,
      oneofs=oneofs,
      nested_types=nested,
      enum_types=enums,
      extensions=extensions,
      options=_OptionsOrNone(desc_proto),
      is_extendable=is_extendable,
      extension_ranges=extension_ranges,
      file=file_desc,
      serialized_start=None,
      serialized_end=None,
      syntax=syntax)
  # Link the children back to their containing message.
  for nested in desc.nested_types:
    nested.containing_type = desc
  for enum in desc.enum_types:
    enum.containing_type = desc
  # Wire every field that belongs to a oneof to its OneofDescriptor, and
  # vice versa.
  for field_index, field_desc in enumerate(desc_proto.field):
    if field_desc.HasField('oneof_index'):
      oneof_index = field_desc.oneof_index
      oneofs[oneof_index].fields.append(fields[field_index])
      fields[field_index].containing_oneof = oneofs[oneof_index]

  scope[_PrefixWithDot(desc_name)] = desc
  self._CheckConflictRegister(desc)
  self._descriptors[desc_name] = desc
  return desc
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
                           containing_type=None, scope=None):
  """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.

  Args:
    enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
    package: Optional package name for the new message EnumDescriptor.
    file_desc: The file containing the enum descriptor.
    containing_type: The type containing this enum.
    scope: Scope containing available types.

  Returns:
    The added descriptor
  """
  if package:
    enum_name = '.'.join((package, enum_proto.name))
  else:
    enum_name = enum_proto.name

  if file_desc is None:
    file_name = None
  else:
    file_name = file_desc.name

  values = [self._MakeEnumValueDescriptor(value, index)
            for index, value in enumerate(enum_proto.value)]
  desc = descriptor.EnumDescriptor(name=enum_proto.name,
                                   full_name=enum_name,
                                   filename=file_name,
                                   file=file_desc,
                                   values=values,
                                   containing_type=containing_type,
                                   options=_OptionsOrNone(enum_proto))
  # Register under the fully-dotted name so field type resolution can find
  # the enum, then record it in the pool's registry.
  scope['.%s' % enum_name] = desc
  self._CheckConflictRegister(desc)
  self._enum_descriptors[enum_name] = desc
  return desc
def _MakeFieldDescriptor(self, field_proto, message_name, index,
                         file_desc, is_extension=False):
  """Creates a field descriptor from a FieldDescriptorProto.

  Type-dependent attributes (cpp_type, message_type, enum_type and the
  default value) are deliberately left unset here; _SetFieldType fills them
  in once the full type scope is available.

  Args:
    field_proto: The proto describing the field.
    message_name: The name of the containing message.
    index: Index of the field
    file_desc: The file containing the field descriptor.
    is_extension: Indication that this field is for an extension.

  Returns:
    An initialized FieldDescriptor object
  """
  full_name = (
      '.'.join((message_name, field_proto.name)) if message_name
      else field_proto.name)
  return descriptor.FieldDescriptor(
      name=field_proto.name,
      full_name=full_name,
      index=index,
      number=field_proto.number,
      type=field_proto.type,
      label=field_proto.label,
      cpp_type=None,
      message_type=None,
      enum_type=None,
      containing_type=None,
      has_default_value=False,
      default_value=None,
      is_extension=is_extension,
      extension_scope=None,
      options=_OptionsOrNone(field_proto),
      file=file_desc)
def _SetAllFieldTypes(self, package, desc_proto, scope):
  """Sets all the descriptor's fields's types.

  This method also sets the containing types on any extensions.

  Args:
    package: The current package of desc_proto.
    desc_proto: The message descriptor to update.
    scope: Enclosing scope of available types.
  """
  package = _PrefixWithDot(package)

  main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)

  if package == '.':
    nested_package = _PrefixWithDot(desc_proto.name)
  else:
    nested_package = '.'.join([package, desc_proto.name])

  for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):
    self._SetFieldType(field_proto, field_desc, nested_package, scope)

  for extension_proto, extension_desc in (
      list(zip(desc_proto.extension, main_desc.extensions))):
    extension_desc.containing_type = self._GetTypeFromScope(
        nested_package, extension_proto.extendee, scope)
    self._SetFieldType(extension_proto, extension_desc, nested_package, scope)

  # Recurse into nested message types.
  for nested_type in desc_proto.nested_type:
    self._SetAllFieldTypes(nested_package, nested_type, scope)
def _SetFieldType(self, field_proto, field_desc, package, scope):
  """Sets the field's type, cpp_type, message_type and enum_type.

  Args:
    field_proto: Data about the field in proto format.
    field_desc: The descriptor to modify.
    package: The package the field's container is in.
    scope: Enclosing scope of available types.
  """
  if field_proto.type_name:
    desc = self._GetTypeFromScope(package, field_proto.type_name, scope)
  else:
    desc = None

  if not field_proto.HasField('type'):
    # 'type' may be unset when only 'type_name' was provided; infer
    # message vs. enum from what the type name resolved to.
    if isinstance(desc, descriptor.Descriptor):
      field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE
    else:
      field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM

  field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(
      field_proto.type)

  if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE
      or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):
    field_desc.message_type = desc

  if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
    field_desc.enum_type = desc

  if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:
    field_desc.has_default_value = False
    field_desc.default_value = []
  elif field_proto.HasField('default_value'):
    # Explicit default: parse the textual proto default into a Python value
    # of the field's type.
    field_desc.has_default_value = True
    if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
        field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
      field_desc.default_value = float(field_proto.default_value)
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
      field_desc.default_value = field_proto.default_value
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
      field_desc.default_value = field_proto.default_value.lower() == 'true'
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
      field_desc.default_value = field_desc.enum_type.values_by_name[
          field_proto.default_value].number
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
      field_desc.default_value = text_encoding.CUnescape(
          field_proto.default_value)
    else:
      # All other types are of the "int" type.
      field_desc.default_value = int(field_proto.default_value)
  else:
    # No explicit default: use the zero value for the field's type.
    field_desc.has_default_value = False
    if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
        field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
      field_desc.default_value = 0.0
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
      field_desc.default_value = ''
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
      field_desc.default_value = False
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
      field_desc.default_value = field_desc.enum_type.values[0].number
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
      field_desc.default_value = b''
    else:
      # All other types are of the "int" type.
      field_desc.default_value = 0

  field_desc.type = field_proto.type
def _MakeEnumValueDescriptor(self, value_proto, index):
  """Creates a enum value descriptor object from a enum value proto.

  Args:
    value_proto: The proto describing the enum value.
    index: The index of the enum value.

  Returns:
    An initialized EnumValueDescriptor object.
  """
  return descriptor.EnumValueDescriptor(
      name=value_proto.name,
      number=value_proto.number,
      index=index,
      type=None,
      options=_OptionsOrNone(value_proto))
def _MakeServiceDescriptor(self, service_proto, service_index, scope,
                           package, file_desc):
  """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.

  Args:
    service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message.
    service_index: The index of the service in the File.
    scope: Dict mapping short and full symbols to message and enum types.
    package: Optional package name for the new message EnumDescriptor.
    file_desc: The file containing the service descriptor.

  Returns:
    The added descriptor.
  """
  service_name = (
      '.'.join((package, service_proto.name)) if package
      else service_proto.name)
  methods = []
  for method_index, method_proto in enumerate(service_proto.method):
    methods.append(self._MakeMethodDescriptor(
        method_proto, service_name, package, scope, method_index))
  service_desc = descriptor.ServiceDescriptor(
      name=service_proto.name,
      full_name=service_name,
      index=service_index,
      methods=methods,
      options=_OptionsOrNone(service_proto),
      file=file_desc)
  self._CheckConflictRegister(service_desc)
  self._service_descriptors[service_name] = service_desc
  return service_desc
def _MakeMethodDescriptor(self, method_proto, service_name, package, scope,
                          index):
  """Creates a method descriptor from a MethodDescriptorProto.

  Args:
    method_proto: The proto describing the method.
    service_name: The name of the containing service.
    package: Optional package name to look up for types.
    scope: Scope containing available types.
    index: Index of the method in the service.

  Returns:
    An initialized MethodDescriptor object.
  """
  resolve = self._GetTypeFromScope
  return descriptor.MethodDescriptor(
      name=method_proto.name,
      full_name='.'.join((service_name, method_proto.name)),
      index=index,
      containing_service=None,
      input_type=resolve(package, method_proto.input_type, scope),
      output_type=resolve(package, method_proto.output_type, scope),
      options=_OptionsOrNone(method_proto))
def _ExtractSymbols(self, descriptors):
  """Pulls out all the symbols from descriptor protos.

  Args:
    descriptors: The messages to extract descriptors from.

  Yields:
    A two element tuple of the type name and descriptor object.
  """
  for message_desc in descriptors:
    yield _PrefixWithDot(message_desc.full_name), message_desc
    # Recurse into nested message types.
    for nested_symbol in self._ExtractSymbols(message_desc.nested_types):
      yield nested_symbol
    for enum_desc in message_desc.enum_types:
      yield _PrefixWithDot(enum_desc.full_name), enum_desc
def _GetDeps(self, dependencies):
"""Recursively finds dependencies for file protos.
Args:
dependencies: The names of the files being depended on.
Yields:
Each direct and indirect dependency.
"""
for dependency in dependencies:
dep_desc = self.FindFileByName(dependency)
yield dep_desc
for parent_dep in dep_desc.dependencies:
yield parent_dep
def _GetTypeFromScope(self, package, type_name, scope):
  """Finds a given type name in the current scope.

  Args:
    package: The package the proto should be located in.
    type_name: The name of the type to be found in the scope.
    scope: Dict mapping short and full symbols to message and enum types.

  Returns:
    The descriptor for the requested type.
  """
  if type_name in scope:
    return scope[type_name]
  # Walk outwards from the innermost package: try '.pkg.sub.Type',
  # then '.pkg.Type', then '.Type'.
  parts = _PrefixWithDot(package).split('.')
  while parts:
    candidate = '.'.join(parts + [type_name])
    if candidate in scope:
      return scope[candidate]
    parts.pop()
  # Nothing matched; raise the KeyError for the original name.
  return scope[type_name]
def _PrefixWithDot(name):
return name if name.startswith('.') else '.%s' % name
if _USE_C_DESCRIPTORS:
  # TODO(amauryfa): This pool could be constructed from Python code, when we
  # support a flag like 'use_cpp_generated_pool=True'.
  # pylint: disable=protected-access
  _DEFAULT = descriptor._message.default_pool
else:
  # Pure-Python fallback: one process-wide pool shared by generated modules.
  _DEFAULT = DescriptorPool()
def Default():
  """Returns the process-wide default DescriptorPool."""
  return _DEFAULT
| 35.933837 | 86 | 0.698853 |
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'matthewtoia@google.com (Matt Toia)'
import collections
import warnings
from dis_sdk_python.dependency.google.protobuf import descriptor
from dis_sdk_python.dependency.google.protobuf import descriptor_database
from dis_sdk_python.dependency.google.protobuf import text_encoding
_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access
def _NormalizeFullyQualifiedName(name):
return name.lstrip('.')
def _OptionsOrNone(descriptor_proto):
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _IsMessageSetExtension(field):
  # True iff `field` is an extension whose containing type has the
  # message_set_wire_format option set, declared as an optional message
  # field (i.e. a MessageSet-style extension).
  return (field.is_extension and
          field.containing_type.has_options and
          field.containing_type.GetOptions().message_set_wire_format and
          field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
          field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL)
class DescriptorPool(object):
if _USE_C_DESCRIPTORS:

  def __new__(cls, descriptor_db=None):
    # When the C++ descriptor implementation is active, delegate pool
    # construction to the C-implemented DescriptorPool.
    # pylint: disable=protected-access
    return descriptor._message.DescriptorPool(descriptor_db)
def __init__(self, descriptor_db=None):
  """Initializes a Pool of proto buffs.

  Args:
    descriptor_db: Optional secondary source of file descriptor protos,
      queried on demand when a symbol is not found in the internal database.
  """
  self._internal_db = descriptor_database.DescriptorDatabase()
  self._descriptor_db = descriptor_db
  self._descriptors = {}          # full message name -> Descriptor
  self._enum_descriptors = {}     # full enum name -> EnumDescriptor
  self._service_descriptors = {}  # full service name -> ServiceDescriptor
  self._file_descriptors = {}     # file name -> FileDescriptor
  self._toplevel_extensions = {}  # full extension name -> FieldDescriptor
  # TODO(jieluo): Remove _file_desc_by_toplevel_extension after
  # maybe year 2020 for compatibility issue (with 3.4.1 only).
  self._file_desc_by_toplevel_extension = {}
  # We store extensions in two two-level mappings: The first key is the
  # descriptor of the message being extended, the second key is the extension
  # full name or its tag number.
  self._extensions_by_name = collections.defaultdict(dict)
  self._extensions_by_number = collections.defaultdict(dict)
def _CheckConflictRegister(self, desc):
  """Warns if `desc`'s full name is already registered inconsistently.

  A conflict means the same full name is already bound to a descriptor of a
  different kind, or one coming from a different file; registration still
  proceeds afterwards — this only emits a RuntimeWarning.

  Args:
    desc: Descriptor of a message, enum, service or toplevel extension.
  """
  desc_name = desc.full_name
  for register, descriptor_type in [
      (self._descriptors, descriptor.Descriptor),
      (self._enum_descriptors, descriptor.EnumDescriptor),
      (self._service_descriptors, descriptor.ServiceDescriptor),
      (self._toplevel_extensions, descriptor.FieldDescriptor)]:
    if desc_name in register:
      file_name = register[desc_name].file.name
      if not isinstance(desc, descriptor_type) or (
          file_name != desc.file.name):
        warn_msg = ('Conflict register for file "' + desc.file.name +
                    '": ' + desc_name +
                    ' is already defined in file "' +
                    file_name + '"')
        warnings.warn(warn_msg, RuntimeWarning)
      # The name can appear in at most one register; stop after the first hit.
      return
def Add(self, file_desc_proto):
  """Adds the FileDescriptorProto and its types to this pool.

  Args:
    file_desc_proto: The file descriptor to add.
  """
  self._internal_db.Add(file_desc_proto)
def AddSerializedFile(self, serialized_file_desc_proto):
  """Adds the serialized FileDescriptorProto and its types to this pool.

  Args:
    serialized_file_desc_proto: A bytes string, serialization of the
      FileDescriptorProto to add.
  """
  # pylint: disable=g-import-not-at-top
  from dis_sdk_python.dependency.google.protobuf import descriptor_pb2
  file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
      serialized_file_desc_proto)
  self.Add(file_desc_proto)
def AddDescriptor(self, desc):
  """Adds a message Descriptor (and its file) to the pool.

  Raises:
    TypeError: if `desc` is not a descriptor.Descriptor instance.
  """
  if not isinstance(desc, descriptor.Descriptor):
    raise TypeError('Expected instance of descriptor.Descriptor.')

  self._CheckConflictRegister(desc)

  self._descriptors[desc.full_name] = desc
  self._AddFileDescriptor(desc.file)
def AddEnumDescriptor(self, enum_desc):
  """Adds an EnumDescriptor (and its file) to the pool.

  Raises:
    TypeError: if `enum_desc` is not a descriptor.EnumDescriptor instance.
  """
  if not isinstance(enum_desc, descriptor.EnumDescriptor):
    raise TypeError('Expected instance of descriptor.EnumDescriptor.')

  self._CheckConflictRegister(enum_desc)
  self._enum_descriptors[enum_desc.full_name] = enum_desc
  self._AddFileDescriptor(enum_desc.file)
def AddServiceDescriptor(self, service_desc):
  """Adds a ServiceDescriptor to the pool.

  Raises:
    TypeError: if `service_desc` is not a descriptor.ServiceDescriptor.
  """
  if not isinstance(service_desc, descriptor.ServiceDescriptor):
    raise TypeError('Expected instance of descriptor.ServiceDescriptor.')

  self._CheckConflictRegister(service_desc)
  self._service_descriptors[service_desc.full_name] = service_desc
def AddExtensionDescriptor(self, extension):
  """Adds a FieldDescriptor describing an extension to the pool.

  Args:
    extension: A FieldDescriptor with is_extension set.

  Raises:
    TypeError: if `extension` is not an extension FieldDescriptor.
    AssertionError: if a different extension with the same number already
      extends the same containing message type.
  """
  if not (isinstance(extension, descriptor.FieldDescriptor) and
          extension.is_extension):
    raise TypeError('Expected an extension descriptor.')

  if extension.extension_scope is None:
    self._CheckConflictRegister(extension)
    self._toplevel_extensions[extension.full_name] = extension

  try:
    existing_desc = self._extensions_by_number[
        extension.containing_type][extension.number]
  except KeyError:
    pass
  else:
    # Re-registering the identical descriptor object is a no-op; only a
    # different descriptor with the same number is an error.
    if extension is not existing_desc:
      raise AssertionError(
          'Extensions "%s" and "%s" both try to extend message type "%s" '
          'with field number %d.' %
          (extension.full_name, existing_desc.full_name,
           extension.containing_type.full_name, extension.number))

  self._extensions_by_number[extension.containing_type][
      extension.number] = extension
  self._extensions_by_name[extension.containing_type][
      extension.full_name] = extension

  # Also register MessageSet extensions with the type name.
  if _IsMessageSetExtension(extension):
    self._extensions_by_name[extension.containing_type][
        extension.message_type.full_name] = extension
def AddFileDescriptor(self, file_desc):
  """Adds a FileDescriptor to the pool, non-recursively.

  Messages and enums contained in the file must be registered explicitly
  by the caller.

  Args:
    file_desc: A FileDescriptor.
  """
  self._AddFileDescriptor(file_desc)
  # TODO(jieluo): This is a temporary solution for FieldDescriptor.file.
  # FieldDescriptor.file is added in code gen. Remove this solution after
  # maybe 2020 for compatibility reason (with 3.4.1 only).
  for extension in list(file_desc.extensions_by_name.values()):
    self._file_desc_by_toplevel_extension[
        extension.full_name] = file_desc
def _AddFileDescriptor(self, file_desc):
  """Registers a FileDescriptor under its file name.

  Raises:
    TypeError: if `file_desc` is not a descriptor.FileDescriptor instance.
  """
  if not isinstance(file_desc, descriptor.FileDescriptor):
    raise TypeError('Expected instance of descriptor.FileDescriptor.')
  self._file_descriptors[file_desc.name] = file_desc
def FindFileByName(self, file_name):
  """Gets a FileDescriptor by file name, loading it from a DB if needed.

  Args:
    file_name: The path to the file to get a descriptor for.

  Returns:
    A FileDescriptor for the named file.

  Raises:
    KeyError: if the file cannot be found in the pool or any database.
  """
  try:
    return self._file_descriptors[file_name]
  except KeyError:
    pass

  try:
    file_proto = self._internal_db.FindFileByName(file_name)
  except KeyError as error:
    if self._descriptor_db:
      file_proto = self._descriptor_db.FindFileByName(file_name)
    else:
      raise error
  if not file_proto:
    raise KeyError('Cannot find a file named %s' % file_name)
  return self._ConvertFileProtoToFileDescriptor(file_proto)
def FindFileContainingSymbol(self, symbol):
  """Gets the FileDescriptor for the file containing the specified symbol.

  Args:
    symbol: The name of the symbol to search for.

  Returns:
    A FileDescriptor that contains the specified symbol.

  Raises:
    KeyError: if the file cannot be found in the pool.
  """
  symbol = _NormalizeFullyQualifiedName(symbol)
  # Check each kind of cached descriptor before falling back to the DB.
  try:
    return self._descriptors[symbol].file
  except KeyError:
    pass

  try:
    return self._enum_descriptors[symbol].file
  except KeyError:
    pass

  try:
    return self._service_descriptors[symbol].file
  except KeyError:
    pass

  try:
    return self._FindFileContainingSymbolInDb(symbol)
  except KeyError:
    pass

  try:
    return self._file_desc_by_toplevel_extension[symbol]
  except KeyError:
    pass

  # Try nested extensions inside a message.
  message_name, _, extension_name = symbol.rpartition('.')
  try:
    message = self.FindMessageTypeByName(message_name)
    assert message.extensions_by_name[extension_name]
    return message.file
  except KeyError:
    raise KeyError('Cannot find a file containing %s' % symbol)
def FindMessageTypeByName(self, full_name):
  """Loads the named message descriptor from the pool.

  Raises:
    KeyError: if the message cannot be found in the pool.
  """
  full_name = _NormalizeFullyQualifiedName(full_name)
  if full_name not in self._descriptors:
    # Loading the defining file registers the message as a side effect.
    self._FindFileContainingSymbolInDb(full_name)
  return self._descriptors[full_name]
def FindEnumTypeByName(self, full_name):
  """Loads the named enum descriptor from the pool.

  Raises:
    KeyError: if the enum cannot be found in the pool.
  """
  full_name = _NormalizeFullyQualifiedName(full_name)
  if full_name not in self._enum_descriptors:
    # Loading the defining file registers the enum as a side effect.
    self._FindFileContainingSymbolInDb(full_name)
  return self._enum_descriptors[full_name]
def FindFieldByName(self, full_name):
  """Loads the named field descriptor from the pool.

  Raises:
    KeyError: if the field cannot be found in the pool.
  """
  full_name = _NormalizeFullyQualifiedName(full_name)
  message_name, _, field_name = full_name.rpartition('.')
  message_descriptor = self.FindMessageTypeByName(message_name)
  return message_descriptor.fields_by_name[field_name]
def FindOneofByName(self, full_name):
  """Loads the named oneof descriptor from the pool.

  Raises:
    KeyError: if the oneof cannot be found in the pool.
  """
  full_name = _NormalizeFullyQualifiedName(full_name)
  message_name, _, oneof_name = full_name.rpartition('.')
  message_descriptor = self.FindMessageTypeByName(message_name)
  return message_descriptor.oneofs_by_name[oneof_name]
def FindExtensionByName(self, full_name):
  """Loads the named extension descriptor from the pool.

  Returns:
    A FieldDescriptor, describing the named extension.

  Raises:
    KeyError: if the extension cannot be found in the pool.
  """
  full_name = _NormalizeFullyQualifiedName(full_name)
  try:
    # The proto compiler does not give any link between the FileDescriptor
    # and top-level extensions unless the FileDescriptorProto is added to
    # the DescriptorDatabase, but this can impact memory usage.
    # So we registered these extensions by name explicitly.
    return self._toplevel_extensions[full_name]
  except KeyError:
    pass
  message_name, _, extension_name = full_name.rpartition('.')
  try:
    # Most extensions are nested inside a message.
    scope = self.FindMessageTypeByName(message_name)
  except KeyError:
    # Some extensions are defined at file scope.
    scope = self._FindFileContainingSymbolInDb(full_name)
  return scope.extensions_by_name[extension_name]
def FindExtensionByNumber(self, message_descriptor, number):
  """Gets the registered extension of a message by its field number.

  Raises:
    KeyError: when no extension with the given number is known for the
      specified message.
  """
  return self._extensions_by_number[message_descriptor][number]
def FindAllExtensions(self, message_descriptor):
  """Gets a list of all known extension FieldDescriptors of a message."""
  return list(self._extensions_by_number[message_descriptor].values())
def FindServiceByName(self, full_name):
  """Loads the named service descriptor from the pool.

  Raises:
    KeyError: if the service cannot be found in the pool.
  """
  full_name = _NormalizeFullyQualifiedName(full_name)
  if full_name not in self._service_descriptors:
    # Loading the defining file registers the service as a side effect.
    self._FindFileContainingSymbolInDb(full_name)
  return self._service_descriptors[full_name]
def _FindFileContainingSymbolInDb(self, symbol):
  """Finds the file in a descriptor DB containing the specified symbol.

  Args:
    symbol: The name of the symbol to search for.

  Returns:
    A FileDescriptor that contains the specified symbol.

  Raises:
    KeyError: if the file cannot be found in the descriptor database.
  """
  try:
    file_proto = self._internal_db.FindFileContainingSymbol(symbol)
  except KeyError as error:
    if self._descriptor_db:
      file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
    else:
      raise error
  if not file_proto:
    raise KeyError('Cannot find a file containing %s' % symbol)
  return self._ConvertFileProtoToFileDescriptor(file_proto)
def _ConvertFileProtoToFileDescriptor(self, file_proto):
  """Creates a FileDescriptor from a proto or returns a cached copy.

  This method also has the side effect of loading all the symbols found in
  the file into the appropriate dictionaries in the pool.

  Args:
    file_proto: The proto to convert.

  Returns:
    A FileDescriptor matching the passed in proto.
  """
  if file_proto.name not in self._file_descriptors:
    built_deps = list(self._GetDeps(file_proto.dependency))
    direct_deps = [self.FindFileByName(n) for n in file_proto.dependency]
    public_deps = [direct_deps[i] for i in file_proto.public_dependency]
    file_descriptor = descriptor.FileDescriptor(
        pool=self,
        name=file_proto.name,
        package=file_proto.package,
        syntax=file_proto.syntax,
        options=_OptionsOrNone(file_proto),
        serialized_pb=file_proto.SerializeToString(),
        dependencies=direct_deps,
        public_dependencies=public_deps)
    scope = {}

    # This loop extracts all the message and enum types from all the
    # dependencies of the file_proto. This is necessary to create the
    # scope of available message types when defining the passed in
    # file proto.
    for dependency in built_deps:
      scope.update(self._ExtractSymbols(
          list(dependency.message_types_by_name.values())))
      scope.update((_PrefixWithDot(enum.full_name), enum)
                   for enum in list(dependency.enum_types_by_name.values()))

    for message_type in file_proto.message_type:
      message_desc = self._ConvertMessageDescriptor(
          message_type, file_proto.package, file_descriptor, scope,
          file_proto.syntax)
      file_descriptor.message_types_by_name[message_desc.name] = (
          message_desc)

    for enum_type in file_proto.enum_type:
      file_descriptor.enum_types_by_name[enum_type.name] = (
          self._ConvertEnumDescriptor(enum_type, file_proto.package,
                                      file_descriptor, None, scope))

    for index, extension_proto in enumerate(file_proto.extension):
      extension_desc = self._MakeFieldDescriptor(
          extension_proto, file_proto.package, index, file_descriptor,
          is_extension=True)
      extension_desc.containing_type = self._GetTypeFromScope(
          file_descriptor.package, extension_proto.extendee, scope)
      self._SetFieldType(extension_proto, extension_desc,
                         file_descriptor.package, scope)
      file_descriptor.extensions_by_name[extension_desc.name] = (
          extension_desc)

    # Field types can only be resolved after every message in the file has
    # been converted, since fields may reference types declared later.
    for desc_proto in file_proto.message_type:
      self._SetAllFieldTypes(file_proto.package, desc_proto, scope)

    if file_proto.package:
      desc_proto_prefix = _PrefixWithDot(file_proto.package)
    else:
      desc_proto_prefix = ''

    # Re-fetch each message from the scope so the descriptor object that was
    # registered there is the one exposed on the file.
    for desc_proto in file_proto.message_type:
      desc = self._GetTypeFromScope(
          desc_proto_prefix, desc_proto.name, scope)
      file_descriptor.message_types_by_name[desc_proto.name] = desc

    for index, service_proto in enumerate(file_proto.service):
      file_descriptor.services_by_name[service_proto.name] = (
          self._MakeServiceDescriptor(service_proto, index, scope,
                                      file_proto.package, file_descriptor))

    self.Add(file_proto)
    self._file_descriptors[file_proto.name] = file_descriptor

  return self._file_descriptors[file_proto.name]
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
                              scope=None, syntax=None):
  """Adds the proto to the pool in the specified package.

  Args:
    desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
    package: The package the proto should be located in.
    file_desc: The file containing this message.
    scope: Dict mapping short and full symbols to message and enum types.
    syntax: string indicating syntax of the file ("proto2" or "proto3")

  Returns:
    The added descriptor.
  """
  if package:
    desc_name = '.'.join((package, desc_proto.name))
  else:
    desc_name = desc_proto.name

  if file_desc is None:
    file_name = None
  else:
    file_name = file_desc.name

  if scope is None:
    scope = {}

  # Convert children first so they exist (and are in `scope`) before the
  # containing Descriptor below is constructed.
  nested = [
      self._ConvertMessageDescriptor(
          nested, desc_name, file_desc, scope, syntax)
      for nested in desc_proto.nested_type]
  enums = [
      self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope)
      for enum in desc_proto.enum_type]
  fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc)
            for index, field in enumerate(desc_proto.field)]
  extensions = [
      self._MakeFieldDescriptor(extension, desc_name, index, file_desc,
                                is_extension=True)
      for index, extension in enumerate(desc_proto.extension)]
  oneofs = [
      descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)),
                                 index, None, [], desc.options)
      for index, desc in enumerate(desc_proto.oneof_decl)]
  extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
  if extension_ranges:
    is_extendable = True
  else:
    is_extendable = False
  desc = descriptor.Descriptor(
      name=desc_proto.name,
      full_name=desc_name,
      filename=file_name,
      containing_type=None,
      fields=fields,
      oneofs=oneofs,
      nested_types=nested,
      enum_types=enums,
      extensions=extensions,
      options=_OptionsOrNone(desc_proto),
      is_extendable=is_extendable,
      extension_ranges=extension_ranges,
      file=file_desc,
      serialized_start=None,
      serialized_end=None,
      syntax=syntax)
  # Link the children back to their containing message.
  for nested in desc.nested_types:
    nested.containing_type = desc
  for enum in desc.enum_types:
    enum.containing_type = desc
  # Wire every field that belongs to a oneof to its OneofDescriptor, and
  # vice versa.
  for field_index, field_desc in enumerate(desc_proto.field):
    if field_desc.HasField('oneof_index'):
      oneof_index = field_desc.oneof_index
      oneofs[oneof_index].fields.append(fields[field_index])
      fields[field_index].containing_oneof = oneofs[oneof_index]

  scope[_PrefixWithDot(desc_name)] = desc
  self._CheckConflictRegister(desc)
  self._descriptors[desc_name] = desc
  return desc
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
                           containing_type=None, scope=None):
  """Builds an EnumDescriptor from its proto and registers it in the pool."""
  # Qualify the enum name with its package, when one is given.
  full_name = ('.'.join((package, enum_proto.name))
               if package else enum_proto.name)
  file_name = None if file_desc is None else file_desc.name
  value_descriptors = [
      self._MakeEnumValueDescriptor(value_proto, value_index)
      for value_index, value_proto in enumerate(enum_proto.value)]
  enum_desc = descriptor.EnumDescriptor(
      name=enum_proto.name,
      full_name=full_name,
      filename=file_name,
      file=file_desc,
      values=value_descriptors,
      containing_type=containing_type,
      options=_OptionsOrNone(enum_proto))
  # Make the enum resolvable by its dotted name and index it in the pool.
  scope['.%s' % full_name] = enum_desc
  self._CheckConflictRegister(enum_desc)
  self._enum_descriptors[full_name] = enum_desc
  return enum_desc
def _MakeFieldDescriptor(self, field_proto, message_name, index,
                         file_desc, is_extension=False):
  """Creates a FieldDescriptor for one field proto.

  Message/enum type references, cpp_type and default values are left unset
  here; they are filled in later by _SetFieldType once every type is known.
  """
  qualified_name = ('.'.join((message_name, field_proto.name))
                    if message_name else field_proto.name)
  return descriptor.FieldDescriptor(
      name=field_proto.name,
      full_name=qualified_name,
      index=index,
      number=field_proto.number,
      label=field_proto.label,
      type=field_proto.type,
      # Resolved later by _SetFieldType.
      cpp_type=None,
      message_type=None,
      enum_type=None,
      containing_type=None,
      has_default_value=False,
      default_value=None,
      is_extension=is_extension,
      extension_scope=None,
      options=_OptionsOrNone(field_proto),
      file=file_desc)
def _SetAllFieldTypes(self, package, desc_proto, scope):
  """Resolves the types of all fields of a message, recursively.

  Also sets the containing type on any extensions declared by the message.

  Args:
    package: The current (dotted) package of desc_proto.
    desc_proto: The message DescriptorProto whose fields are resolved.
    scope: Enclosing scope of available types (dotted name -> descriptor).
  """
  package = _PrefixWithDot(package)
  main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)
  # Nested symbols are resolved relative to this message's full name.
  if package == '.':
    nested_package = _PrefixWithDot(desc_proto.name)
  else:
    nested_package = '.'.join([package, desc_proto.name])
  for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):
    self._SetFieldType(field_proto, field_desc, nested_package, scope)
  for extension_proto, extension_desc in (
      list(zip(desc_proto.extension, main_desc.extensions))):
    # An extension's containing type is the message it extends.
    extension_desc.containing_type = self._GetTypeFromScope(
        nested_package, extension_proto.extendee, scope)
    self._SetFieldType(extension_proto, extension_desc, nested_package, scope)
  for nested_type in desc_proto.nested_type:
    self._SetAllFieldTypes(nested_package, nested_type, scope)
def _SetFieldType(self, field_proto, field_desc, package, scope):
  """Sets the field's type, cpp_type, message_type, enum_type and default.

  Args:
    field_proto: Data about the field in proto format.
    field_desc: The FieldDescriptor to modify in place.
    package: The (dotted) package the field's container is in.
    scope: Enclosing scope of available types.
  """
  if field_proto.type_name:
    desc = self._GetTypeFromScope(package, field_proto.type_name, scope)
  else:
    desc = None
  if not field_proto.HasField('type'):
    # Type was omitted: infer message vs. enum from the resolved descriptor.
    if isinstance(desc, descriptor.Descriptor):
      field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE
    else:
      field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM
  field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(
      field_proto.type)
  if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE
      or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):
    field_desc.message_type = desc
  if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
    field_desc.enum_type = desc
  if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:
    # Repeated fields never carry an explicit default; they start empty.
    field_desc.has_default_value = False
    field_desc.default_value = []
  elif field_proto.HasField('default_value'):
    field_desc.has_default_value = True
    # Defaults arrive as strings in the proto; parse them per field type.
    if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
        field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
      field_desc.default_value = float(field_proto.default_value)
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
      field_desc.default_value = field_proto.default_value
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
      field_desc.default_value = field_proto.default_value.lower() == 'true'
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
      # The default is stored as the enum value's name; map it to its number.
      field_desc.default_value = field_desc.enum_type.values_by_name[
          field_proto.default_value].number
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
      field_desc.default_value = text_encoding.CUnescape(
          field_proto.default_value)
    else:
      # All other types are of the "int" type.
      field_desc.default_value = int(field_proto.default_value)
  else:
    field_desc.has_default_value = False
    # No explicit default: use the proto-defined zero value for the type.
    if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
        field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
      field_desc.default_value = 0.0
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
      field_desc.default_value = ''
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
      field_desc.default_value = False
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
      # First declared enum value acts as the default.
      field_desc.default_value = field_desc.enum_type.values[0].number
    elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
      field_desc.default_value = b''
    else:
      # All other types are of the "int" type.
      field_desc.default_value = 0
  field_desc.type = field_proto.type
def _MakeEnumValueDescriptor(self, value_proto, index):
  """Wraps a single EnumValueDescriptorProto in an EnumValueDescriptor."""
  value_kwargs = dict(
      name=value_proto.name,
      number=value_proto.number,
      index=index,
      type=None,
      options=_OptionsOrNone(value_proto))
  return descriptor.EnumValueDescriptor(**value_kwargs)
def _MakeServiceDescriptor(self, service_proto, service_index, scope,
                           package, file_desc):
  """Builds a ServiceDescriptor, registers it in the pool, and returns it."""
  full_name = ('.'.join((package, service_proto.name))
               if package else service_proto.name)
  method_descs = []
  for method_index, method_proto in enumerate(service_proto.method):
    method_descs.append(self._MakeMethodDescriptor(
        method_proto, full_name, package, scope, method_index))
  service_desc = descriptor.ServiceDescriptor(
      name=service_proto.name,
      full_name=full_name,
      index=service_index,
      methods=method_descs,
      options=_OptionsOrNone(service_proto),
      file=file_desc)
  self._CheckConflictRegister(service_desc)
  self._service_descriptors[full_name] = service_desc
  return service_desc
def _MakeMethodDescriptor(self, method_proto, service_name, package, scope,
                          index):
  """Builds a MethodDescriptor, resolving its request/response types."""
  request_type = self._GetTypeFromScope(
      package, method_proto.input_type, scope)
  response_type = self._GetTypeFromScope(
      package, method_proto.output_type, scope)
  return descriptor.MethodDescriptor(
      name=method_proto.name,
      full_name='.'.join((service_name, method_proto.name)),
      index=index,
      containing_service=None,
      input_type=request_type,
      output_type=response_type,
      options=_OptionsOrNone(method_proto))
def _ExtractSymbols(self, descriptors):
  """Recursively yields (dotted_name, descriptor) for messages and enums."""
  for message_desc in descriptors:
    yield (_PrefixWithDot(message_desc.full_name), message_desc)
    # Recurse into nested message types.
    for nested_symbol in self._ExtractSymbols(message_desc.nested_types):
      yield nested_symbol
    for enum_desc in message_desc.enum_types:
      yield (_PrefixWithDot(enum_desc.full_name), enum_desc)
def _GetDeps(self, dependencies):
  """Yields each named dependency's FileDescriptor and its transitive deps."""
  for dep_name in dependencies:
    dep_file = self.FindFileByName(dep_name)
    yield dep_file
    # Dependencies of the dependency come right after it.
    for transitive_dep in dep_file.dependencies:
      yield transitive_dep
def _GetTypeFromScope(self, package, type_name, scope):
if type_name not in scope:
components = _PrefixWithDot(package).split('.')
while components:
possible_match = '.'.join(components + [type_name])
if possible_match in scope:
type_name = possible_match
break
else:
components.pop(-1)
return scope[type_name]
def _PrefixWithDot(name):
return name if name.startswith('.') else '.%s' % name
# Process-wide singleton pool used by generated code that does not create
# its own pool.
if _USE_C_DESCRIPTORS:
  # TODO(amauryfa): This pool could be constructed from Python code, when we
  # support a flag like 'use_cpp_generated_pool=True'.
  # pylint: disable=protected-access
  _DEFAULT = descriptor._message.default_pool
else:
  _DEFAULT = DescriptorPool()
def Default():
  """Returns the process-wide default DescriptorPool singleton."""
  return _DEFAULT
| true | true |
f72277a27d70906472f0c9a71c45935812317f3f | 1,568 | py | Python | tests/io/test_k2sff.py | jorgemarpa/lightkurve | 86320a67eabb3a93f60e9faff0447e4b235bccf2 | [
"MIT"
] | 235 | 2018-01-22T01:22:10.000Z | 2021-02-02T04:57:26.000Z | tests/io/test_k2sff.py | jorgemarpa/lightkurve | 86320a67eabb3a93f60e9faff0447e4b235bccf2 | [
"MIT"
] | 847 | 2018-01-22T05:49:16.000Z | 2021-02-10T17:05:19.000Z | tests/io/test_k2sff.py | jorgemarpa/lightkurve | 86320a67eabb3a93f60e9faff0447e4b235bccf2 | [
"MIT"
] | 121 | 2018-01-22T01:11:19.000Z | 2021-01-26T21:07:07.000Z | import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve.io.k2sff import read_k2sff_lightcurve
from lightkurve import search_lightcurve
@pytest.mark.remote_data
def test_read_k2sff():
    """Reading a K2SFF HLSP file should yield light curves matching the FITS data."""
    url = "http://archive.stsci.edu/hlsps/k2sff/c16/212100000/00236/hlsp_k2sff_k2_lightcurve_212100236-c16_kepler_v1_llc.fits"
    hdulist = fits.open(url)
    extracted_fluxes = []
    # Exercise the reader against two different aperture extensions.
    for extension in ["BESTAPER", "CIRC_APER9"]:
        lightcurve = read_k2sff_lightcurve(url, ext=extension)
        assert type(lightcurve).__name__ == "KeplerLightCurve"
        # `time` and `flux` must mirror the corresponding FITS columns.
        assert_array_equal(hdulist[extension].data["T"], lightcurve.time.value)
        assert_array_equal(hdulist[extension].data["FCOR"], lightcurve.flux.value)
        extracted_fluxes.append(lightcurve.flux)
    # The two apertures differ, so their fluxes must differ as well.
    assert not np.array_equal(extracted_fluxes[0], extracted_fluxes[1])
@pytest.mark.remote_data
def test_search_k2sff():
    """Searching MAST for K2SFF products should return downloadable light curves."""
    # Early campaign: exactly one K2SFF product exists for K2-18 in campaign 1.
    result = search_lightcurve("K2-18", author="K2SFF", campaign=1)
    assert len(result) == 1
    assert result.table["author"][0] == "K2SFF"
    early_lc = result.download()
    assert type(early_lc).__name__ == "KeplerLightCurve"
    assert early_lc.campaign == 1
    # Late campaign: download directly and check the resolved target id.
    late_lc = search_lightcurve("GJ 9827", author="K2SFF", campaign=19).download()
    assert type(late_lc).__name__ == "KeplerLightCurve"
    assert late_lc.targetid == 246389858
    assert late_lc.campaign == 19
| 35.636364 | 126 | 0.699617 | import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve.io.k2sff import read_k2sff_lightcurve
from lightkurve import search_lightcurve
@pytest.mark.remote_data
def test_read_k2sff():
    """Can we read K2SFF files?"""
    url = "http://archive.stsci.edu/hlsps/k2sff/c16/212100000/00236/hlsp_k2sff_k2_lightcurve_212100236-c16_kepler_v1_llc.fits"
    f = fits.open(url)
    # Check the reader against two different aperture extensions.
    fluxes = []
    for ext in ["BESTAPER", "CIRC_APER9"]:
        lc = read_k2sff_lightcurve(url, ext=ext)
        assert type(lc).__name__ == "KeplerLightCurve"
        # `time`/`flux` must match the raw FITS columns.
        assert_array_equal(f[ext].data["T"], lc.time.value)
        assert_array_equal(f[ext].data["FCOR"], lc.flux.value)
        fluxes.append(lc.flux)
    # Distinct apertures must yield distinct flux arrays.
    assert not np.array_equal(fluxes[0], fluxes[1])
@pytest.mark.remote_data
def test_search_k2sff():
    """Can we search and download a K2SFF light curve?"""
    # Try an early campaign first.
    search = search_lightcurve("K2-18", author="K2SFF", campaign=1)
    assert len(search) == 1
    assert search.table["author"][0] == "K2SFF"
    lc = search.download()
    assert type(lc).__name__ == "KeplerLightCurve"
    assert lc.campaign == 1
    # Then a late campaign, checking the resolved target id as well.
    lc = search_lightcurve("GJ 9827", author="K2SFF", campaign=19).download()
    assert type(lc).__name__ == "KeplerLightCurve"
    assert lc.targetid == 246389858
    assert lc.campaign == 19
| true | true |
f72277a4d50c47c876335e9670a84b3bf178e017 | 273 | py | Python | utils/__init__.py | Douile/royale-bot | c55c0fed5db271a27f5a7663ebd6582392198fbd | [
"Apache-2.0"
] | 1 | 2021-12-22T18:07:45.000Z | 2021-12-22T18:07:45.000Z | utils/__init__.py | Douile/royale-bot | c55c0fed5db271a27f5a7663ebd6582392198fbd | [
"Apache-2.0"
] | 1 | 2021-06-08T19:29:56.000Z | 2021-06-08T19:29:56.000Z | utils/__init__.py | Douile/royale-bot | c55c0fed5db271a27f5a7663ebd6582392198fbd | [
"Apache-2.0"
] | 4 | 2019-01-15T00:57:48.000Z | 2019-04-25T10:54:08.000Z | import os
def getEnv(name, default=None):
    """Return the value of environment variable *name*.

    Falls back to *default* when the variable is unset; if no default is
    given either, interactively prompts the user for a value.

    Args:
        name: Name of the environment variable to look up.
        default: Value to use when the variable is missing (optional).

    Returns:
        The environment value, the default, or the interactively entered string.
    """
    value = os.environ.get(name)
    if value is None:  # `is None`, not `== None` (PEP 8)
        if default is None:
            # Last resort: ask the operator directly so startup can proceed
            # even when the deployment forgot to set the variable.
            value = input("Env variable not found, please enter {}: ".format(name))
        else:
            value = default
    return value
| 24.818182 | 83 | 0.578755 | import os
def getEnv(name, default=None):
    """Fetch env var *name*; fall back to *default*, else prompt interactively."""
    value = os.environ.get(name)
    if value is not None:
        return value
    if default is not None:
        return default
    # Neither the environment nor the caller supplied a value: ask the user.
    return input("Env variable not found, please enter {}: ".format(name))
| true | true |
f7227820a8a62443b673431d50a0efce4371625c | 2,727 | py | Python | benchmarks/earthquake/mar2022/benchmark-rivanna.py | laszewsk/mlcommons | 0a07ef2904623f9d3f5eb18581c906813bf563ba | [
"Apache-2.0"
] | 1 | 2022-02-06T04:56:41.000Z | 2022-02-06T04:56:41.000Z | benchmarks/earthquake/mar2022/benchmark-rivanna.py | laszewsk/mlcommons | 0a07ef2904623f9d3f5eb18581c906813bf563ba | [
"Apache-2.0"
] | 6 | 2022-02-22T02:33:38.000Z | 2022-03-31T23:14:09.000Z | benchmarks/earthquake/mar2022/prepare-rivanna-a100.py | laszewsk/mlcommons | 0a07ef2904623f9d3f5eb18581c906813bf563ba | [
"Apache-2.0"
] | 2 | 2022-02-06T04:40:51.000Z | 2022-03-31T03:27:15.000Z | #!/usr/bin/env bash
#SBATCH --job-name=mlcommons-science-earthquake-a100
#SBATCH --output=mlcommons-science-earthquake-a100.out
#SBATCH --error=mlcommons-science-earthquake-a100.err
#SBATCH --partition=gpu
#SBATCH --cpus-per-task=6
#SBATCH --mem=32G
#SBATCH --time=06:00:00
#SBATCH --gres=gpu:a100:1
#SBATCH --account=ds6011-sp22-002
# record top ond gpustat output
# ./sampleTop2.sh thf2bn ${SLURM_JOB_ID} 10 &
GPU_TYPE="a100"
PYTHON_VERSION="3.10.2"
RESOURCE_DIR="/project/ds6011-sp22-002"
# BASE=/scratch/$USER/${GPU_TYPE}
BASE=${RESOURCE_DIR}/$USER/${GPU_TYPE}
HOME=${BASE}
REV="mar2022"
VARIANT="-gregor"
echo "Working in <$(pwd)>"
echo "Base directory in <${BASE}>"
echo "Overridden home in <${HOME}>"
echo "Revision: <${REV}>"
echo "Variant: <${VARIANT}>"
echo "Python: <${PYTHON_VERSION}>"
echo "GPU: <${GPU_TYPE}>"
module load cuda cudnn
nvidia-smi
mkdir -p ${BASE}
cd ${BASE}
if [ ! -e "${BASE}/.local/python/${PYTHON_VESRION}" ] ; then
tar Jxvf "${RESOURCE_DIR}/python-${PYTHON_VERSION}.tar.xz" -C "${BASE}"
fi
export LD_LIBRARY_PATH=${BASE}/.local/ssl/lib:$LD_LIBRARY_PATH
echo "Python setup"
if [ ! -e "${BASE}/ENV3/bin/activate" ]; then
${BASE}/.local/python/${PYTHON_VERSION}/bin/python3.10 -m venv ${BASE}/ENV3
fi
echo "ENV3 Setup"
source ${BASE}/ENV3/bin/activate
python -m pip install -U pip wheel papermill
if [ ! -e "${BASE}/mlcommons-data-earthquake" ]; then
git clone https://github.com/laszewsk/mlcommons-data-earthquake.git "${BASE}/mlcommons-data-earthquake"
else
(cd ${BASE}/mlcommons-data-earthquake ; \
git fetch origin ; \
git checkout main ; \
git reset --hard origin/main ; \
git clean -d --force)
fi
if [ ! -e "${BASE}/mlcommons" ]; then
git clone https://github.com/laszewsk/mlcommons.git "${BASE}/mlcommons"
else
(cd ${BASE}/mlcommons ; \
git fetch origin ; \
git checkout main ; \
git reset --hard origin/main ; \
git clean -d --force)
fi
if [ ! -e ${BASE}/mlcommons/benchmarks/earthquake/data/EarthquakeDec2020 ]; then
tar Jxvf ${BASE}/mlcommons-data-earthquake/data.tar.xz \
-C ${BASE}/mlcommons/benchmarks/earthquake
mkdir -p ${BASE}/mlcommons/benchmarks/earthquake/data/EarthquakeDec2020/outputs
fi
(cd ${BASE}/mlcommons/benchmarks/earthquake/${REV} && \
python -m pip install -r requirements.txt)
# prg >> xyz.out &
(cd ${BASE}/mlcommons/benchmarks/earthquake/${REV} && \
cp "FFFFWNPFEARTHQ_newTFTv29${VARIANT}.ipynb" FFFFWNPFEARTHQ_newTFTv29-$USER.ipynb)
(cd mlcommons/benchmarks/earthquake/mar2022 && \
papermill FFFFWNPFEARTHQ_newTFTv29-$USER.ipynb FFFFWNPFEARTHQ_newTFTv29-$USER-$GPU_TYPE.ipynb --no-progress-bar --log-output --log-level INFO)
| 28.40625 | 146 | 0.685735 |
# --- Run configuration -------------------------------------------------------
GPU_TYPE="a100"
PYTHON_VERSION="3.10.2"
RESOURCE_DIR="/project/ds6011-sp22-002"
BASE=${RESOURCE_DIR}/$USER/${GPU_TYPE}
HOME=${BASE}
REV="mar2022"
VARIANT="-gregor"
echo "Working in <$(pwd)>"
echo "Base directory in <${BASE}>"
echo "Overridden home in <${HOME}>"
echo "Revision: <${REV}>"
echo "Variant: <${VARIANT}>"
echo "Python: <${PYTHON_VERSION}>"
echo "GPU: <${GPU_TYPE}>"
module load cuda cudnn
nvidia-smi
mkdir -p ${BASE}
cd ${BASE}
# Unpack the prebuilt Python once.
# BUGFIX: previously tested the misspelled ${PYTHON_VESRION} (expands empty),
# so the check looked at the wrong path and re-extracted on every run.
if [ ! -e "${BASE}/.local/python/${PYTHON_VERSION}" ] ; then
    tar Jxvf "${RESOURCE_DIR}/python-${PYTHON_VERSION}.tar.xz" -C "${BASE}"
fi
export LD_LIBRARY_PATH=${BASE}/.local/ssl/lib:$LD_LIBRARY_PATH
echo "Python setup"
# Create the virtual environment once; later runs reuse it.
if [ ! -e "${BASE}/ENV3/bin/activate" ]; then
    ${BASE}/.local/python/${PYTHON_VERSION}/bin/python3.10 -m venv ${BASE}/ENV3
fi
echo "ENV3 Setup"
source ${BASE}/ENV3/bin/activate
python -m pip install -U pip wheel papermill
# Clone repositories on first use, otherwise hard-reset to origin/main.
if [ ! -e "${BASE}/mlcommons-data-earthquake" ]; then
    git clone https://github.com/laszewsk/mlcommons-data-earthquake.git "${BASE}/mlcommons-data-earthquake"
else
    (cd ${BASE}/mlcommons-data-earthquake ; \
     git fetch origin ; \
     git checkout main ; \
     git reset --hard origin/main ; \
     git clean -d --force)
fi
if [ ! -e "${BASE}/mlcommons" ]; then
    git clone https://github.com/laszewsk/mlcommons.git "${BASE}/mlcommons"
else
    (cd ${BASE}/mlcommons ; \
     git fetch origin ; \
     git checkout main ; \
     git reset --hard origin/main ; \
     git clean -d --force)
fi
# Extract the data set once and prepare the outputs directory.
if [ ! -e ${BASE}/mlcommons/benchmarks/earthquake/data/EarthquakeDec2020 ]; then
    tar Jxvf ${BASE}/mlcommons-data-earthquake/data.tar.xz \
        -C ${BASE}/mlcommons/benchmarks/earthquake
    mkdir -p ${BASE}/mlcommons/benchmarks/earthquake/data/EarthquakeDec2020/outputs
fi
(cd ${BASE}/mlcommons/benchmarks/earthquake/${REV} && \
    python -m pip install -r requirements.txt)
# Work on a per-user copy of the notebook, then execute it with papermill.
(cd ${BASE}/mlcommons/benchmarks/earthquake/${REV} && \
   cp "FFFFWNPFEARTHQ_newTFTv29${VARIANT}.ipynb" FFFFWNPFEARTHQ_newTFTv29-$USER.ipynb)
(cd ${BASE}/mlcommons/benchmarks/earthquake/${REV} && \
   papermill FFFFWNPFEARTHQ_newTFTv29-$USER.ipynb FFFFWNPFEARTHQ_newTFTv29-$USER-$GPU_TYPE.ipynb --no-progress-bar --log-output --log-level INFO)
| false | true |
f72278214081544cda312e677f9ea50f74607e24 | 834 | py | Python | alipay/aop/api/domain/FinUserInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/FinUserInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/FinUserInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class FinUserInfo(object):
    """Alipay API payload object carrying a single user identifier."""

    def __init__(self):
        # Backing store for the `user_id` property.
        self._user_id = None

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serializes this object into the dict shape the Alipay gateway expects."""
        params = {}
        user_id = self.user_id
        if user_id:
            # Nested API objects serialize themselves via to_alipay_dict().
            if hasattr(user_id, 'to_alipay_dict'):
                params['user_id'] = user_id.to_alipay_dict()
            else:
                params['user_id'] = user_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Builds a FinUserInfo from a response dict; returns None for empty input."""
        if not d:
            return None
        instance = FinUserInfo()
        if 'user_id' in d:
            instance.user_id = d['user_id']
        return instance
| 20.341463 | 65 | 0.565947 |
import json
from alipay.aop.api.constant.ParamConstants import *
class FinUserInfo(object):
    """Alipay API payload object carrying a single user identifier."""

    def __init__(self):
        # Backing attribute for the `user_id` property.
        self._user_id = None

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        # Serialize to the dict shape expected by the Alipay gateway.
        params = dict()
        if self.user_id:
            # Nested API objects serialize themselves via to_alipay_dict().
            if hasattr(self.user_id, 'to_alipay_dict'):
                params['user_id'] = self.user_id.to_alipay_dict()
            else:
                params['user_id'] = self.user_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        # Empty/None input yields no object.
        if not d:
            return None
        o = FinUserInfo()
        if 'user_id' in d:
            o.user_id = d['user_id']
        return o
| true | true |
f722787f0f169fce7598cd63728f38b786bcd0f8 | 652 | py | Python | padawan/users/migrations/0006_auto_20161221_1742.py | Lordaeron12/padawan | 05b5b240d10538f31f4b45b67a75e8b7a096bc78 | [
"MIT"
] | null | null | null | padawan/users/migrations/0006_auto_20161221_1742.py | Lordaeron12/padawan | 05b5b240d10538f31f4b45b67a75e8b7a096bc78 | [
"MIT"
] | null | null | null | padawan/users/migrations/0006_auto_20161221_1742.py | Lordaeron12/padawan | 05b5b240d10538f31f4b45b67a75e8b7a096bc78 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-21 22:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters User.id_type: optional 3-char code for Colombian ID-document types."""

    dependencies = [
        ('users', '0005_auto_20161221_1710'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='id_type',
            # Choices are byte strings with UTF-8 escapes (generated by Django
            # 1.10 under Python 2); the labels are the Spanish document names.
            field=models.CharField(blank=True, choices=[(b'CC', b'C\xc3\xa9dula de ciudadan\xc3\xada'), (b'TI', b'Tarjeta de identidad'), (b'CE', b'C\xc3\xa9dula de extranjer\xc3\xada'), (b'NIT', b'NIT'), (b'PA', b'Pasaporte')], max_length=3, null=True),
        ),
    ]
| 31.047619 | 254 | 0.622699 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters User.id_type: optional 3-char code for Colombian ID-document types."""

    dependencies = [
        ('users', '0005_auto_20161221_1710'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='id_type',
            # Byte-string choices with UTF-8 escapes (Django 1.10 / Python 2 output).
            field=models.CharField(blank=True, choices=[(b'CC', b'C\xc3\xa9dula de ciudadan\xc3\xada'), (b'TI', b'Tarjeta de identidad'), (b'CE', b'C\xc3\xa9dula de extranjer\xc3\xada'), (b'NIT', b'NIT'), (b'PA', b'Pasaporte')], max_length=3, null=True),
        ),
    ]
| true | true |
f72278b9145e01c86e575754887d733dac60acdb | 5,194 | py | Python | examples/jobs/cobalt.py | wjlei1990/radical.saga | de022ea4fb29d95e8acffff8a68aa8648de807d4 | [
"MIT"
] | 12 | 2019-04-13T21:41:45.000Z | 2021-08-03T09:43:25.000Z | examples/jobs/cobalt.py | wjlei1990/radical.saga | de022ea4fb29d95e8acffff8a68aa8648de807d4 | [
"MIT"
] | 103 | 2019-04-10T14:23:41.000Z | 2022-03-15T19:43:56.000Z | examples/jobs/cobalt.py | wjlei1990/radical.saga | de022ea4fb29d95e8acffff8a68aa8648de807d4 | [
"MIT"
] | 7 | 2019-07-11T07:59:56.000Z | 2022-02-02T22:28:24.000Z | #!/usr/bin/env python
__author__ = "RADICAL Team"
__copyright__ = "Copyright 2012-2020, The SAGA Project"
__license__ = "MIT"
""" This examples shows how to run a job on a remote cluster
using the 'COBALT' job adaptor.
More information about the radical.saga job API can be found at:
http://radical-cybertools.github.com/radical.saga/doc/library/job/index.html
"""
import sys
import radical.saga as rs
js_url = "cobalt://localhost/"
# ------------------------------------------------------------------------------
#
def start():
    """Submits the example job to the Cobalt cluster and prints its status.

    Returns:
        -1 when a SAGA exception occurs; otherwise None (treated as exit
        code 0 by sys.exit in the __main__ dispatcher).
    """
    try:
        # Create a job service object that represent a remote cluster.
        js = rs.job.Service(js_url)
        # Next, we describe the job we want to run. A complete set of job
        # description attributes can be found in the API documentation.
        jd = rs.job.Description()
        jd.environment = {'FILENAME': 'testfile'}
        jd.wall_time_limit = 1 # minutes
        jd.executable = '/bin/touch'
        jd.arguments = ['$FILENAME']
        jd.name = "examplejob"
        jd.queue = "debug-cache-quad"
        jd.project = "CVD_Research"
        jd.working_directory = ".saga/test"
        jd.output = "examplejob.out"
        jd.error = "examplejob.err"
        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        job = js.create_job(jd)
        # Check our job's id and state
        print("Job State : %s" % (job.state))
        # Now we can start our job.
        print("starting job")
        job.run()
        print("Job ID : %s" % (job.id))
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
    except rs.SagaException as e:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (e.type, (str(e))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
def check(jobid):
    """Reconnects to `jobid` on the cluster and reports its current status.

    Returns -1 when a SAGA exception occurs; otherwise None.
    """
    try:
        service = rs.job.Service(js_url)
        # Every job the adaptor knows about; ours should be among them.
        print("Listing active jobs: ")
        for known_id in service.list():
            marker = ' * ' if known_id == jobid else ' - '
            print('%s%s' % (marker, known_id))
        # Reconnect to the requested job and dump its bookkeeping data.
        job = service.get_job(jobid)
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        service.close()
    except rs.SagaException as e:
        # Report the failure; the backtrace helps debug adaptor problems.
        print("An exception occured: (%s) %s " % (e.type, (str(e))))
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
def stop(jobid):
    """Reconnects to `jobid`, cancels it, and reports its final status.

    Returns:
        0 on success, -1 when a SAGA exception occurs.
    """
    try:
        # Create a job service object to the same cluster and reconnect to job
        js = rs.job.Service(js_url)
        job = js.get_job(jobid)
        print("Job ID : %s" % (job.id))
        print("Job State : %s" % (job.state))
        # BUGFIX: message previously read "cacnel job".
        print("cancel job")
        job.cancel()
        # wait for the job to reach a final state after cancellation
        print("wait for job")
        job.wait()
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
        return 0
    except rs.SagaException as e:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (e.type, (str(e))))
        # Keep the backtrace: helpful when debugging adaptor problems.
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
    # Dispatch on the sub-command; `check` and `stop` additionally need a
    # <jobid> argument, which was previously accessed without validation
    # (IndexError), and unknown sub-commands silently exited with status 0.
    usage = "\n\tusage: %s [start | check | stop] <jobid>\n" % sys.argv[0]
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(-1)
    command = sys.argv[1]
    if command == 'start':
        sys.exit(start())
    elif command in ('check', 'stop'):
        if len(sys.argv) < 3:
            # Missing the required <jobid> argument.
            print(usage)
            sys.exit(-1)
        sys.exit(check(sys.argv[2]) if command == 'check' else stop(sys.argv[2]))
    else:
        # Unknown sub-command.
        print(usage)
        sys.exit(-1)
# ------------------------------------------------------------------------------
| 29.68 | 80 | 0.506739 |
__author__ = "RADICAL Team"
__copyright__ = "Copyright 2012-2020, The SAGA Project"
__license__ = "MIT"
import sys
import radical.saga as rs
js_url = "cobalt://localhost/"
def start():
    """Submits the example job to the Cobalt cluster and prints its status.

    Returns -1 when a SAGA exception occurs; otherwise None.
    """
    try:
        # Job service object representing the remote cluster.
        js = rs.job.Service(js_url)
        # Describe the job to run; see the SAGA job API for all attributes.
        jd = rs.job.Description()
        jd.environment = {'FILENAME': 'testfile'}
        jd.wall_time_limit = 1  # minutes
        jd.executable = '/bin/touch'
        jd.arguments = ['$FILENAME']
        jd.name = "examplejob"
        jd.queue = "debug-cache-quad"
        jd.project = "CVD_Research"
        jd.working_directory = ".saga/test"
        jd.output = "examplejob.out"
        jd.error = "examplejob.err"
        # A freshly created job starts in state 'New'.
        job = js.create_job(jd)
        print("Job State : %s" % (job.state))
        # Now we can start our job.
        print("starting job")
        job.run()
        print("Job ID : %s" % (job.id))
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
    except rs.SagaException as e:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (e.type, (str(e))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
def check(jobid):
    """Reconnects to `jobid` on the cluster and reports its current status.

    Returns -1 when a SAGA exception occurs; otherwise None.
    """
    try:
        # Create a job service object to the same cluster
        js = rs.job.Service(js_url)
        # List all jobs that are known by the adaptor.
        # This should show our job as well.
        print("Listing active jobs: ")
        for jid in js.list():
            if jid == jobid:
                print(' * %s' % jid)
            else:
                print(' - %s' % jid)
        # reconnect to the given job
        job = js.get_job(jobid)
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
    except rs.SagaException as e:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (e.type, (str(e))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
def stop(jobid):
    """Reconnects to `jobid`, cancels it, and reports its final status.

    Returns:
        0 on success, -1 when a SAGA exception occurs.
    """
    try:
        # Create a job service object to the same cluster and reconnect to job
        js = rs.job.Service(js_url)
        job = js.get_job(jobid)
        print("Job ID : %s" % (job.id))
        print("Job State : %s" % (job.state))
        # BUGFIX: message previously read "cacnel job".
        print("cancel job")
        job.cancel()
        # wait for the job to reach a final state after cancellation
        print("wait for job")
        job.wait()
        print("Job State : %s" % job.state)
        print("Exitcode : %s" % job.exit_code)
        print("Exec. hosts : %s" % job.execution_hosts)
        print("Create time : %s" % job.created)
        print("Start time : %s" % job.started)
        print("End time : %s" % job.finished)
        js.close()
        return 0
    except rs.SagaException as e:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (e.type, (str(e))))
        # Keep the backtrace: helpful when debugging adaptor problems.
        print(" \n*** Backtrace:\n %s" % e.traceback)
        return -1
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
    # Dispatch on the sub-command; `check` and `stop` additionally need a
    # <jobid> argument, which was previously accessed without validation
    # (IndexError), and unknown sub-commands silently exited with status 0.
    usage = "\n\tusage: %s [start | check | stop] <jobid>\n" % sys.argv[0]
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(-1)
    command = sys.argv[1]
    if command == 'start':
        sys.exit(start())
    elif command in ('check', 'stop'):
        if len(sys.argv) < 3:
            # Missing the required <jobid> argument.
            print(usage)
            sys.exit(-1)
        sys.exit(check(sys.argv[2]) if command == 'check' else stop(sys.argv[2]))
    else:
        # Unknown sub-command.
        print(usage)
        sys.exit(-1)
# ------------------------------------------------------------------------------
| true | true |
f72279e7a45aeafb7dcac167948a1193e2044e12 | 1,386 | py | Python | tests/src/objects/test_qudit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
] | 2 | 2019-10-28T20:26:14.000Z | 2019-10-29T08:28:45.000Z | tests/src/objects/test_qudit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
] | 3 | 2019-10-28T09:19:27.000Z | 2019-10-28T13:42:08.000Z | tests/src/objects/test_qudit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
] | null | null | null | import pytest
from pytest_mock import mocker
from hamcrest import *
import numpy as np
from src.objects.quantum_system import SystemType
from src.objects.qudit import Qudit
from src.dirac_notation.constants import *
@pytest.mark.parametrize('input', [
(
comp_ket_x(0, 4)
)
])
def test_init(input):
system = Qudit(input)
assert_that(system.children_systems, equal_to(None))
assert_that(system.system_type, equal_to(SystemType.simple))
@pytest.mark.parametrize('input', [
(
ket_0
)
])
def test_init_fail(input):
try:
system = Qudit(input)
pytest.fail()
except AssertionError:
pass
@pytest.mark.parametrize('input_1,input_2', [
(
comp_ket_x(0, 8), comp_ket_x(0, 4)
)
])
def test_children_systems_1(input_1, input_2):
system = Qudit(input_1)
child_system = Qudit(input_2)
system.children_systems = [child_system]
assert_that(system.children_systems, equal_to([child_system]))
assert_that(system.system_type, equal_to(SystemType.product))
@pytest.mark.parametrize('input', [
(
comp_ket_x(0, 8)
)
])
def test_children_systems_2(input):
system = Qudit(input)
system.children_systems = []
system.children_systems = None
assert_that(system.children_systems, equal_to(None))
assert_that(system.system_type, equal_to(SystemType.simple)) | 22.354839 | 66 | 0.697691 | import pytest
from pytest_mock import mocker
from hamcrest import *
import numpy as np
from src.objects.quantum_system import SystemType
from src.objects.qudit import Qudit
from src.dirac_notation.constants import *
@pytest.mark.parametrize('input', [
(
comp_ket_x(0, 4)
)
])
def test_init(input):
system = Qudit(input)
assert_that(system.children_systems, equal_to(None))
assert_that(system.system_type, equal_to(SystemType.simple))
@pytest.mark.parametrize('input', [
(
ket_0
)
])
def test_init_fail(input):
try:
system = Qudit(input)
pytest.fail()
except AssertionError:
pass
@pytest.mark.parametrize('input_1,input_2', [
(
comp_ket_x(0, 8), comp_ket_x(0, 4)
)
])
def test_children_systems_1(input_1, input_2):
system = Qudit(input_1)
child_system = Qudit(input_2)
system.children_systems = [child_system]
assert_that(system.children_systems, equal_to([child_system]))
assert_that(system.system_type, equal_to(SystemType.product))
@pytest.mark.parametrize('input', [
(
comp_ket_x(0, 8)
)
])
def test_children_systems_2(input):
system = Qudit(input)
system.children_systems = []
system.children_systems = None
assert_that(system.children_systems, equal_to(None))
assert_that(system.system_type, equal_to(SystemType.simple)) | true | true |
f7227a990b2080feec7babd7076ac93ac69a025b | 2,737 | py | Python | onnxruntime/python/tools/transformers/quantize_helper.py | vpisarev/onnxruntime | bab9b80f1f2330d3a115e0abbb4d8278c2be3f44 | [
"MIT"
] | null | null | null | onnxruntime/python/tools/transformers/quantize_helper.py | vpisarev/onnxruntime | bab9b80f1f2330d3a115e0abbb4d8278c2be3f44 | [
"MIT"
] | null | null | null | onnxruntime/python/tools/transformers/quantize_helper.py | vpisarev/onnxruntime | bab9b80f1f2330d3a115e0abbb4d8278c2be3f44 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import torch
import onnx
import os
from transformers.modeling_utils import Conv1D
logger = logging.getLogger(__name__)
def _conv1d_to_linear(module):
    """Build a torch.nn.Linear equivalent to a transformers Conv1D layer.

    Conv1D stores its weight as (in_features, out_features) while nn.Linear
    expects (out_features, in_features), hence the transpose.
    """
    in_features, out_features = module.weight.shape
    replacement = torch.nn.Linear(in_features, out_features)
    replacement.weight.data = module.weight.data.T.contiguous()
    replacement.bias.data = module.bias.data
    return replacement
def conv1d_to_linear(model):
    """Recursively replace every transformers Conv1D submodule with an
    equivalent nn.Linear, in place.

    PyTorch dynamic quantization only recognizes nn.Linear, so Conv1D
    layers must be converted before quantize_dynamic is applied.
    """
    logger.debug("replace Conv1D with Linear")
    for name, child in list(model._modules.items()):
        if isinstance(child, Conv1D):
            model._modules[name] = _conv1d_to_linear(child)
        else:
            conv1d_to_linear(child)
def _get_size_of_pytorch_model(model):
    """Return the serialized size of ``model``'s state_dict in megabytes.

    Serializes into a private temporary directory instead of a hard-coded
    "temp.p" in the current working directory, so concurrent runs cannot
    clobber each other and no file is left behind if torch.save raises.
    """
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "model.p")
        torch.save(model.state_dict(), path)
        return os.path.getsize(path) / (1024 * 1024)
class QuantizeHelper:
    """Static helpers for dynamic (post-training) quantization of Torch and ONNX models."""
    @staticmethod
    def quantize_torch_model(model, dtype=torch.qint8):
        '''
        Apply PyTorch dynamic quantization to every nn.Linear in `model`.

        Usage: model = quantize_model(model)
        TODO: mix of in-place and return, but results are different

        Note: `model` itself is mutated (Conv1D layers are swapped for
        nn.Linear in place) while a separate quantized copy is returned.
        '''
        conv1d_to_linear(model)
        quantized_model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=dtype)
        logger.info(f'Size of full precision Torch model(MB):{_get_size_of_pytorch_model(model)}')
        logger.info(f'Size of quantized Torch model(MB):{_get_size_of_pytorch_model(quantized_model)}')
        return quantized_model
    @staticmethod
    def quantize_onnx_model(onnx_model_path, quantized_model_path, use_external_data_format=False):
        """Dynamically quantize an ONNX model file on disk.

        Writes the quantized model to `quantized_model_path`, creating parent
        directories as needed. `use_external_data_format` is forwarded to
        onnxruntime — presumably required for models whose tensor data lives
        outside the protobuf (very large models); confirm against the
        onnxruntime quantization docs.
        """
        # Imported lazily so importing this module does not require onnxruntime.
        from onnxruntime.quantization import quantize_dynamic
        from pathlib import Path
        Path(quantized_model_path).parent.mkdir(parents=True, exist_ok=True)
        logger.info(f'Size of full precision ONNX model(MB):{os.path.getsize(onnx_model_path)/(1024*1024)}')
        quantize_dynamic(onnx_model_path,
                         quantized_model_path,
                         use_external_data_format = use_external_data_format)
        logger.info(f"quantized model saved to:{quantized_model_path}")
        # TODO: include external data in total model size.
        logger.info(f'Size of quantized ONNX model(MB):{os.path.getsize(quantized_model_path)/(1024*1024)}')
| 38.549296 | 108 | 0.662769 |
import logging
import torch
import onnx
import os
from transformers.modeling_utils import Conv1D
logger = logging.getLogger(__name__)
def _conv1d_to_linear(module):
    """Convert a transformers Conv1D layer into an equivalent nn.Linear.

    The weight is transposed because Conv1D keeps (in, out) whereas
    nn.Linear keeps (out, in).
    """
    n_in, n_out = module.weight.shape
    linear_layer = torch.nn.Linear(n_in, n_out)
    linear_layer.weight.data = module.weight.data.T.contiguous()
    linear_layer.bias.data = module.bias.data
    return linear_layer
def conv1d_to_linear(model):
    """In-place, depth-first replacement of transformers Conv1D layers with
    nn.Linear (PyTorch dynamic quantization only handles nn.Linear)."""
    logger.debug("replace Conv1D with Linear")
    for name, child in list(model._modules.items()):
        if not isinstance(child, Conv1D):
            conv1d_to_linear(child)
        else:
            model._modules[name] = _conv1d_to_linear(child)
def _get_size_of_pytorch_model(model):
    """Serialize the model state_dict to a scratch file and report its size in MB."""
    import tempfile
    # A per-call temporary directory replaces the original hard-coded
    # "temp.p" in the CWD, which was race-prone and leaked on error.
    with tempfile.TemporaryDirectory() as scratch:
        target = os.path.join(scratch, "state.p")
        torch.save(model.state_dict(), target)
        return os.path.getsize(target) / (1024 * 1024)
class QuantizeHelper:
    """Static helpers for dynamic quantization of PyTorch and ONNX models."""
    @staticmethod
    def quantize_torch_model(model, dtype=torch.qint8):
        """Dynamically quantize all nn.Linear layers of `model`.

        Note: `model` is mutated in place (Conv1D -> nn.Linear) and a
        separate quantized copy is returned.
        """
        conv1d_to_linear(model)
        quantized_model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=dtype)
        logger.info(f'Size of full precision Torch model(MB):{_get_size_of_pytorch_model(model)}')
        logger.info(f'Size of quantized Torch model(MB):{_get_size_of_pytorch_model(quantized_model)}')
        return quantized_model
    @staticmethod
    def quantize_onnx_model(onnx_model_path, quantized_model_path, use_external_data_format=False):
        """Quantize an on-disk ONNX model, writing to `quantized_model_path`.

        `use_external_data_format` is passed through to onnxruntime —
        presumably needed when tensor data is stored outside the protobuf
        (very large models); confirm against the onnxruntime docs.
        """
        # Lazy import: onnxruntime is only required when this helper is used.
        from onnxruntime.quantization import quantize_dynamic
        from pathlib import Path
        Path(quantized_model_path).parent.mkdir(parents=True, exist_ok=True)
        logger.info(f'Size of full precision ONNX model(MB):{os.path.getsize(onnx_model_path)/(1024*1024)}')
        quantize_dynamic(onnx_model_path,
                         quantized_model_path,
                         use_external_data_format = use_external_data_format)
        logger.info(f"quantized model saved to:{quantized_model_path}")
        logger.info(f'Size of quantized ONNX model(MB):{os.path.getsize(quantized_model_path)/(1024*1024)}')
| true | true |
f7227aa85ca1c59d3b21afa2dec9bf48aab8225c | 405 | py | Python | src/process/models/viewmodels/integration/CreateDataIntegrationConnectionFileCsvModel.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 14 | 2020-12-19T15:06:13.000Z | 2022-01-12T19:52:17.000Z | src/process/models/viewmodels/integration/CreateDataIntegrationConnectionFileCsvModel.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 43 | 2021-01-06T22:05:22.000Z | 2022-03-10T10:30:30.000Z | src/process/models/viewmodels/integration/CreateDataIntegrationConnectionFileCsvModel.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 4 | 2020-12-18T23:10:09.000Z | 2021-04-02T13:03:12.000Z | from infrastructure.json.JsonConvert import JsonConvert
@JsonConvert.register
class CreateDataIntegrationConnectionFileModel:
    """View model describing how a delimited connection file (CSV) is read.

    NOTE(review): the module filename suggests this class was meant to be
    CreateDataIntegrationConnectionFileCsvModel — confirm before renaming,
    since JSON conversion is registered against the class.
    """
    def __init__(self,
                 HasHeader: bool = None,
                 Header: str = None,
                 Separator: str = None,
                 ):
        # All fields are optional; None means "not supplied by the caller".
        self.HasHeader: bool = HasHeader
        self.Header: str = Header
        self.Separator: str = Separator
| 28.928571 | 55 | 0.604938 | from infrastructure.json.JsonConvert import JsonConvert
@JsonConvert.register
class CreateDataIntegrationConnectionFileModel:
    """Request/view model for a file-based (CSV) data integration connection.

    NOTE(review): class name differs from the module filename
    (…FileCsvModel) — verify intent before changing either.
    """
    def __init__(self,
                 HasHeader: bool = None,
                 Header: str = None,
                 Separator: str = None,
                 ):
        # Every field defaults to None ("not specified").
        self.HasHeader: bool = HasHeader
        self.Header: str = Header
        self.Separator: str = Separator
| true | true |
f7227b1aaa44648fa67f742da8bed95f400a4d64 | 2,127 | py | Python | assignments/assignment2/my_NB_hint.py | CryptoTheSuperDog/fds | 12795290f5784eb5d218a648aee4edbcfa890078 | [
"MIT"
] | 17 | 2020-08-05T16:47:24.000Z | 2022-02-28T19:52:49.000Z | assignments/assignment2/my_NB_hint.py | CryptoTheSuperDog/fds | 12795290f5784eb5d218a648aee4edbcfa890078 | [
"MIT"
] | 98 | 2020-08-11T19:05:39.000Z | 2021-05-05T14:10:24.000Z | assignments/assignment2/my_NB_hint.py | CryptoTheSuperDog/fds | 12795290f5784eb5d218a648aee4edbcfa890078 | [
"MIT"
] | 16 | 2020-08-15T12:18:16.000Z | 2021-04-08T13:53:25.000Z | import pandas as pd
import numpy as np
from collections import Counter
class my_NB:
    """Categorical Naive Bayes classifier with Laplace/Lidstone smoothing.

    Features are treated as categorical (str); labels may be int or str.
    """

    def __init__(self, alpha=1):
        # alpha: smoothing factor
        # P(xi = t | y = c) = (N(t,c) + alpha) / (N(c) + n(i)*alpha)
        # where n(i) is the number of available categories (values) of feature i
        # Setting alpha = 1 is called Laplace smoothing
        self.alpha = alpha

    def fit(self, X, y):
        # X: pd.DataFrame, independent variables, str
        # y: list, np.array or pd.Series, dependent variables, int or str
        # list of classes for this model
        self.classes_ = list(set(list(y)))
        # for calculation of P(y)
        self.P_y = Counter(y)
        # self.P[yj][Xi][xi] = P(xi|yj): smoothed with self.alpha so that a
        # category never seen together with class yj still gets non-zero mass.
        labels = pd.Series(list(y), index=X.index)
        self.P = {}
        for c in self.classes_:
            self.P[c] = {}
            n_c = self.P_y[c]
            for col in X.columns:
                categories = X[col].unique()
                n_i = len(categories)
                counts = Counter(X.loc[labels == c, col])
                self.P[c][col] = {
                    t: (counts[t] + self.alpha) / (n_c + n_i * self.alpha)
                    for t in categories
                }
        return

    def predict_proba(self, X):
        # X: pd.DataFrame, independent variables, str
        # prob is a dict of prediction probabilities belonging to each categories
        # return probs = pd.DataFrame(list of prob, columns = self.classes_)
        # P(yj|x) = P(x|yj)P(yj)/P(x)
        # P(x|yj) = P(x1|yj)P(x2|yj)...P(xk|yj) = self.P[yj][X1][x1]*self.P[yj][X2][x2]*...*self.P[yj][Xk][xk]
        probs = {}
        for label in self.classes_:
            p = self.P_y[label]
            for key in X:
                # Unseen feature values contribute a neutral factor of 1.
                p *= X[key].apply(lambda value: self.P[label][key][value] if value in self.P[label][key] else 1)
            probs[label] = p
        probs = pd.DataFrame(probs, columns=self.classes_)
        sums = probs.sum(axis=1)
        probs = probs.apply(lambda v: v / sums)
        return probs

    def predict(self, X):
        # X: pd.DataFrame, independent variables, str
        # return predictions: list
        # predicted class is the class with highest prediction probability
        probs = self.predict_proba(X)
        predictions = probs.idxmax(axis=1).tolist()
        return predictions
| 34.868852 | 126 | 0.595675 | import pandas as pd
import numpy as np
from collections import Counter
class my_NB:
    """Naive Bayes for categorical features with additive (Lidstone) smoothing."""

    def __init__(self, alpha=1):
        # Smoothing factor: P(xi=t|y=c) = (N(t,c)+alpha) / (N(c)+n(i)*alpha),
        # n(i) being the number of categories of feature i. alpha=1 = Laplace.
        self.alpha = alpha

    def fit(self, X, y):
        """Estimate class priors P(y) and smoothed conditionals P(xi|yj)."""
        self.classes_ = list(set(list(y)))
        self.P_y = Counter(y)
        y_idx = pd.Series(list(y), index=X.index)
        self.P = {}
        for label in self.classes_:
            class_rows = X[y_idx == label]
            class_total = self.P_y[label]
            feature_tables = {}
            for feature in X.columns:
                values = X[feature].unique()
                observed = Counter(class_rows[feature])
                denom = class_total + len(values) * self.alpha
                feature_tables[feature] = {
                    value: (observed[value] + self.alpha) / denom
                    for value in values
                }
            self.P[label] = feature_tables
        return

    def predict_proba(self, X):
        """Return a DataFrame of normalized class probabilities (columns = classes)."""
        probs = {}
        for label in self.classes_:
            p = self.P_y[label]
            for key in X:
                # Values never seen during fit contribute a neutral factor of 1.
                p *= X[key].apply(lambda value: self.P[label][key][value] if value in self.P[label][key] else 1)
            probs[label] = p
        probs = pd.DataFrame(probs, columns=self.classes_)
        sums = probs.sum(axis=1)
        probs = probs.apply(lambda v: v / sums)
        return probs

    def predict(self, X):
        """Return the most probable class label per row of X, as a list."""
        probs = self.predict_proba(X)
        return probs.idxmax(axis=1).tolist()
| true | true |
f7227c7ba9bcd083859d4202044f6b22b7cbd5d6 | 3,260 | py | Python | joj/horse/models/problem.py | joint-online-judge/horse | ec08ecd0528f6a4fad3fa5f5932aef1495721437 | [
"MIT"
] | 6 | 2020-12-28T07:05:52.000Z | 2022-01-16T04:44:02.000Z | joj/horse/models/problem.py | joint-online-judge/horse | ec08ecd0528f6a4fad3fa5f5932aef1495721437 | [
"MIT"
] | 56 | 2021-02-02T02:21:52.000Z | 2022-03-13T02:39:05.000Z | joj/horse/models/problem.py | joint-online-judge/horse | ec08ecd0528f6a4fad3fa5f5932aef1495721437 | [
"MIT"
] | 3 | 2021-01-28T17:52:58.000Z | 2021-12-17T17:42:42.000Z | from typing import TYPE_CHECKING, List, Optional, Type
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.link_tables import ProblemProblemSetLink
from joj.horse.schemas.problem import ProblemDetail, WithLatestRecordType
from joj.horse.services.db import db_session
if TYPE_CHECKING:
from joj.horse.models import (
Domain,
ProblemConfig,
ProblemGroup,
ProblemSet,
Record,
User,
)
class Problem(DomainURLORMModel, ProblemDetail, table=True):  # type: ignore[call-arg]
    """ORM model for a problem inside a domain.

    A problem's `url` is unique per domain (see __table_args__) and is
    normalized by the `url_pre_save` SQLAlchemy listeners.
    """

    __tablename__ = "problems"
    __table_args__ = (UniqueConstraint("domain_id", "url"),)

    domain_id: UUID = Field(
        sa_column=Column(
            GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
        )
    )
    domain: "Domain" = Relationship(back_populates="problems")

    owner_id: Optional[UUID] = Field(
        sa_column=Column(
            GUID, ForeignKey("users.id", ondelete="SET NULL"), nullable=True
        )
    )
    owner: Optional["User"] = Relationship(back_populates="owned_problems")

    problem_group_id: Optional[UUID] = Field(
        sa_column=Column(
            GUID, ForeignKey("problem_groups.id", ondelete="SET NULL"), nullable=True
        )
    )
    problem_group: Optional["ProblemGroup"] = Relationship(back_populates="problems")

    problem_sets: List["ProblemSet"] = Relationship(
        back_populates="problems",
        link_model=ProblemProblemSetLink,
    )
    problem_problem_set_links: List[ProblemProblemSetLink] = Relationship(
        back_populates="problem",
    )
    records: List["Record"] = Relationship(back_populates="problem")
    problem_configs: List["ProblemConfig"] = Relationship(back_populates="problem")

    @classmethod
    async def get_problems_with_record_states(
        cls,
        result_cls: Type[WithLatestRecordType],
        problem_set_id: Optional[UUID],
        problems: List["Problem"],
        user_id: UUID,
    ) -> List[WithLatestRecordType]:
        """Pair each problem with the user's latest record for it.

        Returns one `result_cls` per problem, in the order of `problems`.
        """
        from joj.horse import models

        problem_ids = [problem.id for problem in problems]
        records = await models.Record.get_user_latest_records(
            problem_set_id=problem_set_id, problem_ids=problem_ids, user_id=user_id
        )
        # zip pairs each problem with its record directly; the previous
        # enumerate-based loop indexed problems[i]/records[i] and left its
        # loop variable unused, while also rebinding the `problems` parameter.
        return [
            result_cls(**problem.dict(), latest_record=record)
            for problem, record in zip(problems, records)
        ]

    async def get_latest_problem_config(self) -> Optional["ProblemConfig"]:
        """Fetch the most recently created ProblemConfig for this problem, if any."""
        from joj.horse import models

        statement = (
            models.ProblemConfig.sql_select()
            .where(models.ProblemConfig.problem_id == self.id)
            .order_by(models.ProblemConfig.created_at.desc())  # type: ignore
            .limit(1)
        )
        async with db_session() as session:
            results = await session.exec(statement)
            return results.one_or_none()
# Normalize/derive the `url` field via url_pre_save before every INSERT and UPDATE.
event.listen(Problem, "before_insert", url_pre_save)
event.listen(Problem, "before_update", url_pre_save)
| 33.608247 | 86 | 0.677914 | from typing import TYPE_CHECKING, List, Optional, Type
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.link_tables import ProblemProblemSetLink
from joj.horse.schemas.problem import ProblemDetail, WithLatestRecordType
from joj.horse.services.db import db_session
if TYPE_CHECKING:
from joj.horse.models import (
Domain,
ProblemConfig,
ProblemGroup,
ProblemSet,
Record,
User,
)
class Problem(DomainURLORMModel, ProblemDetail, table=True):
    """SQLModel table for problems; `url` is unique within a domain and is
    kept normalized by the `url_pre_save` listeners registered on this model."""

    __tablename__ = "problems"
    __table_args__ = (UniqueConstraint("domain_id", "url"),)

    domain_id: UUID = Field(
        sa_column=Column(
            GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
        )
    )
    domain: "Domain" = Relationship(back_populates="problems")

    owner_id: Optional[UUID] = Field(
        sa_column=Column(
            GUID, ForeignKey("users.id", ondelete="SET NULL"), nullable=True
        )
    )
    owner: Optional["User"] = Relationship(back_populates="owned_problems")

    problem_group_id: Optional[UUID] = Field(
        sa_column=Column(
            GUID, ForeignKey("problem_groups.id", ondelete="SET NULL"), nullable=True
        )
    )
    problem_group: Optional["ProblemGroup"] = Relationship(back_populates="problems")

    problem_sets: List["ProblemSet"] = Relationship(
        back_populates="problems",
        link_model=ProblemProblemSetLink,
    )
    problem_problem_set_links: List[ProblemProblemSetLink] = Relationship(
        back_populates="problem",
    )
    records: List["Record"] = Relationship(back_populates="problem")
    problem_configs: List["ProblemConfig"] = Relationship(back_populates="problem")

    @classmethod
    async def get_problems_with_record_states(
        cls,
        result_cls: Type[WithLatestRecordType],
        problem_set_id: Optional[UUID],
        problems: List["Problem"],
        user_id: UUID,
    ) -> List[WithLatestRecordType]:
        """Return one `result_cls` per problem, carrying the user's latest record."""
        from joj.horse import models

        problem_ids = [problem.id for problem in problems]
        records = await models.Record.get_user_latest_records(
            problem_set_id=problem_set_id, problem_ids=problem_ids, user_id=user_id
        )
        # zip replaces the previous enumerate-and-index loop, whose loop
        # variable was unused and which rebound the `problems` parameter.
        return [
            result_cls(**problem.dict(), latest_record=record)
            for problem, record in zip(problems, records)
        ]

    async def get_latest_problem_config(self) -> Optional["ProblemConfig"]:
        """Return the newest ProblemConfig for this problem, or None."""
        from joj.horse import models

        statement = (
            models.ProblemConfig.sql_select()
            .where(models.ProblemConfig.problem_id == self.id)
            .order_by(models.ProblemConfig.created_at.desc())
            .limit(1)
        )
        async with db_session() as session:
            results = await session.exec(statement)
            return results.one_or_none()
# Run url_pre_save before INSERT/UPDATE so the `url` column stays normalized.
event.listen(Problem, "before_insert", url_pre_save)
event.listen(Problem, "before_update", url_pre_save)
| true | true |
f7227cd64f6d565b915ee1f7395f067c2c40c918 | 12,666 | py | Python | plistutils/alias.py | sathwikv143/plistutils | fc7783449da1ed222547ceb5c416402216fa9b34 | [
"BSD-3-Clause"
] | 35 | 2017-10-17T17:24:16.000Z | 2022-03-18T22:10:47.000Z | plistutils/alias.py | sathwikv143/plistutils | fc7783449da1ed222547ceb5c416402216fa9b34 | [
"BSD-3-Clause"
] | 1 | 2021-07-09T01:06:30.000Z | 2021-07-09T01:06:30.000Z | plistutils/alias.py | sathwikv143/plistutils | fc7783449da1ed222547ceb5c416402216fa9b34 | [
"BSD-3-Clause"
] | 4 | 2018-11-17T15:52:36.000Z | 2022-02-28T08:01:14.000Z | import binascii
import logging
import struct
from plistutils.utils import HFS_EPOCH_FROM_UNIX_SHIFT, interpret_flags, NamedStruct, parse_timestamp
logger = logging.getLogger(__name__)
class AliasParser(object):
    """
    Parser for macOS Finder Alias records (versions 2 and 3).

    Note that for...
    v2: volume_creation_date, creation_date, and volume_name are present in both the main struct and named fields.
    v3: volume_creation_date and creation_date are present in both the main struct and named fields.
    The named fields value is higher resolution in all cases, and will overwrite the main struct value, if present.

    References
    https://opensource.apple.com/source/CarbonHeaders/CarbonHeaders-8A428/Aliases.h
    http://dubeyko.com/development/FileSystems/HFSPLUS/hexdumps/hfsplus_volume_header.html
    """
    HEADER = struct.Struct('> 4sHH')

    ALIASV3 = NamedStruct('AliasV3', '>', [
        ('is_directory', 'H'),
        ('volume_creation_date', '8s'),
        ('signature_fsid', '4s'),
        ('_unknown_x16', '2x'),  # Maybe disk type - See comments above DISK_TYPES
        ('parent_inode', 'I'),   # 0xFFFFFFFF for none (e.g. alias points to volume)
        ('target_inode', 'I'),   # 0xFFFFFFFF for none (e.g. alias points to volume)
        ('creation_date', '8s'),
        ('volume_flags', 'I'),
        ('_unknown_x2c', '14x'),
    ])

    ALIASV2 = NamedStruct('AliasV2', '>', [
        ('is_directory', 'H'),
        ('_volume_name_length', 'x'),
        ('volume_name', '27s'),  # first octet is volume length
        ('volume_creation_date', 'I'),
        ('signature', '2s'),
        ('disk_type', 'H'),
        ('parent_inode', 'I'),   # 0xFFFFFFFF for none (e.g. alias points to volume)
        ('_filename_len', 'x'),
        ('target_filename', '63s'),
        ('target_inode', 'I'),   # 0xFFFFFFFF for none (e.g. alias points to volume)
        ('creation_date', 'I'),
        ('application', '4s'),   # creator code (4CC)
        ('target_type', '4s'),
        ('alias_to_root_depth', 'H'),
        ('root_to_target_depth', 'H'),
        ('volume_flags', 'I'),
        ('filesystem_id', '2s'),
        ('_unknown_x2c', '10x'),
    ])

    # getattrlist and statfs specify signature as 32-bit when on 64-bit systems
    SIGNATURE_FSID = {
        b'BDcu': 'UDF (CD/DVD)',
        b'BDIS': 'FAT32',
        b'BDxF': 'exFAT',
        b'HX\x00\x00': 'HFSX',
        b'H+\x00\x00': 'HFS+',
        b'KG\x00\x00': 'FTP',
        b'NTcu': 'NTFS'
    }

    # Disk Types are known good for Alias v2.
    #
    # These don't seem quite right for Alias v3, though:
    # - 0 samples look correct ('Fixed')
    # - we have one sample for 1 which is a USB thumb drive
    # - we have one sample for 5 which is a DMG
    DISK_TYPES = {
        0: 'Fixed',
        1: 'Network',
        2: '400KB Floppy',
        3: '800KB Floppy',
        4: '1.44MB Floppy',
        5: 'Ejectable'
    }

    ALIAS_FLAGS = [
        (0x0002, 'IsEjectable'),
        (0x0020, 'IsBootVolume'),
        (0x0080, 'IsAutomounted'),
        (0x0100, 'HasPersistentFileIds')
    ]

    @classmethod
    def named_fields(cls):
        # Named fields appear to be the same between v2 and v3.
        # Fields with a decoder of None are ignored.
        return {
            0x0000: ('folder_name', cls.decode_utf8),
            0x0001: ('cnid_path', cls.decode_cnid_path),
            0x0002: ('hfs_path', cls.decode_utf8),
            0x0003: ('appleshare_zone', None),
            0x0004: ('appleshare_server_name', None),
            0x0005: ('appleshare_username', None),
            0x0006: ('driver_name', cls.decode_utf8),
            # 0x0007: ?
            # 0x0008: ?
            0x0009: ('network_mount_info', None),
            0x000A: ('dialup_info', None),
            # 0x000B: ?
            # 0x000C: ?
            # 0x000D: ?
            0x000E: ('target_filename', cls.decode_hfs_unicode_str),
            0x000F: ('volume_name', cls.decode_hfs_unicode_str),
            0x0010: ('volume_creation_date', cls.decode_hfs_epoch_date),
            0x0011: ('creation_date', cls.decode_hfs_epoch_date),
            0x0012: ('path', cls.decode_utf8),
            0x0013: ('volume_mount_point', cls.decode_utf8),
            0x0014: ('alias_data', lambda buf, offset, length: buf[offset:offset + length]),
            0x0015: ('user_home_prefix_length', None)  # does anyone care about this? struct.unpack('>H')
        }

    @classmethod
    def parse(cls, fullpath, idx, buf):
        """
        :param fullpath: Full path to file, used for logging only
        :param idx: Index enumerated from original plist structure, used for reference only
        :param buf: Alias binary blob
        :return: generator of dictionaries containing parsed data
        """
        supported_versions = {
            2: cls.ALIASV2,
            3: cls.ALIASV3
        }
        if buf is None or len(buf) < cls.HEADER.size:
            return
        app_info, record_length, version = cls.HEADER.unpack_from(buf)
        # NOTE: stdlib logging interpolates lazily with %-style placeholders;
        # the previous '{}' placeholders made record.getMessage() raise at log
        # time ("not all arguments converted during string formatting").
        if app_info != b'\x00\x00\x00\x00':
            logger.warning("Alias data unexpected app info '%s', please report.", app_info)
        if record_length != len(buf):
            logger.warning("Alias data unexpected size in '%s': expected %d bytes, got %d bytes.",
                           fullpath, record_length, len(buf))
        if version not in supported_versions:
            logger.error("Unsupported Alias version (%s) in '%s', please report.", version, fullpath)
            return
        yield from cls.parse_version(fullpath, idx, buf, cls.HEADER.size, supported_versions[version])

    @classmethod
    def parse_version(cls, fullpath, idx, buf, offset, version_struct):
        """Parse one alias record with the given version struct, yielding dicts
        (including any embedded alias records found in field 0x0014)."""
        buf_len = len(buf)
        try:
            record = version_struct.parse_as_dict(buf, offset)
        except struct.error:
            logger.debug("Could not decode alias data in file '%s'.", fullpath)
            # Bare return ends the generator; the previous `return {}` value
            # was never consumed by any caller.
            return
        cur_offset = offset + version_struct.size
        loop_ct = 0
        # Iterate field list with a hard cap on iterations.
        # We only know of 22 fields (and we don't know what all of those are),
        # but maybe there are more we don't know about.
        while cur_offset < buf_len and loop_ct < 50:
            cur_offset = cls.decode_field(fullpath, buf, cur_offset, record)
            loop_ct += 1
        cls.decode_ascii_fields(record)
        cls.decode_dates(record)
        cls.filter_cnids(record)
        cls.filter_levels(record)
        cls.join_path_mount(record)
        record['is_directory'] = bool(record['is_directory'])
        if record.get('signature_fsid') is None:
            record['signature_fsid'] = record.pop('signature') + record.pop('filesystem_id')
        record['filesystem_description'] = cls.SIGNATURE_FSID.get(record['signature_fsid'], 'Unknown')
        if 'disk_type' in record:
            record['disk_type_description'] = cls.DISK_TYPES.get(record['disk_type'], 'Unknown')
        record['signature_fsid'] = cls.decode_utf8(record['signature_fsid'], 0, None)
        record['volume_flags'] = interpret_flags(record.pop('volume_flags', None), cls.ALIAS_FLAGS)
        record['bookmark_index'] = idx
        alias_data = record.pop('alias_data', None)
        yield record
        if alias_data:
            try:
                yield from AliasParser.parse(fullpath, idx, alias_data)
            except RecursionError:
                logger.error("Could not fully parse embedded alias data due to depth, please report.")

    @classmethod
    def decode_field(cls, fullpath, buf, offset, record):
        """
        2-byte field ID, followed by 2-byte length
        length must be padded to a multiple of 2 to find next offset
        e.g. b'\x00\x13\x00\x01\x2F\x00' denotes:
          - field 0x13 ('volume_mount_point')
          - data length of 1 byte
          - decoded value of '/'
          - total length of 2 bytes
        """
        cur_offset = offset
        field_id, length = struct.unpack_from('>HH', buf, cur_offset)
        cur_offset += 4
        if field_id != 0xFFFF and length > 0:
            field_name, decoder = cls.named_fields().get(field_id, (None, None))
            if decoder:
                try:
                    record[field_name] = decoder(buf, cur_offset, length)
                except Exception as e:
                    logger.debug("Could not decode field '%s' in file '%s': %s.", field_name, fullpath, e)
            elif field_name is None:
                logger.warning("Unexpected field tag %s in Alias data for %s, please report.", field_id, fullpath)
        cur_offset += length + length % 2
        return cur_offset

    @classmethod
    def decode_utf8(cls, buf, offset, length):
        """
        In Alias v2 data, some path strings contain ':\x00' as a separator. Other tools
        include the \x00 in output, which seems useless/careless.
        Falls back to a hex string if the bytes are not valid UTF-8.
        """
        if length:
            raw = buf[offset:offset + length]
        else:
            raw = buf[offset:]
        try:
            return raw.decode('utf-8').replace('\x00', '')
        except UnicodeDecodeError:
            return binascii.hexlify(raw).decode('ascii')

    @classmethod
    def decode_ascii_fields(cls, record):
        """Decode the 4CC byte fields to ASCII in place (hex fallback)."""
        fields = ['application', 'target_type']
        for f in fields:
            if f in record and isinstance(record[f], bytes):
                val = record[f]
                try:
                    record[f] = val.decode('ascii')
                except UnicodeDecodeError:
                    record[f] = binascii.hexlify(val).decode('ascii')

    @classmethod
    def decode_hfs_unicode_str(cls, buf, offset, _length):
        # HFSUniStr255 - a string of up to 255 16-bit Unicode characters,
        # with a preceding 16-bit length (number of characters)
        cur_offset = offset
        uni_str_len = struct.Struct('>H')
        char_count = uni_str_len.unpack_from(buf, cur_offset)[0]
        cur_offset += uni_str_len.size
        hfs_unicode_str = buf[cur_offset:cur_offset + (char_count * 2)].decode('utf-16-be')
        return hfs_unicode_str

    @classmethod
    def decode_cnid_path(cls, buf, offset, length):
        """Decode a run of 32-bit CNIDs into a '/'-joined string."""
        path = None
        if length % 4 != 0:
            logger.warning(
                "Unable to parse CNIDs from alias data. Expected multiple of 4 bytes, but got %d. Please report.", length)
        elif length:
            path = '/'.join([str(x) for x in struct.unpack('>{}I'.format(length // 4), buf[offset:offset + length])])
        return path

    @classmethod
    def decode_hfs_epoch_date(cls, buf, offset, length=8, struct_endian='>'):
        """
        Args:
            buf: bytes object containing the HFS timestamp
            offset: int offset within the buf
            length: number of bytes to read
            struct_endian: endianness to use when reading values (MS Office 2011 Access Date is LE)
        Returns: datetime.datetime
        """
        timestamp = buf[offset:offset + length]
        high = struct.unpack('{}H'.format(struct_endian), timestamp[0:2])[0]
        low = struct.unpack('{}I'.format(struct_endian), timestamp[2:6])[0]
        fraction = struct.unpack('{}H'.format(struct_endian), timestamp[6:8])[0]
        return cls.combine_hfs_datetime(high, low, fraction)

    @classmethod
    def combine_hfs_datetime(cls, high_seconds, low_seconds, fraction):
        """Combine the split HFS seconds + fraction into a datetime (None on failure)."""
        # NOTE(review): the 65535 multiplier/divisor pair matches parse_timestamp's
        # contract here; confirm against the HFS+ spec if precision matters.
        seconds = ((high_seconds << 32) + low_seconds) * 65535 + fraction
        try:
            return parse_timestamp(seconds, 65535, HFS_EPOCH_FROM_UNIX_SHIFT) if seconds else None
        except Exception:
            return None

    @classmethod
    def decode_dates(cls, record):
        """Convert any still-raw 8-byte HFS dates in the record to datetimes."""
        for field_name in ['creation_date', 'volume_creation_date']:
            if field_name in record and isinstance(record[field_name], bytes):
                record[field_name] = cls.decode_hfs_epoch_date(record.pop(field_name), 0, 8)

    @classmethod
    def filter_cnids(cls, record):
        """Replace the 0xFFFFFFFF 'no CNID' sentinel with None."""
        for cnid in ['parent_inode', 'target_inode']:
            if cnid in record:
                record[cnid] = None if record[cnid] == 0xFFFFFFFF else record[cnid]

    @classmethod
    def filter_levels(cls, record):
        """Replace the 0xFFFF 'no depth' sentinel with None."""
        for level in ['alias_to_root_depth', 'root_to_target_depth']:
            if record.get(level) == 0xFFFF:
                record[level] = None

    @classmethod
    def join_path_mount(cls, record):
        """Prefix the relative path with the volume mount point, if known."""
        mount = record.get('volume_mount_point')
        if mount:
            path = record.get('path') or ''
            if not mount.endswith('/') and path:
                mount += '/'
            record['path'] = mount + path
| 40.466454 | 137 | 0.596084 | import binascii
import logging
import struct
from plistutils.utils import HFS_EPOCH_FROM_UNIX_SHIFT, interpret_flags, NamedStruct, parse_timestamp
logger = logging.getLogger(__name__)
class AliasParser(object):
    """Parser for macOS Finder Alias records (versions 2 and 3).

    For v2, volume_creation_date / creation_date / volume_name exist both in
    the fixed struct and in named fields; for v3, the dates do. The named
    field values are higher resolution and overwrite the struct values.
    """
    HEADER = struct.Struct('> 4sHH')

    ALIASV3 = NamedStruct('AliasV3', '>', [
        ('is_directory', 'H'),
        ('volume_creation_date', '8s'),
        ('signature_fsid', '4s'),
        ('_unknown_x16', '2x'),
        ('parent_inode', 'I'),   # 0xFFFFFFFF for none
        ('target_inode', 'I'),   # 0xFFFFFFFF for none
        ('creation_date', '8s'),
        ('volume_flags', 'I'),
        ('_unknown_x2c', '14x'),
    ])

    ALIASV2 = NamedStruct('AliasV2', '>', [
        ('is_directory', 'H'),
        ('_volume_name_length', 'x'),
        ('volume_name', '27s'),
        ('volume_creation_date', 'I'),
        ('signature', '2s'),
        ('disk_type', 'H'),
        ('parent_inode', 'I'),   # 0xFFFFFFFF for none
        ('_filename_len', 'x'),
        ('target_filename', '63s'),
        ('target_inode', 'I'),   # 0xFFFFFFFF for none
        ('creation_date', 'I'),
        ('application', '4s'),   # creator code (4CC)
        ('target_type', '4s'),
        ('alias_to_root_depth', 'H'),
        ('root_to_target_depth', 'H'),
        ('volume_flags', 'I'),
        ('filesystem_id', '2s'),
        ('_unknown_x2c', '10x'),
    ])

    SIGNATURE_FSID = {
        b'BDcu': 'UDF (CD/DVD)',
        b'BDIS': 'FAT32',
        b'BDxF': 'exFAT',
        b'HX\x00\x00': 'HFSX',
        b'H+\x00\x00': 'HFS+',
        b'KG\x00\x00': 'FTP',
        b'NTcu': 'NTFS'
    }

    # Known good for Alias v2; less certain for v3:
    # - 0 samples look correct ('Fixed')
    # - we have one sample for 1 which is a USB thumb drive
    # - we have one sample for 5 which is a DMG
    DISK_TYPES = {
        0: 'Fixed',
        1: 'Network',
        2: '400KB Floppy',
        3: '800KB Floppy',
        4: '1.44MB Floppy',
        5: 'Ejectable'
    }

    ALIAS_FLAGS = [
        (0x0002, 'IsEjectable'),
        (0x0020, 'IsBootVolume'),
        (0x0080, 'IsAutomounted'),
        (0x0100, 'HasPersistentFileIds')
    ]

    @classmethod
    def named_fields(cls):
        # Named fields appear to be the same between v2 and v3.
        # Fields with a decoder of None are ignored.
        return {
            0x0000: ('folder_name', cls.decode_utf8),
            0x0001: ('cnid_path', cls.decode_cnid_path),
            0x0002: ('hfs_path', cls.decode_utf8),
            0x0003: ('appleshare_zone', None),
            0x0004: ('appleshare_server_name', None),
            0x0005: ('appleshare_username', None),
            0x0006: ('driver_name', cls.decode_utf8),
            # 0x0007: ?
            # 0x0008: ?
            0x0009: ('network_mount_info', None),
            0x000A: ('dialup_info', None),
            # 0x000B: ?
            # 0x000C: ?
            # 0x000D: ?
            0x000E: ('target_filename', cls.decode_hfs_unicode_str),
            0x000F: ('volume_name', cls.decode_hfs_unicode_str),
            0x0010: ('volume_creation_date', cls.decode_hfs_epoch_date),
            0x0011: ('creation_date', cls.decode_hfs_epoch_date),
            0x0012: ('path', cls.decode_utf8),
            0x0013: ('volume_mount_point', cls.decode_utf8),
            0x0014: ('alias_data', lambda buf, offset, length: buf[offset:offset + length]),
            0x0015: ('user_home_prefix_length', None)  # does anyone care about this? struct.unpack('>H')
        }

    @classmethod
    def parse(cls, fullpath, idx, buf):
        """Yield parsed record dicts from the alias blob `buf`.

        `fullpath` is used for logging only and `idx` is carried through as
        'bookmark_index' for reference.
        """
        supported_versions = {
            2: cls.ALIASV2,
            3: cls.ALIASV3
        }
        if buf is None or len(buf) < cls.HEADER.size:
            return
        app_info, record_length, version = cls.HEADER.unpack_from(buf)
        # stdlib logging interpolates lazily with %-style placeholders; the
        # previous '{}' placeholders raised formatting errors at log time.
        if app_info != b'\x00\x00\x00\x00':
            logger.warning("Alias data unexpected app info '%s', please report.", app_info)
        if record_length != len(buf):
            logger.warning("Alias data unexpected size in '%s': expected %d bytes, got %d bytes.",
                           fullpath, record_length, len(buf))
        if version not in supported_versions:
            logger.error("Unsupported Alias version (%s) in '%s', please report.", version, fullpath)
            return
        yield from cls.parse_version(fullpath, idx, buf, cls.HEADER.size, supported_versions[version])

    @classmethod
    def parse_version(cls, fullpath, idx, buf, offset, version_struct):
        """Parse one record via `version_struct` and yield result dicts,
        recursing into embedded alias data (field 0x0014) when present."""
        buf_len = len(buf)
        try:
            record = version_struct.parse_as_dict(buf, offset)
        except struct.error:
            logger.debug("Could not decode alias data in file '%s'.", fullpath)
            # Bare return ends the generator; the old `return {}` value was
            # never consumed by any caller.
            return
        cur_offset = offset + version_struct.size
        loop_ct = 0
        # Iterate field list with a hard cap on iterations.
        # We only know of 22 fields (and we don't know what all of those are).
        while cur_offset < buf_len and loop_ct < 50:
            cur_offset = cls.decode_field(fullpath, buf, cur_offset, record)
            loop_ct += 1
        cls.decode_ascii_fields(record)
        cls.decode_dates(record)
        cls.filter_cnids(record)
        cls.filter_levels(record)
        cls.join_path_mount(record)
        record['is_directory'] = bool(record['is_directory'])
        if record.get('signature_fsid') is None:
            record['signature_fsid'] = record.pop('signature') + record.pop('filesystem_id')
        record['filesystem_description'] = cls.SIGNATURE_FSID.get(record['signature_fsid'], 'Unknown')
        if 'disk_type' in record:
            record['disk_type_description'] = cls.DISK_TYPES.get(record['disk_type'], 'Unknown')
        record['signature_fsid'] = cls.decode_utf8(record['signature_fsid'], 0, None)
        record['volume_flags'] = interpret_flags(record.pop('volume_flags', None), cls.ALIAS_FLAGS)
        record['bookmark_index'] = idx
        alias_data = record.pop('alias_data', None)
        yield record
        if alias_data:
            try:
                yield from AliasParser.parse(fullpath, idx, alias_data)
            except RecursionError:
                logger.error("Could not fully parse embedded alias data due to depth, please report.")

    @classmethod
    def decode_field(cls, fullpath, buf, offset, record):
        """Decode one TLV-style named field (2-byte id, 2-byte length, data
        padded to a 2-byte boundary) into `record`; return the next offset."""
        cur_offset = offset
        field_id, length = struct.unpack_from('>HH', buf, cur_offset)
        cur_offset += 4
        if field_id != 0xFFFF and length > 0:
            field_name, decoder = cls.named_fields().get(field_id, (None, None))
            if decoder:
                try:
                    record[field_name] = decoder(buf, cur_offset, length)
                except Exception as e:
                    logger.debug("Could not decode field '%s' in file '%s': %s.", field_name, fullpath, e)
            elif field_name is None:
                logger.warning("Unexpected field tag %s in Alias data for %s, please report.", field_id, fullpath)
        cur_offset += length + length % 2
        return cur_offset

    @classmethod
    def decode_utf8(cls, buf, offset, length):
        """UTF-8 decode (stripping NULs used as separators in v2 paths);
        falls back to a hex string when the bytes are not valid UTF-8."""
        if length:
            raw = buf[offset:offset + length]
        else:
            raw = buf[offset:]
        try:
            return raw.decode('utf-8').replace('\x00', '')
        except UnicodeDecodeError:
            return binascii.hexlify(raw).decode('ascii')

    @classmethod
    def decode_ascii_fields(cls, record):
        """Decode the 4CC byte fields to ASCII in place (hex fallback)."""
        fields = ['application', 'target_type']
        for f in fields:
            if f in record and isinstance(record[f], bytes):
                val = record[f]
                try:
                    record[f] = val.decode('ascii')
                except UnicodeDecodeError:
                    record[f] = binascii.hexlify(val).decode('ascii')

    @classmethod
    def decode_hfs_unicode_str(cls, buf, offset, _length):
        # HFSUniStr255 - a string of up to 255 16-bit Unicode characters,
        # with a preceding 16-bit length (number of characters)
        cur_offset = offset
        uni_str_len = struct.Struct('>H')
        char_count = uni_str_len.unpack_from(buf, cur_offset)[0]
        cur_offset += uni_str_len.size
        hfs_unicode_str = buf[cur_offset:cur_offset + (char_count * 2)].decode('utf-16-be')
        return hfs_unicode_str

    @classmethod
    def decode_cnid_path(cls, buf, offset, length):
        """Decode a run of big-endian 32-bit CNIDs into a '/'-joined string."""
        path = None
        if length % 4 != 0:
            logger.warning(
                "Unable to parse CNIDs from alias data. Expected multiple of 4 bytes, but got %d. Please report.", length)
        elif length:
            path = '/'.join([str(x) for x in struct.unpack('>{}I'.format(length // 4), buf[offset:offset + length])])
        return path

    @classmethod
    def decode_hfs_epoch_date(cls, buf, offset, length=8, struct_endian='>'):
        """Decode an 8-byte HFS timestamp (high 16 / low 32 seconds + 16-bit
        fraction) starting at `offset`; `struct_endian` allows LE variants
        (MS Office 2011 access date)."""
        timestamp = buf[offset:offset + length]
        high = struct.unpack('{}H'.format(struct_endian), timestamp[0:2])[0]
        low = struct.unpack('{}I'.format(struct_endian), timestamp[2:6])[0]
        fraction = struct.unpack('{}H'.format(struct_endian), timestamp[6:8])[0]
        return cls.combine_hfs_datetime(high, low, fraction)

    @classmethod
    def combine_hfs_datetime(cls, high_seconds, low_seconds, fraction):
        """Combine split HFS seconds + fraction into a datetime; None on failure."""
        seconds = ((high_seconds << 32) + low_seconds) * 65535 + fraction
        try:
            return parse_timestamp(seconds, 65535, HFS_EPOCH_FROM_UNIX_SHIFT) if seconds else None
        except Exception:
            return None

    @classmethod
    def decode_dates(cls, record):
        """Convert any still-raw 8-byte HFS dates in the record to datetimes."""
        for field_name in ['creation_date', 'volume_creation_date']:
            if field_name in record and isinstance(record[field_name], bytes):
                record[field_name] = cls.decode_hfs_epoch_date(record.pop(field_name), 0, 8)

    @classmethod
    def filter_cnids(cls, record):
        """Replace the 0xFFFFFFFF 'no CNID' sentinel with None."""
        for cnid in ['parent_inode', 'target_inode']:
            if cnid in record:
                record[cnid] = None if record[cnid] == 0xFFFFFFFF else record[cnid]

    @classmethod
    def filter_levels(cls, record):
        """Replace the 0xFFFF 'no depth' sentinel with None."""
        for level in ['alias_to_root_depth', 'root_to_target_depth']:
            if record.get(level) == 0xFFFF:
                record[level] = None

    @classmethod
    def join_path_mount(cls, record):
        """Prefix the relative path with the volume mount point, if known."""
        mount = record.get('volume_mount_point')
        if mount:
            path = record.get('path') or ''
            if not mount.endswith('/') and path:
                mount += '/'
            record['path'] = mount + path
| true | true |
f7227db9ad3577ade7c86c8e9376ca6d209a1ec0 | 2,370 | py | Python | Validation/Viscoelastic/PeriodicTimeseriesAnalysis.py | laurentmackay/3d-vertex | 6d6e124ecfaca018d979c5ef17d0b83d1cc0f96c | [
"MIT"
] | null | null | null | Validation/Viscoelastic/PeriodicTimeseriesAnalysis.py | laurentmackay/3d-vertex | 6d6e124ecfaca018d979c5ef17d0b83d1cc0f96c | [
"MIT"
] | null | null | null | Validation/Viscoelastic/PeriodicTimeseriesAnalysis.py | laurentmackay/3d-vertex | 6d6e124ecfaca018d979c5ef17d0b83d1cc0f96c | [
"MIT"
] | null | null | null | import string
import numpy as np
import matplotlib.pyplot as plt
from VertexTissue import globals as const
from VertexTissue.Analysis import *
from VertexTissue.funcs import euclidean_distance
from periodic_2D import forces, omegas
if __name__ == '__main__':
square_length = lambda G, t : euclidean_distance(G.nodes[0]['pos'], G.nodes[2]['pos'])
fig, axs = plt.subplots(2,int(np.ceil(len(omegas)*len(forces)/2)))
fig.set_size_inches(12, 8)
axs = axs.flatten()
patterns=[]
i=0
ke=const.mu_apical
kv = ke*60
eta = const.eta
kappa = eta + 2*kv
alphabet_string = string.ascii_lowercase
alphabet_list = list(alphabet_string)
linewidth=3
for f in forces:
for omega in omegas:
ax=axs[i]
lbl = alphabet_list[i]
i+=1
arg = (kappa*ke/(omega*kv**2)+omega*eta/ke)/2
delta = -np.arctan(arg)
num = ke**2+(omega*kv)**2
denom = (kappa*omega*ke)**2+(kv*eta*omega**2)**2
denom2 = (kappa*ke)**3+kappa*ke*(omega*kv*eta)**2
res = analyze_network_evolution(path='./data/viscoelastic/',
pattern=f'periodic_force_{f}_freq_{omega}_*.pickle',
func=square_length)
res=np.array(res)
t=res[:,0]
ax.plot(t, res[:,1],linewidth=linewidth, label='numerical')
A=f
lam = 2*ke/eta + 1/60
gamma = ke/eta * (2*A)
B=(2.0/(lam*eta))*(0+gamma/lam)
sol = (3.4+B)+gamma*(1/const.mu_apical-2.0/(const.eta*lam))*t - B*np.exp(-lam*t)
num2 = -kv*2*A*omega*ke*eta*kv**2
l_final = const.l_apical + 2*A/(2*omega*kv+omega*eta)
l_trans = -2*np.exp(-lam*t)*(num2)/denom2
amp = 2*A*np.sqrt(num/denom)
sol = l_final+amp*np.sin(omega*t+delta) +l_trans
ax.plot(t, sol, '--',linewidth=linewidth, label='theoretical')
ax.set_xlabel('t (seconds)', fontsize=14)
ax.set_ylabel('length', fontsize=14)
ax.title.set_text(f'({lbl}) Force={f}, max error = {np.max(np.abs(sol-res[:,1])):.3e}')
ax.title.set_fontsize(16)
ax.legend(loc='right')
plt.tight_layout()
plt.show() | 24.6875 | 99 | 0.538819 | import string
import numpy as np
import matplotlib.pyplot as plt
from VertexTissue import globals as const
from VertexTissue.Analysis import *
from VertexTissue.funcs import euclidean_distance
from periodic_2D import forces, omegas
if __name__ == '__main__':
square_length = lambda G, t : euclidean_distance(G.nodes[0]['pos'], G.nodes[2]['pos'])
fig, axs = plt.subplots(2,int(np.ceil(len(omegas)*len(forces)/2)))
fig.set_size_inches(12, 8)
axs = axs.flatten()
patterns=[]
i=0
ke=const.mu_apical
kv = ke*60
eta = const.eta
kappa = eta + 2*kv
alphabet_string = string.ascii_lowercase
alphabet_list = list(alphabet_string)
linewidth=3
for f in forces:
for omega in omegas:
ax=axs[i]
lbl = alphabet_list[i]
i+=1
arg = (kappa*ke/(omega*kv**2)+omega*eta/ke)/2
delta = -np.arctan(arg)
num = ke**2+(omega*kv)**2
denom = (kappa*omega*ke)**2+(kv*eta*omega**2)**2
denom2 = (kappa*ke)**3+kappa*ke*(omega*kv*eta)**2
res = analyze_network_evolution(path='./data/viscoelastic/',
pattern=f'periodic_force_{f}_freq_{omega}_*.pickle',
func=square_length)
res=np.array(res)
t=res[:,0]
ax.plot(t, res[:,1],linewidth=linewidth, label='numerical')
A=f
lam = 2*ke/eta + 1/60
gamma = ke/eta * (2*A)
B=(2.0/(lam*eta))*(0+gamma/lam)
sol = (3.4+B)+gamma*(1/const.mu_apical-2.0/(const.eta*lam))*t - B*np.exp(-lam*t)
num2 = -kv*2*A*omega*ke*eta*kv**2
l_final = const.l_apical + 2*A/(2*omega*kv+omega*eta)
l_trans = -2*np.exp(-lam*t)*(num2)/denom2
amp = 2*A*np.sqrt(num/denom)
sol = l_final+amp*np.sin(omega*t+delta) +l_trans
ax.plot(t, sol, '--',linewidth=linewidth, label='theoretical')
ax.set_xlabel('t (seconds)', fontsize=14)
ax.set_ylabel('length', fontsize=14)
ax.title.set_text(f'({lbl}) Force={f}, max error = {np.max(np.abs(sol-res[:,1])):.3e}')
ax.title.set_fontsize(16)
ax.legend(loc='right')
plt.tight_layout()
plt.show() | true | true |
f7227e235f5693500e6783740fb4531072475eb2 | 834 | py | Python | new-api/core/settings.py | rtjfarrimond/spotify-recommender | 798a0b2bb500fcc1a7165071b8d9583b69a57ae0 | [
"MIT"
] | 1 | 2022-02-09T13:18:57.000Z | 2022-02-09T13:18:57.000Z | new-api/core/settings.py | rtjfarrimond/spotify-recommender | 798a0b2bb500fcc1a7165071b8d9583b69a57ae0 | [
"MIT"
] | null | null | null | new-api/core/settings.py | rtjfarrimond/spotify-recommender | 798a0b2bb500fcc1a7165071b8d9583b69a57ae0 | [
"MIT"
] | null | null | null | import boto3
def __get_parameter(name):
    """Fetch an SSM Parameter Store value under the '/spot-rec/' prefix.

    Raises:
        ValueError: if *name* is empty or otherwise falsy.
    """
    if not name:  # covers both None and "" (the old `or name == ""` was redundant)
        raise ValueError("name not passed.")
    system_code = "spot-rec"
    full_name = f"/{system_code}/{name}"
    # NOTE(review): a fresh SSM client per call is fine at import time, but
    # hoist it to module level if this is ever called repeatedly at runtime.
    client = boto3.client('ssm')
    response = client.get_parameter(Name=full_name)
    return response['Parameter']['Value']
# Spotify API credentials shared across services.
client_id = __get_parameter('shared/client_id')
client_secret = __get_parameter('shared/client_secret')
# DynamoDB table holding track feature vectors, and its key schema.
DYNAMODB_TABLE = __get_parameter('dynamodb')
DYNAMODB_TABLE_HASH_KEY = __get_parameter('dynamodb_hash_key_name')
DYNAMODB_TABLE_SORT_KEY = __get_parameter('dynamodb_sort_key_name')
# S3 bucket that receives uploaded audio files.
AUDIO_UPLOAD_BUCKET = __get_parameter('audio_bucket_name')
# Feature-vector column naming/shape used by the recommender.
FEATURE_COL = __get_parameter('feature_column_name')
FEATURE_VECTOR_LENGTH = int(__get_parameter('feature_vector_length'))
ANNOY_INDEX_COL = __get_parameter('annoy_index_col_name')
| 34.75 | 69 | 0.77458 | import boto3
def __get_parameter(name):
if not name or name == "":
raise ValueError("name not passed.")
system_code = "spot-rec"
name = f"/{system_code}/{name}"
client = boto3.client('ssm')
response = client.get_parameter(Name=name)
return response ['Parameter']['Value']
client_id = __get_parameter('shared/client_id')
client_secret = __get_parameter('shared/client_secret')
DYNAMODB_TABLE = __get_parameter('dynamodb')
DYNAMODB_TABLE_HASH_KEY = __get_parameter('dynamodb_hash_key_name')
DYNAMODB_TABLE_SORT_KEY = __get_parameter('dynamodb_sort_key_name')
AUDIO_UPLOAD_BUCKET = __get_parameter('audio_bucket_name')
FEATURE_COL = __get_parameter('feature_column_name')
FEATURE_VECTOR_LENGTH = int(__get_parameter('feature_vector_length'))
ANNOY_INDEX_COL = __get_parameter('annoy_index_col_name')
| true | true |
f7227e314c05b47ccea1162c713451aa3b856aa1 | 22 | py | Python | dexy/version.py | dexy/dexy | 323c1806e51f75435e11d2265703e68f46c8aef3 | [
"MIT"
] | 136 | 2015-01-06T15:04:47.000Z | 2021-12-21T22:52:41.000Z | dexy/version.py | dexy/dexy | 323c1806e51f75435e11d2265703e68f46c8aef3 | [
"MIT"
] | 13 | 2015-01-26T14:06:58.000Z | 2020-03-27T21:16:10.000Z | dexy/version.py | dexy/dexy | 323c1806e51f75435e11d2265703e68f46c8aef3 | [
"MIT"
] | 34 | 2015-01-02T16:24:53.000Z | 2021-11-27T05:38:30.000Z | DEXY_VERSION="2.0.9b"
| 11 | 21 | 0.727273 | DEXY_VERSION="2.0.9b"
| true | true |
f7227e401c083c41cb8ec97664924d845fa44c99 | 35,441 | py | Python | scripts/automation/trex_control_plane/server/trex_server.py | Elvor/trex-core | c518275a922ed99612cd82574ed9574809f055ec | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/server/trex_server.py | Elvor/trex-core | c518275a922ed99612cd82574ed9574809f055ec | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/server/trex_server.py | Elvor/trex-core | c518275a922ed99612cd82574ed9574809f055ec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import stat
import sys
import time
import outer_packages
import zmq
import yaml
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import jsonrpclib
from jsonrpclib import Fault
import binascii
import socket
import errno
import signal
from common.trex_status_e import TRexStatus
from common.trex_exceptions import *
import subprocess
from random import randrange
import logging
import threading
import CCustomLogger
from trex_launch_thread import AsynchronousTRexSession
from zmq_monitor_thread import ZmqMonitorSession
from argparse import ArgumentParser, RawTextHelpFormatter
import json
import re
import shlex
import tempfile
try:
from .tcp_daemon import run_command
except:
from tcp_daemon import run_command
# setup the logger
CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')
class CTRexServer(object):
"""This class defines the server side of the RESTfull interaction with TRex"""
TREX_START_CMD = './t-rex-64'
DEFAULT_FILE_PATH = '/tmp/trex_files/'
DEFAULT_TREX_LOG = '/tmp/trex.txt'
DEFAULT_TREX_CFG = '/etc/trex_cfg.yaml'
    def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500, trex_nice=-19):
        """
        Parameters
        ----------
        trex_path : str
            path to the directory containing the t-rex-64 executable
        trex_files_path : str
            directory for files pushed to / served by the daemon
        trex_host : str
            a string of the TRex ip address or hostname.
            default value: machine hostname as fetched from socket.gethostname()
        trex_daemon_port : int
            the port number on which the trex-daemon server can be reached
            default value: 8090
        trex_zmq_port : int
            default TRex ZMQ publisher port to listen to (if not specified in config file).
            default value: 4500
        trex_nice : int
            priority of the TRex process, must be in [-20, 19]
        Instantiate a TRex client object, and connecting it to listening daemon-server
        """
        # Normalize both paths and fail fast if they are unusable.
        self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
        self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
        self.__check_trex_path_validity()
        self.__check_files_path_validity()
        self.trex = CTRex()                 # state tracker for the TRex subprocess
        self.trex_version = None            # cached (base64) version string
        self.trex_host = trex_host
        self.trex_daemon_port = trex_daemon_port
        self.trex_zmq_port = trex_zmq_port
        self.trex_server_path = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
        self.start_lock = threading.Lock()  # serializes reserve/start requests
        self.__reservation = None           # {'user': ..., 'since': ...} when reserved
        self.trex_nice = int(trex_nice)
        self.trex_log = self.DEFAULT_TREX_LOG
        self.trex_cfg = self.DEFAULT_TREX_CFG
        if self.trex_nice < -20 or self.trex_nice > 19:
            err = "Parameter 'nice' should be integer in range [-20, 19]"
            print(err)
            logger.error(err)
            raise Exception(err)
def add(self, x, y):
logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
return x + y
# return Fault(-10, "")
# Get information about available network interfaces
def get_devices_info(self):
logger.info("Processing get_devices_info() command.")
try:
args = [os.path.join(self.TREX_PATH, 'dpdk_nic_bind.py'), '-s', '--json']
result = subprocess.check_output(args, cwd=self.TREX_PATH, universal_newlines=True)
return json.loads(result)
except Exception as e:
err_str = "Error processing get_devices_info(): %s" % e
logger.error(e)
return Fault(-33, err_str)
def push_file (self, filename, bin_data):
logger.info("Processing push_file() command.")
try:
filepath = os.path.join(self.trex_files_path, os.path.basename(filename))
with open(filepath, 'wb') as f:
f.write(binascii.a2b_base64(bin_data))
logger.info("push_file() command finished. File is saved as %s" % filepath)
return True
except IOError as inst:
logger.error("push_file method failed. " + str(inst))
return False
def connectivity_check (self):
logger.info("Processing connectivity_check function.")
return True
    def start(self):
        """This method fires up the daemon server based on initialized parameters of the class.

        Binds a SimpleJSONRPCServer, registers every RPC-visible method,
        installs SIGTSTP/SIGTERM handlers and then blocks in serve_forever().
        Raises socket.error (EADDRINUSE) or socket.gaierror (-3) on bind failure.
        """
        # initialize the server instance with given resources
        try:
            print("Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port ))
            logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
            logger.info("current working dir is: {0}".format(self.TREX_PATH) )
            logger.info("current files dir is : {0}".format(self.trex_files_path) )
            logger.debug("Starting TRex server. Registering methods to process.")
            logger.info(self.get_trex_version(base64 = False))
            self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
        except socket.error as e:
            if e.errno == errno.EADDRINUSE:
                logger.error("TRex server requested address already in use. Aborting server launching.")
                print("TRex server requested address already in use. Aborting server launching.")
                raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. "
                                                     "Server launch aborted. Please make sure no other process is "
                                                     "using the desired server properties.")
            elif isinstance(e, socket.gaierror) and e.errno == -3:
                # handling Temporary failure in name resolution exception
                raise socket.gaierror(-3, "Temporary failure in name resolution.\n"
                                          "Make sure provided hostname has DNS resolving.")
            else:
                raise
        # set further functionality and peripherals to server instance
        self.server.register_function(self.add)
        self.server.register_function(self.get_devices_info)
        self.server.register_function(self.cancel_reservation)
        self.server.register_function(self.connectivity_check)
        self.server.register_function(self.connectivity_check, 'check_connectivity') # alias
        self.server.register_function(self.force_trex_kill)
        self.server.register_function(self.get_file)
        self.server.register_function(self.get_files_list)
        self.server.register_function(self.get_files_path)
        self.server.register_function(self.get_latest_dump)
        self.server.register_function(self.get_running_info)
        self.server.register_function(self.get_running_status)
        self.server.register_function(self.get_trex_cmds)
        self.server.register_function(self.get_trex_config)
        self.server.register_function(self.get_trex_config_metadata)
        self.server.register_function(self.get_trex_daemon_log)
        self.server.register_function(self.get_trex_log)
        self.server.register_function(self.get_trex_version)
        self.server.register_function(self.is_reserved)
        self.server.register_function(self.is_running)
        self.server.register_function(self.kill_all_trexes)
        self.server.register_function(self.push_file)
        self.server.register_function(self.reserve_trex)
        self.server.register_function(self.start_trex)
        self.server.register_function(self.stop_trex)
        self.server.register_function(self.wait_until_kickoff_finish)
        # Graceful shutdown on terminal-stop and termination signals.
        signal.signal(signal.SIGTSTP, self.stop_handler)
        signal.signal(signal.SIGTERM, self.stop_handler)
        try:
            self.server.serve_forever()  # blocks until interrupted
        except KeyboardInterrupt:
            logger.info("Daemon shutdown request detected." )
        finally:
            self.server.shutdown()
            #self.server.server_close()
# get files from Trex server and return their content (mainly for logs)
@staticmethod
def _pull_file(filepath):
try:
with open(filepath, 'rb') as f:
file_content = f.read()
return binascii.b2a_base64(file_content).decode(errors='replace')
except Exception as e:
err_str = "Can't get requested file %s: %s" % (filepath, e)
logger.error(err_str)
return Fault(-33, err_str)
# returns True if given path is under TRex package or under /tmp/trex_files
def _check_path_under_TRex_or_temp(self, path):
if not os.path.relpath(path, self.trex_files_path).startswith(os.pardir):
return True
if not os.path.relpath(path, self.TREX_PATH).startswith(os.pardir):
return True
return False
# gets the file content encoded base64 either from /tmp/trex_files or TRex server dir
def get_file(self, filepath):
try:
logger.info("Processing get_file() command.")
if not self._check_path_under_TRex_or_temp(filepath):
raise Exception('Given path should be under current TRex package or /tmp/trex_files')
return self._pull_file(filepath)
except Exception as e:
err_str = "Can't get requested file %s: %s" % (filepath, e)
logger.error(err_str)
return Fault(-33, err_str)
# get tuple (dirs, files) with directories and files lists from given path (limited under TRex package or /tmp/trex_files)
def get_files_list(self, path):
try:
logger.info("Processing get_files_list() command, given path: %s" % path)
if not self._check_path_under_TRex_or_temp(path):
raise Exception('Given path should be under current TRex package or /tmp/trex_files')
return os.walk(path).next()[1:3]
except Exception as e:
err_str = "Error processing get_files_list(): %s" % e
logger.error(err_str)
return Fault(-33, err_str)
# get Trex log
def get_trex_log(self):
logger.info("Processing get_trex_log() command.")
return self._pull_file(self.trex_log)
# get trex config file
def get_trex_config(self):
logger.info("Processing get_trex_config() command.")
return self._pull_file(self.trex_cfg)
#get metadata used to generate trex_cfg.yaml
def get_trex_config_metadata(self):
logger.info("Processing get_trex_config_metadata() command.")
metadata_json_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'config_metadata.json'))
try:
with open(metadata_json_path) as f:
return json.load(f)
except Exception as e:
return Fault(-33, "Can't load config metadata contents: %s" % e)
# get daemon log (default:/var/log/trex/trex_daemon_server.log)
def get_trex_daemon_log (self):
logger.info("Processing get_trex_daemon_log() command.")
default_log_path = '/var/log/trex/trex_daemon_server.log'
daemon_log_path = os.getenv('TREX_DAEMON_LOG', default_log_path)
return self._pull_file(daemon_log_path)
    # get Trex version from ./t-rex-64 --help (last lines starting with "Version : ...")
    def get_trex_version (self, base64 = True):
        """Return the TRex version banner, cached after the first lookup.

        With base64=True (the RPC default) the string is base64-encoded for
        transport; base64=False returns plain text. On failure a Fault(-33)
        is returned.
        """
        try:
            logger.info("Processing get_trex_version() command.")
            if not self.trex_version:
                ret_code, stdout, stderr = run_command('./t-rex-64 --help', cwd = self.TREX_PATH)
                # DOTALL makes '.+' capture everything from "Version :" to the
                # end of the help output -- the whole trailing version section.
                search_result = re.search('\n\s*(Version\s*:.+)', stdout, re.DOTALL)
                if not search_result:
                    raise Exception('Could not determine version from ./t-rex-64 --help. Stdout: %s. Stderr: %s' % (stdout, stderr))
                # Cache as base64 bytes; both return paths decode from this cache.
                self.trex_version = binascii.b2a_base64(search_result.group(1).encode(errors='replace'))
            if base64:
                return self.trex_version.decode(errors='replace')
            else:
                return binascii.a2b_base64(self.trex_version).decode(errors='replace')
        except Exception as e:
            err_str = "Can't get trex version, error: %s" % e
            logger.error(err_str)
            return Fault(-33, err_str)
def stop_handler (self, *args, **kwargs):
logger.info("Daemon STOP request detected.")
if self.is_running():
# in case TRex process is currently running, stop it before terminating server process
self.stop_trex(self.trex.get_seq())
sys.exit(0)
    def assert_zmq_ok(self, check_alive = True):
        """Raise if the ZMQ monitor thread reported an error or has died.

        With check_alive=True a dead monitor additionally forces a TRex kill
        when a run is still in progress.
        """
        if self.trex.zmq_error:
            # Fetch-and-clear in one tuple assignment so the error is reported once.
            self.trex.zmq_error, err = None, self.trex.zmq_error
            raise Exception('ZMQ thread got error: %s' % err)
        if check_alive and self.trex.zmq_monitor and not self.trex.zmq_monitor.is_alive():
            if self.trex.get_status() != TRexStatus.Idle:
                self.force_trex_kill()
            raise Exception('ZMQ thread is dead.')
def is_running (self):
run_status = self.trex.get_status()
logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
if run_status==TRexStatus.Running:
return True
else:
return False
def is_reserved (self):
logger.info("Processing is_reserved() command.")
return bool(self.__reservation)
def get_running_status (self):
self.assert_zmq_ok(check_alive = False)
run_status = self.trex.get_status()
logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }
def get_files_path (self):
logger.info("Processing get_files_path() command." )
return self.trex_files_path
    def reserve_trex (self, user):
        """Reserve TRex for *user*.

        Returns True on success (including re-reservation by the same user),
        Fault(-33) when reserved by someone else or user is empty, and
        Fault(-13) when TRex is not Idle.
        """
        if user == "":
            logger.info("TRex reservation cannot apply to empty string user. Request denied.")
            return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")

        with self.start_lock:
            logger.info("Processing reserve_trex() command.")
            if self.is_reserved():
                if user == self.__reservation['user']:
                    # return True if the same user is asking and already has the reservation
                    logger.info("the same user is asking and already has the resrvation. Re-reserving TRex.")
                    return True
                logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
                return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
                    res_user = self.__reservation['user']) )  # raise at client TRexInUseError
            elif self.trex.get_status() != TRexStatus.Idle:
                logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
                return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when TRex run finished.')  # raise at client TRexInUseError
            else:
                logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
                self.__reservation = {'user' : user, 'since' : time.ctime()}
                logger.debug("Reservation details: "+ str(self.__reservation))
                return True
    def cancel_reservation (self, user):
        """Cancel the reservation held by *user*.

        Returns True when canceled, False when nothing was reserved, and
        Fault(-33) when a different user holds the reservation.
        """
        with self.start_lock:
            logger.info("Processing cancel_reservation() command.")
            if self.is_reserved():
                if self.__reservation['user'] == user:
                    logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
                    self.__reservation = None
                    return True
                else:
                    logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
                    return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied")  # raise at client TRexRequestDenied
            else:
                logger.info("TRex is not reserved to anyone. No need to cancel anything")
                # NOTE(review): assert is stripped under 'python -O'; fine as a
                # debug-only invariant check, not as validation.
                assert(self.__reservation is None)
                return False
    def start_trex(self, trex_cmd_options, user, block_to_success = True, timeout = 40, stateless = False, debug_image = False, trex_args = ''):
        """Launch a TRex run for *user* and return its unique sequence number.

        Respects the reservation (Fault -33) and refuses when a run is already
        active (Fault -13). With block_to_success=True the call waits up to
        *timeout* seconds for TRex to leave the Starting state, returning
        Fault(-12) on timeout or Fault(-11) if TRex fell back to Idle.
        """
        self.trex.zmq_error = None
        with self.start_lock:
            logger.info("Processing start_trex() command.")
            if self.is_reserved():
                # check if this is not the user to which TRex is reserved
                if self.__reservation['user'] != user:
                    logger.info("TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
                    return Fault(-33, "TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))  # raise at client TRexRequestDenied
            elif self.trex.get_status() != TRexStatus.Idle:
                err = 'TRex is already taken, cannot create another run until done.'
                logger.info(err)
                return Fault(-13, err)  # raise at client TRexInUseError
            try:
                server_cmd_data, zmq_port = self.generate_run_cmd(stateless = stateless, debug_image = debug_image, trex_args = trex_args, **trex_cmd_options)
                self.trex.start_trex(self.TREX_PATH, server_cmd_data, zmq_port)
                logger.info("TRex session has been successfully initiated.")
                if block_to_success:
                    # delay server response until TRex is at 'Running' state.
                    start_time = time.time()
                    trex_state = None
                    while (time.time() - start_time) < timeout :
                        trex_state = self.trex.get_status()
                        if trex_state != TRexStatus.Starting:
                            break
                        else:
                            time.sleep(0.5)
                        self.assert_zmq_ok()
                    # check for TRex run started normally
                    if trex_state == TRexStatus.Starting: # reached timeout
                        logger.warning("TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.")
                        return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.')  # raise at client TRexWarning
                    elif trex_state == TRexStatus.Idle:
                        return Fault(-11, self.trex.get_verbose_status())  # raise at client TRexError
                # reach here only if TRex is at 'Running' state
                self.trex.gen_seq()
                return self.trex.get_seq()  # return unique seq number to client
            except TypeError as e:
                logger.error("TRex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
                raise TypeError('TRex -f (traffic generation .yaml file) and -c (num of cores) must be specified. %s' % e)
def stop_trex(self, seq, with_tb = False):
logger.info("Processing stop_trex() command.")
if self.trex.get_seq()== seq:
logger.debug("Abort request legit since seq# match")
return self.trex.stop_trex(with_tb)
else:
if self.trex.get_status() != TRexStatus.Idle:
logger.warning("Abort request is only allowed to process initiated the run. Request denied.")
return Fault(-33, 'Abort request is only allowed to process initiated the run. Request denied.') # raise at client TRexRequestDenied
else:
return False
def force_trex_kill(self, with_tb = False):
tb_print = ''
if with_tb:
tb_print = ' (with traceback)'
logger.info("Processing force_trex_kill() command%s." % tb_print)
return self.trex.stop_trex(with_tb)
# returns list of tuples (pid, command line) of running TRex(es)
def get_trex_cmds(self):
logger.info('Processing get_trex_cmds() command.')
ret_code, stdout, stderr = run_command('ps -u root --format pid,comm,cmd')
if ret_code:
raise Exception('Failed to determine running processes. Stdout: %s. Stderr: %s' % (stdout, stderr))
trex_cmds_list = []
for line in stdout.splitlines():
pid, proc_name, full_cmd = line.strip().split(' ', 2)
pid = pid.strip()
full_cmd = full_cmd.strip()
if proc_name.find('_t-rex-64') >= 0:
trex_cmds_list.append((pid, full_cmd))
return trex_cmds_list
# Silently tries to kill TRexes with given signal.
# Responsibility of client to verify with get_trex_cmds.
def kill_all_trexes(self, signal_name):
logger.info('Processing kill_all_trexes() command.')
trex_cmds_list = self.get_trex_cmds()
for pid, cmd in trex_cmds_list:
logger.info('Killing with signal %s process %s %s' % (signal_name, pid, cmd))
try:
os.kill(int(pid), signal_name)
except OSError as e:
if e.errno == errno.ESRCH:
logger.info('No such process, ignoring.')
raise
def wait_until_kickoff_finish (self, timeout = 40):
# block until TRex exits Starting state
logger.info("Processing wait_until_kickoff_finish() command.")
start_time = time.time()
while (time.time() - start_time) < timeout :
self.assert_zmq_ok()
trex_state = self.trex.get_status()
if trex_state != TRexStatus.Starting:
return
time.sleep(0.1)
return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
def get_running_info (self):
logger.info("Processing get_running_info() command.")
self.assert_zmq_ok(check_alive = False)
return self.trex.get_running_info()
def get_latest_dump(self):
logger.info("Processing get_latest_dump() command.")
self.assert_zmq_ok(check_alive = False)
return self.trex.get_latest_dump()
    def generate_run_cmd (self, iom = 0, export_path = None, stateless = False, debug_image = False, trex_args = '', **kwargs):
        """ generate_run_cmd(self, iom, export_path, kwargs) -> str
        Generates a custom running command for the kick-off of the TRex traffic generator.
        Returns a tuple of command (string) and export path (string) to be issued on the trex server
        Parameters
        ----------
        iom: int
            0 = don't print stats screen to log, 1 = print stats (can generate huge logs)
        stateless: boolean
            True = run as stateless, False = require -f and -d arguments
        debug_image: boolean
            True = run the '-debug' build of the executable
        trex_args: str
            raw extra arguments appended verbatim to the command line
        kwargs: dictionary
            Dictionary of parameters for trex. For example: (c=1, nc=True, l_pkt_mode=3).
            Notice that when sending command line parameters that has -, you need to replace it with _.
            for example, to have on command line "--l-pkt-mode 3", you need to send l_pkt_mode=3
        export_path : str
            Full system path to which the results of the trex-run will be logged.
        """
        # Explicit results_file_path overrides export_path; otherwise default log.
        if 'results_file_path' in kwargs:
            export_path = kwargs['results_file_path']
            del kwargs['results_file_path']
        elif export_path is None:
            export_path = self.DEFAULT_TREX_LOG
        if stateless:
            kwargs['i'] = True  # interactive/stateless mode flag
        # adding additional options to the command
        trex_cmd_options = ''
        for key, value in kwargs.items():
            # Underscores become dashes; single-letter keys get '-', others '--'.
            tmp_key = key.replace('_','-').lstrip('-')
            dash = ' -' if (len(key)==1) else ' --'
            if value is True:
                trex_cmd_options += (dash + tmp_key)          # boolean flag
            elif value is False:
                continue                                      # omitted entirely
            else:
                trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
        if trex_args:
            trex_cmd_options += ' %s' % trex_args
        # Validates the platform config and resolves the ZMQ publisher port.
        zmq_port = self._check_zmq_port(trex_cmd_options)
        if not stateless:
            if 'f' not in kwargs:
                raise Exception('Argument -f should be specified in stateful command')
            if 'd' not in kwargs:
                raise Exception('Argument -d should be specified in stateful command')
        cmd = "{nice}{run_command}{debug_image} --iom {io} {cmd_options} --no-key".format(   # -- iom 0 disables the periodic log to the screen (not needed)
            nice = '' if self.trex_nice == 0 else 'nice -n %s ' % self.trex_nice,
            run_command = self.TREX_START_CMD,
            debug_image = '-debug' if debug_image else '',
            cmd_options = trex_cmd_options,
            io = iom)
        logger.info("TREX FULL COMMAND: {command}".format(command = cmd) )
        # save export_path for get_trex_log()
        self.trex_log = export_path
        return (cmd, export_path, kwargs.get('d', 0)), zmq_port
    def _check_zmq_port(self, trex_cmd_options):
        """Validate the platform config and return the ZMQ publisher port.

        Parses --cfg out of the TRex command options (default
        /etc/trex_cfg.yaml), checks the file exists, is a one-element YAML
        list, and does not disable ZMQ publishing; returns its zmq_pub_port
        or the daemon default.
        """
        parser = ArgumentParser()
        parser.add_argument('--cfg', default = self.DEFAULT_TREX_CFG)
        # parse_known_args ignores every other TRex flag in the options string.
        args, _ = parser.parse_known_args(shlex.split(trex_cmd_options))
        if not os.path.exists(args.cfg):
            raise Exception('Platform config file "%s" does not exist!' % args.cfg)
        with open(args.cfg) as f:
            trex_cfg = yaml.safe_load(f.read())
        if type(trex_cfg) is not list:
            raise Exception('Platform config file "%s" content should be array.' % args.cfg)
        if not len(trex_cfg):
            raise Exception('Platform config file "%s" content should be array with one element.' % args.cfg)
        trex_cfg = trex_cfg[0]
        if 'enable_zmq_pub' in trex_cfg and trex_cfg['enable_zmq_pub'] == False:
            raise Exception('TRex daemon expects ZMQ publisher to be enabled. Please change "enable_zmq_pub" to true.')
        # save used trex config file (default:/etc/trex_cfg.yaml)
        self.trex_cfg = args.cfg
        if 'zmq_pub_port' in trex_cfg:
            return trex_cfg['zmq_pub_port']
        return self.trex_zmq_port
    def __check_trex_path_validity(self):
        """Exit the process if TREX_PATH lacks an executable t-rex-64 binary.

        NOTE(review): uses exit(-1) rather than raising/sys.exit -- acceptable
        during daemon startup, but prevents callers from recovering.
        """
        # check for executable existance
        if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
            print("The provided TRex path do not contain an executable TRex file.\nPlease check the path and retry.")
            logger.error("The provided TRex path do not contain an executable TRex file")
            exit(-1)
        # check for executable permissions
        st = os.stat(self.TREX_PATH+'/t-rex-64')
        if not bool(st.st_mode & (stat.S_IXUSR ) ):
            print("The provided TRex path do not contain an TRex file with execution privileges.\nPlease check the files permissions and retry.")
            logger.error("The provided TRex path do not contain an TRex file with execution privileges")
            exit(-1)
        else:
            return
def __check_files_path_validity(self):
    """Ensure the uploaded-files directory exists and is writable, else exit -1.

    Creates ``self.trex_files_path`` when it does not exist. On any failure
    the daemon prints an explanation, logs it and exits with status -1,
    matching the behavior of ``__check_trex_path_validity``.
    """
    # first, check for path existance. otherwise, try creating it with appropriate credentials
    if not os.path.exists(self.trex_files_path):
        try:
            # BUGFIX: mode was 0o660, which lacks the execute (search) bit that a
            # directory needs in order to be entered/listed by non-root users.
            # 0o770 keeps rw for owner/group and makes the directory actually
            # usable (the effective mode is still subject to the process umask).
            os.makedirs(self.trex_files_path, 0o770)
            return
        except os.error as inst:
            print("The provided files path does not exist and cannot be created with needed access credentials using root user.\nPlease check the path's permissions and retry.")
            logger.error("The provided files path does not exist and cannot be created with needed access credentials using root user.")
            exit(-1)
    elif os.access(self.trex_files_path, os.W_OK):
        # directory already exists and is writable: nothing to do
        return
    else:
        print("The provided files path has insufficient access credentials for root user.\nPlease check the path's permissions and retry.")
        logger.error("The provided files path has insufficient access credentials for root user")
        exit(-1)
class CTRex(object):
    """State holder for the single TRex process managed by the daemon.

    Tracks run status, the launcher thread (AsynchronousTRexSession) and the
    ZMQ monitor thread, and exposes a lock-protected dump of the latest
    statistics published by TRex over ZMQ.

    NOTE(review): indentation in this listing was reconstructed from a
    flattened source; branch nesting should be verified against the original.
    """
    def __init__(self):
        self.status = TRexStatus.Idle            # current run state (Idle/Starting/Running)
        self.verbose_status = 'TRex is Idle'     # human-readable state description
        self.errcode = None                      # last error code to report to clients, if any
        self.session = None                      # AsynchronousTRexSession launcher thread
        self.zmq_monitor = None                  # ZmqMonitorSession stats-subscriber thread
        self.__zmq_dump = {}                     # latest stats dump, guarded by zmq_dump_lock
        self.zmq_dump_lock = threading.Lock()
        self.zmq_error = None                    # error raised inside the ZMQ thread, if any
        self.seq = None                          # random token identifying the current run's owner
        self.expect_trex = threading.Event()
    def __del__(self):
        # best-effort join of helper threads on garbage collection
        if self.zmq_monitor:
            self.zmq_monitor.join()
        if self.session:
            self.session.join()
    def get_status(self):
        return self.status
    def set_status(self, new_status):
        self.status = new_status
    def get_verbose_status(self):
        return self.verbose_status
    def set_verbose_status(self, new_status):
        self.verbose_status = new_status
    def gen_seq (self):
        # generate a new run token handed to the client that started TRex
        self.seq = randrange(1,1000)
    def get_seq (self):
        return self.seq
    def get_latest_dump(self):
        # serialize the latest ZMQ stats dump under the lock
        with self.zmq_dump_lock:
            return json.dumps(self.__zmq_dump)
    def update_zmq_dump_key(self, key, val):
        with self.zmq_dump_lock:
            self.__zmq_dump[key] = val
    def clear_zmq_dump(self):
        with self.zmq_dump_lock:
            self.__zmq_dump = {}
    def get_running_info (self):
        """Return the latest stats dump, or a Fault describing why none is available."""
        if self.status == TRexStatus.Running:
            return self.get_latest_dump()
        else:
            logger.info("TRex isn't running. Running information isn't available.")
            if self.status == TRexStatus.Idle:
                if self.errcode is not None:    # some error occured
                    logger.info("TRex is in Idle state, with errors. returning fault")
                    return Fault(self.errcode, self.verbose_status)  # raise at client relevant exception, depending on the reason the error occured
                else:
                    logger.info("TRex is in Idle state, no errors. returning {}")
                    return u'{}'
            return Fault(-12, self.verbose_status)  # raise at client TRexWarning, indicating TRex is back to Idle state or still in Starting state
    def stop_trex(self, with_tb):
        """Abort the current TRex run; returns True on abort, False/Fault when idle."""
        if self.status == TRexStatus.Idle:
            # TRex isn't running, nothing to abort
            logger.info("TRex isn't running. No need to stop anything.")
            if self.errcode is not None:  # some error occurred, notify client despite TRex already stopped
                return Fault(self.errcode, self.verbose_status)  # raise at client relevant exception, depending on the reason the error occured
            return False
        else:
            # handle stopping TRex's run
            self.session.join(with_tb = with_tb)
            logger.info("TRex session has been successfully aborted.")
            return True
    def start_trex(self, trex_launch_path, trex_cmd, zmq_port):
        """Move to Starting state, (re)start the ZMQ monitor, and launch TRex asynchronously."""
        self.set_status(TRexStatus.Starting)
        logger.info("TRex running state changed to 'Starting'.")
        self.set_verbose_status('TRex is starting (data is not available yet)')
        if not self.zmq_monitor:
            logger.info('Starting ZMQ monitor on port %s' % zmq_port)
            self.zmq_monitor = ZmqMonitorSession(self, zmq_port)
            self.zmq_monitor.start()
        else:
            # restart the monitor if it died or the publisher port changed
            if not self.zmq_monitor.is_alive() or self.zmq_monitor.zmq_port != zmq_port:
                if not self.zmq_monitor.is_alive():
                    logger.info('Old ZMQ monitor is dead, starting new')
                else:
                    logger.info('ZMQ port is changed to %s, starting new monitor' % zmq_port)
                self.zmq_monitor.join()
                self.zmq_monitor = ZmqMonitorSession(self, zmq_port)
                self.zmq_monitor.start()
            self.zmq_monitor.first_dump = True
        self.errcode = None
        self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
        self.session.start()
        self.expect_trex.set()
def generate_trex_parser ():
    """Build and return the ArgumentParser for the TRex daemon command line."""
    this_dir = os.path.dirname(__file__)
    # the daemon lives three directory levels below the TRex package root
    trex_root = os.path.abspath(os.path.join(this_dir, os.pardir, os.pardir, os.pardir))
    files_root = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
    parser = ArgumentParser(description = 'Run server application for TRex traffic generator',
                            formatter_class = RawTextHelpFormatter,
                            usage = """
trex_daemon_server [options]
""" )
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
    parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", action="store",
                        help="Select port on which the daemon runs.\nDefault port is 8090.")
    parser.add_argument("-z", "--zmq-port", type=int, metavar="PORT", default = 4500, action="store",
                        help="default TRex ZMQ publisher port to listen to (if not specified in config file).\nDefault port is 4500.")
    parser.add_argument("-t", "--trex-path", metavar="PATH", default = trex_root, action="store",
                        help="Specify the compiled TRex directory from which TRex would run.\nDefault path is: %s." % trex_root)
    parser.add_argument("-f", "--files-path", metavar="PATH", default = files_root, action="store",
                        help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: %s." % files_root)
    parser.add_argument("--trex-host", metavar="HOST", default = '0.0.0.0', action="store",
                        help="Specify a hostname to be registered as the TRex server.\nDefault is to bind all IPs using '0.0.0.0'.")
    parser.add_argument('-n', '--nice', dest='nice', action="store", default = -19, type = int,
                        help="Determine the priority TRex process [-20, 19] (lower = higher priority)\nDefault is -19.")
    return parser
trex_parser = generate_trex_parser()
def do_main_program ():
    """Entry point: parse CLI options, build the daemon and serve until interrupted."""
    opts = trex_parser.parse_args()
    daemon = CTRexServer(trex_path = opts.trex_path, trex_files_path = opts.files_path,
                         trex_host = opts.trex_host, trex_daemon_port = opts.daemon_port,
                         trex_zmq_port = opts.zmq_port, trex_nice = opts.nice)
    daemon.start()
# run the daemon only when executed as a script, not on import
if __name__ == "__main__":
    do_main_program()
| 48.153533 | 236 | 0.628707 |
import os
import stat
import sys
import time
import outer_packages
import zmq
import yaml
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import jsonrpclib
from jsonrpclib import Fault
import binascii
import socket
import errno
import signal
from common.trex_status_e import TRexStatus
from common.trex_exceptions import *
import subprocess
from random import randrange
import logging
import threading
import CCustomLogger
from trex_launch_thread import AsynchronousTRexSession
from zmq_monitor_thread import ZmqMonitorSession
from argparse import ArgumentParser, RawTextHelpFormatter
import json
import re
import shlex
import tempfile
try:
from .tcp_daemon import run_command
except:
from tcp_daemon import run_command
# configure handlers/format once per process, then grab the shared module logger
CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')
class CTRexServer(object):
    """JSON-RPC daemon that launches, monitors and stops a single TRex process.

    Exposes start/stop/reserve/file-transfer RPCs over SimpleJSONRPCServer and
    delegates process/ZMQ lifecycle to a CTRex instance.

    NOTE(review): indentation in this listing was reconstructed from a
    flattened source; branch nesting (marked where ambiguous) should be
    verified against the original file.
    """
    TREX_START_CMD = './t-rex-64'                 # executable launched inside TREX_PATH
    DEFAULT_FILE_PATH = '/tmp/trex_files/'        # default destination of pushed files
    DEFAULT_TREX_LOG = '/tmp/trex.txt'            # default TRex output/log file
    DEFAULT_TREX_CFG = '/etc/trex_cfg.yaml'       # default platform config file
    def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500, trex_nice=-19):
        """Validate paths/priority and set up daemon state (does not start serving)."""
        self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
        self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
        self.__check_trex_path_validity()
        self.__check_files_path_validity()
        self.trex = CTRex()
        self.trex_version = None                  # cached base64 version string, filled lazily
        self.trex_host = trex_host
        self.trex_daemon_port = trex_daemon_port
        self.trex_zmq_port = trex_zmq_port
        self.trex_server_path = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
        self.start_lock = threading.Lock()        # serializes start/reserve/cancel operations
        self.__reservation = None                 # {'user': ..., 'since': ...} when reserved
        self.trex_nice = int(trex_nice)
        self.trex_log = self.DEFAULT_TREX_LOG
        self.trex_cfg = self.DEFAULT_TREX_CFG
        if self.trex_nice < -20 or self.trex_nice > 19:
            err = "Parameter 'nice' should be integer in range [-20, 19]"
            print(err)
            logger.error(err)
            raise Exception(err)
    def add(self, x, y):
        """Trivial RPC used as a sanity/echo check."""
        logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
        return x + y
    def get_devices_info(self):
        """Return dpdk_nic_bind.py -s --json output as a parsed object, or Fault on error."""
        logger.info("Processing get_devices_info() command.")
        try:
            args = [os.path.join(self.TREX_PATH, 'dpdk_nic_bind.py'), '-s', '--json']
            result = subprocess.check_output(args, cwd=self.TREX_PATH, universal_newlines=True)
            return json.loads(result)
        except Exception as e:
            err_str = "Error processing get_devices_info(): %s" % e
            logger.error(e)
            return Fault(-33, err_str)
    def push_file (self, filename, bin_data):
        """Save base64-encoded bin_data under trex_files_path; returns True on success."""
        logger.info("Processing push_file() command.")
        try:
            # basename() prevents the client from writing outside trex_files_path
            filepath = os.path.join(self.trex_files_path, os.path.basename(filename))
            with open(filepath, 'wb') as f:
                f.write(binascii.a2b_base64(bin_data))
            logger.info("push_file() command finished. File is saved as %s" % filepath)
            return True
        except IOError as inst:
            logger.error("push_file method failed. " + str(inst))
            return False
    def connectivity_check (self):
        """Liveness probe; always returns True."""
        logger.info("Processing connectivity_check function.")
        return True
    def start(self):
        """Bind the JSON-RPC server, register all RPC methods and serve forever."""
        try:
            print("Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port ))
            logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
            logger.info("current working dir is: {0}".format(self.TREX_PATH) )
            logger.info("current files dir is : {0}".format(self.trex_files_path) )
            logger.debug("Starting TRex server. Registering methods to process.")
            logger.info(self.get_trex_version(base64 = False))
            self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
        except socket.error as e:
            if e.errno == errno.EADDRINUSE:
                logger.error("TRex server requested address already in use. Aborting server launching.")
                print("TRex server requested address already in use. Aborting server launching.")
                raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. "
                                                     "Server launch aborted. Please make sure no other process is "
                                                     "using the desired server properties.")
            elif isinstance(e, socket.gaierror) and e.errno == -3:
                raise socket.gaierror(-3, "Temporary failure in name resolution.\n"
                                          "Make sure provided hostname has DNS resolving.")
            else:
                raise
        # register the public RPC surface (one name per client-visible method)
        self.server.register_function(self.add)
        self.server.register_function(self.get_devices_info)
        self.server.register_function(self.cancel_reservation)
        self.server.register_function(self.connectivity_check)
        self.server.register_function(self.connectivity_check, 'check_connectivity')
        self.server.register_function(self.force_trex_kill)
        self.server.register_function(self.get_file)
        self.server.register_function(self.get_files_list)
        self.server.register_function(self.get_files_path)
        self.server.register_function(self.get_latest_dump)
        self.server.register_function(self.get_running_info)
        self.server.register_function(self.get_running_status)
        self.server.register_function(self.get_trex_cmds)
        self.server.register_function(self.get_trex_config)
        self.server.register_function(self.get_trex_config_metadata)
        self.server.register_function(self.get_trex_daemon_log)
        self.server.register_function(self.get_trex_log)
        self.server.register_function(self.get_trex_version)
        self.server.register_function(self.is_reserved)
        self.server.register_function(self.is_running)
        self.server.register_function(self.kill_all_trexes)
        self.server.register_function(self.push_file)
        self.server.register_function(self.reserve_trex)
        self.server.register_function(self.start_trex)
        self.server.register_function(self.stop_trex)
        self.server.register_function(self.wait_until_kickoff_finish)
        # stop gracefully on Ctrl-Z / SIGTERM as well as KeyboardInterrupt below
        signal.signal(signal.SIGTSTP, self.stop_handler)
        signal.signal(signal.SIGTERM, self.stop_handler)
        try:
            self.server.serve_forever()
        except KeyboardInterrupt:
            logger.info("Daemon shutdown request detected." )
        finally:
            self.server.shutdown()
    @staticmethod
    def _pull_file(filepath):
        """Read filepath and return its content base64-encoded, or Fault on error."""
        try:
            with open(filepath, 'rb') as f:
                file_content = f.read()
            return binascii.b2a_base64(file_content).decode(errors='replace')
        except Exception as e:
            err_str = "Can't get requested file %s: %s" % (filepath, e)
            logger.error(err_str)
            return Fault(-33, err_str)
    # returns True if given path is under TRex package or under /tmp/trex_files
    def _check_path_under_TRex_or_temp(self, path):
        if not os.path.relpath(path, self.trex_files_path).startswith(os.pardir):
            return True
        if not os.path.relpath(path, self.TREX_PATH).startswith(os.pardir):
            return True
        return False
    # gets the file content encoded base64 either from /tmp/trex_files or TRex server dir
    def get_file(self, filepath):
        try:
            logger.info("Processing get_file() command.")
            if not self._check_path_under_TRex_or_temp(filepath):
                raise Exception('Given path should be under current TRex package or /tmp/trex_files')
            return self._pull_file(filepath)
        except Exception as e:
            err_str = "Can't get requested file %s: %s" % (filepath, e)
            logger.error(err_str)
            return Fault(-33, err_str)
    def get_files_list(self, path):
        """Return (dirnames, filenames) of path's first directory level, or Fault."""
        try:
            logger.info("Processing get_files_list() command, given path: %s" % path)
            if not self._check_path_under_TRex_or_temp(path):
                raise Exception('Given path should be under current TRex package or /tmp/trex_files')
            # NOTE(review): generator .next() is Python 2 only; under Python 3 this
            # raises AttributeError (swallowed below and returned as Fault(-33)).
            # The Python 3 spelling is next(os.walk(path))[1:3].
            return os.walk(path).next()[1:3]
        except Exception as e:
            err_str = "Error processing get_files_list(): %s" % e
            logger.error(err_str)
            return Fault(-33, err_str)
    def get_trex_log(self):
        """Return the current TRex output file (base64)."""
        logger.info("Processing get_trex_log() command.")
        return self._pull_file(self.trex_log)
    def get_trex_config(self):
        """Return the platform config file last used (base64)."""
        logger.info("Processing get_trex_config() command.")
        return self._pull_file(self.trex_cfg)
    def get_trex_config_metadata(self):
        """Return the parsed config_metadata.json shipped next to this module."""
        logger.info("Processing get_trex_config_metadata() command.")
        metadata_json_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'config_metadata.json'))
        try:
            with open(metadata_json_path) as f:
                return json.load(f)
        except Exception as e:
            return Fault(-33, "Can't load config metadata contents: %s" % e)
    # get daemon log (default:/var/log/trex/trex_daemon_server.log)
    def get_trex_daemon_log (self):
        logger.info("Processing get_trex_daemon_log() command.")
        default_log_path = '/var/log/trex/trex_daemon_server.log'
        daemon_log_path = os.getenv('TREX_DAEMON_LOG', default_log_path)
        return self._pull_file(daemon_log_path)
    # get Trex version from ./t-rex-64 --help (last lines starting with "Version : ...")
    def get_trex_version (self, base64 = True):
        try:
            logger.info("Processing get_trex_version() command.")
            if not self.trex_version:
                # lazily probe the binary once and cache the base64 version string
                ret_code, stdout, stderr = run_command('./t-rex-64 --help', cwd = self.TREX_PATH)
                search_result = re.search('\n\s*(Version\s*:.+)', stdout, re.DOTALL)
                if not search_result:
                    raise Exception('Could not determine version from ./t-rex-64 --help. Stdout: %s. Stderr: %s' % (stdout, stderr))
                self.trex_version = binascii.b2a_base64(search_result.group(1).encode(errors='replace'))
            if base64:
                return self.trex_version.decode(errors='replace')
            else:
                return binascii.a2b_base64(self.trex_version).decode(errors='replace')
        except Exception as e:
            err_str = "Can't get trex version, error: %s" % e
            logger.error(err_str)
            return Fault(-33, err_str)
    def stop_handler (self, *args, **kwargs):
        """Signal handler: stop any running TRex session and exit the daemon."""
        logger.info("Daemon STOP request detected.")
        if self.is_running():
            self.stop_trex(self.trex.get_seq())
        sys.exit(0)
    def assert_zmq_ok(self, check_alive = True):
        """Raise if the ZMQ monitor thread reported an error or (optionally) died."""
        if self.trex.zmq_error:
            # consume the stored error so it is reported only once
            self.trex.zmq_error, err = None, self.trex.zmq_error
            raise Exception('ZMQ thread got error: %s' % err)
        if check_alive and self.trex.zmq_monitor and not self.trex.zmq_monitor.is_alive():
            if self.trex.get_status() != TRexStatus.Idle:
                self.force_trex_kill()
            raise Exception('ZMQ thread is dead.')
    def is_running (self):
        run_status = self.trex.get_status()
        logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
        if run_status==TRexStatus.Running:
            return True
        else:
            return False
    def is_reserved (self):
        logger.info("Processing is_reserved() command.")
        return bool(self.__reservation)
    def get_running_status (self):
        """Return {'state', 'verbose'} describing the current TRex run state."""
        self.assert_zmq_ok(check_alive = False)
        run_status = self.trex.get_status()
        logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
        return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }
    def get_files_path (self):
        logger.info("Processing get_files_path() command." )
        return self.trex_files_path
    def reserve_trex (self, user):
        """Reserve the TRex machine to user; Fault when held by someone else or running."""
        if user == "":
            logger.info("TRex reservation cannot apply to empty string user. Request denied.")
            return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")
        with self.start_lock:
            logger.info("Processing reserve_trex() command.")
            if self.is_reserved():
                if user == self.__reservation['user']:
                    # the same user is asking and already has the reservation: refresh it
                    logger.info("the same user is asking and already has the resrvation. Re-reserving TRex.")
                    return True
                logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
                return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
                    res_user = self.__reservation['user']) )
            elif self.trex.get_status() != TRexStatus.Idle:
                logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
                return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when TRex run finished.')
            else:
                logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
                self.__reservation = {'user' : user, 'since' : time.ctime()}
                logger.debug("Reservation details: "+ str(self.__reservation))
                return True
    def cancel_reservation (self, user):
        """Release the reservation; only the holding user may cancel it."""
        with self.start_lock:
            logger.info("Processing cancel_reservation() command.")
            if self.is_reserved():
                if self.__reservation['user'] == user:
                    logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
                    self.__reservation = None
                    return True
                else:
                    logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
                    return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied")  # raise at client TRexRequestDenied
            else:
                logger.info("TRex is not reserved to anyone. No need to cancel anything")
                assert(self.__reservation is None)
                return False
    def start_trex(self, trex_cmd_options, user, block_to_success = True, timeout = 40, stateless = False, debug_image = False, trex_args = ''):
        """Launch a TRex run for user; returns a run seq number or a Fault on denial/error."""
        self.trex.zmq_error = None
        with self.start_lock:
            logger.info("Processing start_trex() command.")
            if self.is_reserved():
                # check if this is not the user to which TRex is reserved
                if self.__reservation['user'] != user:
                    logger.info("TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
                    return Fault(-33, "TRex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))  # raise at client TRexRequestDenied
            elif self.trex.get_status() != TRexStatus.Idle:
                err = 'TRex is already taken, cannot create another run until done.'
                logger.info(err)
                return Fault(-13, err)  # raise at client TRexInUseError
            try:
                server_cmd_data, zmq_port = self.generate_run_cmd(stateless = stateless, debug_image = debug_image, trex_args = trex_args, **trex_cmd_options)
                self.trex.start_trex(self.TREX_PATH, server_cmd_data, zmq_port)
                logger.info("TRex session has been successfully initiated.")
                if block_to_success:
                    # delay server response until TRex is at 'Running' state.
                    start_time = time.time()
                    trex_state = None
                    while (time.time() - start_time) < timeout :
                        trex_state = self.trex.get_status()
                        if trex_state != TRexStatus.Starting:
                            break
                        else:
                            time.sleep(0.5)
                        self.assert_zmq_ok()
                    # check for TRex run started normally
                    if trex_state == TRexStatus.Starting:  # reached timeout
                        logger.warning("TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.")
                        return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.')  # raise at client TRexWarning
                    elif trex_state == TRexStatus.Idle:
                        return Fault(-11, self.trex.get_verbose_status())  # raise at client TRexError
                # reach here only if TRex is at 'Running' state
                self.trex.gen_seq()
                return self.trex.get_seq()  # return unique seq number to client
            except TypeError as e:
                logger.error("TRex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
                raise TypeError('TRex -f (traffic generation .yaml file) and -c (num of cores) must be specified. %s' % e)
    def stop_trex(self, seq, with_tb = False):
        """Abort the run identified by seq; only the initiating client may stop it."""
        logger.info("Processing stop_trex() command.")
        if self.trex.get_seq()== seq:
            logger.debug("Abort request legit since seq# match")
            return self.trex.stop_trex(with_tb)
        else:
            if self.trex.get_status() != TRexStatus.Idle:
                logger.warning("Abort request is only allowed to process initiated the run. Request denied.")
                return Fault(-33, 'Abort request is only allowed to process initiated the run. Request denied.')  # raise at client TRexRequestDenied
            else:
                return False
    def force_trex_kill(self, with_tb = False):
        """Unconditionally abort the current TRex session (no seq check)."""
        tb_print = ''
        if with_tb:
            tb_print = ' (with traceback)'
        logger.info("Processing force_trex_kill() command%s." % tb_print)
        return self.trex.stop_trex(with_tb)
    # returns list of tuples (pid, command line) of running TRex(es)
    def get_trex_cmds(self):
        logger.info('Processing get_trex_cmds() command.')
        ret_code, stdout, stderr = run_command('ps -u root --format pid,comm,cmd')
        if ret_code:
            raise Exception('Failed to determine running processes. Stdout: %s. Stderr: %s' % (stdout, stderr))
        trex_cmds_list = []
        for line in stdout.splitlines():
            pid, proc_name, full_cmd = line.strip().split(' ', 2)
            pid = pid.strip()
            full_cmd = full_cmd.strip()
            if proc_name.find('_t-rex-64') >= 0:
                trex_cmds_list.append((pid, full_cmd))
        return trex_cmds_list
    # Silently tries to kill TRexes with given signal.
    # Responsibility of client to verify with get_trex_cmds.
    def kill_all_trexes(self, signal_name):
        logger.info('Processing kill_all_trexes() command.')
        trex_cmds_list = self.get_trex_cmds()
        for pid, cmd in trex_cmds_list:
            logger.info('Killing with signal %s process %s %s' % (signal_name, pid, cmd))
            try:
                os.kill(int(pid), signal_name)
            except OSError as e:
                if e.errno == errno.ESRCH:
                    logger.info('No such process, ignoring.')
                # NOTE(review): nesting reconstructed from a flattened source; the
                # 'ignoring' log suggests the original may have skipped re-raising
                # on ESRCH (e.g. 'else: raise' or 'continue') — verify upstream.
                raise
    def wait_until_kickoff_finish (self, timeout = 40):
        # block until TRex exits Starting state
        logger.info("Processing wait_until_kickoff_finish() command.")
        start_time = time.time()
        while (time.time() - start_time) < timeout :
            self.assert_zmq_ok()
            trex_state = self.trex.get_status()
            if trex_state != TRexStatus.Starting:
                return
            time.sleep(0.1)
        return Fault(-12, 'TimeoutError: TRex initiation outcome could not be obtained, since TRex stays at Starting state beyond defined timeout.')  # raise at client TRexWarning
    def get_running_info (self):
        logger.info("Processing get_running_info() command.")
        self.assert_zmq_ok(check_alive = False)
        return self.trex.get_running_info()
    def get_latest_dump(self):
        logger.info("Processing get_latest_dump() command.")
        self.assert_zmq_ok(check_alive = False)
        return self.trex.get_latest_dump()
    def generate_run_cmd (self, iom = 0, export_path = None, stateless = False, debug_image = False, trex_args = '', **kwargs):
        """Translate RPC kwargs into the t-rex-64 shell command; returns ((cmd, log, duration), zmq_port)."""
        if 'results_file_path' in kwargs:
            export_path = kwargs['results_file_path']
            del kwargs['results_file_path']
        elif export_path is None:
            export_path = self.DEFAULT_TREX_LOG
        if stateless:
            kwargs['i'] = True
        # adding additional options to the command
        trex_cmd_options = ''
        for key, value in kwargs.items():
            tmp_key = key.replace('_','-').lstrip('-')
            dash = ' -' if (len(key)==1) else ' --'
            if value is True:
                # boolean flag: emit the option with no value
                trex_cmd_options += (dash + tmp_key)
            elif value is False:
                continue
            else:
                trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
        if trex_args:
            trex_cmd_options += ' %s' % trex_args
        zmq_port = self._check_zmq_port(trex_cmd_options)
        if not stateless:
            if 'f' not in kwargs:
                raise Exception('Argument -f should be specified in stateful command')
            if 'd' not in kwargs:
                raise Exception('Argument -d should be specified in stateful command')
        cmd = "{nice}{run_command}{debug_image} --iom {io} {cmd_options} --no-key".format(   # -- iom 0 disables the periodic log to the screen (not needed)
            nice = '' if self.trex_nice == 0 else 'nice -n %s ' % self.trex_nice,
            run_command = self.TREX_START_CMD,
            debug_image = '-debug' if debug_image else '',
            cmd_options = trex_cmd_options,
            io = iom)
        logger.info("TREX FULL COMMAND: {command}".format(command = cmd) )
        # save export_path for get_trex_log()
        self.trex_log = export_path
        return (cmd, export_path, kwargs.get('d', 0)), zmq_port
    def _check_zmq_port(self, trex_cmd_options):
        """Validate the platform config and return the ZMQ publisher port to listen on."""
        parser = ArgumentParser()
        parser.add_argument('--cfg', default = self.DEFAULT_TREX_CFG)
        args, _ = parser.parse_known_args(shlex.split(trex_cmd_options))
        if not os.path.exists(args.cfg):
            raise Exception('Platform config file "%s" does not exist!' % args.cfg)
        with open(args.cfg) as f:
            trex_cfg = yaml.safe_load(f.read())
        if type(trex_cfg) is not list:
            raise Exception('Platform config file "%s" content should be array.' % args.cfg)
        if not len(trex_cfg):
            raise Exception('Platform config file "%s" content should be array with one element.' % args.cfg)
        trex_cfg = trex_cfg[0]
        if 'enable_zmq_pub' in trex_cfg and trex_cfg['enable_zmq_pub'] == False:
            raise Exception('TRex daemon expects ZMQ publisher to be enabled. Please change "enable_zmq_pub" to true.')
        # save used trex config file (default:/etc/trex_cfg.yaml)
        self.trex_cfg = args.cfg
        if 'zmq_pub_port' in trex_cfg:
            return trex_cfg['zmq_pub_port']
        return self.trex_zmq_port
    def __check_trex_path_validity(self):
        """Abort the daemon (exit -1) unless TREX_PATH holds an executable t-rex-64."""
        # check for executable existance
        if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
            print("The provided TRex path do not contain an executable TRex file.\nPlease check the path and retry.")
            logger.error("The provided TRex path do not contain an executable TRex file")
            exit(-1)
        # check for executable permissions
        st = os.stat(self.TREX_PATH+'/t-rex-64')
        if not bool(st.st_mode & (stat.S_IXUSR ) ):
            print("The provided TRex path do not contain an TRex file with execution privileges.\nPlease check the files permissions and retry.")
            logger.error("The provided TRex path do not contain an TRex file with execution privileges")
            exit(-1)
        else:
            return
    def __check_files_path_validity(self):
        """Ensure the uploaded-files directory exists and is writable, else exit -1."""
        # first, check for path existance. otherwise, try creating it with appropriate credentials
        if not os.path.exists(self.trex_files_path):
            try:
                # NOTE(review): mode 0o660 lacks the execute (search) bit directories
                # need to be entered by non-root users; likely intended 0o770.
                os.makedirs(self.trex_files_path, 0o660)
                return
            except os.error as inst:
                print("The provided files path does not exist and cannot be created with needed access credentials using root user.\nPlease check the path's permissions and retry.")
                logger.error("The provided files path does not exist and cannot be created with needed access credentials using root user.")
                exit(-1)
        elif os.access(self.trex_files_path, os.W_OK):
            return
        else:
            print("The provided files path has insufficient access credentials for root user.\nPlease check the path's permissions and retry.")
            logger.error("The provided files path has insufficient access credentials for root user")
            exit(-1)
class CTRex(object):
    """State holder for the single TRex process managed by the daemon.

    Tracks run status, the launcher thread (AsynchronousTRexSession) and the
    ZMQ monitor thread, and exposes a lock-protected dump of the latest
    statistics published by TRex over ZMQ.

    NOTE(review): indentation in this listing was reconstructed from a
    flattened source; branch nesting should be verified against the original.
    """
    def __init__(self):
        self.status = TRexStatus.Idle            # current run state (Idle/Starting/Running)
        self.verbose_status = 'TRex is Idle'     # human-readable state description
        self.errcode = None                      # last error code to report to clients, if any
        self.session = None                      # AsynchronousTRexSession launcher thread
        self.zmq_monitor = None                  # ZmqMonitorSession stats-subscriber thread
        self.__zmq_dump = {}                     # latest stats dump, guarded by zmq_dump_lock
        self.zmq_dump_lock = threading.Lock()
        self.zmq_error = None                    # error raised inside the ZMQ thread, if any
        self.seq = None                          # random token identifying the current run's owner
        self.expect_trex = threading.Event()
    def __del__(self):
        # best-effort join of helper threads on garbage collection
        if self.zmq_monitor:
            self.zmq_monitor.join()
        if self.session:
            self.session.join()
    def get_status(self):
        return self.status
    def set_status(self, new_status):
        self.status = new_status
    def get_verbose_status(self):
        return self.verbose_status
    def set_verbose_status(self, new_status):
        self.verbose_status = new_status
    def gen_seq (self):
        # generate a new run token handed to the client that started TRex
        self.seq = randrange(1,1000)
    def get_seq (self):
        return self.seq
    def get_latest_dump(self):
        # serialize the latest ZMQ stats dump under the lock
        with self.zmq_dump_lock:
            return json.dumps(self.__zmq_dump)
    def update_zmq_dump_key(self, key, val):
        with self.zmq_dump_lock:
            self.__zmq_dump[key] = val
    def clear_zmq_dump(self):
        with self.zmq_dump_lock:
            self.__zmq_dump = {}
    def get_running_info (self):
        """Return the latest stats dump, or a Fault describing why none is available."""
        if self.status == TRexStatus.Running:
            return self.get_latest_dump()
        else:
            logger.info("TRex isn't running. Running information isn't available.")
            if self.status == TRexStatus.Idle:
                if self.errcode is not None:    # some error occured
                    logger.info("TRex is in Idle state, with errors. returning fault")
                    return Fault(self.errcode, self.verbose_status)  # raise at client relevant exception, depending on the reason the error occured
                else:
                    logger.info("TRex is in Idle state, no errors. returning {}")
                    return u'{}'
            return Fault(-12, self.verbose_status)  # raise at client TRexWarning, indicating TRex is back to Idle state or still in Starting state
    def stop_trex(self, with_tb):
        """Abort the current TRex run; returns True on abort, False/Fault when idle."""
        if self.status == TRexStatus.Idle:
            # TRex isn't running, nothing to abort
            logger.info("TRex isn't running. No need to stop anything.")
            if self.errcode is not None:  # some error occurred, notify client despite TRex already stopped
                return Fault(self.errcode, self.verbose_status)  # raise at client relevant exception, depending on the reason the error occured
            return False
        else:
            # handle stopping TRex's run
            self.session.join(with_tb = with_tb)
            logger.info("TRex session has been successfully aborted.")
            return True
    def start_trex(self, trex_launch_path, trex_cmd, zmq_port):
        """Move to Starting state, (re)start the ZMQ monitor, and launch TRex asynchronously."""
        self.set_status(TRexStatus.Starting)
        logger.info("TRex running state changed to 'Starting'.")
        self.set_verbose_status('TRex is starting (data is not available yet)')
        if not self.zmq_monitor:
            logger.info('Starting ZMQ monitor on port %s' % zmq_port)
            self.zmq_monitor = ZmqMonitorSession(self, zmq_port)
            self.zmq_monitor.start()
        else:
            # restart the monitor if it died or the publisher port changed
            if not self.zmq_monitor.is_alive() or self.zmq_monitor.zmq_port != zmq_port:
                if not self.zmq_monitor.is_alive():
                    logger.info('Old ZMQ monitor is dead, starting new')
                else:
                    logger.info('ZMQ port is changed to %s, starting new monitor' % zmq_port)
                self.zmq_monitor.join()
                self.zmq_monitor = ZmqMonitorSession(self, zmq_port)
                self.zmq_monitor.start()
            self.zmq_monitor.first_dump = True
        self.errcode = None
        self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
        self.session.start()
        self.expect_trex.set()
def generate_trex_parser ():
    """Build and return the ArgumentParser for the TRex daemon command line."""
    this_dir = os.path.dirname(__file__)
    # the daemon lives three directory levels below the TRex package root
    trex_root = os.path.abspath(os.path.join(this_dir, os.pardir, os.pardir, os.pardir))
    files_root = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
    parser = ArgumentParser(description = 'Run server application for TRex traffic generator',
                            formatter_class = RawTextHelpFormatter,
                            usage = """
trex_daemon_server [options]
""" )
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
    parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", action="store",
                        help="Select port on which the daemon runs.\nDefault port is 8090.")
    parser.add_argument("-z", "--zmq-port", type=int, metavar="PORT", default = 4500, action="store",
                        help="default TRex ZMQ publisher port to listen to (if not specified in config file).\nDefault port is 4500.")
    parser.add_argument("-t", "--trex-path", metavar="PATH", default = trex_root, action="store",
                        help="Specify the compiled TRex directory from which TRex would run.\nDefault path is: %s." % trex_root)
    parser.add_argument("-f", "--files-path", metavar="PATH", default = files_root, action="store",
                        help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: %s." % files_root)
    parser.add_argument("--trex-host", metavar="HOST", default = '0.0.0.0', action="store",
                        help="Specify a hostname to be registered as the TRex server.\nDefault is to bind all IPs using '0.0.0.0'.")
    parser.add_argument('-n', '--nice', dest='nice', action="store", default = -19, type = int,
                        help="Determine the priority TRex process [-20, 19] (lower = higher priority)\nDefault is -19.")
    return parser
trex_parser = generate_trex_parser()
def do_main_program():
    """Parse command-line options, construct the CTRexServer and run it."""
    opts = trex_parser.parse_args()
    server = CTRexServer(trex_path=opts.trex_path,
                         trex_files_path=opts.files_path,
                         trex_host=opts.trex_host,
                         trex_daemon_port=opts.daemon_port,
                         trex_zmq_port=opts.zmq_port,
                         trex_nice=opts.nice)
    server.start()


if __name__ == "__main__":
    do_main_program()
| true | true |
f72280c4ecf19e33278ffe74061f44bbb7b21709 | 7,109 | py | Python | tensorflow/contrib/factorization/python/ops/gmm.py | harunpehlivan/tensorflow | 376e2cfdab31f4da251ea2e50992a9bf97fd171b | [
"Apache-2.0"
] | 24 | 2018-02-01T15:49:22.000Z | 2021-01-11T16:31:18.000Z | tensorflow/contrib/factorization/python/ops/gmm.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 3 | 2018-05-09T11:31:58.000Z | 2021-01-27T12:26:21.000Z | tensorflow/contrib/factorization/python/ops/gmm.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 11 | 2018-04-02T03:37:08.000Z | 2020-10-20T09:32:12.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops as logging
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.training import session_run_hook
def _streaming_sum(scalar_tensor):
    """Build a running-sum metric.

    Returns a (value, update_op) pair: a local variable initialized to 0.0
    and the op that accumulates `scalar_tensor` into it.
    """
    total = framework.local_variable(constant_op.constant(0.0))
    return total, total.assign_add(scalar_tensor)
class _InitializeClustersHook(session_run_hook.SessionRunHook):
    """Runs cluster initialization on the chief; other workers wait for it."""

    def __init__(self, init_op, is_initialized_op, is_chief):
        self._init_op = init_op
        self._is_chief = is_chief
        self._is_initialized_op = is_initialized_op

    def after_create_session(self, session, _):
        # Both ops must belong to the graph the session was created from.
        assert self._init_op.graph == ops.get_default_graph()
        assert self._is_initialized_op.graph == self._init_op.graph
        done = False
        while not done:
            try:
                if session.run(self._is_initialized_op):
                    done = True
                elif self._is_chief:
                    # Chief performs the initialization itself.
                    session.run(self._init_op)
                else:
                    # Non-chief workers poll until the chief has initialized.
                    time.sleep(1)
            except RuntimeError as e:
                # Transient session errors are logged and the loop retries.
                logging.info(e)
class GMM(estimator.Estimator):
    """An estimator for GMM clustering."""
    # Keys used in the predictions and eval-metrics dictionaries.
    SCORES = 'scores'
    ASSIGNMENTS = 'assignments'
    ALL_SCORES = 'all_scores'
    def __init__(self,
                 num_clusters,
                 model_dir=None,
                 random_seed=0,
                 params='wmc',
                 initial_clusters='random',
                 covariance_type='full',
                 config=None):
        """Creates a model for running GMM training and inference.
        Args:
          num_clusters: number of clusters to train.
          model_dir: the directory to save the model results and log files.
          random_seed: Python integer. Seed for PRNG used to initialize centers.
          params: Controls which parameters are updated in the training process.
            Can contain any combination of "w" for weights, "m" for means,
            and "c" for covars.
          initial_clusters: specifies how to initialize the clusters for training.
            See gmm_ops.gmm for the possible values.
          covariance_type: one of "full", "diag".
          config: See Estimator
        """
        self._num_clusters = num_clusters
        self._params = params
        self._training_initial_clusters = initial_clusters
        self._covariance_type = covariance_type
        self._training_graph = None
        self._random_seed = random_seed
        super(GMM, self).__init__(
            model_fn=self._model_builder(), model_dir=model_dir, config=config)
    def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
        """See BaseEstimator.predict.

        Yields only the cluster assignment for each predicted example.
        """
        results = self.predict(input_fn=input_fn,
                               batch_size=batch_size,
                               outputs=outputs)
        for result in results:
            yield result[GMM.ASSIGNMENTS]
    def score(self, input_fn=None, batch_size=None, steps=None):
        """Predict total sum of distances to nearest clusters.
        Note that this function is different from the corresponding one in sklearn
        which returns the negative of the sum of distances.
        Args:
          input_fn: see predict.
          batch_size: see predict.
          steps: see predict.
        Returns:
          Total sum of distances to nearest clusters.
        """
        results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
                                steps=steps)
        return np.sum(results[GMM.SCORES])
    def weights(self):
        """Returns the cluster weights."""
        return checkpoint_utils.load_variable(
            self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
    def clusters(self):
        """Returns cluster centers."""
        clusters = checkpoint_utils.load_variable(
            self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
        return np.squeeze(clusters, 1)
    def covariances(self):
        """Returns the covariances."""
        return checkpoint_utils.load_variable(
            self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
    def _parse_tensor_or_dict(self, features):
        # Dict-valued features are concatenated column-wise in deterministic
        # (sorted-key) order; plain tensors pass through unchanged.
        if isinstance(features, dict):
            return array_ops.concat([features[k] for k in sorted(features.keys())],
                                    1)
        return features
    def _model_builder(self):
        """Creates a model function."""
        def _model_fn(features, labels, mode, config):
            """Model function."""
            # GMM is unsupervised: no labels are ever passed in.
            assert labels is None, labels
            (all_scores,
             model_predictions,
             losses, training_op,
             init_op,
             is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
                                           self._training_initial_clusters,
                                           self._num_clusters, self._random_seed,
                                           self._covariance_type,
                                           self._params)
            # Tie a global-step increment to every run of the training op.
            incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
            loss = math_ops.reduce_sum(losses)
            training_op = with_dependencies([training_op, incr_step], loss)
            # The hook initializes clusters (chief) or waits for them (workers).
            training_hooks = [_InitializeClustersHook(
                init_op, is_initialized, config.is_chief)]
            predictions = {
                GMM.ALL_SCORES: all_scores[0],
                GMM.ASSIGNMENTS: model_predictions[0][0],
            }
            eval_metric_ops = {
                GMM.SCORES: _streaming_sum(loss),
            }
            return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                           eval_metric_ops=eval_metric_ops,
                                           loss=loss, train_op=training_op,
                                           training_hooks=training_hooks)
        return _model_fn
| 37.81383 | 85 | 0.683218 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops as logging
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.training import session_run_hook
def _streaming_sum(scalar_tensor):
    """Create a sum metric (local variable starting at 0.0) and its update op."""
    sum_metric = framework.local_variable(constant_op.constant(0.0))
    sum_update = sum_metric.assign_add(scalar_tensor)
    return sum_metric, sum_update
class _InitializeClustersHook(session_run_hook.SessionRunHook):
    """Initializes clusters (on the chief) or waits for cluster initialization."""
    def __init__(self, init_op, is_initialized_op, is_chief):
        self._init_op = init_op
        self._is_chief = is_chief
        self._is_initialized_op = is_initialized_op
    def after_create_session(self, session, _):
        # Both ops must live in the graph this session was created from.
        assert self._init_op.graph == ops.get_default_graph()
        assert self._is_initialized_op.graph == self._init_op.graph
        while True:
            try:
                if session.run(self._is_initialized_op):
                    break
                elif self._is_chief:
                    # Only the chief runs the initialization op.
                    session.run(self._init_op)
                else:
                    # Workers poll until the chief has initialized the clusters.
                    time.sleep(1)
            except RuntimeError as e:
                # Transient session errors are logged and the loop retries.
                logging.info(e)
class GMM(estimator.Estimator):
    """An estimator for GMM clustering."""
    # Keys used in the predictions and eval-metrics dictionaries.
    SCORES = 'scores'
    ASSIGNMENTS = 'assignments'
    ALL_SCORES = 'all_scores'
    def __init__(self,
                 num_clusters,
                 model_dir=None,
                 random_seed=0,
                 params='wmc',
                 initial_clusters='random',
                 covariance_type='full',
                 config=None):
        """Creates a model for running GMM training and inference.

        Args:
          num_clusters: number of clusters to train.
          model_dir: directory to save model results and log files.
          random_seed: Python integer. Seed for the PRNG used to initialize
            centers.
          params: which parameters are updated during training; any combination
            of "w" (weights), "m" (means) and "c" (covariances).
          initial_clusters: how to initialize clusters (see gmm_ops.gmm).
          covariance_type: one of "full", "diag".
          config: see Estimator.
        """
        self._num_clusters = num_clusters
        self._params = params
        self._training_initial_clusters = initial_clusters
        self._covariance_type = covariance_type
        self._training_graph = None
        self._random_seed = random_seed
        super(GMM, self).__init__(
            model_fn=self._model_builder(), model_dir=model_dir, config=config)
    def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
        """Like predict(), but yields only the cluster assignment per example."""
        results = self.predict(input_fn=input_fn,
                               batch_size=batch_size,
                               outputs=outputs)
        for result in results:
            yield result[GMM.ASSIGNMENTS]
    def score(self, input_fn=None, batch_size=None, steps=None):
        """Return the total sum of distances to the nearest clusters.

        Note: unlike sklearn's score(), this is not negated.
        """
        results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
                                steps=steps)
        return np.sum(results[GMM.SCORES])
    def weights(self):
        """Returns the cluster weights loaded from the latest checkpoint."""
        return checkpoint_utils.load_variable(
            self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
    def clusters(self):
        """Returns cluster centers loaded from the latest checkpoint."""
        clusters = checkpoint_utils.load_variable(
            self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
        return np.squeeze(clusters, 1)
    def covariances(self):
        """Returns the covariances loaded from the latest checkpoint."""
        return checkpoint_utils.load_variable(
            self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
    def _parse_tensor_or_dict(self, features):
        # Dict-valued features are concatenated column-wise in deterministic
        # (sorted-key) order; plain tensors pass through unchanged.
        if isinstance(features, dict):
            return array_ops.concat([features[k] for k in sorted(features.keys())],
                                    1)
        return features
    def _model_builder(self):
        """Creates the Estimator model function."""
        def _model_fn(features, labels, mode, config):
            """Model function: builds the GMM graph for the given mode."""
            # GMM is unsupervised: no labels are ever passed in.
            assert labels is None, labels
            (all_scores,
             model_predictions,
             losses, training_op,
             init_op,
             is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
                                           self._training_initial_clusters,
                                           self._num_clusters, self._random_seed,
                                           self._covariance_type,
                                           self._params)
            # Tie a global-step increment to every run of the training op.
            incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
            loss = math_ops.reduce_sum(losses)
            training_op = with_dependencies([training_op, incr_step], loss)
            # The hook initializes clusters (chief) or waits for them (workers).
            training_hooks = [_InitializeClustersHook(
                init_op, is_initialized, config.is_chief)]
            predictions = {
                GMM.ALL_SCORES: all_scores[0],
                GMM.ASSIGNMENTS: model_predictions[0][0],
            }
            eval_metric_ops = {
                GMM.SCORES: _streaming_sum(loss),
            }
            return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                           eval_metric_ops=eval_metric_ops,
                                           loss=loss, train_op=training_op,
                                           training_hooks=training_hooks)
        return _model_fn
f7228106a822e4677b5701f46b4650fc9f6a1e11 | 306 | py | Python | python/test/testfiles.py | MarcosFernandez/gemtools | b17c4b973e57bbceafe72b6f81b2d4395eaa3b4a | [
"CNRI-Python"
] | 14 | 2015-01-06T20:25:01.000Z | 2021-06-17T20:35:34.000Z | python/test/testfiles.py | MarcosFernandez/gemtools | b17c4b973e57bbceafe72b6f81b2d4395eaa3b4a | [
"CNRI-Python"
] | 20 | 2015-01-23T15:02:35.000Z | 2020-05-17T03:25:35.000Z | python/test/testfiles.py | MarcosFernandez/gemtools | b17c4b973e57bbceafe72b6f81b2d4395eaa3b4a | [
"CNRI-Python"
] | 7 | 2015-11-09T18:30:14.000Z | 2020-04-08T13:53:30.000Z | #!/usr/bin/env python
import os
# Expose every fixture under the shared test-data directory, keyed by its
# bare file name and mapped to its absolute path.
__base_dir = os.path.dirname(os.path.abspath(__file__))
__testfiles_dir = os.path.realpath(__base_dir + "/../testdata")
testfiles = dict(
    (name, "%s/%s" % (__testfiles_dir, name))
    for name in os.listdir(__testfiles_dir))
| 21.857143 | 63 | 0.715686 |
import os
# Expose every fixture under the shared test-data directory, keyed by its
# bare file name and mapped to its absolute path.
# Fix: the __base_dir assignment had been truncated mid-expression
# ("th.abspath(__file__))"), which is a syntax error; it is reconstructed
# as the directory containing this module.
__base_dir = os.path.dirname(os.path.abspath(__file__))
__testfiles_dir = os.path.realpath(__base_dir + "/../testdata")
testfiles = {}
for file in os.listdir(__testfiles_dir):
    testfiles[file] = "%s/%s" % (__testfiles_dir, file)
| true | true |
f722817124e6e06306044d0dc374c72d96c7d4ec | 109,433 | py | Python | nova/tests/unit/conductor/test_conductor.py | orbitfp7/nova | 90f843cdc47b494cd568507820d45868765697b1 | [
"Apache-2.0"
] | 5 | 2017-06-23T07:37:39.000Z | 2020-10-21T07:07:50.000Z | nova/tests/unit/conductor/test_conductor.py | orbitfp7/nova | 90f843cdc47b494cd568507820d45868765697b1 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/conductor/test_conductor.py | orbitfp7/nova | 90f843cdc47b494cd568507820d45868765697b1 | [
"Apache-2.0"
] | 4 | 2017-06-23T07:37:43.000Z | 2020-12-28T09:57:22.000Z | # Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova.image import api as image_api
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova import utils
# Global oslo.config handle; report_interval is declared by nova.service.
CONF = cfg.CONF
CONF.import_opt('report_interval', 'nova.service')
# Placeholder image reference used when building fake instances in the tests.
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
    """Request context whose elevated() result is memoized.

    Tests record expectations against context.elevated(), so every call
    must hand back the same object for the match to succeed.
    """

    def elevated(self):
        """Return a consistent elevated context so we can detect it."""
        # Compute the elevated context lazily on first use, then reuse it.
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
    """Shared conductor proxy-API tests.

    Mixed into concrete test cases that provide ``self.conductor`` and
    ``self.conductor_manager`` (and may override ``self.db``); see the
    subclasses later in this file.
    """
    def setUp(self):
        super(_BaseTestCase, self).setUp()
        self.db = None
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        # Capture notifications in-memory instead of emitting them.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)
        def fake_deserialize_context(serializer, ctxt_dict):
            # Any context deserialized over RPC must round-trip to the one
            # this test created.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
    def _create_fake_instance(self, params=None, type_name='m1.tiny'):
        """Create and return a real instance DB record with benign defaults.

        ``params`` entries override the defaults before the row is created.
        """
        if not params:
            params = {}
        inst = {}
        inst['vm_state'] = vm_states.ACTIVE
        inst['image_ref'] = FAKE_IMAGE_REF
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['host'] = 'fake_host'
        type_id = flavors.get_flavor_by_name(type_name)['id']
        inst['instance_type_id'] = type_id
        inst['ami_launch_index'] = 0
        inst['memory_mb'] = 0
        inst['vcpus'] = 0
        inst['root_gb'] = 0
        inst['ephemeral_gb'] = 0
        inst['architecture'] = arch.X86_64
        inst['os_type'] = 'Linux'
        inst['availability_zone'] = 'fake-az'
        inst.update(params)
        return db.instance_create(self.context, inst)
    def _do_update(self, instance_uuid, **updates):
        """Apply ``updates`` to an instance via the conductor under test."""
        return self.conductor.instance_update(self.context, instance_uuid,
                                              updates, None)
    def test_instance_update(self):
        instance = self._create_fake_instance()
        new_inst = self._do_update(instance['uuid'],
                                   vm_state=vm_states.STOPPED)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.STOPPED)
        self.assertEqual(new_inst['vm_state'], instance['vm_state'])
    def test_instance_update_invalid_key(self):
        # NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
            self.conductor = utils.ExceptionHelper(self.conductor)
            self.assertRaises(KeyError,
                              self._do_update, 'any-uuid', foobar=1)
    def test_migration_get_in_progress_by_host_and_node(self):
        self.mox.StubOutWithMock(db,
                                 'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node')
        self.assertEqual(result, 'fake-result')
    def test_aggregate_metadata_get_by_host(self):
        self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
        db.aggregate_metadata_get_by_host(self.context, 'host',
                                          'key').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.aggregate_metadata_get_by_host(self.context,
                                                               'host', 'key')
        self.assertEqual(result, 'result')
    def test_bw_usage_update(self):
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_update(*update_args, update_cells=True)
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_update(*update_args,
                                                update_cells=True)
        self.assertEqual(result, 'foo')
    def test_provider_fw_rule_get_all(self):
        fake_rules = ['a', 'b', 'c']
        self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
        db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
        self.mox.ReplayAll()
        result = self.conductor.provider_fw_rule_get_all(self.context)
        self.assertEqual(result, fake_rules)
    def test_block_device_mapping_get_all_by_instance(self):
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        db.block_device_mapping_get_all_by_instance(
            self.context, fake_inst['uuid']).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.block_device_mapping_get_all_by_instance(
            self.context, fake_inst, legacy=False)
        self.assertEqual(result, 'fake-result')
    def test_vol_usage_update(self):
        self.mox.StubOutWithMock(db, 'vol_usage_update')
        self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
        fake_inst = {'uuid': 'fake-uuid',
                     'project_id': 'fake-project',
                     'user_id': 'fake-user',
                     'availability_zone': 'fake-az',
                     }
        db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
                            fake_inst['uuid'],
                            fake_inst['project_id'],
                            fake_inst['user_id'],
                            fake_inst['availability_zone'],
                            False).AndReturn('fake-usage')
        compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
        self.mox.ReplayAll()
        self.conductor.vol_usage_update(self.context, 'fake-vol',
                                        22, 33, 44, 55, fake_inst, None, False)
        # A single volume.usage notification should have been emitted.
        self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('conductor.%s' % self.conductor_manager.host,
                         msg.publisher_id)
        self.assertEqual('volume.usage', msg.event_type)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake-info', msg.payload)
    def test_compute_node_create(self):
        self.mox.StubOutWithMock(db, 'compute_node_create')
        db.compute_node_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_create(self.context,
                                                    'fake-values')
        self.assertEqual(result, 'fake-result')
    def test_compute_node_update(self):
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_update')
        db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
            AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_update(self.context, node,
                                                    {'fake': 'values'})
        self.assertEqual(result, 'fake-result')
    def test_compute_node_delete(self):
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_delete')
        db.compute_node_delete(self.context, node['id']).AndReturn(None)
        self.mox.ReplayAll()
        result = self.conductor.compute_node_delete(self.context, node)
        self.assertIsNone(result)
    def test_task_log_get(self):
        self.mox.StubOutWithMock(db, 'task_log_get')
        db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
                        'state').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                             'end', 'host', 'state')
        self.assertEqual(result, 'result')
    def test_task_log_get_with_no_state(self):
        self.mox.StubOutWithMock(db, 'task_log_get')
        db.task_log_get(self.context, 'task', 'begin', 'end',
                        'host', None).AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                             'end', 'host', None)
        self.assertEqual(result, 'result')
    def test_task_log_begin_task(self):
        self.mox.StubOutWithMock(db, 'task_log_begin_task')
        # The conductor is expected to elevate the context before the DB call.
        db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
                               'end', 'host', 'items',
                               'message').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_begin_task(
            self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
        self.assertEqual(result, 'result')
    def test_task_log_end_task(self):
        self.mox.StubOutWithMock(db, 'task_log_end_task')
        db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
                             'host', 'errors', 'message').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_end_task(
            self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
        self.assertEqual(result, 'result')
    @mock.patch.object(notifications, 'audit_period_bounds')
    @mock.patch.object(notifications, 'bandwidth_usage')
    @mock.patch.object(compute_utils, 'notify_about_instance_usage')
    def test_notify_usage_exists(self, mock_notify, mock_bw, mock_audit):
        info = {
            'audit_period_beginning': 'start',
            'audit_period_ending': 'end',
            'bandwidth': 'bw_usage',
            'image_meta': {},
            'extra': 'info',
        }
        instance = objects.Instance(id=1, system_metadata={})
        mock_audit.return_value = ('start', 'end')
        mock_bw.return_value = 'bw_usage'
        self.conductor.notify_usage_exists(self.context, instance, False, True,
                                           system_metadata={},
                                           extra_usage_info=dict(extra='info'))
        class MatchInstance(object):
            # Matches any instance argument with the same id as `instance`,
            # since the conductor passes a copy rather than the same object.
            def __eq__(self, thing):
                return thing.id == instance.id
        notifier = self.conductor_manager.notifier
        mock_audit.assert_called_once_with(False)
        mock_bw.assert_called_once_with(MatchInstance(), 'start', True)
        mock_notify.assert_called_once_with(notifier, self.context,
                                            MatchInstance(),
                                            'exists', system_metadata={},
                                            extra_usage_info=info)
    def test_security_groups_trigger_members_refresh(self):
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_members_refresh')
        self.conductor_manager.security_group_api.trigger_members_refresh(
            self.context, [1, 2, 3])
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_members_refresh(self.context,
                                                               [1, 2, 3])
    def test_get_ec2_ids(self):
        expected = {
            'instance-id': 'ec2-inst-id',
            'ami-id': 'ec2-ami-id',
            'kernel-id': 'ami-kernel-ec2-kernelid',
            'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
        }
        inst = {
            'uuid': 'fake-uuid',
            'kernel_id': 'ec2-kernelid',
            'ramdisk_id': 'ec2-ramdiskid',
            'image_ref': 'fake-image',
        }
        self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
        self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
        self.mox.StubOutWithMock(ec2utils, 'image_type')
        ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
            expected['instance-id'])
        ec2utils.glance_id_to_ec2_id(self.context,
                                     inst['image_ref']).AndReturn(
            expected['ami-id'])
        # Kernel and ramdisk ids go through the same mapping helpers.
        for image_type in ['kernel', 'ramdisk']:
            image_id = inst['%s_id' % image_type]
            ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
            ec2utils.glance_id_to_ec2_id(self.context, image_id,
                                         'ami-' + image_type).AndReturn(
                'ami-%s-ec2-%sid' % (image_type, image_type))
        self.mox.ReplayAll()
        result = self.conductor.get_ec2_ids(self.context, inst)
        self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'], None)
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 1, 'device_name': 'foo',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
fake_bdm2 = {'id': 1, 'device_name': 'foo2',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=False)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=False)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
None, False)
def test_instance_get_all_by_filters_use_slave(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=True)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None,
use_slave=True)
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host',
None, None)
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node', None)
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(messaging.ExpectedException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(host=None, topic=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def _test_object_action(self, is_classmethod, raise_exception):
class TestObject(obj_base.NovaObject):
def foo(self, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj = TestObject()
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
tuple(), {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, True, True)
    def test_object_action_copies_object(self):
        """object_action must act on a copy so nested changes are reported."""
        class TestObject(obj_base.NovaObject):
            fields = {'dict': fields.DictOfStringsField()}

            def touch_dict(self, context):
                # Mutate the nested dict, then clear the change list so
                # only a compare against the pre-call copy can detect it.
                self.dict['foo'] = 'bar'
                self.obj_reset_changes()

        obj = TestObject()
        obj.dict = {}
        obj.obj_reset_changes()
        updates, result = self.conductor.object_action(
            self.context, obj, 'touch_dict', tuple(), {})
        # NOTE(danms): If conductor did not properly copy the object, then
        # the new and reference copies of the nested dict object will be
        # the same, and thus 'dict' will not be reported as changed
        self.assertIn('dict', updates)
        self.assertEqual({'foo': 'bar'}, updates['dict'])
def _test_expected_exceptions(self, db_method, conductor_method, errors,
*args, **kwargs):
# Tests that expected exceptions are handled properly.
for error in errors:
with mock.patch.object(db, db_method, side_effect=error):
self.assertRaises(messaging.ExpectedException,
conductor_method,
self.context, *args, **kwargs)
def test_action_event_start_expected_exceptions(self):
error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
self._test_expected_exceptions(
'action_event_start', self.conductor.action_event_start, [error],
{'foo': 'bar'})
def test_action_event_finish_expected_exceptions(self):
errors = (exc.InstanceActionNotFound(request_id='1',
instance_uuid='2'),
exc.InstanceActionEventNotFound(event='1', action_id='2'))
self._test_expected_exceptions(
'action_event_finish', self.conductor.action_event_finish,
errors, {'foo': 'bar'})
def test_instance_update_expected_exceptions(self):
errors = (exc.InvalidUUID(uuid='foo'),
exc.InstanceNotFound(instance_id=1),
exc.UnexpectedTaskStateError(expected='foo',
actual='bar'))
self._test_expected_exceptions(
'instance_update', self.conductor.instance_update,
errors, None, {'foo': 'bar'}, None)
def test_instance_get_by_uuid_expected_exceptions(self):
error = exc.InstanceNotFound(instance_id=1)
self._test_expected_exceptions(
'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
[error], None, [])
def test_aggregate_host_add_expected_exceptions(self):
error = exc.AggregateHostExists(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_add', self.conductor.aggregate_host_add,
[error], {'id': 1}, None)
def test_aggregate_host_delete_expected_exceptions(self):
error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_delete', self.conductor.aggregate_host_delete,
[error], {'id': 1}, None)
def test_service_update_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_update',
self.conductor.service_update,
[error], {'id': 1}, None)
def test_service_destroy_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_destroy',
self.conductor.service_destroy,
[error], 1)
    def _setup_aggregate_with_host(self):
        """Create aggregate 'foo' and add host 'bar' to it via conductor.

        :returns: the refreshed aggregate DB record (so 'hosts' reflects
                  the addition).
        """
        aggregate_ref = db.aggregate_create(self.context.elevated(),
                {'name': 'foo'}, metadata={'availability_zone': 'foo'})

        self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')

        # Re-read so the returned record includes the new host.
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])

        return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertNotIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
    def test_network_migrate_instance_start(self):
        """Conductor proxies migrate_instance_start to the network API."""
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_start')
        self.conductor_manager.network_api.migrate_instance_start(self.context,
                                                                  'instance',
                                                                  'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_start(self.context,
                                                      'instance',
                                                      'migration')
    def test_network_migrate_instance_finish(self):
        """Conductor proxies migrate_instance_finish to the network API."""
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_finish')
        self.conductor_manager.network_api.migrate_instance_finish(
            self.context, 'instance', 'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_finish(self.context,
                                                       'instance',
                                                       'migration')
    def test_instance_destroy(self):
        """instance_destroy forwards to db.instance_destroy by uuid."""
        self.mox.StubOutWithMock(db, 'instance_destroy')
        db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_destroy(self.context,
                                                 {'uuid': 'fake-uuid'})
        self.assertEqual(result, 'fake-result')
    def test_compute_unrescue(self):
        """compute_unrescue forwards to the compute API's unrescue."""
        self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                                 'unrescue')
        self.conductor_manager.compute_api.unrescue(self.context, 'instance')
        self.mox.ReplayAll()
        self.conductor.compute_unrescue(self.context, 'instance')
    def test_instance_get_active_by_window_joined(self):
        """Active-by-window query passes all filters through to the db."""
        self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
        db.instance_get_active_by_window_joined(self.context, 'fake-begin',
                                                'fake-end', 'fake-proj',
                                                'fake-host')
        self.mox.ReplayAll()
        self.conductor.instance_get_active_by_window_joined(
            self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
    def test_instance_fault_create(self):
        """instance_fault_create forwards values to db.instance_fault_create."""
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        db.instance_fault_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_fault_create(self.context,
                                                      'fake-values')
        self.assertEqual(result, 'fake-result')
    def test_action_event_start(self):
        """action_event_start forwards to db.action_event_start."""
        self.mox.StubOutWithMock(db, 'action_event_start')
        db.action_event_start(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_start(self.context, {})
    def test_action_event_finish(self):
        """action_event_finish forwards to db.action_event_finish."""
        self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_finish(self.context, {})
    def test_agent_build_get_by_triple(self):
        """agent_build_get_by_triple forwards hv/os/arch to the db layer."""
        self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
        db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
                                     'fake-arch').AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.agent_build_get_by_triple(self.context,
                                                          'fake-hv',
                                                          'fake-os',
                                                          'fake-arch')
        self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests.

    Runs the shared _BaseTestCase checks against a live conductor
    service reached through conductor_rpcapi.ConductorAPI.
    """
    def setUp(self):
        """Start a conductor service and talk to it over RPC."""
        super(ConductorRPCAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()

    def test_block_device_mapping_update_or_create(self):
        """create=True/False/unset map to create/update/upsert db calls."""
        fake_bdm = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm)

    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Stub db.<name> and exercise it via service_get_all_by.

        :param name: db API method name to stub
        :param dbargs: positional args the db method must receive
        :param condargs: kwargs passed to conductor service_get_all_by
        :param db_result_listified: expect the result wrapped in a list
        :param db_exception: if set, the db stub raises this and the same
                             exception class must surface to the caller
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)

    def test_service_get_all(self):
        """No filters maps to db.service_get_all."""
        self._test_stubbed('service_get_all', (),
                           dict(topic=None, host=None, binary=None))

    def test_service_get_by_host_and_topic(self):
        """host + topic maps to db.service_get_by_host_and_topic."""
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))

    def test_service_get_all_by_topic(self):
        """topic alone maps to db.service_get_all_by_topic."""
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))

    def test_service_get_all_by_host(self):
        """host alone maps to db.service_get_all_by_host."""
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))

    def test_service_get_by_compute_host(self):
        """topic='compute' + host maps to db.service_get_by_compute_host."""
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)

    def test_service_get_by_args(self):
        """host + binary maps to db.service_get_by_args."""
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))

    def test_service_get_by_compute_host_not_found(self):
        """ComputeHostNotFound propagates over RPC."""
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))

    def test_service_get_by_args_not_found(self):
        """HostBinaryNotFound propagates over RPC."""
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))

    def test_security_groups_trigger_handler(self):
        """trigger_handler gets the event name plus the unpacked args."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['arg'])

    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_big(self, mock_prepare, mock_update):
        """With report_interval=10 the RPC call is prepared with timeout=9."""
        CONF.set_override('report_interval', 10)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=9)

    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_small(self, mock_prepare, mock_update):
        """With report_interval=3 the RPC call is prepared with timeout=3."""
        CONF.set_override('report_interval', 3)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=3)

    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_no_time(self, mock_prepare, mock_update):
        """With no report_interval set, no timeout is passed to prepare()."""
        CONF.set_override('report_interval', None)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor API Tests.

    Runs the shared checks through the high-level conductor_api.API
    wrapper backed by a live conductor service.
    """
    def setUp(self):
        """Start a conductor service and use the public API wrapper."""
        super(ConductorAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager
        self.db = None

    def _do_update(self, instance_uuid, **updates):
        # NOTE(danms): the public API takes actual keyword arguments,
        # so override the base class here to make the call correctly
        return self.conductor.instance_update(self.context, instance_uuid,
                                              **updates)

    def test_bw_usage_get(self):
        """bw_usage_get forwards its arguments to db.bw_usage_get."""
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')

        get_args = (self.context, 'uuid', 0, 'mac')

        db.bw_usage_get(*get_args).AndReturn('foo')

        self.mox.ReplayAll()
        result = self.conductor.bw_usage_get(*get_args)
        self.assertEqual(result, 'foo')

    def test_block_device_mapping_update_or_create(self):
        """create/update/update_or_create map to the matching db calls."""
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, 'fake-bdm')
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context,
                                       'fake-id', {'id': 'fake-id'})
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
        self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             'fake-bdm')

    def _test_stubbed(self, name, *args, **kwargs):
        """Stub db.<name> and call the same-named conductor API method.

        An optional leading FakeContext in *args becomes the context the
        db stub expects. kwargs: 'db_exception' makes the stub raise and
        asserts the same class surfaces; 'returns'=False asserts the
        conductor method returns None instead of the db result.
        """
        if args and isinstance(args[0], FakeContext):
            ctxt = args[0]
            args = args[1:]
        else:
            ctxt = self.context
        db_exception = kwargs.get('db_exception')
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(ctxt, *args).AndRaise(db_exception)
        else:
            getattr(db, name)(ctxt, *args).AndReturn('fake-result')
        if name == 'service_destroy':
            # TODO(russellb) This is a hack ... SetUp() starts the conductor()
            # service. There is a cleanup step that runs after this test which
            # also deletes the associated service record. This involves a call
            # to db.service_destroy(), which we have stubbed out.
            db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              getattr(self.conductor, name),
                              self.context, *args)
        else:
            result = getattr(self.conductor, name)(self.context, *args)
            self.assertEqual(
                result, 'fake-result' if kwargs.get('returns', True) else None)

    def test_service_get_all(self):
        self._test_stubbed('service_get_all')

    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')

    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic', 'topic')

    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host', 'host')

    def test_service_get_by_compute_host(self):
        self._test_stubbed('service_get_by_compute_host', 'host')

    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args', 'host', 'binary')

    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host', 'host',
                           db_exception=exc.ComputeHostNotFound(host='host'))

    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args', 'host', 'binary',
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))

    def test_service_create(self):
        self._test_stubbed('service_create', {})

    def test_service_destroy(self):
        self._test_stubbed('service_destroy', '', returns=False)

    def test_service_update(self):
        """service_update passes the service id and values to the db."""
        ctxt = self.context
        self.mox.StubOutWithMock(db, 'service_update')
        db.service_update(ctxt, '', {}).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.service_update(self.context, {'id': ''}, {})
        self.assertEqual(result, 'fake-result')

    def test_instance_get_all_by_host_and_node(self):
        self._test_stubbed('instance_get_all_by_host_and_node',
                           self.context.elevated(), 'host', 'node')

    def test_instance_get_all_by_host(self):
        """Without a node, the host-only db query (columns=None) is used."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(), 'host',
                                    None).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context,
                                                         'host', None)
        self.assertEqual(result, 'fake-result')

    def test_wait_until_ready(self):
        """ping is retried through timeouts until the service answers."""
        timeouts = []
        calls = dict(count=0)

        def fake_ping(context, message, timeout):
            # Record each attempted timeout; fail the first 14 pings.
            timeouts.append(timeout)
            calls['count'] += 1
            if calls['count'] < 15:
                raise messaging.MessagingTimeout("fake")

        self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)

        self.conductor.wait_until_ready(self.context)

        # Ten attempts used the 10s timeout and at least one used no
        # timeout (None) before success.
        self.assertEqual(timeouts.count(10), 10)
        self.assertIn(None, timeouts)

    def test_security_groups_trigger_handler(self):
        """trigger_handler receives the event and the argument as-is."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor LocalAPI Tests.

    Re-runs the ConductorAPITestCase suite against the in-process
    LocalAPI instead of the RPC-backed API.
    """
    def setUp(self):
        """Swap in the LocalAPI and its in-process manager target."""
        super(ConductorLocalAPITestCase, self).setUp()
        self.conductor = conductor_api.LocalAPI()
        self.conductor_manager = self.conductor._manager._target
        self.db = db

    def test_client_exceptions(self):
        """LocalAPI raises the original exception, not a wrapped one."""
        instance = self._create_fake_instance()
        # NOTE(danms): The LocalAPI should not raise exceptions wrapped
        # in ClientException. KeyError should be raised if an invalid
        # update key is passed, so use that to validate.
        self.assertRaises(KeyError,
                          self._do_update, instance['uuid'], foo='bar')

    def test_wait_until_ready(self):
        # Override test in ConductorAPITestCase: there is no remote
        # service to wait for in the local case.
        pass
class ConductorImportTest(test.TestCase):
    """Check conductor.API()/ComputeTaskAPI() factory selection.

    The [conductor]use_local config option (or the use_local kwarg)
    decides between the in-process Local* classes and the RPC-backed
    ones.
    """
    def test_import_conductor_local(self):
        """use_local=True yields the local (in-process) APIs."""
        self.flags(use_local=True, group='conductor')
        api = conductor.API()
        task_api = conductor.ComputeTaskAPI()
        self.assertIsInstance(api, conductor_api.LocalAPI)
        self.assertIsInstance(task_api, conductor_api.LocalComputeTaskAPI)

    def test_import_conductor_rpc(self):
        """use_local=False yields the RPC-backed APIs."""
        self.flags(use_local=False, group='conductor')
        api = conductor.API()
        task_api = conductor.ComputeTaskAPI()
        self.assertIsInstance(api, conductor_api.API)
        self.assertIsInstance(task_api, conductor_api.ComputeTaskAPI)

    def test_import_conductor_override_to_local(self):
        """The use_local=True kwarg overrides the config option."""
        self.flags(use_local=False, group='conductor')
        api = conductor.API(use_local=True)
        task_api = conductor.ComputeTaskAPI(use_local=True)
        self.assertIsInstance(api, conductor_api.LocalAPI)
        self.assertIsInstance(task_api, conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
    """Sanity checks on the conductor's allowed instance-update keys."""

    def test_all_allowed_keys(self):
        """Every key in allowed_updates can be passed to instance_update."""
        def fake_db_instance_update(self, *args, **kwargs):
            return None, None
        self.stubs.Set(db, 'instance_update_and_get_original',
                       fake_db_instance_update)

        ctxt = context.RequestContext('fake-user', 'fake-project')
        conductor = conductor_api.LocalAPI()
        updates = {}
        for key in conductor_manager.allowed_updates:
            # datetime fields need a datetime value; everything else
            # accepts an arbitrary string.
            if key in conductor_manager.datetime_fields:
                updates[key] = timeutils.utcnow()
            else:
                updates[key] = 'foo'
        with mock.patch('nova.objects.Instance._from_db_object'):
            conductor.instance_update(ctxt, 'fake-instance', **updates)

    def test_allowed_keys_are_real(self):
        """Every allowed key (minus expected_task_state) is a model attr."""
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)

        # NOTE(danms): expected_task_state is a parameter that gets
        # passed to the db layer, but is not actually an instance attribute
        del keys[keys.index('expected_task_state')]

        for key in keys:
            self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
    def setUp(self):
        """Set up a fake context, action-event stubs and a serializer stub."""
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self.stubs)

        def fake_deserialize_context(serializer, ctxt_dict):
            # Check the serialized context round-trips the ids, then hand
            # back our own context object instead of building a new one.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context

        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
    def test_live_migrate(self):
        """Live migrate requests are handed to the live_migrate task."""
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])

        self.mox.StubOutWithMock(live_migrate, 'execute')
        live_migrate.execute(self.context,
                             mox.IsA(objects.Instance),
                             'destination',
                             'block_migration',
                             'disk_over_commit')
        self.mox.ReplayAll()

        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'live_migrate_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.live_migrate_instance(self.context, inst_obj,
                'destination', 'block_migration', 'disk_over_commit')
        else:
            self.conductor.migrate_server(self.context, inst_obj,
                {'host': 'destination'}, True, False, None,
                'block_migration', 'disk_over_commit')
    def _test_cold_migrate(self, clean_shutdown=True):
        """Cold migrate: schedule a destination, then prep_resize there.

        :param clean_shutdown: expected value forwarded to prep_resize.
        """
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(
            self.conductor_manager.compute_rpcapi, 'prep_resize')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        inst = fake_instance.fake_db_instance(image_ref='image_ref')
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        flavor = flavors.get_default_flavor()
        flavor.extra_specs = {'extra_specs': 'fake'}
        request_spec = {'instance_type': obj_base.obj_to_primitive(flavor),
                        'instance_properties': {}}
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'image_ref', mox.IsA(objects.Instance)).AndReturn('image')

        scheduler_utils.build_request_spec(
            self.context, 'image',
            [mox.IsA(objects.Instance)],
            instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec, {})

        hosts = [dict(host='host1', nodename=None, limits={})]
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, request_spec,
            {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)

        # The selected host is recorded in the retry list so a rescheduled
        # attempt will avoid it.
        filter_properties = {'limits': {},
                             'retry': {'num_attempts': 1,
                                       'hosts': [['host1', None]]}}

        self.conductor_manager.compute_rpcapi.prep_resize(
            self.context, 'image', mox.IsA(objects.Instance),
            mox.IsA(objects.Flavor), 'host1', [], request_spec=request_spec,
            filter_properties=filter_properties, node=None,
            clean_shutdown=clean_shutdown)

        self.mox.ReplayAll()

        scheduler_hint = {'filter_properties': {}}

        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'resize_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.resize_instance(
                self.context, inst_obj, {}, scheduler_hint, flavor, [],
                clean_shutdown)
        else:
            self.conductor.migrate_server(
                self.context, inst_obj, scheduler_hint,
                False, False, flavor, None, None, [],
                clean_shutdown)
def test_cold_migrate(self):
self._test_cold_migrate()
def test_cold_migrate_forced_shutdown(self):
self._test_cold_migrate(clean_shutdown=False)
    @mock.patch('nova.objects.Instance.refresh')
    @mock.patch('nova.utils.spawn_n')
    def test_build_instances(self, mock_spawn, mock_refresh):
        """Two instances are scheduled and dispatched to their hosts."""
        # Run the spawned work synchronously so the mox expectations are
        # consumed inside this test.
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        instance_type = flavors.get_default_flavor()
        instances = [objects.Instance(context=self.context,
                                      id=i,
                                      uuid=uuid.uuid4(),
                                      flavor=instance_type) for i in xrange(2)]
        instance_type_p = obj_base.obj_to_primitive(instance_type)
        instance_properties = instance_obj.compat_instance(instances[0])

        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')

        spec = {'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type_p,
                'num_instances': 2}
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        # First instance goes to host1/node1 ...
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[0].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host1',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'retry': {'num_attempts': 1,
                                             'hosts': [['host1', 'node1']]},
                                   'limits': []},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node1', limits=[])
        # ... and the second to host2/node2.
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[1].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host2',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2', 'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node2', limits=[])
        self.mox.ReplayAll()

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    def test_build_instances_scheduler_failure(self):
        """NoValidHost puts every requested instance into ERROR state."""
        instances = [fake_instance.fake_instance_obj(self.context)
                for i in xrange(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        exception = exc.NoValidHost(reason='fake-reason')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                'select_destinations')

        scheduler_utils.build_request_spec(self.context, image,
                mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1,
                           'hosts': []}}).AndRaise(exception)
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        # Each instance must be flagged with the error and notified.
        for instance in instances:
            scheduler_utils.set_vm_state_and_notify(
                self.context, instance.uuid, 'compute_task', 'build_instances',
                updates, exception, spec, self.conductor_manager.db)
        self.mox.ReplayAll()

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.conductor.build_instances(self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    @mock.patch('nova.utils.spawn_n')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_build_instances_scheduler_group_failure(self, state_mock,
                                                     sig_mock, bs_mock,
                                                     spawn_mock):
        """UnsupportedPolicyException errors every requested instance."""
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}

        # NOTE(gibi): LocalComputeTaskAPI use eventlet spawn that makes mocking
        # hard so use direct call instead.
        spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
        bs_mock.return_value = spec
        exception = exc.UnsupportedPolicyException(reason='fake-reason')
        sig_mock.side_effect = exception

        updates = {'vm_state': vm_states.ERROR, 'task_state': None}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.conductor.build_instances(
                          context=self.context,
                          instances=instances,
                          image=image,
                          filter_properties={},
                          admin_password='admin_password',
                          injected_files='injected_files',
                          requested_networks=None,
                          security_groups='security_groups',
                          block_device_mapping='block_device_mapping',
                          legacy_bdm=False)
        # Each instance should have been marked ERROR with the exception.
        calls = []
        for instance in instances:
            calls.append(mock.call(self.context, instance.uuid,
                                   'build_instances', updates, exception, spec))
        state_mock.assert_has_calls(calls)
    def test_unshelve_instance_on_host(self):
        """A SHELVED (not offloaded) instance is started on its old host.

        The shelved snapshot image is deleted after the instance starts.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'start_instance')
        self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')

        self.conductor_manager.compute_rpcapi.start_instance(self.context,
                instance)
        self.conductor_manager._delete_image(self.context,
                'fake_image_id')
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_unshelve_offloaded_instance_glance_image_not_found(self):
        """A missing shelved image turns into UnshelveException + ERROR."""
        shelved_image_id = "image_not_found"

        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')

        e = exc.ImageNotFound(image_id=shelved_image_id)
        self.conductor_manager.image_api.get(
            self.context, shelved_image_id, show_deleted=False).AndRaise(e)
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_host'] = 'fake-mini'
        system_metadata['shelved_image_id'] = shelved_image_id

        self.assertRaises(
            exc.UnshelveException,
            self.conductor_manager.unshelve_instance,
            self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ERROR)
    def test_unshelve_offloaded_instance_image_id_is_none(self):
        """A volume-backed (image-less) instance still gets unshelved."""
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        # 'shelved_image_id' is None for volumebacked instance
        instance.system_metadata['shelved_image_id'] = None

        with contextlib.nested(
            mock.patch.object(self.conductor_manager,
                              '_schedule_instances'),
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'unshelve_instance'),
        ) as (schedule_mock, unshelve_mock):
            schedule_mock.return_value = [{'host': 'fake_host',
                                           'nodename': 'fake_node',
                                           'limits': {}}]
            self.conductor_manager.unshelve_instance(self.context, instance)
            self.assertEqual(1, unshelve_mock.call_count)
def test_unshelve_instance_schedule_and_rebuild(self):
    """Happy path: unshelve fetches the image, schedules a host and
    casts unshelve_instance to the selected compute node.
    """
    instance = self._create_fake_instance_obj()
    instance.vm_state = vm_states.SHELVED_OFFLOADED
    instance.save()
    filter_properties = {}
    system_metadata = instance.system_metadata
    # Record phase: image lookup -> scheduling -> compute RPC cast,
    # in that exact order.
    self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
    self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'unshelve_instance')
    self.conductor_manager.image_api.get(self.context,
            'fake_image_id', show_deleted=False).AndReturn('fake_image')
    self.conductor_manager._schedule_instances(self.context,
            'fake_image', filter_properties, instance).AndReturn(
                    [{'host': 'fake_host',
                      'nodename': 'fake_node',
                      'limits': {}}])
    # The host/node/limits chosen by the scheduler must be passed through
    # to the compute RPC.
    self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
            instance, 'fake_host', image='fake_image',
            filter_properties={'limits': {}}, node='fake_node')
    self.mox.ReplayAll()
    system_metadata['shelved_at'] = timeutils.utcnow()
    system_metadata['shelved_image_id'] = 'fake_image_id'
    system_metadata['shelved_host'] = 'fake-mini'
    self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
    """NoValidHost during scheduling leaves the instance offloaded."""
    inst = self._create_fake_instance_obj()
    inst.vm_state = vm_states.SHELVED_OFFLOADED
    inst.save()
    sys_meta = inst.system_metadata
    with mock.patch.object(self.conductor_manager.image_api, 'get',
                           return_value='fake_image') as mock_get:
        with mock.patch.object(
                self.conductor_manager, '_schedule_instances',
                side_effect=exc.NoValidHost(reason='')):
            sys_meta['shelved_at'] = timeutils.utcnow()
            sys_meta['shelved_image_id'] = 'fake_image_id'
            sys_meta['shelved_host'] = 'fake-mini'
            # The scheduling failure is swallowed; the instance simply
            # stays shelved-offloaded.
            self.conductor_manager.unshelve_instance(self.context, inst)
            mock_get.assert_has_calls(
                [mock.call(self.context, sys_meta['shelved_image_id'],
                           show_deleted=False)])
            self.assertEqual(vm_states.SHELVED_OFFLOADED, inst.vm_state)
@mock.patch.object(conductor_manager.ComputeTaskManager,
                   '_schedule_instances',
                   side_effect=messaging.MessagingTimeout())
@mock.patch.object(image_api.API, 'get', return_value='fake_image')
def test_unshelve_instance_schedule_and_rebuild_messaging_exception(
        self, mock_get_image, mock_schedule_instances):
    """A MessagingTimeout while scheduling propagates to the caller and
    resets the instance to SHELVED_OFFLOADED with no task state.
    """
    inst = self._create_fake_instance_obj()
    inst.vm_state = vm_states.SHELVED_OFFLOADED
    inst.task_state = task_states.UNSHELVING
    inst.save()
    sys_meta = inst.system_metadata
    sys_meta['shelved_at'] = timeutils.utcnow()
    sys_meta['shelved_image_id'] = 'fake_image_id'
    sys_meta['shelved_host'] = 'fake-mini'
    self.assertRaises(messaging.MessagingTimeout,
                      self.conductor_manager.unshelve_instance,
                      self.context, inst)
    expected = [mock.call(self.context, sys_meta['shelved_image_id'],
                          show_deleted=False)]
    mock_get_image.assert_has_calls(expected)
    self.assertEqual(vm_states.SHELVED_OFFLOADED, inst.vm_state)
    self.assertIsNone(inst.task_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
    """Volume-backed unshelve: Glance returns no image, and None is
    passed through scheduling and the compute RPC.
    """
    instance = self._create_fake_instance_obj()
    instance.vm_state = vm_states.SHELVED_OFFLOADED
    instance.save()
    filter_properties = {}
    system_metadata = instance.system_metadata
    # Record phase: image lookup returns None (volume backed), then
    # scheduling and the unshelve cast must both receive image=None.
    self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
    self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'unshelve_instance')
    self.conductor_manager.image_api.get(self.context,
            'fake_image_id', show_deleted=False).AndReturn(None)
    self.conductor_manager._schedule_instances(self.context,
            None, filter_properties, instance).AndReturn(
                    [{'host': 'fake_host',
                      'nodename': 'fake_node',
                      'limits': {}}])
    self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
            instance, 'fake_host', image=None,
            filter_properties={'limits': {}}, node='fake_node')
    self.mox.ReplayAll()
    system_metadata['shelved_at'] = timeutils.utcnow()
    system_metadata['shelved_image_id'] = 'fake_image_id'
    system_metadata['shelved_host'] = 'fake-mini'
    self.conductor_manager.unshelve_instance(self.context, instance)
def test_rebuild_instance(self):
    """Rebuild with a host already chosen skips the scheduler and casts
    straight to that compute.
    """
    instance = self._create_fake_instance_obj()
    kwargs = self._prepare_rebuild_args({'host': instance.host})
    with mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'rebuild_instance') as mock_rebuild:
        with mock.patch.object(self.conductor_manager.scheduler_client,
                               'select_destinations') as mock_select:
            self.conductor_manager.rebuild_instance(
                context=self.context, instance=instance, **kwargs)
            self.assertFalse(mock_select.called)
            mock_rebuild.assert_called_once_with(
                self.context, instance=instance, **kwargs)
def test_rebuild_instance_with_scheduler(self):
    """Rebuild without a target host consults the scheduler (ignoring the
    instance's current host) and casts to the destination it picks.
    """
    instance = self._create_fake_instance_obj()
    instance.host = 'noselect'
    kwargs = self._prepare_rebuild_args({'host': None})
    chosen_host = 'thebesthost'
    request_spec = {}
    filter_properties = {'ignore_hosts': [(instance.host)]}
    with mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'rebuild_instance') as mock_rebuild, \
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False), \
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              return_value=[{'host': chosen_host}]
                              ) as mock_select, \
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec):
        self.conductor_manager.rebuild_instance(
            context=self.context, instance=instance, **kwargs)
        mock_select.assert_called_once_with(self.context, request_spec,
                                            filter_properties)
        # The scheduler's pick must be what the RPC is cast to.
        kwargs['host'] = chosen_host
        mock_rebuild.assert_called_once_with(self.context,
                                             instance=instance,
                                             **kwargs)
def test_rebuild_instance_with_scheduler_no_host(self):
    """NoValidHost from the scheduler propagates and no rebuild is cast."""
    instance = self._create_fake_instance_obj()
    instance.host = 'noselect'
    kwargs = self._prepare_rebuild_args({'host': None})
    request_spec = {}
    filter_properties = {'ignore_hosts': [(instance.host)]}
    with mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'rebuild_instance') as mock_rebuild, \
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False), \
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason='')
                              ) as mock_select, \
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec):
        self.assertRaises(exc.NoValidHost,
                          self.conductor_manager.rebuild_instance,
                          context=self.context, instance=instance,
                          **kwargs)
        mock_select.assert_called_once_with(self.context, request_spec,
                                            filter_properties)
        self.assertFalse(mock_rebuild.called)
# NOTE: decorator order is significant — mock.patch decorators apply
# bottom-up, so the parameter list below is the reverse of the stack.
@mock.patch('nova.utils.spawn_n')
@mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
                   'rebuild_instance')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(conductor_manager.scheduler_client.SchedulerClient,
                   'select_destinations')
@mock.patch('nova.scheduler.utils.build_request_spec')
@mock.patch.object(conductor_manager.ComputeTaskManager,
                   '_set_vm_state_and_notify')
def test_rebuild_instance_with_scheduler_group_failure(self,
                                                       state_mock,
                                                       bs_mock,
                                                       select_dest_mock,
                                                       sig_mock,
                                                       rebuild_mock,
                                                       spawn_mock):
    """UnsupportedPolicyException from group setup aborts rebuild: the
    instance is reset to ACTIVE and neither scheduling nor the rebuild
    RPC happens.
    """
    inst_obj = self._create_fake_instance_obj()
    rebuild_args = self._prepare_rebuild_args({'host': None})
    request_spec = {}
    bs_mock.return_value = request_spec
    # NOTE(gibi): LocalComputeTaskAPI use eventlet spawn that makes mocking
    # hard so use direct call instead.
    spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
    exception = exc.UnsupportedPolicyException(reason='')
    sig_mock.side_effect = exception
    # build_instances() is a cast, we need to wait for it to complete
    self.useFixture(cast_as_call.CastAsCall(self.stubs))
    self.assertRaises(exc.UnsupportedPolicyException,
                      self.conductor.rebuild_instance,
                      self.context,
                      inst_obj,
                      **rebuild_args)
    updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}
    state_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                       'rebuild_server', updates,
                                       exception, request_spec)
    self.assertFalse(select_dest_mock.called)
    self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """ComputeTaskManager Tests."""

    def setUp(self):
        super(ConductorTaskTestCase, self).setUp()
        # Drive ComputeTaskManager directly; no RPC indirection.
        self.conductor = conductor_manager.ComputeTaskManager()
        self.conductor_manager = self.conductor

    def test_migrate_server_fails_with_rebuild(self):
        """migrate_server rejects live-migrate-with-rebuild."""
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, True, None, None, None)

    def test_migrate_server_fails_with_flavor(self):
        """migrate_server rejects live migration combined with a flavor."""
        flavor = flavors.get_flavor_by_name('m1.tiny')
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, False, flavor, None, None)

    def _build_request_spec(self, instance):
        # Minimal request spec matching what migrate_server builds for
        # the notify call.
        return {
            'instance_properties': {
                'uuid': instance['uuid'], },
        }

    def _test_migrate_server_deals_with_expected_exceptions(self, ex):
        """Common helper: live_migrate raising *ex* must re-raise it and
        reset the instance to ACTIVE via set_vm_state_and_notify.
        """
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')
        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)
        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()
        # ExceptionHelper re-raises expected exceptions wrapped by RPC.
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(type(ex),
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')

    def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
        """InvalidCPUInfo from live_migrate re-raises and notifies."""
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')
        ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)
        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(exc.InvalidCPUInfo,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')

    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate, 'execute')
    def test_migrate_server_deals_with_instancenotrunning_exception(self,
                mock_live_migrate, mock_set_state):
        """InstanceNotRunning re-raises; vm_state is kept as-is."""
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        error = exc.InstanceNotRunning(instance_id="fake")
        mock_live_migrate.side_effect = error
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(exc.InstanceNotRunning,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None,
            'block_migration', 'disk_over_commit')
        request_spec = self._build_request_spec(inst_obj)
        mock_set_state.assert_called_once_with(self.context, inst_obj.uuid,
                'compute_task',
                'migrate_server',
                dict(vm_state=inst_obj.vm_state,
                     task_state=None,
                     expected_task_state=task_states.MIGRATING),
                error, request_spec, self.conductor_manager.db)

    def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
        ex = exc.DestinationHypervisorTooOld()
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    def test_migrate_server_deals_with_HypervisorUnavailable(self):
        ex = exc.HypervisorUnavailable(host='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    def test_migrate_server_deals_with_LiveMigrationWithOldNovaNotSafe(self):
        ex = exc.LiveMigrationWithOldNovaNotSafe(server='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate, 'execute')
    def test_migrate_server_deals_with_unexpected_exceptions(self,
            mock_live_migrate, mock_set_state):
        """An unexpected error is wrapped in MigrationError and the
        instance is put into ERROR.
        """
        expected_ex = IOError('fake error')
        mock_live_migrate.side_effect = expected_ex
        instance = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        ex = self.assertRaises(exc.MigrationError,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
        request_spec = {'instance_properties': {
                'uuid': instance['uuid'], },
        }
        mock_set_state.assert_called_once_with(self.context,
                instance['uuid'],
                'compute_task', 'migrate_server',
                dict(vm_state=vm_states.ERROR,
                     task_state=inst_obj.task_state,
                     expected_task_state=task_states.MIGRATING,),
                expected_ex, request_spec, self.conductor.db)
        # The original error text must be preserved in the wrapper.
        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))

    def test_set_vm_state_and_notify(self):
        """_set_vm_state_and_notify delegates to scheduler_utils."""
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')
        scheduler_utils.set_vm_state_and_notify(
                self.context, 1, 'compute_task', 'method', 'updates',
                'ex', 'request_spec', self.conductor.db)
        self.mox.ReplayAll()
        self.conductor._set_vm_state_and_notify(
                self.context, 1, 'method', 'updates', 'ex', 'request_spec')

    def test_cold_migrate_no_valid_host_back_in_active_state(self):
        """NoValidHost during cold migrate of an ACTIVE instance rolls
        back quotas and restores ACTIVE state.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
        scheduler_utils.build_request_spec(
                self.context, image, [inst_obj],
                instance_type=flavor).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)
        exc_info = exc.NoValidHost(reason="")
        self.conductor.scheduler_client.select_destinations(
                self.context, request_spec,
                filter_props).AndRaise(exc_info)
        updates = {'vm_state': vm_states.ACTIVE,
                   'task_state': None}
        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)
        self.mox.ReplayAll()
        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate,
                          self.context, inst_obj,
                          flavor, filter_props, [resvs],
                          clean_shutdown=True)

    def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
        """As above, but a STOPPED instance is restored to STOPPED."""
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
        scheduler_utils.build_request_spec(
                self.context, image, [inst_obj],
                instance_type=flavor).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)
        exc_info = exc.NoValidHost(reason="")
        self.conductor.scheduler_client.select_destinations(
                self.context, request_spec,
                filter_props).AndRaise(exc_info)
        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}
        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)
        self.mox.ReplayAll()
        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate, self.context,
                          inst_obj, flavor, filter_props, [resvs],
                          clean_shutdown=True)

    def test_cold_migrate_no_valid_host_error_msg(self):
        """The NoValidHost raised for a same-flavor move mentions
        'cold migrate' in its message.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        with contextlib.nested(
            mock.patch.object(compute_utils, 'get_image_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(self.conductor.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason=""))
        ) as (image_mock, brs_mock, sig_mock, set_vm_mock, select_dest_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor, filter_props, [resvs],
                                    clean_shutdown=True)
            self.assertIn('cold migrate', nvh.message)

    @mock.patch.object(compute_utils, 'get_image_metadata')
    @mock.patch('nova.scheduler.utils.build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_cold_migrate_no_valid_host_in_group(self,
                                                 set_vm_mock,
                                                 sig_mock,
                                                 brs_mock,
                                                 image_mock):
        """UnsupportedPolicyException from group setup aborts the cold
        migrate and restores the STOPPED state.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        exception = exc.UnsupportedPolicyException(reason='')
        image_mock.return_value = image
        brs_mock.return_value = request_spec
        sig_mock.side_effect = exception
        self.assertRaises(exc.UnsupportedPolicyException,
                          self.conductor._cold_migrate, self.context,
                          inst_obj, flavor, filter_props, [resvs],
                          clean_shutdown=True)
        updates = {'vm_state': vm_states.STOPPED, 'task_state': None}
        set_vm_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                            'migrate_server', updates,
                                            exception, request_spec)

    def test_cold_migrate_exception_host_in_error_state_and_raise(self):
        """A failure in prep_resize after a host was selected re-raises,
        notifies with the STOPPED state, and rolls back quotas.
        """
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        hosts = [dict(host='host1', nodename=None, limits={})]
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(scheduler_utils,
                                 'populate_filter_properties')
        self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
                                 'prep_resize')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
        scheduler_utils.build_request_spec(
                self.context, image, [inst_obj],
                instance_type='flavor').AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)
        # populate_retry() adds the retry bookkeeping before scheduling.
        expected_filter_props = {'retry': {'num_attempts': 1,
                                           'hosts': []},
                                 'context': None}
        self.conductor.scheduler_client.select_destinations(
                self.context, request_spec,
                expected_filter_props).AndReturn(hosts)
        scheduler_utils.populate_filter_properties(filter_props,
                                                   hosts[0])
        exc_info = test.TestingException('something happened')
        expected_filter_props = {'retry': {'num_attempts': 1,
                                           'hosts': []}}
        self.conductor.compute_rpcapi.prep_resize(
                self.context, image, inst_obj,
                'flavor', hosts[0]['host'], [resvs],
                request_spec=request_spec,
                filter_properties=expected_filter_props,
                node=hosts[0]['nodename'],
                clean_shutdown=True).AndRaise(exc_info)
        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}
        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.conductor._cold_migrate,
                          self.context, inst_obj, 'flavor',
                          filter_props, [resvs],
                          clean_shutdown=True)

    def test_resize_no_valid_host_error_msg(self):
        """The NoValidHost raised for a flavor-changing move mentions
        'resize' in its message.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        flavor_new = flavors.get_flavor_by_name('m1.small')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        with contextlib.nested(
            mock.patch.object(compute_utils, 'get_image_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(self.conductor.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason=""))
        ) as (image_mock, brs_mock, sig_mock, vm_st_mock, select_dest_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor_new, filter_props,
                                    [resvs], clean_shutdown=True)
            self.assertIn('resize', nvh.message)

    def test_build_instances_instance_not_found(self):
        """If one instance disappears before the cast, the remaining
        instances are still built on their scheduled hosts.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                for i in xrange(2)]
        self.mox.StubOutWithMock(instances[0], 'refresh')
        self.mox.StubOutWithMock(instances[1], 'refresh')
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')
        scheduler_utils.build_request_spec(self.context, image,
                mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        # First instance vanished; its build is silently skipped.
        instances[0].refresh().AndRaise(
                exc.InstanceNotFound(instance_id=instances[0].uuid))
        instances[1].refresh()
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context, instance=instances[1], host='host2',
                image={'fake-data': 'should_pass_silently'}, request_spec=spec,
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2',
                                                        'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
                node='node2', limits=[])
        self.mox.ReplayAll()
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)

    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    def test_build_instances_info_cache_not_found(self, build_request_spec,
                                                  setup_instance_group):
        """An InstanceInfoCacheNotFound on refresh skips that instance
        but the others are still built.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                for i in xrange(2)]
        image = {'fake-data': 'should_pass_silently'}
        destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                {'host': 'host2', 'nodename': 'node2', 'limits': []}]
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        build_request_spec.return_value = spec
        with contextlib.nested(
                mock.patch.object(instances[0], 'refresh',
                    side_effect=exc.InstanceInfoCacheNotFound(
                        instance_uuid=instances[0].uuid)),
                mock.patch.object(instances[1], 'refresh'),
                mock.patch.object(self.conductor_manager.scheduler_client,
                    'select_destinations', return_value=destinations),
                mock.patch.object(self.conductor_manager.compute_rpcapi,
                    'build_and_run_instance')
                ) as (inst1_refresh, inst2_refresh, select_destinations,
                        build_and_run_instance):
            # build_instances() is a cast, we need to wait for it to complete
            self.useFixture(cast_as_call.CastAsCall(self.stubs))
            self.conductor.build_instances(self.context,
                    instances=instances,
                    image=image,
                    filter_properties={},
                    admin_password='admin_password',
                    injected_files='injected_files',
                    requested_networks=None,
                    security_groups='security_groups',
                    block_device_mapping='block_device_mapping',
                    legacy_bdm=False)
            # NOTE(sbauza): Due to populate_retry() later in the code,
            # filter_properties is dynamically modified
            setup_instance_group.assert_called_once_with(
                self.context, spec, {'retry': {'num_attempts': 1,
                                               'hosts': []}})
            build_and_run_instance.assert_called_once_with(self.context,
                    instance=instances[1], host='host2', image={'fake-data':
                        'should_pass_silently'}, request_spec=spec,
                    filter_properties={'limits': [],
                                       'retry': {'num_attempts': 1,
                                                 'hosts': [['host2',
                                                            'node2']]}},
                    admin_password='admin_password',
                    injected_files='injected_files',
                    requested_networks=None,
                    security_groups='security_groups',
                    block_device_mapping=mock.ANY,
                    node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
                                  test_compute.BaseTestCase):
    """Conductor compute_task RPC namespace Tests."""

    def setUp(self):
        super(ConductorTaskRPCAPITestCase, self).setUp()
        # Run the shared task tests through the RPC API against an
        # in-process conductor service.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_rpcapi.ComputeTaskAPI()
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """Compute task API Tests."""

    def setUp(self):
        super(ConductorTaskAPITestCase, self).setUp()
        # Run the shared task tests through the high-level compute task
        # API against an in-process conductor service.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.ComputeTaskAPI()
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
    """Conductor LocalComputeTaskAPI Tests."""

    def setUp(self):
        super(ConductorLocalComputeTaskAPITestCase, self).setUp()
        # Re-run the inherited tests against the local (no-RPC) task API;
        # _manager._target is the underlying ComputeTaskManager.
        self.conductor = conductor_api.LocalComputeTaskAPI()
        self.conductor_manager = self.conductor._manager._target
| 46.946804 | 79 | 0.592454 |
import contextlib
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova.image import api as image_api
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova import utils
CONF = cfg.CONF
# report_interval lives in nova.service; import it so tests can read it.
CONF.import_opt('report_interval', 'nova.service')
# Placeholder image ref used when creating fake instances.
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
    """Request context whose elevated() result is memoized, so tests that
    compare elevated contexts by identity see a stable object.
    """

    def elevated(self):
        # Cache the first elevated context and hand the same one back on
        # every subsequent call.
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
def setUp(self):
    """Common fixture: fake context, stubbed notifier, and a context
    deserializer that asserts round-tripped RPC contexts match ours.
    """
    super(_BaseTestCase, self).setUp()
    self.db = None
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = FakeContext(self.user_id, self.project_id)
    fake_notifier.stub_notifier(self.stubs)
    self.addCleanup(fake_notifier.reset)

    def fake_deserialize_context(serializer, ctxt_dict):
        # Verify the serialized context carries our ids, then short-circuit
        # deserialization to the test's own context object.
        self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
        self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
        return self.context

    self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                   fake_deserialize_context)
    # Make spawned "async" work run synchronously inside the test.
    fake_utils.stub_out_utils_spawn_n(self.stubs)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = arch.X86_64
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates, None)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_instance_update_invalid_key(self):
if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args,
update_cells=True)
self.assertEqual(result, 'foo')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst, None, False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
{'fake': 'values'})
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertIsNone(result)
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', None)
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
@mock.patch.object(notifications, 'audit_period_bounds')
@mock.patch.object(notifications, 'bandwidth_usage')
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
def test_notify_usage_exists(self, mock_notify, mock_bw, mock_audit):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = objects.Instance(id=1, system_metadata={})
mock_audit.return_value = ('start', 'end')
mock_bw.return_value = 'bw_usage'
self.conductor.notify_usage_exists(self.context, instance, False, True,
system_metadata={},
extra_usage_info=dict(extra='info'))
class MatchInstance(object):
def __eq__(self, thing):
return thing.id == instance.id
notifier = self.conductor_manager.notifier
mock_audit.assert_called_once_with(False)
mock_bw.assert_called_once_with(MatchInstance(), 'start', True)
mock_notify.assert_called_once_with(notifier, self.context,
MatchInstance(),
'exists', system_metadata={},
extra_usage_info=info)
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
    """Conductor tests issued directly against ConductorManager (no RPC)."""
    def setUp(self):
        super(ConductorTestCase, self).setUp()
        self.conductor = conductor_manager.ConductorManager()
        self.conductor_manager = self.conductor
    def test_instance_get_by_uuid(self):
        orig_instance = self._create_fake_instance()
        copy_instance = self.conductor.instance_get_by_uuid(
            self.context, orig_instance['uuid'], None)
        self.assertEqual(orig_instance['name'],
                         copy_instance['name'])
    def test_block_device_mapping_update_or_create(self):
        """BDM create/update must also be propagated to the cells API."""
        fake_bdm = {'id': 1, 'device_name': 'foo',
                    'source_type': 'volume', 'volume_id': 'fake-vol-id',
                    'destination_type': 'volume'}
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
        fake_bdm2 = {'id': 1, 'device_name': 'foo2',
                     'source_type': 'volume', 'volume_id': 'fake-vol-id',
                     'destination_type': 'volume'}
        fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
        cells_rpcapi = self.conductor.cells_rpcapi
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'bdm_update_or_create_at_top')
        db.block_device_mapping_create(self.context,
                                       fake_bdm).AndReturn(fake_bdm2)
        cells_rpcapi.bdm_update_or_create_at_top(
            self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
            create=True)
        db.block_device_mapping_update(self.context, fake_bdm['id'],
                                       fake_bdm).AndReturn(fake_bdm2)
        cells_rpcapi.bdm_update_or_create_at_top(
            self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
            create=False)
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
    def test_instance_get_all_by_filters(self):
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort',
                                       columns_to_join=None, use_slave=False)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort',
                                                   None, False)
    def test_instance_get_all_by_filters_use_slave(self):
        # use_slave=True must be forwarded to the DB API unchanged.
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort',
                                       columns_to_join=None, use_slave=True)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort',
                                                   columns_to_join=None,
                                                   use_slave=True)
    def test_instance_get_all_by_host(self):
        # With node=None the plain by-host lookup is used; with a node,
        # the host-and-node variant is used. Both need an elevated context.
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(),
                                    'host', None).AndReturn('result')
        db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
                                             'node').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         None, None)
        self.assertEqual(result, 'result')
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         'node', None)
        self.assertEqual(result, 'result')
    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Stub db.<name> and drive it via conductor.service_get_all_by.

        When ``db_exception`` is set, the db call is expected *twice*:
        once through the raw manager (which wraps the error in an
        ExpectedException) and once through an ExceptionHelper wrapper
        (which unwraps it back to the original exception class).
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(messaging.ExpectedException,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
            self.conductor = utils.ExceptionHelper(self.conductor)
            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (),
                           dict(host=None, topic=None, binary=None))
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))
    def test_service_get_by_compute_host(self):
        # The compute-host variant returns a single row that the
        # conductor wraps in a list.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))
    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))
    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))
    def test_security_groups_trigger_handler(self):
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'args')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['args'])
    def _test_object_action(self, is_classmethod, raise_exception):
        """Round-trip an object method call through the conductor.

        Exercises object_class_action (classmethod path) or object_action
        (instance path); exceptions must surface as ExpectedException.
        """
        class TestObject(obj_base.NovaObject):
            def foo(self, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'
            @classmethod
            def bar(cls, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'
        obj = TestObject()
        if is_classmethod:
            result = self.conductor.object_class_action(
                self.context, TestObject.obj_name(), 'bar', '1.0',
                tuple(), {'raise_exception': raise_exception})
        else:
            updates, result = self.conductor.object_action(
                self.context, obj, 'foo', tuple(),
                {'raise_exception': raise_exception})
        self.assertEqual('test', result)
    def test_object_action(self):
        self._test_object_action(False, False)
    def test_object_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, False, True)
    def test_object_class_action(self):
        self._test_object_action(True, False)
    def test_object_class_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, True, True)
    def test_object_action_copies_object(self):
        # Changes the remote method makes to the object must come back
        # to the caller via the ``updates`` dict, even if the method
        # resets the object's changed-fields tracking.
        class TestObject(obj_base.NovaObject):
            fields = {'dict': fields.DictOfStringsField()}
            def touch_dict(self, context):
                self.dict['foo'] = 'bar'
                self.obj_reset_changes()
        obj = TestObject()
        obj.dict = {}
        obj.obj_reset_changes()
        updates, result = self.conductor.object_action(
            self.context, obj, 'touch_dict', tuple(), {})
        self.assertIn('dict', updates)
        self.assertEqual({'foo': 'bar'}, updates['dict'])
    def _test_expected_exceptions(self, db_method, conductor_method, errors,
                                  *args, **kwargs):
        """Each listed db error must re-raise as an ExpectedException."""
        for error in errors:
            with mock.patch.object(db, db_method, side_effect=error):
                self.assertRaises(messaging.ExpectedException,
                                  conductor_method,
                                  self.context, *args, **kwargs)
    def test_action_event_start_expected_exceptions(self):
        error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
        self._test_expected_exceptions(
            'action_event_start', self.conductor.action_event_start, [error],
            {'foo': 'bar'})
    def test_action_event_finish_expected_exceptions(self):
        errors = (exc.InstanceActionNotFound(request_id='1',
                                             instance_uuid='2'),
                  exc.InstanceActionEventNotFound(event='1', action_id='2'))
        self._test_expected_exceptions(
            'action_event_finish', self.conductor.action_event_finish,
            errors, {'foo': 'bar'})
    def test_instance_update_expected_exceptions(self):
        errors = (exc.InvalidUUID(uuid='foo'),
                  exc.InstanceNotFound(instance_id=1),
                  exc.UnexpectedTaskStateError(expected='foo',
                                               actual='bar'))
        self._test_expected_exceptions(
            'instance_update', self.conductor.instance_update,
            errors, None, {'foo': 'bar'}, None)
    def test_instance_get_by_uuid_expected_exceptions(self):
        error = exc.InstanceNotFound(instance_id=1)
        self._test_expected_exceptions(
            'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
            [error], None, [])
    def test_aggregate_host_add_expected_exceptions(self):
        error = exc.AggregateHostExists(aggregate_id=1, host='foo')
        self._test_expected_exceptions(
            'aggregate_host_add', self.conductor.aggregate_host_add,
            [error], {'id': 1}, None)
    def test_aggregate_host_delete_expected_exceptions(self):
        error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
        self._test_expected_exceptions(
            'aggregate_host_delete', self.conductor.aggregate_host_delete,
            [error], {'id': 1}, None)
    def test_service_update_expected_exceptions(self):
        error = exc.ServiceNotFound(service_id=1)
        self._test_expected_exceptions(
            'service_update',
            self.conductor.service_update,
            [error], {'id': 1}, None)
    def test_service_destroy_expected_exceptions(self):
        error = exc.ServiceNotFound(service_id=1)
        self._test_expected_exceptions(
            'service_destroy',
            self.conductor.service_destroy,
            [error], 1)
    def _setup_aggregate_with_host(self):
        """Create an aggregate, add host 'bar', and return the fresh row."""
        aggregate_ref = db.aggregate_create(self.context.elevated(),
                {'name': 'foo'}, metadata={'availability_zone': 'foo'})
        self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])
        return aggregate_ref
    def test_aggregate_host_add(self):
        aggregate_ref = self._setup_aggregate_with_host()
        self.assertIn('bar', aggregate_ref['hosts'])
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
    def test_aggregate_host_delete(self):
        aggregate_ref = self._setup_aggregate_with_host()
        self.conductor.aggregate_host_delete(self.context, aggregate_ref,
                'bar')
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])
        self.assertNotIn('bar', aggregate_ref['hosts'])
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
    def test_network_migrate_instance_start(self):
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_start')
        self.conductor_manager.network_api.migrate_instance_start(self.context,
                                                                  'instance',
                                                                  'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_start(self.context,
                                                      'instance',
                                                      'migration')
    def test_network_migrate_instance_finish(self):
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_finish')
        self.conductor_manager.network_api.migrate_instance_finish(
            self.context, 'instance', 'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_finish(self.context,
                                                       'instance',
                                                       'migration')
    def test_instance_destroy(self):
        self.mox.StubOutWithMock(db, 'instance_destroy')
        db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_destroy(self.context,
                                                 {'uuid': 'fake-uuid'})
        self.assertEqual(result, 'fake-result')
    def test_compute_unrescue(self):
        self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                                 'unrescue')
        self.conductor_manager.compute_api.unrescue(self.context, 'instance')
        self.mox.ReplayAll()
        self.conductor.compute_unrescue(self.context, 'instance')
    def test_instance_get_active_by_window_joined(self):
        self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
        db.instance_get_active_by_window_joined(self.context, 'fake-begin',
                                                'fake-end', 'fake-proj',
                                                'fake-host')
        self.mox.ReplayAll()
        self.conductor.instance_get_active_by_window_joined(
            self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
    def test_instance_fault_create(self):
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        db.instance_fault_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_fault_create(self.context,
                                                      'fake-values')
        self.assertEqual(result, 'fake-result')
    def test_action_event_start(self):
        self.mox.StubOutWithMock(db, 'action_event_start')
        db.action_event_start(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_start(self.context, {})
    def test_action_event_finish(self):
        self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_finish(self.context, {})
    def test_agent_build_get_by_triple(self):
        self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
        db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
                                     'fake-arch').AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.agent_build_get_by_triple(self.context,
                                                          'fake-hv',
                                                          'fake-os',
                                                          'fake-arch')
        self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor tests issued through the RPC API against a live service."""
    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()
    def test_block_device_mapping_update_or_create(self):
        # Three calls: create=True -> create, create=False -> update,
        # create unspecified -> update_or_create. Each result is rehydrated
        # into a BlockDeviceMapping object.
        fake_bdm = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm)
    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Stub db.<name> and call it via the RPC service_get_all_by.

        Unlike the manager-level variant, the RPC layer unwraps
        ExpectedException, so the original db exception class is
        expected directly.
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (),
                           dict(topic=None, host=None, binary=None))
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))
    def test_service_get_by_compute_host(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))
    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))
    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))
    def test_security_groups_trigger_handler(self):
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['arg'])
    # The three tests below verify the RPC timeout chosen for
    # service_update as a function of CONF.report_interval.
    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_big(self, mock_prepare, mock_update):
        # Large report_interval: timeout is report_interval - 1.
        CONF.set_override('report_interval', 10)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=9)
    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_small(self, mock_prepare, mock_update):
        # Small report_interval: timeout equals the interval itself.
        CONF.set_override('report_interval', 3)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=3)
    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_no_time(self, mock_prepare, mock_update):
        # No report_interval configured: no explicit timeout is passed.
        CONF.set_override('report_interval', None)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Tests of the conductor_api.API convenience layer."""
    def setUp(self):
        super(ConductorAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager
        self.db = None
    def _do_update(self, instance_uuid, **updates):
        # The API layer takes the updates as keyword arguments directly.
        return self.conductor.instance_update(self.context, instance_uuid,
                                              **updates)
    def test_bw_usage_get(self):
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_get(*get_args)
        self.assertEqual(result, 'foo')
    def test_block_device_mapping_update_or_create(self):
        # The API exposes separate create/update/update_or_create methods;
        # each must hit its matching db call and rehydrate the result.
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, 'fake-bdm')
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context,
                                       'fake-id', {'id': 'fake-id'})
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
        self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             'fake-bdm')
    def _test_stubbed(self, name, *args, **kwargs):
        """Stub db.<name> and call the same-named conductor API method.

        An optional leading FakeContext positional argument selects the
        context the db call is expected with (e.g. an elevated one).
        """
        if args and isinstance(args[0], FakeContext):
            ctxt = args[0]
            args = args[1:]
        else:
            ctxt = self.context
        db_exception = kwargs.get('db_exception')
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(ctxt, *args).AndRaise(db_exception)
        else:
            getattr(db, name)(ctxt, *args).AndReturn('fake-result')
        if name == 'service_destroy':
            # NOTE(review): service_destroy appears to be invoked a second
            # time downstream, hence the extra expectation — confirm
            # against the conductor API implementation.
            db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              getattr(self.conductor, name),
                              self.context, *args)
        else:
            result = getattr(self.conductor, name)(self.context, *args)
            self.assertEqual(
                result, 'fake-result' if kwargs.get('returns', True) else None)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all')
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic', 'topic')
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host', 'host')
    def test_service_get_by_compute_host(self):
        self._test_stubbed('service_get_by_compute_host', 'host')
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args', 'host', 'binary')
    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host', 'host',
                           db_exception=exc.ComputeHostNotFound(host='host'))
    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args', 'host', 'binary',
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))
    def test_service_create(self):
        self._test_stubbed('service_create', {})
    def test_service_destroy(self):
        self._test_stubbed('service_destroy', '', returns=False)
    def test_service_update(self):
        ctxt = self.context
        self.mox.StubOutWithMock(db, 'service_update')
        db.service_update(ctxt, '', {}).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.service_update(self.context, {'id': ''}, {})
        self.assertEqual(result, 'fake-result')
    def test_instance_get_all_by_host_and_node(self):
        self._test_stubbed('instance_get_all_by_host_and_node',
                           self.context.elevated(), 'host', 'node')
    def test_instance_get_all_by_host(self):
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(), 'host',
                                    None).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context,
                                                         'host', None)
        self.assertEqual(result, 'fake-result')
    def test_wait_until_ready(self):
        """First pings use short 10s timeouts, then fall back to waiting
        indefinitely (timeout=None) until the service answers."""
        timeouts = []
        calls = dict(count=0)
        def fake_ping(context, message, timeout):
            timeouts.append(timeout)
            calls['count'] += 1
            if calls['count'] < 15:
                raise messaging.MessagingTimeout("fake")
        self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
        self.conductor.wait_until_ready(self.context)
        self.assertEqual(timeouts.count(10), 10)
        self.assertIn(None, timeouts)
    def test_security_groups_trigger_handler(self):
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor API tests run against the in-process LocalAPI."""
    def setUp(self):
        super(ConductorLocalAPITestCase, self).setUp()
        local_api = conductor_api.LocalAPI()
        self.conductor = local_api
        # Reach through the local proxy to the real manager instance.
        self.conductor_manager = local_api._manager._target
        self.db = db
    def test_client_exceptions(self):
        # LocalAPI raises the original exception directly (no RPC wrapping).
        inst = self._create_fake_instance()
        self.assertRaises(KeyError, self._do_update, inst['uuid'], foo='bar')
    def test_wait_until_ready(self):
        # Not applicable locally: there is no remote service to wait for.
        pass
class ConductorImportTest(test.TestCase):
    """Verify conductor.API()/ComputeTaskAPI() factories honor the
    [conductor] use_local flag when choosing local vs. RPC classes.
    """
    def test_import_conductor_local(self):
        self.flags(use_local=True, group='conductor')
        self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
        self.assertIsInstance(conductor.ComputeTaskAPI(),
                              conductor_api.LocalComputeTaskAPI)
    def test_import_conductor_rpc(self):
        self.flags(use_local=False, group='conductor')
        self.assertIsInstance(conductor.API(), conductor_api.API)
        self.assertIsInstance(conductor.ComputeTaskAPI(),
                              conductor_api.ComputeTaskAPI)
    def test_import_conductor_override_to_local(self):
        # An explicit use_local=True argument wins over the config flag.
        self.flags(use_local=False, group='conductor')
        self.assertIsInstance(conductor.API(use_local=True),
                              conductor_api.LocalAPI)
        self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
                              conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
    """Sanity checks on the conductor's instance-update key whitelist."""
    def test_all_allowed_keys(self):
        """Every whitelisted key must be accepted by instance_update."""
        def fake_db_instance_update(self, *args, **kwargs):
            return None, None
        self.stubs.Set(db, 'instance_update_and_get_original',
                       fake_db_instance_update)
        ctxt = context.RequestContext('fake-user', 'fake-project')
        conductor = conductor_api.LocalAPI()
        # Datetime-typed fields need a real timestamp; everything else
        # can take an arbitrary string value.
        updates = dict(
            (key,
             timeutils.utcnow() if key in conductor_manager.datetime_fields
             else 'foo')
            for key in conductor_manager.allowed_updates)
        with mock.patch('nova.objects.Instance._from_db_object'):
            conductor.instance_update(ctxt, 'fake-instance', **updates)
    def test_allowed_keys_are_real(self):
        """Whitelisted keys must be actual attributes of the Instance
        model, except the purely-virtual expected_task_state guard.
        """
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)
        # 'expected_task_state' is not a column; it only guards updates.
        keys.remove('expected_task_state')
        for key in keys:
            self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
    def setUp(self):
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self.stubs)
        def fake_deserialize_context(serializer, ctxt_dict):
            # Check the context round-trips over RPC with the same
            # user/project before handing the test context back.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
    def test_live_migrate(self):
        """Both API flavours funnel a live migration into
        live_migrate.execute with identical arguments.
        """
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        live_migrate.execute(self.context,
                             mox.IsA(objects.Instance),
                             'destination',
                             'block_migration',
                             'disk_over_commit')
        self.mox.ReplayAll()
        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The compute-task API flavours expose a dedicated call.
            self.conductor.live_migrate_instance(self.context, inst_obj,
                'destination', 'block_migration', 'disk_over_commit')
        else:
            # The manager interface goes through the generic entry point.
            self.conductor.migrate_server(self.context, inst_obj,
                {'host': 'destination'}, True, False, None,
                'block_migration', 'disk_over_commit')
    def _test_cold_migrate(self, clean_shutdown=True):
        """Common cold-migrate flow: build a request spec, pick a host
        via the scheduler, then issue prep_resize to the chosen host
        with the clean_shutdown flag propagated.
        """
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(
            self.conductor_manager.compute_rpcapi, 'prep_resize')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        inst = fake_instance.fake_db_instance(image_ref='image_ref')
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        flavor = flavors.get_default_flavor()
        flavor.extra_specs = {'extra_specs': 'fake'}
        request_spec = {'instance_type': obj_base.obj_to_primitive(flavor),
                        'instance_properties': {}}
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
        scheduler_utils.build_request_spec(
            self.context, 'image',
            [mox.IsA(objects.Instance)],
            instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec, {})
        hosts = [dict(host='host1', nodename=None, limits={})]
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, request_spec,
            {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
        filter_properties = {'limits': {},
                             'retry': {'num_attempts': 1,
                                       'hosts': [['host1', None]]}}
        self.conductor_manager.compute_rpcapi.prep_resize(
            self.context, 'image', mox.IsA(objects.Instance),
            mox.IsA(objects.Flavor), 'host1', [], request_spec=request_spec,
            filter_properties=filter_properties, node=None,
            clean_shutdown=clean_shutdown)
        self.mox.ReplayAll()
        scheduler_hint = {'filter_properties': {}}
        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            self.conductor.resize_instance(
                self.context, inst_obj, {}, scheduler_hint, flavor, [],
                clean_shutdown)
        else:
            self.conductor.migrate_server(
                self.context, inst_obj, scheduler_hint,
                False, False, flavor, None, None, [],
                clean_shutdown)
    def test_cold_migrate(self):
        # Default path performs a clean guest shutdown before migrating.
        self._test_cold_migrate()
    def test_cold_migrate_forced_shutdown(self):
        # clean_shutdown=False must reach prep_resize unchanged.
        self._test_cold_migrate(clean_shutdown=False)
    @mock.patch('nova.objects.Instance.refresh')
    @mock.patch('nova.utils.spawn_n')
    def test_build_instances(self, mock_spawn, mock_refresh):
        """Two instances get scheduled onto two hosts, and each host
        receives a build_and_run_instance cast carrying its own retry
        bookkeeping in filter_properties.
        """
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        instance_type = flavors.get_default_flavor()
        instances = [objects.Instance(context=self.context,
                                      id=i,
                                      uuid=uuid.uuid4(),
                                      flavor=instance_type) for i in xrange(2)]
        instance_type_p = obj_base.obj_to_primitive(instance_type)
        instance_properties = instance_obj.compat_instance(instances[0])
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')
        spec = {'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type_p,
                'num_instances': 2}
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[0].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host1',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'retry': {'num_attempts': 1,
                                             'hosts': [['host1', 'node1']]},
                                   'limits': []},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node1', limits=[])
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[1].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host2',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2', 'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node2', limits=[])
        self.mox.ReplayAll()
        # CastAsCall makes fire-and-forget casts synchronous so the mox
        # expectations are consumed before the test ends.
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    def test_build_instances_scheduler_failure(self):
        """A NoValidHost from the scheduler sets every requested
        instance to ERROR and notifies, rather than raising to the
        caller.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in xrange(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        exception = exc.NoValidHost(reason='fake-reason')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        scheduler_utils.build_request_spec(self.context, image,
                                           mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1,
                           'hosts': []}}).AndRaise(exception)
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        # One error notification per requested instance.
        for instance in instances:
            scheduler_utils.set_vm_state_and_notify(
                self.context, instance.uuid, 'compute_task', 'build_instances',
                updates, exception, spec, self.conductor_manager.db)
        self.mox.ReplayAll()
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
    @mock.patch('nova.utils.spawn_n')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_build_instances_scheduler_group_failure(self, state_mock,
                                                     sig_mock, bs_mock,
                                                     spawn_mock):
        """An UnsupportedPolicyException from group setup errors out all
        requested instances via _set_vm_state_and_notify.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
        bs_mock.return_value = spec
        exception = exc.UnsupportedPolicyException(reason='fake-reason')
        sig_mock.side_effect = exception
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(
                          context=self.context,
                          instances=instances,
                          image=image,
                          filter_properties={},
                          admin_password='admin_password',
                          injected_files='injected_files',
                          requested_networks=None,
                          security_groups='security_groups',
                          block_device_mapping='block_device_mapping',
                          legacy_bdm=False)
        # Each instance must have been notified of the failure.
        calls = []
        for instance in instances:
            calls.append(mock.call(self.context, instance.uuid,
                                   'build_instances', updates, exception,
                                   spec))
        state_mock.assert_has_calls(calls)
    def test_unshelve_instance_on_host(self):
        """A SHELVED (not offloaded) instance is simply restarted on its
        current host and the shelved snapshot image is deleted.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'start_instance')
        self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')
        self.conductor_manager.compute_rpcapi.start_instance(self.context,
                instance)
        self.conductor_manager._delete_image(self.context,
                'fake_image_id')
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_unshelve_offloaded_instance_glance_image_not_found(self):
        """If the shelved image has vanished from glance, unshelve fails
        with UnshelveException and the instance goes to ERROR.
        """
        shelved_image_id = "image_not_found"
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata
        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        e = exc.ImageNotFound(image_id=shelved_image_id)
        self.conductor_manager.image_api.get(
            self.context, shelved_image_id, show_deleted=False).AndRaise(e)
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_host'] = 'fake-mini'
        system_metadata['shelved_image_id'] = shelved_image_id
        self.assertRaises(
            exc.UnshelveException,
            self.conductor_manager.unshelve_instance,
            self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ERROR)
    def test_unshelve_offloaded_instance_image_id_is_none(self):
        """A None shelved_image_id (volume-backed case) still schedules
        and unshelves without touching the image API.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.system_metadata['shelved_image_id'] = None
        with contextlib.nested(
            mock.patch.object(self.conductor_manager,
                              '_schedule_instances'),
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'unshelve_instance'),
        ) as (schedule_mock, unshelve_mock):
            schedule_mock.return_value = [{'host': 'fake_host',
                                           'nodename': 'fake_node',
                                           'limits': {}}]
            self.conductor_manager.unshelve_instance(self.context, instance)
            self.assertEqual(1, unshelve_mock.call_count)
    def test_unshelve_instance_schedule_and_rebuild(self):
        """Offloaded instance: fetch the shelved image, schedule a
        destination, then cast unshelve_instance to that host/node.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {}
        system_metadata = instance.system_metadata
        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')
        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn('fake_image')
        self.conductor_manager._schedule_instances(self.context,
                'fake_image', filter_properties, instance).AndReturn(
                        [{'host': 'fake_host',
                          'nodename': 'fake_node',
                          'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image='fake_image',
                filter_properties={'limits': {}}, node='fake_node')
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
        """NoValidHost during scheduling leaves the instance offloaded
        and does not propagate the exception.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        system_metadata = instance.system_metadata
        def fake_schedule_instances(context, image, filter_properties,
                                    *instances):
            raise exc.NoValidHost(reason='')
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.image_api, 'get',
                              return_value='fake_image'),
            mock.patch.object(self.conductor_manager, '_schedule_instances',
                              fake_schedule_instances)
        ) as (_get_image, _schedule_instances):
            system_metadata['shelved_at'] = timeutils.utcnow()
            system_metadata['shelved_image_id'] = 'fake_image_id'
            system_metadata['shelved_host'] = 'fake-mini'
            self.conductor_manager.unshelve_instance(self.context, instance)
            _get_image.assert_has_calls([mock.call(self.context,
                                      system_metadata['shelved_image_id'],
                                      show_deleted=False)])
            self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances',
                       side_effect=messaging.MessagingTimeout())
    @mock.patch.object(image_api.API, 'get', return_value='fake_image')
    def test_unshelve_instance_schedule_and_rebuild_messaging_exception(
            self, mock_get_image, mock_schedule_instances):
        """A MessagingTimeout propagates to the caller, but the vm/task
        state is reset so the operation can be retried.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.assertRaises(messaging.MessagingTimeout,
                          self.conductor_manager.unshelve_instance,
                          self.context, instance)
        mock_get_image.assert_has_calls([mock.call(self.context,
                                        system_metadata['shelved_image_id'],
                                        show_deleted=False)])
        self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
        self.assertIsNone(instance.task_state)
    def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
        """Volume-backed unshelve passes image=None through scheduling
        and the unshelve_instance cast.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {}
        system_metadata = instance.system_metadata
        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')
        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn(None)
        self.conductor_manager._schedule_instances(self.context,
                None, filter_properties, instance).AndReturn(
                        [{'host': 'fake_host',
                          'nodename': 'fake_node',
                          'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image=None,
                filter_properties={'limits': {}}, node='fake_node')
        self.mox.ReplayAll()
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_rebuild_instance(self):
        """Rebuild on the instance's own host skips the scheduler."""
        inst_obj = self._create_fake_instance_obj()
        rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations')
        ) as (rebuild_mock, select_dest_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                            instance=inst_obj,
                                            **rebuild_args)
            self.assertFalse(select_dest_mock.called)
            rebuild_mock.assert_called_once_with(self.context,
                               instance=inst_obj,
                               **rebuild_args)
    def test_rebuild_instance_with_scheduler(self):
        """With no target host the scheduler picks one (the current
        host is excluded via ignore_hosts) and the rebuild goes there.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        rebuild_args = self._prepare_rebuild_args({'host': None})
        expected_host = 'thebesthost'
        request_spec = {}
        filter_properties = {'ignore_hosts': [(inst_obj.host)]}
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              return_value=[{'host': expected_host}]),
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec)
        ) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                            instance=inst_obj,
                                            **rebuild_args)
            select_dest_mock.assert_called_once_with(self.context,
                                                     request_spec,
                                                     filter_properties)
            # The rebuild must be cast to the scheduler-selected host.
            rebuild_args['host'] = expected_host
            rebuild_mock.assert_called_once_with(self.context,
                                            instance=inst_obj,
                                            **rebuild_args)
    def test_rebuild_instance_with_scheduler_no_host(self):
        """NoValidHost from the scheduler aborts the rebuild entirely."""
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        rebuild_args = self._prepare_rebuild_args({'host': None})
        request_spec = {}
        filter_properties = {'ignore_hosts': [(inst_obj.host)]}
        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason='')),
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec)
        ) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
            self.assertRaises(exc.NoValidHost,
                              self.conductor_manager.rebuild_instance,
                              context=self.context, instance=inst_obj,
                              **rebuild_args)
            select_dest_mock.assert_called_once_with(self.context,
                                                     request_spec,
                                                     filter_properties)
            self.assertFalse(rebuild_mock.called)
    @mock.patch('nova.utils.spawn_n')
    @mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
                       'rebuild_instance')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.scheduler_client.SchedulerClient,
                       'select_destinations')
    @mock.patch('nova.scheduler.utils.build_request_spec')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_rebuild_instance_with_scheduler_group_failure(self,
                                                           state_mock,
                                                           bs_mock,
                                                           select_dest_mock,
                                                           sig_mock,
                                                           rebuild_mock,
                                                           spawn_mock):
        """A group-policy violation surfaces before any scheduling and
        resets the instance state to ACTIVE.
        """
        inst_obj = self._create_fake_instance_obj()
        rebuild_args = self._prepare_rebuild_args({'host': None})
        request_spec = {}
        bs_mock.return_value = request_spec
        spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
        exception = exc.UnsupportedPolicyException(reason='')
        sig_mock.side_effect = exception
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.assertRaises(exc.UnsupportedPolicyException,
                          self.conductor.rebuild_instance,
                          self.context,
                          inst_obj,
                          **rebuild_args)
        updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}
        state_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                           'rebuild_server', updates,
                                           exception, request_spec)
        self.assertFalse(select_dest_mock.called)
        self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    def setUp(self):
        super(ConductorTaskTestCase, self).setUp()
        # Exercise the manager directly (no RPC layer in between).
        self.conductor = conductor_manager.ComputeTaskManager()
        self.conductor_manager = self.conductor
    def test_migrate_server_fails_with_rebuild(self):
        # rebuild=True is not implemented by migrate_server.
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, True, None, None, None)
    def test_migrate_server_fails_with_flavor(self):
        # Supplying a flavor to a live migration is not implemented.
        flavor = flavors.get_flavor_by_name('m1.tiny')
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, False, flavor, None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
    def _test_migrate_server_deals_with_expected_exceptions(self, ex):
        """Expected live-migrate failures reset the instance to ACTIVE,
        notify, and re-raise the same exception type to the caller.
        """
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')
        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)
        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(type(ex),
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
    def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
        """InvalidCPUInfo follows the expected-exception handling path."""
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')
        ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)
        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(exc.InvalidCPUInfo,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate, 'execute')
    def test_migrate_server_deals_with_instancenotrunning_exception(self,
                mock_live_migrate, mock_set_state):
        """InstanceNotRunning preserves the current vm_state in the
        notification update rather than forcing ACTIVE.
        """
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        error = exc.InstanceNotRunning(instance_id="fake")
        mock_live_migrate.side_effect = error
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(exc.InstanceNotRunning,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None,
            'block_migration', 'disk_over_commit')
        request_spec = self._build_request_spec(inst_obj)
        mock_set_state.assert_called_once_with(self.context, inst_obj.uuid,
                'compute_task',
                'migrate_server',
                dict(vm_state=inst_obj.vm_state,
                     task_state=None,
                     expected_task_state=task_states.MIGRATING),
                error, request_spec, self.conductor_manager.db)
    def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
        # Expected-exception path for an old destination hypervisor.
        ex = exc.DestinationHypervisorTooOld()
        self._test_migrate_server_deals_with_expected_exceptions(ex)
    def test_migrate_server_deals_with_HypervisorUnavailable(self):
        # Expected-exception path for an unreachable hypervisor.
        ex = exc.HypervisorUnavailable(host='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)
    def test_migrate_server_deals_with_LiveMigrationWithOldNovaNotSafe(self):
        # Expected-exception path when the source nova is too old.
        ex = exc.LiveMigrationWithOldNovaNotSafe(server='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)
    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate, 'execute')
    def test_migrate_server_deals_with_unexpected_exceptions(self,
                mock_live_migrate, mock_set_state):
        """Unexpected errors are wrapped in MigrationError (carrying the
        original message as the reason) and the instance goes to ERROR.
        """
        expected_ex = IOError('fake error')
        mock_live_migrate.side_effect = expected_ex
        instance = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        ex = self.assertRaises(exc.MigrationError,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
        request_spec = {'instance_properties': {
                'uuid': instance['uuid'], },
        }
        mock_set_state.assert_called_once_with(self.context,
                instance['uuid'],
                'compute_task', 'migrate_server',
                dict(vm_state=vm_states.ERROR,
                     task_state=inst_obj.task_state,
                     expected_task_state=task_states.MIGRATING,),
                expected_ex, request_spec, self.conductor.db)
        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))
    def test_set_vm_state_and_notify(self):
        """_set_vm_state_and_notify is a thin shim over the scheduler
        utility of the same name, adding service name and DB handle.
        """
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')
        scheduler_utils.set_vm_state_and_notify(
                self.context, 1, 'compute_task', 'method', 'updates',
                'ex', 'request_spec', self.conductor.db)
        self.mox.ReplayAll()
        self.conductor._set_vm_state_and_notify(
                self.context, 1, 'method', 'updates', 'ex', 'request_spec')
    def test_cold_migrate_no_valid_host_back_in_active_state(self):
        """NoValidHost during cold migrate restores ACTIVE state, rolls
        back the quota reservations and re-raises.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
        scheduler_utils.build_request_spec(
            self.context, image, [inst_obj],
            instance_type=flavor).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)
        exc_info = exc.NoValidHost(reason="")
        self.conductor.scheduler_client.select_destinations(
            self.context, request_spec,
            filter_props).AndRaise(exc_info)
        updates = {'vm_state': vm_states.ACTIVE,
                   'task_state': None}
        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)
        self.mox.ReplayAll()
        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate,
                          self.context, inst_obj,
                          flavor, filter_props, [resvs],
                          clean_shutdown=True)
    def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
        """Same as the ACTIVE variant, but a STOPPED instance must be
        returned to STOPPED rather than forced to ACTIVE.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
        scheduler_utils.build_request_spec(
            self.context, image, [inst_obj],
            instance_type=flavor).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)
        exc_info = exc.NoValidHost(reason="")
        self.conductor.scheduler_client.select_destinations(
            self.context, request_spec,
            filter_props).AndRaise(exc_info)
        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}
        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)
        self.mox.ReplayAll()
        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate, self.context,
                          inst_obj, flavor, filter_props, [resvs],
                          clean_shutdown=True)
    def test_cold_migrate_no_valid_host_error_msg(self):
        """The re-raised NoValidHost must mention 'cold migrate' so the
        failure mode is identifiable from the message alone.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        with contextlib.nested(
            mock.patch.object(compute_utils, 'get_image_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(self.conductor.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason=""))
        ) as (image_mock, brs_mock, sig_mock, set_vm_mock, select_dest_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor, filter_props, [resvs],
                                    clean_shutdown=True)
            self.assertIn('cold migrate', nvh.message)
@mock.patch.object(compute_utils, 'get_image_metadata')
@mock.patch('nova.scheduler.utils.build_request_spec')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(conductor_manager.ComputeTaskManager,
                   '_set_vm_state_and_notify')
def test_cold_migrate_no_valid_host_in_group(self,
                                             set_vm_mock,
                                             sig_mock,
                                             brs_mock,
                                             image_mock):
    """An UnsupportedPolicyException from the instance-group setup aborts
    the cold migrate and records the failure via _set_vm_state_and_notify.

    Note: mock decorators inject arguments bottom-up, hence the reversed
    parameter order relative to the decorator list.
    """
    flavor = flavors.get_flavor_by_name('m1.tiny')
    inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                          vm_state=vm_states.STOPPED,
                                          instance_type_id=flavor['id'])
    inst_obj = objects.Instance._from_db_object(
        self.context, objects.Instance(), inst,
        expected_attrs=[])
    request_spec = dict(instance_type=dict(extra_specs=dict()),
                        instance_properties=dict())
    filter_props = dict(context=None)
    resvs = 'fake-resvs'
    image = 'fake-image'
    exception = exc.UnsupportedPolicyException(reason='')
    image_mock.return_value = image
    brs_mock.return_value = request_spec
    # Group setup rejects the policy before any host is selected.
    sig_mock.side_effect = exception
    self.assertRaises(exc.UnsupportedPolicyException,
                      self.conductor._cold_migrate, self.context,
                      inst_obj, flavor, filter_props, [resvs],
                      clean_shutdown=True)
    # The instance must stay STOPPED with the task state cleared.
    updates = {'vm_state': vm_states.STOPPED, 'task_state': None}
    set_vm_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                        'migrate_server', updates,
                                        exception, request_spec)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
    """If prep_resize blows up on the selected host, the error is
    notified, the quota reservation is rolled back and the exception
    is re-raised to the caller.

    This test uses mox record/replay: every expectation below must occur
    in exactly this order during _cold_migrate.
    """
    inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                          vm_state=vm_states.STOPPED)
    inst_obj = objects.Instance._from_db_object(
        self.context, objects.Instance(), inst,
        expected_attrs=[])
    request_spec = dict(instance_type=dict(),
                        instance_properties=dict())
    filter_props = dict(context=None)
    resvs = 'fake-resvs'
    image = 'fake-image'
    hosts = [dict(host='host1', nodename=None, limits={})]
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.conductor.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(scheduler_utils,
                             'populate_filter_properties')
    self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
                             'prep_resize')
    self.mox.StubOutWithMock(self.conductor,
                             '_set_vm_state_and_notify')
    self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
    # --- recorded expectations (happy path up to prep_resize) ---
    compute_utils.get_image_metadata(
        self.context, self.conductor_manager.image_api,
        'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
    scheduler_utils.build_request_spec(
        self.context, image, [inst_obj],
        instance_type='flavor').AndReturn(request_spec)
    scheduler_utils.setup_instance_group(self.context, request_spec,
                                         filter_props)
    expected_filter_props = {'retry': {'num_attempts': 1,
                                       'hosts': []},
                             'context': None}
    self.conductor.scheduler_client.select_destinations(
        self.context, request_spec,
        expected_filter_props).AndReturn(hosts)
    scheduler_utils.populate_filter_properties(filter_props,
                                               hosts[0])
    # prep_resize on the chosen host fails with an arbitrary exception.
    exc_info = test.TestingException('something happened')
    expected_filter_props = {'retry': {'num_attempts': 1,
                                       'hosts': []}}
    self.conductor.compute_rpcapi.prep_resize(
        self.context, image, inst_obj,
        'flavor', hosts[0]['host'], [resvs],
        request_spec=request_spec,
        filter_properties=expected_filter_props,
        node=hosts[0]['nodename'],
        clean_shutdown=True).AndRaise(exc_info)
    # Error path: notify, then roll the quota reservation back.
    updates = {'vm_state': vm_states.STOPPED,
               'task_state': None}
    self.conductor._set_vm_state_and_notify(self.context,
                                            inst_obj.uuid,
                                            'migrate_server',
                                            updates, exc_info,
                                            request_spec)
    project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                       inst_obj)
    quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                          user_id=user_id)
    self.mox.ReplayAll()
    self.assertRaises(test.TestingException,
                      self.conductor._cold_migrate,
                      self.context, inst_obj, 'flavor',
                      filter_props, [resvs],
                      clean_shutdown=True)
def test_resize_no_valid_host_error_msg(self):
    """A NoValidHost raised while migrating to a *different* flavor
    (i.e. a resize) keeps an error message that mentions 'resize'."""
    flavor = flavors.get_flavor_by_name('m1.tiny')
    flavor_new = flavors.get_flavor_by_name('m1.small')
    inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                          vm_state=vm_states.STOPPED,
                                          instance_type_id=flavor['id'])
    inst_obj = objects.Instance._from_db_object(
        self.context, objects.Instance(), inst,
        expected_attrs=[])
    request_spec = dict(instance_type=dict(extra_specs=dict()),
                        instance_properties=dict())
    filter_props = dict(context=None)
    resvs = 'fake-resvs'
    image = 'fake-image'
    # Stub every collaborator; the scheduler reports that no host fits.
    with contextlib.nested(
        mock.patch.object(compute_utils, 'get_image_metadata',
                          return_value=image),
        mock.patch.object(scheduler_utils, 'build_request_spec',
                          return_value=request_spec),
        mock.patch.object(scheduler_utils, 'setup_instance_group',
                          return_value=False),
        mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
        mock.patch.object(self.conductor.scheduler_client,
                          'select_destinations',
                          side_effect=exc.NoValidHost(reason=""))
    ) as (image_mock, brs_mock, sig_mock, vm_st_mock, select_dest_mock):
        # Migrating to flavor_new (!= current flavor) makes this a resize.
        nvh = self.assertRaises(exc.NoValidHost,
                                self.conductor._cold_migrate, self.context,
                                inst_obj, flavor_new, filter_props,
                                [resvs], clean_shutdown=True)
        self.assertIn('resize', nvh.message)
def test_build_instances_instance_not_found(self):
    """If one of two instances vanishes (InstanceNotFound on refresh),
    build_instances silently skips it and still builds the other one.

    Mox record/replay: the expectations must occur in this exact order.
    """
    instances = [fake_instance.fake_instance_obj(self.context)
                 for i in xrange(2)]
    self.mox.StubOutWithMock(instances[0], 'refresh')
    self.mox.StubOutWithMock(instances[1], 'refresh')
    image = {'fake-data': 'should_pass_silently'}
    spec = {'fake': 'specs',
            'instance_properties': instances[0]}
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'build_and_run_instance')
    scheduler_utils.build_request_spec(self.context, image,
                                       mox.IgnoreArg()).AndReturn(spec)
    scheduler_utils.setup_instance_group(self.context, spec, {})
    # Scheduler hands back one destination per instance.
    self.conductor_manager.scheduler_client.select_destinations(
        self.context, spec,
        {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
            [{'host': 'host1', 'nodename': 'node1', 'limits': []},
             {'host': 'host2', 'nodename': 'node2', 'limits': []}])
    # First instance disappears between scheduling and launch.
    instances[0].refresh().AndRaise(
        exc.InstanceNotFound(instance_id=instances[0].uuid))
    instances[1].refresh()
    # Only the surviving instance is actually built, on its own host.
    self.conductor_manager.compute_rpcapi.build_and_run_instance(
        self.context, instance=instances[1], host='host2',
        image={'fake-data': 'should_pass_silently'}, request_spec=spec,
        filter_properties={'limits': [],
                           'retry': {'num_attempts': 1,
                                     'hosts': [['host2',
                                                'node2']]}},
        admin_password='admin_password',
        injected_files='injected_files',
        requested_networks=None,
        security_groups='security_groups',
        block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
        node='node2', limits=[])
    self.mox.ReplayAll()
    # Turn the RPC cast into a call so the test runs synchronously.
    self.useFixture(cast_as_call.CastAsCall(self.stubs))
    self.conductor.build_instances(self.context,
                                   instances=instances,
                                   image=image,
                                   filter_properties={},
                                   admin_password='admin_password',
                                   injected_files='injected_files',
                                   requested_networks=None,
                                   security_groups='security_groups',
                                   block_device_mapping='block_device_mapping',
                                   legacy_bdm=False)
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_utils, 'build_request_spec')
def test_build_instances_info_cache_not_found(self, build_request_spec,
                                              setup_instance_group):
    """An InstanceInfoCacheNotFound on refresh of one instance is
    tolerated: the other instance still gets built on its host."""
    instances = [fake_instance.fake_instance_obj(self.context)
                 for i in xrange(2)]
    image = {'fake-data': 'should_pass_silently'}
    destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                    {'host': 'host2', 'nodename': 'node2', 'limits': []}]
    spec = {'fake': 'specs',
            'instance_properties': instances[0]}
    build_request_spec.return_value = spec
    with contextlib.nested(
        # First instance's info cache is gone; second refreshes fine.
        mock.patch.object(instances[0], 'refresh',
                          side_effect=exc.InstanceInfoCacheNotFound(
                              instance_uuid=instances[0].uuid)),
        mock.patch.object(instances[1], 'refresh'),
        mock.patch.object(self.conductor_manager.scheduler_client,
                          'select_destinations', return_value=destinations),
        mock.patch.object(self.conductor_manager.compute_rpcapi,
                          'build_and_run_instance')
    ) as (inst1_refresh, inst2_refresh, select_destinations,
          build_and_run_instance):
        # Turn the RPC cast into a call so the test runs synchronously.
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                                       instances=instances,
                                       image=image,
                                       filter_properties={},
                                       admin_password='admin_password',
                                       injected_files='injected_files',
                                       requested_networks=None,
                                       security_groups='security_groups',
                                       block_device_mapping='block_device_mapping',
                                       legacy_bdm=False)
        setup_instance_group.assert_called_once_with(
            self.context, spec, {'retry': {'num_attempts': 1,
                                           'hosts': []}})
        # Only the second (surviving) instance is built.
        build_and_run_instance.assert_called_once_with(self.context,
            instance=instances[1], host='host2', image={'fake-data':
            'should_pass_silently'}, request_spec=spec,
            filter_properties={'limits': [],
                               'retry': {'num_attempts': 1,
                                         'hosts': [['host2',
                                                    'node2']]}},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks=None,
            security_groups='security_groups',
            block_device_mapping=mock.ANY,
            node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
                                  test_compute.BaseTestCase):
    """Runs the shared compute-task tests through the conductor RPC API."""

    def setUp(self):
        super(ConductorTaskRPCAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_rpcapi.ComputeTaskAPI()
        # Reach into the running service for the task manager under test.
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """Runs the shared compute-task tests through the conductor API."""

    def setUp(self):
        super(ConductorTaskAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.ComputeTaskAPI()
        # Reach into the running service for the task manager under test.
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
    """Re-runs the task API tests against the in-process (local) API."""

    def setUp(self):
        super(ConductorLocalComputeTaskAPITestCase, self).setUp()
        local_api = conductor_api.LocalComputeTaskAPI()
        self.conductor = local_api
        self.conductor_manager = local_api._manager._target
| true | true |
f72281c8dec6874aa11aa6ba69bc1dc45cdffafc | 5,480 | py | Python | rviz_pyplot/python/rviz_pyplot/Plotter.py | uschwes/rviz_pyplot | 7080a0730bf20aa12b7f533a78a29e4d9e4b49c1 | [
"BSD-3-Clause"
] | 1 | 2017-08-29T10:19:23.000Z | 2017-08-29T10:19:23.000Z | rviz_pyplot/python/rviz_pyplot/Plotter.py | uschwes/rviz_pyplot | 7080a0730bf20aa12b7f533a78a29e4d9e4b49c1 | [
"BSD-3-Clause"
] | null | null | null | rviz_pyplot/python/rviz_pyplot/Plotter.py | uschwes/rviz_pyplot | 7080a0730bf20aa12b7f533a78a29e4d9e4b49c1 | [
"BSD-3-Clause"
] | null | null | null | import rospy
import PointCloud
reload(PointCloud)
import CoordinateFrames
reload(CoordinateFrames)
import Lines
reload(Lines)
import Image as ImagePy
reload(ImagePy)
import Text
reload(Text)
from PointCloud import PointCloudMarker
from PlotObject import PlotObject
from CoordinateFrames import CoordinateFramesMarker
from Image import ImageMarker
from sensor_msgs.msg import PointCloud2, Image
from visualization_msgs.msg import Marker, MarkerArray
publishedMessages = [] # to be able to delete them later with clf
class Plotter(object):
def __init__(self, initRosNode=True, rosNodeName=None, visFrame=None):
if initRosNode:
if rosNodeName is None:
rosNodeName = 'rviz_pyplot'#_%s'.format(uuid.uuid1().get_hex())
rospy.init_node(rosNodeName,['Plotter.py'], disable_signals=True)
if visFrame is None:
visFrame = "/rviz_pyplot"
self._visFrame = visFrame
self._publishers = {}
self._publishers[PointCloud2] = {}
self._publishers[MarkerArray] = {}
self._publishers[Image] = {}
self._defaultTopics = {}
self._defaultTopics[PointCloud2] = "{0}/points".format(rospy.get_name())
self._defaultTopics[MarkerArray] = "{0}/marker_array".format(rospy.get_name())
self._defaultTopics[Image] = "{0}/images".format(rospy.get_name())
# \todo publish transforms in a thread.
def __del__(self):
# \todo clean up ROS
pass
def clf(self):
global publishedMessages
for topic,msg in publishedMessages:
if type(msg) == Marker:
pub = self.getPublisher(Marker, topic)
msg.action = Marker.DELETE
if type(msg) == MarkerArray:
pub = self.getPublisher(MarkerArray, topic)
for m in msg.markers:
m.action = Marker.DELETE
else:
continue
pub.publish( msg )
publishedMessages = []
def getDefaultPointCloudTopic(self):
return self._defaultPointCloudTopic
def getDefaultMarkerArrayTopic(self):
return self._defaultMarkerArrayTopic
def getPublisher(self, messageType, topic=None, latch=True):
publisherList = self._publishers[messageType]
if topic is None:
topic = self._defaultTopics[messageType]
if topic in publisherList:
pub = publisherList[topic]
else:
# Initialize a new publisher
pub = rospy.Publisher(topic, messageType, latch=latch)
# Save the publisher for later
publisherList[topic] = pub
return pub
def activeTopics( self ):
return (self._pointCloudPubs.keys(), self._markerArrayPubs.keys())
def printActiveTopics( self ):
print "Point cloud topics:"
for key in self._pointCloudPubs.keys():
print "\t{0}".format(key)
print "Marker array topics:"
for key in self._markerArrayPubs.keys():
print "\t{0}".format(key)
def plot( self, plotItems, stamp=None ):
if stamp is None:
stamp = rospy.Time.now()
# Accumulate a list of point clouds and markers to publish
messages = []
if type(plotItems) == list:
for item in plotItems:
item.appendMessages(stamp, messages)
else:
# Assume this is a single plotItem
plotItems.appendMessages(stamp, messages)
global publishedMessages
topics = {}
for topic, msg in messages:
if type(msg) == PointCloud2:
pub = self.getPublisher(PointCloud2, topic)
# Always override the stamp. This is a design choice
# that may be revisited
msg.header.stamp = stamp
if msg.header.frame_id is None:
msg.header.frame_id = self._visFrame
pub.publish( msg )
publishedMessages.append( (topic,msg) )
elif type(msg) == Marker:
msg.header.stamp = stamp
if msg.header.frame_id is None:
msg.header.frame_id = self._visFrame
if topic in topics:
topics[topic].markers.append(msg)
else:
ma = MarkerArray()
ma.markers.append(msg)
topics[topic] = ma
elif type(msg) == Image:
pub = self.getPublisher(Image, topic)
# Always override the stamp. This is a design choice
# that may be revisited
msg.header.stamp = stamp
if msg.header.frame_id is None:
msg.header.frame_id = self._visFrame
pub.publish( msg )
publishedMessages.append( (topic,msg) )
else:
raise RuntimeError("Unknown message type {0}\n{1}".format(type(msg), msg))
for topic, ma in topics.iteritems():
pub = self.getPublisher(MarkerArray, topic)
pub.publish(ma)
publishedMessages.append( (topic,ma) )
def plotImage(self, I, frameId=None, topic=None):
img = ImageMarker(frameId=frameId, topic=topic)
img.addImage(I)
self.plot(img)
def plotText(self, text, position, scale, frameId=None, topic=None):
textMarker = Text.TextMarker(frameId=frameId, topic=topic, scale=scale)
textMarker.setText(text, position)
self.plot(textMarker)
| 34.683544 | 90 | 0.60438 | import rospy
import PointCloud
reload(PointCloud)
import CoordinateFrames
reload(CoordinateFrames)
import Lines
reload(Lines)
import Image as ImagePy
reload(ImagePy)
import Text
reload(Text)
from PointCloud import PointCloudMarker
from PlotObject import PlotObject
from CoordinateFrames import CoordinateFramesMarker
from Image import ImageMarker
from sensor_msgs.msg import PointCloud2, Image
from visualization_msgs.msg import Marker, MarkerArray
publishedMessages = []
class Plotter(object):
    """ROS publisher front-end for rviz_pyplot plot items (point clouds,
    marker arrays, images, text markers)."""

    def __init__(self, initRosNode=True, rosNodeName=None, visFrame=None):
        if initRosNode:
            if rosNodeName is None:
                rosNodeName = 'rviz_pyplot'
            rospy.init_node(rosNodeName,['Plotter.py'], disable_signals=True)
        if visFrame is None:
            visFrame = "/rviz_pyplot"
        # Frame id stamped on messages whose frame is left unset.
        self._visFrame = visFrame
        # message type -> {topic name: rospy.Publisher}
        self._publishers = {}
        self._publishers[PointCloud2] = {}
        self._publishers[MarkerArray] = {}
        self._publishers[Image] = {}
        self._defaultTopics = {}
        self._defaultTopics[PointCloud2] = "{0}/points".format(rospy.get_name())
        self._defaultTopics[MarkerArray] = "{0}/marker_array".format(rospy.get_name())
        self._defaultTopics[Image] = "{0}/images".format(rospy.get_name())
        # \todo publish transforms in a thread.

    def __del__(self):
        # \todo clean up ROS
        pass

    def clf(self):
        """Re-publish remembered markers with action DELETE, then forget."""
        # NOTE(review): when msg is a Marker, the second `if` below is False
        # and its else-branch `continue` skips the publish -- the DELETE for
        # plain Markers is never sent. These should probably be if/elif.
        global publishedMessages
        for topic,msg in publishedMessages:
            if type(msg) == Marker:
                pub = self.getPublisher(Marker, topic)
                msg.action = Marker.DELETE
            if type(msg) == MarkerArray:
                pub = self.getPublisher(MarkerArray, topic)
                for m in msg.markers:
                    m.action = Marker.DELETE
            else:
                continue
            pub.publish( msg )
        publishedMessages = []

    def getDefaultPointCloudTopic(self):
        # NOTE(review): _defaultPointCloudTopic is never assigned anywhere in
        # this class (__init__ only fills _defaultTopics), so calling this
        # raises AttributeError.
        return self._defaultPointCloudTopic

    def getDefaultMarkerArrayTopic(self):
        # NOTE(review): same undefined attribute problem as above.
        return self._defaultMarkerArrayTopic

    def getPublisher(self, messageType, topic=None, latch=True):
        """Return the cached publisher for (messageType, topic), creating
        it on first use; topic defaults to the type's default topic."""
        publisherList = self._publishers[messageType]
        if topic is None:
            topic = self._defaultTopics[messageType]
        if topic in publisherList:
            pub = publisherList[topic]
        else:
            # Initialize a new publisher
            pub = rospy.Publisher(topic, messageType, latch=latch)
            # Save the publisher for later
            publisherList[topic] = pub
        return pub

    def activeTopics( self ):
        # NOTE(review): _pointCloudPubs/_markerArrayPubs are never defined
        # (__init__ only creates _publishers) -- AttributeError if called.
        return (self._pointCloudPubs.keys(), self._markerArrayPubs.keys())

    def printActiveTopics( self ):
        # NOTE(review): same undefined attributes as activeTopics.
        print "Point cloud topics:"
        for key in self._pointCloudPubs.keys():
            print "\t{0}".format(key)
        print "Marker array topics:"
        for key in self._markerArrayPubs.keys():
            print "\t{0}".format(key)

    def plot( self, plotItems, stamp=None ):
        """Publish one plot item or a list of them; Markers are batched
        into one MarkerArray per topic."""
        if stamp is None:
            stamp = rospy.Time.now()
        # Accumulate a list of point clouds and markers to publish
        messages = []
        if type(plotItems) == list:
            for item in plotItems:
                item.appendMessages(stamp, messages)
        else:
            # Assume this is a single plotItem
            plotItems.appendMessages(stamp, messages)
        global publishedMessages
        topics = {}
        for topic, msg in messages:
            if type(msg) == PointCloud2:
                pub = self.getPublisher(PointCloud2, topic)
                # Always override the stamp. This is a design choice
                # that may be revisited
                msg.header.stamp = stamp
                if msg.header.frame_id is None:
                    msg.header.frame_id = self._visFrame
                pub.publish( msg )
                publishedMessages.append( (topic,msg) )
            elif type(msg) == Marker:
                msg.header.stamp = stamp
                if msg.header.frame_id is None:
                    msg.header.frame_id = self._visFrame
                if topic in topics:
                    topics[topic].markers.append(msg)
                else:
                    ma = MarkerArray()
                    ma.markers.append(msg)
                    topics[topic] = ma
            elif type(msg) == Image:
                pub = self.getPublisher(Image, topic)
                # Always override the stamp. This is a design choice
                # that may be revisited
                msg.header.stamp = stamp
                if msg.header.frame_id is None:
                    msg.header.frame_id = self._visFrame
                pub.publish( msg )
                publishedMessages.append( (topic,msg) )
            else:
                raise RuntimeError("Unknown message type {0}\n{1}".format(type(msg), msg))
        for topic, ma in topics.iteritems():
            pub = self.getPublisher(MarkerArray, topic)
            pub.publish(ma)
            publishedMessages.append( (topic,ma) )

    def plotImage(self, I, frameId=None, topic=None):
        """Publish image I as a ROS Image message."""
        img = ImageMarker(frameId=frameId, topic=topic)
        img.addImage(I)
        self.plot(img)

    def plotText(self, text, position, scale, frameId=None, topic=None):
        """Publish `text` as a text marker at `position` with `scale`."""
        textMarker = Text.TextMarker(frameId=frameId, topic=topic, scale=scale)
        textMarker.setText(text, position)
        self.plot(textMarker)
| false | true |
f722823d99d3774e7583e2201796279320d2e199 | 7,715 | py | Python | MRIAssimilator/MRIAThreads.py | meewa1/BrukerGUI | f71211557f3a61322a8a8bc9bdb1f70f3cc82969 | [
"MIT"
] | 2 | 2019-05-16T14:30:21.000Z | 2019-10-23T10:42:57.000Z | MRIAssimilator/MRIAThreads.py | meewa1/BrukerGUI | f71211557f3a61322a8a8bc9bdb1f70f3cc82969 | [
"MIT"
] | 3 | 2019-05-16T14:34:25.000Z | 2021-12-13T20:23:15.000Z | MRIAssimilator/MRIAThreads.py | meewa1/MRI-assimilator | f71211557f3a61322a8a8bc9bdb1f70f3cc82969 | [
"MIT"
] | 2 | 2019-05-16T14:34:58.000Z | 2021-12-27T06:33:57.000Z | from PyQt5 import QtCore
import os, tempfile
from scipy.misc import toimage
import brukerWriter as bw
import utils
from FilesTreeWidget import *
__all__ = ["FilesTreeThread", "SaveThread"]
class FilesTreeThread(QtCore.QThread):
    """Background thread that builds or updates the files tree widget.

    parent   -- owner window; must provide .tree (FilesTreeWidget) and
                .curDir (current directory).
    mode     -- passed through to FilesTreeWidget.manageTree ("create" ...).
    dirnames -- explicit scan target; when empty the parent's current
                directory is used instead.
    """

    def __init__(self, parent=None, mode="create", dirnames=""):
        super().__init__()
        self.parent = parent
        # NOTE(review): never modified in this class; presumably inspected
        # by the caller after the thread finishes -- confirm.
        self.fail = 0
        self.mode = mode
        self.dirnames = dirnames

    def run(self):
        # FIX: removed dead commented-out per-directory loop and collapsed
        # the duplicated manageTree call into one with a chosen target.
        target = self.dirnames if self.dirnames else self.parent.curDir
        self.parent.tree.manageTree(target, self.mode)
class CANCELThread(Exception):
    """Raised internally to abort a running worker thread."""
class SaveThread(QtCore.QThread):
    """
    Thread that exports experiment data as image, XML or text files.

    The files are written into a fresh temporary directory created inside
    `savepath`. Behaviour depends on `self.trigger`:

    * "all"    -- every checked experiment in the files tree is exported,
                  one sub-folder per experiment name (the default);
    * "single" -- only the currently displayed experiment is exported,
                  without creating per-experiment folders.
    """
    # Signals that drive the progress dialog in the GUI thread.
    progressText = QtCore.pyqtSignal(str)
    progress = QtCore.pyqtSignal(int)
    # Valid values of the `saveType` constructor argument.
    suggestedTypes = ["Image", "XML", "Text"]

    def __init__(self, parent, savepath, saveType, form = "", filename = ""):
        """
        parent   -- main window; supplies .tree, .scroll and the current
                    experiment selection.
        savepath -- directory in which the temporary export folder is made.
        saveType -- "Image", "XML" or "Text"; anything else aborts.
        form     -- output file extension; forced to "xml" for XML exports.
        filename -- base file name used by the "single" trigger.
        """
        super().__init__()
        self.saveType = saveType
        if self.saveType not in self.suggestedTypes:
            raise CANCELThread("Uncorrect function type")
        self.parent = parent
        self.SaveDir = savepath
        self.form = "xml" if self.saveType=="XML" else form
        self.trigger = "all"
        # Set True from outside (e.g. a Cancel button) to abort the export.
        self.cancelThread = False
        self.filename = filename

    def _SaveAllChecked(self):
        """Export every checked experiment into the temporary folder."""
        completed = 0
        data = self.parent.tree.ImageData
        checkedItemList = []
        self.parent.tree.findCheckedItems(self.parent.tree.invisibleRootItem(), checkedItemList)
        allDim = 0
        self.progressText.emit(self.tr("Data size counting"))
        # Total image count over all checked experiments, for progress %.
        for expNumItem in checkedItemList:
            allDim += int(utils.num_pattern.findall(expNumItem.text(0))[1])
        for expNumItem in checkedItemList:
            exp_name = self.parent.tree.getExpNameItem(expNumItem).text(0)
            exp_num = utils.num_pattern.findall(expNumItem.text(0))[0]
            saveDir = os.path.join(self.tmp_folder.name, exp_name)
            utils.checkdir(saveDir)
            if self.saveType == "Image":
                # Images go one-per-file into a per-experiment-number folder.
                saveDir = os.path.join(saveDir, exp_num)
                utils.checkdir(saveDir)
            if self.saveType != "Image":
                # XML/Text: one file per experiment number.
                fname = '{0}{1}Experiment_{2}.{3}'.format(saveDir,
                                                          os.sep,
                                                          exp_num,
                                                          self.form)
            img_data = data[exp_name][exp_num]["data"]
            for i in range(img_data.Dimension[0]):
                if self.cancelThread:
                    raise CANCELThread()
                if self.saveType == "Image":
                    fname = '{0}{1}Image_{2}.{3}'.format(saveDir,
                                                         os.sep,
                                                         i+1,
                                                         self.form)
                    self.progressText.emit(
                        self.tr("Writting Image_{0}.{1} to the folder /{2}/{3}").format(
                            i+1,
                            self.form,
                            exp_name,
                            exp_num))
                    toimage(img_data.IntenseData[i,:,:],
                            cmin=img_data.min_val, cmax=img_data.max_val).save(fname)
                else:
                    self.progressText.emit(
                        self.tr("Writting Image {0}\{1} to the Experiment_{2}.{3}").format(
                            i+1,
                            img_data.Dimension[0],
                            exp_num,
                            self.form))
                    # Dispatch to bw.SingleWriteToXMLFile / ...ToTextFile;
                    # safe because saveType was whitelisted in __init__.
                    eval("bw.SingleWriteTo{}File".format(self.saveType))(fname,
                                                                         img_data,
                                                                         i,
                                                                         i==0)
                # NOTE(review): `completed` is a float (true division); PyQt
                # coerces it for the progress(int) signal -- confirm rounding.
                completed += 100/allDim
                self.progress.emit(completed)

    def _SaveSingle(self):
        """
        Saving current experiment number
        """
        completed = 0
        allDim = self.parent.scroll.maximum()
        saveDir = self.tmp_folder.name
        img_data = self.parent.tree.ImageData[self.parent.curExpName][self.parent.curExpNum]["data"]
        # add ".xml" postfix if it's not presented for XML files
        # NOTE(review): `re` is not imported in this module directly; it must
        # come via the star import from FilesTreeWidget -- confirm.
        if self.saveType == "XML":
            try:
                self.filename = re.search(r".+\.xml$", self.filename).group()
            except AttributeError:
                self.filename += ".xml"
        fname = '{0}{1}{2}'.format(saveDir,
                                   os.sep,
                                   self.filename)
        for i in range(allDim):
            if self.cancelThread:
                raise CANCELThread()
            if self.saveType == "Image":
                # Images: one numbered file per slice.
                fname = '{0}{1}{2}_{3}.{4}'.format(saveDir,
                                                   os.sep,
                                                   self.filename,
                                                   i+1,
                                                   self.form)
                self.progressText.emit(
                    self.tr("Writting {0}_{1}.{2}").format(self.filename,
                                                           i+1,
                                                           self.form))
                toimage(img_data.IntenseData[i,:,:],
                        cmin=img_data.min_val, cmax=img_data.max_val).save(fname)
            else:
                self.progressText.emit(
                    self.tr("Writting Image {0}\{1} to the {2}").format(i+1,
                                                                        allDim + 1,
                                                                        self.filename))
                eval("bw.SingleWriteTo{}File".format(self.saveType))(fname,
                                                                     img_data,
                                                                     i,
                                                                     i==0)
            completed += 100/allDim
            self.progress.emit(completed)

    def run(self):
        """Thread entry point: prepare the temp folder and dispatch on
        self.trigger; a CANCELThread quietly terminates the thread."""
        try:
            utils.checkdir(self.SaveDir)
            # create a temporary folder
            self.tmp_folder = tempfile.TemporaryDirectory(suffix = ".TMP",
                                                          prefix="_MRIAssimilator_",
                                                          dir = self.SaveDir)
            if self.trigger == "all":
                self._SaveAllChecked()
            elif self.trigger == "single":
                self._SaveSingle()
        except CANCELThread:
            self.quit()
| 40.605263 | 100 | 0.423461 | from PyQt5 import QtCore
import os, tempfile
from scipy.misc import toimage
import brukerWriter as bw
import utils
from FilesTreeWidget import *
__all__ = ["FilesTreeThread", "SaveThread"]
class FilesTreeThread(QtCore.QThread):
    """Background thread that builds or updates the files tree widget."""
    def __init__(self, parent = None, mode = "create", dirnames = ""):
        super().__init__()
        self.parent = parent
        # NOTE(review): never modified in this class; presumably inspected
        # by the caller after the thread finishes -- confirm.
        self.fail = 0
        self.mode = mode
        self.dirnames = dirnames
    def run(self):
        if not self.dirnames:
            # No explicit target given: scan the parent's current directory.
            self.parent.tree.manageTree(self.parent.curDir, self.mode)
        else:
            self.parent.tree.manageTree(self.dirnames, self.mode)
class CANCELThread(Exception):
    """Raised internally to abort a running worker thread."""
    pass
class SaveThread(QtCore.QThread):
    """Thread that exports experiment data as image, XML or text files.

    Output goes into a fresh temporary directory created inside `savepath`.
    `self.trigger` selects the mode: "all" exports every checked experiment
    (one sub-folder per experiment name); "single" exports only the
    currently displayed experiment without per-experiment folders.
    """
    # Signals that drive the progress dialog in the GUI thread.
    progressText = QtCore.pyqtSignal(str)
    progress = QtCore.pyqtSignal(int)
    # Valid values of the `saveType` constructor argument.
    suggestedTypes = ["Image", "XML", "Text"]
    def __init__(self, parent, savepath, saveType, form = "", filename = ""):
        """parent: main window (tree/scroll/current selection); savepath:
        where the temp folder is made; saveType: one of suggestedTypes;
        form: output extension (forced to "xml" for XML); filename: base
        name used by the "single" trigger."""
        super().__init__()
        self.saveType = saveType
        if self.saveType not in self.suggestedTypes:
            raise CANCELThread("Uncorrect function type")
        self.parent = parent
        self.SaveDir = savepath
        self.form = "xml" if self.saveType=="XML" else form
        self.trigger = "all"
        # Set True from outside (e.g. a Cancel button) to abort the export.
        self.cancelThread = False
        self.filename = filename
    def _SaveAllChecked(self):
        """Export every checked experiment into the temporary folder."""
        completed = 0
        data = self.parent.tree.ImageData
        checkedItemList = []
        self.parent.tree.findCheckedItems(self.parent.tree.invisibleRootItem(), checkedItemList)
        allDim = 0
        self.progressText.emit(self.tr("Data size counting"))
        # Total image count over all checked experiments, for progress %.
        for expNumItem in checkedItemList:
            allDim += int(utils.num_pattern.findall(expNumItem.text(0))[1])
        for expNumItem in checkedItemList:
            exp_name = self.parent.tree.getExpNameItem(expNumItem).text(0)
            exp_num = utils.num_pattern.findall(expNumItem.text(0))[0]
            saveDir = os.path.join(self.tmp_folder.name, exp_name)
            utils.checkdir(saveDir)
            if self.saveType == "Image":
                # Images go one-per-file into a per-experiment-number folder.
                saveDir = os.path.join(saveDir, exp_num)
                utils.checkdir(saveDir)
            if self.saveType != "Image":
                # XML/Text: one file per experiment number.
                fname = '{0}{1}Experiment_{2}.{3}'.format(saveDir,
                                                          os.sep,
                                                          exp_num,
                                                          self.form)
            img_data = data[exp_name][exp_num]["data"]
            for i in range(img_data.Dimension[0]):
                if self.cancelThread:
                    raise CANCELThread()
                if self.saveType == "Image":
                    fname = '{0}{1}Image_{2}.{3}'.format(saveDir,
                                                         os.sep,
                                                         i+1,
                                                         self.form)
                    self.progressText.emit(
                        self.tr("Writting Image_{0}.{1} to the folder /{2}/{3}").format(
                            i+1,
                            self.form,
                            exp_name,
                            exp_num))
                    toimage(img_data.IntenseData[i,:,:],
                            cmin=img_data.min_val, cmax=img_data.max_val).save(fname)
                else:
                    self.progressText.emit(
                        self.tr("Writting Image {0}\{1} to the Experiment_{2}.{3}").format(
                            i+1,
                            img_data.Dimension[0],
                            exp_num,
                            self.form))
                    # Dispatch to bw.SingleWriteToXMLFile / ...ToTextFile;
                    # safe because saveType was whitelisted in __init__.
                    eval("bw.SingleWriteTo{}File".format(self.saveType))(fname,
                                                                         img_data,
                                                                         i,
                                                                         i==0)
                completed += 100/allDim
                self.progress.emit(completed)
    def _SaveSingle(self):
        """Export only the currently displayed experiment."""
        completed = 0
        allDim = self.parent.scroll.maximum()
        saveDir = self.tmp_folder.name
        img_data = self.parent.tree.ImageData[self.parent.curExpName][self.parent.curExpNum]["data"]
        # Ensure a ".xml" suffix for XML exports.
        # NOTE(review): `re` is not imported in this module directly; it must
        # come via the star import from FilesTreeWidget -- confirm.
        if self.saveType == "XML":
            try:
                self.filename = re.search(r".+\.xml$", self.filename).group()
            except AttributeError:
                self.filename += ".xml"
        fname = '{0}{1}{2}'.format(saveDir,
                                   os.sep,
                                   self.filename)
        for i in range(allDim):
            if self.cancelThread:
                raise CANCELThread()
            if self.saveType == "Image":
                # Images: one numbered file per slice.
                fname = '{0}{1}{2}_{3}.{4}'.format(saveDir,
                                                   os.sep,
                                                   self.filename,
                                                   i+1,
                                                   self.form)
                self.progressText.emit(
                    self.tr("Writting {0}_{1}.{2}").format(self.filename,
                                                           i+1,
                                                           self.form))
                toimage(img_data.IntenseData[i,:,:],
                        cmin=img_data.min_val, cmax=img_data.max_val).save(fname)
            else:
                self.progressText.emit(
                    self.tr("Writting Image {0}\{1} to the {2}").format(i+1,
                                                                        allDim + 1,
                                                                        self.filename))
                eval("bw.SingleWriteTo{}File".format(self.saveType))(fname,
                                                                     img_data,
                                                                     i,
                                                                     i==0)
            completed += 100/allDim
            self.progress.emit(completed)
    def run(self):
        """Thread entry point: prepare the temp folder and dispatch on
        self.trigger; a CANCELThread quietly terminates the thread."""
        try:
            utils.checkdir(self.SaveDir)
            # create a temporary folder
            self.tmp_folder = tempfile.TemporaryDirectory(suffix = ".TMP",
                                                          prefix="_MRIAssimilator_",
                                                          dir = self.SaveDir)
            if self.trigger == "all":
                self._SaveAllChecked()
            elif self.trigger == "single":
                self._SaveSingle()
        except CANCELThread:
            self.quit()
| true | true |
f722827390cb58b1a6cd72a02b31a1dfb88f2244 | 17,043 | py | Python | clients/hydra/python/ory_hydra_client/configuration.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | clients/hydra/python/ory_hydra_client/configuration.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | clients/hydra/python/ory_hydra_client/configuration.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | """
ORY Hydra
Welcome to the ORY Hydra HTTP API documentation. You will find documentation for all HTTP APIs here. # noqa: E501
The version of the OpenAPI document: v1.10.5
Generated by: https://openapi-generator.tech
"""
import copy
import logging
import multiprocessing
import sys
import urllib3
from http import client as http_client
from ory_hydra_client.exceptions import ApiValueError
# JSON-schema structural-validation keywords that may be selectively
# disabled via Configuration.disabled_client_side_validations.
JSON_SCHEMA_VALIDATION_KEYWORDS = {
    'multipleOf', 'maximum', 'exclusiveMaximum',
    'minimum', 'exclusiveMinimum', 'maxLength',
    'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:param disabled_client_side_validations (string): Comma-separated list of
JSON schema validation keywords to disable JSON schema structural validation
rules. The following keywords may be specified: multipleOf, maximum,
exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern,
maxItems, minItems.
By default, the validation is performed for data generated locally by the client
and data received from the server, independent of any validation performed by
the server side. If the input data does not satisfy the JSON schema validation
rules specified in the OpenAPI document, an exception is raised.
If disabled_client_side_validations is set, structural validation is
disabled. This can be useful to troubleshoot data validation problem, such as
when the OpenAPI document validation rules do not match the actual API data
received by the server.
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
:param ssl_ca_cert: str - the path to a file of concatenated CA certificates
in PEM format
:Example:
HTTP Basic Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
http_basic_auth:
type: http
scheme: basic
Configure API client with HTTP basic authentication:
conf = ory_hydra_client.Configuration(
username='the-user',
password='the-password',
)
"""
# Class-level default Configuration snapshot, managed via
# set_default() / get_default_copy().
_default = None
def __init__(self, host=None,
             api_key=None, api_key_prefix=None,
             access_token=None,
             username=None, password=None,
             discard_unknown_keys=False,
             disabled_client_side_validations="",
             server_index=None, server_variables=None,
             server_operation_index=None, server_operation_variables=None,
             ssl_ca_cert=None,
             ):
    """Build a Configuration with sensible defaults.

    See the class docstring for the meaning of each parameter.
    """
    self._base_path = "http://localhost" if host is None else host
    """Default Base url
    """
    # An explicit ``host`` pins the base path, so no server entry is selected.
    self.server_index = 0 if server_index is None and host is None else server_index
    self.server_operation_index = server_operation_index or {}
    """Default server index
    """
    self.server_variables = server_variables or {}
    self.server_operation_variables = server_operation_variables or {}
    """Default server variables
    """
    self.temp_folder_path = None
    """Temp file folder for downloading files
    """
    # Authentication Settings
    self.access_token = access_token
    self.api_key = {}
    if api_key:
        self.api_key = api_key
    """dict to store API key(s)
    """
    self.api_key_prefix = {}
    if api_key_prefix:
        self.api_key_prefix = api_key_prefix
    """dict to store API prefix (e.g. Bearer)
    """
    self.refresh_api_key_hook = None
    """function hook to refresh API key if expired
    """
    self.username = username
    """Username for HTTP basic authentication
    """
    self.password = password
    """Password for HTTP basic authentication
    """
    self.discard_unknown_keys = discard_unknown_keys
    # NOTE: this assignment runs the keyword validation in __setattr__.
    self.disabled_client_side_validations = disabled_client_side_validations
    self.logger = {}
    """Logging Settings
    """
    self.logger["package_logger"] = logging.getLogger("ory_hydra_client")
    self.logger["urllib3_logger"] = logging.getLogger("urllib3")
    self.logger_format = '%(asctime)s %(levelname)s %(message)s'
    """Log format
    """
    self.logger_stream_handler = None
    """Log stream handler
    """
    self.logger_file_handler = None
    """Log file handler
    """
    self.logger_file = None
    """Debug file location
    """
    self.debug = False
    """Debug switch
    """
    self.verify_ssl = True
    """SSL/TLS verification
    Set this to false to skip verifying SSL certificate when calling API
    from https server.
    """
    self.ssl_ca_cert = ssl_ca_cert
    """Set this to customize the certificate file to verify the peer.
    """
    self.cert_file = None
    """client certificate file
    """
    self.key_file = None
    """client key file
    """
    self.assert_hostname = None
    """Set this to True/False to enable/disable SSL hostname verification.
    """
    self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
    """urllib3 connection pool's maximum number of connections saved
    per pool. urllib3 uses 1 connection as default value, but this is
    not the best value when you are making a lot of possibly parallel
    requests to the same host, which is often the case here.
    cpu_count * 5 is used as default value to increase performance.
    """
    self.proxy = None
    """Proxy URL
    """
    self.proxy_headers = None
    """Proxy headers
    """
    self.safe_chars_for_path_param = ''
    """Safe chars for path_param
    """
    self.retries = None
    """Adding retries to override urllib3 default value 3
    """
    # Enable client side validation
    self.client_side_validation = True
    # Options to pass down to the underlying urllib3 socket
    self.socket_options = None
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == 'disabled_client_side_validations':
s = set(filter(None, value.split(',')))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError(
"Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
    """Path of the current log file, or None when file logging is disabled.

    Assigning a path attaches a ``logging.FileHandler`` to every
    configured logger (see the setter).

    :return: The logger_file path.
    :rtype: str
    """
    return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
    """Debug status.

    True means DEBUG-level logging and ``http.client`` wire tracing are
    enabled (see the setter).

    :return: The debug status, True or False.
    :rtype: bool
    """
    return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on http_client debug
http_client.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off http_client debug
http_client.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
    """The log format string.

    ``logger_formatter`` is rebuilt whenever this is assigned.

    :return: The format string.
    :rtype: str
    """
    return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
    """Gets HTTP basic authentication header (string).

    Missing username/password are treated as empty strings.

    :return: The token for basic HTTP authentication.
    """
    username = "" if self.username is None else self.username
    password = "" if self.password is None else self.password
    headers = urllib3.util.make_headers(basic_auth=username + ':' + password)
    return headers.get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if self.username is not None and self.password is not None:
auth['basic'] = {
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
}
if self.access_token is not None:
auth['oauth2'] = {
'type': 'oauth2',
'in': 'header',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
}
return auth
def to_debug_report(self):
    """Gets the essential information for debugging.

    :return: The report for debugging.
    """
    report_lines = [
        "Python SDK Debug Report:",
        "OS: {}".format(sys.platform),
        "Python Version: {}".format(sys.version),
        "Version of the API: v1.10.5",
        "SDK Package Version: v1.10.5",
    ]
    return "\n".join(report_lines)
def get_host_settings(self):
    """Return the server (host) settings declared in the OpenAPI document.

    :return: An array of host settings
    """
    placeholder_server = {
        'url': "",
        'description': "No description provided",
    }
    return [placeholder_server]
def get_host_from_settings(self, index, variables=None, servers=None):
    """Gets host URL based on the index and variables.

    :param index: array index of the host settings; ``None`` returns the
        explicitly configured base path instead.
    :param variables: hash of variable and the corresponding value
    :param servers: an array of host settings, or None to use the defaults
    :return: URL based on host settings
    :raises ValueError: if ``index`` is out of range, or a variable value
        is not one of the allowed ``enum_values``.
    """
    if index is None:
        return self._base_path

    variables = {} if variables is None else variables
    servers = self.get_host_settings() if servers is None else servers

    try:
        server = servers[index]
    except IndexError:
        raise ValueError(
            "Invalid index {0} when selecting the host settings. "
            "Must be less than {1}".format(index, len(servers)))

    url = server['url']

    # go through variables and replace placeholders
    for variable_name, variable in server.get('variables', {}).items():
        used_value = variables.get(
            variable_name, variable['default_value'])

        if 'enum_values' in variable \
                and used_value not in variable['enum_values']:
            # Fix: report the value actually used. The old code formatted
            # ``variables[variable_name]``, which raised a confusing
            # KeyError (instead of this ValueError) whenever the variable
            # was omitted and its default_value failed the enum check.
            raise ValueError(
                "The variable `{0}` in the host URL has invalid value "
                "{1}. Must be {2}.".format(
                    variable_name, used_value,
                    variable['enum_values']))

        url = url.replace("{" + variable_name + "}", used_value)

    return url
@property
def host(self):
    """Return generated host.

    Resolved from the currently selected server entry (``server_index``)
    with ``server_variables`` substituted; a ``server_index`` of ``None``
    falls back to the explicitly configured base path.
    """
    return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
| 35.88 | 118 | 0.619022 |
import copy
import logging
import multiprocessing
import sys
import urllib3
from http import client as http_client
from ory_hydra_client.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
'multipleOf', 'maximum', 'exclusiveMaximum',
'minimum', 'exclusiveMinimum', 'maxLength',
'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
_default = None
def __init__(self, host=None,
api_key=None, api_key_prefix=None,
access_token=None,
username=None, password=None,
discard_unknown_keys=False,
disabled_client_side_validations="",
server_index=None, server_variables=None,
server_operation_index=None, server_operation_variables=None,
ssl_ca_cert=None,
):
self._base_path = "http://localhost" if host is None else host
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
self.temp_folder_path = None
self.access_token = access_token
self.api_key = {}
if api_key:
self.api_key = api_key
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
self.refresh_api_key_hook = None
self.username = username
self.password = password
self.discard_unknown_keys = discard_unknown_keys
self.disabled_client_side_validations = disabled_client_side_validations
self.logger = {}
self.logger["package_logger"] = logging.getLogger("ory_hydra_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
self.logger_stream_handler = None
self.logger_file_handler = None
self.logger_file = None
self.debug = False
self.verify_ssl = True
self.ssl_ca_cert = ssl_ca_cert
self.cert_file = None
self.key_file = None
self.assert_hostname = None
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
self.proxy = None
self.proxy_headers = None
self.safe_chars_for_path_param = ''
self.retries = None
self.client_side_validation = True
self.socket_options = None
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
result.logger = copy.copy(self.logger)
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == 'disabled_client_side_validations':
s = set(filter(None, value.split(',')))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError(
"Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
return self.__debug
@debug.setter
def debug(self, value):
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on http_client debug
http_client.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off http_client debug
http_client.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
auth = {}
if self.username is not None and self.password is not None:
auth['basic'] = {
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
}
if self.access_token is not None:
auth['oauth2'] = {
'type': 'oauth2',
'in': 'header',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
}
return auth
def to_debug_report(self):
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v1.10.5\n"\
"SDK Package Version: v1.10.5".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
return [
{
'url': "",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
@property
def host(self):
return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
self._base_path = value
self.server_index = None
| true | true |
f72283ae4e1379a687501776df3b0e07c89cff2f | 236 | py | Python | boderAroundImage.py | MayankShrivastava17/opencv-operation-on-image | c5ade8fa527ad05d99b65016678d8d25db201132 | [
"MIT"
] | 1 | 2021-03-20T13:02:07.000Z | 2021-03-20T13:02:07.000Z | boderAroundImage.py | MayankShrivastava17/opencv-operation-on-image | c5ade8fa527ad05d99b65016678d8d25db201132 | [
"MIT"
] | null | null | null | boderAroundImage.py | MayankShrivastava17/opencv-operation-on-image | c5ade8fa527ad05d99b65016678d8d25db201132 | [
"MIT"
] | null | null | null | import cv2 as cv
# Show the original image, then the same image with a border added.
img = cv.imread("dog.jpg")
cv.imshow("Original Image", img)
cv.waitKey(0)

# Add a constant-colored 10px border on every side.
border = cv.copyMakeBorder(img, 10, 10, 10, 10, cv.BORDER_CONSTANT)
cv.imshow("Bordered Image", border)
cv.waitKey(0)

# Fix: the original called cv.destoryAllWindows() (typo), which raised
# AttributeError at the end of the script; the OpenCV API name is
# destroyAllWindows.
cv.destroyAllWindows()
| 18.153846 | 67 | 0.720339 | import cv2 as cv
img = cv.imread("dog.jpg")
cv.imshow("Original Image", img)
cv.waitKey(0)
border = cv.copyMakeBorder(img, 10, 10, 10, 10, cv.BORDER_CONSTANT)
cv.imshow("Bordered Image", border)
cv.waitKey(0)
cv.destoryAllWindows()
| true | true |
f72283efb5d82450f4772fcd196e0ff640ca6fda | 6,696 | py | Python | PyStationB/libraries/StaticCharacterization/tests/notebooks/test_introduction.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | 6 | 2021-09-29T15:46:55.000Z | 2021-12-14T18:39:51.000Z | PyStationB/libraries/StaticCharacterization/tests/notebooks/test_introduction.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | null | null | null | PyStationB/libraries/StaticCharacterization/tests/notebooks/test_introduction.py | BrunoKM/station-b-libraries | ea3591837e4a33f0bef789d905467754c27913b3 | [
"MIT"
] | 3 | 2021-09-27T10:35:20.000Z | 2021-10-02T17:53:07.000Z | # # Introduction
# In this notebook, we will load an example time series, fit a growth model
# and plot the signals.
#
# ## Load example time series
#
# Let's start by loading example time series data.
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import Iterable, List, Optional, cast
import matplotlib.pyplot as plt
import pytest
import seaborn as sns
import staticchar as ch
from psbutils.filecheck import Plottable, figure_found
from psbutils.misc import find_subrepo_directory
from staticchar.plotting.core import AnnotationSpec
# Location of this sub-repository and of the S-shaped example time-series data.
SUBREPO_DIR = find_subrepo_directory()
S_SHAPE_FOLDER = SUBREPO_DIR / "tests/test_data/S-shape"
def plot_figure(name: str, ax: Optional[Plottable] = None) -> List[str]:
    """Check the current figure against the stored reference image *name*.

    Returns an empty list when the figure matched, otherwise ``[name]``
    so callers can accumulate the names of missing/mismatching figures.
    The current matplotlib figure is cleared either way.
    """
    sns.despine()
    matched = figure_found(ax, f"test_introduction/{name}")
    plt.clf()
    if matched:
        return []
    return [name]
@pytest.mark.timeout(10)
def test_introduction():
    """Notebook-style walkthrough of the staticchar pipeline.

    Loads the S-shape example dataset, subtracts background, fits a
    logistic growth model, and runs ratio- and integral-based
    characterization, comparing every produced plot against stored
    reference figures via ``plot_figure``. The steps are strictly
    sequential: each stage consumes the previous stage's output.
    """
    dataset = ch.datasets.Dataset(S_SHAPE_FOLDER)  # type: ignore # auto
    raw_timeseries = dataset.get_a_frame()
    rth = raw_timeseries.head()
    # As we can see, there is some non-zero signal at the beginning, which we attribute to
    # the media absorbance and media fluorescence (as initially we have very low cell density).
    assert sorted(rth.keys().to_list()) == sorted([ch.TIME, "EYFP", "OD", "ECFP", "OD700", "mRFP1"])
    colors = {"EYFP": "yellow", "ECFP": "cyan", "mRFP1": "red", "OD": "black"}
    plt.figure(figsize=(6.4, 4.8))
    ax = cast(plt.Axes, plt.subplot())
    ch.plot_signals_against_time(raw_timeseries, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
    ax.legend()
    # Accumulates the names of any figures that failed the reference check;
    # asserted empty at the very end so all mismatches are reported at once.
    figures_not_found = []
    figures_not_found += plot_figure("plot1_raw_timeseries", ax)
    # ## Pre-processing
    # Let's assume this is the background and subtract it.
    # (A more precise, but also costly alternative is to estimate this using several blanks).
    subtracted = ch.subtract_background(
        raw_timeseries, columns=["OD", "ECFP", "EYFP", "mRFP1"], strategy=ch.BackgroundChoices.Minimum
    )
    ax = cast(plt.Axes, plt.subplot())
    ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
    ax.legend()
    figures_not_found += plot_figure("plot2_subtracted_timeseries", ax)
    # ## Run characterization on an example
    yaml_path = find_subrepo_directory() / "tests/configs/integral_basic.yml"
    config = ch.config.load(yaml_path, ch.config.CharacterizationConfig)
    # ### Fitting a growth model
    #
    # Let's fit a growth model to the OD signal.
    model_params = ch.LogisticModel.fit(subtracted["time"], subtracted[config.growth_signal])  # type: ignore # auto
    model = ch.LogisticModel(model_params)
    # Alternative model kept for reference:
    # model_params = ch.GompertzModel.fit(subtracted["time"], subtracted[config.growth_signal])
    # model = ch.GompertzModel(model_params)
    print(f"Inferred parameters: {model_params}")
    print(f"Growth phase: {model.growth_period}")
    print(f"Time of maximal activity: {model.time_maximal_activity}")
    print(f"Inferred (log of) initial density: {model.initial_density(log=True)}")
    ch.plot_growth_model(subtracted["time"], subtracted[config.growth_signal], model=model)  # type: ignore # auto
    figures_not_found += plot_figure("plot3_growth_model_fit")
    # ### Plotting the data
    #
    # Some time after the growth phase, we should observe a similar exponential production
    # of the proteins. Suppose that this maturation time is about 50 minutes,
    # that is about 0.85 hours.
    #
    # Then, fluorescence signals should be linear when drawn with respect to each other.
    # Add offset to the growth phase
    production_phase = model.growth_period + config.maturation_offset
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))  # type: ignore
    ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
    # Visualise the production phase
    ch.mark_phase(ax1, interval=production_phase, color="green", alpha=0.1)
    ch.plot_signals_against_reference(subtracted, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2)
    figures_not_found += plot_figure("plot4_fluorescence_signals", f)
    # ### Truncate the time-series
    #
    # We see that this very well captures the growth phase of mRFP1 (the reference signal),
    # but is a bit too late for EYFP and ECFP -- we won't have a linear dependence between
    # the signals...
    #
    # Let's choose a more narrow interval.
    another_production_phase = ch.TimePeriod(reference=12, left=2, right=2)
    truncated_timeseries = ch.select_time_interval(subtracted, interval=another_production_phase)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))  # type: ignore
    ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
    # Visualise the production phase
    ch.mark_phase(ax1, interval=another_production_phase, color="green", alpha=0.1)
    ch.plot_signals_against_reference(
        truncated_timeseries, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2  # type: ignore # auto
    )
    figures_not_found += plot_figure("plot5_truncated")
    # Run method
    gradient, gradient_error = ch.transcriptional_activity_ratio(
        truncated_timeseries,  # type: ignore # auto
        config.signals,
        config.reference,
        config.signal_properties,
        model_params.growth_rate,
        model.growth_period,
        maturation_offset=config.maturation_offset,
    )
    # ### Integration-based characterization
    # Now assume that we want to integrate the signals over the production period.
    signals = ["EYFP", "ECFP"]
    ch.integrate(data=subtracted, signals=signals, interval=config.time_window)
    # Now plot the output
    f, axs = plt.subplots(1, len(config.signals), figsize=(12, 4))
    for signal, ax in zip(config.signals, cast(Iterable, axs)):
        ch.plot_integration(
            subtracted,
            signal,
            config.time_window,
            ax,
            fillcolor=colors[signal],
            annotation_spec=AnnotationSpec(title=True),
        )
    figures_not_found += plot_figure("plot6_integration", f)
    assert figures_not_found == [], f"Figures not found: {', '.join(figures_not_found)}"
| 39.157895 | 119 | 0.682348 | ---------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import Iterable, List, Optional, cast
import matplotlib.pyplot as plt
import pytest
import seaborn as sns
import staticchar as ch
from psbutils.filecheck import Plottable, figure_found
from psbutils.misc import find_subrepo_directory
from staticchar.plotting.core import AnnotationSpec
SUBREPO_DIR = find_subrepo_directory()
S_SHAPE_FOLDER = SUBREPO_DIR / "tests/test_data/S-shape"
def plot_figure(name: str, ax: Optional[Plottable] = None) -> List[str]:
sns.despine()
found = figure_found(ax, f"test_introduction/{name}")
plt.clf()
return [] if found else [name]
@pytest.mark.timeout(10)
def test_introduction():
dataset = ch.datasets.Dataset(S_SHAPE_FOLDER) # type: ignore # auto
raw_timeseries = dataset.get_a_frame()
rth = raw_timeseries.head()
# As we can see, there is some non-zero signal at the beginning, which we attribute to
# the media absorbance and media fluorescence (as initially we have very low cell density).
assert sorted(rth.keys().to_list()) == sorted([ch.TIME, "EYFP", "OD", "ECFP", "OD700", "mRFP1"])
colors = {"EYFP": "yellow", "ECFP": "cyan", "mRFP1": "red", "OD": "black"}
plt.figure(figsize=(6.4, 4.8))
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(raw_timeseries, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found = []
figures_not_found += plot_figure("plot1_raw_timeseries", ax)
# ## Pre-processing
# Let's assume this is the background and subtract it.
subtracted = ch.subtract_background(
raw_timeseries, columns=["OD", "ECFP", "EYFP", "mRFP1"], strategy=ch.BackgroundChoices.Minimum
)
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found += plot_figure("plot2_subtracted_timeseries", ax)
l_basic.yml"
config = ch.config.load(yaml_path, ch.config.CharacterizationConfig)
tracted[config.growth_signal]) # type: ignore # auto
model = ch.LogisticModel(model_params)
# model_params = ch.GompertzModel.fit(subtracted["time"], subtracted[config.growth_signal])
# model = ch.GompertzModel(model_params)
print(f"Inferred parameters: {model_params}")
print(f"Growth phase: {model.growth_period}")
print(f"Time of maximal activity: {model.time_maximal_activity}")
print(f"Inferred (log of) initial density: {model.initial_density(log=True)}")
ch.plot_growth_model(subtracted["time"], subtracted[config.growth_signal], model=model) # type: ignore # auto
figures_not_found += plot_figure("plot3_growth_model_fit")
# ### Plotting the data
#
# Some time after the growth phase, we should observe a similar exponential production
# of the proteins. Suppose that this maturation time is about 50 minutes,
# that is about 0.85 hours.
#
# Then, fluorescence signals should be linear when drawn with respect to each other.
# Add offset to the growth phase
production_phase = model.growth_period + config.maturation_offset
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(subtracted, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2)
figures_not_found += plot_figure("plot4_fluorescence_signals", f)
# ### Truncate the time-series
#
# We see that this very well captures the growth phase of mRFP1 (the reference signal),
# but is a bit too late for EYFP and ECFP -- we won't have a linear dependence between
another_production_phase = ch.TimePeriod(reference=12, left=2, right=2)
truncated_timeseries = ch.select_time_interval(subtracted, interval=another_production_phase)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=another_production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(
truncated_timeseries, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2 # type: ignore # auto
)
figures_not_found += plot_figure("plot5_truncated")
# Run method
gradient, gradient_error = ch.transcriptional_activity_ratio(
truncated_timeseries, # type: ignore # auto
config.signals,
config.reference,
config.signal_properties,
model_params.growth_rate,
model.growth_period,
maturation_offset=config.maturation_offset,
)
# gradient
# ### Integration-based characterization
# Now assume that we want to integrate the signals over the production period.
signals = ["EYFP", "ECFP"]
ch.integrate(data=subtracted, signals=signals, interval=config.time_window)
# Now plot the output
f, axs = plt.subplots(1, len(config.signals), figsize=(12, 4))
for signal, ax in zip(config.signals, cast(Iterable, axs)):
ch.plot_integration(
subtracted,
signal,
config.time_window,
ax,
fillcolor=colors[signal],
annotation_spec=AnnotationSpec(title=True),
)
figures_not_found += plot_figure("plot6_integration", f)
assert figures_not_found == [], f"Figures not found: {', '.join(figures_not_found)}"
| true | true |
f722840026be59144b1d690af2f4860ac9af1a7a | 1,479 | py | Python | nequip/nn/embedding/_one_hot.py | mir-group/nequip | 4e6a0914a289cf000da57a6b6e79678efdf3347f | [
"MIT"
] | 153 | 2021-06-20T20:12:01.000Z | 2022-03-31T13:57:45.000Z | nequip/nn/embedding/_one_hot.py | mir-group/nequip | 4e6a0914a289cf000da57a6b6e79678efdf3347f | [
"MIT"
] | 25 | 2021-06-17T16:00:16.000Z | 2022-03-29T07:04:00.000Z | nequip/nn/embedding/_one_hot.py | mir-group/nequip | 4e6a0914a289cf000da57a6b6e79678efdf3347f | [
"MIT"
] | 25 | 2021-06-21T22:25:22.000Z | 2022-03-30T04:39:46.000Z | import torch
import torch.nn.functional
from e3nn.o3 import Irreps
from e3nn.util.jit import compile_mode
from nequip.data import AtomicDataDict
from .._graph_mixin import GraphModuleMixin
@compile_mode("script")
class OneHotAtomEncoding(GraphModuleMixin, torch.nn.Module):
    """Embed each atom's discrete type index as a one-hot vector.

    The encoding is written to ``AtomicDataDict.NODE_ATTRS_KEY`` and,
    when ``set_features`` is true, also used as the initial node features.
    """

    # Class-level annotations so TorchScript (compile_mode "script") can
    # resolve these attributes.
    num_types: int
    set_features: bool

    def __init__(
        self,
        num_types: int,
        set_features: bool = True,
        irreps_in=None,
    ):
        super().__init__()
        self.num_types = num_types
        self.set_features = set_features
        # The encoding is `num_types` even-parity (invariant) scalars.
        one_hot_irreps = Irreps([(self.num_types, (0, 1))])
        irreps_out = {AtomicDataDict.NODE_ATTRS_KEY: one_hot_irreps}
        if self.set_features:
            irreps_out[AtomicDataDict.NODE_FEATURES_KEY] = one_hot_irreps
        self._init_irreps(irreps_in=irreps_in, irreps_out=irreps_out)

    def forward(self, data: AtomicDataDict.Type):
        """Attach the one-hot type encoding to ``data`` and return it."""
        types = data[AtomicDataDict.ATOM_TYPE_KEY].squeeze(-1)
        encoding = torch.nn.functional.one_hot(types, num_classes=self.num_types)
        # Cast to the positions' dtype so downstream layers see floats.
        encoding = encoding.to(
            device=types.device,
            dtype=data[AtomicDataDict.POSITIONS_KEY].dtype,
        )
        data[AtomicDataDict.NODE_ATTRS_KEY] = encoding
        if self.set_features:
            data[AtomicDataDict.NODE_FEATURES_KEY] = encoding
        return data
| 32.866667 | 88 | 0.684246 | import torch
import torch.nn.functional
from e3nn.o3 import Irreps
from e3nn.util.jit import compile_mode
from nequip.data import AtomicDataDict
from .._graph_mixin import GraphModuleMixin
@compile_mode("script")
class OneHotAtomEncoding(GraphModuleMixin, torch.nn.Module):
    """One-hot encode atoms' discrete type indices as node attributes.

    When ``set_features`` is true the encoding is also stored as the
    initial node features.
    """
    # Class-level annotations so TorchScript can resolve these attributes.
    num_types: int
    set_features: bool
    def __init__(
        self,
        num_types: int,
        set_features: bool = True,
        irreps_in=None,
    ):
        super().__init__()
        self.num_types = num_types
        self.set_features = set_features
        # Output irreps: `num_types` even-parity (invariant) scalars.
        irreps_out = {AtomicDataDict.NODE_ATTRS_KEY: Irreps([(self.num_types, (0, 1))])}
        if self.set_features:
            irreps_out[AtomicDataDict.NODE_FEATURES_KEY] = irreps_out[
                AtomicDataDict.NODE_ATTRS_KEY
            ]
        self._init_irreps(irreps_in=irreps_in, irreps_out=irreps_out)
    def forward(self, data: AtomicDataDict.Type):
        """Write the one-hot encoding into ``data`` and return it."""
        type_numbers = data[AtomicDataDict.ATOM_TYPE_KEY].squeeze(-1)
        # Cast to the positions' dtype so downstream layers see floats.
        one_hot = torch.nn.functional.one_hot(
            type_numbers, num_classes=self.num_types
        ).to(device=type_numbers.device, dtype=data[AtomicDataDict.POSITIONS_KEY].dtype)
        data[AtomicDataDict.NODE_ATTRS_KEY] = one_hot
        if self.set_features:
            data[AtomicDataDict.NODE_FEATURES_KEY] = one_hot
        return data
| true | true |
f7228426f925b777888476c39cb85634ecb6403f | 670 | py | Python | menu/spof.py | Jolmberg/fn22snesticle | f62d84a2070f0bd728d8822901bc5daaff55981e | [
"MIT"
] | 56 | 2022-01-10T18:01:51.000Z | 2022-03-16T23:36:05.000Z | menu/spof.py | Jolmberg/fn22snesticle | f62d84a2070f0bd728d8822901bc5daaff55981e | [
"MIT"
] | 2 | 2022-01-10T19:38:04.000Z | 2022-01-18T09:49:54.000Z | menu/spof.py | Jolmberg/fn22snesticle | f62d84a2070f0bd728d8822901bc5daaff55981e | [
"MIT"
] | 2 | 2022-01-12T02:55:55.000Z | 2022-01-23T22:20:21.000Z | #!/usr/bin/env python3
import sys
import io
def decompress(stream):
    """Expand 96 (0x30 * 2) RLE-packed chunks read from *stream*.

    Each chunk is a "favourite" byte followed by a control byte; for each
    of the control byte's 8 bits (most significant first) a set bit copies
    the next byte literally from the stream, a clear bit repeats the
    favourite byte.  Returns an ``io.BytesIO`` with the expanded data.
    """
    out = io.BytesIO()
    for _chunk in range(0x30 * 2):
        filler = stream.read(1)
        mask = stream.read(1)[0]
        # Walk the control bits from bit 7 down to bit 0.
        for bit in range(7, -1, -1):
            if mask & (1 << bit):
                out.write(stream.read(1))
            else:
                out.write(filler)
    return out
def main():
    """Decompress the payload at a fixed offset of the file named on the
    command line and print it as a ``.byte`` assembler directive."""
    path = sys.argv[1]
    with open(path, 'rb') as rom:
        # Fixed offset into the input image where the compressed data starts.
        rom.seek(0x56e58000 + 0x38ab2)
        payload = decompress(rom).getvalue()
    rendered = ','.join(f'${x:02x}' for x in payload)
    print('.byte ' + rendered)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| 20.30303 | 70 | 0.528358 |
import sys
import io
def decompress(stream):
    """Expand 96 (0x30 * 2) RLE-packed chunks read from *stream*.

    Each chunk is a "favourite" byte followed by a control byte; each of
    the control byte's 8 bits (MSB first) selects either a literal byte
    from the stream (bit set) or a repeat of the favourite (bit clear).
    Returns an ``io.BytesIO`` holding the expanded data.
    """
    output = io.BytesIO()
    planes = 0
    # 0x30 * 2 = 96 chunks; the name suggests tile bitplanes -- TODO confirm.
    while planes < 0x30 * 2:
        favourite = stream.read(1)
        control = stream.read(1)[0]
        for x in range(8):
            if control & 0x80:
                # Bit set: copy one literal byte from the stream.
                output.write(stream.read(1))
            else:
                # Bit clear: repeat the favourite byte.
                output.write(favourite)
            control <<= 1
        planes += 1
    return output
def main():
    """Decompress the payload at a fixed offset of the file named on the
    command line and print it as a ``.byte`` assembler directive."""
    with open(sys.argv[1], 'rb') as f:
        # Fixed offset into the input image where the compressed data
        # presumably starts -- verify against the image layout.
        f.seek(0x56e58000 + 0x38ab2)
        output = decompress(f)
    print('.byte ' + ','.join(f'${x:02x}' for x in output.getvalue()))
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| true | true |
f722842bb61389e16a448f195a271aca78b7749e | 20,862 | py | Python | telemetry/third_party/altgraph/altgraph/Graph.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | telemetry/third_party/altgraph/altgraph/Graph.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | telemetry/third_party/altgraph/altgraph/Graph.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | """
altgraph.Graph - Base Graph class
=================================
..
#--Version 2.1
#--Bob Ippolito October, 2004
#--Version 2.0
#--Istvan Albert June, 2004
#--Version 1.0
#--Nathan Denny, May 27, 1999
"""
from __future__ import division
from __future__ import absolute_import
from altgraph import GraphError
from collections import deque
# 2To3-division: the / operations here are not converted to // as the results
# are expected floats.
class Graph(object):
    """
    The Graph class represents a directed graph with *N* nodes and *E* edges.

    Naming conventions:

      - the prefixes *out*, *inc* and *all* refer to methods that operate
        on the outgoing, incoming or all edges of a node.

      - the prefixes *forw* and *back* refer to the orientation of the
        edges followed with respect to the node, e.g. :py:meth:`forw_bfs`
        starts at the node and traverses the graph along outgoing edges.
    """

    def __init__(self, edges=None):
        """
        Initialize an empty graph, optionally populated from an iterable
        of ``(head, tail)`` or ``(head, tail, data)`` tuples.
        """
        self.next_edge = 0
        self.nodes, self.edges = {}, {}
        self.hidden_edges, self.hidden_nodes = {}, {}

        if edges is not None:
            for item in edges:
                if len(item) == 2:
                    head, tail = item
                    self.add_edge(head, tail)
                elif len(item) == 3:
                    head, tail, data = item
                    self.add_edge(head, tail, data)
                else:
                    raise GraphError("Cannot create edge from %s" % (item,))

    def __repr__(self):
        return '<Graph: %d nodes, %d edges>' % (
            self.number_of_nodes(), self.number_of_edges())

    def add_node(self, node, node_data=None):
        """
        Adds a new node to the graph; the node must be hashable.
        Arbitrary data can be attached via ``node_data``.  Adding the
        same node twice (or a currently hidden node) is silently ignored.
        """
        # Each node maps to (incoming edge ids, outgoing edge ids, data).
        if node in self.hidden_nodes:
            # Node is present, but hidden
            return
        if node not in self.nodes:
            self.nodes[node] = ([], [], node_data)

    def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):
        """
        Adds a directed edge from ``head_id`` to ``tail_id`` with optional
        attached ``edge_data``.  When ``create_nodes`` is true, missing
        endpoint nodes are created automatically.

        Raises GraphError if an endpoint does not exist and
        ``create_nodes`` is false.
        """
        edge = self.next_edge

        if create_nodes:
            self.add_node(head_id)
            self.add_node(tail_id)

        # Register the edge id with both endpoints:
        # index 0 -> incoming edges, index 1 -> outgoing edges.
        try:
            self.nodes[tail_id][0].append(edge)
            self.nodes[head_id][1].append(edge)
        except KeyError:
            raise GraphError('Invalid nodes %s -> %s' % (head_id, tail_id))

        self.edges[edge] = (head_id, tail_id, edge_data)
        self.next_edge += 1

    def hide_edge(self, edge):
        """
        Hides an edge from the graph.  The edge may be unhidden later
        with :py:meth:`restore_edge`.
        """
        try:
            # Move the edge record to hidden_edges and detach it from
            # both endpoint adjacency lists.
            head_id, tail_id, _data = self.hidden_edges[edge] = self.edges[edge]
            self.nodes[tail_id][0].remove(edge)
            self.nodes[head_id][1].remove(edge)
            del self.edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)

    def hide_node(self, node):
        """
        Hides a node and all of its incoming and outgoing edges.  The
        node may be unhidden later with :py:meth:`restore_node`.
        """
        try:
            all_edges = self.all_edges(node)
            self.hidden_nodes[node] = (self.nodes[node], all_edges)
            for edge in all_edges:
                self.hide_edge(edge)
            del self.nodes[node]
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def restore_node(self, node):
        """
        Restores a previously hidden node together with all of its
        incoming and outgoing edges.
        """
        try:
            self.nodes[node], all_edges = self.hidden_nodes[node]
            for edge in all_edges:
                self.restore_edge(edge)
            del self.hidden_nodes[node]
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def restore_edge(self, edge):
        """
        Restores a previously hidden edge back into the graph.
        """
        try:
            head_id, tail_id, data = self.hidden_edges[edge]
            self.nodes[tail_id][0].append(edge)
            self.nodes[head_id][1].append(edge)
            self.edges[edge] = head_id, tail_id, data
            del self.hidden_edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)

    def restore_all_edges(self):
        """
        Restores all hidden edges.  Edges whose endpoint nodes are still
        hidden cannot be restored and are skipped.
        """
        for edge in list(self.hidden_edges.keys()):
            try:
                self.restore_edge(edge)
            except GraphError:
                pass

    def restore_all_nodes(self):
        """
        Restores all hidden nodes.
        """
        for node in list(self.hidden_nodes.keys()):
            self.restore_node(node)

    def __contains__(self, node):
        """
        Test whether a (visible) node is in the graph.
        """
        return node in self.nodes

    def edge_by_id(self, edge):
        """
        Returns the ``(head, tail)`` node pair connected by the edge id.
        """
        try:
            head, tail, _data = self.edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)
        return (head, tail)

    def edge_by_node(self, head, tail):
        """
        Returns the id of the first edge from ``head`` to ``tail``, or
        None when no such edge exists.
        """
        for edge in self.out_edges(head):
            if self.tail(edge) == tail:
                return edge
        return None

    def number_of_nodes(self):
        """
        Returns the number of visible nodes.
        """
        return len(self.nodes)

    def number_of_edges(self):
        """
        Returns the number of visible edges.
        """
        return len(self.edges)

    def __iter__(self):
        """
        Iterates over all visible nodes in the graph.
        """
        return iter(self.nodes)

    def node_list(self):
        """
        Return a list of the node ids for all visible nodes in the graph.
        """
        return list(self.nodes.keys())

    def edge_list(self):
        """
        Returns a list of the edge ids for all visible edges in the graph.
        """
        return list(self.edges.keys())

    def number_of_hidden_edges(self):
        """
        Returns the number of hidden edges.
        """
        return len(self.hidden_edges)

    def number_of_hidden_nodes(self):
        """
        Returns the number of hidden nodes.
        """
        return len(self.hidden_nodes)

    def hidden_node_list(self):
        """
        Returns a list with the hidden nodes.
        """
        return list(self.hidden_nodes.keys())

    def hidden_edge_list(self):
        """
        Returns a list with the hidden edges.
        """
        return list(self.hidden_edges.keys())

    def describe_node(self, node):
        """
        Return node, node data, outgoing edges, incoming edges for node.
        """
        incoming, outgoing, data = self.nodes[node]
        return node, data, outgoing, incoming

    def describe_edge(self, edge):
        """
        Return edge, edge data, head, tail for edge.
        """
        head, tail, data = self.edges[edge]
        return edge, data, head, tail

    def node_data(self, node):
        """
        Returns the data associated with a node.
        """
        return self.nodes[node][2]

    def edge_data(self, edge):
        """
        Returns the data associated with an edge.
        """
        return self.edges[edge][2]

    def update_edge_data(self, edge, edge_data):
        """
        Replace the edge data for a specific edge.
        """
        self.edges[edge] = self.edges[edge][0:2] + (edge_data,)

    def head(self, edge):
        """
        Returns the node at the head of the edge.
        """
        return self.edges[edge][0]

    def tail(self, edge):
        """
        Returns the node at the tail of the edge.
        """
        return self.edges[edge][1]

    def out_nbrs(self, node):
        """
        List of nodes connected by outgoing edges.
        """
        return [self.tail(n) for n in self.out_edges(node)]

    def inc_nbrs(self, node):
        """
        List of nodes connected by incoming edges.
        """
        return [self.head(n) for n in self.inc_edges(node)]

    def all_nbrs(self, node):
        """
        List of nodes connected by incoming and outgoing edges.
        """
        # dict.fromkeys removes duplicates while preserving order.
        return list(dict.fromkeys(self.inc_nbrs(node) + self.out_nbrs(node)))

    def out_edges(self, node):
        """
        Returns a list of the outgoing edge ids.
        """
        try:
            return list(self.nodes[node][1])
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def inc_edges(self, node):
        """
        Returns a list of the incoming edge ids.
        """
        try:
            return list(self.nodes[node][0])
        except KeyError:
            raise GraphError('Invalid node %s' % node)

    def all_edges(self, node):
        """
        Returns a set of the incoming and outgoing edge ids.
        """
        return set(self.inc_edges(node) + self.out_edges(node))

    def out_degree(self, node):
        """
        Returns the number of outgoing edges.
        """
        return len(self.out_edges(node))

    def inc_degree(self, node):
        """
        Returns the number of incoming edges.
        """
        return len(self.inc_edges(node))

    def all_degree(self, node):
        """
        The total degree of a node.
        """
        return self.inc_degree(node) + self.out_degree(node)

    def _topo_sort(self, forward=True):
        """
        Kahn-style topological sort.

        Returns ``(valid, topo_list)`` where ``topo_list`` orders nodes so
        that successors (following outgoing or incoming edges depending on
        ``forward``) appear after their predecessors, and ``valid`` is
        False when the graph contains a cycle (the list is then partial).
        """
        topo_list = []
        queue = deque()
        indeg = {}

        # Select which edge direction the sort follows.
        if forward:
            get_edges = self.out_edges
            get_degree = self.inc_degree
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_degree = self.out_degree
            get_next = self.head

        # Seed the queue with all zero-in-degree nodes.
        for node in self.node_list():
            degree = get_degree(node)
            if degree:
                indeg[node] = degree
            else:
                queue.append(node)

        while queue:
            curr_node = queue.popleft()
            topo_list.append(curr_node)
            for edge in get_edges(curr_node):
                tail_id = get_next(edge)
                if tail_id in indeg:
                    indeg[tail_id] -= 1
                    if indeg[tail_id] == 0:
                        queue.append(tail_id)

        # If some nodes were never reached the graph has a cycle.
        valid = len(topo_list) == self.number_of_nodes()
        return (valid, topo_list)

    def forw_topo_sort(self):
        """
        Topological sort following outgoing edges.
        """
        return self._topo_sort(forward=True)

    def back_topo_sort(self):
        """
        Reverse topological sort following incoming edges.
        """
        return self._topo_sort(forward=False)

    def _bfs_subgraph(self, start_id, forward=True):
        """
        Creates a subgraph of the nodes reachable from ``start_id`` in
        BFS order; ``forward`` selects the traversal direction.
        """
        if forward:
            get_bfs = self.forw_bfs
            get_nbrs = self.out_nbrs
        else:
            get_bfs = self.back_bfs
            get_nbrs = self.inc_nbrs

        g = Graph()
        bfs_list = get_bfs(start_id)
        for node in bfs_list:
            g.add_node(node)

        for node in bfs_list:
            for nbr_id in get_nbrs(node):
                g.add_edge(node, nbr_id)

        return g

    def forw_bfs_subgraph(self, start_id):
        """
        Creates and returns a subgraph consisting of the breadth first
        reachable nodes based on their outgoing edges.
        """
        return self._bfs_subgraph(start_id, forward=True)

    def back_bfs_subgraph(self, start_id):
        """
        Creates and returns a subgraph consisting of the breadth first
        reachable nodes based on the incoming edges.
        """
        return self._bfs_subgraph(start_id, forward=False)

    def iterdfs(self, start, end=None, forward=True):
        """
        Yields nodes in a depth first traversal, stopping after ``end``
        is yielded (if given); ``forward`` selects the direction.
        """
        visited, stack = {start}, deque([start])

        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head

        while stack:
            curr_node = stack.pop()
            yield curr_node

            if curr_node == end:
                break

            # Sort edge ids for a deterministic visiting order.
            for edge in sorted(get_edges(curr_node)):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    stack.append(tail)

    def iterdata(self, start, end=None, forward=True, condition=None):
        """
        Perform a depth-first walk (as :py:meth:`iterdfs`) and yield the
        node data of every node whose data is not None and for which
        ``condition(data)`` holds (when a condition is given).
        """
        visited, stack = {start}, deque([start])

        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head

        get_data = self.node_data

        while stack:
            curr_node = stack.pop()
            curr_data = get_data(curr_node)

            if curr_data is not None:
                if condition is not None and not condition(curr_data):
                    continue
                yield curr_data

            if curr_node == end:
                break

            for edge in get_edges(curr_node):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    stack.append(tail)

    def _iterbfs(self, start, end=None, forward=True):
        """
        Yields ``(node, hop)`` tuples in breadth first order, where ``hop``
        is the BFS depth; ``forward`` selects the edge direction.
        """
        queue, visited = deque([(start, 0)]), {start}

        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head

        while queue:
            curr_node, curr_step = queue.popleft()
            yield (curr_node, curr_step)

            if curr_node == end:
                break

            for edge in get_edges(curr_node):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    queue.append((tail, curr_step + 1))

    def forw_bfs(self, start, end=None):
        """
        Returns a list of nodes in forward BFS order, following outgoing
        edges from ``start``.
        """
        return [node for node, _step in self._iterbfs(start, end, forward=True)]

    def back_bfs(self, start, end=None):
        """
        Returns a list of nodes in backward BFS order, following incoming
        edges from ``start``.
        """
        return [node for node, _step in self._iterbfs(start, end, forward=False)]

    def forw_dfs(self, start, end=None):
        """
        Returns a list of nodes in forward DFS order, following outgoing
        edges from ``start``.
        """
        return list(self.iterdfs(start, end, forward=True))

    def back_dfs(self, start, end=None):
        """
        Returns a list of nodes in backward DFS order, following incoming
        edges from ``start``.
        """
        return list(self.iterdfs(start, end, forward=False))

    def connected(self):
        """
        Returns True when every node can be reached from every other node.
        """
        node_list = self.node_list()
        for node in node_list:
            bfs_list = self.forw_bfs(node)
            if len(bfs_list) != len(node_list):
                return False
        return True

    def clust_coef(self, node):
        """
        Returns the local clustering coefficient of ``node``: the ratio of
        the actual number of edges between its (outgoing) neighbours to
        the maximum possible number of such edges.  Self-loops are
        ignored.  See
        <http://en.wikipedia.org/wiki/Clustering_coefficient#Local_clustering_coefficient>.
        """
        num = 0
        nbr_set = set(self.out_nbrs(node))

        if node in nbr_set:
            nbr_set.remove(node)  # loop defense

        for nbr in nbr_set:
            sec_set = set(self.out_nbrs(nbr))
            if nbr in sec_set:
                sec_set.remove(nbr)  # loop defense
            num += len(nbr_set & sec_set)

        nbr_num = len(nbr_set)
        if nbr_num:
            clust_coef = float(num) / (nbr_num * (nbr_num - 1))
        else:
            clust_coef = 0.0
        return clust_coef

    def get_hops(self, start, end=None, forward=True):
        """
        Computes the hop distance to all nodes reachable from ``start``
        (first order neighbours are at hop 1, theirs at hop 2, etc.).
        Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on
        ``forward``; stops early when ``end`` is reached.

        :return: a list of ``(node, hop)`` tuples, e.g.
            ``[(1, 0), (2, 1), (3, 1), (4, 2)]``.
        """
        return list(self._iterbfs(start=start, end=end, forward=forward))
| 30.544656 | 124 | 0.564951 |
from __future__ import division
from __future__ import absolute_import
from altgraph import GraphError
from collections import deque
class Graph(object):
    """
    A directed graph with *N* nodes and *E* edges.

    Method prefixes *out*/*inc*/*all* select the outgoing, incoming or
    all edges of a node; *forw*/*back* select the direction of traversal
    (outgoing vs. incoming edges).
    """
    def __init__(self, edges=None):
        """Create a graph, optionally populated from an iterable of
        ``(head, tail)`` or ``(head, tail, data)`` tuples."""
        self.next_edge = 0
        # node -> ([incoming edge ids], [outgoing edge ids], node data)
        self.nodes, self.edges = {}, {}
        self.hidden_edges, self.hidden_nodes = {}, {}
        if edges is not None:
            for item in edges:
                if len(item) == 2:
                    head, tail = item
                    self.add_edge(head, tail)
                elif len(item) == 3:
                    head, tail, data = item
                    self.add_edge(head, tail, data)
                else:
                    raise GraphError("Cannot create edge from %s"%(item,))
    def __repr__(self):
        return '<Graph: %d nodes, %d edges>' % (
            self.number_of_nodes(), self.number_of_edges())
    def add_node(self, node, node_data=None):
        """Add a hashable node with optional data; silently ignores
        duplicates and currently hidden nodes."""
        if node in self.hidden_nodes:
            return
        if node not in self.nodes:
            self.nodes[node] = ([], [], node_data)
    def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):
        """Add a directed edge head_id -> tail_id, creating missing
        endpoint nodes when ``create_nodes`` is true."""
        edge = self.next_edge
        if create_nodes:
            self.add_node(head_id)
            self.add_node(tail_id)
        # index 0 -> incoming edges, index 1 -> outgoing edges
        try:
            self.nodes[tail_id][0].append(edge)
            self.nodes[head_id][1].append(edge)
        except KeyError:
            raise GraphError('Invalid nodes %s -> %s' % (head_id, tail_id))
        self.edges[edge] = (head_id, tail_id, edge_data)
        self.next_edge += 1
    def hide_edge(self, edge):
        """Hide an edge; it may be restored later with restore_edge."""
        try:
            # Move the record to hidden_edges and detach from both endpoints.
            head_id, tail_id, edge_data = self.hidden_edges[edge] = self.edges[edge]
            self.nodes[tail_id][0].remove(edge)
            self.nodes[head_id][1].remove(edge)
            del self.edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)
    def hide_node(self, node):
        """Hide a node together with all of its edges."""
        try:
            all_edges = self.all_edges(node)
            self.hidden_nodes[node] = (self.nodes[node], all_edges)
            for edge in all_edges:
                self.hide_edge(edge)
            del self.nodes[node]
        except KeyError:
            raise GraphError('Invalid node %s' % node)
    def restore_node(self, node):
        """Restore a previously hidden node and its edges."""
        try:
            self.nodes[node], all_edges = self.hidden_nodes[node]
            for edge in all_edges:
                self.restore_edge(edge)
            del self.hidden_nodes[node]
        except KeyError:
            raise GraphError('Invalid node %s' % node)
    def restore_edge(self, edge):
        """Restore a previously hidden edge."""
        try:
            head_id, tail_id, data = self.hidden_edges[edge]
            self.nodes[tail_id][0].append(edge)
            self.nodes[head_id][1].append(edge)
            self.edges[edge] = head_id, tail_id, data
            del self.hidden_edges[edge]
        except KeyError:
            raise GraphError('Invalid edge %s' % edge)
    def restore_all_edges(self):
        """Restore all hidden edges; edges whose endpoints are still
        hidden cannot be restored and are skipped."""
        for edge in list(self.hidden_edges.keys()):
            try:
                self.restore_edge(edge)
            except GraphError:
                pass
    def restore_all_nodes(self):
        """Restore all hidden nodes."""
        for node in list(self.hidden_nodes.keys()):
            self.restore_node(node)
    def __contains__(self, node):
        """Membership test over visible nodes."""
        return node in self.nodes
    def edge_by_id(self, edge):
        """Return the (head, tail) node pair for an edge id."""
        try:
            head, tail, data = self.edges[edge]
        except KeyError:
            head, tail = None, None
            raise GraphError('Invalid edge %s' % edge)
        return (head, tail)
    def edge_by_node(self, head, tail):
        """Return the id of the first edge head -> tail, or None."""
        for edge in self.out_edges(head):
            if self.tail(edge) == tail:
                return edge
        return None
    def number_of_nodes(self):
        """Number of visible nodes."""
        return len(self.nodes)
    def number_of_edges(self):
        """Number of visible edges."""
        return len(self.edges)
    def __iter__(self):
        """Iterate over all visible nodes."""
        return iter(self.nodes)
    def node_list(self):
        """List of ids of all visible nodes."""
        return list(self.nodes.keys())
    def edge_list(self):
        """List of ids of all visible edges."""
        return list(self.edges.keys())
    def number_of_hidden_edges(self):
        """Number of hidden edges."""
        return len(self.hidden_edges)
    def number_of_hidden_nodes(self):
        """Number of hidden nodes."""
        return len(self.hidden_nodes)
    def hidden_node_list(self):
        """List of the hidden nodes."""
        return list(self.hidden_nodes.keys())
    def hidden_edge_list(self):
        """List of the hidden edges."""
        return list(self.hidden_edges.keys())
    def describe_node(self, node):
        """Return node, node data, outgoing edges, incoming edges."""
        incoming, outgoing, data = self.nodes[node]
        return node, data, outgoing, incoming
    def describe_edge(self, edge):
        """Return edge, edge data, head, tail."""
        head, tail, data = self.edges[edge]
        return edge, data, head, tail
    def node_data(self, node):
        """Data attached to a node."""
        return self.nodes[node][2]
    def edge_data(self, edge):
        """Data attached to an edge."""
        return self.edges[edge][2]
    def update_edge_data(self, edge, edge_data):
        """Replace the data attached to an edge."""
        self.edges[edge] = self.edges[edge][0:2] + (edge_data,)
    def head(self, edge):
        """Node at the head of the edge."""
        return self.edges[edge][0]
    def tail(self, edge):
        """Node at the tail of the edge."""
        return self.edges[edge][1]
    def out_nbrs(self, node):
        """Nodes reached by outgoing edges."""
        l = [self.tail(n) for n in self.out_edges(node)]
        return l
    def inc_nbrs(self, node):
        """Nodes reached by incoming edges."""
        l = [self.head(n) for n in self.inc_edges(node)]
        return l
    def all_nbrs(self, node):
        """Nodes reached by incoming or outgoing edges (deduplicated,
        order preserved via dict.fromkeys)."""
        l = dict.fromkeys( self.inc_nbrs(node) + self.out_nbrs(node) )
        return list(l)
    def out_edges(self, node):
        """List of outgoing edge ids."""
        try:
            return list(self.nodes[node][1])
        except KeyError:
            raise GraphError('Invalid node %s' % node)
        return None
    def inc_edges(self, node):
        """List of incoming edge ids."""
        try:
            return list(self.nodes[node][0])
        except KeyError:
            raise GraphError('Invalid node %s' % node)
        return None
    def all_edges(self, node):
        """Set of incoming and outgoing edge ids."""
        return set(self.inc_edges(node) + self.out_edges(node))
    def out_degree(self, node):
        """Number of outgoing edges."""
        return len(self.out_edges(node))
    def inc_degree(self, node):
        """Number of incoming edges."""
        return len(self.inc_edges(node))
    def all_degree(self, node):
        """Total degree of a node."""
        return self.inc_degree(node) + self.out_degree(node)
    def _topo_sort(self, forward=True):
        """Kahn-style topological sort; returns (valid, topo_list) where
        valid is False when the graph has a cycle (list then partial)."""
        topo_list = []
        queue = deque()
        indeg = {}
        # Select which edge direction the sort follows.
        if forward:
            get_edges = self.out_edges
            get_degree = self.inc_degree
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_degree = self.out_degree
            get_next = self.head
        # Seed the queue with all zero-in-degree nodes.
        for node in self.node_list():
            degree = get_degree(node)
            if degree:
                indeg[node] = degree
            else:
                queue.append(node)
        while queue:
            curr_node = queue.popleft()
            topo_list.append(curr_node)
            for edge in get_edges(curr_node):
                tail_id = get_next(edge)
                if tail_id in indeg:
                    indeg[tail_id] -= 1
                    if indeg[tail_id] == 0:
                        queue.append(tail_id)
        if len(topo_list) == len(self.node_list()):
            valid = True
        else:
            # Some nodes were never reached: the graph has a cycle.
            valid = False
        return (valid, topo_list)
    def forw_topo_sort(self):
        """Topological sort following outgoing edges."""
        return self._topo_sort(forward=True)
    def back_topo_sort(self):
        """Reverse topological sort following incoming edges."""
        return self._topo_sort(forward=False)
    def _bfs_subgraph(self, start_id, forward=True):
        """Build a subgraph of the nodes BFS-reachable from start_id."""
        if forward:
            get_bfs = self.forw_bfs
            get_nbrs = self.out_nbrs
        else:
            get_bfs = self.back_bfs
            get_nbrs = self.inc_nbrs
        g = Graph()
        bfs_list = get_bfs(start_id)
        for node in bfs_list:
            g.add_node(node)
        for node in bfs_list:
            for nbr_id in get_nbrs(node):
                g.add_edge(node, nbr_id)
        return g
    def forw_bfs_subgraph(self, start_id):
        """Subgraph of nodes BFS-reachable along outgoing edges."""
        return self._bfs_subgraph(start_id, forward=True)
    def back_bfs_subgraph(self, start_id):
        """Subgraph of nodes BFS-reachable along incoming edges."""
        return self._bfs_subgraph(start_id, forward=False)
    def iterdfs(self, start, end=None, forward=True):
        """Yield nodes in depth first order; stops after yielding end."""
        visited, stack = {start}, deque([start])
        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head
        while stack:
            curr_node = stack.pop()
            yield curr_node
            if curr_node == end:
                break
            # Sort edge ids for a deterministic visiting order.
            for edge in sorted(get_edges(curr_node)):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    stack.append(tail)
    def iterdata(self, start, end=None, forward=True, condition=None):
        """Depth-first walk (as iterdfs) yielding node data of every node
        whose data is not None and passes ``condition`` (if given)."""
        visited, stack = {start}, deque([start])
        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head
        get_data = self.node_data
        while stack:
            curr_node = stack.pop()
            curr_data = get_data(curr_node)
            if curr_data is not None:
                if condition is not None and not condition(curr_data):
                    continue
                yield curr_data
            if curr_node == end:
                break
            for edge in get_edges(curr_node):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    stack.append(tail)
    def _iterbfs(self, start, end=None, forward=True):
        """Yield (node, hop) tuples in breadth first order."""
        queue, visited = deque([(start, 0)]), {start}
        if forward:
            get_edges = self.out_edges
            get_next = self.tail
        else:
            get_edges = self.inc_edges
            get_next = self.head
        while queue:
            curr_node, curr_step = queue.popleft()
            yield (curr_node, curr_step)
            if curr_node == end:
                break
            for edge in get_edges(curr_node):
                tail = get_next(edge)
                if tail not in visited:
                    visited.add(tail)
                    queue.append((tail, curr_step + 1))
    def forw_bfs(self, start, end=None):
        """Nodes in forward BFS order (outgoing edges)."""
        return [node for node, step in self._iterbfs(start, end, forward=True)]
    def back_bfs(self, start, end=None):
        """Nodes in backward BFS order (incoming edges)."""
        return [node for node, step in self._iterbfs(start, end, forward=False)]
    def forw_dfs(self, start, end=None):
        """Nodes in forward DFS order (outgoing edges)."""
        return list(self.iterdfs(start, end, forward=True))
    def back_dfs(self, start, end=None):
        """Nodes in backward DFS order (incoming edges)."""
        return list(self.iterdfs(start, end, forward=False))
    def connected(self):
        """True when every node can be reached from every other node."""
        node_list = self.node_list()
        for node in node_list:
            bfs_list = self.forw_bfs(node)
            if len(bfs_list) != len(node_list):
                return False
        return True
    def clust_coef(self, node):
        """Local clustering coefficient of node: actual edges between its
        outgoing neighbours over the maximum possible; loops ignored."""
        num = 0
        nbr_set = set(self.out_nbrs(node))
        if node in nbr_set:
            nbr_set.remove(node)  # loop defense
        for nbr in nbr_set:
            sec_set = set(self.out_nbrs(nbr))
            if nbr in sec_set:
                sec_set.remove(nbr)  # loop defense
            num += len(nbr_set & sec_set)
        nbr_num = len(nbr_set)
        if nbr_num:
            clust_coef = float(num) / (nbr_num * (nbr_num - 1))
        else:
            clust_coef = 0.0
        return clust_coef
    def get_hops(self, start, end=None, forward=True):
        """Return a list of (node, hop) tuples giving the BFS hop distance
        of each reachable node from start; direction set by ``forward``."""
        if forward:
            return list(self._iterbfs(start=start, end=end, forward=True))
        else:
            return list(self._iterbfs(start=start, end=end, forward=False))
| true | true |
f7228538402e71c82d83f635f86f69621986543e | 43 | py | Python | 2_from_go_to_python/trysum.py | TihonV/pygoexamples | ca9604862d08145057aefc60cd0f9b77c9f5346a | [
"MIT"
] | 18 | 2020-01-23T21:20:47.000Z | 2022-02-20T19:10:02.000Z | 2_from_go_to_python/trysum.py | TihonV/pygoexamples | ca9604862d08145057aefc60cd0f9b77c9f5346a | [
"MIT"
] | null | null | null | 2_from_go_to_python/trysum.py | TihonV/pygoexamples | ca9604862d08145057aefc60cd0f9b77c9f5346a | [
"MIT"
] | 4 | 2019-08-03T12:59:53.000Z | 2022-02-07T23:43:35.000Z | from newmath import sum
print(sum(2, 40))
| 10.75 | 23 | 0.72093 | from newmath import sum
print(sum(2, 40))
| true | true |
f72285e8d9341b3fdd5f9beb71eb70ee3b71247c | 11,503 | py | Python | sciibo/server/server.py | fdev/sciibo | 984ec1945cd0f371bce148c1eb1e811befadb478 | [
"MIT"
] | 14 | 2017-06-16T14:16:57.000Z | 2021-02-26T13:53:56.000Z | sciibo/server/server.py | fdev/sciibo | 984ec1945cd0f371bce148c1eb1e811befadb478 | [
"MIT"
] | 1 | 2018-06-27T16:11:48.000Z | 2019-01-23T12:02:17.000Z | sciibo/server/server.py | fdev/sciibo | 984ec1945cd0f371bce148c1eb1e811befadb478 | [
"MIT"
] | null | null | null | from threading import Thread
import random
import time
from sciibo.bot import Bot
from sciibo.core.emitter import Emitter
from sciibo.core.helpers import Queue
from sciibo.network.broadcast import BroadcastThread
from sciibo.network.connection import ConnectionThread
from sciibo.network.listen import ListenThread
from sciibo.network.proxy import ProxyConnections
from .game import Game
class Server(Thread, Emitter):
    """Hosts a game and relays messages between its players.

    Runs as a thread: client messages are queued by the connection
    threads and drained on this thread (see ``run``), so the game state
    is only ever mutated from one thread. Emits ``'join'``/``'leave'``
    events so the hosting interface can update its lobby.
    """

    def __init__(self, name, cards=15, local=False):
        """
        :param name: server name announced to discovering clients
        :param cards: number of stock-pile cards dealt to each player
        :param local: when True no network threads are started (single
                      machine game with bots only)
        """
        Thread.__init__(self)
        Emitter.__init__(self)
        # Indicates the thread should be stopped
        self.stopped = False
        # Server settings
        self.name = name
        self.local = local
        self.cards = cards
        # Create game state
        self.game = Game()
        if not self.local:
            # Binding the port might fail, exception will bubble up
            # before we start any of these threads
            self.listen = ListenThread()
            self.broadcast = BroadcastThread()
            # Listen for incoming connections
            self.listen.on('connect', self.on_connect)
            self.listen.start()
            # Let clients find this server by broadcasting
            self.broadcast.on('discover', self.on_discover)
            self.broadcast.start()
        # Connections with clients (network sockets and bot proxies)
        self.connections = []
        # Queue of incoming messages to be processed on the server thread
        self.queue = Queue()
        # Bot clients
        self.bots = []
        # Pool of names still available for bots; shuffled so the names
        # vary from game to game
        self.bot_names = [
            'Anna',
            'Becca',
            'Charlotte',
            'Daphne',
            'Emily',
        ]
        random.shuffle(self.bot_names)

    def stop(self):
        """Request shutdown; the run loop notices on its next pass."""
        # Thread should be stopped
        self.stopped = True

    def run(self):
        """Main loop: process queued client messages until stopped, then
        tear down networking, client connections and bot threads."""
        while not self.stopped:
            while not self.queue.empty():
                conn, data = self.queue.get()
                self.on_client_message(conn, data)
                self.queue.task_done()
            time.sleep(0.2)
        # Stop networking
        if not self.local:
            self.listen.stop()
            self.broadcast.stop()
        # Stop connections with clients
        for conn in self.connections:
            conn.stop()
        # Stop bot threads
        for bot in self.bots:
            bot.stop()

    """
    Events
    """

    def on_discover(self, address):
        """Answer a broadcast discovery request with this server's name."""
        self.broadcast.send(address, self.name)

    def on_connect(self, sock, address):
        """Wrap a newly accepted socket in a connection thread."""
        conn = ConnectionThread(sock)
        conn.on('receive', self.on_client_receive)
        self.connections.append(conn)
        conn.start()

    def on_client_receive(self, conn, data):
        """Queue an incoming message; it is handled on the server thread
        (see ``run``) to keep game-state mutation single-threaded."""
        self.queue.put((conn, data))

    def on_client_message(self, conn, data):
        """Handle one client message: 'join', 'play' or 'disconnect'."""
        # NOTE: shadows the builtin `type` for the rest of this method
        type = data['type']
        # Validate message type
        if type not in ('join', 'play', 'disconnect'):
            return
        if type == 'disconnect':
            # Stop connection
            conn.stop()
            self.connections.remove(conn)
            if conn.player:
                player = self.game.get_player(conn.player)
                # Let other players know player left
                self.send_all({
                    'type': 'leave',
                    'player': player.id,
                })
                # Remove player from player list
                self.game.remove_player(player.id)
                # Let interface know (only used in lobby)
                self.trigger('leave', player.name)
                # Active game
                if self.game.started and not self.game.ended:
                    if len(self.game.player_ids) == 1:
                        # Only one player remains, so the game cannot
                        # continue: announce the end (no winner)
                        self.send_all({
                            'type': 'end',
                        })
                        self.game.end()
                        return
                    # It was this player's turn
                    if self.game.turn == player.id:
                        self.next_turn()
        if type == 'join':
            # Connection is already joined
            if conn.player:
                return
            name = data['name']
            # Reject when the game is running, the table is full, or the
            # requested name is already taken (case-insensitive)
            reason = None
            if self.game.started:
                reason = 'started'
            elif len(self.game.players) == 5:
                reason = 'full'
            elif any(player.name.lower() == name.lower() for player in self.game.players):
                reason = 'name'
            if reason:
                conn.send({
                    'type': 'reject',
                    'reason': reason,
                })
                conn.stop()
                self.connections.remove(conn)
                return
            # Call helper method to add player to game
            self.add_player(name, conn, 'network')
        if type == 'play':
            # Ignore plays from unjoined connections, outside an active
            # game, or out of turn
            if not conn.player:
                return
            if not self.game.started:
                return
            if self.game.ended:
                return
            if self.game.turn != conn.player:
                return
            player = self.game.get_player(conn.player)
            value = data['value']
            source = data['source']
            target = data['target']
            # Invalid move, let player try again
            if not self.game.valid_move(value, source, target):
                # Only send turn again to player, other players
                # don't know an invalid move was made
                player.send({
                    'type': 'invalid',
                })
                return
            # Perform move
            self.game.play(value, source, target)
            message = {
                'type': 'play',
                'player': player.id,
                'value': value,
                'source': source,
                'target': target,
            }
            # New stock card revealed
            if source == 'stock':
                message['reveal'] = player.stock_pile.top
            self.send_all(message)
            # Emptying the stock pile wins the game
            if player.stock_pile.empty():
                self.send_all({
                    'type': 'end',
                    'winner': player.id,
                })
                self.game.end(player.id)
                return
            # Card was played to build pile: the turn continues
            if target in ('build:0', 'build:1', 'build:2', 'build:3'):
                # Player emptied their hand
                if len(player.hand) == 0:
                    # Give five new cards
                    self.draw_cards()
                    # Player gets another turn
                    player.send({
                        'type': 'turn',
                        'player': player.id,
                    })
            else:
                # Card went to a discard pile, which ends the turn
                self.next_turn()

    """
    Actions
    """

    def add_bot(self):
        """Add a computer player connected through an in-process proxy."""
        if len(self.game.player_ids) == 5:
            return
        # Find a bot name that is not in use
        player_names = [player.name.lower() for player in self.game.players]
        name = [name for name in self.bot_names if name.lower() not in player_names][0]
        self.bot_names.remove(name)
        # Add bot
        proxy_server, proxy_client = ProxyConnections()
        bot = Bot(proxy_client)
        bot.start()
        self.bots.append(bot)
        # Call helper method to add player to game
        self.add_player(name, proxy_server, 'bot')
        self.connections.append(proxy_server)

    def kick_player(self, id):
        """Remove the player with *id* from the lobby.

        Local players cannot be kicked; bot names return to the pool and
        network players are told they were kicked before disconnecting.
        """
        player = self.game.get_player(id)
        if player:
            # Local player can not be kicked
            if player.type == 'local':
                return
            # Return bot name back to pool
            if player.type == 'bot':
                self.bot_names.append(player.name)
            if player.type == 'network':
                player.send({
                    'type': 'kick'
                })
            # Stop connection
            player.conn.stop()
            self.connections.remove(player.conn)
            # Let other players know player left
            self.send_all({
                'type': 'leave',
                'player': player.id,
            })
            # Remove player from player list
            self.game.remove_player(id)
            # Let interface know
            self.trigger('leave', player.name)

    def start_game(self):
        """Deal the cards and start the game; needs at least two players."""
        if len(self.game.player_ids) < 2:
            return
        self.game.start(self.cards)
        # Top stock cards in player order
        reveal_cards = [player.stock_pile.top for player in self.game.players]
        # Start the game
        self.send_all({
            'type': 'start',
            'order': self.game.player_ids,
            'stock': self.cards,
            'reveal': reveal_cards,
        })
        # Send dealt cards to players
        for player in self.game.players:
            # Send hand to player
            player.send({
                'type': 'hand',
                'cards': player.hand,
            })
            # Send hand count to opponents
            self.send_all({
                'type': 'draw',
                'player': player.id,
                'cards': 5,
            }, without=player.id)
        # Let players know whose turn it is
        self.send_all({
            'type': 'turn',
            'player': self.game.turn,
        })

    """
    Helper methods
    """

    def send_all(self, data, without=None):
        """Send *data* to every player, optionally skipping player id *without*."""
        for player in self.game.players:
            if player.id == without:
                continue
            player.send(data)

    def add_player(self, name, conn, type):
        """Register a player on *conn*, notify everybody, return the player.

        :param type: 'network', 'bot' or 'local'
        """
        # Add player to game
        player = self.game.add_player(name, conn, type)
        # Reset possibly set receive handler by on_connect
        conn.off('receive')
        # Handle messages from client
        conn.on('receive', self.on_client_receive)
        # Bind connection to player
        conn.player = player.id
        # List of players (including player who just joined)
        players = []
        for opponent in self.game.players:
            players.append({
                'id': opponent.id,
                'name': opponent.name,
            })
        # Send welcome message
        player.send({
            'type': 'welcome',
            'name': self.name,
            'id': player.id,
            'players': players,
        })
        # Send join message to other players
        self.send_all({
            'type': 'join',
            'id': player.id,
            'name': player.name,
        }, without=player.id)
        # Let interface know
        self.trigger('join', player.name)
        return player

    def draw_cards(self):
        """Refill the current player's hand from the draw pile.

        The player learns which cards were drawn; opponents only learn
        how many.
        """
        cards = self.game.draw_cards()
        player = self.game.get_player(self.game.turn)
        # Draw pile might be empty
        if cards:
            # Let opponents know how many cards the player drew
            self.send_all({
                'type': 'draw',
                'player': player.id,
                'cards': len(cards),
            }, without=player.id)
            # Tell the player which cards they drew
            player.send({
                'type': 'hand',
                'cards': cards,
            })

    def next_turn(self):
        """Advance to the next player, refill their hand, announce the turn."""
        self.game.next_turn()
        self.draw_cards()
        # Let everybody know whose turn it is
        self.send_all({
            'type': 'turn',
            'player': self.game.turn,
        })
| 28.332512 | 90 | 0.494567 | from threading import Thread
import random
import time
from sciibo.bot import Bot
from sciibo.core.emitter import Emitter
from sciibo.core.helpers import Queue
from sciibo.network.broadcast import BroadcastThread
from sciibo.network.connection import ConnectionThread
from sciibo.network.listen import ListenThread
from sciibo.network.proxy import ProxyConnections
from .game import Game
class Server(Thread, Emitter):
def __init__(self, name, cards=15, local=False):
Thread.__init__(self)
Emitter.__init__(self)
self.stopped = False
self.name = name
self.local = local
self.cards = cards
self.game = Game()
if not self.local:
self.listen = ListenThread()
self.broadcast = BroadcastThread()
self.listen.on('connect', self.on_connect)
self.listen.start()
self.broadcast.on('discover', self.on_discover)
self.broadcast.start()
self.connections = []
self.queue = Queue()
self.bots = []
self.bot_names = [
'Anna',
'Becca',
'Charlotte',
'Daphne',
'Emily',
]
random.shuffle(self.bot_names)
def stop(self):
self.stopped = True
def run(self):
while not self.stopped:
while not self.queue.empty():
conn, data = self.queue.get()
self.on_client_message(conn, data)
self.queue.task_done()
time.sleep(0.2)
if not self.local:
self.listen.stop()
self.broadcast.stop()
for conn in self.connections:
conn.stop()
for bot in self.bots:
bot.stop()
def on_discover(self, address):
self.broadcast.send(address, self.name)
def on_connect(self, sock, address):
conn = ConnectionThread(sock)
conn.on('receive', self.on_client_receive)
self.connections.append(conn)
conn.start()
def on_client_receive(self, conn, data):
self.queue.put((conn, data))
def on_client_message(self, conn, data):
type = data['type']
if type not in ('join', 'play', 'disconnect'):
return
if type == 'disconnect':
conn.stop()
self.connections.remove(conn)
if conn.player:
player = self.game.get_player(conn.player)
self.send_all({
'type': 'leave',
'player': player.id,
})
self.game.remove_player(player.id)
self.trigger('leave', player.name)
if self.game.started and not self.game.ended:
if len(self.game.player_ids) == 1:
self.send_all({
'type': 'end',
})
self.game.end()
return
if self.game.turn == player.id:
self.next_turn()
if type == 'join':
# Connection is already joined
if conn.player:
return
name = data['name']
reason = None
if self.game.started:
reason = 'started'
elif len(self.game.players) == 5:
reason = 'full'
elif any(player.name.lower() == name.lower() for player in self.game.players):
reason = 'name'
if reason:
conn.send({
'type': 'reject',
'reason': reason,
})
conn.stop()
self.connections.remove(conn)
return
# Call helper method to add player to game
self.add_player(name, conn, 'network')
if type == 'play':
if not conn.player:
return
if not self.game.started:
return
if self.game.ended:
return
if self.game.turn != conn.player:
return
player = self.game.get_player(conn.player)
value = data['value']
source = data['source']
target = data['target']
# Invalid move, let player try again
if not self.game.valid_move(value, source, target):
# Only send turn again to player, other players
# don't know an invalid move was made
player.send({
'type': 'invalid',
})
return
self.game.play(value, source, target)
message = {
'type': 'play',
'player': player.id,
'value': value,
'source': source,
'target': target,
}
if source == 'stock':
message['reveal'] = player.stock_pile.top
self.send_all(message)
if player.stock_pile.empty():
self.send_all({
'type': 'end',
'winner': player.id,
})
self.game.end(player.id)
return
if target in ('build:0', 'build:1', 'build:2', 'build:3'):
if len(player.hand) == 0:
self.draw_cards()
player.send({
'type': 'turn',
'player': player.id,
})
else:
self.next_turn()
def add_bot(self):
if len(self.game.player_ids) == 5:
return
player_names = [player.name.lower() for player in self.game.players]
name = [name for name in self.bot_names if name.lower() not in player_names][0]
self.bot_names.remove(name)
proxy_server, proxy_client = ProxyConnections()
bot = Bot(proxy_client)
bot.start()
self.bots.append(bot)
self.add_player(name, proxy_server, 'bot')
self.connections.append(proxy_server)
def kick_player(self, id):
player = self.game.get_player(id)
if player:
if player.type == 'local':
return
if player.type == 'bot':
self.bot_names.append(player.name)
if player.type == 'network':
player.send({
'type': 'kick'
})
player.conn.stop()
self.connections.remove(player.conn)
self.send_all({
'type': 'leave',
'player': player.id,
})
self.game.remove_player(id)
self.trigger('leave', player.name)
def start_game(self):
if len(self.game.player_ids) < 2:
return
self.game.start(self.cards)
reveal_cards = [player.stock_pile.top for player in self.game.players]
self.send_all({
'type': 'start',
'order': self.game.player_ids,
'stock': self.cards,
'reveal': reveal_cards,
})
for player in self.game.players:
player.send({
'type': 'hand',
'cards': player.hand,
})
self.send_all({
'type': 'draw',
'player': player.id,
'cards': 5,
}, without=player.id)
self.send_all({
'type': 'turn',
'player': self.game.turn,
})
def send_all(self, data, without=None):
for player in self.game.players:
if player.id == without:
continue
player.send(data)
def add_player(self, name, conn, type):
player = self.game.add_player(name, conn, type)
conn.off('receive')
conn.on('receive', self.on_client_receive)
conn.player = player.id
players = []
for opponent in self.game.players:
players.append({
'id': opponent.id,
'name': opponent.name,
})
player.send({
'type': 'welcome',
'name': self.name,
'id': player.id,
'players': players,
})
self.send_all({
'type': 'join',
'id': player.id,
'name': player.name,
}, without=player.id)
self.trigger('join', player.name)
return player
def draw_cards(self):
cards = self.game.draw_cards()
player = self.game.get_player(self.game.turn)
if cards:
self.send_all({
'type': 'draw',
'player': player.id,
'cards': len(cards),
}, without=player.id)
player.send({
'type': 'hand',
'cards': cards,
})
def next_turn(self):
self.game.next_turn()
self.draw_cards()
self.send_all({
'type': 'turn',
'player': self.game.turn,
})
| true | true |
f7228645fa62a5b2234016ce0f4af3df946424a2 | 4,120 | py | Python | RecoEgamma/EgammaIsolationAlgos/python/egmGedGsfElectronPFIsolation_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEgamma/EgammaIsolationAlgos/python/egmGedGsfElectronPFIsolation_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEgamma/EgammaIsolationAlgos/python/egmGedGsfElectronPFIsolation_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# All four producers below are instances of the same CITK isolation-sum
# module; they only differ in the candidate collection the isolation cone is
# built from and in the per-species cone definitions. Small builders keep
# the repeated parameter sets in one place.

def _coneVetoDefinition(species, endcapVeto, vertexCodes):
    # Cone isolation (dR = 0.3) with an inner veto cone; the veto is only
    # applied in the endcaps, the barrel veto size is always zero.
    return cms.PSet(
        isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
        coneSize = cms.double(0.3),
        VetoConeSizeBarrel = cms.double(0.0),
        VetoConeSizeEndcaps = cms.double(endcapVeto),
        isolateAgainst = cms.string(species),
        miniAODVertexCodes = cms.vuint32(*vertexCodes),
    )

def _mapBasedVetoDefinition(species, vertexCodes):
    # Cone isolation (dR = 0.3) that vetoes the candidates associated to the
    # electron via the particle-based isolation map instead of a veto cone.
    return cms.PSet(
        isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
        coneSize = cms.double(0.3),
        isolateAgainst = cms.string(species),
        miniAODVertexCodes = cms.vuint32(*vertexCodes),
        vertexIndex = cms.int32(0),
        particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons"),
    )

def _isolationSum(candidates, definitions):
    # One CITKPFIsolationSumProducer isolating gedGsfElectrons against the
    # given particle-flow candidate collection.
    return cms.EDProducer(
        "CITKPFIsolationSumProducer",
        srcToIsolate = cms.InputTag("gedGsfElectrons"),
        srcForIsolationCone = cms.InputTag(candidates),
        isolationConeDefinitions = cms.VPSet(*definitions),
    )

# Cone-veto isolation against charged hadrons, neutral hadrons and photons
# from the pile-up-subtracted collection (miniAOD vertex codes 2 and 3).
egmGedGsfElectronPFNoPileUpIsolation = _isolationSum(
    'pfNoPileUpCandidates',
    [
        _coneVetoDefinition('h+', 0.015, (2, 3)),
        _coneVetoDefinition('h0', 0.0, (2, 3)),
        _coneVetoDefinition('gamma', 0.08, (2, 3)),
    ],
)

# Charged-hadron cone-veto isolation from the pile-up collection (vertex
# codes 0 and 1), used to estimate the pile-up contamination.
egmGedGsfElectronPFPileUpIsolation = _isolationSum(
    'pfPileUpAllChargedParticles',
    [
        _coneVetoDefinition('h+', 0.015, (0, 1)),
    ],
)

# The same no-pile-up isolation sums, but with the map-based footprint veto.
egmGedGsfElectronPFNoPileUpIsolationMapBasedVeto = _isolationSum(
    'pfNoPileUpCandidates',
    [
        _mapBasedVetoDefinition('h+', (2, 3)),
        _mapBasedVetoDefinition('h0', (2, 3)),
        _mapBasedVetoDefinition('gamma', (2, 3)),
    ],
)

# Map-based-veto charged-hadron isolation from the pile-up collection.
egmGedGsfElectronPFPileUpIsolationMapBasedVeto = _isolationSum(
    'pfPileUpAllChargedParticles',
    [
        _mapBasedVetoDefinition('h+', (2, 3)),
    ],
)
| 47.356322 | 103 | 0.616505 | import FWCore.ParameterSet.Config as cms
egmGedGsfElectronPFNoPileUpIsolation = cms.EDProducer(
"CITKPFIsolationSumProducer",
srcToIsolate = cms.InputTag("gedGsfElectrons"),
srcForIsolationCone = cms.InputTag('pfNoPileUpCandidates'),
isolationConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.015),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.0),
isolateAgainst = cms.string('h0'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.08),
isolateAgainst = cms.string('gamma'),
miniAODVertexCodes = cms.vuint32(2,3) )
)
)
egmGedGsfElectronPFPileUpIsolation = cms.EDProducer(
"CITKPFIsolationSumProducer",
srcToIsolate = cms.InputTag("gedGsfElectrons"),
srcForIsolationCone = cms.InputTag('pfPileUpAllChargedParticles'),
isolationConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.015),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(0,1) )
)
)
egmGedGsfElectronPFNoPileUpIsolationMapBasedVeto = cms.EDProducer(
"CITKPFIsolationSumProducer",
srcToIsolate = cms.InputTag("gedGsfElectrons"),
srcForIsolationCone = cms.InputTag('pfNoPileUpCandidates'),
isolationConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
coneSize = cms.double(0.3),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(2,3),
vertexIndex = cms.int32(0),
particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") ),
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
coneSize = cms.double(0.3),
isolateAgainst = cms.string('h0'),
miniAODVertexCodes = cms.vuint32(2,3),
vertexIndex = cms.int32(0),
particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") ),
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
coneSize = cms.double(0.3),
isolateAgainst = cms.string('gamma'),
miniAODVertexCodes = cms.vuint32(2,3),
vertexIndex = cms.int32(0),
particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") )
)
)
egmGedGsfElectronPFPileUpIsolationMapBasedVeto = cms.EDProducer(
"CITKPFIsolationSumProducer",
srcToIsolate = cms.InputTag("gedGsfElectrons"),
srcForIsolationCone = cms.InputTag('pfPileUpAllChargedParticles'),
isolationConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
coneSize = cms.double(0.3),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(2,3),
vertexIndex = cms.int32(0),
particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") )
)
)
| true | true |
f722864831e45d7a09384c7672276faa4eb8a8f0 | 1,691 | py | Python | pysock/__init__.py | Polidea/SOCK | cfb7a39d375d60ce3e11b36606673842691c7f44 | [
"Unlicense"
] | 13 | 2015-11-18T00:58:24.000Z | 2020-03-03T00:17:15.000Z | pysock/__init__.py | Polidea/SOCK | cfb7a39d375d60ce3e11b36606673842691c7f44 | [
"Unlicense"
] | 4 | 2016-04-15T11:06:51.000Z | 2020-03-27T04:27:05.000Z | pysock/__init__.py | Polidea/SOCK | cfb7a39d375d60ce3e11b36606673842691c7f44 | [
"Unlicense"
] | 4 | 2016-07-16T04:26:50.000Z | 2021-02-05T10:58:12.000Z | # Tomasz Netczuk (netczuk.tomasz at gmail.com)
# Dariusz Seweryn (dariusz.seweryn at gmail.com)
#
# Copyright (c) 2009-2013 Polidea Sp. z o.o. (http://www.polidea.pl)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project. | 58.310345 | 81 | 0.784151 | true | true | |
f7228871205a20643787bd41df943154c3bd2e25 | 971 | py | Python | url/resources/percent-encoding.py | xi/wpt | 9ebc6151dae4d8200f33d51de12c84e3a3ff9b4e | [
"BSD-3-Clause"
] | 1 | 2021-12-19T09:30:55.000Z | 2021-12-19T09:30:55.000Z | url/resources/percent-encoding.py | xi/wpt | 9ebc6151dae4d8200f33d51de12c84e3a3ff9b4e | [
"BSD-3-Clause"
] | null | null | null | url/resources/percent-encoding.py | xi/wpt | 9ebc6151dae4d8200f33d51de12c84e3a3ff9b4e | [
"BSD-3-Clause"
] | 1 | 2020-11-09T05:05:06.000Z | 2020-11-09T05:05:06.000Z | import base64
from wptserve.utils import isomorphic_decode
# Use numeric references to let the HTML parser take care of inserting the correct code points
# rather than trying to figure out the necessary bytes for each encoding. (The latter can be
# especially tricky given that Python does not implement the Encoding Standard.)
def numeric_references(input):
    """Encode every code point of *input* as an HTML numeric character
    reference, e.g. "A" -> b"&#x41;".

    Numeric references let the HTML parser insert the correct code points
    regardless of the charset the response is declared in.
    """
    output = b""
    for cp in input:
        # Build the uppercase-hex reference as bytes. The previous
        # version passed a bytes format spec (format(ord(cp), b"X")) and
        # concatenated the resulting str into bytes, which raises
        # TypeError on Python 3; format specs must be str, and the hex
        # digits are ASCII-safe to encode.
        output += b"&#x" + format(ord(cp), "X").encode("ascii") + b";"
    return output
def main(request, response):
    """wptserve handler: serve an HTML page embedding the requested string.

    Query parameters (both bytes):
      value:    base64-encoded UTF-8 string whose code points are under test
      encoding: charset name to declare in the Content-Type header

    The decoded string is emitted as numeric character references in the
    query and fragment of a link, so the page is valid in any charset.
    """
    # Undo the "magic" space with + replacement as otherwise base64 decoding
    # will fail. The query value arrives as bytes, so the replacement
    # arguments must be bytes too (str arguments raise TypeError on bytes).
    value = request.GET.first(b"value").replace(b" ", b"+")
    encoding = request.GET.first(b"encoding")
    # bytes.decode requires a str codec name; the former b"utf-8" argument
    # raises TypeError on Python 3.
    output_value = numeric_references(base64.b64decode(value).decode("utf-8"))
    return (
        [(b"Content-Type", b"text/html;charset=" + encoding)],
        b"""<!doctype html>
<a href="https://doesnotmatter.invalid/?%s#%s">test</a>
""" % (output_value, output_value))
| 40.458333 | 94 | 0.69516 | import base64
from wptserve.utils import isomorphic_decode
def numeric_references(input):
output = b""
for cp in input:
output += b"&#x" + format(ord(cp), b"X") + b";"
return output
def main(request, response):
value = request.GET.first(b"value").replace(" ", "+")
encoding = request.GET.first(b"encoding")
output_value = numeric_references(base64.b64decode(value).decode(b"utf-8"))
return (
[(b"Content-Type", b"text/html;charset=" + encoding)],
b"""<!doctype html>
<a href="https://doesnotmatter.invalid/?%s#%s">test</a>
""" % (output_value, output_value))
| true | true |
f72288a1068e029afcb77cc16a912627b6584a89 | 6,138 | py | Python | Pyrado/pyrado/environments/mujoco/openai_half_cheetah.py | swami1995/SimuRLacra | 795e6ea45fbb722242ddb0c0ea5c62432826411e | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 52 | 2020-05-02T13:55:09.000Z | 2022-03-09T14:49:36.000Z | Pyrado/pyrado/environments/mujoco/openai_half_cheetah.py | swami1995/SimuRLacra | 795e6ea45fbb722242ddb0c0ea5c62432826411e | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 40 | 2020-09-01T15:19:22.000Z | 2021-11-02T14:51:41.000Z | Pyrado/pyrado/environments/mujoco/openai_half_cheetah.py | swami1995/SimuRLacra | 795e6ea45fbb722242ddb0c0ea5c62432826411e | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 13 | 2020-07-03T11:39:21.000Z | 2022-02-20T01:12:42.000Z | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os.path as osp
from typing import Optional
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.environments.mujoco.base import MujocoSimEnv
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.tasks.base import Task
from pyrado.tasks.goalless import GoallessTask
from pyrado.tasks.reward_functions import ForwardVelocityRewFcn
class HalfCheetahSim(MujocoSimEnv, Serializable):
    """
    The Half-Cheetah (v3) MuJoCo simulation environment where a planar cheetah-like robot tries to run forward.

    .. note::
        The OpenAI Gym variant considers this task solved at a reward over 4800
        (https://github.com/openai/gym/blob/master/gym/envs/__init__.py).

    .. seealso::
        https://github.com/openai/gym/blob/master/gym/envs/mujoco/half_cheetah_v3.py
    """

    name: str = "cth"

    def __init__(
        self,
        frame_skip: int = 5,
        dt: Optional[float] = None,
        max_steps: int = 1000,
        task_args: Optional[dict] = None,
    ):
        """
        Constructor

        :param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
                           the time step size `dt`
        :param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
                   frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
                   overwritten. Possible use case if you know that you recorded a trajectory with a specific `dt`.
        :param max_steps: max number of simulation time steps
        :param task_args: arguments for the task construction, e.g `dict(fwd_rew_weight=1.)`
        """
        # Call MujocoSimEnv's constructor
        model_path = osp.join(osp.dirname(__file__), "assets", "openai_half_cheetah.xml")
        super().__init__(model_path, frame_skip, dt, max_steps, task_args)

        # Initial state space: a uniform box of half-width `reset_noise_halfspan` around the nominal initial state
        noise_halfspan = self.domain_param["reset_noise_halfspan"]
        min_init_qpos = self.init_qpos - np.full_like(self.init_qpos, noise_halfspan)
        max_init_qpos = self.init_qpos + np.full_like(self.init_qpos, noise_halfspan)
        # Fixed copy-paste defect: these bounds were previously built with full_like(self.init_qpos, ...), which only
        # happened to work because nq == nv for this model. Use init_qvel so the shapes are correct in general.
        min_init_qvel = self.init_qvel - np.full_like(self.init_qvel, noise_halfspan)
        max_init_qvel = self.init_qvel + np.full_like(self.init_qvel, noise_halfspan)
        min_init_state = np.concatenate([min_init_qpos, min_init_qvel]).ravel()
        max_init_state = np.concatenate([max_init_qpos, max_init_qvel]).ravel()
        self._init_space = BoxSpace(min_init_state, max_init_state)

        self.camera_config = dict(distance=5.0)

    @property
    def state_space(self) -> Space:
        # The state is the concatenation of joint positions and velocities, unbounded
        state_shape = np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).shape
        return BoxSpace(-pyrado.inf, pyrado.inf, shape=state_shape)

    @property
    def obs_space(self) -> Space:
        # Derive the observation shape from `observe()`, which strips one state dimension
        obs_shape = self.observe(self.state_space.bound_up).shape
        return BoxSpace(-pyrado.inf, pyrado.inf, shape=obs_shape)

    @property
    def act_space(self) -> Space:
        # Torque limits for the six actuated joints, taken from the MuJoCo model
        act_bounds = self.model.actuator_ctrlrange.copy().T
        return BoxSpace(*act_bounds, labels=["bthigh", "bshin", "bfoot", "fthigh", "fshin", "ffoot"])

    @classmethod
    def get_nominal_domain_param(cls) -> dict:
        """Return the nominal (randomizable) domain parameters of this environment."""
        return dict(
            reset_noise_halfspan=0.0,  # fixed initial state by default
            total_mass=14,
            tangential_friction_coeff=0.4,
            torsional_friction_coeff=0.1,
            rolling_friction_coeff=0.1,
        )

    def _create_task(self, task_args: dict) -> Task:
        # Reward running forward, penalize control effort (defaults match the OpenAI Gym variant)
        if "fwd_rew_weight" not in task_args:
            task_args["fwd_rew_weight"] = 1.0
        if "ctrl_cost_weight" not in task_args:
            task_args["ctrl_cost_weight"] = 0.1
        return GoallessTask(self.spec, ForwardVelocityRewFcn(self._dt, idx_fwd=0, **task_args))

    def _mujoco_step(self, act: np.ndarray) -> dict:
        # Apply the action, advance the simulation, and cache the new state
        self.sim.data.ctrl[:] = act
        self.sim.step()

        pos = self.sim.data.qpos.copy()
        vel = self.sim.data.qvel.copy()
        self.state = np.concatenate([pos, vel])

        return dict()

    def observe(self, state: np.ndarray) -> np.ndarray:
        # Ignore horizontal position to maintain translational invariance
        return state[1:].copy()
| 44.478261 | 117 | 0.70593 |
import os.path as osp
from typing import Optional
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.environments.mujoco.base import MujocoSimEnv
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.tasks.base import Task
from pyrado.tasks.goalless import GoallessTask
from pyrado.tasks.reward_functions import ForwardVelocityRewFcn
class HalfCheetahSim(MujocoSimEnv, Serializable):
name: str = "cth"
def __init__(
self,
frame_skip: int = 5,
dt: Optional[float] = None,
max_steps: int = 1000,
task_args: Optional[dict] = None,
):
model_path = osp.join(osp.dirname(__file__), "assets", "openai_half_cheetah.xml")
super().__init__(model_path, frame_skip, dt, max_steps, task_args)
# Initial state
noise_halfspan = self.domain_param["reset_noise_halfspan"]
min_init_qpos = self.init_qpos - np.full_like(self.init_qpos, noise_halfspan)
max_init_qpos = self.init_qpos + np.full_like(self.init_qpos, noise_halfspan)
min_init_qvel = self.init_qvel - np.full_like(self.init_qpos, noise_halfspan)
max_init_qvel = self.init_qvel + np.full_like(self.init_qpos, noise_halfspan)
min_init_state = np.concatenate([min_init_qpos, min_init_qvel]).ravel()
max_init_state = np.concatenate([max_init_qpos, max_init_qvel]).ravel()
self._init_space = BoxSpace(min_init_state, max_init_state)
self.camera_config = dict(distance=5.0)
@property
def state_space(self) -> Space:
state_shape = np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).shape
return BoxSpace(-pyrado.inf, pyrado.inf, shape=state_shape)
@property
def obs_space(self) -> Space:
obs_shape = self.observe(self.state_space.bound_up).shape
return BoxSpace(-pyrado.inf, pyrado.inf, shape=obs_shape)
@property
def act_space(self) -> Space:
act_bounds = self.model.actuator_ctrlrange.copy().T
return BoxSpace(*act_bounds, labels=["bthigh", "bshin", "bfoot", "fthigh", "fshin", "ffoot"])
@classmethod
def get_nominal_domain_param(cls) -> dict:
return dict(
reset_noise_halfspan=0.0, # fixed initial state by default
total_mass=14,
tangential_friction_coeff=0.4,
torsional_friction_coeff=0.1,
rolling_friction_coeff=0.1,
)
def _create_task(self, task_args: dict) -> Task:
if "fwd_rew_weight" not in task_args:
task_args["fwd_rew_weight"] = 1.0
if "ctrl_cost_weight" not in task_args:
task_args["ctrl_cost_weight"] = 0.1
return GoallessTask(self.spec, ForwardVelocityRewFcn(self._dt, idx_fwd=0, **task_args))
def _mujoco_step(self, act: np.ndarray) -> dict:
self.sim.data.ctrl[:] = act
self.sim.step()
pos = self.sim.data.qpos.copy()
vel = self.sim.data.qvel.copy()
self.state = np.concatenate([pos, vel])
return dict()
def observe(self, state: np.ndarray) -> np.ndarray:
    """Return the observation: the state without the horizontal root position.

    Dropping index 0 keeps the observation invariant to translation along the
    running direction.
    """
    return np.array(state[1:])
| true | true |
f72289075635f87bd323ca7a7f73bad787bdd356 | 5,548 | py | Python | pcstac/pcstac/main.py | hobu/planetary-computer-apis | 27f5b8ce78737f43b306fa4738007c207a329b5b | [
"MIT"
] | null | null | null | pcstac/pcstac/main.py | hobu/planetary-computer-apis | 27f5b8ce78737f43b306fa4738007c207a329b5b | [
"MIT"
] | null | null | null | pcstac/pcstac/main.py | hobu/planetary-computer-apis | 27f5b8ce78737f43b306fa4738007c207a329b5b | [
"MIT"
] | null | null | null | """FastAPI application using PGStac."""
import logging
import os
from typing import Any, Awaitable, Callable, Dict, List
from fastapi import FastAPI, Request, Response
from fastapi.exceptions import RequestValidationError, StarletteHTTPException
from fastapi.openapi.utils import get_openapi
from fastapi.responses import ORJSONResponse
from stac_fastapi.api.errors import DEFAULT_STATUS_CODES
from stac_fastapi.api.models import create_get_request_model, create_post_request_model
from stac_fastapi.extensions.core import (
ContextExtension,
FieldsExtension,
FilterExtension,
QueryExtension,
SortExtension,
TokenPaginationExtension,
)
from stac_fastapi.pgstac.config import Settings
from stac_fastapi.pgstac.db import close_db_connection, connect_to_db
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import PlainTextResponse
from pccommon.logging import init_logging
from pccommon.middleware import handle_exceptions
from pccommon.openapi import fixup_schema
from pcstac.api import PCStacApi
from pcstac.client import PCClient
from pcstac.config import API_DESCRIPTION, API_TITLE, API_VERSION, get_settings
from pcstac.errors import PC_DEFAULT_STATUS_CODES
from pcstac.middleware import trace_request
from pcstac.search import PCSearch
# The comparison already yields a bool; only the exact string "TRUE" enables
# debug mode. (The original trailing "or False" was redundant.)
DEBUG: bool = os.getenv("DEBUG") == "TRUE"

# Initialize logging
init_logging("stac")

logger = logging.getLogger(__name__)

# Get the root path if set in the environment
APP_ROOT_PATH = os.environ.get("APP_ROOT_PATH", "")
logger.info(f"APP_ROOT_PATH: {APP_ROOT_PATH}")

INCLUDE_TRANSACTIONS = os.environ.get("INCLUDE_TRANSACTIONS", "") == "yes"
logger.info(f"INCLUDE_TRANSACTIONS: {INCLUDE_TRANSACTIONS}")

# Allow setting of SQLAlchemy connection pools
POOL_SIZE = int(os.environ.get("POOL_SIZE", "1"))
logger.info(f"POOL_SIZE: {POOL_SIZE}")
# STAC API extensions enabled for this deployment; they drive both the
# generated request models below and the advertised conformance classes.
extensions = [
    QueryExtension(),
    SortExtension(),
    FieldsExtension(),
    FilterExtension(),
    TokenPaginationExtension(),
    ContextExtension(),
]

# Planetary Computer conformance classes differ from the default
# stac-fastapi case so they are manually specified
cql_conformance_classes: List[str] = [
    "https://api.stacspec.org/v1.0.0-beta.3/item-search/#fields",
    "https://api.stacspec.org/v1.0.0-beta.3/item-search#filter",
    "https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:cql-json",
    "https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:filter",
    "https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:item-search-filter",
    "https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:basic-spatial-operators",
    (
        "https://api.stacspec.org/v1.0.0-beta.3/item-search"
        "#filter:basic-temporal-operators"
    ),
    "https://api.stacspec.org/v1.0.0-beta.3/item-search/#sort",
    "https://api.stacspec.org/v1.0.0-beta.3/item-search/#query",
]

collections_conformance_classes: List[str] = [
    "http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/oas30",
]

extra_conformance_classes = cql_conformance_classes + collections_conformance_classes

# Request models are generated from the enabled extensions so GET and POST
# search parameters validate consistently; POST is based on the custom PCSearch.
search_get_request_model = create_get_request_model(extensions)
search_post_request_model = create_post_request_model(extensions, base_model=PCSearch)

api = PCStacApi(
    title=API_TITLE,
    description=API_DESCRIPTION,
    api_version=API_VERSION,
    settings=Settings(debug=DEBUG),
    client=PCClient.create(
        post_request_model=search_post_request_model,
        extra_conformance_classes=extra_conformance_classes,
    ),
    extensions=extensions,
    app=FastAPI(root_path=APP_ROOT_PATH, default_response_class=ORJSONResponse),
    search_get_request_model=search_get_request_model,
    search_post_request_model=search_post_request_model,
    response_class=ORJSONResponse,
    # Merge library defaults with Planetary-Computer-specific status mappings.
    exceptions={**DEFAULT_STATUS_CODES, **PC_DEFAULT_STATUS_CODES},
)

app: FastAPI = api.app

# Read-only API: browser clients may call it from any origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins="*",
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)
@app.middleware("http")
async def _handle_exceptions(
    request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
    """Delegate every request to the shared pccommon exception handler."""
    return await handle_exceptions(request, call_next)
@app.middleware("http")
async def _trace_request(
    request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
    """Wrap each request with the tracing logic from pcstac.middleware."""
    return await trace_request(request, call_next)
@app.on_event("startup")
async def startup_event() -> None:
    """Connect to database on startup."""
    # NOTE(review): connect_to_db attaches the (presumably pooled) pgstac
    # connection to ``app`` for request handlers — confirm in stac-fastapi-pgstac.
    await connect_to_db(app)
@app.on_event("shutdown")
async def shutdown_event() -> None:
    """Close database connection."""
    await close_db_connection(app)
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(
    request: Request, exc: StarletteHTTPException
) -> PlainTextResponse:
    """Render HTTP exceptions as plain text, preserving the status code."""
    detail = str(exc.detail)
    return PlainTextResponse(detail, status_code=exc.status_code)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(
    request: Request, exc: RequestValidationError
) -> PlainTextResponse:
    """Render request-validation failures as a 400 plain-text response."""
    body = str(exc)
    return PlainTextResponse(body, status_code=400)
def custom_openapi() -> Dict[str, Any]:
    """Return the OpenAPI schema, patched for the app root path and cached.

    The schema is generated once, passed through ``fixup_schema``, and cached
    on the FastAPI app; subsequent calls return the cached copy.

    Fixes over the previous version: the leftover debug ``print``/``import
    json`` is removed, and the first call now returns the same fixed-up,
    cached schema that later calls return (it previously returned the
    pre-``fixup_schema`` dict).
    """
    if app.openapi_schema:
        return app.openapi_schema
    schema = get_openapi(
        title="Planetary Computer STAC API",
        version=get_settings().api_version,
        routes=app.routes,
    )
    app.openapi_schema = fixup_schema(app.root_path, schema)
    return app.openapi_schema
| 33.221557 | 88 | 0.758291 | import logging
import os
from typing import Any, Awaitable, Callable, Dict, List
from fastapi import FastAPI, Request, Response
from fastapi.exceptions import RequestValidationError, StarletteHTTPException
from fastapi.openapi.utils import get_openapi
from fastapi.responses import ORJSONResponse
from stac_fastapi.api.errors import DEFAULT_STATUS_CODES
from stac_fastapi.api.models import create_get_request_model, create_post_request_model
from stac_fastapi.extensions.core import (
ContextExtension,
FieldsExtension,
FilterExtension,
QueryExtension,
SortExtension,
TokenPaginationExtension,
)
from stac_fastapi.pgstac.config import Settings
from stac_fastapi.pgstac.db import close_db_connection, connect_to_db
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import PlainTextResponse
from pccommon.logging import init_logging
from pccommon.middleware import handle_exceptions
from pccommon.openapi import fixup_schema
from pcstac.api import PCStacApi
from pcstac.client import PCClient
from pcstac.config import API_DESCRIPTION, API_TITLE, API_VERSION, get_settings
from pcstac.errors import PC_DEFAULT_STATUS_CODES
from pcstac.middleware import trace_request
from pcstac.search import PCSearch
DEBUG: bool = os.getenv("DEBUG") == "TRUE" or False
init_logging("stac")
logger = logging.getLogger(__name__)
APP_ROOT_PATH = os.environ.get("APP_ROOT_PATH", "")
logger.info(f"APP_ROOT_PATH: {APP_ROOT_PATH}")
INCLUDE_TRANSACTIONS = os.environ.get("INCLUDE_TRANSACTIONS", "") == "yes"
logger.info(f"INCLUDE_TRANSACTIONS: {INCLUDE_TRANSACTIONS}")
POOL_SIZE = int(os.environ.get("POOL_SIZE", "1"))
logger.info(f"POOL_SIZE: {POOL_SIZE}")
extensions = [
QueryExtension(),
SortExtension(),
FieldsExtension(),
FilterExtension(),
TokenPaginationExtension(),
ContextExtension(),
]
cql_conformance_classes: List[str] = [
"https://api.stacspec.org/v1.0.0-beta.3/item-search/#fields",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:cql-json",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:filter",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:item-search-filter",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:basic-spatial-operators",
(
"https://api.stacspec.org/v1.0.0-beta.3/item-search"
"#filter:basic-temporal-operators"
),
"https://api.stacspec.org/v1.0.0-beta.3/item-search/#sort",
"https://api.stacspec.org/v1.0.0-beta.3/item-search/#query",
]
collections_conformance_classes: List[str] = [
"http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/oas30",
]
extra_conformance_classes = cql_conformance_classes + collections_conformance_classes
search_get_request_model = create_get_request_model(extensions)
search_post_request_model = create_post_request_model(extensions, base_model=PCSearch)
api = PCStacApi(
title=API_TITLE,
description=API_DESCRIPTION,
api_version=API_VERSION,
settings=Settings(debug=DEBUG),
client=PCClient.create(
post_request_model=search_post_request_model,
extra_conformance_classes=extra_conformance_classes,
),
extensions=extensions,
app=FastAPI(root_path=APP_ROOT_PATH, default_response_class=ORJSONResponse),
search_get_request_model=search_get_request_model,
search_post_request_model=search_post_request_model,
response_class=ORJSONResponse,
exceptions={**DEFAULT_STATUS_CODES, **PC_DEFAULT_STATUS_CODES},
)
app: FastAPI = api.app
app.add_middleware(
CORSMiddleware,
allow_origins="*",
allow_methods=["GET", "POST"],
allow_headers=["*"],
)
@app.middleware("http")
async def _handle_exceptions(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
return await handle_exceptions(request, call_next)
@app.middleware("http")
async def _trace_request(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
return await trace_request(request, call_next)
@app.on_event("startup")
async def startup_event() -> None:
await connect_to_db(app)
@app.on_event("shutdown")
async def shutdown_event() -> None:
await close_db_connection(app)
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(
request: Request, exc: StarletteHTTPException
) -> PlainTextResponse:
return PlainTextResponse(str(exc.detail), status_code=exc.status_code)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(
request: Request, exc: RequestValidationError
) -> PlainTextResponse:
return PlainTextResponse(str(exc), status_code=400)
def custom_openapi() -> Dict[str, Any]:
if app.openapi_schema:
return app.openapi_schema
else:
schema = get_openapi(
title="Planetary Computer STAC API",
version=get_settings().api_version,
routes=app.routes,
)
app.openapi_schema = fixup_schema(app.root_path, schema)
import json
print(json.dumps(app.openapi_schema["paths"], indent=2))
return schema
| true | true |
f722898d294b0b3ae4182e3d3eb04c88b6048abf | 4,832 | py | Python | py/tests/test_tensor_ops.py | guidj/attx | 6a17ed393ab1f1723e7a9d8890da6313bb96c75f | [
"Apache-2.0"
] | null | null | null | py/tests/test_tensor_ops.py | guidj/attx | 6a17ed393ab1f1723e7a9d8890da6313bb96c75f | [
"Apache-2.0"
] | 5 | 2020-11-13T19:02:33.000Z | 2022-02-10T02:23:14.000Z | py/tests/test_tensor_ops.py | guidj/attx | 6a17ed393ab1f1723e7a9d8890da6313bb96c75f | [
"Apache-2.0"
] | null | null | null | import pytest
import tensorflow as tf
import numpy as np
from attx import tensor_ops
def test_single_example_batch_single_step_sequence_with_high_dimension():
    """A single-timestep sequence must receive the full attention weight (1.0)."""
    # (?, k, dk) = (1, 1, 4)
    query_1 = [[1, 2, 3, 4]]
    key_1 = [[1, 1, 1, 1]]
    value_1 = [[10, 10, 10, 10]]

    query = tf.cast([query_1], tf.float32)
    key = tf.cast([key_1], tf.float32)
    value = tf.cast([value_1], tf.float32)

    # Softmax over one position is trivially 1, so the output equals the value.
    expected_att_1 = [[1.0]]
    expected_output_1 = [[10.0, 10.0, 10.0, 10.0]]

    expected_attention = np.array([expected_att_1])
    expected_value = np.array([expected_output_1])

    output_attention, output_value = tensor_ops.attention(query, key, value)

    np.testing.assert_array_almost_equal(
        output_attention, expected_attention, decimal=3,
    )
    np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
def test_single_example_batch_multi_step_sequence_with_high_dimension():
    """Identical keys yield uniform attention, so outputs average the values."""
    # (?, k, dk) = (1, 2, 4)
    query_1 = [[1, 3, 5, 7], [2, 4, 6, 8]]
    key_1 = [[1, 1, 1, 1], [1, 1, 1, 1]]
    value_1 = [[10, 10, 10, 10], [50, 50, 50, 50]]

    query = tf.cast([query_1], tf.float32)
    key = tf.cast([key_1], tf.float32)
    value = tf.cast([value_1], tf.float32)

    # Equal scores -> 0.5/0.5 weights -> output is the mean of 10s and 50s.
    expected_att_1 = [[0.5, 0.5], [0.5, 0.5]]
    expected_output_1 = [[30.0, 30.0, 30.0, 30.0], [30.0, 30.0, 30.0, 30.0]]

    expected_attention = np.array([expected_att_1])
    expected_value = np.array([expected_output_1])

    output_attention, output_value = tensor_ops.attention(query, key, value)

    np.testing.assert_array_almost_equal(
        output_attention, expected_attention, decimal=3,
    )
    np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
def test_single_example_batch_multi_step_sequence_with_single_dimension():
    """With dk=1 and identical keys, attention is uniform over the 4 steps."""
    # (?, k, dk) = (1, 4, 1)
    query_1 = [[1], [2], [3], [4]]
    key_1 = [[1], [1], [1], [1]]
    # Fixed: was `[10], [10], [10], [10]` (a tuple of lists) — numerically
    # equivalent after tf.cast, but inconsistent with every sibling fixture's
    # list-of-lists form.
    value_1 = [[10], [10], [10], [10]]

    query = tf.cast([query_1], tf.float32)
    key = tf.cast([key_1], tf.float32)
    value = tf.cast([value_1], tf.float32)

    # Equal scores over 4 positions -> 1/4 weight each; all values are 10,
    # so every output row is 10.
    expected_att_1 = [
        [1 / 4, 1 / 4, 1 / 4, 1 / 4],
        [1 / 4, 1 / 4, 1 / 4, 1 / 4],
        [1 / 4, 1 / 4, 1 / 4, 1 / 4],
        [1 / 4, 1 / 4, 1 / 4, 1 / 4],
    ]
    expected_output_1 = [[10], [10], [10], [10]]

    expected_attention = np.array([expected_att_1])
    expected_value = np.array([expected_output_1])

    output_attention, output_value = tensor_ops.attention(query, key, value)

    np.testing.assert_array_almost_equal(
        output_attention, expected_attention, decimal=3,
    )
    np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
def test_multi_example_batch_multi_step_sequence_with_high_dimension():
    """Each batch element gets independent attention; example 2 has non-uniform keys."""
    # (?, k, dk) = (2, 2, 4)
    query_1 = [[1, 3, 5, 7], [2, 4, 6, 8]]
    query_2 = [[1, 3, 5, 7], [2, 4, 6, 8]]
    key_1 = [[1, 1, 1, 1], [1, 1, 1, 1]]
    key_2 = [[1, 2, 1, 2], [2, 1, 2, 1]]
    value_1 = [[10, 10, 10, 10], [50, 50, 50, 50]]
    value_2 = [[10, 10, 10, 10], [50, 50, 50, 50]]

    query = tf.cast([query_1, query_2], tf.float32)
    key = tf.cast([key_1, key_2], tf.float32)
    value = tf.cast([value_1, value_2], tf.float32,)

    # Example 1: identical keys -> uniform weights. Example 2: distinct keys
    # -> softmax weights 0.881/0.119 and a correspondingly weighted output.
    expected_att_1 = [[0.5, 0.5], [0.5, 0.5]]
    expected_att_2 = [[0.881, 0.119], [0.881, 0.119]]
    expected_output_1 = [[30.0, 30.0, 30.0, 30.0], [30.0, 30.0, 30.0, 30.0]]
    expected_output_2 = [
        [369 / 25, 369 / 25, 369 / 25, 369 / 25],
        [369 / 25, 369 / 25, 369 / 25, 369 / 25],
    ]

    expected_attention = np.array([expected_att_1, expected_att_2])
    expected_value = np.array([expected_output_1, expected_output_2])

    output_attention, output_value = tensor_ops.attention(query, key, value)

    np.testing.assert_array_almost_equal(
        output_attention, expected_attention, decimal=3,
    )
    np.testing.assert_array_almost_equal(output_value, expected_value, decimal=2)
def test_single_example_batch_multi_step_sequence_with_high_dimension_and_different_value_dimension():
    """The value dimension dv may differ from dk; output shape follows dv."""
    # (?, k, dk) = (1, 2, 4)
    query_1 = [[1, 3, 5, 7], [2, 4, 6, 8]]
    key_1 = [[1, 1, 1, 1], [1, 1, 1, 1]]
    # (?, k, dv) = (1, 2, 5)
    value_1 = [[10, 10, 10, 10, 10], [50, 50, 50, 50, 50]]

    query = tf.cast([query_1], tf.float32)
    key = tf.cast([key_1], tf.float32)
    value = tf.cast([value_1], tf.float32)

    # Uniform weights (identical keys) -> mean of the dv=5 value rows.
    expected_att_1 = [[0.5, 0.5], [0.5, 0.5]]
    expected_output_1 = [[30.0, 30.0, 30.0, 30.0, 30.0], [30.0, 30.0, 30.0, 30.0, 30.0]]

    expected_attention = np.array([expected_att_1])
    expected_value = np.array([expected_output_1])

    output_attention, output_value = tensor_ops.attention(query, key, value)

    np.testing.assert_array_almost_equal(
        output_attention, expected_attention, decimal=3,
    )
    np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
| 34.514286 | 102 | 0.619826 | import pytest
import tensorflow as tf
import numpy as np
from attx import tensor_ops
def test_single_example_batch_single_step_sequence_with_high_dimension():
query_1 = [[1, 2, 3, 4]]
key_1 = [[1, 1, 1, 1]]
value_1 = [[10, 10, 10, 10]]
query = tf.cast([query_1], tf.float32)
key = tf.cast([key_1], tf.float32)
value = tf.cast([value_1], tf.float32)
expected_att_1 = [[1.0]]
expected_output_1 = [[10.0, 10.0, 10.0, 10.0]]
expected_attention = np.array([expected_att_1])
expected_value = np.array([expected_output_1])
output_attention, output_value = tensor_ops.attention(query, key, value)
np.testing.assert_array_almost_equal(
output_attention, expected_attention, decimal=3,
)
np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
def test_single_example_batch_multi_step_sequence_with_high_dimension():
query_1 = [[1, 3, 5, 7], [2, 4, 6, 8]]
key_1 = [[1, 1, 1, 1], [1, 1, 1, 1]]
value_1 = [[10, 10, 10, 10], [50, 50, 50, 50]]
query = tf.cast([query_1], tf.float32)
key = tf.cast([key_1], tf.float32)
value = tf.cast([value_1], tf.float32)
expected_att_1 = [[0.5, 0.5], [0.5, 0.5]]
expected_output_1 = [[30.0, 30.0, 30.0, 30.0], [30.0, 30.0, 30.0, 30.0]]
expected_attention = np.array([expected_att_1])
expected_value = np.array([expected_output_1])
output_attention, output_value = tensor_ops.attention(query, key, value)
np.testing.assert_array_almost_equal(
output_attention, expected_attention, decimal=3,
)
np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
def test_single_example_batch_multi_step_sequence_with_single_dimension():
query_1 = [[1], [2], [3], [4]]
key_1 = [[1], [1], [1], [1]]
value_1 = [10], [10], [10], [10]
query = tf.cast([query_1], tf.float32)
key = tf.cast([key_1], tf.float32)
value = tf.cast([value_1], tf.float32)
expected_att_1 = [
[1 / 4, 1 / 4, 1 / 4, 1 / 4],
[1 / 4, 1 / 4, 1 / 4, 1 / 4],
[1 / 4, 1 / 4, 1 / 4, 1 / 4],
[1 / 4, 1 / 4, 1 / 4, 1 / 4],
]
expected_output_1 = [[10], [10], [10], [10]]
expected_attention = np.array([expected_att_1])
expected_value = np.array([expected_output_1])
output_attention, output_value = tensor_ops.attention(query, key, value)
np.testing.assert_array_almost_equal(
output_attention, expected_attention, decimal=3,
)
np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
def test_multi_example_batch_multi_step_sequence_with_high_dimension():
query_1 = [[1, 3, 5, 7], [2, 4, 6, 8]]
query_2 = [[1, 3, 5, 7], [2, 4, 6, 8]]
key_1 = [[1, 1, 1, 1], [1, 1, 1, 1]]
key_2 = [[1, 2, 1, 2], [2, 1, 2, 1]]
value_1 = [[10, 10, 10, 10], [50, 50, 50, 50]]
value_2 = [[10, 10, 10, 10], [50, 50, 50, 50]]
query = tf.cast([query_1, query_2], tf.float32)
key = tf.cast([key_1, key_2], tf.float32)
value = tf.cast([value_1, value_2], tf.float32,)
expected_att_1 = [[0.5, 0.5], [0.5, 0.5]]
expected_att_2 = [[0.881, 0.119], [0.881, 0.119]]
expected_output_1 = [[30.0, 30.0, 30.0, 30.0], [30.0, 30.0, 30.0, 30.0]]
expected_output_2 = [
[369 / 25, 369 / 25, 369 / 25, 369 / 25],
[369 / 25, 369 / 25, 369 / 25, 369 / 25],
]
expected_attention = np.array([expected_att_1, expected_att_2])
expected_value = np.array([expected_output_1, expected_output_2])
output_attention, output_value = tensor_ops.attention(query, key, value)
np.testing.assert_array_almost_equal(
output_attention, expected_attention, decimal=3,
)
np.testing.assert_array_almost_equal(output_value, expected_value, decimal=2)
def test_single_example_batch_multi_step_sequence_with_high_dimension_and_different_value_dimension():
query_1 = [[1, 3, 5, 7], [2, 4, 6, 8]]
key_1 = [[1, 1, 1, 1], [1, 1, 1, 1]]
value_1 = [[10, 10, 10, 10, 10], [50, 50, 50, 50, 50]]
query = tf.cast([query_1], tf.float32)
key = tf.cast([key_1], tf.float32)
value = tf.cast([value_1], tf.float32)
expected_att_1 = [[0.5, 0.5], [0.5, 0.5]]
expected_output_1 = [[30.0, 30.0, 30.0, 30.0, 30.0], [30.0, 30.0, 30.0, 30.0, 30.0]]
expected_attention = np.array([expected_att_1])
expected_value = np.array([expected_output_1])
output_attention, output_value = tensor_ops.attention(query, key, value)
np.testing.assert_array_almost_equal(
output_attention, expected_attention, decimal=3,
)
np.testing.assert_array_almost_equal(output_value, expected_value, decimal=3)
| true | true |
f7228a1713dc9b4f9227badc946aff093ca9b0cb | 1,234 | py | Python | decorest/types.py | bkryza/decorest | 95070f1d90eef6e0042b51d4391dc3fbf4779b45 | [
"Apache-2.0"
] | 21 | 2018-02-24T07:14:47.000Z | 2022-02-15T06:50:06.000Z | decorest/types.py | bkryza/decorest | 95070f1d90eef6e0042b51d4391dc3fbf4779b45 | [
"Apache-2.0"
] | 3 | 2018-09-08T09:54:29.000Z | 2022-01-13T08:53:59.000Z | decorest/types.py | bkryza/decorest | 95070f1d90eef6e0042b51d4391dc3fbf4779b45 | [
"Apache-2.0"
] | 4 | 2018-04-02T14:49:08.000Z | 2020-02-12T02:43:44.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018-2021 Bartosz Kryza <bkryza@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various types related to HTTP and REST."""
from six import PY3

# Python 2 has no stdlib ``enum`` module, so default the base classes to plain
# ``object`` (members then behave as simple class attributes) and upgrade to
# real Enum/IntEnum bases on Python 3.
DEnum = object
DIntEnum = object

if PY3:
    import enum
    DEnum = enum.Enum
    DIntEnum = enum.IntEnum
class HttpMethod(DEnum):
    """Enum with HTTP methods.

    Fix: the previous trailing commas turned every member value except
    ``OPTIONS`` into a 1-tuple (e.g. ``('GET',)`` under Python 3's Enum),
    inconsistent with ``OPTIONS = 'OPTIONS'``. All values are now plain
    strings.
    """

    GET = 'GET'
    POST = 'POST'
    PUT = 'PUT'
    PATCH = 'PATCH'
    DELETE = 'DELETE'
    HEAD = 'HEAD'
    OPTIONS = 'OPTIONS'
class HttpStatus(DIntEnum):
    """Enum with HTTP error code classes.

    Fix: trailing commas previously made most member definitions 1-tuples,
    inconsistent with ``ANY = 999``; all members are now plain ints.
    """

    INFORMATIONAL_RESPONSE = 1
    SUCCESS = 2
    REDIRECTION = 3
    CLIENT_ERROR = 4
    SERVER_ERROR = 5
    ANY = 999  # Same as Ellipsis '...'
| 24.68 | 74 | 0.67423 |
from six import PY3
DEnum = object
DIntEnum = object
if PY3:
import enum
DEnum = enum.Enum
DIntEnum = enum.IntEnum
class HttpMethod(DEnum):
GET = 'GET',
POST = 'POST',
PUT = 'PUT',
PATCH = 'PATCH',
DELETE = 'DELETE',
HEAD = 'HEAD',
OPTIONS = 'OPTIONS'
class HttpStatus(DIntEnum):
INFORMATIONAL_RESPONSE = 1,
SUCCESS = 2,
REDIRECTION = 3,
CLIENT_ERROR = 4,
SERVER_ERROR = 5,
ANY = 999
| true | true |
f7228aa9f30ca6f334bcf414f70343a2d749e3eb | 8,298 | py | Python | glanceclient/v1/shell.py | dreamhost/python-glanceclient | b9b897252868732763de60d829b5c8de188adf38 | [
"Apache-2.0"
] | null | null | null | glanceclient/v1/shell.py | dreamhost/python-glanceclient | b9b897252868732763de60d829b5c8de188adf38 | [
"Apache-2.0"
] | null | null | null | glanceclient/v1/shell.py | dreamhost/python-glanceclient | b9b897252868732763de60d829b5c8de188adf38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
from glanceclient.common import utils
import glanceclient.v1.images
def do_image_list(gc, args):
    """List images."""
    columns = ['ID', 'Name', 'Disk Format', 'Container Format',
               'Size', 'Status']
    utils.print_list(gc.images.list(), columns)
def _image_show(image):
    """Print an image's attributes, lifting each custom property to a
    top-level 'Property <key>' row for display."""
    # Flatten image properties dict for display
    info = copy.deepcopy(image._info)
    for (k, v) in info.pop('properties').iteritems():
        info['Property \'%s\'' % k] = v
    utils.print_dict(info)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
def do_image_show(gc, args):
    """Describe a specific image."""
    _image_show(gc.images.get(args.id))
@utils.arg('--id', metavar='<IMAGE_ID>',
           help='ID of image to reserve.')
@utils.arg('--name', metavar='<NAME>',
           help='Name of image.')
# Fix: the metavars for --disk-format and --container-format were swapped.
@utils.arg('--disk-format', metavar='<DISK_FORMAT>',
           help='Disk format of image.')
@utils.arg('--container-format', metavar='<CONTAINER_FORMAT>',
           help='Container format of image.')
@utils.arg('--owner', metavar='<TENANT_ID>',
           help='Tenant who should own image.')
@utils.arg('--size', metavar='<SIZE>',
           help=('Size of image data (in bytes). Only used with'
                 ' \'--location\' and \'--copy_from\'.'))
@utils.arg('--min-disk', metavar='<DISK_GB>',
           help='Minimum size of disk needed to boot image (in gigabytes).')
@utils.arg('--min-ram', metavar='<DISK_RAM>',
           help='Minimum amount of ram needed to boot image (in megabytes).')
@utils.arg('--location', metavar='<IMAGE_URL>',
           help=('URL where the data for this image already resides.'
                 ' For example, if the image data is stored in the filesystem'
                 ' local to the glance server at \'/usr/share/image.tar.gz\','
                 ' you would specify \'file:///usr/share/image.tar.gz\'.'))
@utils.arg('--checksum', metavar='<CHECKSUM>',
           help='Hash of image data used Glance can use for verification.')
@utils.arg('--copy-from', metavar='<IMAGE_URL>',
           help=('Similar to \'--location\' in usage, but this indicates that'
                 ' the Glance server should immediately copy the data and'
                 ' store it in its configured image store.'))
@utils.arg('--public', action='store_true', default=False,
           help='Make image accessible to the public.')
@utils.arg('--protected', action='store_true', default=False,
           help='Prevent image from being deleted.')
@utils.arg('--property', metavar="<key=value>", action='append', default=[],
           help=("Arbitrary property to associate with image. "
                 "May be used multiple times."))
def do_image_create(gc, args):
    """Create a new image, streaming data from stdin unless a source URL is given."""
    # Filter out None values
    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))

    fields['is_public'] = fields.pop('public')

    # Each --property is 'key=value'; split on the first '=' only so values
    # may themselves contain '='.
    raw_properties = fields.pop('property')
    fields['properties'] = {}
    for datum in raw_properties:
        key, value = datum.split('=', 1)
        fields['properties'][key] = value

    # Filter out values we can't use
    CREATE_PARAMS = glanceclient.v1.images.CREATE_PARAMS
    fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))

    # No remote source given: the image payload is read from stdin.
    if 'location' not in fields and 'copy_from' not in fields:
        fields['data'] = sys.stdin

    image = gc.images.create(**fields)
    _image_show(image)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to modify.')
@utils.arg('--name', metavar='<NAME>',
           help='Name of image.')
# Fix: the metavars for --disk-format and --container-format were swapped.
@utils.arg('--disk-format', metavar='<DISK_FORMAT>',
           help='Disk format of image.')
@utils.arg('--container-format', metavar='<CONTAINER_FORMAT>',
           help='Container format of image.')
@utils.arg('--owner', metavar='<TENANT_ID>',
           help='Tenant who should own image.')
@utils.arg('--size', metavar='<SIZE>',
           help='Size of image data (in bytes).')
@utils.arg('--min-disk', metavar='<DISK_GB>',
           help='Minimum size of disk needed to boot image (in gigabytes).')
@utils.arg('--min-ram', metavar='<DISK_RAM>',
           help='Minimum amount of ram needed to boot image (in megabytes).')
@utils.arg('--location', metavar='<IMAGE_URL>',
           help=('URL where the data for this image already resides.'
                 ' For example, if the image data is stored in the filesystem'
                 ' local to the glance server at \'/usr/share/image.tar.gz\','
                 ' you would specify \'file:///usr/share/image.tar.gz\'.'))
@utils.arg('--checksum', metavar='<CHECKSUM>',
           help='Hash of image data used Glance can use for verification.')
@utils.arg('--copy-from', metavar='<IMAGE_URL>',
           help=('Similar to \'--location\' in usage, but this indicates that'
                 ' the Glance server should immediately copy the data and'
                 ' store it in its configured image store.'))
# NOTE(review): argparse's type=bool treats any non-empty string (including
# 'False') as truthy — consider a real string-to-bool parser. Left unchanged
# to preserve existing CLI behavior.
@utils.arg('--is-public', type=bool,
           help='Make image accessible to the public.')
@utils.arg('--is-protected', type=bool,
           help='Prevent image from being deleted.')
@utils.arg('--property', metavar="<key=value>", action='append', default=[],
           help=("Arbitrary property to associate with image. "
                 "May be used multiple times."))
def do_image_update(gc, args):
    """Update an existing image, streaming new data from stdin unless a source URL is given."""
    # Filter out None values
    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))

    image_id = fields.pop('id')

    # Each --property is 'key=value'; split on the first '=' only.
    raw_properties = fields.pop('property')
    fields['properties'] = {}
    for datum in raw_properties:
        key, value = datum.split('=', 1)
        fields['properties'][key] = value

    # Filter out values we can't use
    UPDATE_PARAMS = glanceclient.v1.images.UPDATE_PARAMS
    fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))

    # No remote source given: replacement image data is read from stdin.
    if 'location' not in fields and 'copy_from' not in fields:
        fields['data'] = sys.stdin

    image = gc.images.update(image_id, **fields)
    _image_show(image)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to delete.')
def do_image_delete(gc, args):
    """Delete a specific image."""
    # NOTE(review): no client-side confirmation; the server presumably rejects
    # deletion of protected images — confirm against the Glance API.
    gc.images.delete(args.id)
@utils.arg('--image-id', metavar='<IMAGE_ID>',
           help='Filter results by an image ID.')
@utils.arg('--tenant-id', metavar='<TENANT_ID>',
           help='Filter results by a tenant ID.')
def do_member_list(gc, args):
    """List image memberships, filtered by exactly one of image or tenant."""
    # Exactly one filter must be supplied: both or neither is an error.
    if args.image_id and args.tenant_id:
        print 'Unable to filter members by both --image-id and --tenant-id.'
        sys.exit(1)
    elif args.image_id:
        kwargs = {'image': args.image_id}
    elif args.tenant_id:
        kwargs = {'member': args.tenant_id}
    else:
        print 'Unable to list all members. Specify --image-id or --tenant-id'
        sys.exit(1)

    members = gc.image_members.list(**kwargs)
    columns = ['Image ID', 'Member ID', 'Can Share']
    utils.print_list(members, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
           help='Image to add member to.')
@utils.arg('tenant_id', metavar='<TENANT_ID>',
           help='Tenant to add as member')
@utils.arg('--can-share', action='store_true', default=False,
           help='Allow the specified tenant to share this image.')
def do_member_create(gc, args):
    """Share an image with a tenant, optionally allowing further re-sharing."""
    gc.image_members.create(args.image_id, args.tenant_id, args.can_share)
# Fix: help strings were copy-pasted from member-create ('Image to add
# member to.' on a delete command); they now describe removal.
@utils.arg('image_id', metavar='<IMAGE_ID>',
           help='Image from which to remove member.')
@utils.arg('tenant_id', metavar='<TENANT_ID>',
           help='Tenant to remove as member.')
def do_member_delete(gc, args):
    """Remove a tenant's membership from an image."""
    gc.image_members.delete(args.image_id, args.tenant_id)
| 40.676471 | 78 | 0.632683 |
import copy
import sys
from glanceclient.common import utils
import glanceclient.v1.images
def do_image_list(gc, args):
"""List images."""
images = gc.images.list()
columns = ['ID', 'Name', 'Disk Format', 'Container Format',
'Size', 'Status']
utils.print_list(images, columns)
def _image_show(image):
info = copy.deepcopy(image._info)
for (k, v) in info.pop('properties').iteritems():
info['Property \'%s\'' % k] = v
utils.print_dict(info)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
def do_image_show(gc, args):
"""Describe a specific image."""
image = gc.images.get(args.id)
_image_show(image)
@utils.arg('--id', metavar='<IMAGE_ID>',
help='ID of image to reserve.')
@utils.arg('--name', metavar='<NAME>',
help='Name of image.')
@utils.arg('--disk-format', metavar='<CONTAINER_FORMAT>',
help='Disk format of image.')
@utils.arg('--container-format', metavar='<DISK_FORMAT>',
help='Container format of image.')
@utils.arg('--owner', metavar='<TENANT_ID>',
help='Tenant who should own image.')
@utils.arg('--size', metavar='<SIZE>',
help=('Size of image data (in bytes). Only used with'
' \'--location\' and \'--copy_from\'.'))
@utils.arg('--min-disk', metavar='<DISK_GB>',
help='Minimum size of disk needed to boot image (in gigabytes).')
@utils.arg('--min-ram', metavar='<DISK_RAM>',
help='Minimum amount of ram needed to boot image (in megabytes).')
@utils.arg('--location', metavar='<IMAGE_URL>',
help=('URL where the data for this image already resides.'
' For example, if the image data is stored in the filesystem'
' local to the glance server at \'/usr/share/image.tar.gz\','
' you would specify \'file:///usr/share/image.tar.gz\'.'))
@utils.arg('--checksum', metavar='<CHECKSUM>',
help='Hash of image data used Glance can use for verification.')
@utils.arg('--copy-from', metavar='<IMAGE_URL>',
help=('Similar to \'--location\' in usage, but this indicates that'
' the Glance server should immediately copy the data and'
' store it in its configured image store.'))
@utils.arg('--public', action='store_true', default=False,
help='Make image accessible to the public.')
@utils.arg('--protected', action='store_true', default=False,
help='Prevent image from being deleted.')
@utils.arg('--property', metavar="<key=value>", action='append', default=[],
help=("Arbitrary property to associate with image. "
"May be used multiple times."))
def do_image_create(gc, args):
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
fields['is_public'] = fields.pop('public')
raw_properties = fields.pop('property')
fields['properties'] = {}
for datum in raw_properties:
key, value = datum.split('=', 1)
fields['properties'][key] = value
CREATE_PARAMS = glanceclient.v1.images.CREATE_PARAMS
fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))
if 'location' not in fields and 'copy_from' not in fields:
fields['data'] = sys.stdin
image = gc.images.create(**fields)
_image_show(image)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to modify.')
@utils.arg('--name', metavar='<NAME>',
help='Name of image.')
@utils.arg('--disk-format', metavar='<CONTAINER_FORMAT>',
help='Disk format of image.')
@utils.arg('--container-format', metavar='<DISK_FORMAT>',
help='Container format of image.')
@utils.arg('--owner', metavar='<TENANT_ID>',
help='Tenant who should own image.')
@utils.arg('--size', metavar='<SIZE>',
help='Size of image data (in bytes).')
@utils.arg('--min-disk', metavar='<DISK_GB>',
help='Minimum size of disk needed to boot image (in gigabytes).')
@utils.arg('--min-ram', metavar='<DISK_RAM>',
help='Minimum amount of ram needed to boot image (in megabytes).')
@utils.arg('--location', metavar='<IMAGE_URL>',
help=('URL where the data for this image already resides.'
' For example, if the image data is stored in the filesystem'
' local to the glance server at \'/usr/share/image.tar.gz\','
' you would specify \'file:///usr/share/image.tar.gz\'.'))
@utils.arg('--checksum', metavar='<CHECKSUM>',
help='Hash of image data used Glance can use for verification.')
@utils.arg('--copy-from', metavar='<IMAGE_URL>',
help=('Similar to \'--location\' in usage, but this indicates that'
' the Glance server should immediately copy the data and'
' store it in its configured image store.'))
@utils.arg('--is-public', type=bool,
help='Make image accessible to the public.')
@utils.arg('--is-protected', type=bool,
help='Prevent image from being deleted.')
@utils.arg('--property', metavar="<key=value>", action='append', default=[],
help=("Arbitrary property to associate with image. "
"May be used multiple times."))
def do_image_update(gc, args):
# Filter out None values
fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
image_id = fields.pop('id')
raw_properties = fields.pop('property')
fields['properties'] = {}
for datum in raw_properties:
key, value = datum.split('=', 1)
fields['properties'][key] = value
# Filter out values we can't use
UPDATE_PARAMS = glanceclient.v1.images.UPDATE_PARAMS
fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))
if 'location' not in fields and 'copy_from' not in fields:
fields['data'] = sys.stdin
image = gc.images.update(image_id, **fields)
_image_show(image)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to delete.')
def do_image_delete(gc, args):
"""Delete a specific image."""
gc.images.delete(args.id)
@utils.arg('--image-id', metavar='<IMAGE_ID>',
help='Filter results by an image ID.')
@utils.arg('--tenant-id', metavar='<TENANT_ID>',
help='Filter results by a tenant ID.')
def do_member_list(gc, args):
if args.image_id and args.tenant_id:
print 'Unable to filter members by both --image-id and --tenant-id.'
sys.exit(1)
elif args.image_id:
kwargs = {'image': args.image_id}
elif args.tenant_id:
kwargs = {'member': args.tenant_id}
else:
print 'Unable to list all members. Specify --image-id or --tenant-id'
sys.exit(1)
members = gc.image_members.list(**kwargs)
columns = ['Image ID', 'Member ID', 'Can Share']
utils.print_list(members, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image to add member to.')
@utils.arg('tenant_id', metavar='<TENANT_ID>',
help='Tenant to add as member')
@utils.arg('--can-share', action='store_true', default=False,
help='Allow the specified tenant to share this image.')
def do_member_create(gc, args):
gc.image_members.create(args.image_id, args.tenant_id, args.can_share)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image to add member to.')
@utils.arg('tenant_id', metavar='<TENANT_ID>',
help='Tenant to add as member')
def do_member_delete(gc, args):
gc.image_members.delete(args.image_id, args.tenant_id)
| false | true |
f7228b133082a8f068ce1f75a5ccf3de42329fed | 9,290 | py | Python | stanza/models/depparse/data.py | andreipruteanu/stanza | cf58ec826e91eba700e5eafbfe3d0acc8f79360d | [
"Apache-2.0"
] | null | null | null | stanza/models/depparse/data.py | andreipruteanu/stanza | cf58ec826e91eba700e5eafbfe3d0acc8f79360d | [
"Apache-2.0"
] | null | null | null | stanza/models/depparse/data.py | andreipruteanu/stanza | cf58ec826e91eba700e5eafbfe3d0acc8f79360d | [
"Apache-2.0"
] | null | null | null | import random
import logging
# import torch
from stanza.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanza.models.common.vocab import PAD_ID, VOCAB_PREFIX, ROOT_ID, CompositeVocab, CharVocab
from stanza.models.pos.vocab import WordVocab, XPOSVocab, FeatureVocab, MultiVocab
from stanza.models.pos.xpos_vocab_factory import xpos_vocab_factory
from stanza.models.common.doc import *
logger = logging.getLogger('stanza')
def data_to_batches(data, batch_size, eval_mode, sort_during_eval, min_length_to_batch_separately):
"""
Given a list of lists, where the first element of each sublist
represents the sentence, group the sentences into batches.
During training mode (not eval_mode) the sentences are sorted by
length with a bit of random shuffling. During eval mode, the
sentences are sorted by length if sort_during_eval is true.
Refactored from the data structure in case other models could use
it and for ease of testing.
Returns (batches, original_order), where original_order is None
when in train mode or when unsorted and represents the original
location of each sentence in the sort
"""
res = []
if not eval_mode:
# sort sentences (roughly) by length for better memory utilization
data = sorted(data, key = lambda x: len(x[0]), reverse=random.random() > .5)
data_orig_idx = None
elif sort_during_eval:
(data, ), data_orig_idx = sort_all([data], [len(x[0]) for x in data])
else:
data_orig_idx = None
current = []
currentlen = 0
for x in data:
if min_length_to_batch_separately is not None and len(x[0]) > min_length_to_batch_separately:
if currentlen > 0:
res.append(current)
current = []
currentlen = 0
res.append([x])
else:
if len(x[0]) + currentlen > batch_size and currentlen > 0:
res.append(current)
current = []
currentlen = 0
current.append(x)
currentlen += len(x[0])
if currentlen > 0:
res.append(current)
return res, data_orig_idx
class DataLoader:
def __init__(self, doc, batch_size, args, pretrain, vocab=None, evaluation=False, sort_during_eval=False, min_length_to_batch_separately=None):
self.batch_size = batch_size
self.min_length_to_batch_separately=min_length_to_batch_separately
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
self.sort_during_eval = sort_during_eval
self.doc = doc
data = self.load_doc(doc)
# handle vocab
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
# handle pretrain; pretrain vocab is used when args['pretrain'] == True and pretrain is not None
self.pretrain_vocab = None
if pretrain is not None and args['pretrain']:
self.pretrain_vocab = pretrain.vocab
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
logger.debug("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab, self.pretrain_vocab, args)
# shuffle for training
if self.shuffled:
random.shuffle(data)
self.num_examples = len(data)
# chunk into batches
self.data = self.chunk_batches(data)
logger.debug("{} batches created.".format(len(self.data)))
def init_vocab(self, data):
assert self.eval == False # for eval vocab must exist
charvocab = CharVocab(data, self.args['shorthand'])
wordvocab = WordVocab(data, self.args['shorthand'], cutoff=7, lower=True)
uposvocab = WordVocab(data, self.args['shorthand'], idx=1)
xposvocab = xpos_vocab_factory(data, self.args['shorthand'])
featsvocab = FeatureVocab(data, self.args['shorthand'], idx=3)
lemmavocab = WordVocab(data, self.args['shorthand'], cutoff=7, idx=4, lower=True)
deprelvocab = WordVocab(data, self.args['shorthand'], idx=6)
vocab = MultiVocab({'char': charvocab,
'word': wordvocab,
'upos': uposvocab,
'xpos': xposvocab,
'feats': featsvocab,
'lemma': lemmavocab,
'deprel': deprelvocab})
return vocab
def preprocess(self, data, vocab, pretrain_vocab, args):
processed = []
xpos_replacement = [[ROOT_ID] * len(vocab['xpos'])] if isinstance(vocab['xpos'], CompositeVocab) else [ROOT_ID]
feats_replacement = [[ROOT_ID] * len(vocab['feats'])]
for sent in data:
processed_sent = [[ROOT_ID] + vocab['word'].map([w[0] for w in sent])]
processed_sent += [[[ROOT_ID]] + [vocab['char'].map([x for x in w[0]]) for w in sent]]
processed_sent += [[ROOT_ID] + vocab['upos'].map([w[1] for w in sent])]
processed_sent += [xpos_replacement + vocab['xpos'].map([w[2] for w in sent])]
processed_sent += [feats_replacement + vocab['feats'].map([w[3] for w in sent])]
if pretrain_vocab is not None:
# always use lowercase lookup in pretrained vocab
processed_sent += [[ROOT_ID] + pretrain_vocab.map([w[0].lower() for w in sent])]
else:
processed_sent += [[ROOT_ID] + [PAD_ID] * len(sent)]
processed_sent += [[ROOT_ID] + vocab['lemma'].map([w[4] for w in sent])]
processed_sent += [[to_int(w[5], ignore_error=self.eval) for w in sent]]
processed_sent += [vocab['deprel'].map([w[6] for w in sent])]
processed.append(processed_sent)
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 9
# sort sentences by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# sort words by lens for easy char-RNN operations
batch_words = [w for sent in batch[1] for w in sent]
word_lens = [len(x) for x in batch_words]
batch_words, word_orig_idx = sort_all([batch_words], word_lens)
batch_words = batch_words[0]
word_lens = [len(x) for x in batch_words]
# convert to tensors
words = batch[0]
words = get_long_tensor(words, batch_size)
words_mask = torch.eq(words, PAD_ID)
wordchars = get_long_tensor(batch_words, len(word_lens))
wordchars_mask = torch.eq(wordchars, PAD_ID)
upos = get_long_tensor(batch[2], batch_size)
xpos = get_long_tensor(batch[3], batch_size)
ufeats = get_long_tensor(batch[4], batch_size)
pretrained = get_long_tensor(batch[5], batch_size)
sentlens = [len(x) for x in batch[0]]
lemma = get_long_tensor(batch[6], batch_size)
head = get_long_tensor(batch[7], batch_size)
deprel = get_long_tensor(batch[8], batch_size)
return words, words_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, orig_idx, word_orig_idx, sentlens, word_lens
def load_doc(self, doc):
data = doc.get([TEXT, UPOS, XPOS, FEATS, LEMMA, HEAD, DEPREL], as_sentences=True)
data = self.resolve_none(data)
return data
def resolve_none(self, data):
# replace None to '_'
for sent_idx in range(len(data)):
for tok_idx in range(len(data[sent_idx])):
for feat_idx in range(len(data[sent_idx][tok_idx])):
if data[sent_idx][tok_idx][feat_idx] is None:
data[sent_idx][tok_idx][feat_idx] = '_'
return data
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def reshuffle(self):
data = [y for x in self.data for y in x]
self.data = self.chunk_batches(data)
random.shuffle(self.data)
def chunk_batches(self, data):
batches, data_orig_idx = data_to_batches(data=data, batch_size=self.batch_size,
eval_mode=self.eval, sort_during_eval=self.sort_during_eval,
min_length_to_batch_separately=self.min_length_to_batch_separately)
# data_orig_idx might be None at train time, since we don't anticipate unsorting
self.data_orig_idx = data_orig_idx
return batches
def to_int(string, ignore_error=False):
try:
res = int(string)
except ValueError as err:
if ignore_error:
return 0
else:
raise err
return res
| 41.473214 | 158 | 0.614962 | import random
import logging
from stanza.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanza.models.common.vocab import PAD_ID, VOCAB_PREFIX, ROOT_ID, CompositeVocab, CharVocab
from stanza.models.pos.vocab import WordVocab, XPOSVocab, FeatureVocab, MultiVocab
from stanza.models.pos.xpos_vocab_factory import xpos_vocab_factory
from stanza.models.common.doc import *
logger = logging.getLogger('stanza')
def data_to_batches(data, batch_size, eval_mode, sort_during_eval, min_length_to_batch_separately):
res = []
if not eval_mode:
data = sorted(data, key = lambda x: len(x[0]), reverse=random.random() > .5)
data_orig_idx = None
elif sort_during_eval:
(data, ), data_orig_idx = sort_all([data], [len(x[0]) for x in data])
else:
data_orig_idx = None
current = []
currentlen = 0
for x in data:
if min_length_to_batch_separately is not None and len(x[0]) > min_length_to_batch_separately:
if currentlen > 0:
res.append(current)
current = []
currentlen = 0
res.append([x])
else:
if len(x[0]) + currentlen > batch_size and currentlen > 0:
res.append(current)
current = []
currentlen = 0
current.append(x)
currentlen += len(x[0])
if currentlen > 0:
res.append(current)
return res, data_orig_idx
class DataLoader:
def __init__(self, doc, batch_size, args, pretrain, vocab=None, evaluation=False, sort_during_eval=False, min_length_to_batch_separately=None):
self.batch_size = batch_size
self.min_length_to_batch_separately=min_length_to_batch_separately
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
self.sort_during_eval = sort_during_eval
self.doc = doc
data = self.load_doc(doc)
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
self.pretrain_vocab = None
if pretrain is not None and args['pretrain']:
self.pretrain_vocab = pretrain.vocab
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
logger.debug("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab, self.pretrain_vocab, args)
if self.shuffled:
random.shuffle(data)
self.num_examples = len(data)
self.data = self.chunk_batches(data)
logger.debug("{} batches created.".format(len(self.data)))
def init_vocab(self, data):
assert self.eval == False
charvocab = CharVocab(data, self.args['shorthand'])
wordvocab = WordVocab(data, self.args['shorthand'], cutoff=7, lower=True)
uposvocab = WordVocab(data, self.args['shorthand'], idx=1)
xposvocab = xpos_vocab_factory(data, self.args['shorthand'])
featsvocab = FeatureVocab(data, self.args['shorthand'], idx=3)
lemmavocab = WordVocab(data, self.args['shorthand'], cutoff=7, idx=4, lower=True)
deprelvocab = WordVocab(data, self.args['shorthand'], idx=6)
vocab = MultiVocab({'char': charvocab,
'word': wordvocab,
'upos': uposvocab,
'xpos': xposvocab,
'feats': featsvocab,
'lemma': lemmavocab,
'deprel': deprelvocab})
return vocab
def preprocess(self, data, vocab, pretrain_vocab, args):
processed = []
xpos_replacement = [[ROOT_ID] * len(vocab['xpos'])] if isinstance(vocab['xpos'], CompositeVocab) else [ROOT_ID]
feats_replacement = [[ROOT_ID] * len(vocab['feats'])]
for sent in data:
processed_sent = [[ROOT_ID] + vocab['word'].map([w[0] for w in sent])]
processed_sent += [[[ROOT_ID]] + [vocab['char'].map([x for x in w[0]]) for w in sent]]
processed_sent += [[ROOT_ID] + vocab['upos'].map([w[1] for w in sent])]
processed_sent += [xpos_replacement + vocab['xpos'].map([w[2] for w in sent])]
processed_sent += [feats_replacement + vocab['feats'].map([w[3] for w in sent])]
if pretrain_vocab is not None:
processed_sent += [[ROOT_ID] + pretrain_vocab.map([w[0].lower() for w in sent])]
else:
processed_sent += [[ROOT_ID] + [PAD_ID] * len(sent)]
processed_sent += [[ROOT_ID] + vocab['lemma'].map([w[4] for w in sent])]
processed_sent += [[to_int(w[5], ignore_error=self.eval) for w in sent]]
processed_sent += [vocab['deprel'].map([w[6] for w in sent])]
processed.append(processed_sent)
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 9
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
batch_words = [w for sent in batch[1] for w in sent]
word_lens = [len(x) for x in batch_words]
batch_words, word_orig_idx = sort_all([batch_words], word_lens)
batch_words = batch_words[0]
word_lens = [len(x) for x in batch_words]
words = batch[0]
words = get_long_tensor(words, batch_size)
words_mask = torch.eq(words, PAD_ID)
wordchars = get_long_tensor(batch_words, len(word_lens))
wordchars_mask = torch.eq(wordchars, PAD_ID)
upos = get_long_tensor(batch[2], batch_size)
xpos = get_long_tensor(batch[3], batch_size)
ufeats = get_long_tensor(batch[4], batch_size)
pretrained = get_long_tensor(batch[5], batch_size)
sentlens = [len(x) for x in batch[0]]
lemma = get_long_tensor(batch[6], batch_size)
head = get_long_tensor(batch[7], batch_size)
deprel = get_long_tensor(batch[8], batch_size)
return words, words_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, orig_idx, word_orig_idx, sentlens, word_lens
def load_doc(self, doc):
data = doc.get([TEXT, UPOS, XPOS, FEATS, LEMMA, HEAD, DEPREL], as_sentences=True)
data = self.resolve_none(data)
return data
def resolve_none(self, data):
for sent_idx in range(len(data)):
for tok_idx in range(len(data[sent_idx])):
for feat_idx in range(len(data[sent_idx][tok_idx])):
if data[sent_idx][tok_idx][feat_idx] is None:
data[sent_idx][tok_idx][feat_idx] = '_'
return data
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def reshuffle(self):
data = [y for x in self.data for y in x]
self.data = self.chunk_batches(data)
random.shuffle(self.data)
def chunk_batches(self, data):
batches, data_orig_idx = data_to_batches(data=data, batch_size=self.batch_size,
eval_mode=self.eval, sort_during_eval=self.sort_during_eval,
min_length_to_batch_separately=self.min_length_to_batch_separately)
self.data_orig_idx = data_orig_idx
return batches
def to_int(string, ignore_error=False):
try:
res = int(string)
except ValueError as err:
if ignore_error:
return 0
else:
raise err
return res
| true | true |
f7228b37933f2ef8265da1cac6a8cd07aa18238b | 3,906 | py | Python | shamrock/wallet/rl_wallet/rl_wallet_puzzles.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | 3 | 2022-02-10T09:46:23.000Z | 2022-03-22T17:10:50.000Z | shamrock/wallet/rl_wallet/rl_wallet_puzzles.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | shamrock/wallet/rl_wallet/rl_wallet_puzzles.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | import math
from binascii import hexlify
from clvm_tools import binutils
from shamrock.types.blockchain_format.program import Program
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.condition_opcodes import ConditionOpcode
from shamrock.util.ints import uint64
from shamrock.wallet.chialisp import sexp
from shamrock.wallet.puzzles.load_clvm import load_clvm
RATE_LIMITED_MODE = 1
AGGREGATION_MODE = 2
CLAWBACK_MODE = 3
def rl_puzzle_for_pk(
pubkey: bytes,
rate_amount: uint64,
interval_time: uint64,
origin_id: bytes32,
clawback_pk: bytes,
):
"""
Solution to this puzzle must be in format:
(1 my_parent_id, my_puzzlehash, my_amount, outgoing_puzzle_hash, outgoing_amount,
min_block_time, parent_parent_id, parent_amount, fee)
RATE LIMIT LOGIC:
M - shamrock_per_interval
N - interval_blocks
V - amount being spent
MIN_BLOCK_AGE = V / (M / N)
if not (min_block_age * M >= V * N) do X (raise)
ASSERT_COIN_BLOCK_AGE_EXCEEDS min_block_age
"""
MOD = load_clvm("rl.clvm")
return MOD.curry(pubkey, rate_amount, interval_time, origin_id, clawback_pk)
def rl_make_aggregation_solution(myid, wallet_coin_primary_input, wallet_coin_amount):
opcode_myid = "0x" + hexlify(myid).decode("ascii")
primary_input = "0x" + hexlify(wallet_coin_primary_input).decode("ascii")
sol = sexp(opcode_myid, primary_input, wallet_coin_amount)
return Program.to(binutils.assemble(sol))
def make_clawback_solution(puzzlehash, amount, fee):
opcode_create = hexlify(ConditionOpcode.CREATE_COIN).decode("ascii")
solution = sexp(CLAWBACK_MODE, sexp("0x" + opcode_create, "0x" + str(puzzlehash), amount - fee))
return Program.to(binutils.assemble(solution))
def rl_make_solution_mode_2(
my_puzzle_hash,
consolidating_primary_input,
consolidating_coin_puzzle_hash,
outgoing_amount,
my_primary_input,
incoming_amount,
parent_amount,
my_parent_parent_id,
):
my_puzzle_hash = hexlify(my_puzzle_hash).decode("ascii")
consolidating_primary_input = hexlify(consolidating_primary_input).decode("ascii")
consolidating_coin_puzzle_hash = hexlify(consolidating_coin_puzzle_hash).decode("ascii")
primary_input = hexlify(my_primary_input).decode("ascii")
sol = sexp(
AGGREGATION_MODE,
"0x" + my_puzzle_hash,
"0x" + consolidating_primary_input,
"0x" + consolidating_coin_puzzle_hash,
outgoing_amount,
"0x" + primary_input,
incoming_amount,
parent_amount,
"0x" + str(my_parent_parent_id),
)
return Program.to(binutils.assemble(sol))
def solution_for_rl(
my_parent_id: bytes32,
my_puzzlehash: bytes32,
my_amount: uint64,
out_puzzlehash: bytes32,
out_amount: uint64,
my_parent_parent_id: bytes32,
parent_amount: uint64,
interval,
limit,
fee,
):
"""
Solution is (1 my_parent_id, my_puzzlehash, my_amount, outgoing_puzzle_hash, outgoing_amount,
min_block_time, parent_parent_id, parent_amount, fee)
min block time = Math.ceil((new_amount * self.interval) / self.limit)
"""
min_block_count = math.ceil((out_amount * interval) / limit)
solution = sexp(
RATE_LIMITED_MODE,
"0x" + my_parent_id.hex(),
"0x" + my_puzzlehash.hex(),
my_amount,
"0x" + out_puzzlehash.hex(),
out_amount,
min_block_count,
"0x" + my_parent_parent_id.hex(),
parent_amount,
fee,
)
return Program.to(binutils.assemble(solution))
def rl_make_aggregation_puzzle(wallet_puzzle):
"""
If Wallet A wants to send further funds to Wallet B then they can lock them up using this code
Solution will be (my_id wallet_coin_primary_input wallet_coin_amount)
"""
MOD = load_clvm("rl_aggregation.clvm")
return MOD.curry(wallet_puzzle)
| 31.248 | 100 | 0.717614 | import math
from binascii import hexlify
from clvm_tools import binutils
from shamrock.types.blockchain_format.program import Program
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.condition_opcodes import ConditionOpcode
from shamrock.util.ints import uint64
from shamrock.wallet.chialisp import sexp
from shamrock.wallet.puzzles.load_clvm import load_clvm
RATE_LIMITED_MODE = 1
AGGREGATION_MODE = 2
CLAWBACK_MODE = 3
def rl_puzzle_for_pk(
pubkey: bytes,
rate_amount: uint64,
interval_time: uint64,
origin_id: bytes32,
clawback_pk: bytes,
):
MOD = load_clvm("rl.clvm")
return MOD.curry(pubkey, rate_amount, interval_time, origin_id, clawback_pk)
def rl_make_aggregation_solution(myid, wallet_coin_primary_input, wallet_coin_amount):
opcode_myid = "0x" + hexlify(myid).decode("ascii")
primary_input = "0x" + hexlify(wallet_coin_primary_input).decode("ascii")
sol = sexp(opcode_myid, primary_input, wallet_coin_amount)
return Program.to(binutils.assemble(sol))
def make_clawback_solution(puzzlehash, amount, fee):
opcode_create = hexlify(ConditionOpcode.CREATE_COIN).decode("ascii")
solution = sexp(CLAWBACK_MODE, sexp("0x" + opcode_create, "0x" + str(puzzlehash), amount - fee))
return Program.to(binutils.assemble(solution))
def rl_make_solution_mode_2(
my_puzzle_hash,
consolidating_primary_input,
consolidating_coin_puzzle_hash,
outgoing_amount,
my_primary_input,
incoming_amount,
parent_amount,
my_parent_parent_id,
):
my_puzzle_hash = hexlify(my_puzzle_hash).decode("ascii")
consolidating_primary_input = hexlify(consolidating_primary_input).decode("ascii")
consolidating_coin_puzzle_hash = hexlify(consolidating_coin_puzzle_hash).decode("ascii")
primary_input = hexlify(my_primary_input).decode("ascii")
sol = sexp(
AGGREGATION_MODE,
"0x" + my_puzzle_hash,
"0x" + consolidating_primary_input,
"0x" + consolidating_coin_puzzle_hash,
outgoing_amount,
"0x" + primary_input,
incoming_amount,
parent_amount,
"0x" + str(my_parent_parent_id),
)
return Program.to(binutils.assemble(sol))
def solution_for_rl(
my_parent_id: bytes32,
my_puzzlehash: bytes32,
my_amount: uint64,
out_puzzlehash: bytes32,
out_amount: uint64,
my_parent_parent_id: bytes32,
parent_amount: uint64,
interval,
limit,
fee,
):
min_block_count = math.ceil((out_amount * interval) / limit)
solution = sexp(
RATE_LIMITED_MODE,
"0x" + my_parent_id.hex(),
"0x" + my_puzzlehash.hex(),
my_amount,
"0x" + out_puzzlehash.hex(),
out_amount,
min_block_count,
"0x" + my_parent_parent_id.hex(),
parent_amount,
fee,
)
return Program.to(binutils.assemble(solution))
def rl_make_aggregation_puzzle(wallet_puzzle):
MOD = load_clvm("rl_aggregation.clvm")
return MOD.curry(wallet_puzzle)
| true | true |
f7228b9da14d8b387c2fb18f0ddd0e1fb36c8436 | 2,839 | py | Python | tests/triangulum_test/test_support.py | PolarNick239/Triangulum3D | 85c6a44f5c8f620bdc58164bd50ff89e1897f59d | [
"MIT"
] | 10 | 2016-09-18T01:38:46.000Z | 2021-11-18T17:30:28.000Z | tests/triangulum_test/test_support.py | PolarNick239/Triangulum3D | 85c6a44f5c8f620bdc58164bd50ff89e1897f59d | [
"MIT"
] | 1 | 2018-06-20T05:48:19.000Z | 2018-06-20T09:19:56.000Z | tests/triangulum_test/test_support.py | PolarNick239/Triangulum3D | 85c6a44f5c8f620bdc58164bd50ff89e1897f59d | [
"MIT"
] | 12 | 2015-11-29T03:22:37.000Z | 2020-07-14T03:08:52.000Z | #
# Copyright (c) 2015, Nikolay Polyarnyi
# All rights reserved.
#
import yaml
import asyncio
import logging
import numpy as np
import pkg_resources
from pathlib import Path
from unittest import TestCase
from triangulum.utils import support
from triangulum.utils.support import str_dict, deep_merge
from triangulum.rendering.gl import RenderingAsyncExecutor
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(relativeCreated)d [%(threadName)s]\t%(name)s [%(levelname)s]:\t %(message)s')
resources_dir_path = Path(pkg_resources.get_provider('triangulum_test.resources').get_resource_filename(__name__, '.'))
_default_test_config = {
'debug_output_dir': None,
}
def load_config():
config_path = str(resources_dir_path / "test_config.yml")
try:
with open(config_path) as f:
user_config = yaml.load(f)
config = deep_merge(_default_test_config, user_config)
logger.debug("Using test config:\n{}".format(str_dict(config)))
except FileNotFoundError:
config = _default_test_config
logger.debug("No config file found at '{}'.".format(config_path))
logger.debug("Using test config (default one):\n{}".format(str_dict(config)))
return config
class TestBase(TestCase):
def setUp(self):
super().setUp()
self.config = load_config()
self.gl_executor = None
self.releasables = []
support.silent_make_dir(self.debug_dir())
def get_gl_executor(self):
if self.gl_executor is None:
self.gl_executor = RenderingAsyncExecutor()
return self.gl_executor
def gl_executor_map(self, foo, *args):
gl_executor = self.get_gl_executor()
result = asyncio.get_event_loop().run_until_complete(gl_executor.map(foo, *args))
return result
def register_releasable(self, releasable):
self.releasables.append(releasable)
def with_debug_output(self):
return self.config['debug_output_dir'] is not None
def debug_dir(self):
return Path(self.config['debug_output_dir']) / self.__class__.__name__
def dump_debug_img(self, path, img):
if self.with_debug_output():
path = self.debug_dir() / path
support.silent_make_dir(path.parent)
support.save_image(path, img)
def dump_debug_matrix_by_hue(self, path, mat):
if self.with_debug_output():
path = self.debug_dir() / path
support.silent_make_dir(path.parent)
img = support.array_to_rgb_by_hue(mat)[:, :, ::-1]
img = np.uint8(img)
support.save_image(path, img)
def tearDown(self):
super().tearDown()
for releasable in self.releasables:
self.gl_executor_map(releasable.release)
| 29.884211 | 119 | 0.672772 |
import yaml
import asyncio
import logging
import numpy as np
import pkg_resources
from pathlib import Path
from unittest import TestCase
from triangulum.utils import support
from triangulum.utils.support import str_dict, deep_merge
from triangulum.rendering.gl import RenderingAsyncExecutor
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(relativeCreated)d [%(threadName)s]\t%(name)s [%(levelname)s]:\t %(message)s')
resources_dir_path = Path(pkg_resources.get_provider('triangulum_test.resources').get_resource_filename(__name__, '.'))
_default_test_config = {
'debug_output_dir': None,
}
def load_config():
config_path = str(resources_dir_path / "test_config.yml")
try:
with open(config_path) as f:
user_config = yaml.load(f)
config = deep_merge(_default_test_config, user_config)
logger.debug("Using test config:\n{}".format(str_dict(config)))
except FileNotFoundError:
config = _default_test_config
logger.debug("No config file found at '{}'.".format(config_path))
logger.debug("Using test config (default one):\n{}".format(str_dict(config)))
return config
class TestBase(TestCase):
def setUp(self):
super().setUp()
self.config = load_config()
self.gl_executor = None
self.releasables = []
support.silent_make_dir(self.debug_dir())
def get_gl_executor(self):
if self.gl_executor is None:
self.gl_executor = RenderingAsyncExecutor()
return self.gl_executor
def gl_executor_map(self, foo, *args):
gl_executor = self.get_gl_executor()
result = asyncio.get_event_loop().run_until_complete(gl_executor.map(foo, *args))
return result
def register_releasable(self, releasable):
self.releasables.append(releasable)
def with_debug_output(self):
return self.config['debug_output_dir'] is not None
def debug_dir(self):
return Path(self.config['debug_output_dir']) / self.__class__.__name__
def dump_debug_img(self, path, img):
if self.with_debug_output():
path = self.debug_dir() / path
support.silent_make_dir(path.parent)
support.save_image(path, img)
def dump_debug_matrix_by_hue(self, path, mat):
if self.with_debug_output():
path = self.debug_dir() / path
support.silent_make_dir(path.parent)
img = support.array_to_rgb_by_hue(mat)[:, :, ::-1]
img = np.uint8(img)
support.save_image(path, img)
def tearDown(self):
super().tearDown()
for releasable in self.releasables:
self.gl_executor_map(releasable.release)
| true | true |
f7228c07a8d76a69d53822ccb3d9d7655013d783 | 995 | py | Python | tests/formatters/pls_recall.py | ir4n6/plaso | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | [
"Apache-2.0"
] | null | null | null | tests/formatters/pls_recall.py | ir4n6/plaso | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | [
"Apache-2.0"
] | null | null | null | tests/formatters/pls_recall.py | ir4n6/plaso | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the PL/SQL Recall event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import pls_recall
from tests.formatters import test_lib
class PlsRecallFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the PL/SQL Recall file container event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    self.assertIsNotNone(pls_recall.PlsRecallFormatter())

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = pls_recall.PlsRecallFormatter()

    self._TestGetFormatStringAttributeNames(
        formatter,
        ['sequence_number', 'username', 'database_name', 'query'])
# TODO: add test for GetMessages.
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 24.875 | 67 | 0.734673 |
from __future__ import unicode_literals
import unittest
from plaso.formatters import pls_recall
from tests.formatters import test_lib
class PlsRecallFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the PL/SQL Recall file container event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    event_formatter = pls_recall.PlsRecallFormatter()
    self.assertIsNotNone(event_formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    event_formatter = pls_recall.PlsRecallFormatter()
    expected_attribute_names = [
        'sequence_number',
        'username',
        'database_name',
        'query']
    self._TestGetFormatStringAttributeNames(
        event_formatter, expected_attribute_names)
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| true | true |
f7228c2465be83d0a65bbf9d1d17615ffe63b200 | 24,139 | py | Python | efficientdet/horovod_estimator/hooks.py | itsliupeng/automl | a02038aa60bdf54e689758e5860e19c574d3638f | [
"Apache-2.0"
] | 7 | 2020-04-07T14:24:49.000Z | 2020-09-27T08:48:15.000Z | efficientdet/horovod_estimator/hooks.py | itsliupeng/automl | a02038aa60bdf54e689758e5860e19c574d3638f | [
"Apache-2.0"
] | null | null | null | efficientdet/horovod_estimator/hooks.py | itsliupeng/automl | a02038aa60bdf54e689758e5860e19c574d3638f | [
"Apache-2.0"
] | 1 | 2020-04-09T09:15:11.000Z | 2020-04-09T09:15:11.000Z | import io
import itertools
import os
import time
import cv2
try:
import horovod.tensorflow as hvd
except ImportError:
hvd = None
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from PIL import Image
from PIL import ImageDraw, ImageFont
from sklearn.metrics import confusion_matrix
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from collections import OrderedDict
from horovod_estimator.utis import hvd_info_rank0, is_rank0
# Headless backend: figures are rendered off-screen into summaries only.
mpl.use('Agg')
# Silence divide/invalid warnings raised by the normalization helpers below.
np.seterr(divide='ignore', invalid='ignore')
class BroadcastGlobalVariablesHook(tf.train.SessionRunHook):
    """
    SessionRunHook that will broadcast all global variables from root rank
    to all other processes during initialization.

    This is necessary to ensure consistent initialization of all workers when
    training is started with random weights or restored from a checkpoint.
    """

    def __init__(self, root_rank, pretrained_model_path=None, exclusions=[], device='', model_dir=None):
        """Construct a new BroadcastGlobalVariablesHook.

        Args:
            root_rank: Rank that will send data, other ranks will receive data.
            pretrained_model_path: optional checkpoint to warm-start from
                (restored on rank 0 only, then broadcast).
            exclusions: variable-name prefixes that must NOT be restored from
                the pretrained checkpoint.
            device: Device to be used for broadcasting. Uses GPU by default
                if Horovod was built with HOROVOD_GPU_BROADCAST.
            model_dir: if this directory already holds a non-initial
                checkpoint, the pretrained model is ignored.
        """
        super(BroadcastGlobalVariablesHook, self).__init__()
        self.root_rank = root_rank
        self.bcast_op = None
        self.device = device
        self._pretrained_model_path = pretrained_model_path
        self._saver = None
        # The mutable default `exclusions=[]` is harmless here: it is copied
        # into a fresh set and never mutated through the default object.
        self._exclusions = set(exclusions)
        self._variables_to_restore = []
        self._model_dir = model_dir

    def begin(self):
        """Build the broadcast op and, on rank 0, a Saver for warm-start restore."""
        if not self.bcast_op or self.bcast_op.graph != tf.get_default_graph():
            with tf.device(self.device):
                self.bcast_op = hvd.broadcast_global_variables(self.root_rank)

        if self._model_dir is not None:
            checkpoint_path = checkpoint_management.latest_checkpoint(self._model_dir)
            # An existing (non step-0) checkpoint wins over the pretrained model.
            if checkpoint_path is not None and not checkpoint_path.endswith('model.ckpt-0'):
                hvd_info_rank0('>>>>> model_dir {} has checkpoint {}, not using pretrained_model_path <<<<<'.
                               format(self._model_dir, checkpoint_path))
                return

        if self._pretrained_model_path is not None and len(self._pretrained_model_path) > 0 and is_rank0():
            reader = pywrap_tensorflow.NewCheckpointReader(self._pretrained_model_path)
            # sorted(dict) yields only the checkpoint's variable names (keys).
            var_to_shape_map = sorted(reader.get_variable_to_shape_map())
            self._exclusions.add('global_step')
            # Keep every graph variable that exists in the checkpoint and does
            # not match an excluded prefix.
            for var in tf.global_variables():
                if var.op.name in var_to_shape_map:
                    excluded = False
                    for exclusion in self._exclusions:
                        if var.op.name.startswith(exclusion):
                            excluded = True
                            break
                    if not excluded:
                        self._variables_to_restore.append(var)
            self._saver = tf.train.Saver(var_list=self._variables_to_restore)

    def after_create_session(self, session, coord):
        """Restore pretrained weights on rank 0, then broadcast to all ranks."""
        if self._saver:
            hvd_info_rank0('>>>>> begin to load weights from {}, restore variables length {}, without variables {}'
                           .format(self._pretrained_model_path, len(self._variables_to_restore), self._exclusions))
            self._saver.restore(session, self._pretrained_model_path)
            hvd_info_rank0('<<<<< end to load weights')
        hvd_info_rank0('>>>>> broadcast global variables begin during after_create_session')
        session.run(self.bcast_op)
        hvd_info_rank0('<<<<< broadcast global variables end during after_create_session ')
class LoggingTensorHook(tf.train.SessionRunHook):
    """Every `every_n_iter` steps, fetches the tensors in `named_tensor`
    (optionally averaged across Horovod ranks via allreduce), logs them on
    rank 0 and writes them as `avg/*` scalar summaries when `summary_dir`
    is set.
    """

    def __init__(self, named_tensor, summary_dir=None, every_n_iter=100, use_all_reduce=False):
        super(LoggingTensorHook, self).__init__()
        self._named_tensor = named_tensor
        self._every_n_iter = every_n_iter
        self._summary_dir = summary_dir
        self._step = 0
        self._use_all_reduce = use_all_reduce
        self._tic = time.time()
        self._avg_ops = {}
        self._global_step_tensor = None

    def begin(self):
        """Resolve tensor names to graph elements; wrap in allreduce if requested."""
        if self._use_all_reduce:
            self._avg_ops = OrderedDict({'{}'.format(tag): hvd.allreduce(basic_session_run_hooks._as_graph_element(tensor))
                                         for (tag, tensor) in self._named_tensor.items()})
        else:
            self._avg_ops = OrderedDict({'{}'.format(tag): basic_session_run_hooks._as_graph_element(tensor)
                                         for (tag, tensor) in self._named_tensor.items()})
        self._global_step_tensor = tf.train.get_or_create_global_step()
        self._avg_ops['step'] = self._global_step_tensor

    def before_run(self, run_context):  # pylint: disable=unused-argument
        # FIX: reset the timer on *every* step. Previously this assignment sat
        # after the early return, so it never ran on logging steps and the
        # reported `step_time` covered the previous step's run as well.
        self._tic = time.time()
        if self._step % self._every_n_iter == 0:
            return SessionRunArgs(fetches=self._avg_ops)

    def _log_tensors(self, tensor_values):
        """Pretty-print the fetched values plus the wall time since before_run."""
        original = np.get_printoptions()
        np.set_printoptions(suppress=True)
        stats = []
        for tag, tensor in tensor_values.items():
            stats.append('%s = %s' % (tag, tensor))
        stats.append('%s = %s' % ('step_time', time.time() - self._tic))

        if self._use_all_reduce:
            logging_head = 'logging all reduce tensors'
        else:
            logging_head = 'logging tensors'
        hvd_info_rank0("{}: {}".format(logging_head, ", ".join(stats)))
        np.set_printoptions(**original)

    def _summary(self, tensor_values):
        """Write each fetched value as an `avg/<tag>` scalar at the fetched step."""
        if self._summary_dir:
            writer = tf.summary.FileWriterCache.get(self._summary_dir)
            this_summary = tf.Summary()
            for tag, value in tensor_values.items():
                this_summary.value.add(tag='avg/{}'.format(tag), simple_value=value)
            writer.add_summary(this_summary, tensor_values['step'])
            writer.flush()

    def after_run(self, run_context, run_values):
        if self._step % self._every_n_iter == 0:
            # With allreduce, every rank fetched; only rank 0 reports.
            if is_rank0() or not self._use_all_reduce:
                avg_values = run_values.results
                self._log_tensors(avg_values)
                self._summary(avg_values)
        self._step += 1
def make_image(tensor):
    """Convert an HWC uint8 numpy array into a `Summary.Image` protobuf (PNG)."""
    height, width, channel = tensor.shape
    with io.BytesIO() as output:
        Image.fromarray(tensor).save(output, format='PNG')
        png_bytes = output.getvalue()
    return Summary.Image(height=height,
                         width=width,
                         colorspace=channel,
                         encoded_image_string=png_bytes)
def to_fix_format(i):
    """Format a number for display: integers verbatim, anything else with 2 decimals."""
    if isinstance(i, (int, np.int32, np.int64)):
        return str(i)
    return '{:.2f}'.format(i)
def draw_text_image(text, size=(224, 224)):
    """Render white `text` on a black RGB canvas of the given size."""
    canvas = Image.new('RGB', size, (0, 0, 0))
    drawer = ImageDraw.Draw(canvas)
    # Fixed 20pt monospace at a small top-left offset.
    fnt = ImageFont.truetype('assets/fonts/FreeMono.ttf', size=20)
    drawer.text((10, 10), text, font=fnt, fill=(255, 255, 255))
    return canvas
def scale_to_uint8(features_tensor):
    """Min-max scale an array into [0, 255] and cast to uint8.

    FIX: a constant-valued input previously divided by zero and cast NaN to
    uint8 (undefined result); it now maps to all zeros. Empty input is
    returned unchanged.
    """
    if len(features_tensor) > 0:
        min_f = np.min(features_tensor)
        max_f = np.max(features_tensor)
        spread = max_f - min_f
        if spread > 0:
            features_tensor = (features_tensor - min_f) / spread * 255
        else:
            features_tensor = np.zeros_like(features_tensor, dtype=np.float32)
        features_tensor = features_tensor.astype(np.uint8)
    return features_tensor
def top_k_text(prob_array, k):
    """Return the top-k '(index: prob)' pairs, highest probability first, one per line."""
    order = np.argsort(prob_array)[::-1][:k]
    lines = ['{}: {}'.format(idx, to_fix_format(prob_array[idx]))
             for idx in order]
    return '\n'.join(lines)
def find_xy(img, threshold=0.8, percentile=False):
    """Bounding box (x_min, y_min, x_max, y_max) of pixels brighter than
    `threshold` * max, measured after trimming a 3-pixel border.

    With `percentile=True` the coordinates are returned as fractions of the
    trimmed image size, clamped to 1.0.
    """
    x_offset, y_offset = 3, 3
    inner = img[x_offset:-x_offset, y_offset:-y_offset]
    cutoff = threshold * np.max(inner)
    rows, cols = np.nonzero(inner > cutoff)
    x_min = np.min(cols) + x_offset
    x_max = np.max(cols) + x_offset
    y_min = np.min(rows) + y_offset
    y_max = np.max(rows) + y_offset
    if percentile:
        h, w = inner.shape
        x_min, x_max = x_min / w, x_max / w
        y_min, y_max = y_min / h, y_max / h
        x_max = min(1.0, x_max)
        y_max = min(1.0, y_max)
    return x_min, y_min, x_max, y_max
def draw_box(img_tensor, box):
    """Draw a filled white rectangle `box` on `img_tensor`, return a uint8 array.

    NOTE(review): `outline=3` looks like it was meant to be a line width, but
    PIL interprets `outline` as a color -- confirm the intent.
    """
    x_min, y_min, x_max, y_max = box
    canvas = Image.fromarray(img_tensor)
    ImageDraw.Draw(canvas).rectangle(((x_min, y_min), (x_max, y_max)), fill='white', outline=3)
    return np.asarray(canvas, dtype=np.uint8)
def show_images(filenames, images, raw_images, heat_map_features, labels, probs, global_step, max_images,
                summary_writer, prefix='train'):
    """Compose a horizontal strip per row (filenames / raw images / preprocessed
    images / CAM heatmaps / label+top-k text) and write it as one image summary.

    No-op when `summary_writer` is None. `images` is assumed to be NHWC float
    in [0, 1] -- TODO confirm with the input pipeline. `raw_images` and
    `heat_map_features` rows are skipped when None.
    """
    if summary_writer is not None:
        assert images is not None and labels is not None and probs is not None
        n, height, width, channel = images.shape
        # 1-pixel separators between tiles (uint8 white / float 1.0).
        padding_255 = np.ones([height, 1, channel], dtype=np.uint8) * 255
        padding_1 = np.ones([height, 1, channel], dtype=np.float32)
        filenames_tensor_list = []
        raw_images_tensor_list = []
        images_tensor_list = []
        heat_map_tensor_list = []
        label_tensor_list = []
        max_images = min(max_images, n)
        for i in range(max_images):
            images_tensor_list.append(images[i])
            images_tensor_list.append(padding_1)
            if raw_images is not None:
                raw_images_tensor_list.append(raw_images[i])
                raw_images_tensor_list.append(padding_1)
            if heat_map_features is not None:
                # Class-activation map for the true label, min-max scaled to uint8.
                cam = heat_map_features[i][:, :, labels[i]]
                cam = cam - np.min(cam)
                cam_img = cam / np.max(cam)
                cam_img = np.uint8(255 * cam_img)
                heat_map = cv2.applyColorMap(cv2.resize(cam_img, (height, width)), cv2.COLORMAP_JET)
                blue_map = heat_map[:, :, -1]
                box = find_xy(blue_map)
                heat_map = draw_box(heat_map, box)
                heat_map = heat_map / 255.0
                # Blend heatmap over the input image.
                heat_img = heat_map * 0.7 + images[i] * 0.3
                heat_map_tensor_list.append(heat_img)
                heat_map_tensor_list.append(padding_1)
            # Labels & predictions: true label + top-k probabilities as text.
            probs_show_num = min(5, probs.shape[-1])
            text = '{}: {}\n'.format(to_fix_format(labels[i]), to_fix_format(probs[i][labels[i]])) \
                   + top_k_text(probs[i], probs_show_num)
            label_image = draw_text_image(text, (height, width))
            label_tensor_list.append(np.asarray(label_image, dtype=np.uint8))
            label_tensor_list.append(padding_255)
            filename = filenames[i]
            if isinstance(filename, bytes):
                filename = filename.decode('utf-8')
            filename = filename.split('/')[-1]
            filename_image = draw_text_image(filename, (height, width))
            filenames_tensor_list.append(np.asarray(filename_image, dtype=np.uint8))
            filenames_tensor_list.append(padding_255)
        # Scale float32 rows to uint8 and stack all rows vertically.
        all_tensor_list = [scale_to_uint8(np.concatenate(filenames_tensor_list, axis=1))]
        if raw_images is not None:
            all_tensor_list.append(scale_to_uint8(np.concatenate(raw_images_tensor_list, axis=1)))
        all_tensor_list.append(scale_to_uint8(np.concatenate(images_tensor_list, axis=1)))
        if heat_map_features is not None:
            all_tensor_list.append(scale_to_uint8(np.concatenate(heat_map_tensor_list, axis=1)))
        all_tensor_list.append(np.concatenate(label_tensor_list, axis=1))
        feature_heatmap_label_tensor = np.concatenate(all_tensor_list, axis=0)
        summary = Summary(value=[Summary.Value(tag='{}/features_heatmap_labels'.format(prefix),
                                               image=make_image(feature_heatmap_label_tensor))])
        summary_writer.add_summary(summary, global_step)
def plt_to_image_summary(plt):
    """Snapshot the current matplotlib figure as a `Summary.Image` protobuf."""
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    rgb = np.asarray(Image.open(buf).convert('RGB'), dtype=np.uint8)
    return make_image(rgb)
def confusion_matrix_summary(tag, cm, classes, normalize=False, recall=True, title='Confusion matrix',
                             cmap=plt.cm.Blues):
    """Render a confusion matrix as a TF image summary.

    Args:
        tag: summary tag.
        cm: square confusion matrix; rows = true labels, columns = predictions
            (sklearn convention -- assumed, confirm with callers).
        classes: tick labels; their count also drives the figure size.
        normalize: if True, plot rates instead of raw counts.
        recall: with `normalize`, True divides each row by its row sum
            (recall); False divides each column by its column sum (precision).
        title, cmap: plot cosmetics.

    Returns:
        A `Summary` proto holding the rendered figure.
    """
    if normalize:
        eps = np.finfo(np.float32).eps
        if recall:
            s = cm.sum(axis=1)[:, np.newaxis] + eps
        else:
            # FIX: precision must divide each *column* by its column sum. The
            # previous code built a column vector of column sums, which divided
            # row i by the sum of column i instead.
            s = cm.sum(axis=0)[np.newaxis, :] + eps
        cm = cm.astype('float') / s

    plt.close('all')
    f_size = max(5, int(0.6 * len(classes)))
    plt.figure(figsize=(f_size, f_size))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate each cell; flip text color on dark cells for readability.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    image = plt_to_image_summary(plt)
    return Summary(value=[Summary.Value(tag=tag, image=image)])
def roc_summary(tag, y_true, y_pred, n_classes):
    """Render micro-/macro-averaged and per-class ROC curves as an image summary.

    NOTE(review): `label_binarize` returns a single column for n_classes == 2,
    so the per-class loop presumably requires n_classes > 2 -- confirm before
    using with binary problems. `scipy.interp` is a deprecated alias of
    `numpy.interp` in newer SciPy releases.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from itertools import cycle
    from sklearn.metrics import roc_curve, auc
    from sklearn.preprocessing import label_binarize
    from scipy import interp

    # One-hot encode labels/predictions so per-class ROC curves can be computed.
    classes = [i for i in range(n_classes)]
    y_true = label_binarize(y_true, classes=classes)
    y_pred = label_binarize(y_pred, classes=classes)

    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area.
    fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_pred.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    lw = 2

    # Compute macro-average ROC curve and ROC area:
    # first aggregate all false positive rates...
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))

    # ...then interpolate all ROC curves at these points...
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])

    # ...finally average and compute the AUC.
    mean_tpr /= n_classes

    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    # Plot all ROC curves.
    plt.close('all')
    f_size = max(5, int(0.6 * len(classes)))
    plt.figure(figsize=(f_size, f_size))
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(range(n_classes), colors):
        plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                 label='ROC curve of class {0} (area = {1:0.2f})'
                       ''.format(i, roc_auc[i]))
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")

    # Rasterize the figure into a Summary.Image.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    image = Image.open(buf).convert('RGB')
    tensor = np.asarray(image, dtype=np.uint8)
    image = make_image(tensor)
    return Summary(value=[Summary.Value(tag=tag, image=image)])
class EvalImageVisualizationHook(tf.train.SessionRunHook):
    """Every `every_n_steps` eval steps, fetches the named tensors and writes a
    composed image summary via `show_images`; also records eval throughput.
    """

    def __init__(self, images_name, labels_name, filenames_name, probs_name, raw_images_name=None,
                 heat_map_features_name=None, every_n_steps=100, summary_dir=None, max_images=8):
        self._images_name = images_name
        self._labels_name = labels_name
        self._heat_map_features_name = heat_map_features_name
        self._probs_name = probs_name
        self._every_n_steps = every_n_steps
        self._summary_dir = summary_dir
        self._step = 0
        self._run_begin = 0
        self._run_end = 0
        self._max_images = max_images
        self._duration = 0.0
        self._raw_images_name = raw_images_name
        self._filenames_name = filenames_name
        # FIX: both attributes below were referenced but never initialized,
        # raising AttributeError on the first fetch failure and in `end` when
        # the eval loop ran fewer than `every_n_steps` batches.
        self.is_logged = False
        self._total_batch_size = None

    def begin(self):
        self._summary_writer = tf.summary.FileWriterCache.get(self._summary_dir)
        self._global_step_tensor = training_util._get_or_create_global_step_read()

    def before_run(self, run_context):
        """On logging steps, request the named tensors (best effort)."""
        self._run_begin = time.time()

        if self._step > 0 and self._step % self._every_n_steps == 0:
            arg_map = {}
            for name in [self._images_name, self._labels_name, self._filenames_name, self._raw_images_name,
                         self._heat_map_features_name, self._probs_name]:
                if name is not None:
                    try:
                        arg_map[name] = basic_session_run_hooks._as_graph_element(name)
                    except Exception as e:
                        # Log resolution failures only once to avoid spam.
                        if not self.is_logged:
                            tf.logging.error('{} error {}'.format(name, e))
                            self.is_logged = True

            arg_map['global_step'] = self._global_step_tensor
            return SessionRunArgs(arg_map)

    def _log_and_record(self, step):
        """Write eval img/sec and ms/img of the last run to the summary writer."""
        if self._summary_writer is not None:
            if self._total_batch_size:
                img_per_sec_tag = 'eval/img_per_sec'
                img_per_sec_tag_value = self._total_batch_size / (self._run_end - self._run_begin)
                sec_per_img_tag = 'eval/sec_per_img'
                sec_per_img_tag_value = 1 / img_per_sec_tag_value * 1000
                summary = Summary(value=[Summary.Value(tag=img_per_sec_tag, simple_value=img_per_sec_tag_value),
                                         Summary.Value(tag=sec_per_img_tag, simple_value=sec_per_img_tag_value)])
                logging.info("%s: %g, %s: %g ms, step: %g",
                             img_per_sec_tag, img_per_sec_tag_value, sec_per_img_tag, sec_per_img_tag_value, step)
                self._summary_writer.add_summary(summary, step)

    def after_run(self, run_context, run_values):
        self._run_end = time.time()
        self._duration += self._run_end - self._run_begin

        # Step 0 is skipped so the first (warm-up) run is never logged.
        if self._step > 0 and self._step % self._every_n_steps == 0:
            results = run_values.results
            global_step = results['global_step']
            images = get_key_or_none(results, self._images_name)
            labels = get_key_or_none(results, self._labels_name)
            filenames = get_key_or_none(results, self._filenames_name)
            raw_images = get_key_or_none(results, self._raw_images_name)
            heat_map_features = get_key_or_none(results, self._heat_map_features_name)
            probs = get_key_or_none(results, self._probs_name)

            self._total_batch_size = len(images) * hvd.size()
            self._log_and_record(self._step + global_step)
            show_images(filenames, images, raw_images, heat_map_features, labels, probs, self._step + global_step,
                        self._max_images, self._summary_writer, prefix='eval')

        self._step += 1

    def end(self, session):
        """Log aggregate eval throughput for the whole run."""
        # FIX: previously crashed when no logging step had run (batch size unknown).
        if not self._total_batch_size or self._duration <= 0:
            return
        total_image_count = self._step * self._total_batch_size
        image_per_second = total_image_count / self._duration
        second_per_image = self._duration / total_image_count * 1000
        logging.info('total {}: {}, {}: {}, {}: {}, {}: {} ms'.format('duration', self._duration, 'total_image_count',
                                                                      total_image_count, 'image_per_second',
                                                                      image_per_second, 'second_per_image',
                                                                      second_per_image))
class SpeedHook(basic_session_run_hooks.StepCounterHook):
    """StepCounterHook variant that also logs and records images/sec.

    The effective batch size is the per-worker `batch_size` times `hvd.size()`.
    """

    def __init__(self, summary_dir, batch_size, every_n_steps=100):
        super(SpeedHook, self).__init__(every_n_steps=every_n_steps, output_dir=summary_dir)
        self._total_batch_size = batch_size * hvd.size()

    def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
        """Write steps/sec (plus images/sec when batch size is known) to the summary."""
        steps_per_sec = elapsed_steps / elapsed_time
        if self._summary_writer is not None:
            if self._total_batch_size:
                image_tag = 'images_sec'
                image_count = float(steps_per_sec) * self._total_batch_size
                summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec),
                                         Summary.Value(tag=image_tag, simple_value=image_count)])
                logging.info("%s: %g, %s: %g, step: %g", self._summary_tag, steps_per_sec, image_tag, image_count,
                             global_step)
            else:
                summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)])
                logging.info("%s: %g, step: %g", self._summary_tag, steps_per_sec, global_step)
            self._summary_writer.add_summary(summary, global_step)
def get_key_or_none(d, key):
    """Return d[key] when present, else None."""
    return d.get(key)
class PrefillStagingAreasHook(tf.train.SessionRunHook):
    """After session creation, runs the staging-area put ops with growing
    prefixes so every stage is primed before training starts."""

    def after_create_session(self, session, coord):
        # TODO: this assumes TF collections are ordered; is this safe?
        puts = tf.get_collection('STAGING_AREA_PUTS')
        for idx, _ in enumerate(puts):
            session.run(puts[:idx + 1])
class OomReportingHook(tf.train.SessionRunHook):
    """Attach RunOptions so TF reports tensor allocations when an OOM occurs."""

    def before_run(self, run_context):
        opts = tf.RunOptions(report_tensor_allocations_upon_oom=True)
        # No extra fetches -- only the options matter.
        return SessionRunArgs(fetches=[], options=opts)
| 39.378467 | 123 | 0.628154 | import io
import itertools
import os
import time
import cv2
try:
import horovod.tensorflow as hvd
except ImportError:
hvd = None
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from PIL import Image
from PIL import ImageDraw, ImageFont
from sklearn.metrics import confusion_matrix
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from collections import OrderedDict
from horovod_estimator.utis import hvd_info_rank0, is_rank0
# Headless backend: figures are rendered off-screen into summaries only.
mpl.use('Agg')
# Silence divide/invalid warnings raised by the normalization helpers below.
np.seterr(divide='ignore', invalid='ignore')
class BroadcastGlobalVariablesHook(tf.train.SessionRunHook):
    """SessionRunHook that broadcasts all global variables from `root_rank` to
    every other process after session creation, optionally warm-starting from a
    pretrained checkpoint (restored on rank 0 only, then broadcast).
    """

    def __init__(self, root_rank, pretrained_model_path=None, exclusions=[], device='', model_dir=None):
        """Args: see class docstring; `exclusions` lists variable-name prefixes
        that must not be restored from the pretrained checkpoint. The mutable
        default `exclusions=[]` is harmless: it is copied into a fresh set.
        """
        super(BroadcastGlobalVariablesHook, self).__init__()
        self.root_rank = root_rank
        self.bcast_op = None
        self.device = device
        self._pretrained_model_path = pretrained_model_path
        self._saver = None
        self._exclusions = set(exclusions)
        self._variables_to_restore = []
        self._model_dir = model_dir

    def begin(self):
        """Build the broadcast op and, on rank 0, a Saver for warm-start restore."""
        if not self.bcast_op or self.bcast_op.graph != tf.get_default_graph():
            with tf.device(self.device):
                self.bcast_op = hvd.broadcast_global_variables(self.root_rank)
        if self._model_dir is not None:
            checkpoint_path = checkpoint_management.latest_checkpoint(self._model_dir)
            # An existing (non step-0) checkpoint wins over the pretrained model.
            if checkpoint_path is not None and not checkpoint_path.endswith('model.ckpt-0'):
                hvd_info_rank0('>>>>> model_dir {} has checkpoint {}, not using pretrained_model_path <<<<<'.
                               format(self._model_dir, checkpoint_path))
                return
        if self._pretrained_model_path is not None and len(self._pretrained_model_path) > 0 and is_rank0():
            reader = pywrap_tensorflow.NewCheckpointReader(self._pretrained_model_path)
            # sorted(dict) yields only the checkpoint's variable names (keys).
            var_to_shape_map = sorted(reader.get_variable_to_shape_map())
            self._exclusions.add('global_step')
            for var in tf.global_variables():
                if var.op.name in var_to_shape_map:
                    excluded = False
                    for exclusion in self._exclusions:
                        if var.op.name.startswith(exclusion):
                            excluded = True
                            break
                    if not excluded:
                        self._variables_to_restore.append(var)
            self._saver = tf.train.Saver(var_list=self._variables_to_restore)

    def after_create_session(self, session, coord):
        """Restore pretrained weights on rank 0, then broadcast to all ranks."""
        if self._saver:
            hvd_info_rank0('>>>>> begin to load weights from {}, restore variables length {}, without variables {}'
                           .format(self._pretrained_model_path, len(self._variables_to_restore), self._exclusions))
            self._saver.restore(session, self._pretrained_model_path)
            hvd_info_rank0('<<<<< end to load weights')
        hvd_info_rank0('>>>>> broadcast global variables begin during after_create_session')
        session.run(self.bcast_op)
        hvd_info_rank0('<<<<< broadcast global variables end during after_create_session ')
class LoggingTensorHook(tf.train.SessionRunHook):
    """Every `every_n_iter` steps, fetches the tensors in `named_tensor`
    (optionally averaged across Horovod ranks via allreduce), logs them on
    rank 0 and writes them as `avg/*` scalar summaries when `summary_dir`
    is set.
    """

    def __init__(self, named_tensor, summary_dir=None, every_n_iter=100, use_all_reduce=False):
        super(LoggingTensorHook, self).__init__()
        self._named_tensor = named_tensor
        self._every_n_iter = every_n_iter
        self._summary_dir = summary_dir
        self._step = 0
        self._use_all_reduce = use_all_reduce
        self._tic = time.time()
        self._avg_ops = {}
        self._global_step_tensor = None

    def begin(self):
        """Resolve tensor names to graph elements; wrap in allreduce if requested."""
        if self._use_all_reduce:
            self._avg_ops = OrderedDict({'{}'.format(tag): hvd.allreduce(basic_session_run_hooks._as_graph_element(tensor))
                                         for (tag, tensor) in self._named_tensor.items()})
        else:
            self._avg_ops = OrderedDict({'{}'.format(tag): basic_session_run_hooks._as_graph_element(tensor)
                                         for (tag, tensor) in self._named_tensor.items()})
        self._global_step_tensor = tf.train.get_or_create_global_step()
        self._avg_ops['step'] = self._global_step_tensor

    def before_run(self, run_context):
        # FIX: reset the timer on *every* step. Previously this assignment sat
        # after the early return, so it never ran on logging steps and the
        # reported `step_time` covered the previous step's run as well.
        self._tic = time.time()
        if self._step % self._every_n_iter == 0:
            return SessionRunArgs(fetches=self._avg_ops)

    def _log_tensors(self, tensor_values):
        """Pretty-print the fetched values plus the wall time since before_run."""
        original = np.get_printoptions()
        np.set_printoptions(suppress=True)
        stats = []
        for tag, tensor in tensor_values.items():
            stats.append('%s = %s' % (tag, tensor))
        stats.append('%s = %s' % ('step_time', time.time() - self._tic))

        if self._use_all_reduce:
            logging_head = 'logging all reduce tensors'
        else:
            logging_head = 'logging tensors'
        hvd_info_rank0("{}: {}".format(logging_head, ", ".join(stats)))
        np.set_printoptions(**original)

    def _summary(self, tensor_values):
        """Write each fetched value as an `avg/<tag>` scalar at the fetched step."""
        if self._summary_dir:
            writer = tf.summary.FileWriterCache.get(self._summary_dir)
            this_summary = tf.Summary()
            for tag, value in tensor_values.items():
                this_summary.value.add(tag='avg/{}'.format(tag), simple_value=value)
            writer.add_summary(this_summary, tensor_values['step'])
            writer.flush()

    def after_run(self, run_context, run_values):
        if self._step % self._every_n_iter == 0:
            # With allreduce, every rank fetched; only rank 0 reports.
            if is_rank0() or not self._use_all_reduce:
                avg_values = run_values.results
                self._log_tensors(avg_values)
                self._summary(avg_values)
        self._step += 1
def make_image(tensor):
    """Convert an HWC uint8 numpy array into a `Summary.Image` protobuf (PNG)."""
    height, width, channel = tensor.shape
    with io.BytesIO() as output:
        Image.fromarray(tensor).save(output, format='PNG')
        png_bytes = output.getvalue()
    return Summary.Image(height=height,
                         width=width,
                         colorspace=channel,
                         encoded_image_string=png_bytes)
def to_fix_format(i):
    """Format a number for display: integers verbatim, anything else with 2 decimals."""
    if isinstance(i, (int, np.int32, np.int64)):
        return str(i)
    return '{:.2f}'.format(i)
def draw_text_image(text, size=(224, 224)):
    """Render white `text` on a black RGB canvas of the given size."""
    canvas = Image.new('RGB', size, (0, 0, 0))
    drawer = ImageDraw.Draw(canvas)
    # Fixed 20pt monospace at a small top-left offset.
    fnt = ImageFont.truetype('assets/fonts/FreeMono.ttf', size=20)
    drawer.text((10, 10), text, font=fnt, fill=(255, 255, 255))
    return canvas
def scale_to_uint8(features_tensor):
    """Min-max scale an array into [0, 255] and cast to uint8.

    FIX: a constant-valued input previously divided by zero and cast NaN to
    uint8 (undefined result); it now maps to all zeros. Empty input is
    returned unchanged.
    """
    if len(features_tensor) > 0:
        min_f = np.min(features_tensor)
        max_f = np.max(features_tensor)
        spread = max_f - min_f
        if spread > 0:
            features_tensor = (features_tensor - min_f) / spread * 255
        else:
            features_tensor = np.zeros_like(features_tensor, dtype=np.float32)
        features_tensor = features_tensor.astype(np.uint8)
    return features_tensor
def top_k_text(prob_array, k):
    """Return the top-k '(index: prob)' pairs, highest probability first, one per line."""
    order = np.argsort(prob_array)[::-1][:k]
    lines = ['{}: {}'.format(idx, to_fix_format(prob_array[idx]))
             for idx in order]
    return '\n'.join(lines)
def find_xy(img, threshold=0.8, percentile=False):
    """Bounding box (x_min, y_min, x_max, y_max) of pixels brighter than
    `threshold` * max, measured after trimming a 3-pixel border.

    With `percentile=True` the coordinates are returned as fractions of the
    trimmed image size, clamped to 1.0.
    """
    x_offset, y_offset = 3, 3
    inner = img[x_offset:-x_offset, y_offset:-y_offset]
    cutoff = threshold * np.max(inner)
    rows, cols = np.nonzero(inner > cutoff)
    x_min = np.min(cols) + x_offset
    x_max = np.max(cols) + x_offset
    y_min = np.min(rows) + y_offset
    y_max = np.max(rows) + y_offset
    if percentile:
        h, w = inner.shape
        x_min, x_max = x_min / w, x_max / w
        y_min, y_max = y_min / h, y_max / h
        x_max = min(1.0, x_max)
        y_max = min(1.0, y_max)
    return x_min, y_min, x_max, y_max
def draw_box(img_tensor, box):
    """Draw a filled white rectangle `box` on `img_tensor`, return a uint8 array.

    NOTE(review): `outline=3` looks like it was meant to be a line width, but
    PIL interprets `outline` as a color -- confirm the intent.
    """
    x_min, y_min, x_max, y_max = box
    canvas = Image.fromarray(img_tensor)
    ImageDraw.Draw(canvas).rectangle(((x_min, y_min), (x_max, y_max)), fill='white', outline=3)
    return np.asarray(canvas, dtype=np.uint8)
def show_images(filenames, images, raw_images, heat_map_features, labels, probs, global_step, max_images,
                summary_writer, prefix='train'):
    """Compose a horizontal strip per row (filenames / raw images / preprocessed
    images / CAM heatmaps / label+top-k text) and write it as one image summary.

    No-op when `summary_writer` is None. `images` is assumed to be NHWC float
    in [0, 1] -- TODO confirm with the input pipeline. `raw_images` and
    `heat_map_features` rows are skipped when None.
    """
    if summary_writer is not None:
        assert images is not None and labels is not None and probs is not None
        n, height, width, channel = images.shape
        # 1-pixel separators between tiles (uint8 white / float 1.0).
        padding_255 = np.ones([height, 1, channel], dtype=np.uint8) * 255
        padding_1 = np.ones([height, 1, channel], dtype=np.float32)
        filenames_tensor_list = []
        raw_images_tensor_list = []
        images_tensor_list = []
        heat_map_tensor_list = []
        label_tensor_list = []
        max_images = min(max_images, n)
        for i in range(max_images):
            images_tensor_list.append(images[i])
            images_tensor_list.append(padding_1)
            if raw_images is not None:
                raw_images_tensor_list.append(raw_images[i])
                raw_images_tensor_list.append(padding_1)
            if heat_map_features is not None:
                # Class-activation map for the true label, min-max scaled to uint8.
                cam = heat_map_features[i][:, :, labels[i]]
                cam = cam - np.min(cam)
                cam_img = cam / np.max(cam)
                cam_img = np.uint8(255 * cam_img)
                heat_map = cv2.applyColorMap(cv2.resize(cam_img, (height, width)), cv2.COLORMAP_JET)
                blue_map = heat_map[:, :, -1]
                box = find_xy(blue_map)
                heat_map = draw_box(heat_map, box)
                heat_map = heat_map / 255.0
                # Blend heatmap over the input image.
                heat_img = heat_map * 0.7 + images[i] * 0.3
                heat_map_tensor_list.append(heat_img)
                heat_map_tensor_list.append(padding_1)
            # Labels & predictions: true label + top-k probabilities as text.
            probs_show_num = min(5, probs.shape[-1])
            text = '{}: {}\n'.format(to_fix_format(labels[i]), to_fix_format(probs[i][labels[i]])) \
                   + top_k_text(probs[i], probs_show_num)
            label_image = draw_text_image(text, (height, width))
            label_tensor_list.append(np.asarray(label_image, dtype=np.uint8))
            label_tensor_list.append(padding_255)
            filename = filenames[i]
            if isinstance(filename, bytes):
                filename = filename.decode('utf-8')
            filename = filename.split('/')[-1]
            filename_image = draw_text_image(filename, (height, width))
            filenames_tensor_list.append(np.asarray(filename_image, dtype=np.uint8))
            filenames_tensor_list.append(padding_255)
        # Scale float32 rows to uint8 and stack all rows vertically.
        all_tensor_list = [scale_to_uint8(np.concatenate(filenames_tensor_list, axis=1))]
        if raw_images is not None:
            all_tensor_list.append(scale_to_uint8(np.concatenate(raw_images_tensor_list, axis=1)))
        all_tensor_list.append(scale_to_uint8(np.concatenate(images_tensor_list, axis=1)))
        if heat_map_features is not None:
            all_tensor_list.append(scale_to_uint8(np.concatenate(heat_map_tensor_list, axis=1)))
        all_tensor_list.append(np.concatenate(label_tensor_list, axis=1))
        feature_heatmap_label_tensor = np.concatenate(all_tensor_list, axis=0)
        summary = Summary(value=[Summary.Value(tag='{}/features_heatmap_labels'.format(prefix),
                                               image=make_image(feature_heatmap_label_tensor))])
        summary_writer.add_summary(summary, global_step)
def plt_to_image_summary(plt):
    """Snapshot the current matplotlib figure as a `Summary.Image` protobuf."""
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    rgb = np.asarray(Image.open(buf).convert('RGB'), dtype=np.uint8)
    return make_image(rgb)
def confusion_matrix_summary(tag, cm, classes, normalize=False, recall=True, title='Confusion matrix',
                             cmap=plt.cm.Blues):
    """Render a confusion matrix as a TF image summary.

    `cm` is a square confusion matrix; rows = true labels, columns =
    predictions (sklearn convention -- assumed, confirm with callers). With
    `normalize`, `recall=True` divides each row by its row sum (recall) and
    `recall=False` divides each column by its column sum (precision).
    """
    if normalize:
        eps = np.finfo(np.float32).eps
        if recall:
            s = cm.sum(axis=1)[:, np.newaxis] + eps
        else:
            # FIX: precision must divide each *column* by its column sum. The
            # previous code built a column vector of column sums, which divided
            # row i by the sum of column i instead.
            s = cm.sum(axis=0)[np.newaxis, :] + eps
        cm = cm.astype('float') / s
    plt.close('all')
    f_size = max(5, int(0.6 * len(classes)))
    plt.figure(figsize=(f_size, f_size))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate each cell; flip text color on dark cells for readability.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    image = plt_to_image_summary(plt)
    return Summary(value=[Summary.Value(tag=tag, image=image)])
def roc_summary(tag, y_true, y_pred, n_classes):
    """Plot per-class, micro- and macro-averaged ROC curves as a tf.Summary image.

    Args:
        tag: summary tag under which the figure is written.
        y_true: integer class labels (one per sample).
        y_pred: predicted integer class labels (one per sample).
        n_classes: total number of classes; labels are assumed in [0, n_classes).

    Returns:
        A tf.Summary proto with the rendered ROC figure as one image value.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from itertools import cycle
    from sklearn.metrics import roc_curve, auc
    from sklearn.preprocessing import label_binarize
    classes = [i for i in range(n_classes)]
    # One-hot encode so per-class curves can be computed column-wise.
    y_true = label_binarize(y_true, classes=classes)
    y_pred = label_binarize(y_pred, classes=classes)
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Micro-average: pool every (sample, class) decision into one curve.
    fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_pred.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    lw = 2
    # Macro-average: mean of the per-class curves on a common FPR grid.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        # np.interp replaces the deprecated scipy.interp (removed in SciPy 1.x).
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    plt.close('all')
    f_size = max(5, int(0.6 * len(classes)))
    plt.figure(figsize=(f_size, f_size))
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(range(n_classes), colors):
        plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                 label='ROC curve of class {0} (area = {1:0.2f})'
                       ''.format(i, roc_auc[i]))
    # Diagonal = performance of a random classifier.
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")
    # Reuse the shared figure-to-proto helper instead of duplicating it inline.
    image = plt_to_image_summary(plt)
    return Summary(value=[Summary.Value(tag=tag, image=image)])
class EvalImageVisualizationHook(tf.train.SessionRunHook):
    """Session hook that periodically renders evaluation batches to TensorBoard.

    Every ``every_n_steps`` evaluation steps the hook fetches the configured
    tensors by name (images, labels, filenames, probabilities and, optionally,
    raw images and heat-map features), logs eval throughput, and writes an
    image grid summary via show_images().
    """

    def __init__(self, images_name, labels_name, filenames_name, probs_name, raw_images_name=None,
                 heat_map_features_name=None, every_n_steps=100, summary_dir=None, max_images=8):
        self._images_name = images_name
        self._labels_name = labels_name
        self._heat_map_features_name = heat_map_features_name
        self._probs_name = probs_name
        self._every_n_steps = every_n_steps
        self._summary_dir = summary_dir
        self._step = 0
        self._run_begin = 0
        self._run_end = 0
        self._max_images = max_images
        self._duration = 0.0
        self._raw_images_name = raw_images_name
        self._filenames_name = filenames_name
        # Bug fix: both attributes below were previously first assigned
        # outside __init__, which raised AttributeError when read before
        # being set (is_logged in before_run, _total_batch_size in end()).
        self.is_logged = False
        self._total_batch_size = None

    def begin(self):
        self._summary_writer = tf.summary.FileWriterCache.get(self._summary_dir)
        self._global_step_tensor = training_util._get_or_create_global_step_read()

    def before_run(self, run_context):
        self._run_begin = time.time()
        if self._step > 0 and self._step % self._every_n_steps == 0:
            # On a reporting step, resolve the configured tensor names to
            # graph elements so they get fetched alongside the regular run.
            arg_map = {}
            for name in [self._images_name, self._labels_name, self._filenames_name, self._raw_images_name,
                         self._heat_map_features_name, self._probs_name]:
                if name is not None:
                    try:
                        arg_map[name] = basic_session_run_hooks._as_graph_element(name)
                    except Exception as e:
                        # Log a missing/bad tensor name only once to avoid
                        # spamming the log on every reporting step.
                        if not self.is_logged:
                            tf.logging.error('{} error {}'.format(name, e))
                            self.is_logged = True
            arg_map['global_step'] = self._global_step_tensor
            return SessionRunArgs(arg_map)

    def _log_and_record(self, step):
        # Emit images/sec and ms/image scalars for the last timed run.
        if self._summary_writer is not None:
            if self._total_batch_size:
                img_per_sec_tag = 'eval/img_per_sec'
                img_per_sec_tag_value = self._total_batch_size / (self._run_end - self._run_begin)
                sec_per_img_tag = 'eval/sec_per_img'
                sec_per_img_tag_value = 1 / img_per_sec_tag_value * 1000
                summary = Summary(value=[Summary.Value(tag=img_per_sec_tag, simple_value=img_per_sec_tag_value),
                                         Summary.Value(tag=sec_per_img_tag, simple_value=sec_per_img_tag_value)])
                logging.info("%s: %g, %s: %g ms, step: %g",
                             img_per_sec_tag, img_per_sec_tag_value, sec_per_img_tag, sec_per_img_tag_value, step)
                self._summary_writer.add_summary(summary, step)

    def after_run(self, run_context, run_values):
        self._run_end = time.time()
        self._duration += self._run_end - self._run_begin
        if self._step > 0 and self._step % self._every_n_steps == 0:
            results = run_values.results
            global_step = results['global_step']
            images = get_key_or_none(results, self._images_name)
            labels = get_key_or_none(results, self._labels_name)
            filenames = get_key_or_none(results, self._filenames_name)
            raw_images = get_key_or_none(results, self._raw_images_name)
            heat_map_features = get_key_or_none(results, self._heat_map_features_name)
            probs = get_key_or_none(results, self._probs_name)
            # Aggregate batch size across all horovod workers.
            self._total_batch_size = len(images) * hvd.size()
            self._log_and_record(self._step + global_step)
            show_images(filenames, images, raw_images, heat_map_features, labels, probs, self._step + global_step,
                        self._max_images, self._summary_writer, prefix='eval')
        self._step += 1

    def end(self, session):
        if not self._total_batch_size or not self._duration:
            # No reporting step ever ran; there is nothing to summarize
            # (previously this raised AttributeError / ZeroDivisionError).
            return
        total_image_count = self._step * self._total_batch_size
        image_per_second = total_image_count / self._duration
        second_per_image = self._duration / total_image_count * 1000
        logging.info('total {}: {}, {}: {}, {}: {}, {}: {} ms'.format('duration', self._duration, 'total_image_count',
                                                                      total_image_count, 'image_per_second',
                                                                      image_per_second, 'second_per_image',
                                                                      second_per_image))
class SpeedHook(basic_session_run_hooks.StepCounterHook):
    """StepCounterHook variant that also reports image throughput.

    In addition to the inherited steps/sec scalar, it writes an
    ``images_sec`` summary computed from the per-worker batch size scaled
    by the number of horovod workers.
    """

    def __init__(self, summary_dir, batch_size, every_n_steps=100):
        super(SpeedHook, self).__init__(every_n_steps=every_n_steps, output_dir=summary_dir)
        # Effective global batch size across all horovod workers.
        self._total_batch_size = batch_size * hvd.size()

    def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
        # Overrides StepCounterHook._log_and_record to add images/sec.
        steps_per_sec = elapsed_steps / elapsed_time
        if self._summary_writer is not None:
            if self._total_batch_size:
                image_tag = 'images_sec'
                image_count = float(steps_per_sec) * self._total_batch_size
                summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec),
                                         Summary.Value(tag=image_tag, simple_value=image_count)])
                logging.info("%s: %g, %s: %g, step: %g", self._summary_tag, steps_per_sec, image_tag, image_count,
                             global_step)
            else:
                # Batch size unknown (falsy): fall back to steps/sec only.
                summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)])
                logging.info("%s: %g, step: %g", self._summary_tag, steps_per_sec, global_step)
            self._summary_writer.add_summary(summary, global_step)
def get_key_or_none(d, key):
    """Return ``d[key]`` when *key* is present in mapping *d*, else None."""
    return d.get(key)
class PrefillStagingAreasHook(tf.train.SessionRunHook):
    """Primes pipelined staging areas before training starts.

    Runs the collected 'STAGING_AREA_PUTS' ops in growing prefixes
    (op0; op0,op1; ...) so each stage of the pipeline is filled before the
    next stage's put depends on it.
    """

    def after_create_session(self, session, coord):
        enqueue_ops = tf.get_collection('STAGING_AREA_PUTS')
        for i in range(len(enqueue_ops)):
            # Prefix run: re-runs earlier puts while adding the next one,
            # which keeps every upstream stage one element ahead.
            session.run(enqueue_ops[:i + 1])
class OomReportingHook(tf.train.SessionRunHook):
    """Makes every session.run report tensor allocations on OOM.

    Fetches nothing itself; it only attaches RunOptions so that an
    out-of-memory failure dumps the current tensor allocations, which makes
    OOM errors diagnosable.
    """

    def before_run(self, run_context):
        return SessionRunArgs(fetches=[],
                              options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
| true | true |
f7228dd35cfbd2798f11c473ce682186c8a88f74 | 3,626 | py | Python | machina/apps/forum_member/receivers.py | jujinesy/initdjango-machina | 93c24877f546521867b3ef77fa278237af932d42 | [
"BSD-3-Clause"
] | 1 | 2021-10-08T03:31:24.000Z | 2021-10-08T03:31:24.000Z | machina/apps/forum_member/receivers.py | jujinesy/initdjango-machina | 93c24877f546521867b3ef77fa278237af932d42 | [
"BSD-3-Clause"
] | 7 | 2020-02-12T01:11:13.000Z | 2022-03-11T23:26:32.000Z | machina/apps/forum_member/receivers.py | jujinesy/initdjango-machina | 93c24877f546521867b3ef77fa278237af932d42 | [
"BSD-3-Clause"
] | 1 | 2019-04-20T05:26:27.000Z | 2019-04-20T05:26:27.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import F
from django.db.models.signals import post_delete
from django.db.models.signals import pre_save
from django.dispatch import receiver
from machina.core.db.models import get_model
User = get_user_model()
Post = get_model('forum_conversation', 'Post')
ForumProfile = get_model('forum_member', 'ForumProfile')
@receiver(pre_save, sender=Post)
def increase_posts_count(sender, instance, **kwargs):
    """ Increases the member's post count after a post save.

    This receiver handles the update of the profile related to the user who is the poster of the
    forum post being created or updated.  The counter only moves for approved posts: either a
    brand-new approved post, or an existing post flipping from unapproved to approved.
    """
    if instance.poster is None:
        # An anonymous post is considered. No profile can be updated in
        # that case.
        return
    profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
    # Renamed from "increase_posts_count": the previous local shadowed this
    # receiver function's own name.
    should_increase = False
    if instance.pk:
        try:
            old_instance = instance.__class__._default_manager.get(pk=instance.pk)
        except ObjectDoesNotExist:  # pragma: no cover
            # This should never happen (except with django loaddata command)
            should_increase = True
            old_instance = None
        if old_instance and old_instance.approved is False and instance.approved is True:
            # Existing post just got approved: it now counts.
            should_increase = True
    elif instance.approved:
        # New post created directly in the approved state.
        should_increase = True
    if should_increase:
        # F() expression keeps the increment atomic at the database level.
        profile.posts_count = F('posts_count') + 1
        profile.save()
@receiver(pre_save, sender=Post)
def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):
    """ Decreases the member's post count after a post unaproval.

    This receiver handles the unaproval of a forum post: the posts count associated with the
    post's author is decreased when an existing approved post flips to unapproved.
    """
    if not instance.pk:
        # Do not consider posts being created.
        return
    if instance.poster is None:
        # Anonymous post: consistent with increase_posts_count, there is no
        # profile to update (previously get_or_create(user=None) ran here).
        return
    try:
        old_instance = instance.__class__._default_manager.get(pk=instance.pk)
    except ObjectDoesNotExist:  # pragma: no cover
        # This should never happen (except with django loaddata command)
        return
    if old_instance and old_instance.approved is True and instance.approved is False:
        # Fetch (or lazily create) the profile only when a decrement is
        # actually needed, instead of unconditionally on every save.
        profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
        # F() expression keeps the decrement atomic at the database level.
        profile.posts_count = F('posts_count') - 1
        profile.save()
@receiver(post_delete, sender=Post)
def decrease_posts_count_after_post_deletion(sender, instance, **kwargs):
    """ Decreases the member's post count after a post deletion.

    This receiver handles the deletion of a forum post: the posts count related to the post's
    author is decreased, provided the post had been counted (approved) in the first place.
    """
    if not instance.approved:
        # If a post has not been approved, it has not been counted.
        # So do not decrement count
        return
    if instance.poster_id is None:
        # An anonymous post is considered. No profile can be updated in that
        # case. (Replaces an ``assert`` used for control flow, which would be
        # stripped when Python runs with -O.)
        return
    try:
        poster = User.objects.get(pk=instance.poster_id)
    except ObjectDoesNotExist:  # pragma: no cover
        # This can happen if a User instance is deleted. In that case the
        # User instance is not available and the receiver should return.
        return
    profile, dummy = ForumProfile.objects.get_or_create(user=poster)
    if profile.posts_count:
        # F() expression keeps the decrement atomic at the database level.
        profile.posts_count = F('posts_count') - 1
        profile.save()
| 34.533333 | 99 | 0.704633 |
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import F
from django.db.models.signals import post_delete
from django.db.models.signals import pre_save
from django.dispatch import receiver
from machina.core.db.models import get_model
User = get_user_model()
Post = get_model('forum_conversation', 'Post')
ForumProfile = get_model('forum_member', 'ForumProfile')
@receiver(pre_save, sender=Post)
def increase_posts_count(sender, instance, **kwargs):
if instance.poster is None:
return
profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
increase_posts_count = False
if instance.pk:
try:
old_instance = instance.__class__._default_manager.get(pk=instance.pk)
except ObjectDoesNotExist:
increase_posts_count = True
old_instance = None
if old_instance and old_instance.approved is False and instance.approved is True:
increase_posts_count = True
elif instance.approved:
increase_posts_count = True
if increase_posts_count:
profile.posts_count = F('posts_count') + 1
profile.save()
@receiver(pre_save, sender=Post)
def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):
if not instance.pk:
return
profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
try:
old_instance = instance.__class__._default_manager.get(pk=instance.pk)
except ObjectDoesNotExist:
return
if old_instance and old_instance.approved is True and instance.approved is False:
profile.posts_count = F('posts_count') - 1
profile.save()
@receiver(post_delete, sender=Post)
def decrease_posts_count_after_post_deletion(sender, instance, **kwargs):
if not instance.approved:
return
try:
assert instance.poster_id is not None
poster = User.objects.get(pk=instance.poster_id)
except AssertionError:
return
except ObjectDoesNotExist:
return
profile, dummy = ForumProfile.objects.get_or_create(user=poster)
if profile.posts_count:
profile.posts_count = F('posts_count') - 1
profile.save()
| true | true |
f7228e2898aabcc8eb0ba58d9c1b268f97c72316 | 6,547 | py | Python | toontown/safezone/Train.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | null | null | null | toontown/safezone/Train.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/safezone/Train.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed.ClockDelta import NetworkTimePrecision
import random
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
class Train(DirectObject):
    """A scripted cargo train that shuttles along a straight track in Cashbot HQ.

    Runs are scheduled on a shared network-time grid (one "mark" every
    MarkDelta seconds), with each track's schedule offset so trains on
    different tracks are staggered.  The car consist and the occasional
    "stop in the middle" behaviour are drawn from a random generator seeded
    with (trainId + run number), so every client computes the same run
    deterministically without any extra network traffic.
    """
    notify = directNotify.newCategory('Train')
    nameId = 0
    Sfx_TrainPass = 'phase_10/audio/sfx/CBHQ_TRAIN_pass.ogg'
    Sfx_TrainStopStart = 'phase_10/audio/sfx/CBHQ_TRAIN_stopstart.ogg'
    LocomotiveFile = 'phase_10/models/cogHQ/CashBotLocomotive'
    CarFiles = ['phase_10/models/cogHQ/CashBotBoxCar', 'phase_10/models/cogHQ/CashBotTankCar', 'phase_10/models/cogHQ/CashBotFlatCar']
    CarLength = 88
    MarkDelta = 15

    def __init__(self, trackStartPos, trackEndPos, trackNum, numTotalTracks):
        """Load models/sounds, compute this track's schedule and start the first run."""
        self.trackStartPos = trackStartPos
        self.trackEndPos = trackEndPos
        self.numCars = len(self.CarFiles)
        self.locomotive = loader.loadModel(self.LocomotiveFile)
        self.cars = []
        self.trainPassingSfx = base.loader.loadSfx(self.Sfx_TrainPass)
        self.trainStopStartSfx = base.loader.loadSfx(self.Sfx_TrainStopStart)
        self.trainId = trackNum
        self.bFlipped = False
        if trackStartPos[0] < trackEndPos[0]:
            # Track runs in the +X direction: face the locomotive the other way.
            self.locomotive.setHpr(180, 0, 0)
            self.bFlipped = True
        self.collNodeName = 'CollNode-%s' % self.trainId
        # Stagger this track's marks inside the shared MarkDelta period.
        self.firstMark = self.MarkDelta / numTotalTracks * trackNum
        currentTime = self.__networkTimeInSeconds()
        currentRun = int((currentTime - self.firstMark) / self.MarkDelta)
        self.lastMark = currentRun * self.MarkDelta + self.firstMark
        self.doNextRun(True)
        self.hide()

    def hide(self):
        """Detach the train from the scene graph (it keeps running unseen)."""
        if self.locomotive:
            self.locomotive.reparentTo(hidden)

    def show(self):
        """Re-attach the train to the render scene graph."""
        if self.locomotive:
            self.locomotive.reparentTo(render)

    def __networkTimeInSeconds(self):
        # Network-synchronized clock so every client agrees on the schedule.
        time = globalClockDelta.getRealNetworkTime(bits=32) / NetworkTimePrecision
        return time

    def doNextRun(self, bFirstRun = False):
        """Build and schedule the next run; re-invoked by the run itself when done."""
        if self.locomotive:
            if bFirstRun:
                nextMark = self.lastMark
            else:
                nextMark = self.lastMark + self.MarkDelta
                self.nextRun.finish()
            self.notify.debug('Next mark %s' % nextMark)
            currentTime = self.__networkTimeInSeconds()
            timeTillNextMark = nextMark - currentTime
            self.notify.debug('Time diff %s' % timeTillNextMark)
            runNumber = int((nextMark - self.firstMark) / self.MarkDelta)
            # Seed deterministically so all clients build the identical run,
            # then restore the global random state for everything else.
            S = random.getstate()
            random.seed(self.trainId + runNumber)
            self.nextRun = self.__getNextRun()
            random.setstate(S)
            self.__startNextRun(timeTillNextMark)
            self.lastMark = nextMark
        return Task.done

    def __startNextRun(self, timeTillMark):
        if self.locomotive:
            self.__disableCollisions()
            if timeTillMark > 0:
                # Mark is in the future: wait for it before running.
                self.nextRun = Sequence(Wait(timeTillMark), self.nextRun)
                self.nextRun.start()
            else:
                # Mark already passed: jump into the run partway through.
                self.nextRun.start(-1 * timeTillMark)
            self.__enableCollisions()
        return Task.done

    def __cleanupCars(self):
        # Drop collision handling first so stale car nodes never trigger it.
        self.__disableCollisions()
        for car in self.cars:
            car.removeNode()
        self.cars = []

    def __getCars(self):
        """Attach a random consist of 1-9 cars behind the locomotive."""
        self.__cleanupCars()
        numCarsThisRun = random.randrange(1, 10)
        # range() instead of the Python-2-only xrange() (works on both).
        for nCar in range(numCarsThisRun):
            carType = random.randrange(0, self.numCars)
            car = loader.loadModel(self.CarFiles[carType])
            car.reparentTo(self.locomotive)
            car.setPos(self.CarLength * (nCar + 1), 0, 0)
            self.cars.append(car)

    def __showStart(self):
        self.notify.debug('Starting train %s at %s.' % (self.trainId, self.__networkTimeInSeconds()))

    def __getNextRun(self):
        """Build the movement/sound interval for one run (1-in-4 chance it pauses mid-track)."""
        self.__getCars()
        trainShouldStop = random.randrange(0, 4)
        nextRun = Sequence(Func(self.__showStart))
        # Equality instead of identity: ``is 0`` relied on CPython small-int
        # caching and raises a SyntaxWarning on modern Python.
        if trainShouldStop == 0:
            # Stop-and-go run: ease in to the halfway point, pause, then leave.
            waitTime = 3
            totalTime = random.randrange(4, (self.MarkDelta - waitTime) / 2)
            sfxStopTime = 4.3
            halfway = (self.trackStartPos + self.trackEndPos) / 2
            halfway.setX(150)
            nextRun.append(Parallel(Sequence(Wait(totalTime - sfxStopTime), SoundInterval(self.trainStopStartSfx, volume=0.5)), Sequence(LerpPosInterval(self.locomotive, totalTime, halfway, self.trackStartPos, blendType='easeInOut'), WaitInterval(waitTime), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, halfway, blendType='easeIn'))))
        else:
            # Pass-through run: one constant-speed sweep with a pass-by sound.
            totalTime = random.randrange(6, self.MarkDelta - 1)
            sfxTime = 7
            sfxStartTime = totalTime / 2 - sfxTime / 2
            if self.bFlipped:
                sfxStartTime -= 1
            else:
                sfxStartTime += 1
            nextRun.append(Parallel(Sequence(Wait(sfxStartTime), SoundInterval(self.trainPassingSfx, volume=0.5)), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, self.trackStartPos)))
        # Chain the scheduler so the next run is queued when this one ends.
        nextRun.append(Func(self.doNextRun))
        return nextRun

    def delete(self):
        """Tear down models, intervals and sounds when the train is destroyed."""
        self.__cleanupCars()
        self.locomotive.removeNode()
        self.locomotive = None
        self.nextRun.finish()
        self.nextRun = None
        del self.trainPassingSfx
        del self.trainStopStartSfx
        return

    def uniqueName(self, name):
        # Class-level counter guarantees distinct names across all trains.
        Train.nameId += 1
        return name + '-%d' % Train.nameId

    def __enableCollisions(self):
        """Tag every collision node on the train and listen for toon contact."""
        allColls = self.locomotive.findAllMatches('**/+CollisionNode')
        for car in self.cars:
            carColls = car.findAllMatches('**/+CollisionNode')
            allColls += carColls
        for collNode in allColls:
            collNode.setName(self.collNodeName)
            collNode.setCollideMask(ToontownGlobals.WallBitmask)
        self.accept('enter' + self.collNodeName, self.__handleCollisionSphereEnter)

    def __disableCollisions(self):
        self.ignore('enter' + self.collNodeName)

    def __handleCollisionSphereEnter(self, collEntry = None):
        # A toon touched the moving train: squish the local avatar.
        base.localAvatar.b_squish(10)
| 40.165644 | 351 | 0.653429 | from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed.ClockDelta import NetworkTimePrecision
import random
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
class Train(DirectObject):
notify = directNotify.newCategory('Train')
nameId = 0
Sfx_TrainPass = 'phase_10/audio/sfx/CBHQ_TRAIN_pass.ogg'
Sfx_TrainStopStart = 'phase_10/audio/sfx/CBHQ_TRAIN_stopstart.ogg'
LocomotiveFile = 'phase_10/models/cogHQ/CashBotLocomotive'
CarFiles = ['phase_10/models/cogHQ/CashBotBoxCar', 'phase_10/models/cogHQ/CashBotTankCar', 'phase_10/models/cogHQ/CashBotFlatCar']
CarLength = 88
MarkDelta = 15
def __init__(self, trackStartPos, trackEndPos, trackNum, numTotalTracks):
self.trackStartPos = trackStartPos
self.trackEndPos = trackEndPos
self.numCars = len(self.CarFiles)
self.locomotive = loader.loadModel(self.LocomotiveFile)
self.cars = []
self.trainPassingSfx = base.loader.loadSfx(self.Sfx_TrainPass)
self.trainStopStartSfx = base.loader.loadSfx(self.Sfx_TrainStopStart)
self.trainId = trackNum
self.bFlipped = False
if trackStartPos[0] < trackEndPos[0]:
self.locomotive.setHpr(180, 0, 0)
self.bFlipped = True
self.collNodeName = 'CollNode-%s' % self.trainId
self.firstMark = self.MarkDelta / numTotalTracks * trackNum
currentTime = self.__networkTimeInSeconds()
currentRun = int((currentTime - self.firstMark) / self.MarkDelta)
self.lastMark = currentRun * self.MarkDelta + self.firstMark
self.doNextRun(True)
self.hide()
def hide(self):
if self.locomotive:
self.locomotive.reparentTo(hidden)
def show(self):
if self.locomotive:
self.locomotive.reparentTo(render)
def __networkTimeInSeconds(self):
time = globalClockDelta.getRealNetworkTime(bits=32) / NetworkTimePrecision
return time
def doNextRun(self, bFirstRun = False):
if self.locomotive:
if bFirstRun:
nextMark = self.lastMark
else:
nextMark = self.lastMark + self.MarkDelta
self.nextRun.finish()
self.notify.debug('Next mark %s' % nextMark)
currentTime = self.__networkTimeInSeconds()
timeTillNextMark = nextMark - currentTime
self.notify.debug('Time diff %s' % timeTillNextMark)
runNumber = int((nextMark - self.firstMark) / self.MarkDelta)
S = random.getstate()
random.seed(self.trainId + runNumber)
self.nextRun = self.__getNextRun()
random.setstate(S)
self.__startNextRun(timeTillNextMark)
self.lastMark = nextMark
return Task.done
def __startNextRun(self, timeTillMark):
if self.locomotive:
self.__disableCollisions()
if timeTillMark > 0:
self.nextRun = Sequence(Wait(timeTillMark), self.nextRun)
self.nextRun.start()
else:
self.nextRun.start(-1 * timeTillMark)
self.__enableCollisions()
return Task.done
def __cleanupCars(self):
self.__disableCollisions()
for car in self.cars:
car.removeNode()
self.cars = []
def __getCars(self):
self.__cleanupCars()
numCarsThisRun = random.randrange(1, 10)
for nCar in xrange(numCarsThisRun):
carType = random.randrange(0, self.numCars)
car = loader.loadModel(self.CarFiles[carType])
car.reparentTo(self.locomotive)
car.setPos(self.CarLength * (nCar + 1), 0, 0)
self.cars.append(car)
def __showStart(self):
self.notify.debug('Starting train %s at %s.' % (self.trainId, self.__networkTimeInSeconds()))
def __getNextRun(self):
self.__getCars()
trainShouldStop = random.randrange(0, 4)
nextRun = Sequence(Func(self.__showStart))
if trainShouldStop is 0:
waitTime = 3
totalTime = random.randrange(4, (self.MarkDelta - waitTime) / 2)
sfxStopTime = 4.3
halfway = (self.trackStartPos + self.trackEndPos) / 2
halfway.setX(150)
nextRun.append(Parallel(Sequence(Wait(totalTime - sfxStopTime), SoundInterval(self.trainStopStartSfx, volume=0.5)), Sequence(LerpPosInterval(self.locomotive, totalTime, halfway, self.trackStartPos, blendType='easeInOut'), WaitInterval(waitTime), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, halfway, blendType='easeIn'))))
else:
totalTime = random.randrange(6, self.MarkDelta - 1)
sfxTime = 7
sfxStartTime = totalTime / 2 - sfxTime / 2
if self.bFlipped:
sfxStartTime -= 1
else:
sfxStartTime += 1
nextRun.append(Parallel(Sequence(Wait(sfxStartTime), SoundInterval(self.trainPassingSfx, volume=0.5)), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, self.trackStartPos)))
nextRun.append(Func(self.doNextRun))
return nextRun
def delete(self):
self.__cleanupCars()
self.locomotive.removeNode()
self.locomotive = None
self.nextRun.finish()
self.nextRun = None
del self.trainPassingSfx
del self.trainStopStartSfx
return
def uniqueName(self, name):
Train.nameId += 1
return name + '-%d' % Train.nameId
def __enableCollisions(self):
allColls = self.locomotive.findAllMatches('**/+CollisionNode')
for car in self.cars:
carColls = car.findAllMatches('**/+CollisionNode')
allColls += carColls
for collNode in allColls:
collNode.setName(self.collNodeName)
collNode.setCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.collNodeName, self.__handleCollisionSphereEnter)
def __disableCollisions(self):
self.ignore('enter' + self.collNodeName)
def __handleCollisionSphereEnter(self, collEntry = None):
base.localAvatar.b_squish(10)
| true | true |
f7228e397bfc3956d4478f6abf5814927ced055f | 4,511 | py | Python | comfort_gallery/settings.py | C-Museo/Core-Django-1 | 44d501fe8666941cea6a5479ebb2ccb4472d67d0 | [
"PostgreSQL",
"MIT"
] | null | null | null | comfort_gallery/settings.py | C-Museo/Core-Django-1 | 44d501fe8666941cea6a5479ebb2ccb4472d67d0 | [
"PostgreSQL",
"MIT"
] | null | null | null | comfort_gallery/settings.py | C-Museo/Core-Django-1 | 44d501fe8666941cea6a5479ebb2ccb4472d67d0 | [
"PostgreSQL",
"MIT"
] | null | null | null | """
Django settings for comfort_gallery project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import cloudinary
import django_heroku
import dj_database_url
#from decouple import Csv
# Deployment mode switch ("dev" selects the local Postgres DB below;
# anything else uses Heroku's DATABASE_URL).
MODE=os.environ.get('MODE')
#SECRET_KEY = config('SECRET_KEY')
#DEBUG = config('DEBUG', default=False, cast=bool)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded on even though MODE/env configuration
# exists -- confirm this should not be driven by an environment variable.
DEBUG = True
# NOTE(review): expected to be a list of hostnames; os.environ.get returns a
# single string (or None) -- verify how ALLOWED_HOSTS is provided.
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS')
# Application definition
INSTALLED_APPS = [
    'gallery',
    'bootstrap3',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'comfort_gallery.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'comfort_gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Dev mode: explicit Postgres credentials from individual env vars.
if os.environ.get('MODE')=="dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': os.environ.get('DB_NAME'),
            'USER': os.environ.get('DB_USER'),
            'PASSWORD': os.environ.get('DB_PASSWORD'),
            'HOST': os.environ.get('DB_HOST','127.0.0.1'),
            'PORT': os.environ.get('DB_PORT'),
        }
    }
else:
    # Production (Heroku): parse the single DATABASE_URL connection string.
    DATABASES = {
        'default': dj_database_url.config(default=os.environ.get('DATABASE_URL'))
    }
# Apply persistent-connection settings from the environment on top of either
# configuration above.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Cloudinary is configured for media hosting; credentials come from env vars.
cloudinary.config(
    cloud_name = os.environ.get('CLOUDINARY_CLOUD_NAME'),
    api_key = os.environ.get('CLOUDINARY_API_KEY'),
    api_secret = os.environ.get('CLOUDINARY_API_SECRET'),
)
# WhiteNoise serves hashed, compressed static files in production.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | 27.674847 | 91 | 0.704278 |
from pathlib import Path
import os
import cloudinary
import django_heroku
import dj_database_url
MODE=os.environ.get('MODE')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = True
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS')
# Application definition
INSTALLED_APPS = [
'gallery',
'bootstrap3',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'comfort_gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'comfort_gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if os.environ.get('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST','127.0.0.1'),
'PORT': os.environ.get('DB_PORT'),
}
}
else:
DATABASES = {
'default': dj_database_url.config(default=os.environ.get('DATABASE_URL'))
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
cloudinary.config(
cloud_name = os.environ.get('CLOUDINARY_CLOUD_NAME'),
api_key = os.environ.get('CLOUDINARY_API_KEY'),
api_secret = os.environ.get('CLOUDINARY_API_SECRET'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | true | true |
f7228f1c92e0b3d4a4d6b2c2380ff7458091afdb | 79,582 | py | Python | imaplib2.py | mannkind/imap_encrypt | bf83fea43b750cd0d89c266e046784a9e215797d | [
"MIT"
] | 3 | 2017-06-07T10:58:45.000Z | 2019-12-15T18:05:24.000Z | imaplib2.py | mannkind/imap_encrypt | bf83fea43b750cd0d89c266e046784a9e215797d | [
"MIT"
] | null | null | null | imaplib2.py | mannkind/imap_encrypt | bf83fea43b750cd0d89c266e046784a9e215797d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Threaded IMAP4 client.
Based on RFC 3501 and original imaplib module.
Public classes: IMAP4
IMAP4_SSL
IMAP4_stream
Public functions: Internaldate2Time
ParseFlags
Time2Internaldate
"""
__all__ = ("IMAP4", "IMAP4_SSL", "IMAP4_stream",
"Internaldate2Time", "ParseFlags", "Time2Internaldate")
__version__ = "2.33"
__release__ = "2"
__revision__ = "33"
__credits__ = """
Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
String method conversion by ESR, February 2001.
GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
IDLE via threads suggested by Philippe Normand <phil@respyre.org> January 2005.
GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
COMPRESS/DEFLATE contributed by Bron Gondwana <brong@brong.net> May 2009.
STARTTLS from Jython's imaplib by Alan Kennedy.
ID contributed by Dave Baggett <dave@baggett.org> November 2009.
Improved untagged responses handling suggested by Dave Baggett <dave@baggett.org> November 2009.
Improved thread naming, and 0 read detection contributed by Grant Edwards <grant.b.edwards@gmail.com> June 2010.
Improved timeout handling contributed by Ivan Vovnenko <ivovnenko@gmail.com> October 2010.
Timeout handling further improved by Ethan Glasser-Camp <glasse@cs.rpi.edu> December 2010.
Time2Internaldate() patch to match RFC2060 specification of English month names from bugs.python.org/issue11024 March 2011.
starttls() bug fixed with the help of Sebastian Spaeth <sebastian@sspaeth.de> April 2011.
Threads now set the "daemon" flag (suggested by offlineimap-project) April 2011.
Single quoting introduced with the help of Vladimir Marek <vladimir.marek@oracle.com> August 2011."""
__author__ = "Piers Lauder <piers@janeelix.com>"
__URL__ = "http://imaplib2.sourceforge.net"
__license__ = "Python License"
import binascii, errno, os, Queue, random, re, select, socket, sys, time, threading, zlib
select_module = select
# Globals
# Protocol constants and tunables.
CRLF = '\r\n'
Debug = None                                    # Backward compatibility
IMAP4_PORT = 143
IMAP4_SSL_PORT = 993
IDLE_TIMEOUT_RESPONSE = '* IDLE TIMEOUT\r\n'
IDLE_TIMEOUT = 60*29                            # Don't stay in IDLE state longer
READ_POLL_TIMEOUT = 30                          # Without this timeout interrupted network connections can hang reader
READ_SIZE = 32768                               # Consume all available in socket
DFLT_DEBUG_BUF_LVL = 3                          # Level above which the logging output goes directly to stderr

AllowedVersions = ('IMAP4REV1', 'IMAP4')        # Most recent first

# Commands
# Each entry maps a command name to a 2-tuple, indexed by the
# CMD_VAL_STATES/CMD_VAL_ASYNC constants below:
#   [CMD_VAL_STATES] - the protocol states in which the command is valid;
#   [CMD_VAL_ASYNC]  - True if the command may be issued asynchronously
#                      (without waiting for previous commands to finish).

CMD_VAL_STATES = 0
CMD_VAL_ASYNC = 1
NONAUTH, AUTH, SELECTED, LOGOUT = 'NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'

Commands = {
        # name            valid states                  asynchronous
        'APPEND':       ((AUTH, SELECTED),            False),
        'AUTHENTICATE': ((NONAUTH,),                  False),
        'CAPABILITY':   ((NONAUTH, AUTH, SELECTED),   True),
        'CHECK':        ((SELECTED,),                 True),
        'CLOSE':        ((SELECTED,),                 False),
        'COMPRESS':     ((AUTH,),                     False),
        'COPY':         ((SELECTED,),                 True),
        'CREATE':       ((AUTH, SELECTED),            True),
        'DELETE':       ((AUTH, SELECTED),            True),
        'DELETEACL':    ((AUTH, SELECTED),            True),
        'EXAMINE':      ((AUTH, SELECTED),            False),
        'EXPUNGE':      ((SELECTED,),                 True),
        'FETCH':        ((SELECTED,),                 True),
        'GETACL':       ((AUTH, SELECTED),            True),
        'GETANNOTATION':((AUTH, SELECTED),            True),
        'GETQUOTA':     ((AUTH, SELECTED),            True),
        'GETQUOTAROOT': ((AUTH, SELECTED),            True),
        'ID':           ((NONAUTH, AUTH, LOGOUT, SELECTED),   True),
        'IDLE':         ((SELECTED,),                 False),
        'LIST':         ((AUTH, SELECTED),            True),
        'LOGIN':        ((NONAUTH,),                  False),
        'LOGOUT':       ((NONAUTH, AUTH, LOGOUT, SELECTED),   False),
        'LSUB':         ((AUTH, SELECTED),            True),
        'MYRIGHTS':     ((AUTH, SELECTED),            True),
        'NAMESPACE':    ((AUTH, SELECTED),            True),
        'NOOP':         ((NONAUTH, AUTH, SELECTED),   True),
        'PARTIAL':      ((SELECTED,),                 True),
        'PROXYAUTH':    ((AUTH,),                     False),
        'RENAME':       ((AUTH, SELECTED),            True),
        'SEARCH':       ((SELECTED,),                 True),
        'SELECT':       ((AUTH, SELECTED),            False),
        'SETACL':       ((AUTH, SELECTED),            False),
        'SETANNOTATION':((AUTH, SELECTED),            True),
        'SETQUOTA':     ((AUTH, SELECTED),            False),
        'SORT':         ((SELECTED,),                 True),
        'STARTTLS':     ((NONAUTH,),                  False),
        'STATUS':       ((AUTH, SELECTED),            True),
        'STORE':        ((SELECTED,),                 True),
        'SUBSCRIBE':    ((AUTH, SELECTED),            False),
        'THREAD':       ((SELECTED,),                 True),
        'UID':          ((SELECTED,),                 True),
        'UNSUBSCRIBE':  ((AUTH, SELECTED),            False),
        }

# UID sub-commands whose untagged responses arrive under their own name
# (everything else answered via FETCH - see IMAP4.uid()).
UID_direct = ('SEARCH', 'SORT', 'THREAD')
def Int2AP(num):

    """string = Int2AP(num)

    Return 'num' converted to a string using characters from the set 'A'..'P'
    (i.e. base 16 with 'A' as the zero digit). Used to build session tags."""

    digits = 'ABCDEFGHIJKLMNOP'
    num = int(abs(num))
    out = ''
    while num:
        num, rem = divmod(num, 16)
        out = digits[rem] + out
    return out
class Request(object):

    """Private class to represent a request awaiting response.

    Allocates the next tag from 'parent' (an IMAP4 instance) and carries
    either a callback (asynchronous delivery) or an Event the caller can
    wait on via get_response() (synchronous delivery)."""

    def __init__(self, parent, name=None, callback=None, cb_arg=None, cb_self=False):
        self.parent = parent
        self.name = name
        self.callback = callback                # Function called to process result
        # With cb_self set, the callback arg carries a reference back to us.
        self.callback_arg = (self, cb_arg) if cb_self else cb_arg

        self.tag = '%s%s' % (parent.tagpre, parent.tagnum)
        parent.tagnum += 1

        self.ready = threading.Event()
        self.response = None
        self.aborted = None
        self.data = None


    def abort(self, typ, val):
        # Record the failure, then deliver so waiters/callbacks wake up.
        self.aborted = (typ, val)
        self.deliver(None)


    def get_response(self, exc_fmt=None):
        # Block until deliver()/abort() runs; cancel any callback so the
        # result is handed to us instead.
        self.callback = None
        if __debug__: self.parent._log(3, '%s:%s.ready.wait' % (self.name, self.tag))
        self.ready.wait()

        if self.aborted is None:
            return self.response

        typ, val = self.aborted
        if exc_fmt is None:
            exc_fmt = '%s - %%s' % typ
        raise typ(exc_fmt % str(val))


    def deliver(self, response):
        # Asynchronous path: hand the result straight to the callback.
        if self.callback is not None:
            self.callback((response, self.callback_arg, self.aborted))
            return

        # Synchronous path: stash the result and wake any waiter.
        self.response = response
        self.ready.set()
        if __debug__: self.parent._log(3, '%s:%s.ready.set' % (self.name, self.tag))
class IMAP4(object):
"""Threaded IMAP4 client class.
Instantiate with:
IMAP4(host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port);
debug - debug level (default: 0 - no debug);
debug_file - debug stream (default: sys.stderr);
identifier - thread identifier prefix (default: host);
timeout - timeout in seconds when expecting a command response (default: no timeout),
debug_buf_lvl - debug level at which buffering is turned off.
All IMAP4rev1 commands are supported by methods of the same name.
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data' is
either a string, or a tuple. If a tuple, then the first part is the
header of the response, and the second part contains the data (ie:
'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"), which is
a sub-class of 'error'. Mailbox status changes from READ-WRITE to
READ-ONLY raise the exception class <instance>.readonly("<reason>"),
which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
All commands take two optional named arguments:
'callback' and 'cb_arg'
If 'callback' is provided then the command is asynchronous, so after
the command is queued for transmission, the call returns immediately
with the tuple (None, None).
The result will be posted by invoking "callback" with one arg, a tuple:
callback((result, cb_arg, None))
or, if there was a problem:
callback((None, cb_arg, (exception class, reason)))
Otherwise the command is synchronous (waits for result). But note
that state-changing commands will both block until previous commands
have completed, and block subsequent commands until they have finished.
All (non-callback) arguments to commands are converted to strings,
except for AUTHENTICATE, and the last argument to APPEND which is
passed as an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double or single quotes) each string is
quoted. However, the 'password' argument to the LOGIN command is
always quoted. If you want to avoid having an argument string
quoted (eg: the 'flags' argument to STORE) then enclose the string
in parentheses (eg: "(\Deleted)"). If you are using "sequence sets"
containing the wildcard character '*', then enclose the argument
in single quotes: the quotes will be removed and the resulting
string passed unquoted. Note also that you can pass in an argument
with a type that doesn't evaluate to 'basestring' (eg: 'bytearray')
and it will be converted to a string without quoting.
There is one instance variable, 'state', that is useful for tracking
whether the client needs to login to the server. If it has the
value "AUTH" after instantiating the class, then the connection
is pre-authenticated (otherwise it will be "NONAUTH"). Selecting a
mailbox changes the state to be "SELECTED", closing a mailbox changes
back to "AUTH", and once the client has logged out, the state changes
to "LOGOUT" and no further commands may be issued.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
Note also that you must call logout() to shut down threads before
discarding an instance.
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
continuation_cre = re.compile(r'\+( (?P<data>.*))?')
literal_cre = re.compile(r'.*{(?P<size>\d+)}$')
mapCRLF_cre = re.compile(r'\r\n|\r|\n')
# Need to quote "atom-specials" :-
# "(" / ")" / "{" / SP / 0x00 - 0x1f / 0x7f / "%" / "*" / DQUOTE / "\" / "]"
# so match not the inverse set
mustquote_cre = re.compile(r"[^!#$&'+,./0-9:;<=>?@A-Z\[^_`a-z|}~-]")
response_code_cre = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
# sequence_set_cre = re.compile(r"^[0-9]+(:([0-9]+|\*))?(,[0-9]+(:([0-9]+|\*))?)*$")
untagged_response_cre = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
untagged_status_cre = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
    def __init__(self, host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        """Connect to "host:port", start the writer/reader/handler threads,
        read the server welcome, and cache the server CAPABILITY list.

        On any failure the threads are shut down before the exception
        propagates, so a half-constructed instance leaves nothing running."""

        self.state = NONAUTH            # IMAP4 protocol state
        self.literal = None             # A literal argument to a command
        self.tagged_commands = {}       # Tagged commands awaiting response
        self.untagged_responses = []    # [[typ: [data, ...]], ...]
        self.mailbox = None             # Current mailbox selected
        self.mailboxes = {}             # Untagged responses state per mailbox
        self.is_readonly = False        # READ-ONLY desired state
        self.idle_rqb = None            # Server IDLE Request - see _IdleCont
        self.idle_timeout = None        # Must prod server occasionally

        self._expecting_data = 0        # Expecting message data
        self._accumulated_data = []     # Message data accumulated so far
        self._literal_expected = None   # Message data descriptor

        self.compressor = None          # COMPRESS/DEFLATE if not None
        self.decompressor = None

        # Create unique tag for this session,
        # and compile tagged response matcher.

        self.tagnum = 0
        self.tagpre = Int2AP(random.randint(4096, 65535))
        self.tagre = re.compile(r'(?P<tag>'
                        + self.tagpre
                        + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')

        if __debug__: self._init_debug(debug, debug_file, debug_buf_lvl)

        self.resp_timeout = timeout     # Timeout waiting for command response

        # Reader poll timeout is capped by the command timeout so a dead
        # connection is noticed within 'timeout' seconds.
        if timeout is not None and timeout < READ_POLL_TIMEOUT:
            self.read_poll_timeout = timeout
        else:
            self.read_poll_timeout = READ_POLL_TIMEOUT
        self.read_size = READ_SIZE

        # Open socket to server.

        self.open(host, port)

        if __debug__:
            if debug:
                self._mesg('connected to %s on port %s' % (self.host, self.port))

        # Threading: the 'identifier' prefix is used for thread naming.

        if identifier is not None:
            self.identifier = identifier
        else:
            self.identifier = self.host
        if self.identifier:
            self.identifier += ' '

        self.Terminate = self.TerminateReader = False

        self.state_change_free = threading.Event()
        self.state_change_pending = threading.Lock()
        self.commands_lock = threading.Lock()
        self.idle_lock = threading.Lock()

        self.ouq = Queue.Queue(10)      # Outbound queue (bounded)
        self.inq = Queue.Queue()        # Inbound queue

        # Daemon threads: writer sends, reader receives, handler parses.
        self.wrth = threading.Thread(target=self._writer)
        self.wrth.setDaemon(True)
        self.wrth.start()
        self.rdth = threading.Thread(target=self._reader)
        self.rdth.setDaemon(True)
        self.rdth.start()
        self.inth = threading.Thread(target=self._handler)
        self.inth.setDaemon(True)
        self.inth.start()

        # Get server welcome message,
        # request and store CAPABILITY response.

        try:
            self.welcome = self._request_push(tag='continuation').get_response('IMAP4 protocol error: %s')[1]

            # PREAUTH welcome means the connection is pre-authenticated.
            if self._get_untagged_response('PREAUTH'):
                self.state = AUTH
                if __debug__: self._log(1, 'state => AUTH')
            elif self._get_untagged_response('OK'):
                if __debug__: self._log(1, 'state => NONAUTH')
            else:
                raise self.error('unrecognised server welcome message: %s' % `self.welcome`)

            typ, dat = self.capability()
            if dat == [None]:
                raise self.error('no CAPABILITY response from server')
            self.capabilities = tuple(dat[-1].upper().split())
            if __debug__: self._log(1, 'CAPABILITY: %r' % (self.capabilities,))

            # Pick the most recent protocol version the server advertises.
            for version in AllowedVersions:
                if not version in self.capabilities:
                    continue
                self.PROTOCOL_VERSION = version
                break
            else:
                raise self.error('server not IMAP4 compliant')
        except:
            self._close_threads()
            raise
def __getattr__(self, attr):
# Allow UPPERCASE variants of IMAP4 command methods.
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
# Overridable methods
    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Setup connection to remote server on "host:port"
            (default: localhost:standard IMAP4 port).
        This connection will be used by the routines:
            read, send, shutdown, socket."""

        self.host = self._choose_nonull_or_dflt('', host)
        self.port = self._choose_nonull_or_dflt(IMAP4_PORT, port)
        self.sock = self.open_socket()
        # The reader thread polls this descriptor directly.
        self.read_fd = self.sock.fileno()
    def open_socket(self):
        """open_socket()
        Open socket choosing first address family available.

        Tries each address returned by getaddrinfo in turn; the error from
        the last failed candidate is re-raised if none connects."""

        msg = (-1, 'could not open socket')
        for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
            except socket.error, msg:
                continue
            try:
                # Retry a connect() interrupted by EINTR once; any other
                # error moves on to the next candidate address.
                for i in (0, 1):
                    try:
                        s.connect(sa)
                        break
                    except socket.error, msg:
                        if len(msg.args) < 2 or msg.args[0] != errno.EINTR:
                            raise
                else:
                    raise socket.error(msg)
            except socket.error, msg:
                s.close()
                continue
            break
        else:
            # Every candidate failed: surface the last error seen.
            raise socket.error(msg)

        return s
    def ssl_wrap_socket(self):
        """Wrap self.sock in SSL/TLS using the key/cert attributes set by
        the caller (see starttls()/IMAP4_SSL), then run the optional
        certificate-verification callback."""

        # Allow sending of keep-alive messages - seems to prevent some servers
        # from closing SSL, leading to deadlocks.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        try:
            import ssl
            if self.ca_certs is not None:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile, ca_certs=self.ca_certs, cert_reqs=cert_reqs)
            ssl_exc = ssl.SSLError
            # New wrapped socket: the reader must poll the new descriptor.
            self.read_fd = self.sock.fileno()
        except ImportError:
            # No ssl module, and socket.ssl has no fileno(), and does not allow certificate verification
            raise socket.sslerror("imaplib2 SSL mode does not work without ssl module")

        if self.cert_verify_cb is not None:
            # User hook: returns an error string on verification failure.
            cert_err = self.cert_verify_cb(self.sock.getpeercert(), self.host)
            if cert_err:
                raise ssl_exc(cert_err)
def start_compressing(self):
"""start_compressing()
Enable deflate compression on the socket (RFC 4978)."""
# rfc 1951 - pure DEFLATE, so use -15 for both windows
self.decompressor = zlib.decompressobj(-15)
self.compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
def read(self, size):
"""data = read(size)
Read at most 'size' bytes from remote."""
if self.decompressor is None:
return self.sock.recv(size)
if self.decompressor.unconsumed_tail:
data = self.decompressor.unconsumed_tail
else:
data = self.sock.recv(READ_SIZE)
return self.decompressor.decompress(data, size)
def send(self, data):
"""send(data)
Send 'data' to remote."""
if self.compressor is not None:
data = self.compressor.compress(data)
data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
self.sock.sendall(data)
    def shutdown(self):
        """shutdown()
        Close I/O established in "open".

        NB: closes only the socket; use logout() to also stop the
        writer/reader/handler threads."""

        self.sock.close()


    def socket(self):
        """socket = socket()
        Return socket instance used to connect to IMAP4 server."""

        return self.sock
# Utility methods
    def enable_compression(self):
        """enable_compression()
        Ask the server to start compressing the connection.
        Should be called from user of this class after instantiation, as in:
            if 'COMPRESS=DEFLATE' in imapobj.capabilities:
                imapobj.enable_compression()"""

        try:
            typ, dat = self._simple_command('COMPRESS', 'DEFLATE')
            if typ == 'OK':
                # Only switch the socket over once the server has agreed.
                self.start_compressing()
                if __debug__: self._log(1, 'Enabled COMPRESS=DEFLATE')
        finally:
            self._release_state_change()
def pop_untagged_responses(self):
""" for typ,data in pop_untagged_responses(): pass
Generator for any remaining untagged responses.
Returns and removes untagged responses in order of reception.
Use at your own risk!"""
while self.untagged_responses:
self.commands_lock.acquire()
try:
yield self.untagged_responses.pop(0)
finally:
self.commands_lock.release()
    def recent(self, **kw):
        """(typ, [data]) = recent()
        Return 'RECENT' responses if any exist,
        else prompt server for an update using the 'NOOP' command.
        'data' is None if no new messages,
        else list of RECENT responses, most recent last."""

        name = 'RECENT'
        typ, dat = self._untagged_response(None, [None], name)
        if dat != [None]:
            # Cached RECENT responses exist - deliver them directly.
            return self._deliver_dat(typ, dat, kw)
        kw['untagged_response'] = name
        return self.noop(**kw)  # Prod server for response


    def response(self, code, **kw):
        """(code, [data]) = response(code)
        Return data for response 'code' if received, or None.
        Old value for response 'code' is cleared."""

        typ, dat = self._untagged_response(code, [None], code.upper())
        return self._deliver_dat(typ, dat, kw)
# IMAP4 commands
def append(self, mailbox, flags, date_time, message, **kw):
"""(typ, [data]) = append(mailbox, flags, date_time, message)
Append message to named mailbox.
All args except `message' can be None."""
name = 'APPEND'
if not mailbox:
mailbox = 'INBOX'
if flags:
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags
else:
flags = None
if date_time:
date_time = Time2Internaldate(date_time)
else:
date_time = None
self.literal = self.mapCRLF_cre.sub(CRLF, message)
try:
return self._simple_command(name, mailbox, flags, date_time, **kw)
finally:
self._release_state_change()
    def authenticate(self, mechanism, authobject, **kw):
        """(typ, [data]) = authenticate(mechanism, authobject)
        Authenticate command - requires response processing.

        'mechanism' specifies which authentication mechanism is to
        be used - it must appear in <instance>.capabilities in the
        form AUTH=<mechanism>.

        'authobject' must be a callable object:

                data = authobject(response)

        It will be called to process server continuation responses.
        It should return data that will be encoded and sent to server.
        It should return None if the client abort response '*' should
        be sent instead."""

        # Continuation processing is delegated to _Authenticator
        # (defined elsewhere in this file); it is sent as the "literal".
        self.literal = _Authenticator(authobject).process
        try:
            typ, dat = self._simple_command('AUTHENTICATE', mechanism.upper())
            if typ != 'OK':
                self._deliver_exc(self.error, dat[-1], kw)
            self.state = AUTH
            if __debug__: self._log(1, 'state => AUTH')
        finally:
            self._release_state_change()
        return self._deliver_dat(typ, dat, kw)
    def capability(self, **kw):
        """(typ, [data]) = capability()
        Fetch capabilities list from server.

        The result is taken from the untagged CAPABILITY response."""

        name = 'CAPABILITY'
        kw['untagged_response'] = name
        return self._simple_command(name, **kw)


    def check(self, **kw):
        """(typ, [data]) = check()
        Checkpoint mailbox on server."""

        return self._simple_command('CHECK', **kw)
    def close(self, **kw):
        """(typ, [data]) = close()
        Close currently selected mailbox.

        Deleted messages are removed from writable mailbox.

        This is the recommended command before 'LOGOUT'."""

        if self.state != 'SELECTED':
            raise self.error('No mailbox selected.')
        try:
            typ, dat = self._simple_command('CLOSE')
        finally:
            # Whatever the outcome, the mailbox is no longer selected.
            self.state = AUTH
            if __debug__: self._log(1, 'state => AUTH')
            self._release_state_change()
        return self._deliver_dat(typ, dat, kw)
    def copy(self, message_set, new_mailbox, **kw):
        """(typ, [data]) = copy(message_set, new_mailbox)
        Copy 'message_set' messages onto end of 'new_mailbox'."""

        return self._simple_command('COPY', message_set, new_mailbox, **kw)


    def create(self, mailbox, **kw):
        """(typ, [data]) = create(mailbox)
        Create new mailbox."""

        return self._simple_command('CREATE', mailbox, **kw)


    def delete(self, mailbox, **kw):
        """(typ, [data]) = delete(mailbox)
        Delete old mailbox."""

        return self._simple_command('DELETE', mailbox, **kw)


    def deleteacl(self, mailbox, who, **kw):
        """(typ, [data]) = deleteacl(mailbox, who)
        Delete the ACLs (remove any rights) set for who on mailbox."""

        return self._simple_command('DELETEACL', mailbox, who, **kw)


    def examine(self, mailbox='INBOX', **kw):
        """(typ, [data]) = examine(mailbox='INBOX')
        Select a mailbox for READ-ONLY access. (Flushes all untagged responses.)
        'data' is count of messages in mailbox ('EXISTS' response).
        Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
        other responses should be obtained via "response('FLAGS')" etc."""

        # Same as select() with readonly forced on.
        return self.select(mailbox=mailbox, readonly=True, **kw)
    def expunge(self, **kw):
        """(typ, [data]) = expunge()
        Permanently remove deleted items from selected mailbox.
        Generates 'EXPUNGE' response for each deleted message.
        'data' is list of 'EXPUNGE'd message numbers in order received."""

        name = 'EXPUNGE'
        kw['untagged_response'] = name
        return self._simple_command(name, **kw)


    def fetch(self, message_set, message_parts, **kw):
        """(typ, [data, ...]) = fetch(message_set, message_parts)
        Fetch (parts of) messages.
        'message_parts' should be a string of selected parts
        enclosed in parentheses, eg: "(UID BODY[TEXT])".
        'data' are tuples of message part envelope and data,
        followed by a string containing the trailer."""

        name = 'FETCH'
        kw['untagged_response'] = name
        return self._simple_command(name, message_set, message_parts, **kw)


    def getacl(self, mailbox, **kw):
        """(typ, [data]) = getacl(mailbox)
        Get the ACLs for a mailbox."""

        kw['untagged_response'] = 'ACL'
        return self._simple_command('GETACL', mailbox, **kw)


    def getannotation(self, mailbox, entry, attribute, **kw):
        """(typ, [data]) = getannotation(mailbox, entry, attribute)
        Retrieve ANNOTATIONs."""

        kw['untagged_response'] = 'ANNOTATION'
        return self._simple_command('GETANNOTATION', mailbox, entry, attribute, **kw)


    def getquota(self, root, **kw):
        """(typ, [data]) = getquota(root)
        Get the quota root's resource usage and limits.
        (Part of the IMAP4 QUOTA extension defined in rfc2087.)"""

        kw['untagged_response'] = 'QUOTA'
        return self._simple_command('GETQUOTA', root, **kw)


    def getquotaroot(self, mailbox, **kw):
        # Hmmm, this is non-std! Left for backwards-compatibility, sigh.
        # NB: usage should have been defined as:
        #   (typ, [QUOTAROOT responses...]) = getquotaroot(mailbox)
        #   (typ, [QUOTA responses...]) = response('QUOTA')
        """(typ, [[QUOTAROOT responses...], [QUOTA responses...]]) = getquotaroot(mailbox)
        Get the list of quota roots for the named mailbox.

        NB: 'kw' is not forwarded to _simple_command, so this command is
        always synchronous; callbacks only apply to the final delivery."""

        typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
        typ, quota = self._untagged_response(typ, dat, 'QUOTA')
        typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
        return self._deliver_dat(typ, [quotaroot, quota], kw)
def id(self, *kv_pairs, **kw):
"""(typ, [data]) = <instance>.id(kv_pairs)
'kv_pairs' is a possibly empty list of keys and values.
'data' is a list of ID key value pairs or NIL.
NB: a single argument is assumed to be correctly formatted and is passed through unchanged
(for backward compatibility with earlier version).
Exchange information for problem analysis and determination.
The ID extension is defined in RFC 2971. """
name = 'ID'
kw['untagged_response'] = name
if not kv_pairs:
data = 'NIL'
elif len(kv_pairs) == 1:
data = kv_pairs[0] # Assume invoker passing correctly formatted string (back-compat)
else:
data = '(%s)' % ' '.join([(arg and self._quote(arg) or 'NIL') for arg in kv_pairs])
return self._simple_command(name, (data,), **kw)
def idle(self, timeout=None, **kw):
""""(typ, [data]) = idle(timeout=None)
Put server into IDLE mode until server notifies some change,
or 'timeout' (secs) occurs (default: 29 minutes),
or another IMAP4 command is scheduled."""
name = 'IDLE'
self.literal = _IdleCont(self, timeout).process
try:
return self._simple_command(name, **kw)
finally:
self._release_state_change()
    def list(self, directory='""', pattern='*', **kw):
        """(typ, [data]) = list(directory='""', pattern='*')
        List mailbox names in directory matching pattern.
        'data' is list of LIST responses.

        NB: for 'pattern':
        % matches all except separator ( so LIST "" "%" returns names at root)
        * matches all (so LIST "" "*" returns whole directory tree from root)"""

        name = 'LIST'
        kw['untagged_response'] = name
        return self._simple_command(name, directory, pattern, **kw)


    def login(self, user, password, **kw):
        """(typ, [data]) = login(user, password)
        Identify client using plaintext password.
        NB: 'password' will be quoted."""

        try:
            typ, dat = self._simple_command('LOGIN', user, self._quote(password))
            if typ != 'OK':
                self._deliver_exc(self.error, dat[-1], kw)
            self.state = AUTH
            if __debug__: self._log(1, 'state => AUTH')
        finally:
            self._release_state_change()
        return self._deliver_dat(typ, dat, kw)


    def login_cram_md5(self, user, password, **kw):
        """(typ, [data]) = login_cram_md5(user, password)
        Force use of CRAM-MD5 authentication."""

        self.user, self.password = user, password
        return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH, **kw)


    def _CRAM_MD5_AUTH(self, challenge):
        """Authobject to use with CRAM-MD5 authentication.

        Returns "user <hexdigest>" where the digest is HMAC(password,
        challenge) - HMAC's default hash (MD5) matches CRAM-MD5/RFC 2195."""
        import hmac
        return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
    def logout(self, **kw):
        """(typ, [data]) = logout()
        Shutdown connection to server.
        Returns server 'BYE' response.
        NB: You must call this to shut down threads before discarding an instance."""

        self.state = LOGOUT
        if __debug__: self._log(1, 'state => LOGOUT')

        try:
            try:
                typ, dat = self._simple_command('LOGOUT')
            except:
                # Even a failed LOGOUT must not prevent thread shutdown.
                typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
                if __debug__: self._log(1, dat)
            self._close_threads()
        finally:
            self._release_state_change()

        if __debug__: self._log(1, 'connection closed')

        # Prefer the server's untagged BYE response if one was received.
        bye = self._get_untagged_response('BYE', leave=True)
        if bye:
            typ, dat = 'BYE', bye
        return self._deliver_dat(typ, dat, kw)
    def lsub(self, directory='""', pattern='*', **kw):
        """(typ, [data, ...]) = lsub(directory='""', pattern='*')
        List 'subscribed' mailbox names in directory matching pattern.
        'data' are tuples of message part envelope and data."""

        name = 'LSUB'
        kw['untagged_response'] = name
        return self._simple_command(name, directory, pattern, **kw)


    def myrights(self, mailbox, **kw):
        """(typ, [data]) = myrights(mailbox)
        Show my ACLs for a mailbox (i.e. the rights that I have on mailbox)."""

        name = 'MYRIGHTS'
        kw['untagged_response'] = name
        return self._simple_command(name, mailbox, **kw)


    def namespace(self, **kw):
        """(typ, [data, ...]) = namespace()
        Returns IMAP namespaces ala rfc2342."""

        name = 'NAMESPACE'
        kw['untagged_response'] = name
        return self._simple_command(name, **kw)


    def noop(self, **kw):
        """(typ, [data]) = noop()
        Send NOOP command."""

        if __debug__: self._dump_ur(3)
        return self._simple_command('NOOP', **kw)
    def partial(self, message_num, message_part, start, length, **kw):
        """(typ, [data, ...]) = partial(message_num, message_part, start, length)
        Fetch truncated part of a message.
        'data' is tuple of message part envelope and data.
        NB: obsolete."""

        name = 'PARTIAL'
        kw['untagged_response'] = 'FETCH'
        return self._simple_command(name, message_num, message_part, start, length, **kw)


    def proxyauth(self, user, **kw):
        """(typ, [data]) = proxyauth(user)
        Assume authentication as 'user'.
        (Allows an authorised administrator to proxy into any user's mailbox.)"""

        try:
            return self._simple_command('PROXYAUTH', user, **kw)
        finally:
            self._release_state_change()


    def rename(self, oldmailbox, newmailbox, **kw):
        """(typ, [data]) = rename(oldmailbox, newmailbox)
        Rename old mailbox name to new."""

        return self._simple_command('RENAME', oldmailbox, newmailbox, **kw)
def search(self, charset, *criteria, **kw):
"""(typ, [data]) = search(charset, criterion, ...)
Search mailbox for matching messages.
'data' is space separated list of matching message numbers."""
name = 'SEARCH'
kw['untagged_response'] = name
if charset:
return self._simple_command(name, 'CHARSET', charset, *criteria, **kw)
return self._simple_command(name, *criteria, **kw)
def select(self, mailbox='INBOX', readonly=False, **kw):
"""(typ, [data]) = select(mailbox='INBOX', readonly=False)
Select a mailbox. (Restores any previous untagged responses.)
'data' is count of messages in mailbox ('EXISTS' response).
Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
other responses should be obtained via "response('FLAGS')" etc."""
self.commands_lock.acquire()
# Save state of old mailbox, restore state for new...
self.mailboxes[self.mailbox] = self.untagged_responses
self.untagged_responses = self.mailboxes.setdefault(mailbox, [])
self.commands_lock.release()
self.mailbox = mailbox
self.is_readonly = readonly and True or False
if readonly:
name = 'EXAMINE'
else:
name = 'SELECT'
try:
rqb = self._command(name, mailbox)
typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
if typ != 'OK':
if self.state == SELECTED:
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
if typ == 'BAD':
self._deliver_exc(self.error, '%s command error: %s %s. Data: %.100s' % (name, typ, dat, mailbox), kw)
return self._deliver_dat(typ, dat, kw)
self.state = SELECTED
if __debug__: self._log(1, 'state => SELECTED')
finally:
self._release_state_change()
if self._get_untagged_response('READ-ONLY', leave=True) and not readonly:
if __debug__: self._dump_ur(1)
self._deliver_exc(self.readonly, '%s is not writable' % mailbox, kw)
typ, dat = self._untagged_response(typ, [None], 'EXISTS')
return self._deliver_dat(typ, dat, kw)
    def setacl(self, mailbox, who, what, **kw):
        """(typ, [data]) = setacl(mailbox, who, what)
        Set a mailbox acl."""

        try:
            return self._simple_command('SETACL', mailbox, who, what, **kw)
        finally:
            self._release_state_change()


    def setannotation(self, *args, **kw):
        """(typ, [data]) = setannotation(mailbox[, entry, attribute]+)
        Set ANNOTATIONs."""

        kw['untagged_response'] = 'ANNOTATION'
        return self._simple_command('SETANNOTATION', *args, **kw)


    def setquota(self, root, limits, **kw):
        """(typ, [data]) = setquota(root, limits)
        Set the quota root's resource limits."""

        kw['untagged_response'] = 'QUOTA'
        try:
            return self._simple_command('SETQUOTA', root, limits, **kw)
        finally:
            self._release_state_change()


    def sort(self, sort_criteria, charset, *search_criteria, **kw):
        """(typ, [data]) = sort(sort_criteria, charset, search_criteria, ...)
        IMAP4rev1 extension SORT command."""

        name = 'SORT'
        # Parenthesize the sort criteria unless the caller already did.
        if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
            sort_criteria = '(%s)' % sort_criteria
        kw['untagged_response'] = name
        return self._simple_command(name, sort_criteria, charset, *search_criteria, **kw)
    def starttls(self, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, **kw):
        """(typ, [data]) = starttls(keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None)
        Start TLS negotiation as per RFC 2595."""

        name = 'STARTTLS'

        if name not in self.capabilities:
            raise self.abort('TLS not supported by server')

        if hasattr(self, '_tls_established') and self._tls_established:
            raise self.abort('TLS session already established')

        # Must now shutdown reader thread after next response, and restart after changing read_fd

        self.read_size = 1                # Don't consume TLS handshake
        self.TerminateReader = True

        try:
            typ, dat = self._simple_command(name)
        finally:
            self._release_state_change()
            self.rdth.join()
            self.TerminateReader = False
            self.read_size = READ_SIZE

        if typ != 'OK':
            # Restart reader thread and error
            self.rdth = threading.Thread(target=self._reader)
            self.rdth.setDaemon(True)
            self.rdth.start()
            raise self.error("Couldn't establish TLS session: %s" % dat)

        self.keyfile = keyfile
        self.certfile = certfile
        self.ca_certs = ca_certs
        self.cert_verify_cb = cert_verify_cb

        try:
            self.ssl_wrap_socket()
        finally:
            # Restart reader thread (now polling the wrapped socket's fd).
            self.rdth = threading.Thread(target=self._reader)
            self.rdth.setDaemon(True)
            self.rdth.start()

        # Re-fetch capabilities - the pre-TLS list may no longer be valid.
        typ, dat = self.capability()
        if dat == [None]:
            raise self.error('no CAPABILITY response from server')
        self.capabilities = tuple(dat[-1].upper().split())
        self._tls_established = True

        typ, dat = self._untagged_response(typ, dat, name)
        return self._deliver_dat(typ, dat, kw)
    def status(self, mailbox, names, **kw):
        """(typ, [data]) = status(mailbox, names)
        Request named status conditions for mailbox."""

        name = 'STATUS'
        kw['untagged_response'] = name
        return self._simple_command(name, mailbox, names, **kw)


    def store(self, message_set, command, flags, **kw):
        """(typ, [data]) = store(message_set, command, flags)
        Alters flag dispositions for messages in mailbox."""

        if (flags[0],flags[-1]) != ('(',')'):
            flags = '(%s)' % flags  # Avoid quoting the flags
        kw['untagged_response'] = 'FETCH'
        return self._simple_command('STORE', message_set, command, flags, **kw)


    def subscribe(self, mailbox, **kw):
        """(typ, [data]) = subscribe(mailbox)
        Subscribe to new mailbox."""

        try:
            return self._simple_command('SUBSCRIBE', mailbox, **kw)
        finally:
            self._release_state_change()


    def thread(self, threading_algorithm, charset, *search_criteria, **kw):
        """(typ, [data]) = thread(threading_algorithm, charset, search_criteria, ...)
        IMAPrev1 extension THREAD command."""

        name = 'THREAD'
        kw['untagged_response'] = name
        return self._simple_command(name, threading_algorithm, charset, *search_criteria, **kw)
def uid(self, command, *args, **kw):
"""(typ, [data]) = uid(command, arg, ...)
Execute "command arg ..." with messages identified by UID,
rather than message number.
Assumes 'command' is legal in current state.
Returns response appropriate to 'command'."""
command = command.upper()
if command in UID_direct:
resp = command
else:
resp = 'FETCH'
kw['untagged_response'] = resp
return self._simple_command('UID', command, *args, **kw)
    def unsubscribe(self, mailbox, **kw):
        """(typ, [data]) = unsubscribe(mailbox)
        Unsubscribe from old mailbox."""

        try:
            return self._simple_command('UNSUBSCRIBE', mailbox, **kw)
        finally:
            # UNSUBSCRIBE is a synchronous command, so _command leaves the
            # state-change lock held; always release it here.
            self._release_state_change()
def xatom(self, name, *args, **kw):
"""(typ, [data]) = xatom(name, arg, ...)
Allow simple extension commands notified by server in CAPABILITY response.
Assumes extension command 'name' is legal in current state.
Returns response appropriate to extension command 'name'."""
name = name.upper()
if not name in Commands:
Commands[name] = ((self.state,), False)
try:
return self._simple_command(name, *args, **kw)
finally:
self._release_state_change()
# Internal methods
def _append_untagged(self, typ, dat):
# Append new 'dat' to end of last untagged response if same 'typ',
# else append new response.
if dat is None: dat = ''
self.commands_lock.acquire()
if self.untagged_responses:
urn, urd = self.untagged_responses[-1]
if urn != typ:
urd = None
else:
urd = None
if urd is None:
urd = []
self.untagged_responses.append([typ, urd])
urd.append(dat)
self.commands_lock.release()
if __debug__: self._log(5, 'untagged_responses[%s] %s += ["%s"]' % (typ, len(urd)-1, dat))
    def _check_bye(self):
        """Raise self.abort with the server's message if an untagged BYE
        has been received. The BYE response is left queued (leave=True)
        so later checks see it too."""
        bye = self._get_untagged_response('BYE', leave=True)
        if bye:
            raise self.abort(bye[-1])
def _checkquote(self, arg):
# Must quote command args if "atom-specials" present,
# and not already quoted. NB: single quotes are removed.
if not isinstance(arg, basestring):
return arg
if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
return arg
if len(arg) >= 2 and (arg[0],arg[-1]) in (("'","'"),):
return arg[1:-1]
if arg and self.mustquote_cre.search(arg) is None:
return arg
return self._quote(arg)
def _choose_nonull_or_dflt(self, dflt, *args):
if isinstance(dflt, basestring):
dflttyp = basestring # Allow any string type
else:
dflttyp = type(dflt)
for arg in args:
if arg is not None:
if isinstance(arg, dflttyp):
return arg
if __debug__: self._log(0, 'bad arg is %s, expecting %s' % (type(arg), dflttyp))
return dflt
    def _command(self, name, *args, **kw):
        """Queue command 'name' with 'args' for transmission and return
        its Request instance.

        Async commands release the state-change lock immediately; sync
        commands keep it held (released later by the public wrapper) and
        first wait for all outstanding commands to complete. Literals
        are sent via continuation requests.
        """

        if Commands[name][CMD_VAL_ASYNC]:
            cmdtyp = 'async'
        else:
            cmdtyp = 'sync'

        if __debug__: self._log(1, '[%s] %s %s' % (cmdtyp, name, args))

        if __debug__: self._log(3, 'state_change_pending.acquire')

        self.state_change_pending.acquire()

        self._end_idle()

        if cmdtyp == 'async':
            self.state_change_pending.release()
            if __debug__: self._log(3, 'state_change_pending.release')
        else:
            # Need to wait for all async commands to complete
            self._check_bye()
            self.commands_lock.acquire()
            if self.tagged_commands:
                self.state_change_free.clear()
                need_event = True
            else:
                need_event = False
            self.commands_lock.release()
            if need_event:
                if __debug__: self._log(3, 'sync command %s waiting for empty commands Q' % name)
                self.state_change_free.wait()
                if __debug__: self._log(3, 'sync command %s proceeding' % name)

        if self.state not in Commands[name][CMD_VAL_STATES]:
            self.literal = None
            raise self.error('command %s illegal in state %s'
                                % (name, self.state))

        self._check_bye()

        # Discard any stale status responses from previous commands.
        for typ in ('OK', 'NO', 'BAD'):
            self._get_untagged_response(typ)

        if self._get_untagged_response('READ-ONLY', leave=True) and not self.is_readonly:
            self.literal = None
            raise self.readonly('mailbox status changed to READ-ONLY')

        if self.Terminate:
            raise self.abort('connection closed')

        rqb = self._request_push(name=name, **kw)

        data = '%s %s' % (rqb.tag, name)
        for arg in args:
            if arg is None: continue
            data = '%s %s' % (data, self._checkquote(arg))

        literal = self.literal
        if literal is not None:
            self.literal = None
            if isinstance(literal, basestring):
                literator = None
                data = '%s {%s}' % (data, len(literal))
            else:
                # A callable literal is invoked per continuation response.
                literator = literal

        if __debug__: self._log(4, 'data=%s' % data)

        rqb.data = '%s%s' % (data, CRLF)

        if literal is None:
            self.ouq.put(rqb)
            return rqb

        # Must setup continuation expectancy *before* ouq.put
        crqb = self._request_push(tag='continuation')

        self.ouq.put(rqb)

        while True:
            # Wait for continuation response
            ok, data = crqb.get_response('command: %s => %%s' % name)
            if __debug__: self._log(4, 'continuation => %s, %s' % (ok, data))

            # NO/BAD response?
            if not ok:
                break

            # Send literal
            if literator is not None:
                literal = literator(data, rqb)

            if literal is None:
                break

            if literator is not None:
                # Need new request for next continuation response
                crqb = self._request_push(tag='continuation')

            if __debug__: self._log(4, 'write literal size %s' % len(literal))
            crqb.data = '%s%s' % (literal, CRLF)
            self.ouq.put(crqb)

            if literator is None:
                break

        return rqb
def _command_complete(self, rqb, kw):
# Called for non-callback commands
typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
self._check_bye()
if typ == 'BAD':
if __debug__: self._print_log()
raise self.error('%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
if 'untagged_response' in kw:
return self._untagged_response(typ, dat, kw['untagged_response'])
return typ, dat
    def _command_completer(self, (response, cb_arg, error)):
        """Completion handler for commands issued with a user callback:
        re-arm the Request with the user's callback and deliver or abort.

        NOTE(review): the tuple-unpacking parameter is Python-2-only
        syntax and must be rewritten for any Python 3 port.
        """

        # Called for callback commands
        rqb, kw = cb_arg
        rqb.callback = kw['callback']
        rqb.callback_arg = kw.get('cb_arg')
        if error is not None:
            if __debug__: self._print_log()
            typ, val = error
            rqb.abort(typ, val)
            return
        bye = self._get_untagged_response('BYE', leave=True)
        if bye:
            rqb.abort(self.abort, bye[-1])
            return
        typ, dat = response
        if typ == 'BAD':
            if __debug__: self._print_log()
            rqb.abort(self.error, '%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
            return
        if 'untagged_response' in kw:
            response = self._untagged_response(typ, dat, kw['untagged_response'])
        rqb.deliver(response)
def _deliver_dat(self, typ, dat, kw):
if 'callback' in kw:
kw['callback'](((typ, dat), kw.get('cb_arg'), None))
return typ, dat
def _deliver_exc(self, exc, dat, kw):
if 'callback' in kw:
kw['callback']((None, kw.get('cb_arg'), (exc, dat)))
raise exc(dat)
def _end_idle(self):
self.idle_lock.acquire()
irqb = self.idle_rqb
if irqb is None:
self.idle_lock.release()
return
self.idle_rqb = None
self.idle_timeout = None
self.idle_lock.release()
irqb.data = 'DONE%s' % CRLF
self.ouq.put(irqb)
if __debug__: self._log(2, 'server IDLE finished')
def _get_untagged_response(self, name, leave=False):
self.commands_lock.acquire()
for i, (typ, dat) in enumerate(self.untagged_responses):
if typ == name:
if not leave:
del self.untagged_responses[i]
self.commands_lock.release()
if __debug__: self._log(5, '_get_untagged_response(%s) => %s' % (name, dat))
return dat
self.commands_lock.release()
return None
    def _match(self, cre, s):
        # Run compiled regular expression 'cre' match method on 's'.
        # Save the match object in self.mo (for later group() access by
        # the caller), return success as a bool.
        self.mo = cre.match(s)
        return self.mo is not None
    def _put_response(self, resp):
        """Parse one unit of server output 'resp' and dispatch it.

        Handles (in order): accumulation of an expected literal, tagged
        command completions, untagged ('*') responses, continuation
        ('+') responses, embedded literals, bracketed response codes,
        and NO/BAD/BYE notifications. Maintains parser state in
        self._expecting_data / self._accumulated_data /
        self._literal_expected / self.mo.
        """

        if self._expecting_data > 0:
            # In the middle of reading a literal: consume up to the
            # remaining expected byte count.
            rlen = len(resp)
            dlen = min(self._expecting_data, rlen)
            self._expecting_data -= dlen
            if rlen <= dlen:
                self._accumulated_data.append(resp)
                return
            self._accumulated_data.append(resp[:dlen])
            resp = resp[dlen:]

        if self._accumulated_data:
            # Literal complete: attach (header, literal-bytes) tuple.
            typ, dat = self._literal_expected
            self._append_untagged(typ, (dat, ''.join(self._accumulated_data)))
            self._accumulated_data = []

        # Protocol mandates all lines terminated by CRLF
        resp = resp[:-2]

        if 'continuation' in self.tagged_commands:
            continuation_expected = True
        else:
            continuation_expected = False

        if self._literal_expected is not None:
            # Trailing part of a response whose literal just finished.
            dat = resp
            if self._match(self.literal_cre, dat):
                # Another literal follows immediately.
                self._literal_expected[1] = dat
                self._expecting_data = int(self.mo.group('size'))
                if __debug__: self._log(4, 'expecting literal size %s' % self._expecting_data)
                return
            typ = self._literal_expected[0]
            self._literal_expected = None
            self._append_untagged(typ, dat)  # Tail
            if __debug__: self._log(4, 'literal completed')
        else:
            # Command completion response?
            if self._match(self.tagre, resp):
                tag = self.mo.group('tag')
                typ = self.mo.group('type')
                dat = self.mo.group('data')
                if not tag in self.tagged_commands:
                    if __debug__: self._log(1, 'unexpected tagged response: %s' % resp)
                else:
                    self._request_pop(tag, (typ, [dat]))
            else:
                dat2 = None

                # '*' (untagged) responses?
                if not self._match(self.untagged_response_cre, resp):
                    if self._match(self.untagged_status_cre, resp):
                        dat2 = self.mo.group('data2')

                if self.mo is None:
                    # Only other possibility is '+' (continuation) response...
                    if self._match(self.continuation_cre, resp):
                        if not continuation_expected:
                            if __debug__: self._log(1, "unexpected continuation response: '%s'" % resp)
                            return
                        self._request_pop('continuation', (True, self.mo.group('data')))
                        return

                    if __debug__: self._log(1, "unexpected response: '%s'" % resp)
                    return

                typ = self.mo.group('type')
                dat = self.mo.group('data')
                if dat is None: dat = ''        # Null untagged response
                if dat2: dat = dat + ' ' + dat2

                # Is there a literal to come?
                if self._match(self.literal_cre, dat):
                    self._expecting_data = int(self.mo.group('size'))
                    if __debug__: self._log(4, 'read literal size %s' % self._expecting_data)
                    self._literal_expected = [typ, dat]
                    return

                self._append_untagged(typ, dat)
                if typ != 'OK':                 # NO, BYE, IDLE
                    self._end_idle()

        # Bracketed response information?
        if typ in ('OK', 'NO', 'BAD') and self._match(self.response_code_cre, dat):
            self._append_untagged(self.mo.group('type'), self.mo.group('data'))

        # Command waiting for aborted continuation response?
        if continuation_expected:
            self._request_pop('continuation', (False, resp))

        # Bad news?
        if typ in ('NO', 'BAD', 'BYE'):
            if typ == 'BYE':
                self.Terminate = True
            if __debug__: self._log(1, '%s response: %s' % (typ, dat))
def _quote(self, arg):
return '"%s"' % arg.replace('\\', '\\\\').replace('"', '\\"')
def _release_state_change(self):
if self.state_change_pending.locked():
self.state_change_pending.release()
if __debug__: self._log(3, 'state_change_pending.release')
def _request_pop(self, name, data):
self.commands_lock.acquire()
rqb = self.tagged_commands.pop(name)
if not self.tagged_commands:
if __debug__: self._log(3, 'state_change_free.set')
self.state_change_free.set()
self.commands_lock.release()
if __debug__: self._log(4, '_request_pop(%s, %s) = %s' % (name, data, rqb.tag))
rqb.deliver(data)
def _request_push(self, tag=None, name=None, **kw):
self.commands_lock.acquire()
rqb = Request(self, name=name, **kw)
if tag is None:
tag = rqb.tag
self.tagged_commands[tag] = rqb
self.commands_lock.release()
if __debug__: self._log(4, '_request_push(%s, %s, %s) = %s' % (tag, name, `kw`, rqb.tag))
return rqb
    def _simple_command(self, name, *args, **kw):
        """Send command 'name' and wait for its (typ, dat) completion —
        unless a 'callback' is supplied, in which case return (None, None)
        immediately and deliver the result via _command_completer."""
        if 'callback' in kw:
            # Note: old calling sequence for back-compat with python <2.6
            self._command(name, callback=self._command_completer, cb_arg=kw, cb_self=True, *args)
            return (None, None)
        return self._command_complete(self._command(name, *args), kw)
def _untagged_response(self, typ, dat, name):
if typ == 'NO':
return typ, dat
data = self._get_untagged_response(name)
if not data:
return typ, [None]
while True:
dat = self._get_untagged_response(name)
if not dat:
break
data += dat
if __debug__: self._log(4, '_untagged_response(%s, ?, %s) => %s' % (typ, name, data))
return typ, data
# Threads
    def _close_threads(self):
        """Flush the writer thread, shut the connection down, and join
        all three service threads (writer, reader, handler)."""
        if __debug__: self._log(1, '_close_threads')
        # A None request tells the writer thread to exit.
        self.ouq.put(None)
        self.wrth.join()

        if __debug__: self._log(1, 'call shutdown')

        self.shutdown()

        self.rdth.join()
        self.inth.join()
    def _handler(self):
        """Handler thread: consume lines from the input queue 'inq',
        parse them via _put_response, manage IDLE timeouts, and on
        termination abort every outstanding request.

        NOTE(review): the backquote repr below is Python-2-only syntax;
        use repr() when porting.
        """

        resp_timeout = self.resp_timeout

        threading.currentThread().setName(self.identifier + 'handler')

        time.sleep(0.1)     # Don't start handling before main thread ready

        if __debug__: self._log(1, 'starting')

        typ, val = self.abort, 'connection terminated'

        while not self.Terminate:

            try:
                if self.idle_timeout is not None:
                    timeout = self.idle_timeout - time.time()
                    if timeout <= 0:
                        timeout = 1
                    if __debug__:
                        if self.idle_rqb is not None:
                            self._log(5, 'server IDLING, timeout=%.2f' % timeout)
                else:
                    timeout = resp_timeout
                line = self.inq.get(True, timeout)
            except Queue.Empty:
                # Queue.get timed out: either a response timeout or an
                # IDLE timeout (which injects a synthetic response line).
                if self.idle_rqb is None:
                    if resp_timeout is not None and self.tagged_commands:
                        if __debug__: self._log(1, 'response timeout')
                        typ, val = self.abort, 'no response after %s secs' % resp_timeout
                        break
                    continue
                if self.idle_timeout > time.time():
                    continue
                if __debug__: self._log(2, 'server IDLE timedout')
                line = IDLE_TIMEOUT_RESPONSE

            if line is None:
                if __debug__: self._log(1, 'inq None - terminating')
                break

            if not isinstance(line, basestring):
                # The reader queues an (exception, reason) tuple on error.
                typ, val = line
                break

            try:
                self._put_response(line)
            except:
                typ, val = self.error, 'program error: %s - %s' % sys.exc_info()[:2]
                break

        self.Terminate = True

        if __debug__: self._log(1, 'terminating: %s' % `val`)

        # Abort everything still queued for sending...
        while not self.ouq.empty():
            try:
                self.ouq.get_nowait().abort(typ, val)
            except Queue.Empty:
                break
        self.ouq.put(None)

        # ...and every tagged command still awaiting a response.
        # (Python 2 keys() returns a list copy, so popping while
        # iterating is safe here.)
        self.commands_lock.acquire()
        for name in self.tagged_commands.keys():
            rqb = self.tagged_commands.pop(name)
            rqb.abort(typ, val)
        self.state_change_free.set()
        self.commands_lock.release()
        if __debug__: self._log(3, 'state_change_free.set')

        if __debug__: self._log(1, 'finished')
if hasattr(select_module, "poll"):
        def _reader(self):
            """Reader thread (poll-based): wait for data on the
            connection, split it into '\\n'-terminated lines and queue
            them on 'inq' for the handler thread.

            NOTE(review): the backquote repr below is Python-2-only
            syntax; use repr() when porting.
            """

            threading.currentThread().setName(self.identifier + 'reader')

            if __debug__: self._log(1, 'starting using poll')

            def poll_error(state):
                # Human-readable description of poll error bits in 'state'.
                PollErrors = {
                    select.POLLERR:     'Error',
                    select.POLLHUP:     'Hang up',
                    select.POLLNVAL:    'Invalid request: descriptor not open',
                }
                return ' '.join([PollErrors[s] for s in PollErrors.keys() if (s & state)])

            line_part = ''          # Partial line carried between reads

            poll = select.poll()

            poll.register(self.read_fd, select.POLLIN)

            rxzero = 0              # Consecutive zero-length reads (EOF detector)
            terminate = False
            read_poll_timeout = self.read_poll_timeout * 1000       # poll() timeout is in millisecs

            while not (terminate or self.Terminate):
                if self.state == LOGOUT:
                    timeout = 1
                else:
                    timeout = read_poll_timeout
                try:
                    r = poll.poll(timeout)
                    if __debug__: self._log(5, 'poll => %s' % `r`)
                    if not r:
                        continue    # Timeout

                    fd,state = r[0]

                    if state & select.POLLIN:
                        data = self.read(self.read_size)        # Drain ssl buffer if present
                        start = 0
                        dlen = len(data)
                        if __debug__: self._log(5, 'rcvd %s' % dlen)
                        if dlen == 0:
                            rxzero += 1
                            if rxzero > 5:
                                raise IOError("Too many read 0")
                            time.sleep(0.1)
                            continue    # Try again
                        rxzero = 0

                        # Split received data into complete lines; keep
                        # any trailing partial line for the next read.
                        while True:
                            stop = data.find('\n', start)
                            if stop < 0:
                                line_part += data[start:]
                                break
                            stop += 1
                            line_part, start, line = \
                                '', stop, line_part + data[start:stop]
                            if __debug__: self._log(4, '< %s' % line)
                            self.inq.put(line)
                            if self.TerminateReader:
                                terminate = True

                    if state & ~(select.POLLIN):
                        raise IOError(poll_error(state))
                except:
                    # Any failure terminates the reader and notifies the
                    # handler thread via an (exception, reason) tuple.
                    reason = 'socket error: %s - %s' % sys.exc_info()[:2]
                    if __debug__:
                        if not self.Terminate:
                            self._print_log()
                            if self.debug: self.debug += 4      # Output all
                            self._log(1, reason)
                    self.inq.put((self.abort, reason))
                    break

            poll.unregister(self.read_fd)

            if __debug__: self._log(1, 'finished')
else:
# No "poll" - use select()
        def _reader(self):
            """Reader thread (select-based fallback for platforms without
            poll): wait for data on the connection, split it into
            '\\n'-terminated lines and queue them on 'inq' for the
            handler thread."""

            threading.currentThread().setName(self.identifier + 'reader')

            if __debug__: self._log(1, 'starting using select')

            line_part = ''          # Partial line carried between reads

            rxzero = 0              # Consecutive zero-length reads (EOF detector)
            terminate = False

            while not (terminate or self.Terminate):
                if self.state == LOGOUT:
                    timeout = 1
                else:
                    timeout = self.read_poll_timeout
                try:
                    r,w,e = select.select([self.read_fd], [], [], timeout)
                    if __debug__: self._log(5, 'select => %s, %s, %s' % (r,w,e))
                    if not r:                               # Timeout
                        continue

                    data = self.read(self.read_size)        # Drain ssl buffer if present
                    start = 0
                    dlen = len(data)
                    if __debug__: self._log(5, 'rcvd %s' % dlen)
                    if dlen == 0:
                        rxzero += 1
                        if rxzero > 5:
                            raise IOError("Too many read 0")
                        time.sleep(0.1)
                        continue                            # Try again
                    rxzero = 0

                    # Split received data into complete lines; keep any
                    # trailing partial line for the next read.
                    while True:
                        stop = data.find('\n', start)
                        if stop < 0:
                            line_part += data[start:]
                            break
                        stop += 1
                        line_part, start, line = \
                            '', stop, line_part + data[start:stop]
                        if __debug__: self._log(4, '< %s' % line)
                        self.inq.put(line)
                        if self.TerminateReader:
                            terminate = True
                except:
                    # Any failure terminates the reader and notifies the
                    # handler thread via an (exception, reason) tuple.
                    reason = 'socket error: %s - %s' % sys.exc_info()[:2]
                    if __debug__:
                        if not self.Terminate:
                            self._print_log()
                            if self.debug: self.debug += 4      # Output all
                            self._log(1, reason)
                    self.inq.put((self.abort, reason))
                    break

            if __debug__: self._log(1, 'finished')
    def _writer(self):
        """Writer thread: take requests off the output queue 'ouq' and
        send their data to the server until told to stop (a None entry)
        or a send fails."""

        threading.currentThread().setName(self.identifier + 'writer')

        if __debug__: self._log(1, 'starting')

        reason = 'Terminated'

        while not self.Terminate:
            rqb = self.ouq.get()
            if rqb is None:
                break # Outq flushed

            try:
                self.send(rqb.data)
                if __debug__: self._log(4, '> %s' % rqb.data)
            except:
                # Bare except is deliberate: any send failure aborts the
                # request and terminates this thread.
                reason = 'socket error: %s - %s' % sys.exc_info()[:2]
                if __debug__:
                    if not self.Terminate:
                        self._print_log()
                        if self.debug: self.debug += 4      # Output all
                        self._log(1, reason)
                rqb.abort(self.abort, reason)
                break

        # Tell the handler thread why we stopped.
        self.inq.put((self.abort, reason))

        if __debug__: self._log(1, 'finished')
# Debugging
if __debug__:
        def _init_debug(self, debug=None, debug_file=None, debug_buf_lvl=None):
            """Initialise debug level, output stream, buffering threshold,
            and the rolling command log."""
            self.debug = self._choose_nonull_or_dflt(0, debug, Debug)
            self.debug_file = self._choose_nonull_or_dflt(sys.stderr, debug_file)
            self.debug_buf_lvl = self._choose_nonull_or_dflt(DFLT_DEBUG_BUF_LVL, debug_buf_lvl)

            self.debug_lock = threading.Lock()
            # Rolling log of the last _cmd_log_len level-1 interactions,
            # dumped by _print_log on errors.
            self._cmd_log_len = 20
            self._cmd_log_idx = 0
            self._cmd_log = {}           # Last `_cmd_log_len' interactions
            if self.debug:
                self._mesg('imaplib2 version %s' % __version__)
                self._mesg('imaplib2 debug level %s, buffer level %s' % (self.debug, self.debug_buf_lvl))
def _dump_ur(self, lvl):
if lvl > self.debug:
return
l = self.untagged_responses
if not l:
return
t = '\n\t\t'
l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
self.debug_lock.acquire()
self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
self.debug_lock.release()
        def _log(self, lvl, line):
            """Log 'line' at level 'lvl': write immediately when the line
            is important (lvl <= 1) or buffering is off, and keep level-1
            lines in the rolling command log."""
            if lvl > self.debug:
                return

            if line[-2:] == CRLF:
                line = line[:-2] + '\\r\\n'

            tn = threading.currentThread().getName()

            if lvl <= 1 or self.debug > self.debug_buf_lvl:
                self.debug_lock.acquire()
                self._mesg(line, tn)
                self.debug_lock.release()
            if lvl != 1:
                return

            # Keep log of last `_cmd_log_len' interactions for debugging.
            self.debug_lock.acquire()
            self._cmd_log[self._cmd_log_idx] = (line, tn, time.time())
            self._cmd_log_idx += 1
            if self._cmd_log_idx >= self._cmd_log_len:
                # Wrap around: overwrite oldest entry next.
                self._cmd_log_idx = 0
            self.debug_lock.release()
def _mesg(self, s, tn=None, secs=None):
if secs is None:
secs = time.time()
if tn is None:
tn = threading.currentThread().getName()
tm = time.strftime('%M:%S', time.localtime(secs))
try:
self.debug_file.write(' %s.%02d %s %s\n' % (tm, (secs*100)%100, tn, s))
self.debug_file.flush()
finally:
pass
def _print_log(self):
self.debug_lock.acquire()
i, n = self._cmd_log_idx, self._cmd_log_len
if n: self._mesg('last %d log messages:' % n)
while n:
try:
self._mesg(*self._cmd_log[i])
except:
pass
i += 1
if i >= self._cmd_log_len:
i = 0
n -= 1
self.debug_lock.release()
class IMAP4_SSL(IMAP4):

    """IMAP4 client class over an SSL connection.

    Instantiate with:
        IMAP4_SSL(host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)

        host            - host's name (default: localhost);
        port            - port number (default: standard IMAP4 SSL port);
        keyfile         - PEM formatted file that contains your private key (default: None);
        certfile        - PEM formatted certificate chain file (default: None);
        ca_certs        - PEM formatted certificate chain file used to validate server certificates (default: None);
        cert_verify_cb  - function to verify authenticity of server certificates (default: None);
        debug           - debug level (default: 0 - no debug);
        debug_file      - debug stream (default: sys.stderr);
        identifier      - thread identifier prefix (default: host);
        timeout         - timeout in seconds when expecting a command response;
        debug_buf_lvl   - debug level at which buffering is turned off.

    For more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        # SSL parameters must be stored before IMAP4.__init__ runs,
        # since it opens the connection.
        self.keyfile = keyfile
        self.certfile = certfile
        self.ca_certs = ca_certs
        self.cert_verify_cb = cert_verify_cb
        IMAP4.__init__(self, host, port, debug, debug_file, identifier, timeout, debug_buf_lvl)

    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Set up a secure connection to remote server on "host:port"
        (default: localhost:standard IMAP4 SSL port).
        This connection will be used by the routines:
        read, send, shutdown, socket, ssl."""

        self.host = self._choose_nonull_or_dflt('', host)
        self.port = self._choose_nonull_or_dflt(IMAP4_SSL_PORT, port)
        self.sock = self.open_socket()
        self.ssl_wrap_socket()

    def read(self, size):
        """data = read(size)
        Read at most 'size' bytes from remote, decompressing first
        when COMPRESS=DEFLATE is active."""

        if self.decompressor is None:
            return self.sock.read(size)

        if self.decompressor.unconsumed_tail:
            data = self.decompressor.unconsumed_tail
        else:
            data = self.sock.read(READ_SIZE)

        return self.decompressor.decompress(data, size)

    def send(self, data):
        """send(data)
        Send 'data' to remote, compressing first when COMPRESS=DEFLATE
        is active."""

        if self.compressor is not None:
            data = self.compressor.compress(data)
            data += self.compressor.flush(zlib.Z_SYNC_FLUSH)

        if hasattr(self.sock, "sendall"):
            self.sock.sendall(data)
            return

        # SSL socket objects without sendall: loop until all written.
        remaining = len(data)
        while remaining > 0:
            sent = self.sock.write(data)
            if sent == remaining:
                break # avoid copy
            data = data[sent:]
            remaining = remaining - sent

    def ssl(self):
        """ssl = ssl()
        Return ssl instance used to communicate with the IMAP4 server."""

        return self.sock
class IMAP4_stream(IMAP4):

    """IMAP4 client class talking over the stdin/stdout of a subprocess.

    Instantiate with:
        IMAP4_stream(command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)

        command         - string that can be passed to subprocess.Popen();
        debug           - debug level (default: 0 - no debug);
        debug_file      - debug stream (default: sys.stderr);
        identifier      - thread identifier prefix (default: host);
        timeout         - timeout in seconds when expecting a command response;
        debug_buf_lvl   - debug level at which buffering is turned off.

    For more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        self.command = command
        self.host = command
        self.port = None
        self.sock = None
        self.writefile, self.readfile = None, None
        self.read_fd = None
        IMAP4.__init__(self, None, None, debug, debug_file, identifier, timeout, debug_buf_lvl)

    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Start the subprocess 'self.command' and attach its stdin/stdout.
        This connection will be used by the routines:
        read, send, shutdown, socket."""

        from subprocess import Popen, PIPE

        self._P = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
        self.writefile, self.readfile = self._P.stdin, self._P.stdout
        self.read_fd = self.readfile.fileno()

    def read(self, size):
        """Read 'size' bytes from the subprocess, decompressing first
        when COMPRESS=DEFLATE is active."""

        if self.decompressor is None:
            return os.read(self.read_fd, size)

        if self.decompressor.unconsumed_tail:
            data = self.decompressor.unconsumed_tail
        else:
            data = os.read(self.read_fd, READ_SIZE)

        return self.decompressor.decompress(data, size)

    def send(self, data):
        """Send data to the subprocess, compressing first when
        COMPRESS=DEFLATE is active."""

        if self.compressor is not None:
            data = self.compressor.compress(data)
            data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
        self.writefile.write(data)
        self.writefile.flush()

    def shutdown(self):
        """Close the pipes established in "open"."""

        self.readfile.close()
        self.writefile.close()
class _Authenticator(object):
"""Private class to provide en/de-coding
for base64 authentication conversation."""
def __init__(self, mechinst):
self.mech = mechinst # Callable object to provide/process data
def process(self, data, rqb):
ret = self.mech(self.decode(data))
if ret is None:
return '*' # Abort conversation
return self.encode(ret)
def encode(self, inp):
#
# Invoke binascii.b2a_base64 iteratively with
# short even length buffers, strip the trailing
# line feed from the result and append. "Even"
# means a number that factors to both 6 and 8,
# so when it gets to the end of the 8-bit input
# there's no partial 6-bit output.
#
oup = ''
while inp:
if len(inp) > 48:
t = inp[:48]
inp = inp[48:]
else:
t = inp
inp = ''
e = binascii.b2a_base64(t)
if e:
oup = oup + e[:-1]
return oup
def decode(self, inp):
if not inp:
return ''
return binascii.a2b_base64(inp)
class _IdleCont(object):

    """Continuation handler for IDLE: while active, the server is in
    IDLE state and will send asynchronous changes."""

    def __init__(self, parent, timeout):
        self.parent = parent
        self.timeout = parent._choose_nonull_or_dflt(IDLE_TIMEOUT, timeout)
        self.parent.idle_timeout = self.timeout + time.time()

    def process(self, data, rqb):
        """Record the IDLE request and (re)arm its timeout; returning
        None means no literal is sent in reply."""
        parent = self.parent
        parent.idle_lock.acquire()
        parent.idle_rqb = rqb
        parent.idle_timeout = self.timeout + time.time()
        parent.idle_lock.release()
        if __debug__: parent._log(2, 'server IDLE started, timeout in %.2f secs' % self.timeout)
        return None
# Month names indexed by month number (1..12); index 0 unused.
MonthNames = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

# Month abbreviation -> month number. NOTE(review): keys are built with
# str.encode(); under Python 2 'str' and bytes are the same type, so
# regexp group() results match — this mapping breaks under Python 3.
Mon2num = dict(zip((x.encode() for x in MonthNames[1:]), range(1, 13)))

# Matches the INTERNALDATE item of a FETCH response, capturing the
# date/time components and the timezone offset.
InternalDate = re.compile(r'.*INTERNALDATE "'
    r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
    r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
    r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
    r'"')
def Internaldate2Time(resp):

    """time_tuple = Internaldate2Time(resp)
    Convert IMAP4 INTERNALDATE to UT. Returns None if 'resp' does not
    match the INTERNALDATE format."""

    mo = InternalDate.match(resp)
    if not mo:
        return None

    mon = Mon2num[mo.group('mon')]
    zonen = mo.group('zonen')

    day = int(mo.group('day'))
    year = int(mo.group('year'))
    hour = int(mo.group('hour'))
    min = int(mo.group('min'))
    sec = int(mo.group('sec'))
    zoneh = int(mo.group('zoneh'))
    zonem = int(mo.group('zonem'))

    # INTERNALDATE timezone must be subtracted to get UT
    zone = (zoneh*60 + zonem)*60
    if zonen == '-':
        zone = -zone

    tt = (year, mon, day, hour, min, sec, -1, -1, -1)

    utc = time.mktime(tt)

    # Following is necessary because the time module has no 'mkgmtime'.
    # 'mktime' assumes arg in local timezone, so adds timezone/altzone.

    lt = time.localtime(utc)
    if time.daylight and lt[-1]:
        zone = zone + time.altzone
    else:
        zone = zone + time.timezone

    return time.localtime(utc - zone)

Internaldate2tuple = Internaldate2Time  # (Backward compatible)
def Time2Internaldate(date_time):

    """'"DD-Mmm-YYYY HH:MM:SS +HHMM"' = Time2Internaldate(date_time)
    Convert 'date_time' to IMAP4 INTERNALDATE representation.

    Accepts a seconds value, a time tuple/struct_time, or an already
    double-quoted string (returned unchanged); raises ValueError for
    anything else."""

    if isinstance(date_time, (int, float)):
        tt = time.localtime(date_time)
    elif isinstance(date_time, (tuple, time.struct_time)):
        tt = date_time
    elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
        return date_time        # Assume in correct format
    else:
        raise ValueError("date_time not of a known type")

    if time.daylight and tt[-1]:
        zone = -time.altzone
    else:
        zone = -time.timezone
    day, month, year = tt[2], MonthNames[tt[1]], tt[0]
    hour, minute, second = tt[3:6]
    zone_h, zone_m = divmod(zone//60, 60)
    return '"%2d-%s-%04d %02d:%02d:%02d %+03d%02d"' % \
        (day, month, year, hour, minute, second, zone_h, zone_m)
FLAGS_cre = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')

def ParseFlags(resp):

    """('flag', ...) = ParseFlags(line)
    Convert IMAP4 flags response to python tuple.
    Returns an empty tuple when 'resp' contains no FLAGS item."""

    match = FLAGS_cre.match(resp)
    if match is None:
        return ()

    return tuple(match.group('flags').split())
| 34.797551 | 190 | 0.561597 |
"""Threaded IMAP4 client.
Based on RFC 3501 and original imaplib module.
Public classes: IMAP4
IMAP4_SSL
IMAP4_stream
Public functions: Internaldate2Time
ParseFlags
Time2Internaldate
"""
__all__ = ("IMAP4", "IMAP4_SSL", "IMAP4_stream",
"Internaldate2Time", "ParseFlags", "Time2Internaldate")
__version__ = "2.33"
__release__ = "2"
__revision__ = "33"
__credits__ = """
Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
String method conversion by ESR, February 2001.
GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
IDLE via threads suggested by Philippe Normand <phil@respyre.org> January 2005.
GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
COMPRESS/DEFLATE contributed by Bron Gondwana <brong@brong.net> May 2009.
STARTTLS from Jython's imaplib by Alan Kennedy.
ID contributed by Dave Baggett <dave@baggett.org> November 2009.
Improved untagged responses handling suggested by Dave Baggett <dave@baggett.org> November 2009.
Improved thread naming, and 0 read detection contributed by Grant Edwards <grant.b.edwards@gmail.com> June 2010.
Improved timeout handling contributed by Ivan Vovnenko <ivovnenko@gmail.com> October 2010.
Timeout handling further improved by Ethan Glasser-Camp <glasse@cs.rpi.edu> December 2010.
Time2Internaldate() patch to match RFC2060 specification of English month names from bugs.python.org/issue11024 March 2011.
starttls() bug fixed with the help of Sebastian Spaeth <sebastian@sspaeth.de> April 2011.
Threads now set the "daemon" flag (suggested by offlineimap-project) April 2011.
Single quoting introduced with the help of Vladimir Marek <vladimir.marek@oracle.com> August 2011."""
__author__ = "Piers Lauder <piers@janeelix.com>"
__URL__ = "http://imaplib2.sourceforge.net"
__license__ = "Python License"
import binascii, errno, os, Queue, random, re, select, socket, sys, time, threading, zlib
select_module = select
# Globals
CRLF = '\r\n'                                   # Protocol line terminator
Debug = None                                    # Backward compatibility
IMAP4_PORT = 143                                # Standard IMAP4 port
IMAP4_SSL_PORT = 993                            # Standard IMAP4-over-SSL port
IDLE_TIMEOUT_RESPONSE = '* IDLE TIMEOUT\r\n'    # Synthetic line injected when IDLE times out
IDLE_TIMEOUT = 60*29                            # Don't stay in IDLE state longer
READ_POLL_TIMEOUT = 30                          # Secs between reader-thread poll/select wakeups
READ_SIZE = 32768                               # Socket read chunk size

DFLT_DEBUG_BUF_LVL = 3                          # Debug level at which output is buffered

AllowedVersions = ('IMAP4REV1', 'IMAP4')

# Indices into the per-command value tuples of the Commands table below.
CMD_VAL_STATES = 0
CMD_VAL_ASYNC = 1
# Connection state names.
NONAUTH, AUTH, SELECTED, LOGOUT = 'NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'
# Command table: name -> (tuple of states in which the command is
# legal, True if the command may run asynchronously).
Commands = {
    'APPEND': ((AUTH, SELECTED), False),
    'AUTHENTICATE': ((NONAUTH,), False),
    'CAPABILITY': ((NONAUTH, AUTH, SELECTED), True),
    'CHECK': ((SELECTED,), True),
    'CLOSE': ((SELECTED,), False),
    'COMPRESS': ((AUTH,), False),
    'COPY': ((SELECTED,), True),
    'CREATE': ((AUTH, SELECTED), True),
    'DELETE': ((AUTH, SELECTED), True),
    'DELETEACL': ((AUTH, SELECTED), True),
    'EXAMINE': ((AUTH, SELECTED), False),
    'EXPUNGE': ((SELECTED,), True),
    'FETCH': ((SELECTED,), True),
    'GETACL': ((AUTH, SELECTED), True),
    'GETANNOTATION':((AUTH, SELECTED), True),
    'GETQUOTA': ((AUTH, SELECTED), True),
    'GETQUOTAROOT': ((AUTH, SELECTED), True),
    'ID': ((NONAUTH, AUTH, LOGOUT, SELECTED), True),
    'IDLE': ((SELECTED,), False),
    'LIST': ((AUTH, SELECTED), True),
    'LOGIN': ((NONAUTH,), False),
    'LOGOUT': ((NONAUTH, AUTH, LOGOUT, SELECTED), False),
    'LSUB': ((AUTH, SELECTED), True),
    'MYRIGHTS': ((AUTH, SELECTED), True),
    'NAMESPACE': ((AUTH, SELECTED), True),
    'NOOP': ((NONAUTH, AUTH, SELECTED), True),
    'PARTIAL': ((SELECTED,), True),
    'PROXYAUTH': ((AUTH,), False),
    'RENAME': ((AUTH, SELECTED), True),
    'SEARCH': ((SELECTED,), True),
    'SELECT': ((AUTH, SELECTED), False),
    'SETACL': ((AUTH, SELECTED), False),
    'SETANNOTATION':((AUTH, SELECTED), True),
    'SETQUOTA': ((AUTH, SELECTED), False),
    'SORT': ((SELECTED,), True),
    'STARTTLS': ((NONAUTH,), False),
    'STATUS': ((AUTH, SELECTED), True),
    'STORE': ((SELECTED,), True),
    'SUBSCRIBE': ((AUTH, SELECTED), False),
    'THREAD': ((SELECTED,), True),
    'UID': ((SELECTED,), True),
    'UNSUBSCRIBE': ((AUTH, SELECTED), False),
    }

# UID sub-commands whose untagged responses arrive under their own name
# (everything else arrives as FETCH responses) — see the uid() method.
UID_direct = ('SEARCH', 'SORT', 'THREAD')
def Int2AP(num):

    """string = Int2AP(num)
    Return 'num' converted to a string using characters from the set 'A'..'P'
    (base-16 digits mapped onto letters). Returns '' for 0.
    """

    alphabet = 'ABCDEFGHIJKLMNOP'
    digits = []
    n = int(abs(num))
    while n:
        n, rem = divmod(n, 16)
        digits.append(alphabet[rem])
    digits.reverse()
    return ''.join(digits)
class Request(object):

    """Private class representing a single command awaiting its
    response. Allocates a unique tag from the parent connection and
    either blocks a caller in get_response() or fires a callback when
    the response is delivered."""

    def __init__(self, parent, name=None, callback=None, cb_arg=None, cb_self=False):
        self.parent = parent
        self.name = name
        self.callback = callback            # Function called at completion
        # Include a self-reference in the callback arg when requested.
        self.callback_arg = (self, cb_arg) if cb_self else cb_arg

        self.tag = '%s%s' % (parent.tagpre, parent.tagnum)
        parent.tagnum += 1

        self.ready = threading.Event()
        self.response = None
        self.aborted = None
        self.data = None

    def abort(self, typ, val):
        """Record failure (typ, val) and deliver it."""
        self.aborted = (typ, val)
        self.deliver(None)

    def get_response(self, exc_fmt=None):
        """Block until the response arrives and return it; raises the
        recorded exception type if the request was aborted."""
        self.callback = None
        if __debug__: self.parent._log(3, '%s:%s.ready.wait' % (self.name, self.tag))
        self.ready.wait()

        if self.aborted is None:
            return self.response
        typ, val = self.aborted
        if exc_fmt is None:
            exc_fmt = '%s - %%s' % typ
        raise typ(exc_fmt % str(val))

    def deliver(self, response):
        """Hand 'response' to the callback if one is set, otherwise
        store it and wake any get_response() waiter."""
        if self.callback is not None:
            self.callback((response, self.callback_arg, self.aborted))
            return

        self.response = response
        self.ready.set()
        if __debug__: self.parent._log(3, '%s:%s.ready.set' % (self.name, self.tag))
class IMAP4(object):
"""Threaded IMAP4 client class.
Instantiate with:
IMAP4(host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port);
debug - debug level (default: 0 - no debug);
debug_file - debug stream (default: sys.stderr);
identifier - thread identifier prefix (default: host);
timeout - timeout in seconds when expecting a command response (default: no timeout),
debug_buf_lvl - debug level at which buffering is turned off.
All IMAP4rev1 commands are supported by methods of the same name.
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data' is
either a string, or a tuple. If a tuple, then the first part is the
header of the response, and the second part contains the data (ie:
'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"), which is
a sub-class of 'error'. Mailbox status changes from READ-WRITE to
READ-ONLY raise the exception class <instance>.readonly("<reason>"),
which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
All commands take two optional named arguments:
'callback' and 'cb_arg'
If 'callback' is provided then the command is asynchronous, so after
the command is queued for transmission, the call returns immediately
with the tuple (None, None).
The result will be posted by invoking "callback" with one arg, a tuple:
callback((result, cb_arg, None))
or, if there was a problem:
callback((None, cb_arg, (exception class, reason)))
Otherwise the command is synchronous (waits for result). But note
that state-changing commands will both block until previous commands
have completed, and block subsequent commands until they have finished.
All (non-callback) arguments to commands are converted to strings,
except for AUTHENTICATE, and the last argument to APPEND which is
passed as an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double or single quotes) each string is
quoted. However, the 'password' argument to the LOGIN command is
always quoted. If you want to avoid having an argument string
quoted (eg: the 'flags' argument to STORE) then enclose the string
in parentheses (eg: "(\Deleted)"). If you are using "sequence sets"
containing the wildcard character '*', then enclose the argument
in single quotes: the quotes will be removed and the resulting
string passed unquoted. Note also that you can pass in an argument
with a type that doesn't evaluate to 'basestring' (eg: 'bytearray')
and it will be converted to a string without quoting.
There is one instance variable, 'state', that is useful for tracking
whether the client needs to login to the server. If it has the
value "AUTH" after instantiating the class, then the connection
is pre-authenticated (otherwise it will be "NONAUTH"). Selecting a
mailbox changes the state to be "SELECTED", closing a mailbox changes
back to "AUTH", and once the client has logged out, the state changes
to "LOGOUT" and no further commands may be issued.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
Note also that you must call logout() to shut down threads before
discarding an instance.
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
continuation_cre = re.compile(r'\+( (?P<data>.*))?')
literal_cre = re.compile(r'.*{(?P<size>\d+)}$')
mapCRLF_cre = re.compile(r'\r\n|\r|\n')
# Need to quote "atom-specials" :-
# "(" / ")" / "{" / SP / 0x00 - 0x1f / 0x7f / "%" / "*" / DQUOTE / "\" / "]"
# so match not the inverse set
mustquote_cre = re.compile(r"[^!#$&'+,./0-9:;<=>?@A-Z\[^_`a-z|}~-]")
response_code_cre = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
untagged_response_cre = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
untagged_status_cre = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
    def __init__(self, host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        """Open the connection, start the writer/reader/handler threads,
        consume the server welcome, and verify IMAP4 capability."""
        self.state = NONAUTH            # IMAP4 protocol state
        self.literal = None             # A literal argument to a command
        self.tagged_commands = {}       # Tagged commands awaiting response
        self.untagged_responses = []    # [[typ: [data, ...]], ...]
        self.mailbox = None             # Current mailbox selected
        self.mailboxes = {}             # Untagged responses state per mailbox
        self.is_readonly = False        # READ-ONLY desired state
        self.idle_rqb = None            # Server IDLE Request - see _IdleCont
        self.idle_timeout = None        # Must prod server occasionally
        self._expecting_data = 0        # Expecting message data
        self._accumulated_data = []     # Message data accumulated so far
        self._literal_expected = None   # Message data descriptor
        self.compressor = None          # COMPRESS/DEFLATE if not None
        self.decompressor = None
        # Create unique tag for this session,
        # and compile tagged response matcher.
        self.tagnum = 0
        self.tagpre = Int2AP(random.randint(4096, 65535))
        self.tagre = re.compile(r'(?P<tag>'
                        + self.tagpre
                        + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')
        if __debug__: self._init_debug(debug, debug_file, debug_buf_lvl)
        self.resp_timeout = timeout     # Timeout waiting for command response
        # Poll no longer than the response timeout, so a timeout is noticed.
        if timeout is not None and timeout < READ_POLL_TIMEOUT:
            self.read_poll_timeout = timeout
        else:
            self.read_poll_timeout = READ_POLL_TIMEOUT
        self.read_size = READ_SIZE
        # Open socket to server.
        self.open(host, port)
        if __debug__:
            if debug:
                self._mesg('connected to %s on port %s' % (self.host, self.port))
        # Threading setup: identifier prefixes thread debug output.
        if identifier is not None:
            self.identifier = identifier
        else:
            self.identifier = self.host
        if self.identifier:
            self.identifier += ' '
        self.Terminate = self.TerminateReader = False
        self.state_change_free = threading.Event()
        self.state_change_pending = threading.Lock()
        self.commands_lock = threading.Lock()
        self.idle_lock = threading.Lock()
        self.ouq = Queue.Queue(10)      # Outgoing requests (bounded)
        self.inq = Queue.Queue()        # Incoming server responses
        # Daemon threads: writer, reader, and response handler.
        self.wrth = threading.Thread(target=self._writer)
        self.wrth.setDaemon(True)
        self.wrth.start()
        self.rdth = threading.Thread(target=self._reader)
        self.rdth.setDaemon(True)
        self.rdth.start()
        self.inth = threading.Thread(target=self._handler)
        self.inth.setDaemon(True)
        self.inth.start()
        try:
            # Get server welcome message,
            # request and store CAPABILITY response.
            self.welcome = self._request_push(tag='continuation').get_response('IMAP4 protocol error: %s')[1]
            if self._get_untagged_response('PREAUTH'):
                self.state = AUTH
                if __debug__: self._log(1, 'state => AUTH')
            elif self._get_untagged_response('OK'):
                if __debug__: self._log(1, 'state => NONAUTH')
            else:
                raise self.error('unrecognised server welcome message: %s' % `self.welcome`)
            typ, dat = self.capability()
            if dat == [None]:
                raise self.error('no CAPABILITY response from server')
            self.capabilities = tuple(dat[-1].upper().split())
            if __debug__: self._log(1, 'CAPABILITY: %r' % (self.capabilities,))
            # Pick the first protocol version the server advertises.
            for version in AllowedVersions:
                if not version in self.capabilities:
                    continue
                self.PROTOCOL_VERSION = version
                break
            else:
                raise self.error('server not IMAP4 compliant')
        except:
            # Ensure worker threads don't outlive a failed construction.
            self._close_threads()
            raise
def __getattr__(self, attr):
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Setup connection to remote server on "host:port"
        (default: localhost:standard IMAP4 port).
        This connection will be used by the routines:
        read, send, shutdown, socket."""
        self.host = self._choose_nonull_or_dflt('', host)
        self.port = self._choose_nonull_or_dflt(IMAP4_PORT, port)
        self.sock = self.open_socket()
        # File descriptor used by the reader thread for polling.
        self.read_fd = self.sock.fileno()
    def open_socket(self):
        """open_socket()
        Open socket choosing first address family available."""
        msg = (-1, 'could not open socket')
        # Try each (family, socktype, proto) getaddrinfo offers in turn.
        for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
            except socket.error, msg:
                continue
            try:
                # Retry connect once if interrupted by a signal (EINTR).
                for i in (0, 1):
                    try:
                        s.connect(sa)
                        break
                    except socket.error, msg:
                        if len(msg.args) < 2 or msg.args[0] != errno.EINTR:
                            raise
                else:
                    raise socket.error(msg)
            except socket.error, msg:
                s.close()
                continue
            break
        else:
            # No address worked: re-raise the last recorded error.
            raise socket.error(msg)
        return s
    def ssl_wrap_socket(self):
        """Wrap the existing socket in an SSL/TLS layer.
        Uses self.keyfile/certfile/ca_certs/cert_verify_cb set by starttls()."""
        # Keepalive helps detect dead TLS connections.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        try:
            import ssl
            # Only require certificate verification when CA certs were supplied.
            if self.ca_certs is not None:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile, ca_certs=self.ca_certs, cert_reqs=cert_reqs)
            ssl_exc = ssl.SSLError
            # Socket object changed, so refresh the polled descriptor.
            self.read_fd = self.sock.fileno()
        except ImportError:
            raise socket.sslerror("imaplib2 SSL mode does not work without ssl module")
        # Optional user callback for extra certificate checks (eg: hostname).
        if self.cert_verify_cb is not None:
            cert_err = self.cert_verify_cb(self.sock.getpeercert(), self.host)
            if cert_err:
                raise ssl_exc(cert_err)
def start_compressing(self):
"""start_compressing()
Enable deflate compression on the socket (RFC 4978)."""
self.decompressor = zlib.decompressobj(-15)
self.compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
    def read(self, size):
        """data = read(size)
        Read at most 'size' bytes from remote."""
        if self.decompressor is None:
            return self.sock.recv(size)
        # COMPRESS=DEFLATE active: drain any buffered decompressor tail
        # before reading more bytes off the socket.
        if self.decompressor.unconsumed_tail:
            data = self.decompressor.unconsumed_tail
        else:
            data = self.sock.recv(READ_SIZE)
        return self.decompressor.decompress(data, size)
    def send(self, data):
        """send(data)
        Send 'data' to remote."""
        # Compress when COMPRESS=DEFLATE is active; Z_SYNC_FLUSH ensures
        # the remote can decode everything sent so far.
        if self.compressor is not None:
            data = self.compressor.compress(data)
            data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
        self.sock.sendall(data)
    def shutdown(self):
        """shutdown()
        Close I/O established in "open"."""
        self.sock.close()
    def socket(self):
        """socket = socket()
        Return socket instance used to connect to IMAP4 server."""
        return self.sock
    def enable_compression(self):
        """enable_compression()
        Ask the server to start compressing the connection.
        Should be called from user of this class after instantiation, as in:
            if 'COMPRESS=DEFLATE' in imapobj.capabilities:
                imapobj.enable_compression()"""
        try:
            typ, dat = self._simple_command('COMPRESS', 'DEFLATE')
            # Only switch on compression after the server accepts.
            if typ == 'OK':
                self.start_compressing()
                if __debug__: self._log(1, 'Enabled COMPRESS=DEFLATE')
        finally:
            self._release_state_change()
    def pop_untagged_responses(self):
        """ for typ,data in pop_untagged_responses(): pass
        Generator for any remaining untagged responses.
        Returns and removes untagged responses in order of reception.
        Use at your own risk!"""
        # Pop under commands_lock: the handler thread appends concurrently.
        while self.untagged_responses:
            self.commands_lock.acquire()
            try:
                yield self.untagged_responses.pop(0)
            finally:
                self.commands_lock.release()
    def recent(self, **kw):
        """(typ, [data]) = recent()
        Return 'RECENT' responses if any exist,
        else prompt server for an update using the 'NOOP' command.
        'data' is None if no new messages,
        else list of RECENT responses, most recent last."""
        name = 'RECENT'
        typ, dat = self._untagged_response(None, [None], name)
        if dat != [None]:
            return self._deliver_dat(typ, dat, kw)
        # Nothing pending: a NOOP gives the server a chance to report.
        kw['untagged_response'] = name
        return self.noop(**kw)
def response(self, code, **kw):
"""(code, [data]) = response(code)
Return data for response 'code' if received, or None.
Old value for response 'code' is cleared."""
typ, dat = self._untagged_response(code, [None], code.upper())
return self._deliver_dat(typ, dat, kw)
    def append(self, mailbox, flags, date_time, message, **kw):
        """(typ, [data]) = append(mailbox, flags, date_time, message)
        Append message to named mailbox.
        All args except `message' can be None."""
        name = 'APPEND'
        if not mailbox:
            mailbox = 'INBOX'
        if flags:
            # Flags must be sent as a parenthesised list.
            if (flags[0],flags[-1]) != ('(',')'):
                flags = '(%s)' % flags
        else:
            flags = None
        if date_time:
            date_time = Time2Internaldate(date_time)
        else:
            date_time = None
        # The message is sent as an IMAP literal with normalised CRLF endings.
        self.literal = self.mapCRLF_cre.sub(CRLF, message)
        try:
            return self._simple_command(name, mailbox, flags, date_time, **kw)
        finally:
            self._release_state_change()
    def authenticate(self, mechanism, authobject, **kw):
        """(typ, [data]) = authenticate(mechanism, authobject)
        Authenticate command - requires response processing.
        'mechanism' specifies which authentication mechanism is to
        be used - it must appear in <instance>.capabilities in the
        form AUTH=<mechanism>.
        'authobject' must be a callable object:
                data = authobject(response)
        It will be called to process server continuation responses.
        It should return data that will be encoded and sent to server.
        It should return None if the client abort response '*' should
        be sent instead."""
        # The authenticator handles server continuation requests.
        self.literal = _Authenticator(authobject).process
        try:
            typ, dat = self._simple_command('AUTHENTICATE', mechanism.upper())
            if typ != 'OK':
                self._deliver_exc(self.error, dat[-1], kw)
            self.state = AUTH
            if __debug__: self._log(1, 'state => AUTH')
        finally:
            self._release_state_change()
        return self._deliver_dat(typ, dat, kw)
    def capability(self, **kw):
        """(typ, [data]) = capability()
        Fetch capabilities list from server."""
        name = 'CAPABILITY'
        # Result comes back as the untagged CAPABILITY response.
        kw['untagged_response'] = name
        return self._simple_command(name, **kw)
    def check(self, **kw):
        """(typ, [data]) = check()
        Checkpoint mailbox on server."""
        return self._simple_command('CHECK', **kw)
def close(self, **kw):
"""(typ, [data]) = close()
Close currently selected mailbox.
Deleted messages are removed from writable mailbox.
This is the recommended command before 'LOGOUT'."""
if self.state != 'SELECTED':
raise self.error('No mailbox selected.')
try:
typ, dat = self._simple_command('CLOSE')
finally:
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
self._release_state_change()
return self._deliver_dat(typ, dat, kw)
    def copy(self, message_set, new_mailbox, **kw):
        """(typ, [data]) = copy(message_set, new_mailbox)
        Copy 'message_set' messages onto end of 'new_mailbox'."""
        return self._simple_command('COPY', message_set, new_mailbox, **kw)
    def create(self, mailbox, **kw):
        """(typ, [data]) = create(mailbox)
        Create new mailbox."""
        return self._simple_command('CREATE', mailbox, **kw)
    def delete(self, mailbox, **kw):
        """(typ, [data]) = delete(mailbox)
        Delete old mailbox."""
        return self._simple_command('DELETE', mailbox, **kw)
    def deleteacl(self, mailbox, who, **kw):
        """(typ, [data]) = deleteacl(mailbox, who)
        Delete the ACLs (remove any rights) set for who on mailbox."""
        return self._simple_command('DELETEACL', mailbox, who, **kw)
    def examine(self, mailbox='INBOX', **kw):
        """(typ, [data]) = examine(mailbox='INBOX')
        Select a mailbox for READ-ONLY access. (Flushes all untagged responses.)
        'data' is count of messages in mailbox ('EXISTS' response).
        Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
        other responses should be obtained via "response('FLAGS')" etc."""
        # EXAMINE is just SELECT in read-only mode.
        return self.select(mailbox=mailbox, readonly=True, **kw)
    def expunge(self, **kw):
        """(typ, [data]) = expunge()
        Permanently remove deleted items from selected mailbox.
        Generates 'EXPUNGE' response for each deleted message.
        'data' is list of 'EXPUNGE'd message numbers in order received."""
        name = 'EXPUNGE'
        kw['untagged_response'] = name
        return self._simple_command(name, **kw)
    def fetch(self, message_set, message_parts, **kw):
        """(typ, [data, ...]) = fetch(message_set, message_parts)
        Fetch (parts of) messages.
        'message_parts' should be a string of selected parts
        enclosed in parentheses, eg: "(UID BODY[TEXT])".
        'data' are tuples of message part envelope and data,
        followed by a string containing the trailer."""
        name = 'FETCH'
        kw['untagged_response'] = name
        return self._simple_command(name, message_set, message_parts, **kw)
    def getacl(self, mailbox, **kw):
        """(typ, [data]) = getacl(mailbox)
        Get the ACLs for a mailbox."""
        kw['untagged_response'] = 'ACL'
        return self._simple_command('GETACL', mailbox, **kw)
    def getannotation(self, mailbox, entry, attribute, **kw):
        """(typ, [data]) = getannotation(mailbox, entry, attribute)
        Retrieve ANNOTATIONs."""
        kw['untagged_response'] = 'ANNOTATION'
        return self._simple_command('GETANNOTATION', mailbox, entry, attribute, **kw)
    def getquota(self, root, **kw):
        """(typ, [data]) = getquota(root)
        Get the quota root's resource usage and limits.
        (Part of the IMAP4 QUOTA extension defined in rfc2087.)"""
        kw['untagged_response'] = 'QUOTA'
        return self._simple_command('GETQUOTA', root, **kw)
    def getquotaroot(self, mailbox, **kw):
        """(typ, [[QUOTAROOT responses...], [QUOTA responses...]]) = getquotaroot(mailbox)
        Get the list of quota roots for the named mailbox."""
        typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
        # The command yields two distinct untagged response types.
        typ, quota = self._untagged_response(typ, dat, 'QUOTA')
        typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
        return self._deliver_dat(typ, [quotaroot, quota], kw)
    def id(self, *kv_pairs, **kw):
        """(typ, [data]) = <instance>.id(kv_pairs)
        'kv_pairs' is a possibly empty list of keys and values.
        'data' is a list of ID key value pairs or NIL.
        NB: a single argument is assumed to be correctly formatted and is passed through unchanged
        (for backward compatibility with earlier version).
        Exchange information for problem analysis and determination.
        The ID extension is defined in RFC 2971. """
        name = 'ID'
        kw['untagged_response'] = name
        if not kv_pairs:
            data = 'NIL'
        elif len(kv_pairs) == 1:
            data = kv_pairs[0]
        else:
            # Quote each arg; None/empty values are sent as NIL.
            data = '(%s)' % ' '.join([(arg and self._quote(arg) or 'NIL') for arg in kv_pairs])
        return self._simple_command(name, (data,), **kw)
    def idle(self, timeout=None, **kw):
        """(typ, [data]) = idle(timeout=None)
        Put server into IDLE mode until server notifies some change,
        or 'timeout' (secs) occurs (default: 29 minutes),
        or another IMAP4 command is scheduled."""
        name = 'IDLE'
        # _IdleCont handles the continuation response and the timeout.
        self.literal = _IdleCont(self, timeout).process
        try:
            return self._simple_command(name, **kw)
        finally:
            self._release_state_change()
    def list(self, directory='""', pattern='*', **kw):
        """(typ, [data]) = list(directory='""', pattern='*')
        List mailbox names in directory matching pattern.
        'data' is list of LIST responses.
        NB: for 'pattern':
        % matches all except separator ( so LIST "" "%" returns names at root)
        * matches all (so LIST "" "*" returns whole directory tree from root)"""
        name = 'LIST'
        kw['untagged_response'] = name
        return self._simple_command(name, directory, pattern, **kw)
    def login(self, user, password, **kw):
        """(typ, [data]) = login(user, password)
        Identify client using plaintext password.
        NB: 'password' will be quoted."""
        try:
            typ, dat = self._simple_command('LOGIN', user, self._quote(password))
            if typ != 'OK':
                self._deliver_exc(self.error, dat[-1], kw)
            self.state = AUTH
            if __debug__: self._log(1, 'state => AUTH')
        finally:
            self._release_state_change()
        return self._deliver_dat(typ, dat, kw)
    def login_cram_md5(self, user, password, **kw):
        """(typ, [data]) = login_cram_md5(user, password)
        Force use of CRAM-MD5 authentication."""
        self.user, self.password = user, password
        return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH, **kw)
    def _CRAM_MD5_AUTH(self, challenge):
        """Authobject to use with CRAM-MD5 authentication."""
        import hmac
        return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
    def logout(self, **kw):
        """(typ, [data]) = logout()
        Shutdown connection to server.
        Returns server 'BYE' response.
        NB: You must call this to shut down threads before discarding an instance."""
        self.state = LOGOUT
        if __debug__: self._log(1, 'state => LOGOUT')
        try:
            try:
                typ, dat = self._simple_command('LOGOUT')
            except:
                # Report (rather than propagate) errors: threads must still be
                # shut down below.
                typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
                if __debug__: self._log(1, dat)
            self._close_threads()
        finally:
            self._release_state_change()
        if __debug__: self._log(1, 'connection closed')
        # Prefer the server's BYE response if one was received.
        bye = self._get_untagged_response('BYE', leave=True)
        if bye:
            typ, dat = 'BYE', bye
        return self._deliver_dat(typ, dat, kw)
    def lsub(self, directory='""', pattern='*', **kw):
        """(typ, [data, ...]) = lsub(directory='""', pattern='*')
        List 'subscribed' mailbox names in directory matching pattern.
        'data' are tuples of message part envelope and data."""
        name = 'LSUB'
        kw['untagged_response'] = name
        return self._simple_command(name, directory, pattern, **kw)
    def myrights(self, mailbox, **kw):
        """(typ, [data]) = myrights(mailbox)
        Show my ACLs for a mailbox (i.e. the rights that I have on mailbox)."""
        name = 'MYRIGHTS'
        kw['untagged_response'] = name
        return self._simple_command(name, mailbox, **kw)
    def namespace(self, **kw):
        """(typ, [data, ...]) = namespace()
        Returns IMAP namespaces ala rfc2342."""
        name = 'NAMESPACE'
        kw['untagged_response'] = name
        return self._simple_command(name, **kw)
    def noop(self, **kw):
        """(typ, [data]) = noop()
        Send NOOP command."""
        if __debug__: self._dump_ur(3)
        return self._simple_command('NOOP', **kw)
    def partial(self, message_num, message_part, start, length, **kw):
        """(typ, [data, ...]) = partial(message_num, message_part, start, length)
        Fetch truncated part of a message.
        'data' is tuple of message part envelope and data.
        NB: obsolete."""
        name = 'PARTIAL'
        # The server answers PARTIAL with untagged FETCH responses.
        kw['untagged_response'] = 'FETCH'
        return self._simple_command(name, message_num, message_part, start, length, **kw)
    def proxyauth(self, user, **kw):
        """(typ, [data]) = proxyauth(user)
        Assume authentication as 'user'.
        (Allows an authorised administrator to proxy into any user's mailbox.)"""
        try:
            return self._simple_command('PROXYAUTH', user, **kw)
        finally:
            self._release_state_change()
    def rename(self, oldmailbox, newmailbox, **kw):
        """(typ, [data]) = rename(oldmailbox, newmailbox)
        Rename old mailbox name to new."""
        return self._simple_command('RENAME', oldmailbox, newmailbox, **kw)
    def search(self, charset, *criteria, **kw):
        """(typ, [data]) = search(charset, criterion, ...)
        Search mailbox for matching messages.
        'data' is space separated list of matching message numbers."""
        name = 'SEARCH'
        kw['untagged_response'] = name
        if charset:
            return self._simple_command(name, 'CHARSET', charset, *criteria, **kw)
        return self._simple_command(name, *criteria, **kw)
    def select(self, mailbox='INBOX', readonly=False, **kw):
        """(typ, [data]) = select(mailbox='INBOX', readonly=False)
        Select a mailbox. (Restores any previous untagged responses.)
        'data' is count of messages in mailbox ('EXISTS' response).
        Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
        other responses should be obtained via "response('FLAGS')" etc."""
        self.commands_lock.acquire()
        # Save state of old mailbox, restore state for new...
        self.mailboxes[self.mailbox] = self.untagged_responses
        self.untagged_responses = self.mailboxes.setdefault(mailbox, [])
        self.commands_lock.release()
        self.mailbox = mailbox
        # Normalise 'readonly' to a strict boolean.
        self.is_readonly = readonly and True or False
        if readonly:
            name = 'EXAMINE'
        else:
            name = 'SELECT'
        try:
            rqb = self._command(name, mailbox)
            typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
            if typ != 'OK':
                # Failed SELECT de-selects any current mailbox.
                if self.state == SELECTED:
                    self.state = AUTH
                    if __debug__: self._log(1, 'state => AUTH')
                if typ == 'BAD':
                    self._deliver_exc(self.error, '%s command error: %s %s. Data: %.100s' % (name, typ, dat, mailbox), kw)
                return self._deliver_dat(typ, dat, kw)
            self.state = SELECTED
            if __debug__: self._log(1, 'state => SELECTED')
        finally:
            self._release_state_change()
        # Writable access was requested but the server granted READ-ONLY.
        if self._get_untagged_response('READ-ONLY', leave=True) and not readonly:
            if __debug__: self._dump_ur(1)
            self._deliver_exc(self.readonly, '%s is not writable' % mailbox, kw)
        typ, dat = self._untagged_response(typ, [None], 'EXISTS')
        return self._deliver_dat(typ, dat, kw)
    def setacl(self, mailbox, who, what, **kw):
        """(typ, [data]) = setacl(mailbox, who, what)
        Set a mailbox acl."""
        try:
            return self._simple_command('SETACL', mailbox, who, what, **kw)
        finally:
            self._release_state_change()
    def setannotation(self, *args, **kw):
        """(typ, [data]) = setannotation(mailbox[, entry, attribute]+)
        Set ANNOTATIONs."""
        kw['untagged_response'] = 'ANNOTATION'
        return self._simple_command('SETANNOTATION', *args, **kw)
    def setquota(self, root, limits, **kw):
        """(typ, [data]) = setquota(root, limits)
        Set the quota root's resource limits."""
        kw['untagged_response'] = 'QUOTA'
        try:
            return self._simple_command('SETQUOTA', root, limits, **kw)
        finally:
            self._release_state_change()
    def sort(self, sort_criteria, charset, *search_criteria, **kw):
        """(typ, [data]) = sort(sort_criteria, charset, search_criteria, ...)
        IMAP4rev1 extension SORT command."""
        name = 'SORT'
        # Sort criteria must be sent as a parenthesised list.
        if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
            sort_criteria = '(%s)' % sort_criteria
        kw['untagged_response'] = name
        return self._simple_command(name, sort_criteria, charset, *search_criteria, **kw)
    def starttls(self, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, **kw):
        """(typ, [data]) = starttls(keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None)
        Start TLS negotiation as per RFC 2595."""
        name = 'STARTTLS'
        if name not in self.capabilities:
            raise self.abort('TLS not supported by server')
        if hasattr(self, '_tls_established') and self._tls_established:
            raise self.abort('TLS session already established')
        # Must now shutdown reader thread after next response, and restart after changing read_fd
        self.read_size = 1 # Don't consume TLS handshake
        self.TerminateReader = True
        try:
            typ, dat = self._simple_command(name)
        finally:
            self._release_state_change()
            # Wait for the reader thread to stop before touching the socket.
            self.rdth.join()
            self.TerminateReader = False
            self.read_size = READ_SIZE
        if typ != 'OK':
            # Restart reader thread and error
            self.rdth = threading.Thread(target=self._reader)
            self.rdth.setDaemon(True)
            self.rdth.start()
            raise self.error("Couldn't establish TLS session: %s" % dat)
        self.keyfile = keyfile
        self.certfile = certfile
        self.ca_certs = ca_certs
        self.cert_verify_cb = cert_verify_cb
        try:
            self.ssl_wrap_socket()
        finally:
            # Restart reader thread
            self.rdth = threading.Thread(target=self._reader)
            self.rdth.setDaemon(True)
            self.rdth.start()
        # Capabilities may change after TLS (RFC 2595), so refresh them.
        typ, dat = self.capability()
        if dat == [None]:
            raise self.error('no CAPABILITY response from server')
        self.capabilities = tuple(dat[-1].upper().split())
        self._tls_established = True
        typ, dat = self._untagged_response(typ, dat, name)
        return self._deliver_dat(typ, dat, kw)
    def status(self, mailbox, names, **kw):
        """(typ, [data]) = status(mailbox, names)
        Request named status conditions for mailbox."""
        name = 'STATUS'
        kw['untagged_response'] = name
        return self._simple_command(name, mailbox, names, **kw)
    def store(self, message_set, command, flags, **kw):
        """(typ, [data]) = store(message_set, command, flags)
        Alters flag dispositions for messages in mailbox."""
        if (flags[0],flags[-1]) != ('(',')'):
            flags = '(%s)' % flags # Avoid quoting the flags
        # STORE results arrive as untagged FETCH responses.
        kw['untagged_response'] = 'FETCH'
        return self._simple_command('STORE', message_set, command, flags, **kw)
    def subscribe(self, mailbox, **kw):
        """(typ, [data]) = subscribe(mailbox)
        Subscribe to new mailbox."""
        try:
            return self._simple_command('SUBSCRIBE', mailbox, **kw)
        finally:
            self._release_state_change()
    def thread(self, threading_algorithm, charset, *search_criteria, **kw):
        """(type, [data]) = thread(threading_algorithm, charset, search_criteria, ...)
        IMAPrev1 extension THREAD command."""
        name = 'THREAD'
        kw['untagged_response'] = name
        return self._simple_command(name, threading_algorithm, charset, *search_criteria, **kw)
def uid(self, command, *args, **kw):
"""(typ, [data]) = uid(command, arg, ...)
Execute "command arg ..." with messages identified by UID,
rather than message number.
Assumes 'command' is legal in current state.
Returns response appropriate to 'command'."""
command = command.upper()
if command in UID_direct:
resp = command
else:
resp = 'FETCH'
kw['untagged_response'] = resp
return self._simple_command('UID', command, *args, **kw)
    def unsubscribe(self, mailbox, **kw):
        """(typ, [data]) = unsubscribe(mailbox)
        Unsubscribe from old mailbox."""
        try:
            return self._simple_command('UNSUBSCRIBE', mailbox, **kw)
        finally:
            self._release_state_change()
    def xatom(self, name, *args, **kw):
        """(typ, [data]) = xatom(name, arg, ...)
        Allow simple extension commands notified by server in CAPABILITY response.
        Assumes extension command 'name' is legal in current state.
        Returns response appropriate to extension command 'name'."""
        name = name.upper()
        # Register unknown commands on the fly so _command accepts them
        # in the current state.
        if not name in Commands:
            Commands[name] = ((self.state,), False)
        try:
            return self._simple_command(name, *args, **kw)
        finally:
            self._release_state_change()
# Internal methods
def _append_untagged(self, typ, dat):
# Append new 'dat' to end of last untagged response if same 'typ',
# else append new response.
if dat is None: dat = ''
self.commands_lock.acquire()
if self.untagged_responses:
urn, urd = self.untagged_responses[-1]
if urn != typ:
urd = None
else:
urd = None
if urd is None:
urd = []
self.untagged_responses.append([typ, urd])
urd.append(dat)
self.commands_lock.release()
if __debug__: self._log(5, 'untagged_responses[%s] %s += ["%s"]' % (typ, len(urd)-1, dat))
def _check_bye(self):
bye = self._get_untagged_response('BYE', leave=True)
if bye:
raise self.abort(bye[-1])
    def _checkquote(self, arg):
        # Must quote command args if "atom-specials" present,
        # and not already quoted. NB: single quotes are removed.
        if not isinstance(arg, basestring):
            # Non-string args (eg: bytearray) are passed through unquoted.
            return arg
        if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
            # Already parenthesised or double-quoted: leave as-is.
            return arg
        if len(arg) >= 2 and (arg[0],arg[-1]) in (("'","'"),):
            # Single quotes: strip them and send the content unquoted.
            return arg[1:-1]
        if arg and self.mustquote_cre.search(arg) is None:
            # No atom-specials present: safe to send unquoted.
            return arg
        return self._quote(arg)
    def _choose_nonull_or_dflt(self, dflt, *args):
        # Return the first non-None arg of the default's type, else 'dflt'.
        if isinstance(dflt, basestring):
            dflttyp = basestring # Allow any string type
        else:
            dflttyp = type(dflt)
        for arg in args:
            if arg is not None:
                if isinstance(arg, dflttyp):
                    return arg
                if __debug__: self._log(0, 'bad arg is %s, expecting %s' % (type(arg), dflttyp))
        return dflt
    def _command(self, name, *args, **kw):
        """Queue command 'name' with 'args' for transmission.

        Returns the Request object representing the command. Synchronous
        commands first wait for all outstanding tagged commands to finish;
        a pending literal (self.literal) is sent via the continuation
        protocol. NB: leaves state_change_pending held for sync commands -
        released later via _release_state_change()."""
        if Commands[name][CMD_VAL_ASYNC]:
            cmdtyp = 'async'
        else:
            cmdtyp = 'sync'
        if __debug__: self._log(1, '[%s] %s %s' % (cmdtyp, name, args))
        if __debug__: self._log(3, 'state_change_pending.acquire')
        self.state_change_pending.acquire()
        self._end_idle()
        if cmdtyp == 'async':
            self.state_change_pending.release()
            if __debug__: self._log(3, 'state_change_pending.release')
        else:
            # Need to wait for all async commands to complete
            self._check_bye()
            self.commands_lock.acquire()
            if self.tagged_commands:
                self.state_change_free.clear()
                need_event = True
            else:
                need_event = False
            self.commands_lock.release()
            if need_event:
                if __debug__: self._log(3, 'sync command %s waiting for empty commands Q' % name)
                self.state_change_free.wait()
                if __debug__: self._log(3, 'sync command %s proceeding' % name)
        # Verify the command is legal in the current protocol state.
        if self.state not in Commands[name][CMD_VAL_STATES]:
            self.literal = None
            raise self.error('command %s illegal in state %s'
                                % (name, self.state))
        self._check_bye()
        # Discard any stale completion-status untagged responses.
        for typ in ('OK', 'NO', 'BAD'):
            self._get_untagged_response(typ)
        if self._get_untagged_response('READ-ONLY', leave=True) and not self.is_readonly:
            self.literal = None
            raise self.readonly('mailbox status changed to READ-ONLY')
        if self.Terminate:
            raise self.abort('connection closed')
        # Build the tagged command line.
        rqb = self._request_push(name=name, **kw)
        data = '%s %s' % (rqb.tag, name)
        for arg in args:
            if arg is None: continue
            data = '%s %s' % (data, self._checkquote(arg))
        literal = self.literal
        if literal is not None:
            self.literal = None
            if isinstance(literal, basestring):
                # Plain string literal: announce its size on the command line.
                literator = None
                data = '%s {%s}' % (data, len(literal))
            else:
                # Callable generator of literal parts (eg: authenticator).
                literator = literal
        if __debug__: self._log(4, 'data=%s' % data)
        rqb.data = '%s%s' % (data, CRLF)
        if literal is None:
            self.ouq.put(rqb)
            return rqb
        # Must setup continuation expectancy *before* ouq.put
        crqb = self._request_push(tag='continuation')
        self.ouq.put(rqb)
        while True:
            # Wait for continuation response
            ok, data = crqb.get_response('command: %s => %%s' % name)
            if __debug__: self._log(4, 'continuation => %s, %s' % (ok, data))
            # NO/BAD response?
            if not ok:
                break
            # Send literal
            if literator is not None:
                literal = literator(data, rqb)
                if literal is None:
                    break
            if literator is not None:
                # Need new request for next continuation response
                crqb = self._request_push(tag='continuation')
            if __debug__: self._log(4, 'write literal size %s' % len(literal))
            crqb.data = '%s%s' % (literal, CRLF)
            self.ouq.put(crqb)
            if literator is None:
                break
        return rqb
def _command_complete(self, rqb, kw):
    """Block until the tagged completion of a non-callback command
    arrives and return (typ, dat); raise self.error on a 'BAD'
    completion.  When kw requests an untagged response type, the queued
    untagged data of that type is collected and returned instead."""
    typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
    self._check_bye()
    if typ == 'BAD':
        if __debug__: self._print_log()
        msg = '%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data)
        raise self.error(msg)
    if 'untagged_response' in kw:
        return self._untagged_response(typ, dat, kw['untagged_response'])
    return typ, dat
def _command_completer(self, (response, cb_arg, error)):
    """Finish a callback-style command: restore the user's callback on
    the request, then abort it on error/BYE/BAD, or deliver the
    (typ, dat) response (optionally folding in untagged data).

    NOTE: the tuple parameter in the signature is Python-2-only syntax
    (removed in Python 3)."""
    # Called for callback commands
    rqb, kw = cb_arg
    rqb.callback = kw['callback']
    rqb.callback_arg = kw.get('cb_arg')
    if error is not None:
        # Transport/protocol failure reported by the machinery itself.
        if __debug__: self._print_log()
        typ, val = error
        rqb.abort(typ, val)
        return
    # A pending BYE means the server is going away: abort with its text.
    bye = self._get_untagged_response('BYE', leave=True)
    if bye:
        rqb.abort(self.abort, bye[-1])
        return
    typ, dat = response
    if typ == 'BAD':
        if __debug__: self._print_log()
        rqb.abort(self.error, '%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
        return
    if 'untagged_response' in kw:
        response = self._untagged_response(typ, dat, kw['untagged_response'])
    rqb.deliver(response)
def _deliver_dat(self, typ, dat, kw):
    """Hand (typ, dat) to an optional user callback, then return it.

    The callback receives ((typ, dat), cb_arg, None) - the trailing
    None meaning "no error occurred"."""
    if 'callback' in kw:
        cb_arg = kw.get('cb_arg')
        kw['callback'](((typ, dat), cb_arg, None))
    return typ, dat
def _deliver_exc(self, exc, dat, kw):
    """Report failure (exc, dat) to an optional user callback, then
    raise exc(dat) in the caller."""
    if 'callback' in kw:
        error = (exc, dat)
        kw['callback']((None, kw.get('cb_arg'), error))
    raise exc(dat)
def _end_idle(self):
    """Terminate a server IDLE by queueing "DONE" for the pending IDLE
    request; no-op when no IDLE is in progress.

    idle_lock guards the handoff of idle_rqb so that only one caller
    (handler timeout or a new command) sends DONE."""
    self.idle_lock.acquire()
    irqb = self.idle_rqb
    if irqb is None:
        self.idle_lock.release()
        return
    # Claim the request before releasing the lock.
    self.idle_rqb = None
    self.idle_timeout = None
    self.idle_lock.release()
    irqb.data = 'DONE%s' % CRLF
    self.ouq.put(irqb)
    if __debug__: self._log(2, 'server IDLE finished')
def _get_untagged_response(self, name, leave=False):
    """Return the data of the first queued untagged response of type
    'name', or None if there is none.  Unless 'leave' is true, the
    matching entry is removed from the queue."""
    self.commands_lock.acquire()
    hit = False
    found = None
    for idx, (typ, dat) in enumerate(self.untagged_responses):
        if typ != name:
            continue
        if not leave:
            del self.untagged_responses[idx]
        hit = True
        found = dat
        break
    self.commands_lock.release()
    if not hit:
        return None
    if __debug__: self._log(5, '_get_untagged_response(%s) => %s' % (name, found))
    return found
def _match(self, cre, s):
    """Match compiled pattern 'cre' against 's', cache the match object
    on self.mo for later group() access, and return True on success."""
    match = cre.match(s)
    self.mo = match
    return match is not None
def _put_response(self, resp):
    """Parse one unit received from the server (a response line, or a
    chunk of a literal) and route it: accumulate literal data, queue
    untagged responses, and hand tagged/continuation completions to the
    Request waiting for them.  Runs in the handler thread."""

    if self._expecting_data > 0:
        # Mid-literal: consume up to the remaining literal byte count.
        rlen = len(resp)
        dlen = min(self._expecting_data, rlen)
        self._expecting_data -= dlen
        if rlen <= dlen:
            self._accumulated_data.append(resp)
            return
        self._accumulated_data.append(resp[:dlen])
        resp = resp[dlen:]

    if self._accumulated_data:
        # Literal complete: queue (intro-line, literal-bytes) together.
        typ, dat = self._literal_expected
        self._append_untagged(typ, (dat, ''.join(self._accumulated_data)))
        self._accumulated_data = []

    # Protocol mandates all lines terminated by CRLF
    resp = resp[:-2]

    if 'continuation' in self.tagged_commands:
        continuation_expected = True
    else:
        continuation_expected = False

    if self._literal_expected is not None:
        dat = resp
        if self._match(self.literal_cre, dat):
            # Another literal follows immediately.
            self._literal_expected[1] = dat
            self._expecting_data = int(self.mo.group('size'))
            if __debug__: self._log(4, 'expecting literal size %s' % self._expecting_data)
            return
        typ = self._literal_expected[0]
        self._literal_expected = None
        self._append_untagged(typ, dat)  # Tail
        if __debug__: self._log(4, 'literal completed')
    else:
        # Command completion response?
        if self._match(self.tagre, resp):
            tag = self.mo.group('tag')
            typ = self.mo.group('type')
            dat = self.mo.group('data')
            if not tag in self.tagged_commands:
                if __debug__: self._log(1, 'unexpected tagged response: %s' % resp)
            else:
                self._request_pop(tag, (typ, [dat]))
        else:
            dat2 = None

            # '*' (untagged) responses?
            if not self._match(self.untagged_response_cre, resp):
                if self._match(self.untagged_status_cre, resp):
                    dat2 = self.mo.group('data2')

            if self.mo is None:
                # Only other possibility is '+' (continuation) response...
                if self._match(self.continuation_cre, resp):
                    if not continuation_expected:
                        if __debug__: self._log(1, "unexpected continuation response: '%s'" % resp)
                        return
                    self._request_pop('continuation', (True, self.mo.group('data')))
                    return

                if __debug__: self._log(1, "unexpected response: '%s'" % resp)
                return

            typ = self.mo.group('type')
            dat = self.mo.group('data')
            if dat is None: dat = ''  # Null untagged response
            if dat2: dat = dat + ' ' + dat2

            # Is there a literal to come?
            if self._match(self.literal_cre, dat):
                self._expecting_data = int(self.mo.group('size'))
                if __debug__: self._log(4, 'read literal size %s' % self._expecting_data)
                self._literal_expected = [typ, dat]
                return

            self._append_untagged(typ, dat)
            if typ != 'OK':  # NO, BYE, IDLE
                self._end_idle()

    # Bracketed response information?
    if typ in ('OK', 'NO', 'BAD') and self._match(self.response_code_cre, dat):
        self._append_untagged(self.mo.group('type'), self.mo.group('data'))

    # Command waiting for aborted continuation response?
    if continuation_expected:
        self._request_pop('continuation', (False, resp))

    # Bad news?
    if typ in ('NO', 'BAD', 'BYE'):
        if typ == 'BYE':
            self.Terminate = True
        if __debug__: self._log(1, '%s response: %s' % (typ, dat))
def _quote(self, arg):
    """Return 'arg' as an IMAP quoted string: backslashes and double
    quotes escaped, the whole wrapped in double quotes."""
    escaped = arg.replace('\\', '\\\\').replace('"', '\\"')
    return '"' + escaped + '"'
def _release_state_change(self):
    """Release the state_change_pending lock if it is currently held;
    safe to call when it is not."""
    pending = self.state_change_pending
    if pending.locked():
        pending.release()
        if __debug__: self._log(3, 'state_change_pending.release')
def _request_pop(self, name, data):
    """Remove request 'name' from tagged_commands and deliver 'data' to
    it; signal state_change_free once no commands remain in flight."""
    self.commands_lock.acquire()
    request = self.tagged_commands.pop(name)
    none_left = not self.tagged_commands
    if none_left:
        if __debug__: self._log(3, 'state_change_free.set')
        self.state_change_free.set()
    self.commands_lock.release()
    if __debug__: self._log(4, '_request_pop(%s, %s) = %s' % (name, data, request.tag))
    request.deliver(data)
def _request_push(self, tag=None, name=None, **kw):
    """Create a new Request and register it in tagged_commands.

    'tag' defaults to the tag generated by the Request itself;
    'continuation' is used as a pseudo-tag for continuation responses.
    Returns the new Request."""
    self.commands_lock.acquire()
    rqb = Request(self, name=name, **kw)
    if tag is None:
        tag = rqb.tag
    self.tagged_commands[tag] = rqb
    self.commands_lock.release()
    # repr() instead of the Python-2-only backquote syntax (`kw`):
    # identical output under Python 2, and no longer a fatal syntax
    # error should the module ever be parsed by Python 3.
    if __debug__: self._log(4, '_request_push(%s, %s, %s) = %s' % (tag, name, repr(kw), rqb.tag))
    return rqb
def _simple_command(self, name, *args, **kw):
    """Issue command 'name' with 'args'.

    Asynchronous when a 'callback' keyword is supplied (returns
    (None, None) immediately); otherwise wait for completion and return
    (typ, dat)."""
    if 'callback' not in kw:
        rqb = self._command(name, *args)
        return self._command_complete(rqb, kw)
    # Note: old calling sequence for back-compat with python <2.6
    self._command(name, callback=self._command_completer, cb_arg=kw, cb_self=True, *args)
    return (None, None)
def _untagged_response(self, typ, dat, name):
    """Collect all queued untagged responses of type 'name' into one
    data list.

    'NO' completions pass straight through unchanged; when nothing of
    that type is queued the result data is [None]."""
    if typ == 'NO':
        return typ, dat
    collected = self._get_untagged_response(name)
    if not collected:
        return typ, [None]
    while True:
        more = self._get_untagged_response(name)
        if not more:
            break
        collected += more
    if __debug__: self._log(4, '_untagged_response(%s, ?, %s) => %s' % (typ, name, collected))
    return typ, collected
# Threads
def _close_threads(self):
    """Shut down the writer, reader and handler threads, in that order."""
    if __debug__: self._log(1, '_close_threads')

    # Poison pill: _writer exits once the output queue is flushed.
    self.ouq.put(None)
    self.wrth.join()

    if __debug__: self._log(1, 'call shutdown')

    # Closing the connection unblocks the reader's poll/select.
    self.shutdown()
    self.rdth.join()
    self.inth.join()
def _handler(self):
    """Handler thread: pull lines off inq, parse them via _put_response,
    police response/IDLE timeouts, and on termination abort every
    outstanding request with the failure reason."""
    resp_timeout = self.resp_timeout

    threading.currentThread().setName(self.identifier + 'handler')

    time.sleep(0.1)  # Don't start handling before main thread ready

    if __debug__: self._log(1, 'starting')

    typ, val = self.abort, 'connection terminated'

    while not self.Terminate:
        try:
            if self.idle_timeout is not None:
                timeout = self.idle_timeout - time.time()
                if timeout <= 0:
                    timeout = 1
                if __debug__:
                    if self.idle_rqb is not None:
                        self._log(5, 'server IDLING, timeout=%.2f' % timeout)
            else:
                timeout = resp_timeout
            line = self.inq.get(True, timeout)
        except Queue.Empty:
            # Timed out waiting for input.
            if self.idle_rqb is None:
                if resp_timeout is not None and self.tagged_commands:
                    if __debug__: self._log(1, 'response timeout')
                    typ, val = self.abort, 'no response after %s secs' % resp_timeout
                    break
                continue
            if self.idle_timeout > time.time():
                continue
            if __debug__: self._log(2, 'server IDLE timedout')
            # Synthesise a response so the IDLE completes normally.
            line = IDLE_TIMEOUT_RESPONSE

        if line is None:
            if __debug__: self._log(1, 'inq None - terminating')
            break

        # A non-string item on inq is an (exception, reason) pair from
        # the reader thread.
        if not isinstance(line, basestring):
            typ, val = line
            break

        try:
            self._put_response(line)
        except:
            typ, val = self.error, 'program error: %s - %s' % sys.exc_info()[:2]
            break

    self.Terminate = True

    if __debug__: self._log(1, 'terminating: %s' % `val`)

    # Abort anything still queued for output, then stop the writer.
    while not self.ouq.empty():
        try:
            self.ouq.get_nowait().abort(typ, val)
        except Queue.Empty:
            break
    self.ouq.put(None)

    # Abort every request still waiting for a response.
    self.commands_lock.acquire()
    for name in self.tagged_commands.keys():
        rqb = self.tagged_commands.pop(name)
        rqb.abort(typ, val)
    self.state_change_free.set()
    self.commands_lock.release()
    if __debug__: self._log(3, 'state_change_free.set')

    if __debug__: self._log(1, 'finished')
if hasattr(select_module, "poll"):

    def _reader(self):
        """Reader thread (poll flavour): pull data off the connection,
        split it into CRLF-terminated lines and feed them to inq.
        Errors are forwarded to the handler as an (abort, reason) pair."""
        threading.currentThread().setName(self.identifier + 'reader')

        if __debug__: self._log(1, 'starting using poll')

        def poll_error(state):
            # Human-readable description of a poll error bitmask.
            PollErrors = {
                select.POLLERR: 'Error',
                select.POLLHUP: 'Hang up',
                select.POLLNVAL: 'Invalid request: descriptor not open',
            }
            return ' '.join([PollErrors[s] for s in PollErrors.keys() if (s & state)])

        line_part = ''  # partial line carried over between reads

        poll = select.poll()

        poll.register(self.read_fd, select.POLLIN)

        rxzero = 0
        terminate = False
        read_poll_timeout = self.read_poll_timeout * 1000  # poll() timeout is in millisecs

        while not (terminate or self.Terminate):
            if self.state == LOGOUT:
                timeout = 1
            else:
                timeout = read_poll_timeout
            try:
                r = poll.poll(timeout)
                if __debug__: self._log(5, 'poll => %s' % `r`)
                if not r:
                    continue  # Timeout

                fd,state = r[0]

                if state & select.POLLIN:
                    data = self.read(self.read_size)  # Drain ssl buffer if present
                    start = 0
                    dlen = len(data)
                    if __debug__: self._log(5, 'rcvd %s' % dlen)
                    if dlen == 0:
                        # Repeated zero-length reads mean the peer closed.
                        rxzero += 1
                        if rxzero > 5:
                            raise IOError("Too many read 0")
                        time.sleep(0.1)
                        continue  # Try again
                    rxzero = 0

                    while True:
                        stop = data.find('\n', start)
                        if stop < 0:
                            line_part += data[start:]
                            break
                        stop += 1
                        line_part, start, line = \
                            '', stop, line_part + data[start:stop]
                        if __debug__: self._log(4, '< %s' % line)
                        self.inq.put(line)
                        if self.TerminateReader:
                            terminate = True

                if state & ~(select.POLLIN):
                    raise IOError(poll_error(state))
            except:
                reason = 'socket error: %s - %s' % sys.exc_info()[:2]
                if __debug__:
                    if not self.Terminate:
                        self._print_log()
                        if self.debug: self.debug += 4  # Output all
                        self._log(1, reason)
                self.inq.put((self.abort, reason))
                break

        poll.unregister(self.read_fd)

        if __debug__: self._log(1, 'finished')

else:

    # No "poll" - use select()

    def _reader(self):
        """Reader thread (select flavour): same contract as the poll
        variant above, for platforms without select.poll()."""
        threading.currentThread().setName(self.identifier + 'reader')

        if __debug__: self._log(1, 'starting using select')

        line_part = ''  # partial line carried over between reads

        rxzero = 0
        terminate = False

        while not (terminate or self.Terminate):
            if self.state == LOGOUT:
                timeout = 1
            else:
                timeout = self.read_poll_timeout
            try:
                r,w,e = select.select([self.read_fd], [], [], timeout)
                if __debug__: self._log(5, 'select => %s, %s, %s' % (r,w,e))
                if not r:  # Timeout
                    continue

                data = self.read(self.read_size)  # Drain ssl buffer if present
                start = 0
                dlen = len(data)
                if __debug__: self._log(5, 'rcvd %s' % dlen)
                if dlen == 0:
                    # Repeated zero-length reads mean the peer closed.
                    rxzero += 1
                    if rxzero > 5:
                        raise IOError("Too many read 0")
                    time.sleep(0.1)
                    continue  # Try again
                rxzero = 0

                while True:
                    stop = data.find('\n', start)
                    if stop < 0:
                        line_part += data[start:]
                        break
                    stop += 1
                    line_part, start, line = \
                        '', stop, line_part + data[start:stop]
                    if __debug__: self._log(4, '< %s' % line)
                    self.inq.put(line)
                    if self.TerminateReader:
                        terminate = True
            except:
                reason = 'socket error: %s - %s' % sys.exc_info()[:2]
                if __debug__:
                    if not self.Terminate:
                        self._print_log()
                        if self.debug: self.debug += 4  # Output all
                        self._log(1, reason)
                self.inq.put((self.abort, reason))
                break

        if __debug__: self._log(1, 'finished')
def _writer(self):
    """Writer thread: pop requests off ouq and send their data to the
    server; a None item (poison pill) or a send error ends the thread,
    which then notifies the handler via inq."""
    threading.currentThread().setName(self.identifier + 'writer')

    if __debug__: self._log(1, 'starting')

    reason = 'Terminated'

    while not self.Terminate:
        rqb = self.ouq.get()
        if rqb is None:
            break  # Outq flushed

        try:
            self.send(rqb.data)
            if __debug__: self._log(4, '> %s' % rqb.data)
        except:
            reason = 'socket error: %s - %s' % sys.exc_info()[:2]
            if __debug__:
                if not self.Terminate:
                    self._print_log()
                    if self.debug: self.debug += 4  # Output all
                    self._log(1, reason)
            rqb.abort(self.abort, reason)
            break

    self.inq.put((self.abort, reason))

    if __debug__: self._log(1, 'finished')
# Debugging
if __debug__:

    def _init_debug(self, debug=None, debug_file=None, debug_buf_lvl=None):
        """Set up debug stream, debug level and the circular command log."""
        self.debug = self._choose_nonull_or_dflt(0, debug, Debug)
        self.debug_file = self._choose_nonull_or_dflt(sys.stderr, debug_file)
        self.debug_buf_lvl = self._choose_nonull_or_dflt(DFLT_DEBUG_BUF_LVL, debug_buf_lvl)

        self.debug_lock = threading.Lock()
        self._cmd_log_len = 20
        self._cmd_log_idx = 0
        self._cmd_log = {}  # Last `_cmd_log_len' interactions
        if self.debug:
            self._mesg('imaplib2 version %s' % __version__)
            self._mesg('imaplib2 debug level %s, buffer level %s' % (self.debug, self.debug_buf_lvl))

    def _dump_ur(self, lvl):
        """Dump the queued untagged responses when lvl is within the
        current debug level."""
        if lvl > self.debug:
            return

        l = self.untagged_responses
        if not l:
            return

        t = '\n\t\t'
        l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
        self.debug_lock.acquire()
        self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
        self.debug_lock.release()

    def _log(self, lvl, line):
        """Emit 'line' at debug level 'lvl'; level-1 lines also enter the
        circular command log kept for post-mortem dumps."""
        if lvl > self.debug:
            return

        if line[-2:] == CRLF:
            line = line[:-2] + '\\r\\n'

        tn = threading.currentThread().getName()

        if lvl <= 1 or self.debug > self.debug_buf_lvl:
            self.debug_lock.acquire()
            self._mesg(line, tn)
            self.debug_lock.release()
            if lvl != 1:
                return

        # Keep log of last `_cmd_log_len' interactions for debugging.
        self.debug_lock.acquire()
        self._cmd_log[self._cmd_log_idx] = (line, tn, time.time())
        self._cmd_log_idx += 1
        if self._cmd_log_idx >= self._cmd_log_len:
            self._cmd_log_idx = 0
        self.debug_lock.release()
def _mesg(self, s, tn=None, secs=None):
    """Write one timestamped debug line to the debug stream.

    'tn' defaults to the current thread's name, 'secs' to now."""
    if secs is None:
        secs = time.time()
    if tn is None:
        tn = threading.currentThread().getName()
    stamp = time.strftime('%M:%S', time.localtime(secs))
    self.debug_file.write('  %s.%02d %s %s\n' % (stamp, (secs*100)%100, tn, s))
    self.debug_file.flush()
def _print_log(self):
    """Dump the circular command log via _mesg, oldest entry first."""
    self.debug_lock.acquire()
    idx, remaining = self._cmd_log_idx, self._cmd_log_len
    if remaining: self._mesg('last %d log messages:' % remaining)
    while remaining:
        try:
            self._mesg(*self._cmd_log[idx])
        except:
            # Slot never written yet (log not full) - skip it.
            pass
        idx += 1
        if idx >= self._cmd_log_len:
            idx = 0
        remaining -= 1
    self.debug_lock.release()
class IMAP4_SSL(IMAP4):

    """IMAP4 client class talking over an SSL-wrapped socket.

    Instantiate with:
        IMAP4_SSL(host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)

        host            - server name (default: localhost);
        port            - port number (default: standard IMAP4 SSL port);
        keyfile         - PEM file holding the client private key (default: None);
        certfile        - PEM formatted certificate chain file (default: None);
        ca_certs        - PEM CA file used to validate server certificates (default: None);
        cert_verify_cb  - callback verifying server certificates (default: None);
        debug           - debug level (default: 0 - no debug);
        debug_file      - debug stream (default: sys.stderr);
        identifier      - thread identifier prefix (default: host);
        timeout         - seconds to wait for a command response;
        debug_buf_lvl   - debug level at which buffering is turned off.

    For more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        # Stash the SSL material first: IMAP4.__init__ opens the
        # connection, which consults these attributes.
        self.keyfile = keyfile
        self.certfile = certfile
        self.ca_certs = ca_certs
        self.cert_verify_cb = cert_verify_cb
        IMAP4.__init__(self, host, port, debug, debug_file, identifier, timeout, debug_buf_lvl)

    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Set up the secure connection to "host:port" (default:
        localhost:standard IMAP4 SSL port).  This connection is used by
        the routines: read, send, shutdown, socket, ssl."""
        self.host = self._choose_nonull_or_dflt('', host)
        self.port = self._choose_nonull_or_dflt(IMAP4_SSL_PORT, port)
        self.sock = self.open_socket()
        self.ssl_wrap_socket()

    def read(self, size):
        """data = read(size)
        Read at most 'size' bytes from remote, transparently inflating
        when COMPRESS=DEFLATE is active."""
        if self.decompressor is None:
            return self.sock.read(size)
        raw = self.decompressor.unconsumed_tail
        if not raw:
            raw = self.sock.read(READ_SIZE)
        return self.decompressor.decompress(raw, size)

    def send(self, data):
        """send(data)
        Send 'data' to remote, deflating first when COMPRESS=DEFLATE is
        active."""
        if self.compressor is not None:
            data = self.compressor.compress(data) + self.compressor.flush(zlib.Z_SYNC_FLUSH)
        if hasattr(self.sock, "sendall"):
            self.sock.sendall(data)
        else:
            # Older ssl objects only expose write(); loop until drained.
            while data:
                sent = self.sock.write(data)
                data = data[sent:]

    def ssl(self):
        """ssl = ssl()
        Return ssl instance used to communicate with the IMAP4 server."""
        return self.sock
class IMAP4_stream(IMAP4):

    """IMAP4 client class talking to a subprocess over its stdin/stdout.

    Instantiate with:
        IMAP4_stream(command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)

        command         - string that can be passed to subprocess.Popen();
        debug           - debug level (default: 0 - no debug);
        debug_file      - debug stream (default: sys.stderr);
        identifier      - thread identifier prefix (default: host);
        timeout         - seconds to wait for a command response;
        debug_buf_lvl   - debug level at which buffering is turned off.

    For more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
        self.command = command
        self.host = command
        self.port = None
        self.sock = None
        self.writefile, self.readfile = None, None
        self.read_fd = None
        IMAP4.__init__(self, None, None, debug, debug_file, identifier, timeout, debug_buf_lvl)

    def open(self, host=None, port=None):
        """open(host=None, port=None)
        Spawn 'self.command' and wire its pipes up for use by the
        routines: read, send, shutdown, socket."""
        from subprocess import Popen, PIPE
        self._P = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
        self.writefile, self.readfile = self._P.stdin, self._P.stdout
        self.read_fd = self.readfile.fileno()

    def read(self, size):
        """Read 'size' bytes from remote."""
        if self.decompressor is None:
            return os.read(self.read_fd, size)
        raw = self.decompressor.unconsumed_tail
        if not raw:
            raw = os.read(self.read_fd, READ_SIZE)
        return self.decompressor.decompress(raw, size)

    def send(self, data):
        """Send data to remote."""
        if self.compressor is not None:
            data = self.compressor.compress(data) + self.compressor.flush(zlib.Z_SYNC_FLUSH)
        self.writefile.write(data)
        self.writefile.flush()

    def shutdown(self):
        """Close I/O established in "open"."""
        self.readfile.close()
        self.writefile.close()
class _Authenticator(object):

    """Private helper doing the base64 encode/decode legwork of an
    authentication conversation driven by a user-supplied mechanism."""

    def __init__(self, mechinst):
        self.mech = mechinst  # Callable object to provide/process data

    def process(self, data, rqb):
        """Decode the server challenge, feed it to the mechanism, and
        return the encoded reply ('*' aborts the conversation)."""
        reply = self.mech(self.decode(data))
        if reply is None:
            return '*'  # Abort conversation
        return self.encode(reply)

    def encode(self, inp):
        # Feed binascii.b2a_base64 slices of 48 bytes - a length that
        # factors into both 6 and 8 - so no chunk ever yields a partial
        # 6-bit group, then strip the newline each call appends.
        oup = ''
        while inp:
            t, inp = inp[:48], inp[48:]
            e = binascii.b2a_base64(t)
            if e:
                oup = oup + e[:-1]
        return oup

    def decode(self, inp):
        if not inp:
            return ''
        return binascii.a2b_base64(inp)
class _IdleCont(object):

    """Continuation handler for IDLE: once process() has run, the server
    is idling and will push asynchronous untagged changes."""

    def __init__(self, parent, timeout):
        self.parent = parent
        self.timeout = parent._choose_nonull_or_dflt(IDLE_TIMEOUT, timeout)
        parent.idle_timeout = self.timeout + time.time()

    def process(self, data, rqb):
        # Record the pending IDLE request under the parent's lock so
        # _end_idle() can later send DONE exactly once.
        parent = self.parent
        parent.idle_lock.acquire()
        parent.idle_rqb = rqb
        parent.idle_timeout = self.timeout + time.time()
        parent.idle_lock.release()
        if __debug__: parent._log(2, 'server IDLE started, timeout in %.2f secs' % self.timeout)
        return None
# Month-name tables used by the INTERNALDATE conversions below.
MonthNames = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

# Month abbreviation -> month number (1-12).  NOTE(review): the keys are
# built with str.encode(), a no-op under Python 2 but bytes under
# Python 3 - confirm intended key type if this module is ever ported.
Mon2num = dict(zip((x.encode() for x in MonthNames[1:]), range(1, 13)))

# Matches the INTERNALDATE item of a FETCH response, e.g.
#   ... INTERNALDATE "18-May-2033 05:33:20 +0200"
InternalDate = re.compile(r'.*INTERNALDATE "'
    r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
    r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
    r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
    r'"')
def Internaldate2Time(resp):

    """time_tuple = Internaldate2Time(resp)

    Convert IMAP4 INTERNALDATE to UT.  Returns None when 'resp' does not
    match the INTERNALDATE pattern."""

    mo = InternalDate.match(resp)
    if not mo:
        return None

    mon = Mon2num[mo.group('mon')]
    zonen = mo.group('zonen')

    # NOTE: 'min' shadows the builtin for the rest of this function.
    day = int(mo.group('day'))
    year = int(mo.group('year'))
    hour = int(mo.group('hour'))
    min = int(mo.group('min'))
    sec = int(mo.group('sec'))
    zoneh = int(mo.group('zoneh'))
    zonem = int(mo.group('zonem'))

    # INTERNALDATE timezone must be subtracted to get UT
    zone = (zoneh*60 + zonem)*60
    if zonen == '-':
        zone = -zone

    tt = (year, mon, day, hour, min, sec, -1, -1, -1)

    utc = time.mktime(tt)

    # Following is necessary because the time module has no 'mkgmtime'.
    # 'mktime' assumes arg in local timezone, so adds timezone/altzone.

    lt = time.localtime(utc)
    if time.daylight and lt[-1]:
        zone = zone + time.altzone
    else:
        zone = zone + time.timezone

    return time.localtime(utc - zone)

Internaldate2tuple = Internaldate2Time  # (Backward compatible)
def Time2Internaldate(date_time):

    """'"DD-Mmm-YYYY HH:MM:SS +HHMM"' = Time2Internaldate(date_time)

    Convert 'date_time' (epoch seconds, time tuple, or an already quoted
    string) to the IMAP4 INTERNALDATE representation."""

    if isinstance(date_time, (int, float)):
        tt = time.localtime(date_time)
    elif isinstance(date_time, (tuple, time.struct_time)):
        tt = date_time
    elif isinstance(date_time, str) and (date_time[0], date_time[-1]) == ('"', '"'):
        # Already quoted - assume the caller formatted it correctly.
        return date_time
    else:
        raise ValueError("date_time not of a known type")

    # Local UTC offset, honouring DST when the tuple says it is active.
    if time.daylight and tt[-1]:
        zone = -time.altzone
    else:
        zone = -time.timezone
    zone_h, zone_m = divmod(zone//60, 60)

    return ('"%2d-%s-%04d %02d:%02d:%02d %+03d%02d"' %
            (tt[2], MonthNames[tt[1]], tt[0], tt[3], tt[4], tt[5], zone_h, zone_m))
# Matches the FLAGS item of a FETCH response, e.g. FLAGS (\Seen \Draft)
FLAGS_cre = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')

def ParseFlags(resp):

    """('flag', ...) = ParseFlags(line)

    Convert an IMAP4 flags response to a python tuple; an empty tuple is
    returned when 'resp' carries no FLAGS item."""

    match = FLAGS_cre.match(resp)
    if match is None:
        return ()
    return tuple(match.group('flags').split())
| false | true |
f7228f888e0c1319b41e87dce8b0a43b5bb32b32 | 6,784 | py | Python | my_tools/test.py | StephenStorm/SlowFast | 9e3616ec05bd0433c721d0b9438ac3ac0f145ac5 | [
"Apache-2.0"
] | null | null | null | my_tools/test.py | StephenStorm/SlowFast | 9e3616ec05bd0433c721d0b9438ac3ac0f145ac5 | [
"Apache-2.0"
] | null | null | null | my_tools/test.py | StephenStorm/SlowFast | 9e3616ec05bd0433c721d0b9438ac3ac0f145ac5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import sys, os
TRT_LOGGER = trt.Logger()
def get_engine(onnx_file_path, engine_file_path=""):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine():
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_workspace_size = 1 << 30 # 256MiB
builder.max_batch_size = 1
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run export_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# last_layer = network.get_layer(network.num_layers - 1)
# network.mark_output(last_layer.get_output(0))
# print(type(network))
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
engine = builder.build_cuda_engine(network)
print("Completed creating Engine")
# with open(engine_file_path, "wb") as f:
# f.write(engine.serialize())
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
def build_my_engine(engine_file_path, onnx_file_path):
    """Takes an ONNX file and creates a TensorRT engine (fp16, dynamic
    batch 1-2 via an optimization profile) to run inference with.

    NOTE(review): the engine-cache branch below is disabled with
    'if False:' (debug leftover), so the engine is always rebuilt from
    the ONNX file, and the serialize/write step is commented out too."""
    # if os.path.exists(engine_file_path):
    if False:
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 30
            # The maximum GPU temporary memory which the engine can use at execution time.
            builder.fp16_mode = True
            builder.max_batch_size = 3
            config = builder.create_builder_config()
            profile = builder.create_optimization_profile()
            # set_shape(self: tensorrt.tensorrt.IOptimizationProfile, input: str,
            # min: tensorrt.tensorrt.Dims, opt: tensorrt.tensorrt.Dims,
            # max: tensorrt.tensorrt.Dims) -> None
            profile.set_shape("slow", (1, 3, 8, 256, 256), (1, 3, 8, 256, 256), (2, 3, 8, 256, 256))
            profile.set_shape("fast", (1, 3, 32, 256, 256), (1, 3, 32, 256, 256), (2, 3, 32, 256, 256))
            config.add_optimization_profile(profile)
            # This function must be called at least once if the network has dynamic or shape input tensors.
            # Parse model file
            if not os.path.exists(onnx_file_path):
                print('ONNX file {} not found, please run export_onnx.py first to generate it.'.format(onnx_file_path))
                exit(0)
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                if not parser.parse(model.read()):
                    print('error occurd ~')
                    for error in range(parser.num_errors):
                        print(parser.get_error(error))
            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
            # engine = builder.build_cuda_engine(network)
            engine = builder.build_engine(network, config)
            print("Completed creating Engine")
            # with open(engine_file_path, "wb") as f:
            # f.write(engine.serialize())
            print(profile.get_shape('slow'))
            print(profile.get_shape('fast'))
            print(profile.get_shape_input('slow'))
            return engine
# Dead notes kept from the original author (bare string literal - it is
# evaluated and discarded, never executed as code).
'''
context.set_binding_shape(0, (3, 150, 250))
profile = builder.create_optimization_profile();
profile.set_shape("foo", (3, 100, 200), (3, 150, 250), (3, 200, 300))
config.add_optimization_profile(profile)
with trt.Builder(TRT_LOGGER) as builder, builder.create_builder_config() as config:
config.max_workspace_size = 1 << 20 # This determines the amount of memory available to the builder when building an optimized engine and should generally be set as high as possible.
with builder.build_engine(network, config) as engine:
# Do inference here.
'''
# Model / engine paths (machine-specific; edit before running).
onnx_file_path = '/home/stephen/workspace/ActionRecognition/my_SlowFast/onnx/slowfast_mul_batch_sim.onnx'
onnx_file_path2 = '/home/stephen/workspace/ActionRecognition/onnx_trt/test15_sim.onnx'
engine_file_path = '/home/stephen/workspace/ActionRecognition/my_SlowFast/onnx/slowfast_mul_batch.trt'
# engine_file_path = ''
# More disabled experimentation, kept verbatim as a bare string literal.
'''
# engine = get_engine(onnx_file_path)
if engine is None:
print('fail build engine')
print(engine.get_binding_shape(0),
engine.get_binding_shape(1),
engine.get_binding_shape(2)
)
# The number of binding indices.
print('num_bindings: {}'.format(engine.num_bindings))
# The maximum batch size which can be used for inference. implicit 1
print('max batch size: {}'.format(engine.max_batch_size))
# 优化合并后的层数
print('num_layers: {}'.format(engine.num_layers))
# Workspace will be allocated for each IExecutionContext
print('max_workspace_size: {}'.format(engine.max_workspace_size))
# num_optimization_profiles
print('optimizition profiles for this engine: {}'.format(engine.num_optimization_profiles))
'''
# Script entry: build the engine and show the first binding's shape.
engine = build_my_engine(engine_file_path, onnx_file_path)
with engine.create_execution_context() as context:
    print(context.get_binding_shape(0))
| 49.882353 | 186 | 0.664652 | import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import sys, os
TRT_LOGGER = trt.Logger()
def get_engine(onnx_file_path, engine_file_path=""):
def build_engine():
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_workspace_size = 1 << 30
builder.max_batch_size = 1
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run export_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
engine = builder.build_cuda_engine(network)
print("Completed creating Engine")
return engine
if os.path.exists(engine_file_path):
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
def build_my_engine(engine_file_path, onnx_file_path):
if False:
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_workspace_size = 1 << 30
builder.fp16_mode = True
builder.max_batch_size = 3
config = builder.create_builder_config()
profile = builder.create_optimization_profile()
profile.set_shape("slow", (1, 3, 8, 256, 256), (1, 3, 8, 256, 256), (2, 3, 8, 256, 256))
profile.set_shape("fast", (1, 3, 32, 256, 256), (1, 3, 32, 256, 256), (2, 3, 32, 256, 256))
config.add_optimization_profile(profile)
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run export_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print('error occurd ~')
for error in range(parser.num_errors):
print(parser.get_error(error))
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
engine = builder.build_engine(network, config)
print("Completed creating Engine")
print(profile.get_shape('slow'))
print(profile.get_shape('fast'))
print(profile.get_shape_input('slow'))
return engine
onnx_file_path = '/home/stephen/workspace/ActionRecognition/my_SlowFast/onnx/slowfast_mul_batch_sim.onnx'
onnx_file_path2 = '/home/stephen/workspace/ActionRecognition/onnx_trt/test15_sim.onnx'
engine_file_path = '/home/stephen/workspace/ActionRecognition/my_SlowFast/onnx/slowfast_mul_batch.trt'
engine = build_my_engine(engine_file_path, onnx_file_path)
with engine.create_execution_context() as context:
print(context.get_binding_shape(0))
| true | true |
f722909221b9083478adb8e769ef815032038602 | 1,726 | py | Python | aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/DescribeRPSDKRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/DescribeRPSDKRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/DescribeRPSDKRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class DescribeRPSDKRequest(RpcRequest):
	"""Request object for the Cloudauth `DescribeRPSDK` RPC API (version 2019-03-07)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'DescribeRPSDK','cloudauth')
		self.set_method('POST')
		# Newer cores expose endpoint tables; populate them when available.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_SourceIp(self):
		"""Return the SourceIp query parameter (or None if unset)."""
		return self.get_query_params().get('SourceIp')

	def set_SourceIp(self, SourceIp):
		"""Set the SourceIp query parameter."""
		self.add_query_param('SourceIp', SourceIp)

	def get_Lang(self):
		"""Return the Lang query parameter (or None if unset)."""
		return self.get_query_params().get('Lang')

	def set_Lang(self, Lang):
		"""Set the Lang query parameter."""
		self.add_query_param('Lang', Lang)

	def get_TaskId(self):
		"""Return the TaskId query parameter (or None if unset)."""
		return self.get_query_params().get('TaskId')

	def set_TaskId(self, TaskId):
		"""Set the TaskId query parameter."""
		self.add_query_param('TaskId', TaskId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class DescribeRPSDKRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'DescribeRPSDK','cloudauth')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_TaskId(self):
return self.get_query_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_query_param('TaskId',TaskId) | true | true |
f722917f1a19a21949e86991d7e61b514205fbd8 | 8,535 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20200601/service_endpoint_policy.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20200601/service_endpoint_policy.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20200601/service_endpoint_policy.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ServiceEndpointPolicy']
class ServiceEndpointPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyDefinitionArgs']]]]] = None,
service_endpoint_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Service End point policy resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyDefinitionArgs']]]] service_endpoint_policy_definitions: A collection of service endpoint policy definitions of the service endpoint policy.
:param pulumi.Input[str] service_endpoint_policy_name: The name of the service endpoint policy.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['id'] = id
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['service_endpoint_policy_definitions'] = service_endpoint_policy_definitions
if service_endpoint_policy_name is None:
raise TypeError("Missing required property 'service_endpoint_policy_name'")
__props__['service_endpoint_policy_name'] = service_endpoint_policy_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['subnets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ServiceEndpointPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ServiceEndpointPolicy, __self__).__init__(
'azure-nextgen:network/v20200601:ServiceEndpointPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ServiceEndpointPolicy':
"""
Get an existing ServiceEndpointPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ServiceEndpointPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the service endpoint policy resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the service endpoint policy resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="serviceEndpointPolicyDefinitions")
def service_endpoint_policy_definitions(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPolicyDefinitionResponse']]]:
"""
A collection of service endpoint policy definitions of the service endpoint policy.
"""
return pulumi.get(self, "service_endpoint_policy_definitions")
@property
@pulumi.getter
def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 47.949438 | 1,436 | 0.679555 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ServiceEndpointPolicy']
class ServiceEndpointPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyDefinitionArgs']]]]] = None,
service_endpoint_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['id'] = id
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['service_endpoint_policy_definitions'] = service_endpoint_policy_definitions
if service_endpoint_policy_name is None:
raise TypeError("Missing required property 'service_endpoint_policy_name'")
__props__['service_endpoint_policy_name'] = service_endpoint_policy_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['subnets'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ServiceEndpointPolicy"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ServiceEndpointPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ServiceEndpointPolicy, __self__).__init__(
'azure-nextgen:network/v20200601:ServiceEndpointPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ServiceEndpointPolicy':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ServiceEndpointPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="serviceEndpointPolicyDefinitions")
def service_endpoint_policy_definitions(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPolicyDefinitionResponse']]]:
return pulumi.get(self, "service_endpoint_policy_definitions")
@property
@pulumi.getter
def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f72292672e82e154161cc84f8f20433cdbb662d1 | 61 | py | Python | gunpowder/torch/__init__.py | trivoldus28/gunpowder | 97e9e64709fb616e2c47567b22d5f11a9234fe48 | [
"MIT"
] | 43 | 2017-05-03T22:27:11.000Z | 2022-02-11T19:07:28.000Z | gunpowder/torch/__init__.py | trivoldus28/gunpowder | 97e9e64709fb616e2c47567b22d5f11a9234fe48 | [
"MIT"
] | 102 | 2017-06-09T10:11:06.000Z | 2022-03-29T13:56:37.000Z | gunpowder/torch/__init__.py | trivoldus28/gunpowder | 97e9e64709fb616e2c47567b22d5f11a9234fe48 | [
"MIT"
] | 43 | 2017-04-25T20:25:17.000Z | 2022-02-11T19:07:34.000Z | from __future__ import absolute_import
from .nodes import *
| 15.25 | 38 | 0.819672 | from __future__ import absolute_import
from .nodes import *
| true | true |
f72292c08728b7c797e05de2a8920f385e676e2c | 4,065 | py | Python | benchmarks.py | belakaria/USeMO | ba0fb128b6cc961864d6d3187b073c071f64ce40 | [
"MIT"
] | 4 | 2020-11-25T05:47:57.000Z | 2022-01-04T10:50:56.000Z | benchmarks.py | aryandeshwal/USeMO | 063fbd8b8c39c3cc54f9abd8c79ff01eda9dc803 | [
"MIT"
] | null | null | null | benchmarks.py | aryandeshwal/USeMO | 063fbd8b8c39c3cc54f9abd8c79ff01eda9dc803 | [
"MIT"
] | 1 | 2021-01-14T06:01:11.000Z | 2021-01-14T06:01:11.000Z | import math
import numpy as np
from scipy.interpolate import interp1d
from copy import deepcopy
def Rosen(x1, d):
    """Rosenbrock benchmark: inputs in [0, 1]^d are mapped to [-2, 2]^d first."""
    z = list(4 * np.asarray(x1) - 2)
    return sum(100 * (z[k] ** 2 - z[k + 1]) ** 2 + (z[k] - 1) ** 2
               for k in range(d - 1))
def Sphere(x1, d):
    """Sphere benchmark: sum of squares after mapping [0, 1]^d to [-2, 2]^d."""
    z = list(4 * np.asarray(x1) - 2)
    return sum(v ** 2 for v in z[:d])
def AckleyD(x1, d):
    """Ackley benchmark on [0, 1]^d inputs mapped to [-2, 2]^d; minimum 0 at the centre.

    Note: keeps the original's truncated pi constant (3.1416) for identical output.
    """
    z = list(4 * np.asarray(x1) - 2)
    mean_square = sum(v * v for v in z[:d]) / d
    mean_cosine = sum(math.cos(2 * 3.1416 * v) for v in z[:d]) / d
    return (-20.0 * math.exp(-0.2 * math.sqrt(mean_square))
            - math.exp(mean_cosine) + 20 + math.exp(1))
################################################
def Currin(x, d):
    """Currin exponential test function of x[0] and x[1] (d is accepted but unused)."""
    shrink = 1 - math.exp(-0.5 * (1 / x[1]))
    numer = 2300 * pow(x[0], 3) + 1900 * x[0] * x[0] + 2092 * x[0] + 60
    denom = 100 * pow(x[0], 3) + 500 * x[0] * x[0] + 4 * x[0] + 20
    return float(shrink * (numer / denom))
def branin(x1, d):
    """Branin benchmark; x1 in [0, 1]^2 is rescaled to the usual [-5, 10] x [0, 15] domain."""
    u = 15 * x1[0] - 5
    v = 15 * x1[1]
    parabola = np.square(v - (5.1 / (4 * np.square(math.pi))) * np.square(u)
                         + (5 / math.pi) * u - 6)
    wave = 10 * (1 - (1. / (8 * math.pi))) * np.cos(u)
    return float(parabola + wave + 10)
################################################
def Powell(xx, d):
    """Powell benchmark over groups of four variables; xx in [0, 1]^d maps to [-4, 5]^d."""
    z = [None] + list(-4 + np.asarray(xx) * 9)
    total = 0
    for k in range(1, int(math.floor(d / 4) + 1)):
        a, b, c, e = z[4 * k - 3], z[4 * k - 2], z[4 * k - 1], z[4 * k]
        total += (pow(a + 10 * b, 2) + 5 * pow(c - e, 2)
                  + pow(b - 2 * c, 4) + 10 * pow(a - 2 * e, 4))
    return float(total)
def Perm(xx, d):
    """Perm(d, beta=10) benchmark; xx in [0, 1]^d is rescaled to [-d, d]^d."""
    beta = 10
    z = [None] + list(-1 * d + np.asarray(xx) * (2 * d))
    total = 0
    for i in range(1, d + 1):
        inner = 0
        for j in range(1, d + 1):
            inner = inner + (j + beta) * (z[j] - math.pow(j, -1 * i))
        total = total + math.pow(inner, 2)
    return total
def Dixon(xx, d):
    """Dixon-Price benchmark; xx in [0, 1]^d is rescaled to [-10, 10]^d.

    f(x) = (x1 - 1)^2 + sum_{i=2}^{d} i * (2*x_i^2 - x_{i-1})^2,
    with a global minimum of 0.
    """
    vmin = -10
    vmax = 10
    x = [None] + list(vmin + np.asarray(xx) * (vmax - vmin))
    f_original = 0
    for i in range(2, d + 1):
        f_original = f_original + i * math.pow(2 * math.pow(x[i], 2) - x[i - 1], 2)
    # Bug fix: the first-variable term is squared in the Dixon-Price
    # definition; the original code used exponent 1 (math.pow(..., 1)).
    f_original = f_original + math.pow(x[1] - 1, 2)
    return f_original
def ZAKHAROV(xx, d):
    """Zakharov benchmark; xx in [0, 1]^d is rescaled to [-5, 10]^d."""
    z = [None] + list(-5 + np.asarray(xx) * 15)
    square_sum = 0
    weighted_sum = 0
    for i in range(1, d + 1):
        square_sum = square_sum + z[i] ** 2
        weighted_sum = weighted_sum + 0.5 * i * z[i]
    return square_sum + math.pow(weighted_sum, 2) + math.pow(weighted_sum, 4)
def RASTRIGIN(xx, d):
    """Rastrigin benchmark; xx in [0, 1]^d is rescaled to [-5.12, 5.12]^d."""
    z = [None] + list(-5.12 + np.asarray(xx) * (5.12 - -5.12))
    total = 0
    for i in range(1, d + 1):
        total = total + (z[i] ** 2 - 10 * math.cos(2 * z[i] * math.pi))
    return total + 10 * d
def SumSquares(xx, d):
    """Weighted sum-of-squares benchmark; xx in [0, 1]^d maps to [-5.12, 5.12]^d."""
    z = [None] + list(-5.12 + np.asarray(xx) * (5.12 - -5.12))
    total = 0
    for i in range(1, d + 1):
        total = total + i * math.pow(z[i], 2)
    return total
################################################
def DTLZ14f_1(x, d):
    """First objective of the four-objective DTLZ1 benchmark."""
    g = 0
    for idx in range(d):
        g = g + pow(x[idx] - 0.5, 2) - math.cos(20 * math.pi * (x[idx] - 0.5))
    g = 100 * (d + g)
    return (1 + g) * 0.5 * x[0] * x[1] * x[2]
def DTLZ14f_2(x, d):
    """Second objective of the four-objective DTLZ1 benchmark."""
    g = 0
    for idx in range(d):
        g = g + pow(x[idx] - 0.5, 2) - math.cos(20 * math.pi * (x[idx] - 0.5))
    g = 100 * (d + g)
    return (1 + g) * 0.5 * (1 - x[2]) * x[0] * x[1]
def DTLZ14f_3(x, d):
    """Third objective of the four-objective DTLZ1 benchmark."""
    g = 0
    for idx in range(d):
        g = g + pow(x[idx] - 0.5, 2) - math.cos(20 * math.pi * (x[idx] - 0.5))
    g = 100 * (d + g)
    return (1 + g) * 0.5 * (1 - x[1]) * x[0]
def DTLZ14f_4(x, d):
    """Fourth objective of the four-objective DTLZ1 benchmark."""
    g = 0
    for idx in range(d):
        g = g + pow(x[idx] - 0.5, 2) - math.cos(20 * math.pi * (x[idx] - 0.5))
    g = 100 * (d + g)
    return (1 + g) * 0.5 * (1 - x[0])
#########################################
#d=4
def ZDT1_1(x, d):
    """First ZDT1 objective: simply the first decision variable (d is unused)."""
    return x[0]
def ZDT1_2(x, d):
    """Second ZDT1 objective: g(x) * h(f1, g) with the standard ZDT1 g and h."""
    f1 = x[0]
    tail = 0
    for i in range(1, d):
        tail = tail + x[i]
    g = tail * (9. / (d - 1)) + 1
    h = 1 - math.sqrt(f1) / math.sqrt(g)
    return g * h
########################################### | 27.1 | 154 | 0.510209 | import math
import numpy as np
from scipy.interpolate import interp1d
from copy import deepcopy
def Rosen(x1, d):
x=list(4*np.asarray(x1)-2)
sum_i = 0
for i in range(d-1):
sum_i =sum_i + (100 * ((x[i]**2) - x[i+1])**2 + (x[i] - 1)**2)
return sum_i
def Sphere(x1,d):
x=list(4*np.asarray(x1)-2)
sum_i = 0
for i in range(d):
sum_i =sum_i + (x[i]**2)
return sum_i
def AckleyD(x1, d):
x=list(4*np.asarray(x1)-2)
sum_i = 0
for i in range(d):
sum_i = sum_i + x[i]*x[i]
square_sum = sum_i/d
sum_i = 0
for i in range(d):
sum_i = sum_i + math.cos(2*3.1416*x[i])
cos_sum = sum_i/d
f_original = -20.0*math.exp(-0.2*math.sqrt(square_sum)) - math.exp(cos_sum) + 20 + math.exp(1)
return f_original
| true | true |
f72292fb546b416bad4ea00d66e1f27463821c23 | 15,741 | py | Python | zed_python_sample/darknet_zed.py | ryota2425/zed-yolo | a5672d30e7d11b7542b8cdb0ac1cd882741fb150 | [
"MIT"
] | null | null | null | zed_python_sample/darknet_zed.py | ryota2425/zed-yolo | a5672d30e7d11b7542b8cdb0ac1cd882741fb150 | [
"MIT"
] | null | null | null | zed_python_sample/darknet_zed.py | ryota2425/zed-yolo | a5672d30e7d11b7542b8cdb0ac1cd882741fb150 | [
"MIT"
] | null | null | null | #!python3
"""
Python 3 wrapper for identifying objects in images
Requires DLL compilation
Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py
Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py
@author: Philip Kahn, Aymeric Dujardin
@date: 20180911
"""
# pylint: disable=R, W0401, W0614, W0703
import os
import sys
import time
import logging
import random
from random import randint
import math
import statistics
import getopt
from ctypes import *
import numpy as np
import cv2
import pyzed.sl as sl
# Get the top-level logger object
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def sample(probs):
    """Draw a random index weighted by `probs` (weights are normalised internally)."""
    total = sum(probs)
    normalised = [p / total for p in probs]
    r = random.uniform(0, 1)
    for idx, p in enumerate(normalised):
        r = r - p
        if r <= 0:
            return idx
    # Guard against floating-point residue leaving r slightly positive.
    return len(normalised) - 1
def c_array(ctype, values):
    """Build a ctypes array of `ctype` initialised from the Python sequence `values`."""
    out = (ctype * len(values))()
    for idx, value in enumerate(values):
        out[idx] = value
    return out
class BOX(Structure):
    """ctypes mirror of darknet's C `box`: centre (x, y) plus width and height
    (centre semantics are how main() interprets it when drawing)."""
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    """ctypes mirror of darknet's C `detection`: one candidate box with its
    per-class probabilities and (optional) mask pointer."""
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    """ctypes mirror of darknet's C `image`: width, height, channel count and a
    pointer to the float pixel buffer."""
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    """ctypes mirror of darknet's C `metadata`: class count and class names."""
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
#lib = CDLL("darknet.so", RTLD_GLOBAL)
# Locate and load the compiled darknet shared library. On Windows we choose
# between the CUDA DLL and the CPU-only DLL; a CPU run can be forced via the
# FORCE_CPU environment variable, CUDA_VISIBLE_DEVICES < 0, or a module-level
# DARKNET_FORCE_CPU name. On other platforms the Makefile-built
# libdarknet.so is loaded directly.
hasGPU = True
if os.name == "nt":
    cwd = os.path.dirname(__file__)
    os.environ['PATH'] = cwd + ';' + os.environ['PATH']
    winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
    winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
    envKeys = list()
    for k, v in os.environ.items():
        envKeys.append(k)
    try:
        try:
            # Explicit user opt-out of the GPU build.
            tmp = os.environ["FORCE_CPU"].lower()
            if tmp in ["1", "true", "yes", "on"]:
                raise ValueError("ForceCPU")
            else:
                log.info("Flag value '"+tmp+"' not forcing CPU mode")
        except KeyError:
            # We never set the flag
            if 'CUDA_VISIBLE_DEVICES' in envKeys:
                if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
                    raise ValueError("ForceCPU")
            try:
                global DARKNET_FORCE_CPU
                if DARKNET_FORCE_CPU:
                    raise ValueError("ForceCPU")
            except NameError:
                pass
            # log.info(os.environ.keys())
            # log.warning("FORCE_CPU flag undefined, proceeding with GPU")
        if not os.path.exists(winGPUdll):
            raise ValueError("NoDLL")
        lib = CDLL(winGPUdll, RTLD_GLOBAL)
    except (KeyError, ValueError):
        # CPU fallback: use the no-GPU DLL when present, otherwise try the
        # GPU DLL anyway (it may simply not have been renamed).
        hasGPU = False
        if os.path.exists(winNoGPUdll):
            lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
            log.warning("Notice: CPU-only mode")
        else:
            # Try the other way, in case no_gpu was
            # compile but not renamed
            lib = CDLL(winGPUdll, RTLD_GLOBAL)
            log.warning("Environment variables indicated a CPU run, but we didn't find `" +
                        winNoGPUdll+"`. Trying a GPU run anyway.")
else:
    lib = CDLL("../libdarknet/libdarknet.so", RTLD_GLOBAL)
# --- C function prototypes -------------------------------------------------
# Declare argtypes/restype for every libdarknet symbol used in this module so
# ctypes marshals pointers and floats correctly on all platforms.
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
    # cuda_set_device only exists in CUDA builds of the library.
    set_gpu = lib.cuda_set_device
    set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(
    c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
# Memory management for detection arrays returned by get_network_boxes.
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
# Network/config loading entry points.
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
# Non-maximum suppression variants.
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def array_to_image(arr):
    """Convert an HxWxC uint8 numpy image to a darknet IMAGE.

    Returns (image, buffer): the flattened float buffer is returned alongside
    the IMAGE so the caller can keep it referenced — otherwise Python would
    free the memory the IMAGE's data pointer refers to.
    """
    import numpy as np
    arr = arr.transpose(2, 0, 1)
    c, h, w = arr.shape
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    return IMAGE(w, h, c, data), arr
def classify(net, meta, im):
    """Run the classifier on `im` and return (label, score) pairs, best first.

    Labels come from the optional `altNames` list when it was loaded,
    otherwise from the C metadata's names array.
    """
    scores = predict_image(net, im)
    labelled = []
    for idx in range(meta.classes):
        label = meta.names[idx] if altNames is None else altNames[idx]
        labelled.append((label, scores[idx]))
    labelled.sort(key=lambda pair: -pair[1])
    return labelled
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False):
    """
    Performs the detection

    Runs one forward pass of the network on `image` (a BGR numpy array) and
    returns a list of
        (label, confidence, (x_center, y_center, width, height), class_index)
    tuples sorted by descending confidence; main() treats the box values as
    pixel coordinates of the input image. `nms` is the non-maximum-suppression
    overlap threshold (a falsy value disables NMS).
    """
    # Darknet expects RGB at the network's input resolution.
    custom_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    custom_image = cv2.resize(custom_image, (lib.network_width(
        net), lib.network_height(net)), interpolation=cv2.INTER_LINEAR)
    im, arr = array_to_image(custom_image)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    # Boxes are requested relative to the original image dimensions.
    dets = get_network_boxes(
        net, image.shape[1], image.shape[0], thresh, hier_thresh, None, 0, pnum, 0)
    num = pnum[0]
    if nms:
        do_nms_sort(dets, num, meta.classes, nms)
    res = []
    if debug:
        log.debug("about to range")
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                if altNames is None:
                    name_tag = meta.names[i]
                else:
                    name_tag = altNames[i]
                res.append((name_tag, dets[j].prob[i], (b.x, b.y, b.w, b.h), i))
    res = sorted(res, key=lambda x: -x[1])
    # Release the C-side detection array before returning.
    free_detections(dets, num)
    return res
# Module-level cache so the Darknet network, metadata and alternate class
# names are loaded once and reused across frames (main() declares them
# global before initialising them).
netMain = None
metaMain = None
altNames = None
def get_object_depth(depth, bounds):
    '''
    Calculates the median x, y, z position of a small patch of the point
    cloud centred on the detection, in the camera frame.

    Arguments:
        depth: Point cloud data of whole frame, shape (H, W, >=3).
        bounds: Bounding box for object in pixels.
            bounds[0]: x-center
            bounds[1]: y-center
            bounds[2]: width of bounding box.
            bounds[3]: height of bounding box.

    Return:
        x, y, z: Location of object in meters, or (-1, -1, -1) when no
        finite depth sample exists inside the patch.
    '''
    area_div = 2

    x_vect = []
    y_vect = []
    z_vect = []

    # Bug fix: clamp the sampling window to the image so detections near the
    # border neither raise IndexError (right/bottom edge) nor silently wrap
    # around via negative indices (left/top edge).
    height, width = depth.shape[:2]
    j_lo = max(int(bounds[0] - area_div), 0)
    j_hi = min(int(bounds[0] + area_div), width)
    i_lo = max(int(bounds[1] - area_div), 0)
    i_hi = min(int(bounds[1] + area_div), height)

    for j in range(j_lo, j_hi):
        for i in range(i_lo, i_hi):
            z = depth[i, j, 2]
            if not np.isnan(z) and not np.isinf(z):
                x_vect.append(depth[i, j, 0])
                y_vect.append(depth[i, j, 1])
                z_vect.append(z)
    try:
        x_median = statistics.median(x_vect)
        y_median = statistics.median(y_vect)
        z_median = statistics.median(z_vect)
    except statistics.StatisticsError:
        # Every sample in the patch was NaN/inf (or the patch was empty).
        x_median = -1
        y_median = -1
        z_median = -1

    return x_median, y_median, z_median
def generate_color(meta_path):
    '''
    Produce one random RGB colour per class listed in the .data file.

    Arguments:
        meta_path: Path to the .data file; its first line must contain
            "classes=<N>".

    Return:
        List of N (R, G, B) tuples. The RNG is reseeded with a fixed value
        so the palette is identical on every call.
    '''
    random.seed(42)
    with open(meta_path, 'r') as meta_file:
        first_line = meta_file.readlines()[0]
    num_classes = int(first_line.split("=")[1])
    return [(randint(0, 255), randint(0, 255), randint(0, 255))
            for _ in range(num_classes)]
def main(argv):
thresh = 0.25
darknet_path="../libdarknet/"
config_path = darknet_path + "cfg/yolov3-tiny.cfg"
weight_path = "yolov3-tiny.weights"
meta_path = "coco.data"
svo_path = None
zed_id = 0
help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
try:
opts, args = getopt.getopt(
argv, "hc:w:m:t:s:z:", ["config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="])
except getopt.GetoptError:
log.exception(help_str)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
log.info(help_str)
sys.exit()
elif opt in ("-c", "--config"):
config_path = arg
elif opt in ("-w", "--weight"):
weight_path = arg
elif opt in ("-m", "--meta"):
meta_path = arg
elif opt in ("-t", "--threshold"):
thresh = float(arg)
elif opt in ("-s", "--svo_file"):
svo_path = arg
elif opt in ("-z", "--zed_id"):
zed_id = int(arg)
input_type = sl.InputType()
if svo_path is not None:
log.info("SVO file : " + svo_path)
input_type.set_from_svo_file(svo_path)
else:
# Launch camera by id
input_type.set_from_camera_id(zed_id)
init = sl.InitParameters(input_t=input_type)
init.coordinate_units = sl.UNIT.METER
cam = sl.Camera()
if not cam.is_opened():
log.info("Opening ZED Camera...")
status = cam.open(init)
if status != sl.ERROR_CODE.SUCCESS:
log.error(repr(status))
exit()
runtime = sl.RuntimeParameters()
# Use STANDARD sensing mode
runtime.sensing_mode = sl.SENSING_MODE.STANDARD
mat = sl.Mat()
point_cloud_mat = sl.Mat()
# Import the global variables. This lets us instance Darknet once,
# then just call performDetect() again without instancing again
global metaMain, netMain, altNames # pylint: disable=W0603
assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
if not os.path.exists(config_path):
raise ValueError("Invalid config path `" +
os.path.abspath(config_path)+"`")
if not os.path.exists(weight_path):
raise ValueError("Invalid weight path `" +
os.path.abspath(weight_path)+"`")
if not os.path.exists(meta_path):
raise ValueError("Invalid data file path `" +
os.path.abspath(meta_path)+"`")
if netMain is None:
netMain = load_net_custom(config_path.encode(
"ascii"), weight_path.encode("ascii"), 0, 1) # batch size = 1
if metaMain is None:
metaMain = load_meta(meta_path.encode("ascii"))
if altNames is None:
# In thon 3, the metafile default access craps out on Windows (but not Linux)
# Read the names file and create a list to feed to detect
try:
with open(meta_path) as meta_fh:
meta_contents = meta_fh.read()
import re
match = re.search("names *= *(.*)$", meta_contents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as names_fh:
names_list = names_fh.read().strip().split("\n")
altNames = [x.strip() for x in names_list]
except TypeError:
pass
except Exception:
pass
color_array = generate_color(meta_path)
log.info("Running...")
key = ''
while key != 113: # for 'q' key
start_time = time.time() # start time of the loop
err = cam.grab(runtime)
if err == sl.ERROR_CODE.SUCCESS:
cam.retrieve_image(mat, sl.VIEW.LEFT)
image = mat.get_data()
cam.retrieve_measure(
point_cloud_mat, sl.MEASURE.XYZRGBA)
depth = point_cloud_mat.get_data()
# Do the detection
detections = detect(netMain, metaMain, image, thresh)
log.info(chr(27) + "[2J"+"**** " + str(len(detections)) + " Results ****")
for detection in detections:
label = detection[0]
confidence = detection[1]
pstring = label+": "+str(np.rint(100 * confidence))+"%"
log.info(pstring)
bounds = detection[2]
y_extent = int(bounds[3])
x_extent = int(bounds[2])
# Coordinates are around the center
x_coord = int(bounds[0] - bounds[2]/2)
y_coord = int(bounds[1] - bounds[3]/2)
#boundingBox = [[x_coord, y_coord], [x_coord, y_coord + y_extent], [x_coord + x_extent, y_coord + y_extent], [x_coord + x_extent, y_coord]]
thickness = 1
x, y, z = get_object_depth(depth, bounds)
distance = math.sqrt(x * x + y * y + z * z)
distance = "{:.2f}".format(distance)
cv2.rectangle(image, (x_coord - thickness, y_coord - thickness),
(x_coord + x_extent + thickness, y_coord + (18 + thickness*4)),
color_array[detection[3]], -1)
cv2.putText(image, label + " " + (str(distance) + " m"),
(x_coord + (thickness * 4), y_coord + (10 + thickness * 4)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.rectangle(image, (x_coord - thickness, y_coord - thickness),
(x_coord + x_extent + thickness, y_coord + y_extent + thickness),
color_array[detection[3]], int(thickness*2))
cv2.imshow("ZED", image)
key = cv2.waitKey(5)
log.info("FPS: {}".format(1.0 / (time.time() - start_time)))
else:
key = cv2.waitKey(5)
cv2.destroyAllWindows()
cam.close()
log.info("\nFINISH")
# Script entry point: forward CLI arguments (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 32.590062 | 155 | 0.590369 |
import os
import sys
import time
import logging
import random
from random import randint
import math
import statistics
import getopt
from ctypes import *
import numpy as np
import cv2
import pyzed.sl as sl
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def sample(probs):
    """Draw a random index weighted by *probs* (weights need not sum to 1)."""
    total = sum(probs)
    remaining = random.uniform(0, 1)
    for idx, weight in enumerate(probs):
        remaining -= weight / total
        if remaining <= 0:
            return idx
    # Floating-point round-off can leave a sliver of probability mass.
    return len(probs) - 1
def c_array(ctype, values):
    """Copy *values* into a freshly allocated ctypes array of element type *ctype*."""
    array_type = ctype * len(values)
    return array_type(*values)
class BOX(Structure):
    # Mirrors darknet's C `box` struct; main() treats (x, y) as the box
    # centre and (w, h) as its extent when drawing.
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    # Mirrors darknet's C `detection` struct; `prob` points at one score
    # per class for this box.
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    # Mirrors darknet's C `image` struct: width, height, channel count and
    # a pointer to the float pixel buffer (owned by the Python side here).
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    # Mirrors darknet's C `metadata` struct: class count plus a C array of
    # class-name strings.
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
# Load the darknet shared library.  On Windows we choose between the GPU and
# CPU-only DLLs, honouring FORCE_CPU, a negative CUDA_VISIBLE_DEVICES, or a
# DARKNET_FORCE_CPU global; elsewhere we load the Linux shared object.
hasGPU = True
if os.name == "nt":
    cwd = os.path.dirname(__file__)
    os.environ['PATH'] = cwd + ';' + os.environ['PATH']
    winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
    winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
    envKeys = list()
    for k, v in os.environ.items():
        envKeys.append(k)
    try:
        try:
            # FORCE_CPU=1/true/yes/on requests the CPU-only build.
            tmp = os.environ["FORCE_CPU"].lower()
            if tmp in ["1", "true", "yes", "on"]:
                raise ValueError("ForceCPU")
            else:
                log.info("Flag value '"+tmp+"' not forcing CPU mode")
        except KeyError:
            # FORCE_CPU unset: a negative CUDA_VISIBLE_DEVICES also means CPU.
            if 'CUDA_VISIBLE_DEVICES' in envKeys:
                if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
                    raise ValueError("ForceCPU")
            try:
                global DARKNET_FORCE_CPU
                if DARKNET_FORCE_CPU:
                    raise ValueError("ForceCPU")
            except NameError:
                pass
        if not os.path.exists(winGPUdll):
            raise ValueError("NoDLL")
        lib = CDLL(winGPUdll, RTLD_GLOBAL)
    except (KeyError, ValueError):
        # CPU path requested (or GPU DLL missing): fall back to the no-GPU
        # DLL, or try the GPU DLL anyway as a last resort.
        hasGPU = False
        if os.path.exists(winNoGPUdll):
            lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
            log.warning("Notice: CPU-only mode")
        else:
            lib = CDLL(winGPUdll, RTLD_GLOBAL)
            log.warning("Environment variables indicated a CPU run, but we didn't find `" +
                        winNoGPUdll+"`. Trying a GPU run anyway.")
else:
    lib = CDLL("../libdarknet/libdarknet.so", RTLD_GLOBAL)
# --- ctypes signatures for the darknet C API --------------------------------
# Declaring argtypes/restype up front lets ctypes marshal arguments correctly
# (the default restype is c_int, which would truncate pointers on 64-bit).
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
# Raw forward pass over a float buffer.
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
    set_gpu = lib.cuda_set_device
    set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
# Box extraction and non-maximum-suppression helpers.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(
    c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
# Network and metadata loaders.
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
# Image I/O and colour-order helpers.
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
# Forward pass on a darknet IMAGE; returns a pointer to per-class scores.
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def array_to_image(arr):
    """Convert an HxWxC OpenCV image array into a darknet IMAGE.

    Returns ``(im, arr)``: ``im`` is a ctypes IMAGE whose data pointer
    aliases ``arr``'s buffer, so the caller MUST keep ``arr`` referenced
    for as long as ``im`` is in use, otherwise Python may free the pixel
    buffer out from under the C library.
    """
    # The original re-imported numpy locally; the module-level
    # `import numpy as np` already provides it, so the shadowing import
    # was removed.
    # darknet wants planar CHW float data in [0, 1]; OpenCV supplies HWC bytes.
    arr = arr.transpose(2, 0, 1)
    c = arr.shape[0]
    h = arr.shape[1]
    w = arr.shape[2]
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(w, h, c, data)
    return im, arr
def classify(net, meta, im):
    """Run the classifier on *im* and return (name, score) pairs, best first."""
    scores = predict_image(net, im)
    labelled = []
    for idx in range(meta.classes):
        name_tag = meta.names[idx] if altNames is None else altNames[idx]
        labelled.append((name_tag, scores[idx]))
    labelled.sort(key=lambda pair: -pair[1])
    return labelled
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False):
    """Run darknet detection on a BGR OpenCV image.

    Returns a list of (name, probability, (x, y, w, h), class_index)
    tuples sorted by descending probability.  `thresh` filters weak
    detections, `hier_thresh` is the hierarchical threshold passed to
    darknet, and a falsy `nms` disables non-maximum suppression.
    """
    custom_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    custom_image = cv2.resize(custom_image, (lib.network_width(
        net), lib.network_height(net)), interpolation=cv2.INTER_LINEAR)
    # NOTE: `arr` owns the pixel buffer `im.data` points into -- it must
    # stay referenced until the C calls below are done with `im`.
    im, arr = array_to_image(custom_image)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    # Passing the ORIGINAL image dimensions scales boxes back to its size.
    dets = get_network_boxes(
        net, image.shape[1], image.shape[0], thresh, hier_thresh, None, 0, pnum, 0)
    num = pnum[0]
    if nms:
        do_nms_sort(dets, num, meta.classes, nms)
    res = []
    if debug:
        log.debug("about to range")
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                if altNames is None:
                    name_tag = meta.names[i]
                else:
                    name_tag = altNames[i]
                res.append((name_tag, dets[j].prob[i], (b.x, b.y, b.w, b.h), i))
    res = sorted(res, key=lambda x: -x[1])
    # Release the C-side detection array only after copying into Python tuples.
    free_detections(dets, num)
    return res
# Module-level caches populated once by main(): the loaded network handle,
# its METADATA, and the class-name list parsed from the .data file.
netMain = None
metaMain = None
altNames = None
def get_object_depth(depth, bounds):
    """Median (x, y, z) position of a detection in the ZED point cloud.

    ``depth`` is the XYZRGBA measure array returned by the ZED SDK
    (indexed ``[row, col, channel]``); ``bounds`` is the
    (center_x, center_y, w, h) box produced by detect().  A small window
    around the box centre is sampled and the per-axis medians returned,
    or (-1, -1, -1) when no finite depth sample is available.
    """
    area_div = 2
    x_vect = []
    y_vect = []
    z_vect = []
    # Clamp the sampling window to the image: the original let border
    # detections either raise IndexError past the right/bottom edge or
    # silently wrap to the opposite edge via negative indices.
    height, width = depth.shape[0], depth.shape[1]
    j_lo = max(int(bounds[0] - area_div), 0)
    j_hi = min(int(bounds[0] + area_div), width)
    i_lo = max(int(bounds[1] - area_div), 0)
    i_hi = min(int(bounds[1] + area_div), height)
    for j in range(j_lo, j_hi):
        for i in range(i_lo, i_hi):
            z = depth[i, j, 2]
            if not np.isnan(z) and not np.isinf(z):
                x_vect.append(depth[i, j, 0])
                y_vect.append(depth[i, j, 1])
                z_vect.append(z)
    try:
        x_median = statistics.median(x_vect)
        y_median = statistics.median(y_vect)
        z_median = statistics.median(z_vect)
    except statistics.StatisticsError:
        # No finite samples in the window: return the sentinel the callers expect.
        x_median = -1
        y_median = -1
        z_median = -1
    return x_median, y_median, z_median
def generate_color(meta_path):
    """Return one stable pseudo-random RGB tuple per class in the .data file.

    A private ``random.Random(42)`` keeps the colours reproducible across
    runs without reseeding the process-wide RNG (the original called
    ``random.seed(42)``, silently clobbering global random state for the
    rest of the program).
    """
    rng = random.Random(42)
    with open(meta_path, 'r') as f:
        content = f.readlines()
    # Assumes the first line of the .data file is `classes=N`, which holds
    # for darknet-style files such as coco.data.
    class_num = int(content[0].split("=")[1])
    return [(rng.randint(0, 255), rng.randint(0, 255), rng.randint(0, 255))
            for _ in range(class_num)]
def main(argv):
    """CLI entry point: stream ZED frames through darknet and display
    detections annotated with their estimated distance.

    Flags: -c config, -w weights, -m meta (.data) file, -t detection
    threshold, -s SVO file (playback instead of live camera), -z ZED id.
    """
    thresh = 0.25
    darknet_path="../libdarknet/"
    config_path = darknet_path + "cfg/yolov3-tiny.cfg"
    weight_path = "yolov3-tiny.weights"
    meta_path = "coco.data"
    svo_path = None
    zed_id = 0
    help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
    try:
        opts, args = getopt.getopt(
            argv, "hc:w:m:t:s:z:", ["config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="])
    except getopt.GetoptError:
        log.exception(help_str)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            log.info(help_str)
            sys.exit()
        elif opt in ("-c", "--config"):
            config_path = arg
        elif opt in ("-w", "--weight"):
            weight_path = arg
        elif opt in ("-m", "--meta"):
            meta_path = arg
        elif opt in ("-t", "--threshold"):
            thresh = float(arg)
        elif opt in ("-s", "--svo_file"):
            svo_path = arg
        elif opt in ("-z", "--zed_id"):
            zed_id = int(arg)
    # Choose the input source: SVO recording playback, or a live camera.
    input_type = sl.InputType()
    if svo_path is not None:
        log.info("SVO file : " + svo_path)
        input_type.set_from_svo_file(svo_path)
    else:
        # Launch camera by id
        input_type.set_from_camera_id(zed_id)
    init = sl.InitParameters(input_t=input_type)
    init.coordinate_units = sl.UNIT.METER
    cam = sl.Camera()
    if not cam.is_opened():
        log.info("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()
    runtime = sl.RuntimeParameters()
    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()
    # Import the global variables. This lets us instance Darknet once,
    # then just call performDetect() again without instancing again
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(config_path):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(config_path)+"`")
    if not os.path.exists(weight_path):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weight_path)+"`")
    if not os.path.exists(meta_path):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(meta_path)+"`")
    if netMain is None:
        netMain = load_net_custom(config_path.encode(
            "ascii"), weight_path.encode("ascii"), 0, 1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(meta_path.encode("ascii"))
    if altNames is None:
        # In Python 3, the metafile default access craps out on Windows (but not Linux)
        # Read the names file and create a list to feed to detect
        try:
            with open(meta_path) as meta_fh:
                meta_contents = meta_fh.read()
                import re
                match = re.search("names *= *(.*)$", meta_contents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as names_fh:
                            names_list = names_fh.read().strip().split("\n")
                            altNames = [x.strip() for x in names_list]
                except TypeError:
                    pass
        except Exception:
            pass
    color_array = generate_color(meta_path)
    log.info("Running...")
    key = ''
    while key != 113:  # for 'q' key
        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            image = mat.get_data()
            cam.retrieve_measure(
                point_cloud_mat, sl.MEASURE.XYZRGBA)
            depth = point_cloud_mat.get_data()
            # Do the detection
            detections = detect(netMain, metaMain, image, thresh)
            log.info(chr(27) + "[2J"+"**** " + str(len(detections)) + " Results ****")
            for detection in detections:
                label = detection[0]
                confidence = detection[1]
                pstring = label+": "+str(np.rint(100 * confidence))+"%"
                log.info(pstring)
                bounds = detection[2]
                y_extent = int(bounds[3])
                x_extent = int(bounds[2])
                # Coordinates are around the center
                x_coord = int(bounds[0] - bounds[2]/2)
                y_coord = int(bounds[1] - bounds[3]/2)
                #boundingBox = [[x_coord, y_coord], [x_coord, y_coord + y_extent], [x_coord + x_extent, y_coord + y_extent], [x_coord + x_extent, y_coord]]
                thickness = 1
                # Distance of the detection from the camera, in metres.
                x, y, z = get_object_depth(depth, bounds)
                distance = math.sqrt(x * x + y * y + z * z)
                distance = "{:.2f}".format(distance)
                # Filled label background, label text, then box outline.
                cv2.rectangle(image, (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness, y_coord + (18 + thickness*4)),
                              color_array[detection[3]], -1)
                cv2.putText(image, label + " " + (str(distance) + " m"),
                            (x_coord + (thickness * 4), y_coord + (10 + thickness * 4)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.rectangle(image, (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness, y_coord + y_extent + thickness),
                              color_array[detection[3]], int(thickness*2))
            cv2.imshow("ZED", image)
            key = cv2.waitKey(5)
            log.info("FPS: {}".format(1.0 / (time.time() - start_time)))
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()
    cam.close()
    log.info("\nFINISH")
# Script entry point: forward CLI arguments (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| true | true |
f72292fc75e33b34e3a9adbf8068e3f766ac64b5 | 1,836 | py | Python | dvd/__main__.py | MajorcaDevs/dvd | b646f45fe741f69600e6cd1f2417fc70385eaf9a | [
"MIT"
] | 2 | 2021-06-27T14:57:17.000Z | 2021-12-04T07:26:23.000Z | dvd/__main__.py | MajorcaDevs/dvd | b646f45fe741f69600e6cd1f2417fc70385eaf9a | [
"MIT"
] | null | null | null | dvd/__main__.py | MajorcaDevs/dvd | b646f45fe741f69600e6cd1f2417fc70385eaf9a | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from .args import parse_arguments
from .ffmpeg_runner import run_ffmpeg
from .image_generator import generate_frame, generate_random_color_dvd_logo, get_scaled_dvd_logo
from .position_generator import generate_dvd_positions
def main():
    """Render a bouncing-DVD-logo animation and pipe BMP frames into ffmpeg.

    Frame positions come from generate_dvd_positions(); each bounce
    (``recalculate``) re-tints the logo with a random colour.  Progress is
    printed at most once per second.
    """
    args = parse_arguments()
    ffmpeg = run_ffmpeg(args.fps, args.live, *args.ffmpeg_args)
    resolution = (int(args.width / args.scale), int(args.height / args.scale))
    # Empirical scaling: logo size and speed track the output height.
    scl = (240 / args.height) * 40
    speed = int(1000 / args.scale / scl * 2)
    dvd_logo = get_scaled_dvd_logo(args.scale, scl)
    dvd_logo_color = dvd_logo
    # Live mode reports progress against a nominal 100-second window.
    total_points = int(100 * args.fps) if args.live else int(args.duration * args.fps)
    points = generate_dvd_positions(
        resolution,
        (dvd_logo.width, dvd_logo.height),
        speed,
        args.fps,
        None if args.live else args.duration,
    )
    i = 1
    # Pre-initialise the variables used by the summary line after the loop:
    # the original referenced x/y/tt unconditionally and raised NameError
    # whenever `points` yielded nothing.
    x = y = tt = 0
    last_print = datetime.utcnow() - timedelta(seconds=2)
    for x, y, recalculate in points:
        start_frame_time = datetime.utcnow()
        if recalculate:
            # Logo just bounced off an edge: give it a fresh random colour.
            dvd_logo_color = generate_random_color_dvd_logo(dvd_logo)
        generate_frame((int(x), int(y)), resolution, dvd_logo, dvd_logo_color).save(ffmpeg.stdin, 'BMP')
        end_frame_time = datetime.utcnow()
        took = end_frame_time - start_frame_time
        tt = took.seconds + took.microseconds / 1000000
        if (end_frame_time - last_print).seconds >= 1:
            # Throttle progress output to roughly once per second.
            print(f'\r{i * 100 // total_points}% {i}: ({int(x)}, {int(y)}) - {tt}s          \033[7D', end='')
            last_print = end_frame_time
        i += 1
    print(f'\r{i * 100 // total_points}% {i}: ({int(x)}, {int(y)}) - {tt}s          ')
    print('Waiting for ffmpeg to finish')
    ffmpeg.stdin.close()
    ffmpeg.wait()
    print('Finished')
# Script entry point.
if __name__ == '__main__':
    main()
| 34 | 106 | 0.648693 | from datetime import datetime, timedelta
from .args import parse_arguments
from .ffmpeg_runner import run_ffmpeg
from .image_generator import generate_frame, generate_random_color_dvd_logo, get_scaled_dvd_logo
from .position_generator import generate_dvd_positions
def main():
args = parse_arguments()
ffmpeg = run_ffmpeg(args.fps, args.live, *args.ffmpeg_args)
resolution = (int(args.width / args.scale), int(args.height / args.scale))
scl = (240 / args.height) * 40
speed = int(1000 / args.scale / scl * 2)
dvd_logo = get_scaled_dvd_logo(args.scale, scl)
dvd_logo_color = dvd_logo
total_points = int(100 * args.fps) if args.live else int(args.duration * args.fps)
points = generate_dvd_positions(
resolution,
(dvd_logo.width, dvd_logo.height),
speed,
args.fps,
None if args.live else args.duration,
)
i = 1
last_print = datetime.utcnow() - timedelta(seconds=2)
for x, y, recalculate in points:
start_frame_time = datetime.utcnow()
if recalculate:
dvd_logo_color = generate_random_color_dvd_logo(dvd_logo)
generate_frame((int(x), int(y)), resolution, dvd_logo, dvd_logo_color).save(ffmpeg.stdin, 'BMP')
end_frame_time = datetime.utcnow()
took = end_frame_time - start_frame_time
tt = took.seconds + took.microseconds / 1000000
if (end_frame_time - last_print).seconds >= 1:
print(f'\r{i * 100 // total_points}% {i}: ({int(x)}, {int(y)}) - {tt}s \033[7D', end='')
last_print = end_frame_time
i += 1
print(f'\r{i * 100 // total_points}% {i}: ({int(x)}, {int(y)}) - {tt}s ')
print('Waiting to ffmpeg to finish')
ffmpeg.stdin.close()
ffmpeg.wait()
print('Finished')
if __name__ == '__main__':
main()
| true | true |
f722931e20b014d69e2515142a9b789d4ce53e3f | 639 | py | Python | Ejercicio_1.py | VeronicaMoise/Trabajo_Git_DGP_VM | af6cfe394235b8eb8906f99f1c9224ea274953af | [
"MIT"
] | null | null | null | Ejercicio_1.py | VeronicaMoise/Trabajo_Git_DGP_VM | af6cfe394235b8eb8906f99f1c9224ea274953af | [
"MIT"
] | 1 | 2017-12-19T19:18:01.000Z | 2017-12-19T19:45:20.000Z | Ejercicio_1.py | VeronicaMoise/Trabajo_Git_DGP_VM | af6cfe394235b8eb8906f99f1c9224ea274953af | [
"MIT"
] | null | null | null | # Ejercicio 1 de DGP. Realizado por :
# Ramona Verónica Moise.
# Funciones en python
# La función suma de una lista por iteración:
def suma(l):
    """Return the sum of all elements of *l* (0 for an empty list)."""
    return sum(l)
# La función suma de cuadrados de los elementos pares de una lista por comprensión:
def suma_cuadrados(n):
    """Return the sum of the squares of the EVEN elements of *n*.

    Uses a generator expression instead of materialising a throwaway
    intermediate list inside sum() as the original did.
    """
    return sum(i**2 for i in n if i % 2 == 0)
# La función máximo de una lista:
def máximo(l):
    """Return the largest element of *l*, or -inf when *l* is empty."""
    best = -float("inf")
    for value in l:
        best = value if value > best else best
    return best
# La función multiplicación de un escalar por una lista (vector):
def prod_map(x, l):
    """Return a new list with every element of *l* multiplied by the scalar *x*.

    Fixes the original's missing ':' after the def header, which made the
    whole module fail to import with a SyntaxError.
    """
    return [x * n for n in l]
| 17.27027 | 60 | 0.624413 |
def suma(l):
    """Return the sum of all elements of *l* (0 for an empty list)."""
    return sum(l)
def suma_cuadrados(n):
    """Return the sum of the squares of the EVEN elements of *n*.

    Uses a generator expression instead of materialising a throwaway
    intermediate list inside sum() as the original did.
    """
    return sum(i**2 for i in n if i % 2 == 0)
def máximo(l):
    """Return the largest element of *l*, or -inf when *l* is empty."""
    best = -float("inf")
    for value in l:
        best = value if value > best else best
    return best
def prod_map(x, l):
    """Return a new list with every element of *l* multiplied by the scalar *x*.

    Fixes the original's missing ':' after the def header, which made the
    whole module fail to import with a SyntaxError.
    """
    return [x * n for n in l]
| false | true |
f72293db7cf1184566fd00221fdb982d329d1cd5 | 2,531 | py | Python | proto/uploader_pb2_grpc.py | techunits/file-upload-stream-grpc-python | 6c61170eb35d8afe522188f68fa3bd32b6dc82db | [
"MIT"
] | 1 | 2021-12-17T23:39:44.000Z | 2021-12-17T23:39:44.000Z | proto/uploader_pb2_grpc.py | techunits/file-upload-stream-grpc-python | 6c61170eb35d8afe522188f68fa3bd32b6dc82db | [
"MIT"
] | null | null | null | proto/uploader_pb2_grpc.py | techunits/file-upload-stream-grpc-python | 6c61170eb35d8afe522188f68fa3bd32b6dc82db | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import uploader_pb2 as uploader__pb2
class UploaderServiceStub(object):
    """Client-side stub for UploaderPkg.UploaderService (generated code --
    regenerate from the .proto rather than editing by hand)."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Client-streaming RPC: the client sends a stream of
        # FileUploadRequest chunks and receives a single FileUploadResponse.
        self.uploadFile = channel.stream_unary(
                '/UploaderPkg.UploaderService/uploadFile',
                request_serializer=uploader__pb2.FileUploadRequest.SerializeToString,
                response_deserializer=uploader__pb2.FileUploadResponse.FromString,
                )
class UploaderServiceServicer(object):
    """Server-side interface for UploaderPkg.UploaderService; subclass and
    override uploadFile (generated code)."""

    def uploadFile(self, request_iterator, context):
        """Consume a stream of FileUploadRequest messages and return one
        FileUploadResponse.  This default stub reports UNIMPLEMENTED."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_UploaderServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on *server* under the
    fully-qualified service name (generated code)."""
    rpc_method_handlers = {
            'uploadFile': grpc.stream_unary_rpc_method_handler(
                    servicer.uploadFile,
                    request_deserializer=uploader__pb2.FileUploadRequest.FromString,
                    response_serializer=uploader__pb2.FileUploadResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'UploaderPkg.UploaderService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class UploaderService(object):
    """Static client helpers for UploaderPkg.UploaderService, using the
    experimental grpc API (generated code)."""

    @staticmethod
    def uploadFile(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot client-streaming invocation without a pre-built stub/channel.
        return grpc.experimental.stream_unary(request_iterator, target, '/UploaderPkg.UploaderService/uploadFile',
            uploader__pb2.FileUploadRequest.SerializeToString,
            uploader__pb2.FileUploadResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 37.776119 | 114 | 0.68471 |
import grpc
from . import uploader_pb2 as uploader__pb2
class UploaderServiceStub(object):
def __init__(self, channel):
self.uploadFile = channel.stream_unary(
'/UploaderPkg.UploaderService/uploadFile',
request_serializer=uploader__pb2.FileUploadRequest.SerializeToString,
response_deserializer=uploader__pb2.FileUploadResponse.FromString,
)
class UploaderServiceServicer(object):
def uploadFile(self, request_iterator, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UploaderServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'uploadFile': grpc.stream_unary_rpc_method_handler(
servicer.uploadFile,
request_deserializer=uploader__pb2.FileUploadRequest.FromString,
response_serializer=uploader__pb2.FileUploadResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'UploaderPkg.UploaderService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class UploaderService(object):
@staticmethod
def uploadFile(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/UploaderPkg.UploaderService/uploadFile',
uploader__pb2.FileUploadRequest.SerializeToString,
uploader__pb2.FileUploadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| true | true |
f7229506eda09d6d691231425512d260fa98cec6 | 4,839 | py | Python | sdk/python/pulumi_aws_native/robomaker/get_robot_application.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/robomaker/get_robot_application.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/robomaker/get_robot_application.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetRobotApplicationResult',
'AwaitableGetRobotApplicationResult',
'get_robot_application',
'get_robot_application_output',
]
@pulumi.output_type
class GetRobotApplicationResult:
    """Result payload of the getRobotApplication invoke (generated code)."""
    def __init__(__self__, arn=None, current_revision_id=None, environment=None, robot_software_suite=None, sources=None, tags=None):
        # Each argument is type-checked (when truthy) and stashed via
        # pulumi.set so the @pulumi.getter properties below can read it.
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if current_revision_id and not isinstance(current_revision_id, str):
            raise TypeError("Expected argument 'current_revision_id' to be a str")
        pulumi.set(__self__, "current_revision_id", current_revision_id)
        if environment and not isinstance(environment, str):
            raise TypeError("Expected argument 'environment' to be a str")
        pulumi.set(__self__, "environment", environment)
        if robot_software_suite and not isinstance(robot_software_suite, dict):
            raise TypeError("Expected argument 'robot_software_suite' to be a dict")
        pulumi.set(__self__, "robot_software_suite", robot_software_suite)
        if sources and not isinstance(sources, list):
            raise TypeError("Expected argument 'sources' to be a list")
        pulumi.set(__self__, "sources", sources)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        """The ARN identifying the robot application."""
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="currentRevisionId")
    def current_revision_id(self) -> Optional[str]:
        """
        The revision ID of robot application.
        """
        return pulumi.get(self, "current_revision_id")
    @property
    @pulumi.getter
    def environment(self) -> Optional[str]:
        """
        The URI of the Docker image for the robot application.
        """
        return pulumi.get(self, "environment")
    @property
    @pulumi.getter(name="robotSoftwareSuite")
    def robot_software_suite(self) -> Optional['outputs.RobotApplicationRobotSoftwareSuite']:
        """The robot software suite configuration (RobotApplicationRobotSoftwareSuite)."""
        return pulumi.get(self, "robot_software_suite")
    @property
    @pulumi.getter
    def sources(self) -> Optional[Sequence['outputs.RobotApplicationSourceConfig']]:
        """
        The sources of the robot application.
        """
        return pulumi.get(self, "sources")
    @property
    @pulumi.getter
    def tags(self) -> Optional['outputs.RobotApplicationTags']:
        """Tags attached to the robot application (RobotApplicationTags)."""
        return pulumi.get(self, "tags")
class AwaitableGetRobotApplicationResult(GetRobotApplicationResult):
    """Awaitable wrapper so the invoke result works with `await` (generated code)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this into a generator function;
        # awaiting it immediately returns the already-resolved result.
        if False:
            yield self
        return GetRobotApplicationResult(
            arn=self.arn,
            current_revision_id=self.current_revision_id,
            environment=self.environment,
            robot_software_suite=self.robot_software_suite,
            sources=self.sources,
            tags=self.tags)
def get_robot_application(arn: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRobotApplicationResult:
    """
    AWS::RoboMaker::RobotApplication resource creates an AWS RoboMaker RobotApplication. Robot application can be used in AWS RoboMaker Simulation Jobs.

    :param arn: ARN of the robot application to look up.
    :param opts: Options controlling how the invoke is executed.
    """
    __args__ = dict()
    __args__['arn'] = arn
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version so the engine selects a matching plugin.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:robomaker:getRobotApplication', __args__, opts=opts, typ=GetRobotApplicationResult).value
    return AwaitableGetRobotApplicationResult(
        arn=__ret__.arn,
        current_revision_id=__ret__.current_revision_id,
        environment=__ret__.environment,
        robot_software_suite=__ret__.robot_software_suite,
        sources=__ret__.sources,
        tags=__ret__.tags)
@_utilities.lift_output_func(get_robot_application)
def get_robot_application_output(arn: Optional[pulumi.Input[str]] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRobotApplicationResult]:
    """
    AWS::RoboMaker::RobotApplication resource creates an AWS RoboMaker RobotApplication. Robot application can be used in AWS RoboMaker Simulation Jobs.

    :param arn: ARN (optionally an Output) of the robot application to look up.
    """
    # Body is supplied by the lift_output_func decorator.
    ...
| 38.712 | 152 | 0.687125 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetRobotApplicationResult',
'AwaitableGetRobotApplicationResult',
'get_robot_application',
'get_robot_application_output',
]
@pulumi.output_type
class GetRobotApplicationResult:
def __init__(__self__, arn=None, current_revision_id=None, environment=None, robot_software_suite=None, sources=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if current_revision_id and not isinstance(current_revision_id, str):
raise TypeError("Expected argument 'current_revision_id' to be a str")
pulumi.set(__self__, "current_revision_id", current_revision_id)
if environment and not isinstance(environment, str):
raise TypeError("Expected argument 'environment' to be a str")
pulumi.set(__self__, "environment", environment)
if robot_software_suite and not isinstance(robot_software_suite, dict):
raise TypeError("Expected argument 'robot_software_suite' to be a dict")
pulumi.set(__self__, "robot_software_suite", robot_software_suite)
if sources and not isinstance(sources, list):
raise TypeError("Expected argument 'sources' to be a list")
pulumi.set(__self__, "sources", sources)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="currentRevisionId")
def current_revision_id(self) -> Optional[str]:
return pulumi.get(self, "current_revision_id")
@property
@pulumi.getter
def environment(self) -> Optional[str]:
return pulumi.get(self, "environment")
@property
@pulumi.getter(name="robotSoftwareSuite")
def robot_software_suite(self) -> Optional['outputs.RobotApplicationRobotSoftwareSuite']:
return pulumi.get(self, "robot_software_suite")
@property
@pulumi.getter
def sources(self) -> Optional[Sequence['outputs.RobotApplicationSourceConfig']]:
return pulumi.get(self, "sources")
@property
@pulumi.getter
def tags(self) -> Optional['outputs.RobotApplicationTags']:
return pulumi.get(self, "tags")
class AwaitableGetRobotApplicationResult(GetRobotApplicationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRobotApplicationResult(
arn=self.arn,
current_revision_id=self.current_revision_id,
environment=self.environment,
robot_software_suite=self.robot_software_suite,
sources=self.sources,
tags=self.tags)
def get_robot_application(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRobotApplicationResult:
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:robomaker:getRobotApplication', __args__, opts=opts, typ=GetRobotApplicationResult).value
return AwaitableGetRobotApplicationResult(
arn=__ret__.arn,
current_revision_id=__ret__.current_revision_id,
environment=__ret__.environment,
robot_software_suite=__ret__.robot_software_suite,
sources=__ret__.sources,
tags=__ret__.tags)
@_utilities.lift_output_func(get_robot_application)
def get_robot_application_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRobotApplicationResult]:
...
| true | true |
f72295c8e8cc927508aed1db8b34de467166c7ee | 1,976 | py | Python | recipes/visit_struct/all/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/visit_struct/all/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | null | null | null | recipes/visit_struct/all/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | null | null | null | import os
from conans import ConanFile, tools
required_conan_version = ">=1.33.0"
class VisitStructConan(ConanFile):
    """Conan recipe packaging the header-only visit_struct library."""

    name = "visit_struct"
    description = "A miniature library for struct-field reflection in C++"
    topics = ("reflection", "introspection", "visitor", "struct-field-visitor",)
    homepage = "https://github.com/garbageslam/visit_struct"
    url = "https://github.com/conan-io/conan-center-index"
    license = "BSL-1.0"
    settings = "os", "arch", "compiler", "build_type",
    options = {
        "with_boost_fusion": [True, False],
        "with_boost_hana": [True, False],
    }
    default_options = {
        "with_boost_fusion": False,
        "with_boost_hana": False,
    }
    no_copy_source = True

    @property
    def _source_subfolder(self):
        # Folder the upstream sources are extracted into.
        return "source_subfolder"

    def requirements(self):
        # Boost is only needed when one of the Boost adapters is enabled.
        needs_boost = self.options.with_boost_fusion or self.options.with_boost_hana
        if needs_boost:
            self.requires("boost/1.78.0")

    def package_id(self):
        self.info.header_only()

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, "11")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)

    def package(self):
        self.copy(pattern="LICENSE", src=self._source_subfolder, dst="licenses")
        include_src = os.path.join(self._source_subfolder, "include")
        # Core headers always ship; Boost adapter headers only when enabled.
        header_patterns = ["*visit_struct.hpp", "*visit_struct_intrusive.hpp"]
        if self.options.with_boost_fusion:
            header_patterns.append("*visit_struct_boost_fusion.hpp")
        if self.options.with_boost_hana:
            header_patterns.append("*visit_struct_boost_hana.hpp")
        for pattern in header_patterns:
            self.copy(pattern=pattern, src=include_src, dst="include")
| 39.52 | 131 | 0.674089 | import os
from conans import ConanFile, tools
# Minimum Conan client version required to load this recipe.
required_conan_version = ">=1.33.0"
class VisitStructConan(ConanFile):
    """Conan recipe packaging the header-only visit_struct library."""
    name = "visit_struct"
    description = "A miniature library for struct-field reflection in C++"
    topics = ("reflection", "introspection", "visitor", "struct-field-visitor",)
    homepage = "https://github.com/garbageslam/visit_struct"
    url = "https://github.com/conan-io/conan-center-index"
    license = "BSL-1.0"
    settings = "os", "arch", "compiler", "build_type",
    # Optional adapters for visiting via Boost.Fusion / Boost.Hana.
    options = {
        "with_boost_fusion": [True, False],
        "with_boost_hana": [True, False],
    }
    default_options = {
        "with_boost_fusion": False,
        "with_boost_hana": False,
    }
    # Header-only: sources never need to be copied into the build folder.
    no_copy_source = True
    @property
    def _source_subfolder(self):
        # Folder the upstream sources are extracted into.
        return "source_subfolder"
    def requirements(self):
        # Boost is only required when one of the Boost adapters is enabled.
        if self.options.with_boost_fusion or self.options.with_boost_hana:
            self.requires("boost/1.78.0")
    def package_id(self):
        # Mark the package as header-only for package-id computation.
        self.info.header_only()
    def validate(self):
        # Enforce at least C++11 when the consumer pins a cppstd.
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, "11")
    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
    def package(self):
        # License plus the core headers; Boost adapter headers only if enabled.
        self.copy(pattern="LICENSE", src=self._source_subfolder, dst="licenses")
        self.copy(pattern="*visit_struct.hpp", src=os.path.join(self._source_subfolder, "include"), dst="include")
        self.copy(pattern="*visit_struct_intrusive.hpp", src=os.path.join(self._source_subfolder, "include"), dst="include")
        if self.options.with_boost_fusion:
            self.copy(pattern="*visit_struct_boost_fusion.hpp", src=os.path.join(self._source_subfolder, "include"), dst="include")
        if self.options.with_boost_hana:
            self.copy(pattern="*visit_struct_boost_hana.hpp", src=os.path.join(self._source_subfolder, "include"), dst="include")
| true | true |
f722966edd30cb5077e170925645898dcb7db5bd | 5,229 | py | Python | qa/Rules/GuideCreatorRules/GuideCreatorRule.py | tartarini/MAF3 | f9614d36591754544b23e3a670980799254dfd2c | [
"Apache-2.0"
] | 1 | 2021-05-10T19:01:48.000Z | 2021-05-10T19:01:48.000Z | qa/Rules/GuideCreatorRules/GuideCreatorRule.py | examyes/MAF3 | f9614d36591754544b23e3a670980799254dfd2c | [
"Apache-2.0"
] | null | null | null | qa/Rules/GuideCreatorRules/GuideCreatorRule.py | examyes/MAF3 | f9614d36591754544b23e3a670980799254dfd2c | [
"Apache-2.0"
] | 1 | 2018-02-06T03:51:57.000Z | 2018-02-06T03:51:57.000Z | from xml.dom import minidom as xd
import re
from AbstractRule import AbstractRule
import os
class GuideCreatorRule(AbstractRule):
    """QA rule that extracts guide markup from source files.

    Files whose base name matches the regex in ``self.ParameterList[0]`` are
    scanned for ``//! <title>``, ``//! <description>`` and ``//! <snippet>``
    blocks; the collected content is appended to ``self.MarkedList`` as one
    ``<item>...</item>`` fragment per file.
    """

    def __init__(self):
        AbstractRule.__init__(self)

    def execute(self):
        """Scan ``self.FullPathInputFile`` for guide tags.

        Returns ``self.MarkedList`` (with the assembled fragment appended
        when the file name matched the configured regex).
        """
        commentTag = "//! "
        startTitleTag = "//! <title>"
        startDescriptionTag = "//! <description>"
        startSnippetTag = "//! <snippet>"
        endTitleTag = "//! </title>"
        endDescriptionTag = "//! </description>"
        endSnippetTag = "//! </snippet>"
        snippetsCounter = 0
        className = os.path.basename(self.FullPathInputFile)
        className = os.path.splitext(className)[0] #without extension
        x = re.compile(self.ParameterList[0])
        if(re.match(x, str(className))): #get only test classes
            # NOTE: `file` shadows the builtin; kept for minimal diff.
            file = open(self.FullPathInputFile)
            guideLines = "<item>\n"
            while True: #iterate on file lines
                line = file.readline()
                if len(line) == 0: break
                #Search for Title tag
                startTitle = line.find(startTitleTag)
                if (startTitle != -1):
                    while True:
                        if(line.find(endTitleTag) != -1): #if startTag and endTag are on the same line
                            commentLine = line[len(startTitleTag):line.find(endTitleTag)]
                            guideLines += line[startTitle:len(startTitleTag)]
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endTitleTag):(line.find(endTitleTag)+len(endTitleTag))]
                            guideLines += "\n"
                            break
                        guideLines += line.lstrip()
                        line = file.readline()
                        endTitle = line.find(endTitleTag)
                        if (endTitle != -1):
                            commentLine = line[0:line.find(endTitleTag)].lstrip()
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endTitleTag):(line.find(endTitleTag)+len(endTitleTag))]
                            guideLines += "\n"
                            break
                        line = self.replaceLine(line)
                        if len(line) == 0: break #to avoid infinite loop
                #Search for Description tag
                startDescription = line.find(startDescriptionTag)
                if (startDescription != -1):
                    while True:
                        if(line.find(endDescriptionTag) != -1): #if startTag and endTag are on the same line
                            commentLine = line[len(startDescriptionTag):line.find(endDescriptionTag)]
                            guideLines += line[startDescription:len(startDescriptionTag)]
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endDescriptionTag):(line.find(endDescriptionTag) + len(endDescriptionTag))]
                            guideLines += "\n"
                            break
                        guideLines += line.lstrip()
                        line = file.readline()
                        endDescription = line.find(endDescriptionTag)
                        if (endDescription != -1):
                            commentLine = line[0:line.find(endDescriptionTag)].lstrip()
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endDescriptionTag):(line.find(endDescriptionTag)+len(endDescriptionTag))]
                            guideLines += "\n"
                            break
                        line = self.replaceLine(line)
                        if len(line) == 0: break #to avoid infinite loop
                #Search for Snippet tag
                startSnippet = line.find(startSnippetTag)
                if (startSnippet != -1):
                    if(snippetsCounter == 0):
                        guideLines += "<snippets>\n"
                    snippetsCounter = snippetsCounter+1
                    while True:
                        if(line.find(endSnippetTag) != -1): #if startTag and endTag are on the same line
                            commentLine = line[len(startSnippetTag):line.find(endSnippetTag)]
                            guideLines += line[startSnippet:len(startSnippetTag)]
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endSnippetTag):(line.find(endSnippetTag) + len(endSnippetTag))]
                            guideLines += "\n"
                            break
                        guideLines += line.lstrip()
                        line = file.readline()
                        endSnippet = line.find(endSnippetTag)
                        if (endSnippet != -1):
                            commentLine = line[0:line.find(endSnippetTag)].lstrip()
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endSnippetTag):(line.find(endSnippetTag)+len(endSnippetTag))]
                            guideLines += "\n"
                            break
                        line = self.replaceLine(line)
                        if len(line) == 0: break #to avoid infinite loop
            if(snippetsCounter != 0):
                guideLines += "</snippets>\n"
            file.close()
            guideLines = guideLines.strip()
            guideLines = guideLines.replace(commentTag,"")
            guideLines = guideLines.replace("//","")
            guideLines += "</item>"
            self.MarkedList.append(guideLines)
        return self.MarkedList

    def replaceLine(self, lineOld):
        """Escape XML special characters in extracted text.

        Fix: the previous version replaced each character with itself
        (a no-op); the intended XML entities are restored here, escaping
        '&' first so the other entities are not double-escaped.
        """
        lineNew = lineOld.replace("&", "&amp;")
        lineNew = lineNew.replace(">", "&gt;")
        lineNew = lineNew.replace("<", "&lt;")
        return lineNew
| 43.941176 | 120 | 0.571429 | from xml.dom import minidom as xd
import re
from AbstractRule import AbstractRule
import os
class GuideCreatorRule(AbstractRule):
    """QA rule that extracts guide markup (``//! <title>``,
    ``//! <description>`` and ``//! <snippet>`` blocks) from matching source
    files and appends each file's content as an ``<item>`` fragment to
    ``self.MarkedList``."""
    def __init__(self):
        AbstractRule.__init__(self)
    def execute(self):
        """Scan ``self.FullPathInputFile`` for guide tags and return
        ``self.MarkedList`` (only files whose base name matches the regex in
        ``self.ParameterList[0]`` are processed)."""
        commentTag = "//! "
        startTitleTag = "//! <title>"
        startDescriptionTag = "//! <description>"
        startSnippetTag = "//! <snippet>"
        endTitleTag = "//! </title>"
        endDescriptionTag = "//! </description>"
        endSnippetTag = "//! </snippet>"
        snippetsCounter = 0
        className = os.path.basename(self.FullPathInputFile)
        className = os.path.splitext(className)[0]
        x = re.compile(self.ParameterList[0])
        if(re.match(x, str(className))):
            file = open(self.FullPathInputFile)
            guideLines = "<item>\n"
            while True:
                line = file.readline()
                if len(line) == 0: break
                # Title block: copy everything between the title tags.
                startTitle = line.find(startTitleTag)
                if (startTitle != -1):
                    while True:
                        # Start and end tag on the same line.
                        if(line.find(endTitleTag) != -1):
                            commentLine = line[len(startTitleTag):line.find(endTitleTag)]
                            guideLines += line[startTitle:len(startTitleTag)]
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endTitleTag):(line.find(endTitleTag)+len(endTitleTag))]
                            guideLines += "\n"
                            break
                        guideLines += line.lstrip()
                        line = file.readline()
                        endTitle = line.find(endTitleTag)
                        if (endTitle != -1):
                            commentLine = line[0:line.find(endTitleTag)].lstrip()
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endTitleTag):(line.find(endTitleTag)+len(endTitleTag))]
                            guideLines += "\n"
                            break
                        line = self.replaceLine(line)
                        # EOF guard: avoid looping forever on a missing end tag.
                        if len(line) == 0: break
                # Description block: same scheme as the title block.
                startDescription = line.find(startDescriptionTag)
                if (startDescription != -1):
                    while True:
                        if(line.find(endDescriptionTag) != -1):
                            commentLine = line[len(startDescriptionTag):line.find(endDescriptionTag)]
                            guideLines += line[startDescription:len(startDescriptionTag)]
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endDescriptionTag):(line.find(endDescriptionTag) + len(endDescriptionTag))]
                            guideLines += "\n"
                            break
                        guideLines += line.lstrip()
                        line = file.readline()
                        endDescription = line.find(endDescriptionTag)
                        if (endDescription != -1):
                            commentLine = line[0:line.find(endDescriptionTag)].lstrip()
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endDescriptionTag):(line.find(endDescriptionTag)+len(endDescriptionTag))]
                            guideLines += "\n"
                            break
                        line = self.replaceLine(line)
                        if len(line) == 0: break
                # Snippet block: wrapped in a single <snippets> element.
                startSnippet = line.find(startSnippetTag)
                if (startSnippet != -1):
                    if(snippetsCounter == 0):
                        guideLines += "<snippets>\n"
                    snippetsCounter = snippetsCounter+1
                    while True:
                        if(line.find(endSnippetTag) != -1):
                            commentLine = line[len(startSnippetTag):line.find(endSnippetTag)]
                            guideLines += line[startSnippet:len(startSnippetTag)]
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endSnippetTag):(line.find(endSnippetTag) + len(endSnippetTag))]
                            guideLines += "\n"
                            break
                        guideLines += line.lstrip()
                        line = file.readline()
                        endSnippet = line.find(endSnippetTag)
                        if (endSnippet != -1):
                            commentLine = line[0:line.find(endSnippetTag)].lstrip()
                            guideLines += self.replaceLine(commentLine)
                            guideLines += line[line.find(endSnippetTag):(line.find(endSnippetTag)+len(endSnippetTag))]
                            guideLines += "\n"
                            break
                        line = self.replaceLine(line)
                        if len(line) == 0: break
            if(snippetsCounter != 0):
                guideLines += "</snippets>\n"
            file.close()
            guideLines = guideLines.strip()
            guideLines = guideLines.replace(commentTag,"")
            guideLines = guideLines.replace("//","")
            guideLines += "</item>"
            self.MarkedList.append(guideLines)
        return self.MarkedList
    def replaceLine(self, lineOld):
        # NOTE(review): each replace() maps a character to itself, so this is
        # currently a no-op. It looks like mangled XML entity escaping
        # ("&amp;", "&gt;", "&lt;") - confirm against the original source.
        lineNew = lineOld.replace("&","&")
        lineNew = lineNew.replace(">",">")
        lineNew = lineNew.replace("<","<")
        return lineNew
| true | true |
f722969bd923c7bf13836a6687a1de7dd1b499db | 1,145 | py | Python | python/pyhusky/frontend/library/logistic_regression_receiver.py | husky-team/PyHusky | ac3638101d9147890f9360bcfd18293f2ec1a9a3 | [
"Apache-2.0"
] | 9 | 2017-02-28T08:14:42.000Z | 2019-07-16T19:20:12.000Z | python/pyhusky/frontend/library/logistic_regression_receiver.py | husky-team/PyHusky | ac3638101d9147890f9360bcfd18293f2ec1a9a3 | [
"Apache-2.0"
] | 13 | 2017-01-03T08:29:05.000Z | 2017-06-05T11:03:57.000Z | python/pyhusky/frontend/library/logistic_regression_receiver.py | husky-team/PyHusky | ac3638101d9147890f9360bcfd18293f2ec1a9a3 | [
"Apache-2.0"
] | 6 | 2017-01-05T02:14:19.000Z | 2020-01-22T04:07:17.000Z | # Copyright 2016 Husky Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LogisticRegressionModelReceiver(object):
    """Decodes a trained logistic-regression parameter vector from a reply."""

    def __init__(self):
        pass

    @staticmethod
    def register(receiver_map):
        # Route the backend handler name to the decoding callback below.
        receiver_map["LogisticRegressionModel#LogisticR_train_py"] = LogisticRegressionModelReceiver.train_receiver

    @staticmethod
    def train_receiver(reply):
        # Skip the dummy int64 (serialized string length), then read the
        # parameter count followed by that many doubles.
        reply.load_int64()
        num_params = reply.load_int32()
        return [reply.load_double() for _ in xrange(num_params)]
| 34.69697 | 115 | 0.71441 |
class LogisticRegressionModelReceiver(object):
    """Receiver that decodes a trained logistic-regression parameter vector
    from a backend reply."""
    def __init__(self):
        pass
    @staticmethod
    def register(receiver_map):
        # Map the backend handler name to the decoding callback below.
        receiver_map["LogisticRegressionModel#LogisticR_train_py"] = LogisticRegressionModelReceiver.train_receiver
    @staticmethod
    def train_receiver(reply):
        res = []
        # The leading int64 is discarded (dummy value before the payload).
        dummy = reply.load_int64()
        n_params = reply.load_int32()
        # Read n_params doubles - the model parameter vector.
        for _ in xrange(n_params):
            param_v = reply.load_double()
            res.append(param_v)
        return res
| true | true |
f72296b0800a491f3ac06891ac78f803cd21e285 | 13,925 | py | Python | rasa_core/processor.py | deepak02/rasa_core | 2a9c583b6f5277a2ca07b8277af74947c8823b2e | [
"Apache-2.0"
] | 46 | 2017-11-16T06:03:48.000Z | 2022-03-06T18:25:15.000Z | rasa_core/processor.py | deepak02/rasa_core | 2a9c583b6f5277a2ca07b8277af74947c8823b2e | [
"Apache-2.0"
] | null | null | null | rasa_core/processor.py | deepak02/rasa_core | 2a9c583b6f5277a2ca07b8277af74947c8823b2e | [
"Apache-2.0"
] | 21 | 2018-02-05T09:59:40.000Z | 2020-09-24T14:39:16.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from types import LambdaType
from apscheduler.schedulers.background import BackgroundScheduler
from typing import Optional, List, Dict, Any
from typing import Text
from rasa_core.actions import Action
from rasa_core.actions.action import ActionRestart, ACTION_LISTEN_NAME
from rasa_core.channels import UserMessage, InputChannel
from rasa_core.channels.direct import CollectingOutputChannel
from rasa_core.dispatcher import Dispatcher
from rasa_core.domain import Domain
from rasa_core.events import Restarted, ReminderScheduled, Event
from rasa_core.events import UserUttered, ActionExecuted
from rasa_core.interpreter import NaturalLanguageInterpreter
from rasa_core.interpreter import RegexInterpreter
from rasa_core.policies.ensemble import PolicyEnsemble
from rasa_core.tracker_store import TrackerStore
from rasa_core.trackers import DialogueStateTracker
# Process-wide background scheduler used to fire delayed reminder jobs
# (see MessageProcessor._schedule_reminders); started eagerly at import time.
scheduler = BackgroundScheduler()
scheduler.start()
logger = logging.getLogger(__name__)
class MessageProcessor(object):
    """Drives the dialogue loop for one deployment.

    Parses incoming user messages with the NLU interpreter, logs them on a
    per-sender tracker, and repeatedly asks the policy ensemble for the next
    action until the bot decides to listen again.
    """
    def __init__(self,
                 interpreter, # type: NaturalLanguageInterpreter
                 policy_ensemble, # type: PolicyEnsemble
                 domain, # type: Domain
                 tracker_store, # type: TrackerStore
                 max_number_of_predictions=10, # type: int
                 message_preprocessor=None, # type: Optional[LambdaType]
                 on_circuit_break=None # type: Optional[LambdaType]
                 ):
        # max_number_of_predictions bounds the action loop (circuit breaker);
        # on_circuit_break is an optional callback invoked when it trips.
        self.interpreter = interpreter
        self.policy_ensemble = policy_ensemble
        self.domain = domain
        self.tracker_store = tracker_store
        self.max_number_of_predictions = max_number_of_predictions
        self.message_preprocessor = message_preprocessor
        self.on_circuit_break = on_circuit_break
    def handle_channel(self, input_channel=None):
        # type: (InputChannel) -> None
        """Handles the input channel synchronously.
        Each message gets processed directly after it got received."""
        input_channel.start_sync_listening(self.handle_message)
    def handle_channel_asynchronous(self, message_queue):
        """Handles incoming messages from the message queue.
        An input channel should add messages to the queue asynchronously."""
        while True:
            message = message_queue.dequeue()
            if message is None:
                continue
            self.handle_message(message)
    def handle_message(self, message):
        # type: (UserMessage) -> Optional[List[Text]]
        """Handle a single message with this processor."""
        # preprocess message if necessary
        if self.message_preprocessor is not None:
            message.text = self.message_preprocessor(message.text)
        # we have a Tracker instance for each user
        # which maintains conversation state
        tracker = self._get_tracker(message.sender_id)
        self._handle_message_with_tracker(message, tracker)
        self._predict_and_execute_next_action(message, tracker)
        # save tracker state to continue conversation from this state
        self._save_tracker(tracker)
        # Collecting channels buffer outgoing messages; return their texts.
        if isinstance(message.output_channel, CollectingOutputChannel):
            return [outgoing_message
                    for _, outgoing_message in message.output_channel.messages]
        else:
            return None
    def start_message_handling(self, message):
        # type: (UserMessage) -> Dict[Text, Any]
        """Log the message on the tracker and return the first predicted
        action (without executing it) together with the tracker state."""
        # pre-process message if necessary
        if self.message_preprocessor is not None:
            message.text = self.message_preprocessor(message.text)
        # we have a Tracker instance for each user
        # which maintains conversation state
        tracker = self._get_tracker(message.sender_id)
        self._handle_message_with_tracker(message, tracker)
        # Log currently set slots
        self._log_slots(tracker)
        # action loop. predicts actions until we hit action listen
        if self._should_handle_message(tracker):
            # this actually just calls the policy's method by the same name
            action = self._get_next_action(tracker)
            # save tracker state to continue conversation from this state
            self._save_tracker(tracker)
            return {"next_action": action.name(),
                    "tracker": tracker.current_state()}
        else:
            return {"next_action": None,
                    "tracker": tracker.current_state()}
    def continue_message_handling(self, sender_id, executed_action, events):
        # type: (Text, Text, List[Event]) -> Dict[Text, Any]
        """Log an externally executed action (plus its events) and return the
        next predicted action, or ``None`` once the bot should listen."""
        tracker = self._get_tracker(sender_id)
        self._log_action_on_tracker(tracker, executed_action, events)
        if self._should_predict_another_action(executed_action, events):
            action = self._get_next_action(tracker)
            # save tracker state to continue conversation from this state
            self._save_tracker(tracker)
            return {"next_action": action.name(),
                    "tracker": tracker.current_state()}
        else:
            return {"next_action": None,
                    "tracker": tracker.current_state()}
    def _log_slots(self, tracker):
        """Debug-log the tracker's current slot values."""
        # Log currently set slots
        slot_values = "\n".join(["\t{}: {}".format(s.name, s.value)
                                 for s in tracker.slots.values()])
        logger.debug("Current slot values: \n{}".format(slot_values))
    def handle_reminder(self, reminder_event, dispatcher):
        # type: (ReminderScheduled, Dispatcher) -> None
        """Handle a reminder that is triggered asynchronously."""
        def has_message_after_reminder(tracker):
            """If the user sent a message after the reminder got scheduled -
            it might be better to cancel it."""
            # Walk events newest-first: a UserUttered before the scheduling
            # event means the user spoke after the reminder was set up.
            for e in reversed(tracker.events):
                if isinstance(e,
                              ReminderScheduled) and e.name == \
                        reminder_event.name:
                    return False
                elif isinstance(e, UserUttered):
                    return True
            return True # tracker has probably been restarted
        tracker = self._get_tracker(dispatcher.sender)
        if (reminder_event.kill_on_user_message and
                has_message_after_reminder(tracker)):
            logger.debug("Canceled reminder because it is outdated. "
                         "(event: {} id: {})".format(reminder_event.action_name,
                                                     reminder_event.name))
        else:
            # necessary for proper featurization, otherwise the previous
            # unrelated message would influence featurization
            tracker.update(UserUttered.empty())
            action = self.domain.action_for_name(reminder_event.action_name)
            should_continue = self._run_action(action, tracker, dispatcher)
            if should_continue:
                user_msg = UserMessage(None,
                                       dispatcher.output_channel,
                                       dispatcher.sender)
                self._predict_and_execute_next_action(user_msg, tracker)
            # save tracker state to continue conversation from this state
            self._save_tracker(tracker)
    def _parse_message(self, message):
        """Run NLU (or the regex shortcut) on the message text and return the
        parse data dict."""
        # for testing - you can short-cut the NLU part with a message
        # in the format _intent[entity1=val1,entity=val2]
        # parse_data is a dict of intent & entities
        if message.text.startswith('_'):
            parse_data = RegexInterpreter().parse(message.text)
        else:
            parse_data = self.interpreter.parse(message.text)
        logger.debug("Received user message '{}' with intent '{}' "
                     "and entities '{}'".format(message.text,
                                                parse_data["intent"],
                                                parse_data["entities"]))
        return parse_data
    def _handle_message_with_tracker(self, message, tracker):
        # type: (UserMessage, DialogueStateTracker) -> None
        """Parse the message and log the utterance plus entity slots on the
        tracker."""
        parse_data = self._parse_message(message)
        # don't ever directly mutate the tracker - instead pass it events to log
        tracker.update(UserUttered(message.text, parse_data["intent"],
                                   parse_data["entities"], parse_data))
        # store all entities as slots
        for e in self.domain.slots_for_entities(parse_data["entities"]):
            tracker.update(e)
        logger.debug("Logged UserUtterance - "
                     "tracker now has {} events".format(len(tracker.events)))
    def _should_handle_message(self, tracker):
        """A paused conversation only handles messages carrying the restart
        intent."""
        return (not tracker.is_paused() or
                tracker.latest_message.intent.get("name") ==
                self.domain.restart_intent)
    def _predict_and_execute_next_action(self, message, tracker):
        """Run the predict/execute loop until the bot listens, the tracker is
        paused, or the circuit breaker trips."""
        # this will actually send the response to the user
        dispatcher = Dispatcher(message.sender_id,
                                message.output_channel,
                                self.domain)
        # keep taking actions decided by the policy until it chooses to 'listen'
        should_predict_another_action = True
        num_predicted_actions = 0
        self._log_slots(tracker)
        # action loop. predicts actions until we hit action listen
        while should_predict_another_action and \
                self._should_handle_message(tracker) and \
                num_predicted_actions < self.max_number_of_predictions:
            # this actually just calls the policy's method by the same name
            action = self._get_next_action(tracker)
            should_predict_another_action = self._run_action(action,
                                                             tracker,
                                                             dispatcher)
            num_predicted_actions += 1
        if num_predicted_actions == self.max_number_of_predictions and \
                should_predict_another_action:
            # circuit breaker was tripped
            logger.warn(
                "Circuit breaker tripped. Stopped predicting "
                "more actions for sender '{}'".format(tracker.sender_id))
            if self.on_circuit_break:
                # call a registered callback
                self.on_circuit_break(tracker, dispatcher)
        logger.debug("Current topic: {}".format(tracker.topic.name))
    def _should_predict_another_action(self, action_name, events):
        """The loop stops after a listen action or a conversation restart."""
        is_listen_action = action_name == ACTION_LISTEN_NAME
        contains_restart = events and isinstance(events[0], Restarted)
        return not is_listen_action and not contains_restart
    def _schedule_reminders(self, events, dispatcher):
        # type: (List[Event], Dispatcher) -> None
        """Uses the scheduler to time a job to trigger the passed reminder.
        Reminders with the same `id` property will overwrite one another
        (i.e. only one of them will eventually run)."""
        if events is not None:
            for e in events:
                if isinstance(e, ReminderScheduled):
                    scheduler.add_job(self.handle_reminder, "date",
                                      run_date=e.trigger_date_time,
                                      args=[e, dispatcher],
                                      id=e.name,
                                      replace_existing=True)
    def _run_action(self, action, tracker, dispatcher):
        """Execute one action, log its events, schedule any reminders, and
        report whether another action should be predicted."""
        # events and return values are used to update
        # the tracker state after an action has been taken
        events = action.run(dispatcher, tracker, self.domain)
        self._log_action_on_tracker(tracker, action.name(), events)
        self._schedule_reminders(events, dispatcher)
        return self._should_predict_another_action(action.name(), events)
    def _log_action_on_tracker(self, tracker, action_name, events):
        """Record an executed action and its resulting events on the tracker."""
        # Ensures that the code still works even if a lazy programmer missed
        # to type `return []` at the end of an action or the run method
        # returns `None` for some other reason.
        if events is None:
            events = []
        logger.debug("Action '{}' ended with events '{}'".format(
            action_name, ['{}'.format(e) for e in events]))
        # log the action and its produced events
        tracker.update(ActionExecuted(action_name))
        for e in events:
            tracker.update(e)
    def _get_tracker(self, sender):
        # type: (Text) -> DialogueStateTracker
        """Fetch (or create) the tracker for a sender, falling back to the
        default sender id when none is given."""
        sender_id = sender or UserMessage.DEFAULT_SENDER
        tracker = self.tracker_store.get_or_create_tracker(sender_id)
        return tracker
    def _save_tracker(self, tracker):
        """Persist the tracker in the configured tracker store."""
        self.tracker_store.save(tracker)
    def _get_next_action(self, tracker):
        # type: (DialogueStateTracker) -> Action
        """Pick the next action: a pending follow-up action if one is known to
        the domain, a restart on the restart intent, otherwise the policy
        ensemble's prediction."""
        follow_up_action = tracker.follow_up_action
        if follow_up_action:
            tracker.clear_follow_up_action()
            if self.domain.index_for_action(
                    follow_up_action.name()) is not None:
                return follow_up_action
            else:
                logger.error(
                    "Trying to run unknown follow up action '{}'!"
                    "Instead of running that, we will ignore the action "
                    "and predict the next action.".format(follow_up_action))
        if tracker.latest_message.intent.get("name") == \
                self.domain.restart_intent:
            return ActionRestart()
        idx = self.policy_ensemble.predict_next_action(tracker, self.domain)
        return self.domain.action_for_index(idx)
| 42.978395 | 80 | 0.629731 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from types import LambdaType
from apscheduler.schedulers.background import BackgroundScheduler
from typing import Optional, List, Dict, Any
from typing import Text
from rasa_core.actions import Action
from rasa_core.actions.action import ActionRestart, ACTION_LISTEN_NAME
from rasa_core.channels import UserMessage, InputChannel
from rasa_core.channels.direct import CollectingOutputChannel
from rasa_core.dispatcher import Dispatcher
from rasa_core.domain import Domain
from rasa_core.events import Restarted, ReminderScheduled, Event
from rasa_core.events import UserUttered, ActionExecuted
from rasa_core.interpreter import NaturalLanguageInterpreter
from rasa_core.interpreter import RegexInterpreter
from rasa_core.policies.ensemble import PolicyEnsemble
from rasa_core.tracker_store import TrackerStore
from rasa_core.trackers import DialogueStateTracker
# Process-wide background scheduler used to fire delayed reminder jobs
# (see MessageProcessor._schedule_reminders); started eagerly at import time.
scheduler = BackgroundScheduler()
scheduler.start()
logger = logging.getLogger(__name__)
class MessageProcessor(object):
def __init__(self,
interpreter,
policy_ensemble,
domain,
tracker_store,
max_number_of_predictions=10,
message_preprocessor=None,
on_circuit_break=None
):
self.interpreter = interpreter
self.policy_ensemble = policy_ensemble
self.domain = domain
self.tracker_store = tracker_store
self.max_number_of_predictions = max_number_of_predictions
self.message_preprocessor = message_preprocessor
self.on_circuit_break = on_circuit_break
def handle_channel(self, input_channel=None):
input_channel.start_sync_listening(self.handle_message)
def handle_channel_asynchronous(self, message_queue):
while True:
message = message_queue.dequeue()
if message is None:
continue
self.handle_message(message)
def handle_message(self, message):
if self.message_preprocessor is not None:
message.text = self.message_preprocessor(message.text)
tracker = self._get_tracker(message.sender_id)
self._handle_message_with_tracker(message, tracker)
self._predict_and_execute_next_action(message, tracker)
self._save_tracker(tracker)
if isinstance(message.output_channel, CollectingOutputChannel):
return [outgoing_message
for _, outgoing_message in message.output_channel.messages]
else:
return None
def start_message_handling(self, message):
if self.message_preprocessor is not None:
message.text = self.message_preprocessor(message.text)
tracker = self._get_tracker(message.sender_id)
self._handle_message_with_tracker(message, tracker)
self._log_slots(tracker)
if self._should_handle_message(tracker):
action = self._get_next_action(tracker)
# save tracker state to continue conversation from this state
self._save_tracker(tracker)
return {"next_action": action.name(),
"tracker": tracker.current_state()}
else:
return {"next_action": None,
"tracker": tracker.current_state()}
def continue_message_handling(self, sender_id, executed_action, events):
# type: (Text, Text, List[Event]) -> Dict[Text, Any]
tracker = self._get_tracker(sender_id)
self._log_action_on_tracker(tracker, executed_action, events)
if self._should_predict_another_action(executed_action, events):
action = self._get_next_action(tracker)
# save tracker state to continue conversation from this state
self._save_tracker(tracker)
return {"next_action": action.name(),
"tracker": tracker.current_state()}
else:
return {"next_action": None,
"tracker": tracker.current_state()}
def _log_slots(self, tracker):
# Log currently set slots
slot_values = "\n".join(["\t{}: {}".format(s.name, s.value)
for s in tracker.slots.values()])
logger.debug("Current slot values: \n{}".format(slot_values))
def handle_reminder(self, reminder_event, dispatcher):
# type: (ReminderScheduled, Dispatcher) -> None
def has_message_after_reminder(tracker):
for e in reversed(tracker.events):
if isinstance(e,
ReminderScheduled) and e.name == \
reminder_event.name:
return False
elif isinstance(e, UserUttered):
return True
return True # tracker has probably been restarted
tracker = self._get_tracker(dispatcher.sender)
if (reminder_event.kill_on_user_message and
has_message_after_reminder(tracker)):
logger.debug("Canceled reminder because it is outdated. "
"(event: {} id: {})".format(reminder_event.action_name,
reminder_event.name))
else:
# necessary for proper featurization, otherwise the previous
# unrelated message would influence featurization
tracker.update(UserUttered.empty())
action = self.domain.action_for_name(reminder_event.action_name)
should_continue = self._run_action(action, tracker, dispatcher)
if should_continue:
user_msg = UserMessage(None,
dispatcher.output_channel,
dispatcher.sender)
self._predict_and_execute_next_action(user_msg, tracker)
# save tracker state to continue conversation from this state
self._save_tracker(tracker)
def _parse_message(self, message):
# for testing - you can short-cut the NLU part with a message
# in the format _intent[entity1=val1,entity=val2]
# parse_data is a dict of intent & entities
if message.text.startswith('_'):
parse_data = RegexInterpreter().parse(message.text)
else:
parse_data = self.interpreter.parse(message.text)
logger.debug("Received user message '{}' with intent '{}' "
"and entities '{}'".format(message.text,
parse_data["intent"],
parse_data["entities"]))
return parse_data
def _handle_message_with_tracker(self, message, tracker):
# type: (UserMessage, DialogueStateTracker) -> None
parse_data = self._parse_message(message)
# don't ever directly mutate the tracker - instead pass it events to log
tracker.update(UserUttered(message.text, parse_data["intent"],
parse_data["entities"], parse_data))
for e in self.domain.slots_for_entities(parse_data["entities"]):
tracker.update(e)
logger.debug("Logged UserUtterance - "
"tracker now has {} events".format(len(tracker.events)))
def _should_handle_message(self, tracker):
return (not tracker.is_paused() or
tracker.latest_message.intent.get("name") ==
self.domain.restart_intent)
def _predict_and_execute_next_action(self, message, tracker):
dispatcher = Dispatcher(message.sender_id,
message.output_channel,
self.domain)
should_predict_another_action = True
num_predicted_actions = 0
self._log_slots(tracker)
while should_predict_another_action and \
self._should_handle_message(tracker) and \
num_predicted_actions < self.max_number_of_predictions:
action = self._get_next_action(tracker)
should_predict_another_action = self._run_action(action,
tracker,
dispatcher)
num_predicted_actions += 1
if num_predicted_actions == self.max_number_of_predictions and \
should_predict_another_action:
# circuit breaker was tripped
logger.warn(
"Circuit breaker tripped. Stopped predicting "
"more actions for sender '{}'".format(tracker.sender_id))
if self.on_circuit_break:
# call a registered callback
self.on_circuit_break(tracker, dispatcher)
logger.debug("Current topic: {}".format(tracker.topic.name))
def _should_predict_another_action(self, action_name, events):
is_listen_action = action_name == ACTION_LISTEN_NAME
contains_restart = events and isinstance(events[0], Restarted)
return not is_listen_action and not contains_restart
    def _schedule_reminders(self, events, dispatcher):
        # type: (List[Event], Dispatcher) -> None
        """Register a scheduler job for every ``ReminderScheduled`` event.

        Jobs are keyed by the reminder's name, so scheduling a reminder
        with an existing name replaces the pending one
        (``replace_existing=True``).
        """
        if events is not None:
            for e in events:
                if isinstance(e, ReminderScheduled):
                    # "date" trigger: fire once at the given point in time.
                    scheduler.add_job(self.handle_reminder, "date",
                                      run_date=e.trigger_date_time,
                                      args=[e, dispatcher],
                                      id=e.name,
                                      replace_existing=True)
    def _run_action(self, action, tracker, dispatcher):
        """Execute *action*, log its outcome and schedule any reminders.

        Returns True when another action should be predicted afterwards,
        i.e. the action was not a listen action and did not restart the
        conversation (see ``_should_predict_another_action``).
        """
        # events and return values are used to update
        # the tracker state after an action has been taken
        events = action.run(dispatcher, tracker, self.domain)
        self._log_action_on_tracker(tracker, action.name(), events)
        self._schedule_reminders(events, dispatcher)
        return self._should_predict_another_action(action.name(), events)
def _log_action_on_tracker(self, tracker, action_name, events):
# Ensures that the code still works even if a lazy programmer missed
# to type `return []` at the end of an action or the run method
# returns `None` for some other reason.
if events is None:
events = []
logger.debug("Action '{}' ended with events '{}'".format(
action_name, ['{}'.format(e) for e in events]))
# log the action and its produced events
tracker.update(ActionExecuted(action_name))
for e in events:
tracker.update(e)
def _get_tracker(self, sender):
# type: (Text) -> DialogueStateTracker
sender_id = sender or UserMessage.DEFAULT_SENDER
tracker = self.tracker_store.get_or_create_tracker(sender_id)
return tracker
    def _save_tracker(self, tracker):
        """Persist *tracker* via the configured tracker store."""
        self.tracker_store.save(tracker)
    def _get_next_action(self, tracker):
        # type: (DialogueStateTracker) -> Action
        """Choose the next action to run for the current tracker state.

        Precedence: a pending follow-up action (if the domain knows it),
        then a restart when the latest intent is the restart intent,
        otherwise the policy ensemble's prediction.
        """
        follow_up_action = tracker.follow_up_action
        if follow_up_action:
            # A follow-up is consumed exactly once, even when it turns out
            # to be unknown to the domain.
            tracker.clear_follow_up_action()
            if self.domain.index_for_action(
                    follow_up_action.name()) is not None:
                return follow_up_action
            else:
                logger.error(
                        "Trying to run unknown follow up action '{}'!"
                        "Instead of running that, we will ignore the action "
                        "and predict the next action.".format(follow_up_action))
        if tracker.latest_message.intent.get("name") == \
                self.domain.restart_intent:
            return ActionRestart()
        idx = self.policy_ensemble.predict_next_action(tracker, self.domain)
        return self.domain.action_for_index(idx)
| true | true |
f72296b0c1f3fb301b416f05e7e91ac2c9bab66e | 6,171 | py | Python | mapclientplugins/gait2392somsomusclestep/step.py | tsalemink/gait2392somsomusclestep | 53d8dbc6daf774b013795f8bc7879cb6e12a0a6b | [
"Apache-2.0"
] | null | null | null | mapclientplugins/gait2392somsomusclestep/step.py | tsalemink/gait2392somsomusclestep | 53d8dbc6daf774b013795f8bc7879cb6e12a0a6b | [
"Apache-2.0"
] | null | null | null | mapclientplugins/gait2392somsomusclestep/step.py | tsalemink/gait2392somsomusclestep | 53d8dbc6daf774b013795f8bc7879cb6e12a0a6b | [
"Apache-2.0"
] | 1 | 2021-12-02T22:50:13.000Z | 2021-12-02T22:50:13.000Z | """
MAP Client Plugin Step
"""
import json
from PySide2 import QtGui
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
from mapclientplugins.gait2392somsomusclestep.configuredialog import \
ConfigureDialog
from mapclientplugins.gait2392somsomusclestep.gait2392musclecustsomso import \
Gait2392MuscleCustomiser
class FieldworkGait2392SomsoMuscleStep(WorkflowStepMountPoint):
    """
    MAP Client plugin for customising the OpenSim Gait2392 model muscle points

    Inputs
    ------
    gias-lowerlimb : GIAS2 LowerlimbAtlas instance
        Lower limb model with customised lower limb bone geometry and pose
    osimmodel : OpenSim model instance
        The opensim model to modify. Should be output from a step that
        modified the body geometries.

    Outputs
    -------
    osimmodel : OpenSim model instance
        Modified opensim model
    """

    def __init__(self, location):
        super(FieldworkGait2392SomsoMuscleStep, self).__init__(
            'Gait2392 SOMSO Muscle', location)
        # A step cannot be executed until it has been configured.
        self._configured = False
        self._category = 'OpenSim'
        # Add any other initialisation code here:
        self._icon = QtGui.QImage(
            ':/fieldworkgait2392musclehmfstep/images/morphometric.png')
        # Ports (order matters: the index passed to set/getPortData follows
        # the registration order below):
        #   0: uses gias-lowerlimb, 1: uses osimmodel, 2: uses landmarks,
        #   3: provides osimmodel
        self.addPort(
            ('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#gias-'
             'lowerlimb'))
        self.addPort(
            ('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#osimmodel'))
        self.addPort(
            ('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#landmarks'))
        # 'ju#fieldworkmodeldict'))
        self.addPort(
            ('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
             'http://physiomeproject.org/workflow/1.0/rdf-schema#osimmodel'))
        # Config: default values; overwritten by deserialize()/configure().
        self._config = {
            'identifier': '', 'osim_output_dir': './', 'in_unit': 'mm',
            'out_unit': 'm', 'write_osim_file': True, 'update_knee_splines': False, 'static_vas': False,
            'update_max_iso_forces': True, 'subject_height': '', 'subject_mass': ''
        }
        # The customiser does the actual work; it shares self._config.
        self._g2392_somso_muscle = Gait2392MuscleCustomiser(self._config)
        self._g2392_somso_muscle.set_workflow_location(self._location)

    def execute(self):
        """
        Add your code here that will kick off the execution of the step.
        Make sure you call the _doneExecution() method when finished. This
        method may be connected up to a button in a widget for example.
        """
        # Re-assign in case configure() replaced the config dict instance.
        self._g2392_somso_muscle.config = self._config
        self._g2392_somso_muscle.customise()
        self._doneExecution()

    def setPortData(self, index, dataIn):
        """
        Add your code here that will set the appropriate objects for this step.
        The index is the index of the port in the port list. If there is only
        one uses port for this step then the index can be ignored.
        """
        if index == 0:
            # http://physiomeproject.org/workflow/1.0/rdf-schema#gias-lowerlimb
            self._g2392_somso_muscle.ll = dataIn
        elif index == 1:
            # http://physiomeproject.org/workflow/1.0/rdf-schema#osimmodel
            self._g2392_somso_muscle.set_osim_model(dataIn)
        elif index == 2:
            # landmarks port
            self._g2392_somso_muscle.landmarks = dataIn

    def getPortData(self, index):
        """
        Add your code here that will return the appropriate objects for this
        step. The index is the index of the port in the port list. If there is
        only one provides port for this step then the index can be ignored.
        """
        # http://physiomeproject.org/workflow/1.0/rdf-schema#osimmodel
        # Only one provides port exists, so `index` is intentionally unused.
        return self._g2392_somso_muscle.gias_osimmodel._model

    def configure(self):
        """
        This function will be called when the configure icon on the step is
        clicked. It is appropriate to display a configuration dialog at this
        time. If the conditions for the configuration of this step are complete
        then set:
        self._configured = True
        """
        dlg = ConfigureDialog()
        dlg.set_workflow_location(self._location)
        dlg.identifierOccursCount = self._identifierOccursCount
        dlg.setConfig(self._config)
        dlg.validate()
        dlg.setModal(True)

        if dlg.exec_():
            self._config = dlg.getConfig()

        self._configured = dlg.validate()
        self._configuredObserver()

    def getIdentifier(self):
        """
        The identifier is a string that must be unique within a workflow.
        """
        return self._config['identifier']

    def setIdentifier(self, identifier):
        """
        The framework will set the identifier for this step when it is loaded.
        """
        self._config['identifier'] = identifier

    def serialize(self):
        """
        Add code to serialize this step to string. This method should
        implement the opposite of 'deserialize'.
        """
        return json.dumps(self._config, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def deserialize(self, string):
        """
        Add code to deserialize this step from string. This method should
        implement the opposite of 'serialize'.
        """
        self._config.update(json.loads(string))
        # Re-validate the restored configuration so _configured is accurate.
        d = ConfigureDialog()
        d.set_workflow_location(self._location)
        d.identifierOccursCount = self._identifierOccursCount
        d.setConfig(self._config)
        self._configured = d.validate()
| 38.811321 | 104 | 0.64479 | import json
from PySide2 import QtGui
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
from mapclientplugins.gait2392somsomusclestep.configuredialog import \
ConfigureDialog
from mapclientplugins.gait2392somsomusclestep.gait2392musclecustsomso import \
Gait2392MuscleCustomiser
class FieldworkGait2392SomsoMuscleStep(WorkflowStepMountPoint):
def __init__(self, location):
super(FieldworkGait2392SomsoMuscleStep, self).__init__(
'Gait2392 SOMSO Muscle', location)
self._configured = False
self._category = 'OpenSim'
self._icon = QtGui.QImage(
':/fieldworkgait2392musclehmfstep/images/morphometric.png')
self.addPort(
('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'http://physiomeproject.org/workflow/1.0/rdf-schema#gias-'
'lowerlimb'))
self.addPort(
('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'http://physiomeproject.org/workflow/1.0/rdf-schema#osimmodel'))
self.addPort(
('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'http://physiomeproject.org/workflow/1.0/rdf-schema#landmarks'))
self.addPort(
('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'http://physiomeproject.org/workflow/1.0/rdf-schema#osimmodel'))
self._config = {
'identifier': '', 'osim_output_dir': './', 'in_unit': 'mm',
'out_unit': 'm', 'write_osim_file': True, 'update_knee_splines': False, 'static_vas': False,
'update_max_iso_forces': True, 'subject_height': '', 'subject_mass': ''
}
self._g2392_somso_muscle = Gait2392MuscleCustomiser(self._config)
self._g2392_somso_muscle.set_workflow_location(self._location)
def execute(self):
self._g2392_somso_muscle.config = self._config
self._g2392_somso_muscle.customise()
self._doneExecution()
def setPortData(self, index, dataIn):
if index == 0:
lf._g2392_somso_muscle.ll = dataIn
elif index == 1:
self._g2392_somso_muscle.set_osim_model(dataIn)
elif index == 2:
self._g2392_somso_muscle.landmarks = dataIn
def getPortData(self, index):
eturn self._g2392_somso_muscle.gias_osimmodel._model
def configure(self):
dlg = ConfigureDialog()
dlg.set_workflow_location(self._location)
dlg.identifierOccursCount = self._identifierOccursCount
dlg.setConfig(self._config)
dlg.validate()
dlg.setModal(True)
if dlg.exec_():
self._config = dlg.getConfig()
self._configured = dlg.validate()
self._configuredObserver()
def getIdentifier(self):
return self._config['identifier']
def setIdentifier(self, identifier):
self._config['identifier'] = identifier
def serialize(self):
return json.dumps(self._config, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def deserialize(self, string):
self._config.update(json.loads(string))
d = ConfigureDialog()
d.set_workflow_location(self._location)
d.identifierOccursCount = self._identifierOccursCount
d.setConfig(self._config)
self._configured = d.validate()
| true | true |
f722974799be3549ca3e50e43cb2ae03b134ec36 | 599 | py | Python | setup.py | Mike7477/cta-lstchain | 977de95d7dfe046cd402dbf1df628384724a4316 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Mike7477/cta-lstchain | 977de95d7dfe046cd402dbf1df628384724a4316 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Mike7477/cta-lstchain | 977de95d7dfe046cd402dbf1df628384724a4316 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# import sys
import setuptools
setuptools.setup(
    name='lstchain',
    # setuptools/PEP 440 require the version as a string; passing the float
    # 0.1 is deprecated and rejected by modern packaging tooling.
    version='0.1',
    description="DESCRIPTION",
    # these should be the minimum list of what is needed to run
    packages=setuptools.find_packages(),
    install_requires=['h5py'],
    tests_require=['pytest'],
    author='LST collaboration',
    author_email='',
    license='',
    url='https://github.com/cta-observatory/cta-lstchain',
    long_description='',
    classifiers=[],
)
| 27.227273 | 63 | 0.617696 |
import setuptools
setuptools.setup(name='lstchain',
version=0.1,
description="DESCRIPTION",
packages=setuptools.find_packages(),
install_requires=['h5py'
],
tests_require=['pytest'],
author='LST collaboration',
author_email='',
license='',
url='https://github.com/cta-observatory/cta-lstchain',
long_description='',
classifiers=[
],
)
| true | true |
f72297cab8de6ba925b4bb00bc467b1588b58081 | 5,387 | py | Python | app/api/service/party.py | ChegeBryan/politico | 746ef4c76931928ef145593092c8b391421a50fd | [
"MIT"
] | 1 | 2021-09-08T13:17:03.000Z | 2021-09-08T13:17:03.000Z | app/api/service/party.py | ChegeBryan/politico | 746ef4c76931928ef145593092c8b391421a50fd | [
"MIT"
] | 62 | 2019-02-04T07:08:32.000Z | 2021-05-06T19:49:03.000Z | app/api/service/party.py | ChegeBryan/politico | 746ef4c76931928ef145593092c8b391421a50fd | [
"MIT"
] | 5 | 2019-02-11T18:21:14.000Z | 2022-02-25T07:41:07.000Z | """ Method for data manipulation in the mock db """
from flask import jsonify
from marshmallow import ValidationError
from app.api.model.party import Party
from app.api.util.dto import party_schema, parties_schema
from app.api.db.database import AppDatabase as db
def save_new_party(json_data):
    """Validate the request payload and persist a new party.

    Args:
        json_data (dict): party details from the request body.

    Returns:
        tuple: (flask JSON response, HTTP status code) — 201 on success,
        400 on validation failure, 409 when the name is already taken.
    """
    try:
        data = party_schema.load(json_data)
    except ValidationError as e:
        return jsonify({
            "status": 400,
            "error": e.messages
        }), 400

    # Guard clause: reject the request when the name is already registered.
    name_query = Party.get_party_by_name(data['party_name'])
    if db().get_single_row(*name_query) is not None:
        return jsonify({
            "status": 409,
            "error": "Try a different Party name, Provided name is taken."
        }), 409

    save_changes(Party(
        party_name=data['party_name'],
        hq_address=data['hq_address'],
        logo_url=data['logo_url']
    ))
    # Read the stored row back so the response reflects persisted state.
    stored = db().get_single_row(*name_query)
    return jsonify({
        "status": 201,
        "data": [party_schema.dump(stored)]
    }), 201
def get_party(_id):
    """Fetch a single party by its unique identifier.

    Args:
        _id (integer): the party unique identifier

    Returns:
        tuple: the party payload with HTTP 200, or a 404 error payload
        when no party matches ``_id``.
    """
    found = db().get_single_row(*Party.get_party_by_id(_id))
    if not found:
        # Guard clause: no such party.
        return jsonify({
            "status": 404,
            "error": "Resource /parties/{} not found".format(_id)
        }), 404
    return jsonify({
        "status": 200,
        "data": [party_schema.dump(found)]
    }), 200
def get_parties():
    """Return every registered party.

    Returns:
        tuple: JSON list of all parties with HTTP 200.
    """
    rows = db().get_all_rows(Party.get_parties_query())
    return jsonify({
        "status": 200,
        "data": parties_schema.dump(rows)
    }), 200
def edit_party(_id, json_data):
    """Rename the party identified by ``_id``.

    Args:
        _id (integer): the party unique identifier.
        json_data (dict): payload containing the new ``party_name``.

    Returns:
        tuple: 200 with the edited party, 400 on invalid/missing input,
        404 when the party does not exist, 409 when the name is taken.
    """
    try:
        data = party_schema.load(json_data, partial=True)
    except ValidationError as e:
        return jsonify({
            "status": 400,
            "error": e.messages
        }), 400

    # partial=True lets callers omit fields entirely, so "party_name" may be
    # absent even after a successful load; previously this was an unguarded
    # data["party_name"] that raised KeyError (HTTP 500) on such requests.
    new_name = data.get("party_name")
    if new_name is None:
        return jsonify({
            "status": 400,
            "error": {"party_name": ["Missing data for required field."]}
        }), 400

    # check if the party with the provided party id exists.
    party_to_edit_query = Party.get_party_by_id(_id)
    party_to_edit = db().get_single_row(*party_to_edit_query)
    if not party_to_edit:
        # response when party not found
        return jsonify({
            "status": 404,
            "error": "Resource requested for edit not found."
        }), 404

    # check if the provided name already exists
    if db().get_single_row(*Party.get_party_by_name(new_name)) is not None:
        return jsonify({
            "status": 409,
            "error":
            "Provided name is already taken or is the same for this party."
        }), 409

    # persist the rename, then query back the edited party for the response
    query, values = Party.update_party(_id, new_name)
    db().commit_changes(query, values)
    party_edited = db().get_single_row(*party_to_edit_query)
    # 200 is Flask's default status; stated explicitly for consistency.
    return jsonify({
        "status": 200,
        "data": [party_schema.dump(party_edited)]
    }), 200
def delete_party(_id):
    """Delete the party identified by ``_id``.

    Returns:
        tuple: confirmation message with HTTP 200, or 404 when the party
        does not exist.
    """
    # Guard clause: nothing to delete when the party is unknown.
    target = db().get_single_row(*Party.get_party_by_id(_id))
    if not target:
        return jsonify({
            "status": 404,
            "data": [{
                "message": "Political Party to delete not found."
            }]
        }), 404

    query, value = Party.delete_party(_id)
    db().commit_changes(query, value)
    return jsonify({
        "status": 200,
        "data": [{
            "message": "Political Party deleted successfully."
        }]
    }), 200
def save_changes(data):
    """Persist a new ``Party`` instance to the database.

    Args:
        data (Party): the party to insert.
    """
    query, values = Party.add_party(data)
    db().commit_changes(query, values)
| 29.437158 | 75 | 0.604418 | from flask import jsonify
from marshmallow import ValidationError
from app.api.model.party import Party
from app.api.util.dto import party_schema, parties_schema
from app.api.db.database import AppDatabase as db
def save_new_party(json_data):
try:
data = party_schema.load(json_data)
except ValidationError as e:
return jsonify({
"status": 400,
"error": e.messages
}), 400
party_name = data['party_name']
hq_address = data['hq_address']
logo_url = data['logo_url']
party_by_name = Party.get_party_by_name(party_name)
party = db().get_single_row(*party_by_name)
if party is None:
new_party = Party(
party_name=party_name,
hq_address=hq_address,
logo_url=logo_url
)
save_changes(new_party)
party_saved = db().get_single_row(*party_by_name)
response = party_schema.dump(party_saved)
response_object = jsonify({
"status": 201,
"data": [response]
})
return response_object, 201
return jsonify({
"status": 409,
"error": "Try a different Party name, Provided name is taken."
}), 409
def get_party(_id):
party_query = Party.get_party_by_id(_id)
party = db().get_single_row(*party_query)
if party:
return jsonify({
"status": 200,
"data": [party_schema.dump(party)]
}), 200
else:
return jsonify({
"status": 404,
"error": "Resource /parties/{} not found".format(_id)
}), 404
def get_parties():
parties_query = Party.get_parties_query()
parties = db().get_all_rows(parties_query)
response = parties_schema.dump(parties)
return jsonify({
"status": 200,
"data": response
}), 200
def edit_party(_id, json_data):
try:
data = party_schema.load(json_data, partial=True)
except ValidationError as e:
return jsonify({
"status": 400,
"error": e.messages
}), 400
party_to_edit_query = Party.get_party_by_id(_id)
party_to_edit = db().get_single_row(*party_to_edit_query)
if party_to_edit:
new_name = data["party_name"]
party_by_name = Party.get_party_by_name(new_name)
party = db().get_single_row(*party_by_name)
if party is None:
query, values = Party.update_party(_id, new_name)
db().commit_changes(query, values)
party_edited = db().get_single_row(*party_to_edit_query)
return jsonify({
"status": 200,
"data": [party_schema.dump(party_edited)]
})
return jsonify({
"status": 409,
"error":
"Provided name is already taken or is the same for this party."
}), 409
return jsonify({
"status": 404,
"error": "Resource requested for edit not found."
}), 404
def delete_party(_id):
party_to_delete_query = Party.get_party_by_id(_id)
party_to_delete = db().get_single_row(*party_to_delete_query)
if party_to_delete:
query, value = Party.delete_party(_id)
db().commit_changes(query, value)
return jsonify({
"status": 200,
"data": [{
"message": "Political Party deleted successfully."
}]
}), 200
else:
return jsonify({
"status": 404,
"data": [{
"message": "Political Party to delete not found."
}]
}), 404
def save_changes(data):
query, values = Party.add_party(data)
db().commit_changes(query, values)
| true | true |
f72297f08adb9274b965c4d7aef9072269c53f94 | 1,397 | py | Python | scratchpad/average.py | Outflier/PyAV | f3aa2336a9fddfc2ae46e15a26956da08153af7e | [
"BSD-3-Clause"
] | 965 | 2015-01-08T19:11:16.000Z | 2020-04-30T16:27:07.000Z | scratchpad/average.py | Outflier/PyAV | f3aa2336a9fddfc2ae46e15a26956da08153af7e | [
"BSD-3-Clause"
] | 542 | 2015-01-02T12:55:46.000Z | 2020-04-30T16:13:56.000Z | scratchpad/average.py | Outflier/PyAV | f3aa2336a9fddfc2ae46e15a26956da08153af7e | [
"BSD-3-Clause"
] | 211 | 2015-01-10T12:10:02.000Z | 2020-04-29T14:02:51.000Z | import argparse
import os
import sys
import pprint
import itertools
import cv2
from av import open
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--format')
parser.add_argument('-n', '--frames', type=int, default=0)
parser.add_argument('path', nargs='+')
args = parser.parse_args()
max_size = 24 * 60 # One minute's worth.
def frame_iter(video):
    """Yield decoded frames from the first video stream of *video*.

    Honours the global ``--frames`` limit: when ``args.frames`` is a
    positive number, at most that many frames are yielded.
    """
    count = 0
    # Demux only the first video stream; other streams are ignored.
    streams = [s for s in video.streams if s.type == b'video']
    streams = [streams[0]]  # raises IndexError when there is no video stream
    for packet in video.demux(streams):
        for frame in packet.decode():
            yield frame
            count += 1
            # fix: was `count > args.frames`, which yielded one frame more
            # than requested (e.g. -n 2 produced 3 frames).
            if args.frames and count >= args.frames:
                return
# Average all frames of each input video and write the result as a JPEG.
for src_path in args.path:
    print('reading', src_path)
    basename = os.path.splitext(os.path.basename(src_path))[0]
    dir_name = os.path.join('sandbox', basename)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)

    video = open(src_path, format=args.format)

    # Accumulate a running float sum of all frames, then divide once.
    # (fix: removed an unused `frames = frame_iter(video)` generator, and
    # guarded against zero decoded frames, which previously raised a
    # NameError on `fi` / divided by nothing.)
    sum_ = None
    count = 0
    for frame in frame_iter(video):
        pixels = frame.to_ndarray().astype(float)
        sum_ = pixels if sum_ is None else sum_ + pixels
        count += 1

    if count == 0:
        print('no frames decoded from', src_path)
        continue

    sum_ /= count
    dst_path = os.path.join('sandbox', os.path.basename(src_path) + '-avg.jpeg')
    print('writing', count, 'frames to', dst_path)
    cv2.imwrite(dst_path, sum_)
| 23.283333 | 80 | 0.629921 | import argparse
import os
import sys
import pprint
import itertools
import cv2
from av import open
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--format')
parser.add_argument('-n', '--frames', type=int, default=0)
parser.add_argument('path', nargs='+')
args = parser.parse_args()
max_size = 24 * 60
def frame_iter(video):
count = 0
streams = [s for s in video.streams if s.type == b'video']
streams = [streams[0]]
for packet in video.demux(streams):
for frame in packet.decode():
yield frame
count += 1
if args.frames and count > args.frames:
return
for src_path in args.path:
print('reading', src_path)
basename = os.path.splitext(os.path.basename(src_path))[0]
dir_name = os.path.join('sandbox', basename)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
video = open(src_path, format=args.format)
frames = frame_iter(video)
sum_ = None
for fi, frame in enumerate(frame_iter(video)):
if sum_ is None:
sum_ = frame.to_ndarray().astype(float)
else:
sum_ += frame.to_ndarray().astype(float)
sum_ /= (fi + 1)
dst_path = os.path.join('sandbox', os.path.basename(src_path) + '-avg.jpeg')
print('writing', (fi + 1), 'frames to', dst_path)
cv2.imwrite(dst_path, sum_)
| true | true |
f72298276664ccbcea35dc2655353b5bce68ee5b | 589 | py | Python | inheritance3.py | Nischal47/Python-basics | bef88076dd06f27f8d175c43933f7e3bb95c935a | [
"MIT"
] | null | null | null | inheritance3.py | Nischal47/Python-basics | bef88076dd06f27f8d175c43933f7e3bb95c935a | [
"MIT"
] | null | null | null | inheritance3.py | Nischal47/Python-basics | bef88076dd06f27f8d175c43933f7e3bb95c935a | [
"MIT"
] | null | null | null | class Person:
def __init__(self,fname,lname):
self.fname=fname
self.lname=lname
self.name=self.fname+self.lname
class Student(Person):
    """A Person that additionally carries a roll number."""

    def __init__(self, fname, lname, RollNO):
        # Overriding __init__ hides Person's initialiser; delegate to the
        # parent so the name attributes are still set up.
        super().__init__(fname, lname)
        self.RollNO = RollNO

    def printname(self):
        """Print the student's full name and roll number."""
        print("Name :", self.name)
        print("RollNO :", self.RollNO)
# Demo: create a student and print its details.
x=Student("Ram","Parsad","1")
x.printname()
| 32.722222 | 109 | 0.655348 | class Person:
def __init__(self,fname,lname):
self.fname=fname
self.lname=lname
self.name=self.fname+self.lname
class Student(Person):
def __init__(self,fname,lname,RollNO):
Person.__init__(self,fname,lname)
self.RollNO=RollNO
def printname(self):
print("Name :",self.name)
print("RollNO :",self.RollNO)
x=Student("Ram","Parsad","1")
x.printname()
| true | true |
f722993608435965231718822fcf83ecbd5400dd | 132 | py | Python | typy/old/_fragment.py | cyrus-/tydy | 0fea76d82663e18a809735c02d09529950dbb5ba | [
"MIT"
] | 39 | 2016-09-12T14:44:56.000Z | 2017-04-06T16:08:00.000Z | typy/old/_fragment.py | cyrus-/tydy | 0fea76d82663e18a809735c02d09529950dbb5ba | [
"MIT"
] | 30 | 2016-10-05T04:14:27.000Z | 2017-02-06T20:16:07.000Z | typy/old/_fragment.py | cyrus-/typy | 0fea76d82663e18a809735c02d09529950dbb5ba | [
"MIT"
] | 1 | 2016-09-13T13:41:52.000Z | 2016-09-13T13:41:52.000Z | """typy fragments"""
__all__ = ("Fragment",)
class Fragment(object):
    """Base class for typy fragments; deliberately not instantiable."""
    def __init__(self):
        # FragmentCannotBeInstantiated is defined elsewhere in the package;
        # any attempt to construct a Fragment raises it unconditionally.
        raise FragmentCannotBeInstantiated()
| 14.666667 | 38 | 0.719697 |
__all__ = ("Fragment",)
class Fragment(object):
def __init__(self):
raise FragmentCannotBeInstantiated()
| true | true |
f7229aacdf68f7edec0bbb547366c2bcd2683f2a | 3,755 | py | Python | p_iadabot.py | EdoardoGalletti/PiadaBot | 1318ddcc24e682ad2fafea22d8937af69c274dab | [
"MIT"
] | null | null | null | p_iadabot.py | EdoardoGalletti/PiadaBot | 1318ddcc24e682ad2fafea22d8937af69c274dab | [
"MIT"
] | 1 | 2021-06-01T23:27:19.000Z | 2021-06-01T23:27:19.000Z | p_iadabot.py | EdoardoGalletti/PiadaBot | 1318ddcc24e682ad2fafea22d8937af69c274dab | [
"MIT"
] | null | null | null | """
Very stupid bot to indicate the quantities of ingredients to prepare the desired number of piadines
Mother"s recipe for 16 piadine:
FLOUR: 700g
LARD: 100g
MILK: 175g
WATER: 175g
SWEETS YEAST: 15g
SALT: q.s.
HONEY: q.s.
"""
class Ingredient:
    """A single recipe ingredient.

    Attributes:
        name: display name of the ingredient (Italian).
        quantity: grams required for one piadina.
    """

    def __init__(self, name, quantity):
        # Plain value object: just record the two fields.
        self.quantity = quantity
        self.name = name
# Per-piadina quantities, derived from the 16-piadine recipe in the module
# docstring (total grams / 16).
wheat = Ingredient("Farina", 700/16)
lard = Ingredient("Strutto", 100/16)
milk = Ingredient("Latte", 175/16)
# fix: water was mislabelled "Farina" (flour); the recipe's 175 g of water
# must be labelled "Acqua" so the /piade reply lists it correctly instead
# of printing "Farina" twice.
water = Ingredient("Acqua", 175/16)
yeast = Ingredient("Lievito per dolci non vanigliato", 15/16)
import logging
import telegram
#import telepot
import urllib3
import math
# You can leave this bit out if you're using a paid PythonAnywhere account
# proxy_url = "http://proxy.server:3128"
# telepot.api._pools = {
# 'default': urllib3.ProxyManager(proxy_url=proxy_url, num_pools=3, maxsize=10, retries=False, timeout=30),
# }
# telepot.api._onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=proxy_url, num_pools=1, maxsize=1, retries=False, timeout=30))
# end of the stuff that's only needed for free accounts
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# Enable logging
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)

# NOTE(review): the bot token is a credential committed to source control —
# rotate it and load it from the environment instead. These module-level
# updater/dispatcher objects are also shadowed by the ones main() creates
# and appear unused.
updater = Updater(token="751566911:AAHUJoC-mZAPEVA8u8xmP2BM7gO-lmP2O54")
dispatcher = updater.dispatcher
# Command handlers (they usually take the two arguments bot and update)
def start(bot, update):
    """Greet the user and explain how to use the /piade command."""
    greeting = ("I'm the PiadaBot! Quante piadine vuoi fare?\n"
                "Scrivi: \"/piade x\", dove x è il numero di piadine desiderato.")
    bot.send_message(chat_id=update.message.chat_id, text=greeting)
#########################################crap
def echo(update, context):
    """Echo the user message."""
    # NOTE(review): never registered in main(); it also uses the newer
    # (update, context) callback signature while the rest of this file uses
    # the legacy (bot, update) style — confirm before wiring it up.
    update.message.reply_text(update.message.text)
#########################################crap
def caps(bot, update, args):
    """Reply with the user's words upper-cased (/caps command)."""
    shouted = " ".join(args).upper()
    bot.send_message(chat_id=update.message.chat_id, text=shouted)
def piade(bot, update, args):
    """Reply with ingredient quantities for the requested piadina count.

    Expects ``args[0]`` to be the desired number of piadine; replies with
    a usage hint instead of crashing when the argument is missing or not
    numeric (previously an unhandled IndexError/ValueError killed the
    handler).
    """
    try:
        piade = float(args[0])
    except (IndexError, ValueError):
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Scrivi: \"/piade x\", dove x è il numero di piadine desiderato.")
        return
    # Whole-gram amounts; yeast is rounded up (ceil) so the dough still rises.
    lines = [
        "*" + wheat.name + "*: " + str(int(wheat.quantity * piade)) + "g ",
        "*" + lard.name + "*: " + str(int(lard.quantity * piade)) + "g ",
        "*" + milk.name + "*: " + str(int(milk.quantity * piade)) + "g ",
        "*" + water.name + "*: " + str(int(water.quantity * piade)) + "g ",
        "*" + yeast.name + "*: " + str(math.ceil(yeast.quantity * piade)) + "g ",
        "*Sale*: q.b.",
        "*Miele*: q.b.",
    ]
    bot.send_message(chat_id=update.message.chat_id,
                     text="\n".join(lines),
                     parse_mode=telegram.ParseMode.MARKDOWN)
def unknown(bot, update):
    """Reply sent when an unrecognized command is received."""
    bot.send_message(chat_id=update.message.chat_id, text="No compriendo nu caz.")
def main():
    """Start the bot """
    # NOTE(review): this creates a second Updater; the module-level
    # updater/dispatcher built at import time are never used. The hard-coded
    # token is a committed credential — rotate it and read it from the
    # environment instead.
    # Create the Updater and pass it your bot"s token.
    updater = Updater(token="751566911:AAHUJoC-mZAPEVA8u8xmP2BM7gO-lmP2O54")

    # Get the dispatcher to register handlers
    dispatcher = updater.dispatcher

    # on different commands - answer in Telegram
    dispatcher.add_handler(CommandHandler("start", start))
    dispatcher.add_handler(CommandHandler("caps", caps, pass_args=True))
    dispatcher.add_handler(CommandHandler("piade", piade, pass_args=True))

    # on unknown command - show message in Telegram
    # (keep this catch-all registered after the specific command handlers —
    # presumably handlers are tried in registration order; verify against
    # the python-telegram-bot docs before reordering)
    dispatcher.add_handler(MessageHandler(Filters.command, unknown))

    # Start the Bot
    updater.start_polling()

    updater.idle()
# Standard script entry point: only start polling when run directly.
if __name__ == "__main__":
    main()
| 35.424528 | 167 | 0.673502 |
class Ingredient:
def __init__(self, name, quantity):
self.name = name
self.quantity = quantity
wheat = Ingredient("Farina", 700/16)
lard = Ingredient("Strutto", 100/16)
milk = Ingredient("Latte", 175/16)
water = Ingredient("Farina", 175/16)
yeast = Ingredient("Lievito per dolci non vanigliato", 15/16)
import logging
import telegram
import urllib3
import math
# proxy_url = "http://proxy.server:3128"
# telepot.api._pools = {
# 'default': urllib3.ProxyManager(proxy_url=proxy_url, num_pools=3, maxsize=10, retries=False, timeout=30),
# }
# telepot.api._onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=proxy_url, num_pools=1, maxsize=1, retries=False, timeout=30))
# end of the stuff that's only needed for free accounts
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
updater = Updater(token="751566911:AAHUJoC-mZAPEVA8u8xmP2BM7gO-lmP2O54")
dispatcher = updater.dispatcher
def start(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="I'm the PiadaBot! Quante piadine vuoi fare?\nScrivi: \"/piade x\", dove x è il numero di piadine desiderato.")
#########################################crap
def echo(update, context):
update.message.reply_text(update.message.text)
#########################################crap
def caps(bot, update, args):
text_caps = " ".join(args).upper()
bot.send_message(chat_id=update.message.chat_id, text=text_caps)
def piade(bot, update, args):
# user_says = " ".join(args)
piade = float(args[0])
message =\
"*" + wheat.name + "*: " + str(int(wheat.quantity*piade)) + "g \n" +\
"*" + lard.name + "*: " + str(int(lard.quantity*piade)) + "g \n" +\
"*" + milk.name + "*: " + str(int(milk.quantity*piade)) + "g \n" +\
"*" + water.name + "*: " + str(int(water.quantity*piade)) + "g \n" +\
"*" + yeast.name + "*: " + str(math.ceil(yeast.quantity*piade)) + "g \n" +\
"*Sale*: q.b.\n*Miele*: q.b."
bot.send_message(chat_id=update.message.chat_id, text = message, parse_mode = telegram.ParseMode.MARKDOWN)
def unknown(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="No compriendo nu caz.")
def main():
    """Build the bot, register its command handlers and poll until stopped."""
    bot_updater = Updater(token="751566911:AAHUJoC-mZAPEVA8u8xmP2BM7gO-lmP2O54")
    bot_dispatcher = bot_updater.dispatcher
    # Registration order is significant: the catch-all MessageHandler for
    # unknown commands must come after the named CommandHandlers.
    for handler in (
        CommandHandler("start", start),
        CommandHandler("caps", caps, pass_args=True),
        CommandHandler("piade", piade, pass_args=True),
        MessageHandler(Filters.command, unknown),
    ):
        bot_dispatcher.add_handler(handler)
    # Block here, dispatching updates, until interrupted.
    bot_updater.start_polling()
    bot_updater.idle()


if __name__ == "__main__":
    main()
| true | true |
f7229abe4303ac48d874665b1a528a756d171275 | 1,878 | py | Python | reactivex/observable/fromcallback.py | christiansandberg/RxPY | 036027d2858ea6c9d45839c863bd791e5bb50c36 | [
"MIT"
] | null | null | null | reactivex/observable/fromcallback.py | christiansandberg/RxPY | 036027d2858ea6c9d45839c863bd791e5bb50c36 | [
"MIT"
] | null | null | null | reactivex/observable/fromcallback.py | christiansandberg/RxPY | 036027d2858ea6c9d45839c863bd791e5bb50c36 | [
"MIT"
] | null | null | null | from typing import Any, Callable, Optional
from reactivex import Observable, abc, typing
from reactivex.disposable import Disposable
def from_callback_(
    func: Callable[..., Callable[..., None]],
    mapper: Optional[typing.Mapper[Any, Any]] = None,
) -> Callable[[], Observable[Any]]:
    """Converts a callback function to an observable sequence.

    Args:
        func: Function with a callback as the last argument to
            convert to an Observable sequence.
        mapper: [Optional] A mapper which takes the arguments
            from the callback to produce a single item to yield on next.

    Returns:
        A function, when executed with the required arguments minus
        the callback, produces an Observable sequence with a single value of
        the arguments to the callback as a list.
    """

    def function(*args: Any) -> Observable[Any]:
        arguments = list(args)

        def subscribe(
            observer: abc.ObserverBase[Any],
            scheduler: Optional[abc.SchedulerBase] = None,
        ) -> abc.DisposableBase:
            def handler(*cb_args: Any) -> None:
                results = list(cb_args)
                if mapper:
                    try:
                        # Note: the mapper receives the raw tuple of
                        # callback arguments.
                        results = mapper(cb_args)
                    except Exception as err:  # pylint: disable=broad-except
                        observer.on_error(err)
                        return

                    observer.on_next(results)
                else:
                    # A single callback argument is emitted bare; several
                    # are emitted as a list.
                    if len(results) <= 1:
                        observer.on_next(*results)
                    else:
                        observer.on_next(results)

                observer.on_completed()

            # Bug fix: do not mutate the shared `arguments` list.  The
            # original appended `handler` to it, so subscribing the returned
            # Observable a second time invoked `func` with one extra
            # trailing handler per previous subscription.
            func(*arguments, handler)
            return Disposable()

        return Observable(subscribe)

    return function
__all__ = ["from_callback_"]
| 31.3 | 76 | 0.565495 | from typing import Any, Callable, Optional
from reactivex import Observable, abc, typing
from reactivex.disposable import Disposable
def from_callback_(
func: Callable[..., Callable[..., None]],
mapper: Optional[typing.Mapper[Any, Any]] = None,
) -> Callable[[], Observable[Any]]:
def function(*args: Any) -> Observable[Any]:
arguments = list(args)
def subscribe(
observer: abc.ObserverBase[Any],
scheduler: Optional[abc.SchedulerBase] = None,
) -> abc.DisposableBase:
def handler(*args: Any) -> None:
results = list(args)
if mapper:
try:
results = mapper(args)
except Exception as err:
observer.on_error(err)
return
observer.on_next(results)
else:
if len(results) <= 1:
observer.on_next(*results)
else:
observer.on_next(results)
observer.on_completed()
arguments.append(handler)
func(*arguments)
return Disposable()
return Observable(subscribe)
return function
__all__ = ["from_callback_"]
| true | true |
f7229ac8cf11116abf2321d839ce3680a94fd5d0 | 15,653 | py | Python | boa3/model/builtin/interop/interop.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3/model/builtin/interop/interop.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3/model/builtin/interop/interop.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from enum import Enum
from typing import Dict, List
from boa3.model.builtin.interop.blockchain import *
from boa3.model.builtin.interop.contract import *
from boa3.model.builtin.interop.contract.contractmanifest import *
from boa3.model.builtin.interop.crypto import *
from boa3.model.builtin.interop.iterator import *
from boa3.model.builtin.interop.json import *
from boa3.model.builtin.interop.nativecontract import *
from boa3.model.builtin.interop.oracle import *
from boa3.model.builtin.interop.policy import *
from boa3.model.builtin.interop.role import *
from boa3.model.builtin.interop.runtime import *
from boa3.model.builtin.interop.stdlib import *
from boa3.model.builtin.interop.storage import *
from boa3.model.identifiedsymbol import IdentifiedSymbol
from boa3.model.imports.package import Package
class InteropPackage(str, Enum):
    """String identifiers of the interop sub-packages exposed by Interop."""
    # The str mixin makes members compare equal to their plain string values,
    # which interop_symbols() relies on when looking up a package by name.
    Blockchain = 'blockchain'
    Contract = 'contract'
    Crypto = 'crypto'
    Iterator = 'iterator'
    Json = 'json'
    Oracle = 'oracle'
    Policy = 'policy'
    Role = 'role'
    Runtime = 'runtime'
    Stdlib = 'stdlib'
    Storage = 'storage'
class Interop:
@classmethod
def interop_symbols(cls, package: str = None) -> List[IdentifiedSymbol]:
if package in InteropPackage.__members__.values():
return cls._interop_symbols[package]
lst: List[IdentifiedSymbol] = []
for symbols in cls._interop_symbols.values():
lst.extend(symbols)
return lst
# region Interops
# Interop Types
BlockType = BlockType.build()
CallFlagsType = CallFlagsType()
ContractManifestType = ContractManifestType.build()
ContractType = ContractType.build()
FindOptionsType = FindOptionsType()
Iterator = IteratorType.build()
NamedCurveType = NamedCurveType()
NotificationType = NotificationType.build()
OracleResponseCode = OracleResponseCodeType.build()
OracleType = OracleType.build()
RoleType = RoleType.build()
StorageContextType = StorageContextType.build()
StorageMapType = StorageMapType.build()
TransactionType = TransactionType.build()
TriggerType = TriggerType()
# Blockchain Interops
CurrentHash = CurrentHashProperty()
CurrentHeight = CurrentHeightProperty()
CurrentIndex = CurrentIndexProperty()
GetContract = GetContractMethod(ContractType)
GetBlock = GetBlockMethod(BlockType)
GetTransaction = GetTransactionMethod(TransactionType)
GetTransactionFromBlock = GetTransactionFromBlockMethod(TransactionType)
GetTransactionHeight = GetTransactionHeightMethod()
# Contract Interops
CallContract = CallMethod()
CreateContract = CreateMethod(ContractType)
CreateMultisigAccount = CreateMultisigAccountMethod()
CreateStandardAccount = CreateStandardAccountMethod()
DestroyContract = DestroyMethod()
GetCallFlags = GetCallFlagsMethod(CallFlagsType)
GetMinimumDeploymentFee = GetMinimumDeploymentFeeMethod()
UpdateContract = UpdateMethod()
# Native Contracts
GasScriptHash = GasProperty()
NeoScriptHash = NeoProperty()
ContractManagementScriptHash = ContractManagement
CryptoLibScriptHash = CryptoLibContract
LedgerScriptHash = LedgerContract
OracleScriptHash = OracleContract
StdLibScriptHash = StdLibContract
# Crypto Interops
CheckMultisig = CheckMultisigMethod()
CheckSig = CheckSigMethod()
Hash160 = Hash160Method()
Hash256 = Hash256Method()
Ripemd160 = Ripemd160Method()
Sha256 = Sha256Method()
VerifyWithECDsa = VerifyWithECDsaMethod()
# Iterator Interops
IteratorCreate = IteratorMethod(Iterator)
# Json Interops
JsonDeserialize = JsonDeserializeMethod()
JsonSerialize = JsonSerializeMethod()
# Policy Interops
GetExecFeeFactor = GetExecFeeFactorMethod()
GetFeePerByte = GetFeePerByteMethod()
GetStoragePrice = GetStoragePriceMethod()
IsBlocked = IsBlockedMethod()
# Role Interops
GetDesignatedByRole = GetDesignatedByRoleMethod()
# Runtime Interops
BlockTime = BlockTimeProperty()
BurnGas = BurnGasMethod()
CallingScriptHash = CallingScriptHashProperty()
CheckWitness = CheckWitnessMethod()
EntryScriptHash = EntryScriptHashProperty()
ExecutingScriptHash = ExecutingScriptHashProperty()
GasLeft = GasLeftProperty()
GetNetwork = GetNetworkMethod()
GetNotifications = GetNotificationsMethod(NotificationType)
GetRandom = GetRandomMethod()
GetTrigger = GetTriggerMethod(TriggerType)
InvocationCounter = InvocationCounterProperty()
Log = LogMethod()
Notify = NotifyMethod()
Platform = PlatformProperty()
ScriptContainer = ScriptContainerProperty()
# Stdlib Interops
Atoi = AtoiMethod()
Base58CheckDecode = Base58CheckDecodeMethod()
Base58CheckEncode = Base58CheckEncodeMethod()
Base58Encode = Base58EncodeMethod()
Base58Decode = Base58DecodeMethod()
Base64Encode = Base64EncodeMethod()
Base64Decode = Base64DecodeMethod()
Deserialize = DeserializeMethod()
Itoa = ItoaMethod()
MemoryCompare = MemoryCompareMethod()
MemorySearch = MemorySearchMethod()
Serialize = SerializeMethod()
# Storage Interops
StorageDelete = StorageDeleteMethod()
StorageFind = StorageFindMethod(FindOptionsType)
StorageGetContext = StorageGetContextMethod(StorageContextType)
StorageGetReadOnlyContext = StorageGetReadOnlyContextMethod(StorageContextType)
StorageGet = StorageGetMethod()
StoragePut = StoragePutMethod()
# endregion
# region Packages
BlockModule = Package(identifier=BlockType.identifier.lower(),
types=[BlockType]
)
TransactionModule = Package(identifier=TransactionType.identifier.lower(),
types=[TransactionType]
)
BlockchainPackage = Package(identifier=InteropPackage.Blockchain,
types=[BlockType,
TransactionType
],
methods=[CurrentHash,
CurrentHeight,
CurrentIndex,
GetBlock,
GetContract,
GetTransaction,
GetTransactionFromBlock,
GetTransactionHeight
],
packages=[BlockModule,
TransactionModule
]
)
CallFlagsTypeModule = Package(identifier=f'{CallFlagsType.identifier.lower()}type',
types=[CallFlagsType]
)
ContractModule = Package(identifier=ContractType.identifier.lower(),
types=[ContractType]
)
ContractManifestModule = Package(identifier=ContractManifestType.identifier.lower(),
types=[ContractAbiType.build(),
ContractEventDescriptorType.build(),
ContractGroupType.build(),
ContractManifestType,
ContractMethodDescriptorType.build(),
ContractParameterDefinitionType.build(),
ContractParameterType.build(),
ContractPermissionDescriptorType.build(),
ContractPermissionType.build()
]
)
ContractPackage = Package(identifier=InteropPackage.Contract,
types=[CallFlagsType,
ContractManifestType,
ContractType
],
properties=[GasScriptHash,
NeoScriptHash
],
methods=[CallContract,
CreateContract,
CreateMultisigAccount,
CreateStandardAccount,
DestroyContract,
GetCallFlags,
GetMinimumDeploymentFee,
UpdateContract
],
packages=[CallFlagsTypeModule,
ContractManifestModule,
ContractModule
]
)
CryptoPackage = Package(identifier=InteropPackage.Crypto,
types=[NamedCurveType],
methods=[CheckMultisig,
CheckSig,
Hash160,
Hash256,
Ripemd160,
Sha256,
VerifyWithECDsa,
]
)
IteratorPackage = Package(identifier=InteropPackage.Iterator,
types=[Iterator],
)
JsonPackage = Package(identifier=InteropPackage.Json,
methods=[JsonDeserialize,
JsonSerialize
]
)
NotificationModule = Package(identifier=NotificationType.identifier.lower(),
types=[NotificationType]
)
OracleResponseCodeModule = Package(identifier=OracleResponseCode.identifier.lower(),
types=[OracleResponseCode]
)
OracleModule = Package(identifier=OracleType.identifier.lower(),
types=[OracleType]
)
OraclePackage = Package(identifier=InteropPackage.Oracle,
types=[OracleResponseCode,
OracleType
],
packages=[OracleModule,
OracleResponseCodeModule
]
)
TriggerTypeModule = Package(identifier=TriggerType.identifier.lower(),
types=[TriggerType]
)
PolicyPackage = Package(identifier=InteropPackage.Policy,
methods=[GetExecFeeFactor,
GetFeePerByte,
GetStoragePrice,
IsBlocked
]
)
RolePackage = Package(identifier=InteropPackage.Role,
types=[RoleType],
methods=[GetDesignatedByRole]
)
RuntimePackage = Package(identifier=InteropPackage.Runtime,
types=[NotificationType,
TriggerType
],
properties=[BlockTime,
CallingScriptHash,
ExecutingScriptHash,
GasLeft,
Platform,
InvocationCounter,
EntryScriptHash,
ScriptContainer
],
methods=[BurnGas,
CheckWitness,
GetNetwork,
GetNotifications,
GetRandom,
GetTrigger,
Log,
Notify
],
packages=[NotificationModule,
TriggerTypeModule
]
)
FindOptionsModule = Package(identifier=FindOptionsType.identifier.lower(),
types=[FindOptionsType]
)
StdlibPackage = Package(identifier=InteropPackage.Stdlib,
methods=[Atoi,
Base58CheckDecode,
Base58CheckEncode,
Base58Encode,
Base58Decode,
Base64Encode,
Base64Decode,
Deserialize,
Itoa,
MemoryCompare,
MemorySearch,
Serialize
]
)
StorageContextModule = Package(identifier=StorageContextType.identifier.lower(),
types=[StorageContextType]
)
StorageMapModule = Package(identifier=StorageMapType.identifier.lower(),
types=[StorageMapType]
)
StoragePackage = Package(identifier=InteropPackage.Storage,
types=[FindOptionsType,
StorageContextType,
StorageMapType
],
methods=[StorageDelete,
StorageFind,
StorageGet,
StorageGetContext,
StorageGetReadOnlyContext,
StoragePut
],
packages=[FindOptionsModule,
StorageContextModule,
StorageMapModule
]
)
# endregion
package_symbols: List[IdentifiedSymbol] = [
OracleType,
BlockchainPackage,
ContractPackage,
CryptoPackage,
IteratorPackage,
JsonPackage,
OraclePackage,
PolicyPackage,
RolePackage,
RuntimePackage,
StdlibPackage,
StoragePackage
]
_interop_symbols: Dict[InteropPackage, List[IdentifiedSymbol]] = {
package.identifier: list(package.symbols.values()) for package in package_symbols if
isinstance(package, Package)
}
| 40.342784 | 92 | 0.492493 | from enum import Enum
from typing import Dict, List
from boa3.model.builtin.interop.blockchain import *
from boa3.model.builtin.interop.contract import *
from boa3.model.builtin.interop.contract.contractmanifest import *
from boa3.model.builtin.interop.crypto import *
from boa3.model.builtin.interop.iterator import *
from boa3.model.builtin.interop.json import *
from boa3.model.builtin.interop.nativecontract import *
from boa3.model.builtin.interop.oracle import *
from boa3.model.builtin.interop.policy import *
from boa3.model.builtin.interop.role import *
from boa3.model.builtin.interop.runtime import *
from boa3.model.builtin.interop.stdlib import *
from boa3.model.builtin.interop.storage import *
from boa3.model.identifiedsymbol import IdentifiedSymbol
from boa3.model.imports.package import Package
class InteropPackage(str, Enum):
Blockchain = 'blockchain'
Contract = 'contract'
Crypto = 'crypto'
Iterator = 'iterator'
Json = 'json'
Oracle = 'oracle'
Policy = 'policy'
Role = 'role'
Runtime = 'runtime'
Stdlib = 'stdlib'
Storage = 'storage'
class Interop:
@classmethod
def interop_symbols(cls, package: str = None) -> List[IdentifiedSymbol]:
if package in InteropPackage.__members__.values():
return cls._interop_symbols[package]
lst: List[IdentifiedSymbol] = []
for symbols in cls._interop_symbols.values():
lst.extend(symbols)
return lst
BlockType = BlockType.build()
CallFlagsType = CallFlagsType()
ContractManifestType = ContractManifestType.build()
ContractType = ContractType.build()
FindOptionsType = FindOptionsType()
Iterator = IteratorType.build()
NamedCurveType = NamedCurveType()
NotificationType = NotificationType.build()
OracleResponseCode = OracleResponseCodeType.build()
OracleType = OracleType.build()
RoleType = RoleType.build()
StorageContextType = StorageContextType.build()
StorageMapType = StorageMapType.build()
TransactionType = TransactionType.build()
TriggerType = TriggerType()
CurrentHash = CurrentHashProperty()
CurrentHeight = CurrentHeightProperty()
CurrentIndex = CurrentIndexProperty()
GetContract = GetContractMethod(ContractType)
GetBlock = GetBlockMethod(BlockType)
GetTransaction = GetTransactionMethod(TransactionType)
GetTransactionFromBlock = GetTransactionFromBlockMethod(TransactionType)
GetTransactionHeight = GetTransactionHeightMethod()
CallContract = CallMethod()
CreateContract = CreateMethod(ContractType)
CreateMultisigAccount = CreateMultisigAccountMethod()
CreateStandardAccount = CreateStandardAccountMethod()
DestroyContract = DestroyMethod()
GetCallFlags = GetCallFlagsMethod(CallFlagsType)
GetMinimumDeploymentFee = GetMinimumDeploymentFeeMethod()
UpdateContract = UpdateMethod()
GasScriptHash = GasProperty()
NeoScriptHash = NeoProperty()
ContractManagementScriptHash = ContractManagement
CryptoLibScriptHash = CryptoLibContract
LedgerScriptHash = LedgerContract
OracleScriptHash = OracleContract
StdLibScriptHash = StdLibContract
CheckMultisig = CheckMultisigMethod()
CheckSig = CheckSigMethod()
Hash160 = Hash160Method()
Hash256 = Hash256Method()
Ripemd160 = Ripemd160Method()
Sha256 = Sha256Method()
VerifyWithECDsa = VerifyWithECDsaMethod()
IteratorCreate = IteratorMethod(Iterator)
JsonDeserialize = JsonDeserializeMethod()
JsonSerialize = JsonSerializeMethod()
GetExecFeeFactor = GetExecFeeFactorMethod()
GetFeePerByte = GetFeePerByteMethod()
GetStoragePrice = GetStoragePriceMethod()
IsBlocked = IsBlockedMethod()
GetDesignatedByRole = GetDesignatedByRoleMethod()
BlockTime = BlockTimeProperty()
BurnGas = BurnGasMethod()
CallingScriptHash = CallingScriptHashProperty()
CheckWitness = CheckWitnessMethod()
EntryScriptHash = EntryScriptHashProperty()
ExecutingScriptHash = ExecutingScriptHashProperty()
GasLeft = GasLeftProperty()
GetNetwork = GetNetworkMethod()
GetNotifications = GetNotificationsMethod(NotificationType)
GetRandom = GetRandomMethod()
GetTrigger = GetTriggerMethod(TriggerType)
InvocationCounter = InvocationCounterProperty()
Log = LogMethod()
Notify = NotifyMethod()
Platform = PlatformProperty()
ScriptContainer = ScriptContainerProperty()
Atoi = AtoiMethod()
Base58CheckDecode = Base58CheckDecodeMethod()
Base58CheckEncode = Base58CheckEncodeMethod()
Base58Encode = Base58EncodeMethod()
Base58Decode = Base58DecodeMethod()
Base64Encode = Base64EncodeMethod()
Base64Decode = Base64DecodeMethod()
Deserialize = DeserializeMethod()
Itoa = ItoaMethod()
MemoryCompare = MemoryCompareMethod()
MemorySearch = MemorySearchMethod()
Serialize = SerializeMethod()
StorageDelete = StorageDeleteMethod()
StorageFind = StorageFindMethod(FindOptionsType)
StorageGetContext = StorageGetContextMethod(StorageContextType)
StorageGetReadOnlyContext = StorageGetReadOnlyContextMethod(StorageContextType)
StorageGet = StorageGetMethod()
StoragePut = StoragePutMethod()
BlockModule = Package(identifier=BlockType.identifier.lower(),
types=[BlockType]
)
TransactionModule = Package(identifier=TransactionType.identifier.lower(),
types=[TransactionType]
)
BlockchainPackage = Package(identifier=InteropPackage.Blockchain,
types=[BlockType,
TransactionType
],
methods=[CurrentHash,
CurrentHeight,
CurrentIndex,
GetBlock,
GetContract,
GetTransaction,
GetTransactionFromBlock,
GetTransactionHeight
],
packages=[BlockModule,
TransactionModule
]
)
CallFlagsTypeModule = Package(identifier=f'{CallFlagsType.identifier.lower()}type',
types=[CallFlagsType]
)
ContractModule = Package(identifier=ContractType.identifier.lower(),
types=[ContractType]
)
ContractManifestModule = Package(identifier=ContractManifestType.identifier.lower(),
types=[ContractAbiType.build(),
ContractEventDescriptorType.build(),
ContractGroupType.build(),
ContractManifestType,
ContractMethodDescriptorType.build(),
ContractParameterDefinitionType.build(),
ContractParameterType.build(),
ContractPermissionDescriptorType.build(),
ContractPermissionType.build()
]
)
ContractPackage = Package(identifier=InteropPackage.Contract,
types=[CallFlagsType,
ContractManifestType,
ContractType
],
properties=[GasScriptHash,
NeoScriptHash
],
methods=[CallContract,
CreateContract,
CreateMultisigAccount,
CreateStandardAccount,
DestroyContract,
GetCallFlags,
GetMinimumDeploymentFee,
UpdateContract
],
packages=[CallFlagsTypeModule,
ContractManifestModule,
ContractModule
]
)
CryptoPackage = Package(identifier=InteropPackage.Crypto,
types=[NamedCurveType],
methods=[CheckMultisig,
CheckSig,
Hash160,
Hash256,
Ripemd160,
Sha256,
VerifyWithECDsa,
]
)
IteratorPackage = Package(identifier=InteropPackage.Iterator,
types=[Iterator],
)
JsonPackage = Package(identifier=InteropPackage.Json,
methods=[JsonDeserialize,
JsonSerialize
]
)
NotificationModule = Package(identifier=NotificationType.identifier.lower(),
types=[NotificationType]
)
OracleResponseCodeModule = Package(identifier=OracleResponseCode.identifier.lower(),
types=[OracleResponseCode]
)
OracleModule = Package(identifier=OracleType.identifier.lower(),
types=[OracleType]
)
OraclePackage = Package(identifier=InteropPackage.Oracle,
types=[OracleResponseCode,
OracleType
],
packages=[OracleModule,
OracleResponseCodeModule
]
)
TriggerTypeModule = Package(identifier=TriggerType.identifier.lower(),
types=[TriggerType]
)
PolicyPackage = Package(identifier=InteropPackage.Policy,
methods=[GetExecFeeFactor,
GetFeePerByte,
GetStoragePrice,
IsBlocked
]
)
RolePackage = Package(identifier=InteropPackage.Role,
types=[RoleType],
methods=[GetDesignatedByRole]
)
RuntimePackage = Package(identifier=InteropPackage.Runtime,
types=[NotificationType,
TriggerType
],
properties=[BlockTime,
CallingScriptHash,
ExecutingScriptHash,
GasLeft,
Platform,
InvocationCounter,
EntryScriptHash,
ScriptContainer
],
methods=[BurnGas,
CheckWitness,
GetNetwork,
GetNotifications,
GetRandom,
GetTrigger,
Log,
Notify
],
packages=[NotificationModule,
TriggerTypeModule
]
)
FindOptionsModule = Package(identifier=FindOptionsType.identifier.lower(),
types=[FindOptionsType]
)
StdlibPackage = Package(identifier=InteropPackage.Stdlib,
methods=[Atoi,
Base58CheckDecode,
Base58CheckEncode,
Base58Encode,
Base58Decode,
Base64Encode,
Base64Decode,
Deserialize,
Itoa,
MemoryCompare,
MemorySearch,
Serialize
]
)
StorageContextModule = Package(identifier=StorageContextType.identifier.lower(),
types=[StorageContextType]
)
StorageMapModule = Package(identifier=StorageMapType.identifier.lower(),
types=[StorageMapType]
)
StoragePackage = Package(identifier=InteropPackage.Storage,
types=[FindOptionsType,
StorageContextType,
StorageMapType
],
methods=[StorageDelete,
StorageFind,
StorageGet,
StorageGetContext,
StorageGetReadOnlyContext,
StoragePut
],
packages=[FindOptionsModule,
StorageContextModule,
StorageMapModule
]
)
package_symbols: List[IdentifiedSymbol] = [
OracleType,
BlockchainPackage,
ContractPackage,
CryptoPackage,
IteratorPackage,
JsonPackage,
OraclePackage,
PolicyPackage,
RolePackage,
RuntimePackage,
StdlibPackage,
StoragePackage
]
_interop_symbols: Dict[InteropPackage, List[IdentifiedSymbol]] = {
package.identifier: list(package.symbols.values()) for package in package_symbols if
isinstance(package, Package)
}
| true | true |
f7229b55ecd3993deb78b6e7e2bd6b017d3a0275 | 866 | py | Python | twitoff/models.py | BrianThomasRoss/TwitOff | ae72e546f92719269abaa51b336139ca0992d016 | [
"MIT"
] | null | null | null | twitoff/models.py | BrianThomasRoss/TwitOff | ae72e546f92719269abaa51b336139ca0992d016 | [
"MIT"
] | 2 | 2022-01-13T02:05:35.000Z | 2022-03-12T00:14:39.000Z | twitoff/models.py | lambda-assignments/TwitOff | ae72e546f92719269abaa51b336139ca0992d016 | [
"MIT"
] | null | null | null | """SQL alchemy models for tweettweet"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class Tweeter(DB.Model):
    """Twitter users that we pull and analyze tweets for"""
    # Twitter's numeric user id; 64-bit, hence BigInteger.
    id = DB.Column(DB.BigInteger, primary_key=True)
    # Handle without the leading '@'; Twitter caps handles at 15 characters.
    handle = DB.Column(DB.String(15), nullable=False)
    # Id of the newest tweet already stored for this user — presumably used
    # to fetch only newer tweets on later pulls; confirm against the puller.
    # Nullable: unset until the first pull.
    newest_tweet_id = DB.Column(DB.BigInteger)

    def __repr__(self):
        """Readable representation for logs and debugging."""
        return "<Tweeter {}>".format(self.handle)
class Tweet(DB.Model):
    """Tweets tweeted from the tweeters"""
    # Tweet id; 64-bit, hence BigInteger.
    id = DB.Column(DB.BigInteger, primary_key=True)
    # Tweet text; 300 leaves headroom over Twitter's 280-character limit.
    text = DB.Column(DB.Unicode(300))
    # Pickled object produced elsewhere — presumably an embedding vector for
    # the text; PickleType stores any picklable Python object.
    embedding = DB.Column(DB.PickleType, nullable=False)
    # Owning Tweeter (many tweets -> one tweeter); backref 'tweets' below.
    tweeter_id = DB.Column(DB.BigInteger, DB.ForeignKey('tweeter.id'), nullable=False)
    tweeter = DB.relationship('Tweeter', backref=DB.backref('tweets', lazy=True))
def __repr__(self):
return "<Tweet {}>".format(self.text) | 36.083333 | 86 | 0.692841 | from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class Tweeter(DB.Model):
id = DB.Column(DB.BigInteger, primary_key=True)
handle = DB.Column(DB.String(15), nullable=False)
newest_tweet_id = DB.Column(DB.BigInteger)
def __repr__(self):
return "<Tweeter {}>".format(self.handle)
class Tweet(DB.Model):
id = DB.Column(DB.BigInteger, primary_key=True)
text = DB.Column(DB.Unicode(300))
embedding = DB.Column(DB.PickleType, nullable=False)
tweeter_id = DB.Column(DB.BigInteger, DB.ForeignKey('tweeter.id'), nullable=False)
tweeter = DB.relationship('Tweeter', backref=DB.backref('tweets', lazy=True))
def __repr__(self):
return "<Tweet {}>".format(self.text) | true | true |
f7229bc239bcd41c8d6326e4b76bcc58c2721b8b | 2,939 | py | Python | p3/management/commands/create_bulk_coupons.py | zevaverbach/epcon | 8352c030ee0d4197f559cdb58a54ee45c7a4471a | [
"BSD-2-Clause"
] | null | null | null | p3/management/commands/create_bulk_coupons.py | zevaverbach/epcon | 8352c030ee0d4197f559cdb58a54ee45c7a4471a | [
"BSD-2-Clause"
] | null | null | null | p3/management/commands/create_bulk_coupons.py | zevaverbach/epcon | 8352c030ee0d4197f559cdb58a54ee45c7a4471a | [
"BSD-2-Clause"
] | null | null | null |
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <count>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupond %r created' % c.code)
| 31.602151 | 79 | 0.550527 |
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
lass Command(BaseCommand):
args = '<conference> <count>'
dry_run = False
def add_arguments(self, parser):
parser.add_argument('conference')
parser.add_argument('csv')
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
continue
if code in all_codes:
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupond %r created' % c.code)
| true | true |
f7229c2105148fbd8e6af890f351595b4e547ac9 | 2,151 | py | Python | test/py/test_array.py | aaronenyeshi/AMDMIGraphX | 87528938188f0247f3dfcc6ab9b83c22187109fd | [
"MIT"
] | null | null | null | test/py/test_array.py | aaronenyeshi/AMDMIGraphX | 87528938188f0247f3dfcc6ab9b83c22187109fd | [
"MIT"
] | null | null | null | test/py/test_array.py | aaronenyeshi/AMDMIGraphX | 87528938188f0247f3dfcc6ab9b83c22187109fd | [
"MIT"
] | null | null | null | import migraphx, struct, array, sys
try:
from functools import reduce
except:
pass
def assert_eq(x, y):
    """Raise if *x* and *y* compare unequal; succeed silently otherwise."""
    if not (x == y):
        raise Exception(str(x) + " != " + str(y))
def read_float(b, index):
    """Return the 32-bit float stored at element position *index* of buffer *b*."""
    offset = index * 4  # 4 bytes per single-precision float
    (value,) = struct.unpack_from('f', b, offset)
    return value
def write_float(b, index, value=0.0):
    """Write *value* as a 32-bit float at element position *index* of *b*.

    *b* must be a writable buffer (e.g. bytearray).  Bug fix: the original
    called struct.pack_into with no value to pack, which always raised
    struct.error.  *value* defaults to 0.0 so the old two-argument call
    shape remains legal.
    """
    struct.pack_into('f', b, index * 4, value)
def nelements(lens):
    """Total element count of a tensor with dimensions *lens* (1 if empty)."""
    total = 1
    for extent in lens:
        total *= extent
    return total
def create_buffer(t, data, shape):
    """Pack *data* into an array of typecode *t* and return a memoryview of it.

    On Python 3 the view is cast to *shape*; on Python 2 memoryview cannot
    be cast, so a flat byte view is returned instead.
    """
    packed = array.array(t, data)
    if sys.version_info < (3, 0):
        return memoryview(packed.tostring())
    view = memoryview(packed.tobytes())
    return view.cast(t, shape)
def check_argument(a):
    """Verify tolist() of argument *a* matches a raw float read of each element."""
    values = a.tolist()
    for position, expected in enumerate(values):
        assert_eq(expected, read_float(a, position))
def check_shapes(r, m):
    """Check that migraphx argument *r*'s shape agrees with memoryview *m*."""
    dims = list(m.shape)
    # memoryview strides are in bytes; migraphx strides are in elements.
    element_strides = [int(s / m.itemsize) for s in m.strides]
    assert_eq(r.get_shape().elements(), nelements(dims))
    assert_eq(r.get_shape().lens(), dims)
    assert_eq(r.get_shape().strides(), element_strides)
def run(p):
    """Evaluate compiled program *p* on generated GPU inputs; return host result."""
    inputs = {
        name: migraphx.to_gpu(migraphx.generate_argument(shape))
        for name, shape in p.get_parameter_shapes().items()
    }
    return migraphx.from_gpu(p.run(inputs))
def test_shape(shape):
    """Round-trip a filled buffer of the given *shape* through migraphx.argument."""
    expected = list(range(nelements(shape)))
    view = create_buffer('f', expected, shape)
    arg = migraphx.argument(view)
    check_shapes(arg, view)
    assert_eq(arg.tolist(), expected)
def test_input():
    """Exercise migraphx.argument construction from buffers on both Python majors."""
    if sys.version_info < (3, 0):
        # Python 2: memoryview cannot be cast to a shape, so round-trip a
        # flat buffer through bytearray instead.
        data = list(range(4))
        m = create_buffer('f', data, [4])
        first = migraphx.argument(m)
        second = migraphx.argument(bytearray(first))
        check_shapes(second, m)
        assert_eq(first.tolist(), m.tolist())
    else:
        test_shape([4])
        test_shape([2, 3])
def test_output():
    """Run a compiled ONNX model twice and check the outputs are consistent."""
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    p.compile(migraphx.get_target("gpu"))
    # Two runs with freshly generated inputs; the equality checks below rely
    # on generate_argument being deterministic per shape — TODO confirm.
    r1 = run(p)
    r2 = run(p)
    assert_eq(r1, r2)
    assert_eq(r1.tolist(), r2.tolist())
    # Each result's tolist() must agree with raw float reads of its buffer.
    check_argument(r1)
    check_argument(r2)
    # The results support the buffer protocol; their memoryview geometry
    # must match the migraphx shape metadata.
    m1 = memoryview(r1)
    m2 = memoryview(r2)
    check_shapes(r1, m1)
    check_shapes(r2, m2)
test_input()
test_output()
| 23.380435 | 72 | 0.610414 | import migraphx, struct, array, sys
try:
from functools import reduce
except:
pass
def assert_eq(x, y):
if x == y:
pass
else:
raise Exception(str(x) + " != " + str(y))
def read_float(b, index):
return struct.unpack_from('f', b, index*4)[0]
def write_float(b, index):
struct.pack_into('f', b, index*4)
def nelements(lens):
return reduce(lambda x,y: x*y,lens, 1)
def create_buffer(t, data, shape):
a = array.array(t, data)
if sys.version_info >= (3, 0):
m = memoryview(a.tobytes())
return m.cast(t, shape)
else:
m = memoryview(a.tostring())
return m
def check_argument(a):
l = a.tolist()
for i in range(len(l)):
assert_eq(l[i], read_float(a, i))
def check_shapes(r, m):
lens = list(m.shape)
strides = [int(s/m.itemsize) for s in m.strides]
elements = nelements(lens)
assert_eq(r.get_shape().elements(), elements)
assert_eq(r.get_shape().lens(), lens)
assert_eq(r.get_shape().strides(), strides)
def run(p):
params = {}
for key, value in p.get_parameter_shapes().items():
params[key] = migraphx.to_gpu(migraphx.generate_argument(value))
return migraphx.from_gpu(p.run(params))
def test_shape(shape):
    """Round-trip a shaped float buffer through migraphx.argument and check
    that shape metadata and element values survive."""
    data = list(range(nelements(shape)))
    m = create_buffer('f', data, shape)
    a = migraphx.argument(m)
    check_shapes(a, m)
    assert_eq(a.tolist(), data)
def test_input():
    """Exercise buffer -> migraphx.argument conversion on both Python majors."""
    if sys.version_info >= (3, 0):
        # Python 3: memoryview.cast gives true multi-dimensional buffers.
        test_shape([4])
        test_shape([2, 3])
    else:
        # Python 2: only flat memoryviews exist; round-trip the argument
        # through a bytearray instead and compare against the original view.
        data = list(range(4))
        m = create_buffer('f', data, [4])
        a1 = migraphx.argument(m)
        a2 = migraphx.argument(bytearray(a1))
        check_shapes(a2, m)
        assert_eq(a1.tolist(), m.tolist())
def test_output():
    """Compile a small ONNX model for the GPU target and check that two
    consecutive runs produce identical, well-formed results."""
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    p.compile(migraphx.get_target("gpu"))
    r1 = run(p)
    r2 = run(p)
    # Determinism: the same compiled program must yield the same output.
    assert_eq(r1, r2)
    assert_eq(r1.tolist(), r2.tolist())
    check_argument(r1)
    check_argument(r2)
    # The results must also behave as proper Python buffers.
    m1 = memoryview(r1)
    m2 = memoryview(r2)
    check_shapes(r1, m1)
    check_shapes(r2, m2)
# Execute the suite when the script is run.
test_input()
test_output()
| true | true |
f7229c7b293e3307f6c5c5470b7fa3313ce1c4e9 | 3,019 | py | Python | runny-christmas/managers/SplashScreen.py | HighSoftWare96/py-games | b2700764b194c23442d04378d4b3773be71adf70 | [
"Apache-2.0"
] | null | null | null | runny-christmas/managers/SplashScreen.py | HighSoftWare96/py-games | b2700764b194c23442d04378d4b3773be71adf70 | [
"Apache-2.0"
] | null | null | null | runny-christmas/managers/SplashScreen.py | HighSoftWare96/py-games | b2700764b194c23442d04378d4b3773be71adf70 | [
"Apache-2.0"
] | null | null | null | from controllers.GameState import state
from pygame.font import Font
from pygame.surface import Surface
from pygame.locals import *
from helpers.config import config
from helpers.loaders import load_image
import os
from definitions import ROOT_DIR
from controllers.GameState import state, PAUSE_STATE, RUNNING_STATE, GAMEOVER_STATE
# Per-frame decrement applied to the title's alpha while the game is running.
FADE_IN_STEP = .5
# Screen centre, derived from the configured screen size.
CENTER_POSITION = tuple([i / 2 for i in config['SCREEN_SIZE']])
scoreColor = (255, 0, 0)    # red: main title colour
subColor = (255, 255, 255)  # white: subtitle colour (PAUSED / GAME OVER)
class SplashScreen():
    """Title/overlay layer: fades the game title out while the game runs and
    shows a PAUSED / GAME OVER subtitle, plus a clickable refresh icon."""
    def __init__(self):
        fullfontpathH1 = os.path.join(ROOT_DIR, 'assets/fonts/wintersoul.ttf')
        fullfontpathH2 = os.path.join(ROOT_DIR, 'assets/fonts/coolvetica.ttf')
        self.h1Font = Font(fullfontpathH1, 50)
        self.h2Font = Font(fullfontpathH2, 24)
        # Title alpha: 255 = fully visible, decays towards 0 while running.
        self.fadeInValue = 255
        self.refreshImage, self.refreshImageRect = load_image('refresh.png', (30, 30))
        # Anchor the refresh icon near the bottom-right corner of the screen.
        self.refreshImageRect.center = (config['SCREEN_SIZE'][0] - 20, config['SCREEN_SIZE'][1] - 20)
    def refreshClicked(self, pos):
        # True when *pos* (a mouse position) hits the refresh icon.
        return self.refreshImageRect.collidepoint(pos)
    def pause(self):
        # Restore full opacity so the title is visible while paused.
        self.fadeInValue = 255
    def update(self):
        self._renderh1()
        # Clear the subtitle; it is re-rendered below only when needed.
        self.textH2 = None
        self.rectH2 = None
        if state.state == RUNNING_STATE:
            # Reduce alpha each frame, but make sure it doesn't get below 0.
            self.fadeInValue = max(self.fadeInValue-FADE_IN_STEP, 0)
        elif state.state == PAUSE_STATE:
            self.fadeInValue = 255
            self._renderPause()
        elif state.state == GAMEOVER_STATE:
            self.fadeInValue = 255
            self._renderGameover()
    def draw(self, screen):
        screen.blit(self.refreshImage, self.refreshImageRect)
        screen.blit(self.textWithAlpha, self.rect)
        if self.textH2:
            screen.blit(self.textH2, self.rectH2)
    def _renderh1(self):
        # Render the main title centred on screen with the current alpha.
        self.text = self.h1Font.render('RUNNY CHRISTMAS', True, scoreColor)
        self.rect = self.text.get_rect()
        self.rect.center = CENTER_POSITION
        alpha_surf = Surface(self.text.get_size(), SRCALPHA)
        # Don't modify the original text surf.
        self.textWithAlpha = self.text.copy()
        # Fill alpha_surf with this color to set its alpha value.
        alpha_surf.fill((255, 255, 255, self.fadeInValue))
        # To make the text surface transparent, blit the transparent
        # alpha_surf onto it with the BLEND_RGBA_MULT flag.
        self.textWithAlpha.blit(
            alpha_surf, (0, 0), special_flags=BLEND_RGBA_MULT)
    def _renderPause(self):
        # Subtitle drawn 40px below the screen centre while paused.
        self.textH2 = self.h2Font.render('PAUSED', True, subColor)
        self.rectH2 = self.textH2.get_rect()
        self.rectH2.center = (CENTER_POSITION[0], CENTER_POSITION[1] + 40)
    def _renderGameover(self):
        # Subtitle drawn 40px below the screen centre after game over.
        self.textH2 = self.h2Font.render('GAME OVER!', True, subColor)
        self.rectH2 = self.textH2.get_rect()
        self.rectH2.center = (CENTER_POSITION[0], CENTER_POSITION[1] + 40)
| 38.21519 | 101 | 0.666446 | from controllers.GameState import state
from pygame.font import Font
from pygame.surface import Surface
from pygame.locals import *
from helpers.config import config
from helpers.loaders import load_image
import os
from definitions import ROOT_DIR
from controllers.GameState import state, PAUSE_STATE, RUNNING_STATE, GAMEOVER_STATE
# Per-frame decrement applied to the title's alpha while the game is running.
FADE_IN_STEP = .5
# Screen centre, derived from the configured screen size.
CENTER_POSITION = tuple([i / 2 for i in config['SCREEN_SIZE']])
scoreColor = (255, 0, 0)    # red: main title colour
subColor = (255, 255, 255)  # white: subtitle colour (PAUSED / GAME OVER)
class SplashScreen():
    """Title/overlay layer: fades the game title out while the game runs and
    shows a PAUSED / GAME OVER subtitle, plus a clickable refresh icon."""
    def __init__(self):
        fullfontpathH1 = os.path.join(ROOT_DIR, 'assets/fonts/wintersoul.ttf')
        fullfontpathH2 = os.path.join(ROOT_DIR, 'assets/fonts/coolvetica.ttf')
        self.h1Font = Font(fullfontpathH1, 50)
        self.h2Font = Font(fullfontpathH2, 24)
        # Title alpha: 255 = fully visible, decays towards 0 while running.
        self.fadeInValue = 255
        self.refreshImage, self.refreshImageRect = load_image('refresh.png', (30, 30))
        # Anchor the refresh icon near the bottom-right corner of the screen.
        self.refreshImageRect.center = (config['SCREEN_SIZE'][0] - 20, config['SCREEN_SIZE'][1] - 20)
    def refreshClicked(self, pos):
        # True when *pos* (a mouse position) hits the refresh icon.
        return self.refreshImageRect.collidepoint(pos)
    def pause(self):
        # Restore full opacity so the title is visible while paused.
        self.fadeInValue = 255
    def update(self):
        self._renderh1()
        # Clear the subtitle; it is re-rendered below only when needed.
        self.textH2 = None
        self.rectH2 = None
        if state.state == RUNNING_STATE:
            # Fade the title out, clamping the alpha at 0.
            self.fadeInValue = max(self.fadeInValue-FADE_IN_STEP, 0)
        elif state.state == PAUSE_STATE:
            self.fadeInValue = 255
            self._renderPause()
        elif state.state == GAMEOVER_STATE:
            self.fadeInValue = 255
            self._renderGameover()
    def draw(self, screen):
        screen.blit(self.refreshImage, self.refreshImageRect)
        screen.blit(self.textWithAlpha, self.rect)
        if self.textH2:
            screen.blit(self.textH2, self.rectH2)
    def _renderh1(self):
        # Render the main title centred on screen with the current alpha.
        self.text = self.h1Font.render('RUNNY CHRISTMAS', True, scoreColor)
        self.rect = self.text.get_rect()
        self.rect.center = CENTER_POSITION
        alpha_surf = Surface(self.text.get_size(), SRCALPHA)
        # Don't modify the original text surf.
        self.textWithAlpha = self.text.copy()
        # Fill with white at the current alpha, then multiply-blend onto the
        # title copy so the text inherits that alpha.
        alpha_surf.fill((255, 255, 255, self.fadeInValue))
        self.textWithAlpha.blit(
            alpha_surf, (0, 0), special_flags=BLEND_RGBA_MULT)
    def _renderPause(self):
        # Subtitle drawn 40px below the screen centre while paused.
        self.textH2 = self.h2Font.render('PAUSED', True, subColor)
        self.rectH2 = self.textH2.get_rect()
        self.rectH2.center = (CENTER_POSITION[0], CENTER_POSITION[1] + 40)
    def _renderGameover(self):
        # Subtitle drawn 40px below the screen centre after game over.
        self.textH2 = self.h2Font.render('GAME OVER!', True, subColor)
        self.rectH2 = self.textH2.get_rect()
        self.rectH2.center = (CENTER_POSITION[0], CENTER_POSITION[1] + 40)
| true | true |
f7229d180e33f0d2eb3eb9b79f6e4d8eb4f6a7c6 | 655 | py | Python | sheets/auth.py | jdtech3/YouTube-Archive-Tracker | ef2a547a683368af7df0c7c7df175c278ae1c760 | [
"MIT"
] | 3 | 2020-01-28T05:30:43.000Z | 2021-06-28T10:58:04.000Z | sheets/auth.py | jdtech3/YouTube-Archive-Tracker | ef2a547a683368af7df0c7c7df175c278ae1c760 | [
"MIT"
] | null | null | null | sheets/auth.py | jdtech3/YouTube-Archive-Tracker | ef2a547a683368af7df0c7c7df175c278ae1c760 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
auth.py - Creates an authenticated pygsheets object
"""
# Libs
import pygsheets
import pathlib
from config import config # import config
# Grab the service account auth file location from the config.
SA_auth_file = pathlib.Path(config['auth_file'])
# Authorize with the Google service account and open the target spreadsheet.
client = pygsheets.authorize(service_file=SA_auth_file)
spreadsheet = client.open(config['spreadsheet_name'])
# Module-level worksheet handles reused across the package.
tracker_wks = spreadsheet.worksheet_by_title(config['tracker_worksheet_name'])
requests_wks = spreadsheet.worksheet_by_title(config['requests_worksheet_name'])
stats_wks = spreadsheet.worksheet_by_title(config['stats_worksheet_name'])
| 29.772727 | 80 | 0.79542 |
import pygsheets
import pathlib
from config import config
# Service-account credentials file path, taken from the config.
SA_auth_file = pathlib.Path(config['auth_file'])
# Authorize with the Google service account and open the target spreadsheet.
client = pygsheets.authorize(service_file=SA_auth_file)
spreadsheet = client.open(config['spreadsheet_name'])
# Module-level worksheet handles reused across the package.
tracker_wks = spreadsheet.worksheet_by_title(config['tracker_worksheet_name'])
requests_wks = spreadsheet.worksheet_by_title(config['requests_worksheet_name'])
stats_wks = spreadsheet.worksheet_by_title(config['stats_worksheet_name'])
| true | true |
f7229d504f1344ccfec139a0b6f51b07a1a0b28b | 5,259 | py | Python | tools/wptrunner/wptrunner/manifestinclude.py | loonybear/web-platform-tests | 9a18bbb6a0243e39d4e2b569281027b3c832e323 | [
"BSD-3-Clause"
] | 1 | 2021-06-03T09:31:09.000Z | 2021-06-03T09:31:09.000Z | tools/wptrunner/wptrunner/manifestinclude.py | loonybear/web-platform-tests | 9a18bbb6a0243e39d4e2b569281027b3c832e323 | [
"BSD-3-Clause"
] | null | null | null | tools/wptrunner/wptrunner/manifestinclude.py | loonybear/web-platform-tests | 9a18bbb6a0243e39d4e2b569281027b3c832e323 | [
"BSD-3-Clause"
] | 1 | 2020-03-31T17:20:54.000Z | 2020-03-31T17:20:54.000Z | """Manifest structure used to store paths that should be included in a test run.
The manifest is represented by a tree of IncludeManifest objects, the root
representing the file and each subnode representing a subdirectory that should
be included or excluded.
"""
import glob
import os
import urlparse
from wptmanifest.node import DataNode
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
class IncludeManifest(ManifestItem):
    """Tree of url path components with per-node include/exclude ("skip")
    flags, used to decide which tests belong in a test run."""
    def __init__(self, node):
        """Node in a tree structure representing the paths
        that should be included or excluded from the test run.
        :param node: AST Node corresponding to this Node.
        """
        ManifestItem.__init__(self, node)
        # name -> child node index, kept in sync with self.children.
        self.child_map = {}
    @classmethod
    def create(cls):
        """Create an empty IncludeManifest tree"""
        node = DataNode(None)
        return cls(node)
    def append(self, child):
        # Index the child by name in addition to the base-class child list.
        ManifestItem.append(self, child)
        self.child_map[child.name] = child
        assert len(self.child_map) == len(self.children)
    def include(self, test):
        """Return a boolean indicating whether a particular test should be
        included in a test run, based on the IncludeManifest tree rooted on
        this object.
        :param test: The test object"""
        path_components = self._get_components(test.url)
        return self._include(test, path_components)
    def _include(self, test, path_components):
        # Descend into the tree following the url components (deepest match
        # wins); once no deeper match exists, consult the "skip" flag.
        if path_components:
            next_path_part = path_components.pop()
            if next_path_part in self.child_map:
                return self.child_map[next_path_part]._include(test, path_components)
        node = self
        while node:
            try:
                # NOTE(review): this queries self.get on every pass while the
                # loop walks *node* up the tree - confirm ManifestItem.get
                # itself performs the parent traversal as intended.
                skip_value = self.get("skip", {"test_type": test.item_type}).lower()
                assert skip_value in ("true", "false")
                return skip_value != "true"
            except KeyError:
                if node.parent is not None:
                    node = node.parent
                else:
                    # Include by default
                    return True
    def _get_components(self, url):
        # Split *url* into path components, reversed so that callers can
        # consume them with pop(); any query/fragment becomes a leading
        # "variant" component so variants can be targeted individually.
        rv = []
        url_parts = urlparse.urlsplit(url)
        variant = ""
        if url_parts.query:
            variant += "?" + url_parts.query
        if url_parts.fragment:
            variant += "#" + url_parts.fragment
        if variant:
            rv.append(variant)
        rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
        return rv
    def _add_rule(self, test_manifests, url, direction):
        # *url* may be a filesystem path (possibly with ?query/#fragment and
        # glob characters); if it matches files on disk, map each file back
        # to test urls via the test manifests, otherwise treat it as a url.
        maybe_path = os.path.join(os.path.abspath(os.curdir), url)
        rest, last = os.path.split(maybe_path)
        fragment = query = None
        if "#" in last:
            last, fragment = last.rsplit("#", 1)
        if "?" in last:
            last, query = last.rsplit("?", 1)
        maybe_path = os.path.join(rest, last)
        paths = glob.glob(maybe_path)
        if paths:
            urls = []
            for path in paths:
                for manifest, data in test_manifests.iteritems():
                    found = False
                    rel_path = os.path.relpath(path, data["tests_path"])
                    for test in manifest.iterpath(rel_path):
                        if not hasattr(test, "url"):
                            continue
                        url = test.url
                        if query or fragment:
                            # Only keep tests whose variant matches the
                            # query/fragment the caller asked for.
                            parsed = urlparse.urlparse(url)
                            if ((query and query != parsed.query) or
                                (fragment and fragment != parsed.fragment)):
                                continue
                        urls.append(url)
                        found = True
                    if found:
                        break
        else:
            urls = [url]
        assert direction in ("include", "exclude")
        for url in urls:
            # Create any missing tree nodes along the url's component path;
            # new nodes inherit the parent's current "skip" setting.
            components = self._get_components(url)
            node = self
            while components:
                component = components.pop()
                if component not in node.child_map:
                    new_node = IncludeManifest(DataNode(component))
                    node.append(new_node)
                    new_node.set("skip", node.get("skip", {}))
                node = node.child_map[component]
            # include -> skip=False, exclude -> skip=True on the leaf node.
            skip = False if direction == "include" else True
            node.set("skip", str(skip))
    def add_include(self, test_manifests, url_prefix):
        """Add a rule indicating that tests under a url path
        should be included in test runs
        :param url_prefix: The url prefix to include
        """
        return self._add_rule(test_manifests, url_prefix, "include")
    def add_exclude(self, test_manifests, url_prefix):
        """Add a rule indicating that tests under a url path
        should be excluded from test runs
        :param url_prefix: The url prefix to exclude
        """
        return self._add_rule(test_manifests, url_prefix, "exclude")
def get_manifest(manifest_path):
    """Parse the include-manifest file at *manifest_path* into an
    IncludeManifest tree."""
    with open(manifest_path) as f:
        return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
| 35.06 | 85 | 0.572162 | import glob
import os
import urlparse
from wptmanifest.node import DataNode
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
class IncludeManifest(ManifestItem):
    """Tree of url path components with per-node include/exclude ("skip")
    flags, used to decide which tests belong in a test run."""
    def __init__(self, node):
        """Wrap an AST *node*; ``child_map`` indexes children by name."""
        ManifestItem.__init__(self, node)
        self.child_map = {}
    @classmethod
    def create(cls):
        """Create an empty IncludeManifest tree."""
        node = DataNode(None)
        return cls(node)
    def append(self, child):
        # Index the child by name in addition to the base-class child list.
        ManifestItem.append(self, child)
        self.child_map[child.name] = child
        assert len(self.child_map) == len(self.children)
    def include(self, test):
        """Return True if *test* should be included in the run."""
        path_components = self._get_components(test.url)
        return self._include(test, path_components)
    def _include(self, test, path_components):
        # Descend following the url components (deepest match wins), then
        # consult the "skip" flag.
        if path_components:
            next_path_part = path_components.pop()
            if next_path_part in self.child_map:
                return self.child_map[next_path_part]._include(test, path_components)
        node = self
        while node:
            try:
                # NOTE(review): queries self.get on every pass while *node*
                # walks up the tree - confirm the parent traversal semantics.
                skip_value = self.get("skip", {"test_type": test.item_type}).lower()
                assert skip_value in ("true", "false")
                return skip_value != "true"
            except KeyError:
                if node.parent is not None:
                    node = node.parent
                else:
                    # No "skip" anywhere up the tree: include by default.
                    return True
    def _get_components(self, url):
        # Reversed path components (for pop()); query/fragment become a
        # leading "variant" component.
        rv = []
        url_parts = urlparse.urlsplit(url)
        variant = ""
        if url_parts.query:
            variant += "?" + url_parts.query
        if url_parts.fragment:
            variant += "#" + url_parts.fragment
        if variant:
            rv.append(variant)
        rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
        return rv
    def _add_rule(self, test_manifests, url, direction):
        # *url* may be a filesystem path (with optional ?query/#fragment and
        # glob characters); matching files are mapped back to test urls via
        # the manifests, otherwise *url* is used as-is.
        maybe_path = os.path.join(os.path.abspath(os.curdir), url)
        rest, last = os.path.split(maybe_path)
        fragment = query = None
        if "#" in last:
            last, fragment = last.rsplit("#", 1)
        if "?" in last:
            last, query = last.rsplit("?", 1)
        maybe_path = os.path.join(rest, last)
        paths = glob.glob(maybe_path)
        if paths:
            urls = []
            for path in paths:
                for manifest, data in test_manifests.iteritems():
                    found = False
                    rel_path = os.path.relpath(path, data["tests_path"])
                    for test in manifest.iterpath(rel_path):
                        if not hasattr(test, "url"):
                            continue
                        url = test.url
                        if query or fragment:
                            # Keep only the matching variant, if one was given.
                            parsed = urlparse.urlparse(url)
                            if ((query and query != parsed.query) or
                                (fragment and fragment != parsed.fragment)):
                                continue
                        urls.append(url)
                        found = True
                    if found:
                        break
        else:
            urls = [url]
        assert direction in ("include", "exclude")
        for url in urls:
            # Create missing nodes along the component path; new nodes
            # inherit the parent's current "skip" setting.
            components = self._get_components(url)
            node = self
            while components:
                component = components.pop()
                if component not in node.child_map:
                    new_node = IncludeManifest(DataNode(component))
                    node.append(new_node)
                    new_node.set("skip", node.get("skip", {}))
                node = node.child_map[component]
            # include -> skip=False, exclude -> skip=True on the leaf node.
            skip = False if direction == "include" else True
            node.set("skip", str(skip))
    def add_include(self, test_manifests, url_prefix):
        """Mark tests under *url_prefix* as included in test runs."""
        return self._add_rule(test_manifests, url_prefix, "include")
    def add_exclude(self, test_manifests, url_prefix):
        """Mark tests under *url_prefix* as excluded from test runs."""
        return self._add_rule(test_manifests, url_prefix, "exclude")
def get_manifest(manifest_path):
    """Parse the include-manifest file at *manifest_path* into an
    IncludeManifest tree."""
    with open(manifest_path) as f:
        return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
| true | true |
f7229fa565f57720bda3ad416ac604d502083a45 | 2,182 | py | Python | torchFunc.py | ljbuaa/HLDAGN | 787462a43d6c0f47dc1ebfd6ef9bfbd1eb5246a7 | [
"MIT"
] | null | null | null | torchFunc.py | ljbuaa/HLDAGN | 787462a43d6c0f47dc1ebfd6ef9bfbd1eb5246a7 | [
"MIT"
] | null | null | null | torchFunc.py | ljbuaa/HLDAGN | 787462a43d6c0f47dc1ebfd6ef9bfbd1eb5246a7 | [
"MIT"
] | null | null | null | from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
    """Scale tensor *v* to unit L2 norm; *eps* guards against division by zero."""
    denom = v.norm() + eps
    return v / denom
class SpectralNorm(nn.Module):
    def __init__(self, module, name='weight', power_iterations=1):
        """Wrap *module*, applying spectral normalisation to its parameter
        *name*, refining the estimate with *power_iterations* rounds of power
        iteration on every forward pass."""
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # Create the u/v/w_bar parameters only on first construction.
        if not self._made_params():
            self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
    def _make_params(self):
        # Flatten the weight to (height, width) and create the power-iteration
        # state: random unit vectors u (left) and v (right), plus w_bar, the
        # raw weight re-registered under "<name>_bar".
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        # Drop the original parameter so ``module.<name>`` can be assigned a
        # plain normalised tensor on each forward pass.
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args) | 33.569231 | 83 | 0.610907 | from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
    """Scale tensor *v* to unit L2 norm; *eps* guards against division by zero."""
    denom = v.norm() + eps
    return v / denom
class SpectralNorm(nn.Module):
    def __init__(self, module, name='weight', power_iterations=1):
        """Wrap *module*, applying spectral normalisation to its parameter
        *name*, refining the estimate with *power_iterations* rounds of power
        iteration on every forward pass."""
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # Create the u/v/w_bar parameters only on first construction.
        if not self._made_params():
            self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
    def _make_params(self):
        # Flatten the weight to (height, width) and create the power-iteration
        # state: random unit vectors u (left) and v (right), plus w_bar, the
        # raw weight re-registered under "<name>_bar".
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        # Drop the original parameter so ``module.<name>`` can be assigned a
        # plain normalised tensor on each forward pass.
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args) | true | true |
f7229fc0ac4d9ca6f1176a89e5615bb3dc9d9a9d | 94 | py | Python | cryptofeed_werks/exchanges/__init__.py | globophobe/django-cryptofeed-werks | dd7c04189a0d1582b115298fbe44cf1e0cb969f2 | [
"MIT"
] | 7 | 2021-12-30T02:38:17.000Z | 2022-03-08T16:14:35.000Z | cryptofeed_werks/exchanges/__init__.py | globophobe/django-cryptofeed-werks | dd7c04189a0d1582b115298fbe44cf1e0cb969f2 | [
"MIT"
] | null | null | null | cryptofeed_werks/exchanges/__init__.py | globophobe/django-cryptofeed-werks | dd7c04189a0d1582b115298fbe44cf1e0cb969f2 | [
"MIT"
] | 1 | 2022-01-28T00:18:45.000Z | 2022-01-28T00:18:45.000Z | from .api import api, candles_api, trades_api
__all__ = ["api", "trades_api", "candles_api"]
| 23.5 | 46 | 0.723404 | from .api import api, candles_api, trades_api
__all__ = ["api", "trades_api", "candles_api"]
| true | true |
f7229fecc8abcfa996481e7128a83b81f606b917 | 1,314 | py | Python | gdmix-trainer/setup.py | seraconlp/gdmix | a7405c4dde9b201741f44d4ac954b7e3492b088d | [
"BSD-2-Clause"
] | null | null | null | gdmix-trainer/setup.py | seraconlp/gdmix | a7405c4dde9b201741f44d4ac954b7e3492b088d | [
"BSD-2-Clause"
] | null | null | null | gdmix-trainer/setup.py | seraconlp/gdmix | a7405c4dde9b201741f44d4ac954b7e3492b088d | [
"BSD-2-Clause"
] | null | null | null | from pathlib import Path
from setuptools import find_namespace_packages, setup
from sys import platform as _platform
import sys
VERSION="0.3.0"  # package version, interpolated into setup() below
current_dir = Path(__file__).resolve().parent
# Use the package README as the long description shown on PyPI.
with open(current_dir.joinpath('README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Only Linux and macOS are supported; bail out early elsewhere.
if _platform not in ["linux", "linux2", "darwin"]:
    print("ERROR: platform {} isn't supported".format(_platform))
    sys.exit(1)
setup(
    name="gdmix-trainer",
    python_requires='>=3.7',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=["Programming Language :: Python :: 3.7",
                 "Intended Audience :: Science/Research",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved"],
    license='BSD-2-CLAUSE',
    version=f'{VERSION}',
    # Sources live under src/ (src-layout package).
    package_dir={'': 'src'},
    packages=find_namespace_packages(where='src'),
    include_package_data=True,
    # Pinned runtime dependencies (TensorFlow 1.15 training stack).
    install_requires=[
        "setuptools>=41.0.0",
        "tensorflow==1.15.2",
        "tensorflow_ranking==0.1.4",
        "fastavro==0.21.22",
        "decorator==4.4.2",
        "detext-nodep==2.0.9",
        "psutil==5.7.0",
        "scipy==1.3.2",
        "scikit-learn==0.21.2",
        "smart-arg==0.2.12"
    ],
    tests_require=['pytest']
)
| 29.2 | 68 | 0.619482 | from pathlib import Path
from setuptools import find_namespace_packages, setup
from sys import platform as _platform
import sys
VERSION="0.3.0"  # package version, interpolated into setup() below
current_dir = Path(__file__).resolve().parent
# Use the package README as the long description shown on PyPI.
with open(current_dir.joinpath('README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Only Linux and macOS are supported; bail out early elsewhere.
if _platform not in ["linux", "linux2", "darwin"]:
    print("ERROR: platform {} isn't supported".format(_platform))
    sys.exit(1)
setup(
    name="gdmix-trainer",
    python_requires='>=3.7',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=["Programming Language :: Python :: 3.7",
                 "Intended Audience :: Science/Research",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved"],
    license='BSD-2-CLAUSE',
    version=f'{VERSION}',
    # Sources live under src/ (src-layout package).
    package_dir={'': 'src'},
    packages=find_namespace_packages(where='src'),
    include_package_data=True,
    # Pinned runtime dependencies (TensorFlow 1.15 training stack).
    install_requires=[
        "setuptools>=41.0.0",
        "tensorflow==1.15.2",
        "tensorflow_ranking==0.1.4",
        "fastavro==0.21.22",
        "decorator==4.4.2",
        "detext-nodep==2.0.9",
        "psutil==5.7.0",
        "scipy==1.3.2",
        "scikit-learn==0.21.2",
        "smart-arg==0.2.12"
    ],
    tests_require=['pytest']
)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.