hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38c7711f8e19eb8f9cf38250d7d62b8cf546d400 | 2,297 | py | Python | core/ctc_utils.py | igormq/asr-study | 302fa3087cc71aec4853360638dbe2f4a59b5726 | [
"MIT"
] | 155 | 2017-03-12T22:56:56.000Z | 2021-11-23T09:03:57.000Z | core/ctc_utils.py | igormq/asr-study | 302fa3087cc71aec4853360638dbe2f4a59b5726 | [
"MIT"
] | 7 | 2017-06-08T08:27:06.000Z | 2019-06-17T05:21:07.000Z | core/ctc_utils.py | igormq/asr-study | 302fa3087cc71aec4853360638dbe2f4a59b5726 | [
"MIT"
] | 72 | 2017-03-16T12:10:04.000Z | 2021-10-16T10:34:50.000Z | import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
def decode(inputs, **kwargs):
""" Decodes a sequence of probabilities choosing the path with highest
probability of occur
# Arguments
is_greedy: if True (default) the greedy decoder will be used;
otherwise beam search decoder will be used
if is_greedy is False:
see the documentation of tf.nn.ctc_beam_search_decoder for more
options
# Inputs
A tuple (y_pred, seq_len) where:
y_pred is a tensor (N, T, C) where N is the batch size, T is the
maximum timestep and C is the number of classes (including the
blank label)
seq_len is a tensor (N,) that indicates the real number of
timesteps of each sequence
# Outputs
A sparse tensor with the top path decoded sequence
"""
# Little hack for load_model
import tensorflow as tf
is_greedy = kwargs.get('is_greedy', True)
y_pred, seq_len = inputs
seq_len = tf.cast(seq_len[:, 0], tf.int32)
y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
if is_greedy:
decoded = tf.nn.ctc_greedy_decoder(y_pred, seq_len)[0][0]
else:
beam_width = kwargs.get('beam_width', 100)
top_paths = kwargs.get('top_paths', 1)
merge_repeated = kwargs.get('merge_repeated', True)
decoded = tf.nn.ctc_beam_search_decoder(y_pred, seq_len, beam_width,
top_paths,
merge_repeated)[0][0]
return decoded
def decode_output_shape(inputs_shape):
y_pred_shape, seq_len_shape = inputs_shape
return (y_pred_shape[:1], None)
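# A hedged usage sketch (not part of the original module): wiring `decode`
# into a Keras graph through a Lambda layer. The tensors `y_pred` and
# `seq_len` below are illustrative placeholders.
#
#   decoded = keras.layers.Lambda(
#       decode, output_shape=decode_output_shape,
#       arguments={'is_greedy': False, 'beam_width': 200},
#       name='decoder')([y_pred, seq_len])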
def ctc_lambda_func(args):
""" CTC cost function
"""
y_pred, labels, inputs_length = args
# Little hack for load_model
import tensorflow as tf
return tf.nn.ctc_loss(labels,
tf.transpose(y_pred, perm=[1, 0, 2]),
inputs_length[:, 0])
def ctc_dummy_loss(y_true, y_pred):
""" Little hack to make CTC working with Keras
"""
return y_pred
def decoder_dummy_loss(y_true, y_pred):
""" Little hack to make CTC working with Keras
"""
return K.zeros((1,))
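# A hedged compile sketch (the output names 'ctc' and 'decoder' are
# assumptions): `ctc_dummy_loss` makes Keras treat the CTC output tensor
# itself as the loss, while `decoder_dummy_loss` zeroes out the decoder
# branch during training.
#
#   model.compile(optimizer='adam',
#                 loss={'ctc': ctc_dummy_loss, 'decoder': decoder_dummy_loss})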
| 27.674699 | 76 | 0.616021 | 330 | 2,297 | 4.093939 | 0.333333 | 0.051813 | 0.020725 | 0.032568 | 0.242043 | 0.220577 | 0.185048 | 0.185048 | 0.150999 | 0.088823 | 0 | 0.012523 | 0.304745 | 2,297 | 82 | 77 | 28.012195 | 0.833438 | 0.383979 | 0 | 0.090909 | 0 | 0 | 0.031842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.181818 | 0 | 0.484848 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38c77c920c890cca2c4c7892675a5fe84d8d6244 | 2,306 | py | Python | atm/grids/poi_grid.py | gina-alaska/arctic_thermokarst_model | 7a3dbedb72b133670bb6e476fc3f5788bbcdbca4 | [
"MIT"
] | null | null | null | atm/grids/poi_grid.py | gina-alaska/arctic_thermokarst_model | 7a3dbedb72b133670bb6e476fc3f5788bbcdbca4 | [
"MIT"
] | null | null | null | atm/grids/poi_grid.py | gina-alaska/arctic_thermokarst_model | 7a3dbedb72b133670bb6e476fc3f5788bbcdbca4 | [
"MIT"
] | null | null | null | """
POI_grid
--------
POI: probability of instantiation
"""
import numpy as np
import os
from .constants import ROW, COL, create_deepcopy
import copy
from multigrids import TemporalMultiGrid
class POIGrid (TemporalMultiGrid):
""" Class doc """
def __init__ (self, *args, **kwargs):
"""This class represents each cohorts POI for the model grid at
each time step
.. note:: Note on grid coordinates
Origin (Y,X) is top left. rows = Y, cols = X
Object will store dimensional(resolution, dimensions)
metadata as a tuple (Y val, X val).
Parameters
----------
Config: Dict
should have keys 'start year', 'cohort list', and 'shape'
Attributes
----------
shape : tuple of ints
Shape of the grid (y,x) (rows,columns)
grid : array
This 3d array is the grid data at each time step.
The first dimension is the time step with 0 being the initial data.
The second dimension is the flat grid for given cohort, mapped using
key_to_index. The third dimension is the grid element. Each cohort
can be reshaped using shape to get the proper grid
init_grid: np.ndarray
starting POI grid
key_to_index : dict
Maps canon cohort names to the index for that cohort in the
data object
"""
config = args[0]
if type(config) is str:
super(POIGrid , self).__init__(*args, **kwargs)
else:
grid_names = config['_FAST_get_cohorts']  # ['cohorts']
args = [
config['grid_shape'][ROW], config['grid_shape'][COL],
len(grid_names), config['model length']
]
kwargs = create_deepcopy(config)
kwargs['data_type'] = 'float32'
kwargs['mode'] = 'r+'
kwargs['grid_names'] = grid_names
super(POIGrid , self).__init__(*args, **kwargs)
self.config['start_timestep'] = config['start_year']
# self.start_year = int(config['initialization year'])
# self.shape = config['shape']
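# A hedged construction sketch: the config keys follow the docstring and the
# attribute accesses above; the concrete values are illustrative assumptions.
#
#   config = {
#       '_FAST_get_cohorts': ['cohort_a', 'cohort_b'],
#       'grid_shape': (50, 100),   # (rows, cols)
#       'model length': 100,       # number of time steps
#       'start_year': 1900,
#   }
#   poi = POIGrid(config)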
| 29.948052 | 82 | 0.549003 | 265 | 2,306 | 4.65283 | 0.445283 | 0.016221 | 0.034063 | 0.022709 | 0.048662 | 0.048662 | 0 | 0 | 0 | 0 | 0 | 0.003376 | 0.357762 | 2,306 | 77 | 83 | 29.948052 | 0.829169 | 0.480919 | 0 | 0.090909 | 0 | 0 | 0.113514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.227273 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38c9bc66bb258fbf3240f168bc8bd83e27e017b0 | 6,996 | py | Python | src/pdf/utils.py | ChakshuGautam/PDF-Package | e054a4125c8bb979f0e2ac6a8137523868742d8a | [
"MIT"
] | null | null | null | src/pdf/utils.py | ChakshuGautam/PDF-Package | e054a4125c8bb979f0e2ac6a8137523868742d8a | [
"MIT"
] | null | null | null | src/pdf/utils.py | ChakshuGautam/PDF-Package | e054a4125c8bb979f0e2ac6a8137523868742d8a | [
"MIT"
] | null | null | null | import os
import re
import traceback
import requests
import aspose.words as aw
from bs4 import BeautifulSoup
from django.http import JsonResponse
import pdfkit
from minio import Minio
from requests import HTTPError
from .models import Doc
def get_sample_data():
sample_data = {
"1": "Developer",
"17": "09-02-2022",
"2": "Software engineer",
"4": "D.K",
"6": "R.V Bangalore",
"7": "09-02-2022",
"8": "3",
"9": "Rahul",
"10": "Math",
"11": "50",
"12": "50",
"13": "Yes",
"14": "Yes",
"15": "5",
"16": "5",
"17": "5",
"18": "Yes",
"19": "Yes",
"20": "Concept",
"21": "Teaching way",
"22": "Practical",
"23": "Yes",
"24": "3 hours",
"25": "100%",
"26": "100%",
"27": "Yes",
"28": "09-02-2022",
"29": "Math",
"30": "Rahul",
"31": "50",
"32": "50",
"33": "Yes",
"34": "Yes",
"35": "5",
"36": "5",
"37": "5",
"38": "Yes",
"39": "Yes",
"40": "Concept",
"41": "Teaching way",
"42": "Practical",
"43": "Yes",
"44": "3 hours",
"45": "100%",
"46": "100%",
"47": "Yes",
"48": "09-02-2022",
}
return sample_data
def return_response(final_data, error_code, error_text):
"""
Builds the JSON response returned by all APIs
Args:
final_data:
error_code:
error_text:
Returns:
response
"""
# Adding the response status code
if error_code is None:
status_code = 200
response = JsonResponse({"data": final_data},
safe=False,
status=status_code)
else:
if error_code == 802:
return JsonResponse({"error": [{
"code": error_code,
"message": error_text
}]}, status=401)
elif error_code == 500:
return JsonResponse({"error": [{
"code": error_code,
"message": error_text
}]}, status=500)
else:
response = JsonResponse(
{"error": [{
"code": error_code,
"message": error_text
}]},
safe=False,
status=200)
return response
def return_tokens(final_data, error_code, error_text):
"""
Builds the JSON token response returned by all APIs
Args:
final_data:
error_code:
error_text:
Returns:
response
"""
# Adding the response status code
if error_code is None:
status_code = 200
response = JsonResponse({"tokens": final_data},
safe=False,
status=status_code)
else:
if error_code == 802:
return JsonResponse({"error": [{
"code": error_code,
"message": error_text
}]}, status=401)
elif error_code == 500:
return JsonResponse({"error": [{
"code": error_code,
"message": error_text
}]}, status=500)
else:
response = JsonResponse(
{"error": [{
"code": error_code,
"message": error_text
}]},
safe=False,
status=200)
return response
def format_html(html_str, data):
# html_file = open(f'pdf/drivefiles/{doc_id}.html')
# html_doc = html_file.read()
soup = BeautifulSoup(html_str, 'html.parser')
old_text = soup.find_all(text=re.compile("<<"))
for i in old_text:
updated_text = re.findall(r"<<(.*?)>>", i)
new_text = data[updated_text[0]]
if new_text is None:
i.replace_with("Not provided")
else:
i.replace_with(new_text)
str_html = soup.prettify()
# html_file.close()
# os.remove(f'pdf/drivefiles/{doc_id}.html')
return str_html
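# A hedged usage sketch: filling the <<key>> placeholders of an HTML
# template with the sample data above. The template string is an
# illustrative assumption.
#
#   template = "<p>Designation: <<1>></p><p>Role: <<2>></p>"
#   filled = format_html(template, get_sample_data())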
def build_pdf(html_str, file_name):
is_successful = error = None
drive_file_loc = f'pdf/drivefiles/{file_name}.pdf'
try:
path_wkhtmltopdf = r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'
config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
pdfkit.from_string(html_str, drive_file_loc, configuration=config)
is_successful = True
except Exception as e:
traceback.print_exc()
error = f"Failed to generate doc: {e}"
return is_successful, error
def build_doc(html_str, file_name):
drive_file_loc = f'pdf/drivefiles/{file_name}.docx'
try:
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.insert_html(html_str)
doc.save(drive_file_loc)
return True
except Exception:
traceback.print_exc()
return False
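# A hedged end-to-end sketch: rendering a filled template to both PDF and
# DOCX. The output paths follow `drive_file_loc` above; the template and
# file name are assumptions, and `build_pdf` additionally needs a local
# wkhtmltopdf install.
#
#   html = format_html(template, get_sample_data())
#   ok, err = build_pdf(html, 'report')   # -> pdf/drivefiles/report.pdf
#   built = build_doc(html, 'report')     # -> pdf/drivefiles/report.docx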
def send_get_request(url, params=None, headers=None):
try:
request = requests.get(url, params=params, headers=headers)
request.raise_for_status()
return request.json()
except HTTPError as http_err:
return JsonResponse(
{"error": [{
"code": request.status_code,
"message": http_err
}]},
safe=False,
status=200)
except ValueError:
traceback.print_exc()
return request.content
except Exception as err:
return JsonResponse(
{"error": [{
"code": 804,
"message": f"Something went wrong!: {e}"
}]},
safe=False,
status=200)
def send_post_request(url, params=None, data=None, json=None, headers=None):
try:
request = requests.post(url, params=params, json=json, data=data, headers=headers)
request.raise_for_status()
return request.json()
except HTTPError as http_err:
return JsonResponse(
{"error": [{
"code": request.status_code,
"message": http_err
}]},
safe=False,
status=200)
except ValueError:
traceback.print_exc()
return request.content
except Exception as e:
return JsonResponse(
{"error": [{
"code": 804,
"message": f"Something went wrong!: {e}"
}]},
safe=False,
status=200)
def publish_to_url(pdf_id, url, headers=None):
pdf = Doc.objects.get(pk=pdf_id)
response = send_post_request(url, data=pdf, headers=headers)
return response
| 27.435294 | 91 | 0.48928 | 721 | 6,996 | 4.592233 | 0.277393 | 0.070674 | 0.042283 | 0.065237 | 0.520991 | 0.520085 | 0.486258 | 0.486258 | 0.46572 | 0.46572 | 0 | 0.047575 | 0.387078 | 6,996 | 254 | 92 | 27.543307 | 0.72458 | 0.064751 | 0 | 0.487562 | 0 | 0 | 0.115428 | 0.015799 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.054726 | 0 | 0.199005 | 0.019901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38cb66c969dcb93ecefa119784b11438254edb95 | 4,796 | py | Python | process_multiple_simtel_files.py | tudo-astroparticlephysics/cta_preprocessing | e7e6c8f9974007d18a8baeb7078f393217d6ca7b | [
"MIT"
] | 1 | 2018-10-05T14:34:44.000Z | 2018-10-05T14:34:44.000Z | process_multiple_simtel_files.py | tudo-astroparticlephysics/cta_preprocessing | e7e6c8f9974007d18a8baeb7078f393217d6ca7b | [
"MIT"
] | 1 | 2019-02-14T09:49:07.000Z | 2019-02-14T09:49:07.000Z | process_multiple_simtel_files.py | tudo-astroparticlephysics/cta_preprocessing | e7e6c8f9974007d18a8baeb7078f393217d6ca7b | [
"MIT"
] | 2 | 2018-08-24T14:41:17.000Z | 2019-02-11T08:37:08.000Z | import os
import glob
import click
import numpy as np
from tqdm import tqdm
from joblib import delayed, Parallel
from process_simtel_file import process_file, write_result_to_file
from preprocessing.parameters import PREPConfig
import logging
from logging.config import dictConfig
import yaml
@click.command()
@click.argument('input_pattern', type=str)
@click.argument('output_folder', type=click.Path(dir_okay=True, file_okay=False))
@click.argument('config_file', type=click.Path(file_okay=True))
@click.option(
'-l',
'--logger_config_file',
default='logging_config.yaml',
help='Specify a yaml logger config file to tune console and file debugging',
)
@click.option('-n', '--n_events', default=-1, help='Number of events to process in each file.')
@click.option(
'-j',
'--n_jobs',
default=1,
    help='Number of jobs to start. This is useful when passing more than one simtel file.',
)
@click.option(
'--overwrite/--no-overwrite',
default=False,
help='If false (default) will only process non-existing filenames',
)
@click.option('-v', '--verbose', default=1, help='specifies the output being shown during processing')
@click.option('-c', '--chunksize', default=1, help='number of files per chunk')
def main(
input_pattern,
output_folder,
config_file,
logger_config_file,
n_events,
n_jobs,
overwrite,
verbose,
chunksize,
):
'''
Process simtel files given as matching
'input_pattern'
into one hdf5 file for each simtel file.
Output files get placed into
'output_folder'
with the same filename as their respective input file but the
extension switched to .hdf5
Processing steps consist of:
- Calibration
- Calculating image features
- Collecting MC header information
The hdf5 file will contain three groups.
'runs', 'array_events', 'telescope_events'.
The config specifies which
- telescopes
- integrator
- cleaning
- cleaning levels per telescope type
to use.
'''
# workaround https://stackoverflow.com/questions/30861524/logging-basicconfig-not-creating-log-file-when-i-run-in-pycharm
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
try:
with open(logger_config_file, 'rb') as f:
config = yaml.safe_load(f)
dictConfig(config)
except Exception:
logging.warning('Could not set logger configuration.', exc_info=True)
config = PREPConfig(config_file)
if not input_pattern.endswith('simtel.gz'):
logging.warning(
'WARNING. Pattern does not end with file extension (simtel.gz). More files might be matched.'
)
input_files = glob.glob(input_pattern)
if len(input_files) == 0:
logging.critical(f'No files found. For pattern {input_pattern}. Aborting')
return
else:
logging.info(f'Found {len(input_files)} files matching pattern.')
def output_file_for_input_file(input_file):
output_file = os.path.join(output_folder, os.path.basename(input_file).replace('simtel.gz', 'h5'))
return output_file
if not overwrite:
input_files = list(filter(lambda v: not os.path.exists(output_file_for_input_file(v)), input_files))
logging.info(f'Preprocessing on {len(input_files)} files that have no matching output')
else:
output_files = [output_file_for_input_file(f) for f in input_files]
[os.remove(of) for of in output_files if os.path.exists(of)]
logging.info('Preprocessing all found input_files and overwriting existing output.')
n_chunks = (len(input_files) // chunksize) + 1
chunks = np.array_split(input_files, n_chunks)
logging.debug(f'Split input_files into {n_chunks} chunks')
with Parallel(n_jobs=n_jobs, verbose=verbose, backend='loky') as parallel:
for chunk in tqdm(chunks):
results = parallel(
delayed(process_file)(f, config, n_jobs=1, n_events=n_events, verbose=verbose) for f in chunk
) # 1 because multiple threads on one file did not perform well at all
if len(results) != len(chunk):
logging.error('One or more files failed to process in this chunk.')
assert len(results) == len(chunk)
for input_file, r in zip(chunk, results):
if r:
run_info_container, array_events, telescope_events = r
output_file = output_file_for_input_file(input_file)
write_result_to_file(run_info_container, array_events, telescope_events, output_file)
else:
logging.error(f'could not process file {input_file}. job did not return a result')
if __name__ == '__main__':
main()
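# A hedged invocation sketch (paths and option values are illustrative):
#
#   python process_multiple_simtel_files.py \
#       'data/*.simtel.gz' output/ config.yaml -j 4 -c 2 --overwrite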
| 35.525926 | 125 | 0.680776 | 648 | 4,796 | 4.878086 | 0.322531 | 0.034799 | 0.018981 | 0.022778 | 0.09111 | 0.046188 | 0.046188 | 0 | 0 | 0 | 0 | 0.005349 | 0.220392 | 4,796 | 134 | 126 | 35.791045 | 0.840064 | 0.150334 | 0 | 0.063158 | 0 | 0 | 0.257401 | 0.006523 | 0 | 0 | 0 | 0 | 0.010526 | 1 | 0.021053 | false | 0.010526 | 0.115789 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38cb7f8d9074411805567ac6ccda96a1b2300e3c | 4,040 | py | Python | car-controller/src/mainController/Controller/Camera/OrbbecAstraCamera.py | iisys-hof/autonomous-driving | 9f2ab64713b6dbec38f4ca6dcb953729f39a2746 | [
"Apache-2.0"
] | null | null | null | car-controller/src/mainController/Controller/Camera/OrbbecAstraCamera.py | iisys-hof/autonomous-driving | 9f2ab64713b6dbec38f4ca6dcb953729f39a2746 | [
"Apache-2.0"
] | null | null | null | car-controller/src/mainController/Controller/Camera/OrbbecAstraCamera.py | iisys-hof/autonomous-driving | 9f2ab64713b6dbec38f4ca6dcb953729f39a2746 | [
"Apache-2.0"
] | null | null | null | # @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from Controller.Camera.ICamera import ICamera
import numpy as np
from primesense import _openni2 as c_api
from primesense import openni2
import math
import cv2
class OrbbecAstraCamera(ICamera):
_rgb_stream = None
_depth_stream = None
_dev = None
_h = 480
_w = 640
frameRate = 30
angleWidth = 60
openNIDist = None
def __init__(self,openNIDist=None):
self.error = False
if not openni2.is_initialized():
dist = openNIDist
if dist is None:
if 'OPENNI2_REDIST64' in os.environ:
dist = os.environ['OPENNI2_REDIST64']
if 'OPENNI2_REDIST' in os.environ:
dist = os.environ['OPENNI2_REDIST']
if dist is not None:
self.openNIDist = dist
openni2.initialize(dist)
if not openni2.is_initialized():
raise Exception("openNI2 not initialized form path: " + str(openNIDist))
self._dev = openni2.Device.open_any()
def run(self):
self._rgb_stream = self._dev.create_color_stream()
self._depth_stream = self._dev.create_depth_stream()
self._rgb_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=self._w, resolutionY=self._h, fps=self.frameRate))
self._depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=self._w, resolutionY=self._h, fps=self.frameRate))
self._depth_stream.set_mirroring_enabled(False)
self._rgb_stream.set_mirroring_enabled(False)
self._rgb_stream.start()
self._depth_stream.start()
# Synchronize the streams
self._dev.set_depth_color_sync_enabled(True) # synchronize the streams
# IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
self._dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
return self.getStream()
def getAngleWidth(self):
angleWidth = self.angleWidth* math.pi /180
return angleWidth
def stop(self):
self._rgb_stream.stop()
print('Closed color stream')
self._depth_stream.stop()
print('Closed depth stream')
openni2.unload()
print('Unloaded OpenNI2')
def getStream(self):
while True:
yield self.takePicture()
def takePicture(self):
image = np.frombuffer(self._rgb_stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8).reshape(self._h, self._w, 3)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
deepMap = np.frombuffer(self._depth_stream.read_frame().get_buffer_as_uint16(), dtype=np.uint16).reshape(self._h, self._w)
return image,deepMap, None
def __str__(self):
text = '################################\n'
text += 'OrbbecAstra Camera' + '\n'
text += '################################\n'
text += 'Width: ' + str(self._h) + '\n'
text += 'Height: ' + str(self._w) + '\n'
text += 'FPS: ' + str(self.frameRate) + '\n'
text += 'angleWidth: ' + str(self.angleWidth) + '\n'
text += 'Deep Image: True' + '\n'
text += 'openNIDist: ' + self.openNIDist + '\n'
text += '\n################################\n'
return text
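# A hedged usage sketch (requires an attached Orbbec Astra camera and the
# OpenNI2 redistributable on the path):
#
#   camera = OrbbecAstraCamera()
#   for image, deep_map, _ in camera.run():   # endless generator of frames
#       ...                                   # process RGB + depth, break when done
#   camera.stop()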
| 35.130435 | 185 | 0.62599 | 488 | 4,040 | 4.963115 | 0.354508 | 0.040875 | 0.032205 | 0.013212 | 0.246903 | 0.190751 | 0.169282 | 0.143683 | 0.114368 | 0.114368 | 0 | 0.016521 | 0.235891 | 4,040 | 114 | 186 | 35.438596 | 0.76806 | 0.179951 | 0 | 0.027397 | 0 | 0 | 0.104609 | 0.031534 | 0.027397 | 0 | 0 | 0 | 0 | 1 | 0.09589 | false | 0 | 0.09589 | 0 | 0.369863 | 0.041096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38cda266078cd115e9713dfe4320b3340c7d8b16 | 5,912 | py | Python | common/config.py | pi-plan/pidts | ddfd04b25d513be0b080712062b38338bbe92b47 | [
"BSD-3-Clause"
] | 1 | 2021-02-07T10:33:54.000Z | 2021-02-07T10:33:54.000Z | common/config.py | pi-plan/pidts | ddfd04b25d513be0b080712062b38338bbe92b47 | [
"BSD-3-Clause"
] | null | null | null | common/config.py | pi-plan/pidts | ddfd04b25d513be0b080712062b38338bbe92b47 | [
"BSD-3-Clause"
] | null | null | null | import os
from typing import Dict, List, Any, Optional, Tuple, MutableMapping, Union
import toml
class Config(object):
"""
Factory for all configuration; the singleton can be replaced.
"""
_instance: Optional['Config'] = None
def __init__(self, current_zone_id: int, node: str, server_id: int):
self.current_zone_id: int = int(current_zone_id)
self.node = node
self.server_id = server_id
@classmethod
def new(cls, current_zone_id: int, node: str, server_id: int):
if cls._instance:
del cls._instance
c = cls(current_zone_id, node, server_id)
cls._instance = c
return cls._instance
@classmethod
def get_instance(cls) -> 'Config':
if not cls._instance:
raise Exception("Not yet initialized")
return cls._instance
@staticmethod
def get_meta_config() -> 'MetaService':
return MetaService.get_instance()
class MetaService(object):
"""
Singleton; must not be modified.
"""
_instance: Optional['MetaService'] = None
def __init__(self,
servers: List[Tuple[str, int]],
wait_timeout: int):
self.servers: List[Tuple[str, int]] = servers
self.wait_timeout: int = wait_timeout
@classmethod
def new(cls,
servers: List[Tuple[str, int]],
wait_timeout: int) -> 'MetaService':
if cls._instance:
return cls._instance
c = cls(servers, wait_timeout)
cls._instance = c
return cls._instance
@classmethod
def get_instance(cls) -> 'MetaService':
if not cls._instance:
raise Exception("Not yet initialized")
return cls._instance
class LoggingHandlerConfig(object):
def __init__(self, class_name: str, args: List[List[Any]]):
self.class_name: str = class_name
self.args: List[List[Any]] = args
class LoggingConfig(object):
"""
Singleton; must not be modified.
"""
_instance: Optional['LoggingConfig'] = None
def __init__(self, level: str, format: str, datefmt: str,
handler: LoggingHandlerConfig):
self.level = level
self.format = format
self.datefmt = datefmt
self.handler = handler
@classmethod
def new(cls, level: str, format: str, datefmt: str,
handler: LoggingHandlerConfig) -> 'LoggingConfig':
if cls._instance:
return cls._instance
c = cls(level, format, datefmt, handler)
cls._instance = c
return cls._instance
@classmethod
def get_instance(cls) -> 'LoggingConfig':
if not cls._instance:
raise Exception("Not yet initialized")
return cls._instance
class MQConfig(object):
_instance: Optional['MQConfig'] = None
@classmethod
def new(cls, type: str, bootstrap_servers: str, client_id: str,
topic: str, group_id: str, auto_commit_interval_ms: int = 50,
acks: Union[str, int] = 1, timeout: int = 50) -> 'MQConfig':
if cls._instance:
return cls._instance
m = cls(type, bootstrap_servers, client_id, topic, group_id,
auto_commit_interval_ms, acks, timeout)
cls._instance = m
return cls._instance
@classmethod
def get_instance(cls) -> 'MQConfig':
if not cls._instance:
raise Exception("Not yet initialized")
return cls._instance
def __init__(self, type: str, bootstrap_servers: str, client_id: str,
topic: str, group_id: str, auto_commit_interval_ms: int = 50,
acks: Union[str, int] = 1, timeout: int = 50):
self.type = type
self.bootstrap_servers = bootstrap_servers
self.client_id = client_id
self.group_id = group_id
self.topic = topic
self.acks = acks
self.timeout = timeout
self.auto_commit_interval_ms = auto_commit_interval_ms
def parser_config(zone_id: int, file: str):
with open(file, "r") as f:
config = toml.load(f)
for i in ["base", "mq"]:
if i not in config:
raise Exception("config file is error.")
for i in ["logging", "meta_service"]:
if i not in config["base"]:
raise Exception("config file is error.")
for i in ["level", "format", "datefmt", "handler"]:
if i not in config["base"]["logging"]:
raise Exception("config file is error.")
for i in ["class", "args"]:
if i not in config["base"]["logging"]["handler"]:
raise Exception("config file is error.")
logging_handler = LoggingHandlerConfig(
config["base"]["logging"]["handler"]["class"],
config["base"]["logging"]["handler"]["args"])
LoggingConfig.new(
config["base"]["logging"]["level"],
config["base"]["logging"]["format"],
config["base"]["logging"]["datefmt"],
logging_handler)
Config.new(_get_zone_id(zone_id, config["base"]),
config["base"]["node"], config["base"]["server_id"])
for i in ["servers", "wait_timeout"]:
if i not in config["base"]["meta_service"]:
raise Exception("config file is error.")
servers = []
for i in config["base"]["meta_service"]["servers"]:
servers.append((i["host"], i["port"]))
MetaService.new(servers,
config["base"]["meta_service"]["wait_timeout"])
MQConfig.new(**config["mq"])
def _get_zone_id(zone_id: int, conf: MutableMapping[str, Any]) -> int:
"""
Get the current zone id. Priority: command-line argument > config file > environment variable.
"""
if zone_id:
return zone_id
if "zone_id" in conf.keys():
return int(conf["zone_id"])
env_zone_id = os.environ.get("PIDAL_ZONE_ID")
if env_zone_id:
return int(env_zone_id)
return 0
| 30.95288 | 78 | 0.583897 | 691 | 5,912 | 4.794501 | 0.149059 | 0.076366 | 0.051313 | 0.030184 | 0.44431 | 0.396016 | 0.337459 | 0.322366 | 0.233927 | 0.179897 | 0 | 0.002643 | 0.296008 | 5,912 | 190 | 79 | 31.115789 | 0.793369 | 0.013532 | 0 | 0.297872 | 0 | 0 | 0.110379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113475 | false | 0 | 0.021277 | 0.007092 | 0.312057 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38cda95a78affcbfda47741a851a53fac1c2275f | 1,619 | py | Python | craftier/performance.py | sk-/craftier | c35c6c18b43c28cf7b0c457d00e94e0abeb47f15 | [
"MIT"
] | 3 | 2021-01-11T11:25:30.000Z | 2021-04-16T07:30:34.000Z | craftier/performance.py | sk-/craftier | c35c6c18b43c28cf7b0c457d00e94e0abeb47f15 | [
"MIT"
] | null | null | null | craftier/performance.py | sk-/craftier | c35c6c18b43c28cf7b0c457d00e94e0abeb47f15 | [
"MIT"
] | 1 | 2021-05-27T18:46:27.000Z | 2021-05-27T18:46:27.000Z | """
Module to keep track of performance statistics across multiple processes.
Note: because this uses a global state it will only work in forked
multi-process. Note that the default changed in Python 38 for MacOs.
"""
import dataclasses
import json
import multiprocessing
import tempfile
from typing import IO, Any, Dict, Mapping, Optional, Sequence
@dataclasses.dataclass
class _Config:
file: Optional[IO[bytes]] = None
_config = _Config()

# A single lock created at import time so that forked workers share it; a
# lock constructed per call would not synchronize across processes.
_lock = multiprocessing.Lock()
class PerformanceError(Exception):
"""Errors specific to the performance module."""
def enable() -> None:
"""Enable the performance sink."""
if _config.file:
raise PerformanceError("performance is already enabled")
_config.file = tempfile.NamedTemporaryFile(
prefix="craftier-stats-", buffering=0
)
def write(data: Dict[str, Any]) -> None:
"""Write an entry to the performance file.
Data must be a JSON serializable object.
"""
if not _config.file:
return
with _lock:
_config.file.write(json.dumps(data).encode())
_config.file.write(b"\n")
def read() -> Sequence[Mapping[str, Any]]:
"""Read the contents of the performance data."""
if not _config.file:
return []
with _lock:
_config.file.seek(0)
return [json.loads(line.strip().decode()) for line in _config.file]
def disable() -> None:
"""Disable the performance sink.
Further writes and read will fail.
"""
if not _config.file:
raise PerformanceError("performance is not enabled")
_config.file.close()
_config.file = None
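# A minimal runnable sketch of the intended lifecycle (single-process here;
# with forked workers, enable() must run before the fork so children inherit
# the same temporary file):
if __name__ == "__main__":
    enable()
    write({"rule": "example", "elapsed_ms": 12})
    print(read())  # -> [{'rule': 'example', 'elapsed_ms': 12}]
    disable()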
| 24.530303 | 75 | 0.680049 | 203 | 1,619 | 5.35468 | 0.492611 | 0.110396 | 0.030359 | 0.041398 | 0.180313 | 0.180313 | 0.099356 | 0.099356 | 0.099356 | 0.099356 | 0 | 0.003137 | 0.212477 | 1,619 | 65 | 76 | 24.907692 | 0.849412 | 0.292156 | 0 | 0.151515 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.151515 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38d02e8c0048c5ac8c926a34293eeed04becfac6 | 4,635 | py | Python | flute/tests/model_test.py | iflare3g/flute | fb030b4ee541d212af973a98ab7c9da6c9c62374 | [
"BSD-3-Clause"
] | 1 | 2018-10-01T17:18:00.000Z | 2018-10-01T17:18:00.000Z | flute/tests/model_test.py | iflare3g/flute | fb030b4ee541d212af973a98ab7c9da6c9c62374 | [
"BSD-3-Clause"
] | 3 | 2018-09-08T17:16:21.000Z | 2018-10-16T17:05:26.000Z | flute/tests/model_test.py | iflare3g/flute | fb030b4ee541d212af973a98ab7c9da6c9c62374 | [
"BSD-3-Clause"
] | 7 | 2018-08-12T15:56:02.000Z | 2018-10-15T11:03:09.000Z | import sys
sys.path.append('..')
import unittest
from model import Field, Model, field_assembler
class TestModel( unittest.TestCase ):
def test_field( self ):
fname = 'login'
ftype = 'String(100)'
field = Field( fname, ftype )
self.assertEqual( " login = db.Column( db.String(100), nullable=True )", field.definition() )
funique = True
fnull = False
field = Field( fname, ftype, unique=funique, nullable=fnull )
self.assertEqual( " login = db.Column( db.String(100), unique=True, nullable=False )", field.definition() )
fpk = True
field = Field( fname, ftype, pkey=fpk )
res1 = field.definition()
self.assertEqual( " login = db.Column( db.String(100), primary_key=True, autoincrement=True )", res1 )
field = Field( fname, ftype, unique=funique, nullable=fnull, pkey=fpk )
res2 = field.definition()
self.assertEqual( res1, res2 )
fautoinc = False
field = Field( fname, ftype, pkey=fpk, autoinc=fautoinc )
self.assertEqual( " login = db.Column( db.String(100), primary_key=True, autoincrement=False )", field.definition() )
f = field_assembler( ('login', 'String(100)') )
self.assertEqual( " login = db.Column( db.String(100), nullable=True )", f.definition() )
f = field_assembler( ('login', 'String(100)', ['unique']) )
self.assertEqual( " login = db.Column( db.String(100), unique=True, nullable=True )", f.definition() )
f = field_assembler( ('login', 'String(100)', ['unique','notnull']) )
self.assertEqual( " login = db.Column( db.String(100), unique=True, nullable=False )", f.definition() )
f = field_assembler( ('login', 'String(100)', ['pkey','autoinc']) )
self.assertEqual( " login = db.Column( db.String(100), primary_key=True, autoincrement=True )", f.definition() )
f = field_assembler( ('login', 'String(100)', ['pkey']) )
self.assertEqual( " login = db.Column( db.String(100), primary_key=True, autoincrement=False )", f.definition() )
def test_model( self ):
name = 'company'
fields = [
('id', 'Integer', ['pkey', 'autoinc']),
('name', 'String(200)', ['notnull']),
('email', 'String(200)', ['unique', 'notnull']),
('active', 'Boolean'),
]
model = Model( name, fields )
value = model.value()
self.assertIn( 'class Company( db.Model ):', value )
self.assertIn( ' id = db.Column( db.Integer, primary_key=True, autoincrement=True )', value )
self.assertIn( ' name = db.Column( db.String(200), nullable=False )', value )
self.assertIn( ' email = db.Column( db.String(200), unique=True, nullable=False )', value )
self.assertIn( ' active = db.Column( db.Boolean, nullable=True )', value )
name = 'user'
fields = [
('id', 'Integer', ['pkey', 'autoinc']),
('username', 'String(80)', ['unique', 'notnull']),
('email', 'String(100)', ['notnull']),
('company_id', 'Integer', ['fkey'], 'company.id')
]
model = Model( name, fields )
value = model.value()
self.assertIn( " company_id = db.Column( db.Integer, db.ForeignKey( 'company.id' ) )", value )
self.assertIn( " company = db.relationship( 'Company' ) )", value )
uniques = [
['username', 'company_id'],
['email']
]
model = Model( name, fields, uniques=uniques )
value = model.value()
self.assertIn( " db.UniqueConstraint( 'username', 'company_id', name='ukey_1' )", value )
self.assertIn( " db.UniqueConstraint( 'email', name='ukey_2' )", value )
self.assertIn( "def __init__( self, id, username, email, company_id )", value )
self.assertIn( "self.username = username", value )
self.assertIn( "self.company_id = company_id", value )
self.assertNotIn( "self.company = company", value )
model.remove_field( 'email' )
self.assertNotIn( "'name': 'email'", model.show_fields() )
model.add_field( ('email', 'String(200)', ['unique', 'notnull']) )
value = model.value()
self.assertIn( ' email = db.Column( db.String(200), unique=True, nullable=False )', value )
model.clear_fields()
self.assertEqual( 0, len (model.fields ) )
model.add_fields( fields )
self.assertEqual( len( fields ), len (model.fields ) )
if __name__ == '__main__':
unittest.main()
| 45.891089 | 128 | 0.574326 | 499 | 4,635 | 5.256513 | 0.146293 | 0.054899 | 0.057186 | 0.073199 | 0.58597 | 0.444529 | 0.423942 | 0.409074 | 0.373999 | 0.304613 | 0 | 0.022119 | 0.258684 | 4,635 | 100 | 129 | 46.35 | 0.741269 | 0 | 0 | 0.144578 | 0 | 0.012048 | 0.3726 | 0 | 0 | 0 | 0 | 0 | 0.325301 | 1 | 0.024096 | false | 0 | 0.036145 | 0 | 0.072289 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38d0a8a2c98f89d03f504868faeb95043fe1fe21 | 404 | py | Python | jsonencoder.py | tennebo/flask-poc | 7591d0c5ed13c3e35857c424c6f1d08a3d3e8b3e | [
"Unlicense"
] | null | null | null | jsonencoder.py | tennebo/flask-poc | 7591d0c5ed13c3e35857c424c6f1d08a3d3e8b3e | [
"Unlicense"
] | null | null | null | jsonencoder.py | tennebo/flask-poc | 7591d0c5ed13c3e35857c424c6f1d08a3d3e8b3e | [
"Unlicense"
] | null | null | null | """JSON encoding functions.
"""
import datetime
from flask.json import JSONEncoder
class IsoJSONEncoder(JSONEncoder):
"""Custom encoder to serialize dates into ISO-8601 format."""
def default(self, obj):
"""If the argument is a date, return its ISO representation."""
if isinstance(obj, datetime.date):
return obj.isoformat()
return super().default(obj)
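# A minimal runnable sketch (the Flask app below is an assumption, not part
# of this module; `json_encoder` works on Flask < 2.3, where custom encoders
# are still supported):
if __name__ == "__main__":
    import flask

    app = flask.Flask(__name__)
    app.json_encoder = IsoJSONEncoder
    with app.app_context():
        # Dates now serialize in ISO-8601 form: {"d": "2020-01-02"}
        print(flask.json.dumps({"d": datetime.date(2020, 1, 2)}))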
| 22.444444 | 71 | 0.668317 | 48 | 404 | 5.625 | 0.729167 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012739 | 0.222772 | 404 | 17 | 72 | 23.764706 | 0.847134 | 0.341584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38d2cde485ca91bea8c2a1374b8ab233b09be1b7 | 493 | py | Python | dbref/migrations/0006_auto_20150409_2236.py | kilisimba/gluten-free-tax | e88005a9a175628505bbcdf13cdd123ad736fe3a | [
"MIT"
] | null | null | null | dbref/migrations/0006_auto_20150409_2236.py | kilisimba/gluten-free-tax | e88005a9a175628505bbcdf13cdd123ad736fe3a | [
"MIT"
] | null | null | null | dbref/migrations/0006_auto_20150409_2236.py | kilisimba/gluten-free-tax | e88005a9a175628505bbcdf13cdd123ad736fe3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dbref', '0005_auto_20150406_1949'),
]
operations = [
migrations.AlterField(
model_name='association',
name='equivalent',
field=models.ForeignKey(related_name='nonGF', verbose_name=b'non-gluten free', blank=True, to='dbref.Product', null=True),
),
]
| 24.65 | 134 | 0.634888 | 51 | 493 | 5.921569 | 0.784314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045213 | 0.237323 | 493 | 19 | 135 | 25.947368 | 0.757979 | 0.042596 | 0 | 0 | 0 | 0 | 0.174468 | 0.048936 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38d3deb0152d67420a149ee95efab9a0d923ec8b | 16,923 | py | Python | fugue_duckdb/execution_engine.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | fugue_duckdb/execution_engine.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | fugue_duckdb/execution_engine.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | import logging
from threading import RLock
from typing import Any, Callable, Dict, List, Optional, Union, Iterable
import duckdb
import pyarrow as pa
from duckdb import DuckDBPyConnection
from fugue import ArrowDataFrame, NativeExecutionEngine
from fugue.collections.partition import (
EMPTY_PARTITION_SPEC,
PartitionCursor,
PartitionSpec,
parse_presort_exp,
)
from fugue.dataframe import (
DataFrame,
DataFrames,
LocalBoundedDataFrame,
LocalDataFrame,
PandasDataFrame,
)
from fugue.dataframe.utils import get_join_schemas
from fugue.execution.execution_engine import (
_DEFAULT_JOIN_KEYS,
ExecutionEngine,
SQLEngine,
)
from triad.collections.fs import FileSystem
from triad.utils.assertion import assert_or_throw
from fugue_duckdb._io import DuckDBIO
from fugue_duckdb._utils import encode_value_to_expr, get_temp_df_name
from fugue_duckdb.dataframe import DuckDataFrame
_FUGUE_DUCKDB_PRAGMA_CONFIG_PREFIX = "fugue.duckdb.pragma."
class DuckDBEngine(SQLEngine):
"""DuckDB SQL backend implementation.
:param execution_engine: the execution engine this sql engine will run on
"""
def __init__(self, execution_engine: ExecutionEngine) -> None:
super().__init__(execution_engine)
self._cache: Dict[str, int] = {}
def select(self, dfs: DataFrames, statement: str) -> DataFrame:
if isinstance(self.execution_engine, DuckExecutionEngine):
return self._duck_select(dfs, statement)
return self._other_select(dfs, statement)
def _duck_select(self, dfs: DataFrames, statement: str) -> DataFrame:
for k, v in dfs.items():
tdf: Any = self.execution_engine.to_df(v)
if k not in self._cache or self._cache[k] != id(tdf.native):
# tdf.native.create_view(k, replace=True)
kk = k + get_temp_df_name()
tdf.native.query( # TODO: a hack to avoid DuckDB stability issue
kk, f"CREATE OR REPLACE TEMP VIEW {k} AS SELECT * FROM {kk}"
)
self._cache[k] = id(tdf.native)
result = self.execution_engine.connection.query(statement) # type: ignore
return DuckDataFrame(result)
def _other_select(self, dfs: DataFrames, statement: str) -> DataFrame:
conn = duckdb.connect()
try:
for k, v in dfs.items():
conn.from_arrow_table(v.as_arrow()).create_view(k)
return ArrowDataFrame(conn.execute(statement).arrow())
finally:
conn.close()
class DuckExecutionEngine(ExecutionEngine):
"""The execution engine using DuckDB.
Please read |ExecutionEngineTutorial| to understand this important Fugue concept
:param conf: |ParamsLikeObject|, read |FugueConfig| to learn Fugue specific options
:param connection: DuckDB connection
"""
def __init__(
self, conf: Any = None, connection: Optional[DuckDBPyConnection] = None
):
super().__init__(conf)
self._native_engine = NativeExecutionEngine(conf)
self._con = connection or duckdb.connect()
self._external_con = connection is not None
self._context_lock = RLock()
try:
for pg in list(self._get_pragmas()): # transactional
self._con.execute(pg)
except Exception:
self.stop()
raise
def _get_pragmas(self) -> Iterable[str]:
for k, v in self.conf.items():
if k.startswith(_FUGUE_DUCKDB_PRAGMA_CONFIG_PREFIX):
name = k[len(_FUGUE_DUCKDB_PRAGMA_CONFIG_PREFIX) :]
assert_or_throw(
name.isidentifier(), ValueError(f"{name} is not a valid pragma key")
)
value = encode_value_to_expr(v)
yield f"PRAGMA {name}={value};"
def stop(self) -> None:
if not self._external_con:
self._con.close()
def __repr__(self) -> str:
return "DuckExecutionEngine"
@property
def connection(self) -> DuckDBPyConnection:
return self._con
@property
def log(self) -> logging.Logger:
return self._native_engine.log
@property
def fs(self) -> FileSystem:
return self._native_engine.fs
@property
def default_sql_engine(self) -> SQLEngine:
return DuckDBEngine(self)
def to_df(self, df: Any, schema: Any = None, metadata: Any = None) -> DuckDataFrame:
if isinstance(df, DataFrame):
assert_or_throw(
schema is None and metadata is None,
ValueError("schema and metadata must be None when df is a DataFrame"),
)
if isinstance(df, DuckDataFrame):
return df
if isinstance(df, PandasDataFrame) and all(
not pa.types.is_nested(f.type) for f in df.schema.fields
):
rdf = DuckDataFrame(
self.connection.df(df.as_pandas()), metadata=dict(df.metadata)
)
else:
rdf = DuckDataFrame(
self.connection.from_arrow_table(df.as_arrow()),
metadata=dict(df.metadata),
)
return rdf
tdf = ArrowDataFrame(df, schema)
return DuckDataFrame(self.connection.from_arrow_table(tdf.native), metadata)
def repartition(
self, df: DataFrame, partition_spec: PartitionSpec
) -> DataFrame: # pragma: no cover
self.log.warning("%s doesn't respect repartition", self)
return df
def map(
self,
df: DataFrame,
map_func: Callable[[PartitionCursor, LocalDataFrame], LocalDataFrame],
output_schema: Any,
partition_spec: PartitionSpec,
metadata: Any = None,
on_init: Optional[Callable[[int, DataFrame], Any]] = None,
) -> DataFrame:
return self._native_engine.map(
df=df,
map_func=map_func,
output_schema=output_schema,
partition_spec=partition_spec,
metadata=metadata,
on_init=on_init,
)
def broadcast(self, df: DataFrame) -> DataFrame:
return self.to_df(df)
def persist(
self,
df: DataFrame,
lazy: bool = False,
**kwargs: Any,
) -> DataFrame:
# TODO: we should create a DuckDB table here, but it has bugs as of 0.3.1, so we can't use it yet
if isinstance(df, DuckDataFrame):
# materialize
return ArrowDataFrame(df.native.arrow(), metadata=df.metadata)
return self.to_df(df)
def join(
self,
df1: DataFrame,
df2: DataFrame,
how: str,
on: List[str] = _DEFAULT_JOIN_KEYS,
metadata: Any = None,
) -> DataFrame:
key_schema, output_schema = get_join_schemas(df1, df2, how=how, on=on)
t1, t2, t3 = (
get_temp_df_name(),
get_temp_df_name(),
get_temp_df_name(),
)
on_fields = " AND ".join(f"{t1}.{k}={t2}.{k}" for k in key_schema)
join_type = self._how_to_join(how)
if how.lower() == "cross":
select_fields = ",".join(
f"{t1}.{k}" if k in df1.schema else f"{t2}.{k}"
for k in output_schema.names
)
sql = f"SELECT {select_fields} FROM {t1} {join_type} {t2}"
elif how.lower() == "right_outer":
select_fields = ",".join(
f"{t2}.{k}" if k in df2.schema else f"{t1}.{k}"
for k in output_schema.names
)
sql = (
f"SELECT {select_fields} FROM {t2} LEFT OUTER JOIN {t1} ON {on_fields}"
)
elif how.lower() == "full_outer":
select_fields = ",".join(
f"COALESCE({t1}.{k},{t2}.{k}) AS {k}" if k in key_schema else k
for k in output_schema.names
)
sql = f"SELECT {select_fields} FROM {t1} {join_type} {t2} ON {on_fields}"
elif how.lower() in ["semi", "left_semi"]:
keys = ",".join(key_schema.names)
on_fields = " AND ".join(f"{t1}.{k}={t3}.{k}" for k in key_schema)
sql = (
f"SELECT {t1}.* FROM {t1} INNER JOIN (SELECT DISTINCT {keys} "
f"FROM {t2}) AS {t3} ON {on_fields}"
)
elif how.lower() in ["anti", "left_anti"]:
keys = ",".join(key_schema.names)
on_fields = " AND ".join(f"{t1}.{k}={t3}.{k}" for k in key_schema)
sql = (
f"SELECT {t1}.* FROM {t1} LEFT OUTER JOIN "
f"(SELECT DISTINCT {keys}, 1 AS __contain__ FROM {t2}) AS {t3} "
f"ON {on_fields} WHERE {t3}.__contain__ IS NULL"
)
else:
select_fields = ",".join(
f"{t1}.{k}" if k in df1.schema else f"{t2}.{k}"
for k in output_schema.names
)
sql = f"SELECT {select_fields} FROM {t1} {join_type} {t2} ON {on_fields}"
return self._sql(sql, {t1: df1, t2: df2}, metadata=metadata)
def _how_to_join(self, how: str):
return how.upper().replace("_", " ") + " JOIN"
def union(
self,
df1: DataFrame,
df2: DataFrame,
distinct: bool = True,
metadata: Any = None,
) -> DataFrame:
assert_or_throw(
df1.schema == df2.schema, ValueError(f"{df1.schema} != {df2.schema}")
)
if distinct:
t1, t2 = get_temp_df_name(), get_temp_df_name()
sql = f"SELECT * FROM {t1} UNION SELECT * FROM {t2}"
return self._sql(sql, {t1: df1, t2: df2}, metadata=metadata)
return DuckDataFrame(
self.to_df(df1).native.union(self.to_df(df2).native), metadata=metadata
)
def subtract(
self,
df1: DataFrame,
df2: DataFrame,
distinct: bool = True,
metadata: Any = None,
) -> DataFrame: # pragma: no cover
if distinct:
t1, t2 = get_temp_df_name(), get_temp_df_name()
sql = f"SELECT * FROM {t1} EXCEPT SELECT * FROM {t2}"
return self._sql(sql, {t1: df1, t2: df2}, metadata=metadata)
return DuckDataFrame(
self.to_df(df1).native.except_(self.to_df(df2).native), metadata=metadata
)
def intersect(
self,
df1: DataFrame,
df2: DataFrame,
distinct: bool = True,
metadata: Any = None,
) -> DataFrame:
if distinct:
t1, t2 = get_temp_df_name(), get_temp_df_name()
sql = f"SELECT * FROM {t1} INTERSECT SELECT * FROM {t2}"
return self._sql(sql, {t1: df1, t2: df2}, metadata=metadata)
return DuckDataFrame(
self.to_df(df1).native.intersect(self.to_df(df2).native), metadata=metadata
)
def distinct(
self,
df: DataFrame,
metadata: Any = None,
) -> DataFrame:
rel = self.to_df(df).native.distinct()
return DuckDataFrame(rel, metadata=metadata)
def dropna(
self,
df: DataFrame,
how: str = "any",
thresh: int = None,
subset: List[str] = None,
metadata: Any = None,
) -> DataFrame:
schema = df.schema
if subset is not None:
schema = schema.extract(subset)
if how == "all":
thr = 0
elif how == "any":
thr = thresh or len(schema)
else: # pragma: no cover
raise ValueError(f"{how} is not one of any and all")
cw = [f"CASE WHEN {f} IS NULL THEN 0 ELSE 1 END" for f in schema.names]
expr = " + ".join(cw) + f" >= {thr}"
return DuckDataFrame(self.to_df(df).native.filter(expr), metadata=metadata)
def fillna(
self,
df: DataFrame,
value: Any,
subset: List[str] = None,
metadata: Any = None,
) -> DataFrame:
def _build_value_dict(names: List[str]) -> Dict[str, str]:
if not isinstance(value, dict):
v = encode_value_to_expr(value)
return {n: v for n in names}
else:
return {n: encode_value_to_expr(value[n]) for n in names}
names = list(df.schema.names)
if isinstance(value, dict):
# subset should be ignored
names = list(value.keys())
elif subset is not None:
names = list(df.schema.extract(subset).names)
vd = _build_value_dict(names)
assert_or_throw(
all(v != "NULL" for v in vd.values()),
ValueError("fillna value can not be None or contain None"),
)
cols = [
f"COALESCE({f}, {vd[f]}) AS {f}" if f in names else f
for f in df.schema.names
]
return DuckDataFrame(
self.to_df(df).native.project(", ".join(cols)), metadata=metadata
)
def sample(
self,
df: DataFrame,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
seed: Optional[int] = None,
metadata: Any = None,
) -> DataFrame:
assert_or_throw(
(n is None and frac is not None and frac >= 0.0)
or (frac is None and n is not None and n >= 0),
ValueError(
f"one and only one of n and frac should be non-negative, {n}, {frac}"
),
)
tb = get_temp_df_name()
if frac is not None:
sql = f"SELECT * FROM {tb} USING SAMPLE bernoulli({frac*100} PERCENT)"
else:
sql = f"SELECT * FROM {tb} USING SAMPLE reservoir({n} ROWS)"
if seed is not None:
sql += f" REPEATABLE ({seed})"
return self._sql(sql, {tb: df}, metadata=metadata)
def take(
self,
df: DataFrame,
n: int,
presort: str,
na_position: str = "last",
partition_spec: PartitionSpec = EMPTY_PARTITION_SPEC,
metadata: Any = None,
) -> DataFrame:
assert_or_throw(
isinstance(n, int),
ValueError("n needs to be an integer"),
)
if presort is not None and presort != "":
_presort = parse_presort_exp(presort)
else:
_presort = partition_spec.presort
tb = get_temp_df_name()
if len(_presort) == 0:
if len(partition_spec.partition_by) == 0:
return DuckDataFrame(self.to_df(df).native.limit(n), metadata=metadata)
cols = ", ".join(df.schema.names)
pcols = ", ".join(partition_spec.partition_by)
sql = (
f"SELECT *, ROW_NUMBER() OVER (PARTITION BY {pcols}) "
f"AS __fugue_take_param FROM {tb}"
)
sql = f"SELECT {cols} FROM ({sql}) WHERE __fugue_take_param<={n}"
return self._sql(sql, {tb: df}, metadata=metadata)
sorts: List[str] = []
for k, v in _presort.items():
s = k
if not v:
s += " DESC"
s += " NULLS FIRST" if na_position == "first" else " NULLS LAST"
sorts.append(s)
sort_expr = "ORDER BY " + ", ".join(sorts)
if len(partition_spec.partition_by) == 0:
sql = f"SELECT * FROM {tb} {sort_expr} LIMIT {n}"
return self._sql(sql, {tb: df}, metadata=metadata)
cols = ", ".join(df.schema.names)
pcols = ", ".join(partition_spec.partition_by)
sql = (
f"SELECT *, ROW_NUMBER() OVER (PARTITION BY {pcols} {sort_expr}) "
f"AS __fugue_take_param FROM {tb}"
)
sql = f"SELECT {cols} FROM ({sql}) WHERE __fugue_take_param<={n}"
return self._sql(sql, {tb: df}, metadata=metadata)
def load_df(
self,
path: Union[str, List[str]],
format_hint: Any = None,
columns: Any = None,
**kwargs: Any,
) -> LocalBoundedDataFrame:
dio = DuckDBIO(self.fs, self.connection)
return dio.load_df(path, format_hint, columns, **kwargs)
def save_df(
self,
df: DataFrame,
path: str,
format_hint: Any = None,
mode: str = "overwrite",
partition_spec: PartitionSpec = EMPTY_PARTITION_SPEC,
force_single: bool = False,
**kwargs: Any,
) -> None:
dio = DuckDBIO(self.fs, self.connection)
dio.save_df(self.to_df(df), path, format_hint, mode, **kwargs)
def convert_yield_dataframe(self, df: DataFrame, as_local: bool) -> DataFrame:
return df.as_local() if not self._external_con or as_local else df
def _sql(
self, sql: str, dfs: Dict[str, DataFrame], metadata: Any = None
) -> DuckDataFrame:
with self._context_lock:
df = self.sql_engine.select(DataFrames(dfs), sql)
return DuckDataFrame(df.native, metadata=metadata) # type: ignore
| 35.25625 | 88 | 0.565621 | 2,026 | 16,923 | 4.560711 | 0.148075 | 0.014394 | 0.017316 | 0.01829 | 0.344805 | 0.302489 | 0.257684 | 0.204113 | 0.179329 | 0.172511 | 0 | 0.008817 | 0.323111 | 16,923 | 479 | 89 | 35.329854 | 0.797818 | 0.037641 | 0 | 0.356459 | 0 | 0 | 0.115401 | 0.004495 | 0.002392 | 0 | 0 | 0.004175 | 0.016746 | 1 | 0.076555 | false | 0 | 0.038278 | 0.021531 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38d63648ec041a10170b26be119c90743852519a | 7,213 | py | Python | 2_learning/Alignment/train.py | BGU-CS-VIL/JA-POLS | 0ee34ec0c8c7d7fdfc0c5b1c85b2bb6632cc3c41 | [
"MIT"
] | 16 | 2020-03-16T08:52:09.000Z | 2022-03-09T09:05:47.000Z | 2_learning/Alignment/train.py | BGU-CS-VIL/JA-POLS | 0ee34ec0c8c7d7fdfc0c5b1c85b2bb6632cc3c41 | [
"MIT"
] | 1 | 2020-08-24T17:28:19.000Z | 2020-08-24T17:28:19.000Z | 2_learning/Alignment/train.py | BGU-CS-VIL/JA-POLS | 0ee34ec0c8c7d7fdfc0c5b1c85b2bb6632cc3c41 | [
"MIT"
] | 1 | 2022-02-04T20:54:24.000Z | 2022-02-04T20:54:24.000Z | from __future__ import division, print_function
import copy
import time
import cv2
import numpy as np
import torch
from scipy.linalg import expm, logm
from utils.image_warping import warp_image
from utils.Plots import *
def train_model(model, dataloaders, criterion, optimizer, device, num_epochs, model_path, is_inception=False):
since = time.time()
train_loss_history = []
val_loss_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = np.inf
for epoch in range(num_epochs):
print('\nEpoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 20)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device, dtype=torch.float)
labels = labels.to(device, dtype=torch.float)
# print(f'input shape: {inputs.shape}')
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
preds = model(inputs)
loss = criterion(preds, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
# running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
# running_corrects.double()/len(dataloaders[phase].dataset)
epoch_acc = 0
print('{} Loss: {:.4f}'.format(
phase, epoch_loss))
# deep copy the model
if phase == 'val' and epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_loss_history.append(epoch_loss)
if phase == 'train':
train_loss_history.append(epoch_loss)
# load best model weights and save it
model.load_state_dict(best_model_wts)
torch.save(model.state_dict(), model_path)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
# plot_results
return model, [train_loss_history, val_loss_history]
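# A minimal training-call sketch (the dataloaders, criterion, optimizer and the
# save path below are illustrative assumptions):
#   model, (train_hist, val_hist) = train_model(
#       model, {'train': train_dl, 'val': val_dl}, torch.nn.MSELoss(),
#       torch.optim.Adam(model.parameters()), device, num_epochs=30,
#       model_path='logs/best_model.pt')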
def predict_test(model, dataloader, device):
for inputs, labels in dataloader:
inputs = inputs.to(device, dtype=torch.float)
labels = labels.to(device, dtype=torch.float)
preds = model(inputs)
return preds  # note: returns predictions for the first batch only
def training_summary(hist, num_epochs, logs_dir, test_name):
train, val = hist
plt.title("Loss vs. Number of Training Epochs")
plt.xlabel("Training Epochs")
plt.ylabel("Loss")
plt.plot(range(1, num_epochs+1), train, label="train loss")
plt.plot(range(1, num_epochs+1), val, label="val loss")
# plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.savefig(f'{logs_dir}/loss_{test_name}.png')
def plot_results(test_x_embd, test_theta, predicted_theta, logs_dir, test_name):
print('-' * 20)
print('Plotting Panorama results:')
img_emb_sz = (test_x_embd[0].shape[0] - 400, test_x_embd[0].shape[1] - 400)
predicted_theta = predicted_theta.cpu().detach().numpy()
imgs_trans_lrn = []
imgs_trans_gt = []
for i in range(len(test_x_embd)):
I = test_x_embd[i, ...]
T_lrn = convert_to_expm(predicted_theta[i, ...])
T_lrn = np.reshape(T_lrn, (2, 3))
T_gt = convert_to_expm(test_theta[i, ...])
T_gt = np.reshape(T_gt, (2, 3))
I_T_lrn, _ = warp_image(I, T_lrn, cv2.INTER_CUBIC)
I_T_lrn = np.abs(I_T_lrn/np.nanmax(I_T_lrn))
I_T_lrn = embed_to_normal_sz_image(I_T_lrn, img_emb_sz)
imgs_trans_lrn.append(I_T_lrn)
I_T_gt, _ = warp_image(I, T_gt, cv2.INTER_CUBIC)
I_T_gt = np.abs(I_T_gt/np.nanmax(I_T_gt))
I_T_gt = embed_to_normal_sz_image(I_T_gt, img_emb_sz)
imgs_trans_gt.append(I_T_gt)
# --------- build panoramic images of learned theta:------------
panoramic_img_lrn = np.nanmedian(imgs_trans_lrn, axis=0) # nanmean
fig1 = open_figure(1, f'Panoramic Image Predicted - {test_name}', (3, 2))
PlotImages(1, 1, 1, 1, [panoramic_img_lrn], [''],
'gray', axis=False, colorbar=False)
# --------- build panoramic images of ground-truth theta:--------
panoramic_img_gt = np.nanmedian(imgs_trans_gt, axis=0) # nanmean
fig2 = open_figure(2, f'Panoramic Image GT - {test_name}', (3, 2))
PlotImages(2, 1, 1, 1, [panoramic_img_gt], [''],
'gray', axis=False, colorbar=False)
fig1.savefig(f'{logs_dir}/Pano_{test_name}_pred.png', dpi=1000)
fig2.savefig(f'{logs_dir}/Pano_{test_name}_gt.png', dpi=1000)
plt.show()
print('-' * 20)
print('Done Plotting.')
def plot_augmentations(images, plot_name, img_num=9):
print('-' * 20)
print('Plotting Augmentations:')
random_imgs = images[np.random.choice(
range(images.shape[0]), size=img_num)]
cols = int(np.sqrt(img_num))
n_images = len(random_imgs)
titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(random_imgs, titles)):
a = fig.add_subplot(cols, int(np.ceil(n_images / float(cols))), n + 1)  # cast: Matplotlib expects integer grid dims
if image.ndim == 2:
plt.gray()
plt.imshow(image)
a.set_title(title)
fig.suptitle(plot_name)
fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
plt.show()
print('-' * 20)
print('Done Plotting.')
return fig
# Take an se(2) (Lie algebra) transformation flattened to shape (1, 6)
# and return the corresponding SE(2) transformation, also flattened to (1, 6)
def convert_to_expm(T):
T = np.reshape(T, (2, 3))
bottom = np.zeros((1, 3))
T = np.concatenate((T, bottom), axis=0)
T_exmp = expm(T)[0:2, :]
return T_exmp.ravel()
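# Sanity-check sketch: the zero se(2) vector maps to the identity transform,
# since expm(0) = I (keeping only the top two rows of the 3x3 identity):
#   convert_to_expm(np.zeros(6)) -> array([1., 0., 0., 0., 1., 0.])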
def embed_to_normal_sz_image(img, img_emb_sz):
img_big_emb_sz = img.shape
st_y = (img_big_emb_sz[0] - img_emb_sz[0]) // 2
st_x = (img_big_emb_sz[1] - img_emb_sz[1]) // 2
return img[st_y:st_y + img_emb_sz[0], st_x:st_x + img_emb_sz[1], :]
def embed_to_big_image(img, img_big_emb_sz, img_emb_sz, img_sz):
I = np.zeros((img_big_emb_sz[0], img_big_emb_sz[1], 3))
I[::] = np.nan
start_idx_y = ((img_big_emb_sz[0] - img_emb_sz[0]) //
2) + (img_emb_sz[0] - img_sz[0]) // 2
start_idx_x = ((img_big_emb_sz[1] - img_emb_sz[1]) //
2) + (img_emb_sz[1] - img_sz[1]) // 2
I[start_idx_y:start_idx_y + img_sz[0],
start_idx_x:start_idx_x + img_sz[1], :] = img
return np.abs(I / np.nanmax(I))
| 35.014563 | 110 | 0.603771 | 1,050 | 7,213 | 3.881905 | 0.214286 | 0.025761 | 0.025515 | 0.02159 | 0.241904 | 0.158734 | 0.128067 | 0.088813 | 0.056919 | 0.056919 | 0 | 0.02216 | 0.26175 | 7,213 | 205 | 111 | 35.185366 | 0.743286 | 0.100929 | 0 | 0.152778 | 0 | 0 | 0.0684 | 0.015785 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.0625 | 0 | 0.159722 | 0.090278 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38dab9aa058dfb23a215f4afea45be4f2a2e0514 | 604 | py | Python | app/models.py | dicks6n/NEWSHIGHLIGHT | ad94e1b22640b72acdc7acd3e0fdb107f980dad3 | [
"MIT"
] | 1 | 2018-11-10T08:08:41.000Z | 2018-11-10T08:08:41.000Z | app/models.py | abdirahman-mahat/news-highlighter | 49f2fd63e5a3c0e7f06d45aaf36be7314a088087 | [
"MIT"
] | null | null | null | app/models.py | abdirahman-mahat/news-highlighter | 49f2fd63e5a3c0e7f06d45aaf36be7314a088087 | [
"MIT"
] | null | null | null | class Source:
'''
Source class to define source objects.
'''
def __init__(self,id,name,description):
self.id = id
self.name = name
self.description = description
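# A minimal construction sketch (the field values below are illustrative
# assumptions, not real News API data):
#   bbc = Source('bbc-news', 'BBC News', 'News and analysis from the BBC')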
class Articles:
'''
Articles class to define article objects.
'''
def __init__(self,source,author,title,description,url,urlToImage,publishedAt):
# self.id=id
self.source=source
self.author=author
self.title=title
self.description=description
self.url=url
self.urlToImage=urlToImage
self.publishedAt=publishedAt | 26.26087 | 82 | 0.627483 | 66 | 604 | 5.621212 | 0.287879 | 0.048518 | 0.070081 | 0.097035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.284768 | 604 | 23 | 83 | 26.26087 | 0.858796 | 0.155629 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38dc3330e7fd401cc7a01d3a379667d2f51fb293 | 1,086 | py | Python | client/examples/users/set_device_pin.py | thefstock/FirstockPy | 09b4dcf3470f83de991b43213958d2c6783f997b | [
"MIT"
] | 1 | 2022-03-29T06:56:06.000Z | 2022-03-29T06:56:06.000Z | client/examples/users/set_device_pin.py | thefstock/FirstockPy | 09b4dcf3470f83de991b43213958d2c6783f997b | [
"MIT"
] | 3 | 2022-01-17T09:31:21.000Z | 2022-03-11T12:12:08.000Z | client/examples/users/set_device_pin.py | thefstock/FirstockPy | 09b4dcf3470f83de991b43213958d2c6783f997b | [
"MIT"
] | null | null | null | """
Example usage of client to set device pin
"""
from py_client import SetDevicePinRequestModel, RequestSourceType
from argparse import ArgumentParser
from ..base import Example
class SetDevicePinExample(Example):
title: str = 'Set Device Pin'
def parse_args(self):
"""
Parse CLI arguments
"""
parser = ArgumentParser('examples.users.set_device_pin')
parser.add_argument('--uid', type=str, help='The user id')
parser.add_argument('--imei', type=str, help='The IMEI or device unique fingerprint')
parser.add_argument('--dpin', type=str, help='The new pin')
parser.add_argument('--token', type=str, help='The token received from login')
self.args = parser.parse_args()
def run(self):
"""
Run the example
"""
model = SetDevicePinRequestModel(
uid=self.args.uid,
imei=self.args.imei,
dpin=self.args.dpin,
source=RequestSourceType.API
)
response = self.client.users.set_device_pin(model, key=self.args.token)
return [model, response]
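# A hypothetical invocation sketch (all argument values are illustrative, not
# real credentials):
#   python -m examples.users.set_device_pin --uid FA0001 --imei <device-id> \
#       --dpin 1234 --token <login-token>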
# Run example
example = SetDevicePinExample()
example() | 27.15 | 89 | 0.694291 | 137 | 1,086 | 5.423358 | 0.386861 | 0.053836 | 0.064603 | 0.07537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18232 | 1,086 | 40 | 90 | 27.15 | 0.836712 | 0.082873 | 0 | 0 | 0 | 0 | 0.161627 | 0.03024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.347826 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38dd1b1ec6c5d99ef981a56b0ca95aed01fa4e0c | 10,440 | py | Python | OcCo_Torch/train_cls.py | sun-pyo/OcCo | e2e12dbaa8f9b98fb8c42fc32682f49e99be302f | [
"MIT"
] | 158 | 2020-08-19T18:13:28.000Z | 2022-03-30T13:55:32.000Z | OcCo_Torch/train_cls.py | sun-pyo/OcCo | e2e12dbaa8f9b98fb8c42fc32682f49e99be302f | [
"MIT"
] | 28 | 2020-05-30T04:02:33.000Z | 2022-03-30T15:46:38.000Z | OcCo_Torch/train_cls.py | sun-pyo/OcCo | e2e12dbaa8f9b98fb8c42fc32682f49e99be302f | [
"MIT"
] | 18 | 2020-08-19T19:52:38.000Z | 2022-02-06T11:42:26.000Z | # Copyright (c) 2020. Hanchen Wang, hw501@cam.ac.uk
# Ref: https://github.com/WangYueFt/dgcnn/blob/master/pytorch/main.py
# Ref: https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/train_cls.py
import os, sys, torch, shutil, importlib, argparse
sys.path.append('utils')
sys.path.append('models')
from PC_Augmentation import random_point_dropout, random_scale_point_cloud, random_shift_point_cloud
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from ModelNetDataLoader import General_CLSDataLoader_HDF5
from Torch_Utility import copy_parameters, seed_torch
from torch.utils.tensorboard import SummaryWriter
# from Inference_Timer import Inference_Timer
from torch.utils.data import DataLoader
from Dataset_Loc import Dataset_Loc
from TrainLogger import TrainLogger
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser('Point Cloud Classification')
''' === Training and Model === '''
parser.add_argument('--log_dir', type=str, help='log folder [default: ]')
parser.add_argument('--gpu', type=str, default='0', help='GPU [default: 0]')
parser.add_argument('--mode', type=str, default='train', help='train or test')
parser.add_argument('--epoch', type=int, default=200, help='epochs [default: 200]')
# parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument('--batch_size', type=int, default=24, help='batch size [default: 24]')
parser.add_argument('--model', default='pointnet_cls', help='model [default: pointnet_cls]')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate [default: 0.5]')
parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum [default: 0.9]')
parser.add_argument('--lr_decay', type=float, default=0.5, help='lr decay rate [default: 0.5]')
parser.add_argument('--step_size', type=int, default=20, help='lr decay step [default: 20 eps]')
parser.add_argument('--num_point', type=int, default=1024, help='points number [default: 1024]')
parser.add_argument('--restore', action='store_true', help='using pre-trained [default: False]')
parser.add_argument('--restore_path', type=str, help="path to pretrained weights [default: None]")
parser.add_argument('--emb_dims', type=int, default=1024, help='dimension of embeddings [default: 1024]')
parser.add_argument('--k', type=int, default=20, help='number of nearest neighbors to use [default: 20]')
parser.add_argument('--use_sgd', action='store_true', default=False, help='use SGD optimiser [default: False]')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate [default: 0.001, 0.1 if using sgd]')
parser.add_argument('--scheduler', type=str, default='step', help='lr decay scheduler [default: step, or cos]')
''' === Dataset === '''
parser.add_argument('--partial', action='store_true', help='partial objects [default: False]')
parser.add_argument('--bn', action='store_true', help='with background noise [default: False]')
parser.add_argument('--data_aug', action='store_true', help='data Augmentation [default: False]')
parser.add_argument('--dataset', type=str, default='modelnet40', help='dataset [default: modelnet40]')
parser.add_argument('--fname', type=str, help='filename, used in ScanObjectNN or fewer data [default:]')
return parser.parse_args()
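# A minimal launch sketch (flag values and the checkpoint path are illustrative
# assumptions):
#   python train_cls.py --gpu 0 --model pointnet_cls --dataset modelnet40 \
#       --batch_size 24 --epoch 200 --restore --restore_path <pretrained.pth>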
def main(args):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# seed_torch(args.seed)
''' === Set up Loggers and Load Data === '''
MyLogger = TrainLogger(args, name=args.model.upper(), subfold='cls', filename=args.mode + '_log')
writer = SummaryWriter(os.path.join(MyLogger.experiment_dir, 'runs'))
MyLogger.logger.info('Load dataset %s' % args.dataset)
NUM_CLASSES, TRAIN_FILES, TEST_FILES = Dataset_Loc(dataset=args.dataset, fname=args.fname,
partial=args.partial, bn=args.bn)
TRAIN_DATASET = General_CLSDataLoader_HDF5(file_list=TRAIN_FILES, num_point=1024)
TEST_DATASET = General_CLSDataLoader_HDF5(file_list=TEST_FILES, num_point=1024)
trainDataLoader = DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True)
testDataLoader = DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
''' === Load Model and Backup Scripts === '''
MODEL = importlib.import_module(args.model)
shutil.copy(os.path.abspath(__file__), MyLogger.log_dir)
shutil.copy('./models/%s.py' % args.model, MyLogger.log_dir)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classifier = MODEL.get_model(args=args, num_channel=3, num_class=NUM_CLASSES).to(device)
criterion = MODEL.get_loss().to(device)
classifier = torch.nn.DataParallel(classifier)
# nn.DataParallel has its own issues (slow, memory expensive),
# here are some advanced solutions: https://zhuanlan.zhihu.com/p/145427849
print('=' * 27)
print('Using %d GPU,' % torch.cuda.device_count(), 'Indices: %s' % args.gpu)
print('=' * 27)
''' === Restore Model from Pre-Trained Checkpoints: OcCo/Jigsaw etc === '''
if args.restore:
checkpoint = torch.load(args.restore_path)
classifier = copy_parameters(classifier, checkpoint, verbose=True)
MyLogger.logger.info('Use pre-trained weights from %s' % args.restore_path)
else:
MyLogger.logger.info('No pre-trained weights, start training from scratch...')
if not args.use_sgd:
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.lr,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=1e-4
)
else:
optimizer = torch.optim.SGD(classifier.parameters(),
lr=args.lr * 100,
momentum=args.momentum,
weight_decay=1e-4)
if args.scheduler == 'cos':
scheduler = CosineAnnealingLR(optimizer, T_max=args.epoch, eta_min=1e-3)
else:
scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.lr_decay)
LEARNING_RATE_CLIP = 0.01 * args.lr
if args.mode == 'test':
with torch.no_grad():
classifier.eval()
MyLogger.epoch_init(training=False)
for points, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
if args.model == 'pointnet_cls':
pred, trans_feat = classifier(points)
loss = criterion(pred, target, trans_feat)
else:
pred = classifier(points)
loss = criterion(pred, target)
MyLogger.step_update(pred.data.max(1)[1].cpu().numpy(),
target.long().cpu().numpy(),
loss.cpu().detach().numpy())
MyLogger.epoch_summary(writer=writer, training=False)
sys.exit("Test Finished")
for epoch in range(MyLogger.epoch, args.epoch + 1):
''' === Training === '''
MyLogger.epoch_init()
for points, target in tqdm(trainDataLoader, total=len(trainDataLoader), smoothing=0.9):
writer.add_scalar('Learning Rate', scheduler.get_lr()[-1], MyLogger.step)
# Augmentation, might bring performance gains
if args.data_aug:
points = random_point_dropout(points.data.numpy())
points[:, :, :3] = random_scale_point_cloud(points[:, :, :3])
points[:, :, :3] = random_shift_point_cloud(points[:, :, :3])
points = torch.Tensor(points)
points, target = points.transpose(2, 1).float().cuda(), target.long().cuda()
# FP and BP
classifier.train()
optimizer.zero_grad()
if args.model == 'pointnet_cls':
pred, trans_feat = classifier(points)
loss = criterion(pred, target, trans_feat)
else:
pred = classifier(points)
loss = criterion(pred, target)
loss.backward()
optimizer.step()
MyLogger.step_update(pred.data.max(1)[1].cpu().numpy(),
target.long().cpu().numpy(),
loss.cpu().detach().numpy())
MyLogger.epoch_summary(writer=writer, training=True)
''' === Validating === '''
with torch.no_grad():
classifier.eval()
MyLogger.epoch_init(training=False)
for points, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
if args.model == 'pointnet_cls':
pred, trans_feat = classifier(points)
loss = criterion(pred, target, trans_feat)
else:
pred = classifier(points)
loss = criterion(pred, target)
MyLogger.step_update(pred.data.max(1)[1].cpu().numpy(),
target.long().cpu().numpy(),
loss.cpu().detach().numpy())
MyLogger.epoch_summary(writer=writer, training=False)
if MyLogger.save_model:
state = {
'step': MyLogger.step,
'epoch': MyLogger.best_instance_epoch,
'instance_acc': MyLogger.best_instance_acc,
'best_class_acc': MyLogger.best_class_acc,
'best_class_epoch': MyLogger.best_class_epoch,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, MyLogger.savepath)
scheduler.step()
if args.scheduler == 'step':
for param_group in optimizer.param_groups:
if param_group['lr'] < LEARNING_RATE_CLIP:  # clip each group's lr, not only group 0
param_group['lr'] = LEARNING_RATE_CLIP
MyLogger.train_summary()
if __name__ == '__main__':
args = parse_args()
main(args)
| 49.478673 | 120 | 0.623467 | 1,245 | 10,440 | 5.071486 | 0.236145 | 0.03421 | 0.064618 | 0.027558 | 0.286665 | 0.214919 | 0.195597 | 0.174691 | 0.174691 | 0.174691 | 0 | 0.01853 | 0.240134 | 10,440 | 210 | 121 | 49.714286 | 0.777386 | 0.051724 | 0 | 0.275 | 0 | 0 | 0.149111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0125 | false | 0 | 0.06875 | 0 | 0.0875 | 0.01875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38e092cf84c59f06abadf70c263d37ab8c6570ad | 2,420 | py | Python | cdc_kafka/change_index.py | woodlee/sqlserver-cdc-to-kafka | 602c17432a87c1aaee94dc6c971cde8496314fda | [
"MIT"
] | 10 | 2020-04-09T09:32:54.000Z | 2021-10-04T09:20:59.000Z | cdc_kafka/change_index.py | woodlee/sqlserver-cdc-to-kafka | 602c17432a87c1aaee94dc6c971cde8496314fda | [
"MIT"
] | 4 | 2019-10-04T14:15:32.000Z | 2020-05-13T18:48:58.000Z | cdc_kafka/change_index.py | woodlee/sqlserver-cdc-to-kafka | 602c17432a87c1aaee94dc6c971cde8496314fda | [
"MIT"
] | 6 | 2019-11-11T18:01:00.000Z | 2021-06-09T09:49:57.000Z | from functools import total_ordering
from typing import Dict, Any
from . import constants
@total_ordering
class ChangeIndex(object):
def __init__(self, lsn: bytes, seqval: bytes, operation: int) -> None:
self.lsn: bytes = lsn
self.seqval: bytes = seqval
if isinstance(operation, int):
self.operation: int = operation
elif isinstance(operation, str):
self.operation: int = constants.CDC_OPERATION_NAME_TO_ID[operation]
else:
raise Exception(f'Unrecognized type for parameter `operation` (type: {type(operation)}, '
f'value: {operation}).')
def __eq__(self, other: 'ChangeIndex') -> bool:
if isinstance(other, ChangeIndex):
return self.lsn + self.seqval + bytes([self.operation]) == \
other.lsn + other.seqval + bytes([other.operation])
return False
def __lt__(self, other: 'ChangeIndex') -> bool:
return self.lsn + self.seqval + bytes([self.operation]) < \
other.lsn + other.seqval + bytes([other.operation])
# For user-friendly display in logging etc.; not the format to be used for persistent data storage
def __repr__(self) -> str:
lsn = self.lsn.hex()
seqval = self.seqval.hex()
return f'0x{lsn[:8]} {lsn[8:16]} {lsn[16:]}:0x{seqval[:8]} {seqval[8:16]} {seqval[16:]}:{self.operation}'
# Converts from binary LSN/seqval to a string representation that is more friendly to some things that may
# consume this data. The stringified form is also "SQL query ready" for pasting into SQL Server queries.
def to_avro_ready_dict(self) -> Dict[str, str]:
return {
constants.LSN_NAME: f'0x{self.lsn.hex()}',
constants.SEQVAL_NAME: f'0x{self.seqval.hex()}',
constants.OPERATION_NAME: constants.CDC_OPERATION_ID_TO_NAME[self.operation]
}
@staticmethod
def from_avro_ready_dict(avro_dict: Dict[str, Any]) -> 'ChangeIndex':
return ChangeIndex(
int(avro_dict[constants.LSN_NAME][2:], 16).to_bytes(10, "big"),
int(avro_dict[constants.SEQVAL_NAME][2:], 16).to_bytes(10, "big"),
constants.CDC_OPERATION_NAME_TO_ID[avro_dict[constants.OPERATION_NAME]]
)
LOWEST_CHANGE_INDEX = ChangeIndex(b'\x00' * 10, b'\x00' * 10, 0)
HIGHEST_CHANGE_INDEX = ChangeIndex(b'\xff' * 10, b'\xff' * 10, 4)
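# A minimal round-trip sketch (the byte values are illustrative; operation id 2
# is assumed to map to a valid name in constants.CDC_OPERATION_ID_TO_NAME):
#   idx = ChangeIndex(b'\x00' * 9 + b'\x01', b'\x00' * 10, 2)
#   assert ChangeIndex.from_avro_ready_dict(idx.to_avro_ready_dict()) == idx
#   assert LOWEST_CHANGE_INDEX < idx < HIGHEST_CHANGE_INDEX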
| 43.214286 | 113 | 0.638017 | 313 | 2,420 | 4.766773 | 0.319489 | 0.02815 | 0.026139 | 0.036193 | 0.170241 | 0.170241 | 0.131367 | 0.105898 | 0.105898 | 0.105898 | 0 | 0.021657 | 0.236777 | 2,420 | 55 | 114 | 44 | 0.78614 | 0.12562 | 0 | 0.047619 | 0 | 0.02381 | 0.132102 | 0.035985 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0.071429 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38e1c729bafbb07865a50b526ed396fd93d212cd | 1,053 | py | Python | test.py | Omri-L/QuadMeshCNN | 61646019da37adb2d0c510d75951ce6051dd0d62 | [
"MIT"
] | null | null | null | test.py | Omri-L/QuadMeshCNN | 61646019da37adb2d0c510d75951ce6051dd0d62 | [
"MIT"
] | null | null | null | test.py | Omri-L/QuadMeshCNN | 61646019da37adb2d0c510d75951ce6051dd0d62 | [
"MIT"
] | null | null | null | from options.test_options import TestOptions
from data import DataLoader
from models import create_model
from util.writer import Writer
import numpy as np
def run_test(epoch=-1):
print('Running Test')
opt = TestOptions().parse()
opt.serial_batches = True # no shuffle
dataset = DataLoader(opt)
model = create_model(opt)
writer = Writer(opt)
# test
conf_mat_final = np.zeros((dataset.dataset.nclasses, dataset.dataset.nclasses))
all_labels = np.array([v for v in dataset.dataset.class_to_idx.values()])
writer.reset_counter()
for i, data in enumerate(dataset):
model.set_input(data)
ncorrect, nexamples, conf_mat = model.test(all_labels)
conf_mat_final += conf_mat
writer.update_counter(ncorrect, nexamples)
conf_mat_final = conf_mat_final / conf_mat_final.sum(1, keepdims=True) * 100  # row-normalize (keepdims fixes the broadcast axis)
writer.print_acc(epoch, writer.acc)
for l in all_labels:
print('label %d, predictions: %s' % (l, conf_mat_final[l]))
return writer.acc
if __name__ == '__main__':
run_test()
| 30.085714 | 83 | 0.698006 | 149 | 1,053 | 4.684564 | 0.436242 | 0.080229 | 0.103152 | 0.068768 | 0.078797 | 0.051576 | 0 | 0 | 0 | 0 | 0 | 0.005931 | 0.19943 | 1,053 | 34 | 84 | 30.970588 | 0.822064 | 0.014245 | 0 | 0 | 0 | 0 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.185185 | 0 | 0.259259 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38e4570136910d4663a8f55121c43d5095f0cff0 | 871 | py | Python | ukpsummarizer-be/cplex/cplex/python/2.7/x86-64_linux/cplex/_internal/_constantsenum.py | avineshpvs/vldb2018-sherlock | 5e116f42f44c50bcb289be3c4b4b76e29b238c18 | [
"Apache-2.0"
] | 2 | 2019-01-13T08:41:00.000Z | 2021-03-27T22:55:10.000Z | ukpsummarizer-be/cplex/cplex/python/3.6/x86-64_linux/cplex/_internal/_constantsenum.py | AIPHES/vldb2018-sherlock | 3746efa35c4c1769cc4aaeb15aeb9453564e1226 | [
"Apache-2.0"
] | null | null | null | ukpsummarizer-be/cplex/cplex/python/3.6/x86-64_linux/cplex/_internal/_constantsenum.py | AIPHES/vldb2018-sherlock | 3746efa35c4c1769cc4aaeb15aeb9453564e1226 | [
"Apache-2.0"
] | 4 | 2018-11-06T16:12:55.000Z | 2019-08-21T13:22:32.000Z | # --------------------------------------------------------------------------
# Version 12.8.0
# --------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2000, 2017. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# --------------------------------------------------------------------------
CPXCALLBACKINFO_THREADID = 0
CPXCALLBACKINFO_NODECOUNT = 1
CPXCALLBACKINFO_ITCOUNT = 2
CPXCALLBACKINFO_BEST_SOL = 3
CPXCALLBACKINFO_BEST_BND = 4
CPXCALLBACKINFO_THREADS = 5
CPXCALLBACKINFO_FEASIBLE = 6
CPXCALLBACKINFO_TIME = 7
CPXCALLBACKINFO_DETTIME = 8
CPXCALLBACKSOLUTION_CHECKFEAS = 0
CPXCALLBACKSOLUTION_PROPAGATE = 1
| 37.869565 | 76 | 0.598163 | 84 | 871 | 6.047619 | 0.738095 | 0.074803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083227 | 0.10333 | 871 | 22 | 77 | 39.590909 | 0.567222 | 0.602755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38e6109e923e9a9c4faa663254fad6166c4e3ab8 | 9,481 | py | Python | radio/models/keras/keras_3dunet.py | kmader/radio | b7897a72c6750bd848504f57bf6e31d27785dfb3 | [
"Apache-2.0"
] | null | null | null | radio/models/keras/keras_3dunet.py | kmader/radio | b7897a72c6750bd848504f57bf6e31d27785dfb3 | [
"Apache-2.0"
] | null | null | null | radio/models/keras/keras_3dunet.py | kmader/radio | b7897a72c6750bd848504f57bf6e31d27785dfb3 | [
"Apache-2.0"
] | 1 | 2018-08-24T09:40:48.000Z | 2018-08-24T09:40:48.000Z | # pylint: disable=too-many-statements
""" Contains Keras3DUNet model class. """
from functools import wraps
import tensorflow as tf
import keras
from keras.layers import (Input,
concatenate,
Conv3D,
MaxPooling3D,
UpSampling3D,
Activation)
from keras.layers.normalization import BatchNormalization
from .keras_model import KerasModel
from .losses import dice_loss
class Keras3DUNet(KerasModel):
""" Model incapsulating 3D U-Net architecture for 3D scans implemented in keras.
Class extends KerasModel class.
Contains description of 'bottleneck_block', 'reduction_block' and
'upsampling_block'. Current 3D U-Net architecture is implemented
inside _build method using these blocks.
Architecture is inspired by 3D U-Net (Çiçek et al., https://arxiv.org/abs/1606.06650).
Notes
-----
Implementation requires the input tensor having shape=(batch_size, 1, 32, 64, 64).
"""
def build_config(self):
input_shape = self.get('input_shape', self.config, (1, 32, 64, 64))
self.config.update({'input_shape': input_shape})
super().build_config()
def bottleneck_block(self, inputs, filters, scope, padding='same'):
""" Apply bottleneck block transform to input tensor.
Parameters
----------
inputs : keras tensor
input tensor.
filters : int
number of output filters required by Conv3D operation.
scope : str
scope name for block, will be used as an argument of tf.variable_scope.
padding : str
padding mode, can be 'same' or 'valid'.
Returns
-------
keras tensor
output tensor.
Notes
-----
`channels_first` dim-ordering is used.
"""
with tf.variable_scope(scope):
conv1 = Conv3D(filters, (3, 3, 3),
data_format='channels_first',
padding=padding)(inputs)
conv1 = BatchNormalization(axis=1, momentum=0.1,
scale=True)(conv1)
conv1 = Activation('relu')(conv1)
conv2 = Conv3D(filters, (3, 3, 3),
data_format='channels_first',
padding=padding)(conv1)
conv2 = BatchNormalization(axis=1, momentum=0.1,
scale=True)(conv2)
conv2 = Activation('relu')(conv2)
return conv2
def reduction_block(self, inputs, filters, scope, pool_size=(2, 2, 2), padding='same'):
""" Apply reduction block transform to input tensor.
Layer consists of two 3D-convolutional layers with batch normalization
before 'relu' activation and max_pooling3d layer in the end.
Parameters
----------
inputs : keras tensor
input tensor.
filters : int
number of filters in first and second covnolutions.
scope : str
scope name for block, will be used as an argument of tf.variable_scope.
pool_size : tuple(int, int, int)
size of pooling kernel along three axis, required by Conv3D operation.
padding : str
padding mode for convolutions, can be 'same' or 'valid'.
Returns
-------
keras tensor
output tensor.
Notes
-----
`channels_first` dim-ordering is used.
"""
with tf.variable_scope(scope):
conv1 = Conv3D(filters, (3, 3, 3),
data_format='channels_first',
padding=padding)(inputs)
conv1 = BatchNormalization(axis=1, momentum=0.1,
scale=True)(conv1)
conv1 = Activation('relu')(conv1)
conv2 = Conv3D(filters, (3, 3, 3),
data_format='channels_first',
padding=padding)(conv1)
conv2 = BatchNormalization(axis=1, momentum=0.1,
scale=True)(conv2)
conv2 = Activation('relu')(conv2)
max_pool = MaxPooling3D(data_format='channels_first',
pool_size=pool_size)(conv2)
return conv2, max_pool
def upsampling_block(self, inputs, skip_connect_tensor, filters, scope, padding='same'):
""" Apply upsampling transform to two input tensors.
First of all, UpSampling3D transform is applied to inputs. Then output
tensor of operation is concatenated with skip_connect_tensor. After this
two 3D-convolutions with batch normalization before 'relu' activation
are applied.
Parameters
----------
inputs : keras tensor
input tensor from previous layer.
skip_connect_tensor : keras tensor
input tensor from simmiliar layer from reduction branch of 3D U-Net.
filters : int
number of filters in convolutional layers.
scope : str
name of scope for block.
padding : str
padding mode for convolutions, can be 'same' or 'valid'.
Returns
-------
keras tensor
ouput tensor.
Notes
-----
`channels_first` dim-ordering is used.
"""
with tf.variable_scope(scope):
upsample_tensor = UpSampling3D(data_format="channels_first",
size=(2, 2, 2))(inputs)
upsample_tensor = concatenate([upsample_tensor, skip_connect_tensor], axis=1)
conv1 = Conv3D(filters, (3, 3, 3),
data_format="channels_first",
padding="same")(upsample_tensor)
conv1 = BatchNormalization(axis=1, momentum=0.1,
scale=True)(conv1)
conv1 = Activation('relu')(conv1)
conv2 = Conv3D(filters, (3, 3, 3),
data_format="channels_first",
padding="same")(conv1)
conv2 = BatchNormalization(axis=1, momentum=0.1,
scale=True)(conv2)
conv2 = Activation('relu')(conv2)
return conv2
def _build(self, *args, **kwargs):
""" Build 3D NoduleVnet model implemented in keras. """
num_targets = self.get('num_targets', self.config)
input_shape = self.get('input_shape', self.config)
inputs = Input(shape=input_shape)
# Downsampling or reduction layers: ReductionBlock_A, ReductionBlock_B, ReductionBlock_C, ReductionBlock_D
# block_A has shape (None, 32, 64, 64, 32), reduct_block_A has shape (None, 16, 32, 32, 32)
block_A, reduct_block_A = self.reduction_block(inputs, 32,
scope='ReductionBlock_A')
# block_B has shape (None, 16, 32, 32, 64), reduct_block_B has shape (None, 8, 16, 16, 64)
block_B, reduct_block_B = self.reduction_block(reduct_block_A, 64,
scope='ReductionBlock_B')
# block_C has shape (None, 8, 16, 16, 128), reduct_block_C has shape (None, 4, 8, 8, 128)
block_C, reduct_block_C = self.reduction_block(reduct_block_B, 128,
scope='ReductionBlock_C')
# block_D has shape (None, 4, 8, 8, 256), reduct_block_D has shape (None, 2, 4, 4, 256)
block_D, reduct_block_D = self.reduction_block(reduct_block_C, 256,
scope='ReductionBlock_D')
# Bottleneck layer
# bottleneck_block has shape (None, 2, 4, 4, 512)
bottleneck_block = self.bottleneck_block(reduct_block_D, 512, 'BottleNeckBlock')
# Upsampling Layers: UpsamplingBlock_D, UpsamplingBlock_C, UpsamplingBlock_B, UpsamplingBlock_A
# upsample_block_C has shape (None, 4, 8, 8, 256)
upsample_block_D = self.upsampling_block(bottleneck_block, block_D,
256, scope='UpsamplingBlock_D')
# upsample_block_C has shape (None, 8, 16, 16, 128)
upsample_block_C = self.upsampling_block(upsample_block_D, block_C,
128, scope='UpsamplingBlock_C')
# upsample_block_B has shape (None, 16, 32, 32, 64)
upsample_block_B = self.upsampling_block(upsample_block_C, block_B,
64, scope='UpsamplingBlock_B')
# upsample_block_A has shape (None, 32, 64, 64, 32)
upsample_block_A = self.upsampling_block(upsample_block_B, block_A,
32, scope='UpsamplingBlock_A')
# Final convolution
final_conv = Conv3D(num_targets, (1, 1, 1),
activation='sigmoid',
data_format="channels_first",
padding='same')(upsample_block_A)
return [inputs], [final_conv]
@wraps(keras.models.Model.compile)
def compile(self, optimizer='adam', loss=dice_loss, **kwargs):
""" Compile 3D U-Net model. """
super().compile(optimizer=optimizer, loss=loss)
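# A hypothetical construction sketch (KerasModel's constructor/config handling
# comes from the surrounding framework, so the exact call below is an
# assumption; the config keys follow this class's build_config defaults):
#   model = Keras3DUNet(config={'input_shape': (1, 32, 64, 64), 'num_targets': 1})
#   model.compile(optimizer='adam', loss=dice_loss)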
| 40.690987 | 114 | 0.560595 | 1,026 | 9,481 | 5.025341 | 0.187135 | 0.020171 | 0.030256 | 0.040147 | 0.474205 | 0.398759 | 0.352405 | 0.338635 | 0.316137 | 0.285881 | 0 | 0.040402 | 0.349963 | 9,481 | 232 | 115 | 40.866379 | 0.796203 | 0.349119 | 0 | 0.412371 | 0 | 0 | 0.067589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061856 | false | 0 | 0.082474 | 0 | 0.195876 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38e835965fc53e6b6954f811b965d7a0b4b3d712 | 6,541 | py | Python | tests/test_game_scraper.py | frutoper/Hockey-Scraper | bd521a4670396f0a565573fdc9cb95c28064ce0a | [
"MIT"
] | null | null | null | tests/test_game_scraper.py | frutoper/Hockey-Scraper | bd521a4670396f0a565573fdc9cb95c28064ce0a | [
"MIT"
] | null | null | null | tests/test_game_scraper.py | frutoper/Hockey-Scraper | bd521a4670396f0a565573fdc9cb95c28064ce0a | [
"MIT"
] | null | null | null | """ Tests for 'game_scraper.py' """
import pytest
import pandas as pd
from hockey_scraper import game_scraper, json_pbp, playing_roster
@pytest.fixture
def players():
return {'Home':
{'NOAH HANIFIN': {'id': 8478396, 'number': '5', 'last_name': 'HANIFIN'},
'KLAS DAHLBECK': {'id': 8476403, 'number': '6', 'last_name': 'DAHLBECK'},
'DEREK RYAN': {'id': 8478585, 'number': '7', 'last_name': 'RYAN'},
'JORDAN STAAL': {'id': 8473533, 'number': '11', 'last_name': 'STAAL'},
'JUSTIN WILLIAMS': {'id': 8468508, 'number': '14', 'last_name': 'WILLIAMS'},
'SEBASTIAN AHO': {'id': 8478427, 'number': '20', 'last_name': 'AHO'},
'LEE STEMPNIAK': {'id': 8470740, 'number': '21', 'last_name': 'STEMPNIAK'},
'BRETT PESCE': {'id': 8477488, 'number': '22', 'last_name': 'PESCE'},
'BROCK MCGINN': {'id': 8476934, 'number': '23', 'last_name': 'MCGINN'},
'JUSTIN FAULK': {'id': 8475753, 'number': '27', 'last_name': 'FAULK'},
'ELIAS LINDHOLM': {'id': 8477496, 'number': '28', 'last_name': 'LINDHOLM'},
'PHILLIP DI GIUSEPPE': {'id': 8476858, 'number': '34', 'last_name': 'DI GIUSEPPE'},
'JOAKIM NORDSTROM': {'id': 8475807, 'number': '42', 'last_name': 'NORDSTROM'},
'VICTOR RASK': {'id': 8476437, 'number': '49', 'last_name': 'RASK'},
'JEFF SKINNER': {'id': 8475784, 'number': '53', 'last_name': 'SKINNER'},
'TREVOR VAN RIEMSDYK': {'id': 8477845, 'number': '57', 'last_name': 'VAN RIEMSDYK'},
'JACCOB SLAVIN': {'id': 8476958, 'number': '74', 'last_name': 'SLAVIN'},
'TEUVO TERAVAINEN': {'id': 8476882, 'number': '86', 'last_name': 'TERAVAINEN'},
'CAM WARD': {'id': 8470320, 'number': '30', 'last_name': 'WARD'},
'HAYDN FLEURY': {'id': 8477938, 'number': '4', 'last_name': 'FLEURY'},
'PATRICK BROWN': {'id': 8477887, 'number': '36', 'last_name': 'BROWN'}
},
'Away':
{'NICK LEDDY': {'id': 8475181, 'number': '2', 'last_name': 'LEDDY'},
'RYAN PULOCK': {'id': 8477506, 'number': '6', 'last_name': 'PULOCK'},
'JORDAN EBERLE': {'id': 8474586, 'number': '7', 'last_name': 'EBERLE'},
'JOSH BAILEY': {'id': 8474573, 'number': '12', 'last_name': 'BAILEY'},
'MATHEW BARZAL': {'id': 8478445, 'number': '13', 'last_name': 'BARZAL'},
'THOMAS HICKEY': {'id': 8474066, 'number': '14', 'last_name': 'HICKEY'},
'CAL CLUTTERBUCK': {'id': 8473504, 'number': '15', 'last_name': 'CLUTTERBUCK'},
'ANDREW LADD': {'id': 8471217, 'number': '16', 'last_name': 'LADD'},
'ANDERS LEE': {'id': 8475314, 'number': '27', 'last_name': 'LEE'},
'SEBASTIAN AHO': {'id': 8480222, 'number': '28', 'last_name': 'AHO'},
'BROCK NELSON': {'id': 8475754, 'number': '29', 'last_name': 'NELSON'},
'ADAM PELECH': {'id': 8476917, 'number': '50', 'last_name': 'PELECH'},
'ROSS JOHNSTON': {'id': 8477527, 'number': '52', 'last_name': 'JOHNSTON'},
'CASEY CIZIKAS': {'id': 8475231, 'number': '53', 'last_name': 'CIZIKAS'},
'JOHNNY BOYCHUK': {'id': 8470187, 'number': '55', 'last_name': 'BOYCHUK'},
'TANNER FRITZ': {'id': 8479206, 'number': '56', 'last_name': 'FRITZ'},
'ANTHONY BEAUVILLIER': {'id': 8478463, 'number': '72', 'last_name': 'BEAUVILLIER'},
'JOHN TAVARES': {'id': 8475166, 'number': '91', 'last_name': 'TAVARES'},
'THOMAS GREISS': {'id': 8471306, 'number': '1', 'last_name': 'GREISS'},
'DENNIS SEIDENBERG': {'id': 8469619, 'number': '4', 'last_name': 'SEIDENBERG'},
'ALAN QUINE': {'id': 8476409, 'number': '10', 'last_name': 'QUINE'},
'JASON CHIMERA': {'id': 8466251, 'number': '25', 'last_name': 'CHIMERA'}
}
}
@pytest.fixture
def pbp_columns():
return ['Game_Id', 'Date', 'Period', 'Event', 'Description', 'Time_Elapsed', 'Seconds_Elapsed', 'Strength',
'Ev_Zone', 'Type', 'Ev_Team', 'Home_Zone', 'Away_Team', 'Home_Team', 'p1_name', 'p1_ID', 'p2_name', 'p2_ID',
'p3_name', 'p3_ID', 'awayPlayer1', 'awayPlayer1_id', 'awayPlayer2', 'awayPlayer2_id', 'awayPlayer3',
'awayPlayer3_id', 'awayPlayer4', 'awayPlayer4_id', 'awayPlayer5', 'awayPlayer5_id', 'awayPlayer6',
'awayPlayer6_id', 'homePlayer1', 'homePlayer1_id', 'homePlayer2', 'homePlayer2_id', 'homePlayer3',
'homePlayer3_id', 'homePlayer4', 'homePlayer4_id', 'homePlayer5', 'homePlayer5_id', 'homePlayer6',
'homePlayer6_id', 'Away_Players', 'Home_Players', 'Away_Score', 'Home_Score', 'Away_Goalie',
'Away_Goalie_Id', 'Home_Goalie', 'Home_Goalie_Id', 'xC', 'yC', 'Home_Coach', 'Away_Coach'
]
@pytest.fixture
def shifts_columns():
return ['Game_Id', 'Period', 'Team', 'Player', 'Player_Id', 'Start', 'End', 'Duration', 'Date']
def test_scrape_game(pbp_columns, shifts_columns):
""" Tests if scrape pbp and shifts for game correctly with and without shifts.
Check:
1. Returns either a DataFrame or None (for shifts when False)
2. The number of rows is correct
3. The columns are correct
"""
# 1. Try first without shifts
pbp, shifts = game_scraper.scrape_game("2016020475", "2016-12-18", False)
assert isinstance(pbp, pd.DataFrame)
assert shifts is None
assert pbp.shape[0] == 326
assert list(pbp.columns) == pbp_columns
# 2. Try with shifts
pbp, shifts = game_scraper.scrape_game("2007020222", "2007-11-08", True)
assert isinstance(pbp, pd.DataFrame)
assert isinstance(shifts, pd.DataFrame)
assert pbp.shape[0] == 248
assert shifts.shape[0] == 726
assert list(pbp.columns) == pbp_columns
assert list(shifts.columns) == shifts_columns
def test_combine_players_lists(players):
""" Check that it combines the list of players from the json pbp and the html roster correctly """
game_id = "2017020891"
json_players = game_scraper.get_players_json(json_pbp.get_pbp(game_id)['gameData']['players'])
roster = playing_roster.scrape_roster(game_id)['players']
assert players == game_scraper.combine_players_lists(json_players, roster, game_id)
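# A minimal invocation sketch (these tests scrape live NHL endpoints, so
# network access is assumed):
#   pytest tests/test_game_scraper.py -q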
| 59.463636 | 120 | 0.561229 | 725 | 6,541 | 4.897931 | 0.375172 | 0.096874 | 0.013517 | 0.008448 | 0.057449 | 0.057449 | 0.020276 | 0 | 0 | 0 | 0 | 0.095363 | 0.244917 | 6,541 | 109 | 121 | 60.009174 | 0.623608 | 0.058707 | 0 | 0.082353 | 0 | 0 | 0.384489 | 0 | 0 | 0 | 0 | 0 | 0.129412 | 1 | 0.058824 | false | 0 | 0.035294 | 0.035294 | 0.129412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c7f76a0570cfc5d00767d59e7d82e37483e3c54e | 1,933 | py | Python | detector.py | yarondantes/crossing_stats | fdfcec18247982807ac0881f6aea476d60e03a4a | [
"Apache-2.0"
] | null | null | null | detector.py | yarondantes/crossing_stats | fdfcec18247982807ac0881f6aea476d60e03a4a | [
"Apache-2.0"
] | null | null | null | detector.py | yarondantes/crossing_stats | fdfcec18247982807ac0881f6aea476d60e03a4a | [
"Apache-2.0"
] | 1 | 2021-08-23T13:15:14.000Z | 2021-08-23T13:15:14.000Z | import cv2
import numpy as np
_COCO_NAME_PATH = './resources/coco.names'
_YOLO3_WEIGHTS_PATH = '../yolov3.weights'
_YOLO3_CONFIG_PATH = './resources/yolov3.cfg'
class Detector(object):
def __init__(self):
super(Detector, self).__init__()
def detect(self, frame):
return  # stub; concrete detectors override this
class Yolo3Detector(Detector):
def __init__(self):
super(Yolo3Detector, self).__init__()
self.labels_path = _COCO_NAME_PATH
self.all_labels = open(self.labels_path).read().strip().split("\n")
self.weights_path = _YOLO3_WEIGHTS_PATH
self.model = cv2.dnn.readNetFromDarknet(_YOLO3_CONFIG_PATH, self.weights_path)
self.model.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
self.ln = self.model.getLayerNames()
self.ln = [self.ln[i[0] - 1] for i in self.model.getUnconnectedOutLayers()]
def detect(self, frame):
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
self.model.setInput(blob)
layer_output = self.model.forward(self.ln)
predictions = []
# loop over the detections
for output in layer_output:
# loop over each of the detections
for detection in output:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
label = self.all_labels[class_id]
box = detection[0:4] * np.array([W, H, W, H])
(center_x, center_y, width, height) = box.astype("int")
start_x = int(center_x - (width / 2))
start_y = int(center_y - (height / 2))
end_x = start_x + width
end_y = start_y + height
predictions.append({'start_x': start_x,
'start_y': start_y,
'end_x': end_x,
'end_y': end_y,
'center_x': center_x,
'center_y': center_y,
'confidence': confidence,
'label': label})
return predictions
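# A minimal usage sketch (the image path is an illustrative assumption):
#   detector = detector_factory("yolo3")
#   frame = cv2.imread("frame.jpg")
#   boxes = detector.detect(frame)  # list of dicts: box coords, confidence, label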
def detector_factory(name):
if name == "yolo3":
return Yolo3Detector() | 29.738462 | 85 | 0.682876 | 272 | 1,933 | 4.588235 | 0.345588 | 0.050481 | 0.03125 | 0.025641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021546 | 0.183652 | 1,933 | 65 | 86 | 29.738462 | 0.769328 | 0.029488 | 0 | 0.078431 | 0 | 0 | 0.067236 | 0.023479 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098039 | false | 0 | 0.039216 | 0.019608 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c7f86c676d525a88389da7e53c47fb1948481a48 | 12,285 | py | Python | vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pprint
from neutron_lib import context as n_context
from oslo_config import cfg
from oslo_log import log as logging
from vmware_nsx.shell.admin.plugins.common import constants
import vmware_nsx.shell.admin.plugins.common.utils as admin_utils
import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils
import vmware_nsx.shell.resources as shell
from neutron_lib.callbacks import registry
from neutron_lib import exceptions as nl_exc
from vmware_nsx.common import locking
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.vshield.common import (
constants as nsxv_constants)
from vmware_nsx.plugins.nsx_v.vshield.common import exceptions
from vmware_nsx.plugins.nsx_v.vshield import edge_utils
from vmware_nsx.plugins.nsx_v.vshield import vcns_driver
LOG = logging.getLogger(__name__)
nsxv = utils.get_nsxv_client()
neutron_db = utils.NeutronDbClient()
def nsx_get_static_bindings_by_edge(edge_id):
nsx_dhcp_static_bindings = set()
try:
nsx_dhcp_bindings = nsxv.query_dhcp_configuration(edge_id)
except exceptions.ResourceNotFound:
LOG.error("Edge %s was not found", edge_id)
return
# nsx_dhcp_bindings[0] contains response headers;
# nsx_dhcp_bindings[1] contains response payload
sbindings = nsx_dhcp_bindings[1].get('staticBindings').get(
'staticBindings')
for binding in sbindings:
nsx_dhcp_static_bindings.add(
(edge_id, binding.get('macAddress').lower(),
binding.get('bindingId').lower()))
return nsx_dhcp_static_bindings
def neutron_get_static_bindings_by_edge(edge_id):
neutron_db_dhcp_bindings = set()
for binding in nsxv_db.get_dhcp_static_bindings_by_edge(
neutron_db.context.session, edge_id):
neutron_db_dhcp_bindings.add(
(binding.edge_id, binding.mac_address.lower(),
binding.binding_id.lower()))
return neutron_db_dhcp_bindings
@admin_utils.output_header
def list_missing_dhcp_bindings(resource, event, trigger, **kwargs):
"""List missing DHCP bindings from NSXv backend.
Missing DHCP bindings are those that exist in Neutron DB;
but are not present on corresponding NSXv Edge.
"""
for (edge_id, count) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge(
neutron_db.context.session):
LOG.info("%s", "=" * 60)
LOG.info("For edge: %s", edge_id)
nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id)
if nsx_dhcp_static_bindings is None:
continue
neutron_dhcp_static_bindings = \
neutron_get_static_bindings_by_edge(edge_id)
LOG.info("# of DHCP bindings in Neutron DB: %s",
len(neutron_dhcp_static_bindings))
LOG.info("# of DHCP bindings on NSXv backend: %s",
len(nsx_dhcp_static_bindings))
missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings
if not missing:
LOG.info("No missing DHCP bindings found.")
LOG.info("Neutron DB and NSXv backend are in sync")
else:
LOG.info("Missing DHCP bindings:")
LOG.info("%s", pprint.pformat(missing))
@admin_utils.output_header
def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs):
"""Resync DHCP bindings on NSXv Edge"""
if not kwargs.get('property'):
LOG.error("Need to specify edge-id parameter")
return
else:
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
edge_id = properties.get('edge-id')
if not edge_id:
LOG.error("Need to specify edge-id parameter")
return
LOG.info("Updating NSXv Edge: %s", edge_id)
# Need to create a plugin object; so that we are able to
# do neutron list-ports.
with utils.NsxVPluginWrapper() as plugin:
nsxv_manager = vcns_driver.VcnsDriver(
edge_utils.NsxVCallbacks(plugin))
edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)
try:
edge_manager.update_dhcp_service_config(
neutron_db.context, edge_id)
except exceptions.ResourceNotFound:
LOG.error("Edge %s not found", edge_id)
def delete_old_dhcp_edge(context, old_edge_id, bindings):
LOG.info("Deleting the old DHCP edge: %s", old_edge_id)
with locking.LockManager.get_lock(old_edge_id):
# Delete from NSXv backend
# Note - If we will not delete the router, but free it - it will be
# immediately used as the new one, So it is better to delete it.
try:
nsxv.delete_edge(old_edge_id)
except Exception as e:
LOG.warning("Failed to delete the old edge %(id)s: %(e)s",
{'id': old_edge_id, 'e': e})
# Continue the process anyway
# The edge may have been already deleted at the backend
try:
# Remove bindings from Neutron DB
nsxv_db.clean_edge_router_binding(context.session, old_edge_id)
nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id)
except Exception as e:
LOG.warning("Failed to delete the old edge %(id)s from the "
"DB : %(e)s", {'id': old_edge_id, 'e': e})
def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id):
"""Handle the DHCP edge recreation of a network
"""
LOG.info("Moving network %s to a new edge", net_id)
# delete the old binding
resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36]
nsxv_db.delete_nsxv_router_binding(context.session, resource_id)
# Delete the old static binding of the networks` compute ports
port_filters = {'network_id': [net_id],
'device_owner': ['compute:None']}
compute_ports = plugin.get_ports(context, filters=port_filters)
if old_edge_id:
for port in compute_ports:
# Delete old binding from the DB
nsxv_db.delete_edge_dhcp_static_binding(context.session,
old_edge_id, port['mac_address'])
# Go over all the subnets with DHCP
net_filters = {'network_id': [net_id], 'enable_dhcp': [True]}
subnets = plugin.get_subnets(context, filters=net_filters)
for subnet in subnets:
LOG.info("Moving subnet %s to a new edge", subnet['id'])
# allocate / reuse the new dhcp edge
new_resource_id = edge_manager.create_dhcp_edge_service(
context, net_id, subnet)
if new_resource_id:
# also add fw rules and metadata, once for the new edge
plugin._update_dhcp_service_new_edge(context, resource_id)
# Update the ip of the dhcp port
LOG.info("Creating network %s DHCP address group", net_id)
address_groups = plugin._create_network_dhcp_address_group(
context, net_id)
plugin._update_dhcp_edge_service(context, net_id, address_groups)
# find out the id of the new edge:
new_binding = nsxv_db.get_nsxv_router_binding(
context.session, resource_id)
if new_binding:
LOG.info("Network %(net_id)s was moved to edge %(edge_id)s",
{'net_id': net_id, 'edge_id': new_binding['edge_id']})
else:
LOG.error("Network %(net_id)s was not moved to a new edge",
{'net_id': net_id})
@admin_utils.output_header
def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs):
"""Recreate a dhcp edge with all the networks n a new NSXv edge"""
usage_msg = ("Need to specify edge-id or net-id parameter")
if not kwargs.get('property'):
LOG.error(usage_msg)
return
# input validation
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
old_edge_id = properties.get('edge-id')
if not old_edge_id:
# if the net-id property exist - recreate the edge for this network
net_id = properties.get('net-id')
if net_id:
nsx_recreate_dhcp_edge_by_net_id(net_id)
return
LOG.error(usage_msg)
return
LOG.info("ReCreating NSXv Edge: %s", old_edge_id)
context = n_context.get_admin_context()
# verify that this is a DHCP edge
bindings = nsxv_db.get_nsxv_router_bindings_by_edge(
context.session, old_edge_id)
if (not bindings or
not bindings[0]['router_id'].startswith(
nsxv_constants.DHCP_EDGE_PREFIX)):
LOG.error("Edge %(edge_id)s is not a DHCP edge",
{'edge_id': old_edge_id})
return
# init the plugin and edge manager
cfg.CONF.set_override('core_plugin',
'vmware_nsx.shell.admin.plugins.nsxv.resources'
'.utils.NsxVPluginWrapper')
with utils.NsxVPluginWrapper() as plugin:
nsxv_manager = vcns_driver.VcnsDriver(
edge_utils.NsxVCallbacks(plugin))
edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)
# find the networks bound to this DHCP edge
networks_binding = nsxv_db.get_edge_vnic_bindings_by_edge(
context.session, old_edge_id)
network_ids = [binding['network_id'] for binding in networks_binding]
# Delete the old edge
delete_old_dhcp_edge(context, old_edge_id, bindings)
# This is a regular DHCP edge:
# Move all the networks to other (new or existing) edge
for net_id in network_ids:
recreate_network_dhcp(context, plugin, edge_manager,
old_edge_id, net_id)
def nsx_recreate_dhcp_edge_by_net_id(net_id):
"""Recreate a dhcp edge for a specific network without an edge"""
LOG.info("ReCreating NSXv Edge for network: %s", net_id)
context = n_context.get_admin_context()
# verify that there is no DHCP edge for this network at the moment
resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36]
router_binding = nsxv_db.get_nsxv_router_binding(
context.session, resource_id)
if router_binding:
# make sure there is no edge
if router_binding['edge_id']:
LOG.warning("Network %(net_id)s already has a dhcp edge: "
"%(edge_id)s",
{'edge_id': router_binding['edge_id'],
'net_id': net_id})
return
# delete this old entry
nsxv_db.delete_nsxv_router_binding(context.session, resource_id)
# init the plugin and edge manager
cfg.CONF.set_override('core_plugin',
'vmware_nsx.shell.admin.plugins.nsxv.resources'
'.utils.NsxVPluginWrapper')
with utils.NsxVPluginWrapper() as plugin:
nsxv_manager = vcns_driver.VcnsDriver(edge_utils.NsxVCallbacks(plugin))
edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)
# Verify that the network exists on neutron
try:
plugin.get_network(context, net_id)
except nl_exc.NetworkNotFound:
LOG.error("Network %s does not exist", net_id)
return
recreate_network_dhcp(context, plugin, edge_manager,
None, net_id)
registry.subscribe(list_missing_dhcp_bindings,
constants.DHCP_BINDING,
shell.Operations.LIST.value)
registry.subscribe(nsx_update_dhcp_edge_binding,
constants.DHCP_BINDING,
shell.Operations.NSX_UPDATE.value)
registry.subscribe(nsx_recreate_dhcp_edge,
constants.DHCP_BINDING,
shell.Operations.NSX_RECREATE.value)
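# A hypothetical admin-CLI sketch (the exact nsxadmin resource/operation
# spellings are assumptions derived from the registrations above; the ids are
# illustrative):
#   nsxadmin -r dhcp-binding -o list
#   nsxadmin -r dhcp-binding -o nsx-update --property edge-id=edge-15
#   nsxadmin -r dhcp-binding -o nsx-recreate --property net-id=<network-uuid>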
| 40.147059 | 79 | 0.666504 | 1,672 | 12,285 | 4.642345 | 0.167464 | 0.039423 | 0.024349 | 0.018938 | 0.43262 | 0.360345 | 0.299923 | 0.287684 | 0.233831 | 0.155759 | 0 | 0.001959 | 0.252096 | 12,285 | 305 | 80 | 40.278689 | 0.842838 | 0.177696 | 0 | 0.307317 | 0 | 0 | 0.125848 | 0.013772 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039024 | false | 0 | 0.078049 | 0 | 0.170732 | 0.009756 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c7f889b175a0501be3dc69351ea2836a126b0701 | 1,613 | py | Python | test.py | NattapongSiri/ds_ml_euclidean.py | f6ae1fa1da93be8605cde7c8bb986aa1174b1a29 | [
"MIT"
] | null | null | null | test.py | NattapongSiri/ds_ml_euclidean.py | f6ae1fa1da93be8605cde7c8bb986aa1174b1a29 | [
"MIT"
] | null | null | null | test.py | NattapongSiri/ds_ml_euclidean.py | f6ae1fa1da93be8605cde7c8bb986aa1174b1a29 | [
"MIT"
] | null | null | null | import keras
from keras.models import Model, model_from_json
import numpy as np
import json
def denormalize(y, exp, highest, lowest):
y *= highest
y += lowest
y /= 10 ** exp
return y
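# A quick numeric sketch of the inverse mapping (parameter values are
# illustrative): with exp=2, lowest=-50, highest=300, a raw reading 1.23
# normalizes to (int(1.23 * 100) - (-50)) / 300 ~= 0.5767, and
# denormalize(0.5767, 2, 300, -50) recovers ~= 1.23.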
with open("model.json", "r") as model_fp:
model_json = model_fp.read()
with open("norm_param.json", "r") as norm_fp:
params = json.load(norm_fp)
model = model_from_json(model_json)
model.load_weights("model.h5")
sensor_0 = []
sensor_1 = []
with open("processed.txt") as input_fp:
for line in input_fp:
record = json.loads(line)
sensor_0.append((int(record[0] * 10 ** params["precision"]) - params["lowest"]) / params["highest"])
sensor_1.append((int(record[1] * 10 ** params["precision"]) - params["lowest"]) / params["highest"])
anchor = 0
# first set of value taken from processed.txt
x = np.array(sensor_0[anchor:anchor + params["look_back"]]).reshape(1, params["look_back"], 1)
prediction_len = 100
with open("predicted.txt", "w") as output_fp:
output.write("[")
# copy first raw value into output
str_x = [str(denormalize(v, params["precision"], params["highest"], params["lowest"])) for v in sensor_0[anchor: anchor + params["look_back"]]]
output_fp.write(",".join(str_x))
# predict till reach specified length
for j in range(0, prediction_len):
y = model.predict(x)
output_fp.write("," + str(denormalize(y[0][0], params["precision"], params["highest"], params["lowest"])))
        x = np.roll(x, -1, 1)  # shift the look-back window left by one step along axis 1
        x[0][params["look_back"] - 1][0] = y[0][0]  # set last value to the predicted value
output.write("]") | 35.844444 | 147 | 0.651581 | 244 | 1,613 | 4.180328 | 0.327869 | 0.031373 | 0.082353 | 0.035294 | 0.22549 | 0.22549 | 0.147059 | 0 | 0 | 0 | 0 | 0.023538 | 0.183509 | 1,613 | 45 | 148 | 35.844444 | 0.750949 | 0.101054 | 0 | 0 | 0 | 0 | 0.131488 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.114286 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c7fa2d7b6279fbc16ddf225ebbef4fbcd6439d6d | 2,204 | py | Python | Grundlagen/Python/Minensucher/minensucher.py | jneug/schule-projekte | 4f1d56d6bb74a47ca019cf96d2d6cc89779803c9 | [
"MIT"
] | 2 | 2020-09-24T12:11:16.000Z | 2022-03-31T04:47:24.000Z | Grundlagen/Python/Minensucher/minensucher.py | jneug/schule-projekte | 4f1d56d6bb74a47ca019cf96d2d6cc89779803c9 | [
"MIT"
] | 1 | 2021-02-27T15:06:27.000Z | 2021-03-01T16:32:48.000Z | Grundlagen/Python/Minensucher/minensucher.py | jneug/schule-projekte | 4f1d56d6bb74a47ca019cf96d2d6cc89779803c9 | [
"MIT"
] | 1 | 2021-02-24T05:12:35.000Z | 2021-02-24T05:12:35.000Z |
from random import randint
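# This appears to be a Pygame Zero (pgzrun) script: WIDTH/HEIGHT, Actor,
# screen and mouse are injected by the pgzrun runner, so they need no
# explicit import here.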
FELD_BREITE = 15
FELD_HOEHE = 10
ANZAHL_MINEN = randint(
int(FELD_BREITE * FELD_HOEHE * 0.1), int(FELD_BREITE * FELD_HOEHE * 0.2)
)
WIDTH = FELD_BREITE * 20
HEIGHT = FELD_HOEHE * 20
feld = []
def minen_verteilen(anzahl):
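    # Placement is probabilistic: each cell receives a mine with probability
    # 3/11 until 'anzahl' is exhausted, so fewer mines than requested may end
    # up on the board.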
for i in range(FELD_BREITE):
feld.append([])
for j in range(FELD_HOEHE):
if anzahl > 0 and randint(0, 10) < 3:
feld[i].append("X")
anzahl -= 1
else:
feld[i].append(0)
def anzahl_anpassen(i, j):
for x in range(3):
for y in range(3):
new_i = i - 1 + x
new_j = j - 1 + y
if new_i >= 0 and new_i < FELD_BREITE and new_j >= 0 and new_j < FELD_HOEHE:
if feld[new_i][new_j] != "X":
feld[new_i][new_j] += 1
def minen_zaehlen():
for i in range(FELD_BREITE):
for j in range(FELD_HOEHE):
cell = feld[i][j]
if cell == "X":
anzahl_anpassen(i, j)
sprites = []
def feld_aufbauen():
for i in range(FELD_BREITE):
for j in range(FELD_HOEHE):
inhalt = feld[i][j]
if inhalt == "X":
bomb_sprite = Actor("bomb")
bomb_sprite.center = (i * 20 + 10, j * 20 + 10)
sprites.append(bomb_sprite)
feld_sprite = Actor("feld")
feld_sprite.topleft = (i * 20, j * 20)
sprites.append(feld_sprite)
minen_verteilen(ANZAHL_MINEN)
minen_zaehlen()
feld_aufbauen()
def draw():
screen.clear()
for i in range(FELD_BREITE):
for j in range(FELD_HOEHE):
inhalt = feld[i][j]
screen.draw.textbox(str(inhalt), Rect((i*20,j*20), (20,20)))
for sprite in sprites:
sprite.draw()
def on_mouse_down(pos, button):
if button == mouse.LEFT:
        for sprite in list(sprites):  # iterate over a copy; removing from the list while iterating skips elements
if sprite.collidepoint(pos):
sprites.remove(sprite)
i, j = int(pos[0] / 20), int(pos[1] / 20)
if feld[i][j] == 'X':
print("Bombe!")
else:
print(feld[i][j]) | 25.929412 | 89 | 0.503176 | 299 | 2,204 | 3.555184 | 0.200669 | 0.065851 | 0.082785 | 0.041392 | 0.242709 | 0.220132 | 0.138288 | 0.138288 | 0.138288 | 0.138288 | 0 | 0.037791 | 0.375681 | 2,204 | 85 | 90 | 25.929412 | 0.734738 | 0 | 0 | 0.215385 | 0 | 0 | 0.008962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0.030769 | 0.015385 | 0 | 0.107692 | 0.030769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c7fb9a4574cc5b059182a0003a64fad616c40f66 | 1,853 | py | Python | src/simmate/website/core_components/templatetags/matplotlib_filter.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 9 | 2021-12-21T02:58:21.000Z | 2022-01-25T14:00:06.000Z | src/simmate/website/core_components/templatetags/matplotlib_filter.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 51 | 2022-01-01T15:59:58.000Z | 2022-03-26T21:25:42.000Z | src/simmate/website/core_components/templatetags/matplotlib_filter.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 7 | 2022-01-01T03:44:32.000Z | 2022-03-29T19:59:27.000Z | # -*- coding: utf-8 -*-
from io import BytesIO
import base64
from django import template
from django.utils.safestring import mark_safe
# We need a registration instance in order to configure everything with Django
register = template.Library()
@register.filter(name="matplotlib_figure", is_safe=True)
def matplotlib_to_html(plot):
"""
Converts a matplotlib figure into an html element.
"""
# Lmao I have no idea how this works and I just got lucky when following
# these stack overflow posts... I should switch to plotly ASAP so I can
# remove this filter.
# https://stackoverflow.com/questions/14824522/
# https://stackoverflow.com/questions/40534715/
# Alternatively, I could rewrite this function to save a randomly generated
# file to the /static/runtime folder and then passing back an html <img> tag
# that points back to this image.
figdata = BytesIO()
plot.savefig(figdata, format="png")
figdata.seek(0)
data = figdata.getvalue()
data_decoded = base64.b64encode(data).decode("utf-8").replace("\n", "")
html = f"""
<img id="ItemPreview" class="img-fluid" src="data:image/png;base64,{data_decoded}">
"""
    # BUG: every time this filter is called, I receive the following warning:
#
# .../lib/python3.10/site-packages/pymatgen/util/plotting.py:48: UserWarning:
# Starting a Matplotlib GUI outside of the main thread will likely fail.
# plt.figure(figsize=(width, height), facecolor="w", dpi=dpi)
#
# This doesn't seem to cause any issues, so I ignore it for now.
# Because we added new html to our script, we need to have Django check it
# ensure it safe before returning. Read more about this here:
# https://docs.djangoproject.com/en/3.2/howto/custom-template-tags/#filters-and-auto-escaping
return mark_safe(html)
| 37.816327 | 97 | 0.703184 | 268 | 1,853 | 4.83209 | 0.664179 | 0.006178 | 0.032432 | 0.046332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022697 | 0.191581 | 1,853 | 48 | 98 | 38.604167 | 0.841789 | 0.622774 | 0 | 0 | 0 | 0.0625 | 0.180995 | 0.064857 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a01cd1f09cce901b60d2993b1ae23840fb04eb6 | 619 | py | Python | main.py | abhiomkar/paadu-gajala | 8f38229f971e279eef79cb4558f35c0aee93688f | [
"MIT"
] | null | null | null | main.py | abhiomkar/paadu-gajala | 8f38229f971e279eef79cb4558f35c0aee93688f | [
"MIT"
] | null | null | null | main.py | abhiomkar/paadu-gajala | 8f38229f971e279eef79cb4558f35c0aee93688f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import re
import urllib
from urllib import parse
import subprocess
url = "https://www.google.ie/search?q=play+arijit+singh+songs&oq=play+arijit+singh+songs&aqs=chrome..69i57.16305j0j4&sourceid=chrome&es_sm=119&ie=UTF-8"
p = parse.urlparse(url)
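# Only handle Google search URLs: pull the q= parameter out of the query
# string (this assumes q is the first parameter), strip the leading "play"
# keyword, and hand the rest to mpsyt (mps-youtube) as a search to play.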
if (re.match("^www.google..*", p.hostname)):
query = re.match("q=[^&]+", p.query).group(0)[2:]
query = parse.unquote(query)
query = query.lower()
if query.startswith("play+"):
query = query[5:]
query = query.replace("+", " ")
print("mpsyt /%s, 1, all -a" % (query))
proc = subprocess.Popen("mpsyt /%s, 1, all -a" % (query), shell=True)
| 24.76 | 152 | 0.662359 | 96 | 619 | 4.260417 | 0.572917 | 0.0978 | 0.07335 | 0.0978 | 0.07824 | 0.07824 | 0 | 0 | 0 | 0 | 0 | 0.038745 | 0.124394 | 619 | 24 | 153 | 25.791667 | 0.715867 | 0.027464 | 0 | 0 | 0 | 0.066667 | 0.352745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a0352257dfecae269908aa7ad1b13698f39a3b0 | 937 | py | Python | autogalaxy/cosmology/model.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
] | null | null | null | autogalaxy/cosmology/model.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
] | null | null | null | autogalaxy/cosmology/model.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
] | null | null | null | from astropy import cosmology as cosmo
from autogalaxy.cosmology.wrap import FlatLambdaCDM
from autogalaxy.cosmology.wrap import FlatwCDM
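# Planck 2015 cosmologies with a free matter density Om0 (and, for the wCDM
# variant, a free dark-energy equation of state w0); all remaining parameters
# are pinned to astropy's built-in Planck15 values.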
class Planck15Om0(FlatLambdaCDM):
def __init__(self, Om0: float = 0.3075):
planck15 = cosmo.Planck15
super().__init__(
H0=planck15.H0,
Om0=Om0,
Tcmb0=planck15.Tcmb0,
Neff=planck15.Neff,
m_nu=planck15.m_nu,
Ob0=planck15.Ob0,
name=planck15.name,
)
class Planck15FlatwCDM(FlatwCDM):
def __init__(self, Om0: float = 0.3075, w0: float = -1.0):
planck15 = cosmo.Planck15
super().__init__(
H0=planck15.H0,
Om0=Om0,
w0=w0,
Tcmb0=planck15.Tcmb0,
Neff=planck15.Neff,
m_nu=planck15.m_nu,
Ob0=planck15.Ob0,
name=planck15.name,
)
| 24.657895 | 63 | 0.545358 | 98 | 937 | 5.010204 | 0.316327 | 0.02444 | 0.093686 | 0.10998 | 0.745418 | 0.610998 | 0.610998 | 0.513238 | 0.513238 | 0.513238 | 0 | 0.11745 | 0.363927 | 937 | 37 | 64 | 25.324324 | 0.706376 | 0 | 0 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a039fa5b8e17797042273ddf8857c1b5d16a4c0 | 2,250 | py | Python | test_image.py | HAIbingshuai/yolo_v3_tensorflow | 91f9a7146b06082fb9159aeaf4c54764c3f0e75c | [
"MIT"
] | null | null | null | test_image.py | HAIbingshuai/yolo_v3_tensorflow | 91f9a7146b06082fb9159aeaf4c54764c3f0e75c | [
"MIT"
] | null | null | null | test_image.py | HAIbingshuai/yolo_v3_tensorflow | 91f9a7146b06082fb9159aeaf4c54764c3f0e75c | [
"MIT"
] | null | null | null | # coding=utf-8 python3.6
# ================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
# license='MIT License'
# Author : haibingshuai
# Created date: 2019/10/29 18:05
# Description :
# ================================================================
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
from PIL import Image
import os
return_elements = ["input/input_data:0", "pred_smt_box/concat_2:0", "pred_mid_box/concat_2:0",
"pred_big_box/concat_2:0"]
pb_file = "./susong_header_5k_model_1.pb"
image_path_ = "./data_test/in"
image_out_path_ = "./data_test/out"
num_classes = 1
input_size = 608 # 416
graph = tf.Graph()
pic_list_path = [[os.path.join(image_path_, one), os.path.join(image_out_path_, one)] for one in
os.listdir(image_path_)]
with tf.Session(graph=graph) as sess:
for image_path, image_out_path in pic_list_path:
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image_size = original_image.shape[:2]
image_data = utils.image_pretreat_process(np.copy(original_image), input_size)
image_data = image_data[np.newaxis, ...]
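        # NOTE: the frozen graph is re-read from disk on every image below;
        # the call is loop-invariant and could likely be hoisted above the loop.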
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
[return_tensors[1], return_tensors[2], return_tensors[3]],
feed_dict={return_tensors[0]: image_data})
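        # merge predictions from the three YOLO output scales into one
        # (N, 5 + num_classes) array before post-processing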
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
np.reshape(pred_mbbox, (-1, 5 + num_classes)),
np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.2)
bboxes = utils.nms(bboxes, 0.5, method='nms')
print(len(bboxes))
image = utils.draw_bbox(original_image, bboxes)
image = Image.fromarray(image)
image.show()
image = np.array(image)
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
cv2.imwrite(image_out_path, image)
| 39.473684 | 96 | 0.622222 | 301 | 2,250 | 4.365449 | 0.372093 | 0.079148 | 0.03653 | 0.025114 | 0.060883 | 0.038052 | 0.038052 | 0 | 0 | 0 | 0 | 0.033803 | 0.211111 | 2,250 | 56 | 97 | 40.178571 | 0.706479 | 0.137333 | 0 | 0 | 0 | 0 | 0.076644 | 0.050751 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a04153361ec959fa152730e3ea3a15064201f43 | 1,975 | py | Python | infra/cifuzz/actions/entrypoint.py | alexcrichton/oss-fuzz | e82397baf623b69e17f29115817c5415f52df32b | [
"Apache-2.0"
] | 2 | 2020-01-23T10:31:30.000Z | 2020-01-23T17:34:20.000Z | infra/cifuzz/actions/entrypoint.py | alexcrichton/oss-fuzz | e82397baf623b69e17f29115817c5415f52df32b | [
"Apache-2.0"
] | 1 | 2020-01-23T10:31:32.000Z | 2020-01-23T10:31:32.000Z | infra/cifuzz/actions/entrypoint.py | alexcrichton/oss-fuzz | e82397baf623b69e17f29115817c5415f52df32b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds and runs specific OSS-Fuzz project's fuzzers for CI tools."""
import os
import subprocess
import sys
def main():
"""Runs OSS-Fuzz project's fuzzers for CI tools."""
project_name = os.environ['OSS_FUZZ_PROJECT_NAME']
repo_name = os.environ['GITHUB_REPOSITORY'].rsplit('/', 1)[-1]
commit_sha = os.environ['GITHUB_SHA']
# Build the specified project's fuzzers from the current repo state.
print('Building fuzzers\nproject: {0}\nrepo name: {1}\ncommit: {2}'.format(
project_name, repo_name, commit_sha))
command = [
'python3', '/src/oss-fuzz/infra/cifuzz.py', 'build_fuzzers', project_name,
repo_name, commit_sha
]
print('Running command: "{0}"'.format(' '.join(command)))
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as err:
sys.stderr.write('Error building fuzzers: "{0}"'.format(str(err)))
return err.returncode
# Run the specified project's fuzzers from the build.
command = [
'python3', '/src/oss-fuzz/infra/cifuzz.py', 'run_fuzzers', project_name
]
print('Running command: "{0}"'.format(' '.join(command)))
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as err:
sys.stderr.write('Error running fuzzers: "{0}"'.format(str(err)))
return err.returncode
print('Fuzzers ran successfully.')
return 0
if __name__ == '__main__':
sys.exit(main())
| 34.051724 | 80 | 0.713418 | 279 | 1,975 | 4.953405 | 0.437276 | 0.043415 | 0.043415 | 0.041245 | 0.42547 | 0.42547 | 0.384949 | 0.335745 | 0.17945 | 0.17945 | 0 | 0.012085 | 0.162025 | 1,975 | 57 | 81 | 34.649123 | 0.822961 | 0.394937 | 0 | 0.375 | 0 | 0 | 0.290598 | 0.067521 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.09375 | 0 | 0.21875 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a04693e9cf2c844710daf8e07cff96644dd0053 | 1,843 | py | Python | train.py | tmielika/Prototrain | 1488aa96e6a6f565bfc02b45e6ea6ec812951b82 | [
"Apache-2.0"
] | 8 | 2019-05-16T23:04:12.000Z | 2020-03-15T09:11:47.000Z | train.py | tmielika/Prototrain | 1488aa96e6a6f565bfc02b45e6ea6ec812951b82 | [
"Apache-2.0"
] | null | null | null | train.py | tmielika/Prototrain | 1488aa96e6a6f565bfc02b45e6ea6ec812951b82 | [
"Apache-2.0"
] | 5 | 2019-05-13T16:53:14.000Z | 2020-02-21T21:51:32.000Z | # Copyright 2019, Oath Inc.
# Licensed under the terms of the Apache 2.0 license.
# See LICENSE file in http://github.com/yahoo/prototrain for terms.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import logging
logging.getLogger('tensorflow').disabled = True
import sys
import json
import tensorflow as tf
import numpy as np
import importlib
from argparse import ArgumentParser
parser = ArgumentParser(usage="Train a simple supervised neural net")
parser.add_argument("-m", "--model",
help="The model you want to train (ex: 'models.simple')")
parser.add_argument("-d", "--debug",
action="store_true",
help="Launch ipython after model setup")
parser.add_argument("-f", "--fresh",
action="store_true",
help="Restart experiment from scratch instead of resuming")
parser.add_argument("-t", "--trainer",
default="trainers.default",
help="The trainer you want to use to train (default: 'trainers.default')")
def print_config(config):
print("======================")
print("Configuration Settings")
print("======================")
print(config)
print("=" * 100)
print("")
if __name__ == '__main__':
args = parser.parse_args()
# turn off tf logging
tf.logging.set_verbosity(tf.logging.ERROR)
# load model
print("Loading model from '%s' module" % args.model)
model = importlib.import_module(args.model)
config = model.config
# set random seed before doing anything else
np.random.seed(config["trainer.random_seed"])
tf.set_random_seed(config["trainer.random_seed"])
# display config
print("Loading trainer from '%s' module" % args.trainer)
trainer = importlib.import_module(args.trainer)
trainer.train(model, args)
| 30.716667 | 94 | 0.642973 | 227 | 1,843 | 5.101322 | 0.484582 | 0.043178 | 0.058722 | 0.032815 | 0.056995 | 0.056995 | 0 | 0 | 0 | 0 | 0 | 0.006916 | 0.21541 | 1,843 | 59 | 95 | 31.237288 | 0.793914 | 0.125882 | 0 | 0.1 | 0 | 0 | 0.320649 | 0.027449 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.25 | 0 | 0.275 | 0.225 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a047f431458c32076be051349d3955f564f2b9c | 865 | py | Python | python_api/src/api/libraries/awt/awt.py | berkerdemoglu/My3DEngine | 86edd838563a1d0691c1a267f5af62e184db33f5 | [
"CC0-1.0"
] | 1 | 2021-05-10T08:50:45.000Z | 2021-05-10T08:50:45.000Z | python_api/src/api/libraries/awt/awt.py | berkerdemoglu/My3DEngine | 86edd838563a1d0691c1a267f5af62e184db33f5 | [
"CC0-1.0"
] | 4 | 2021-05-10T10:46:04.000Z | 2021-05-11T11:10:36.000Z | python_api/src/api/libraries/awt/awt.py | berkerdemoglu/My3DEngine | 86edd838563a1d0691c1a267f5af62e184db33f5 | [
"CC0-1.0"
] | null | null | null | from ...base import BaseAPIClass
class Color(BaseAPIClass):
"""Represents the java.awt.Color class."""
def __init__(self, r: int, g: int, b: int):
"""Initialize the color in RGB format."""
self._set_color('r', r)
self._set_color('g', g)
self._set_color('b', b)
def _set_color(self, name, value):
if 0 <= value <= 255:
setattr(self, name, int(value))
else:
raise ValueError("Color provided outside the possible range")
def as_dict(self):
return self.__dict__
# Color constants
WHITE = Color(255, 255, 255)
LIGHT_GRAY = Color(192, 192, 192)
GRAY = Color(128, 128, 128)
DARK_GRAY = Color(64, 64, 64)
BLACK = Color(0, 0, 0)
RED = Color(255, 0, 0)
GREEN = Color(0, 255, 0)
BLUE = Color(0, 0, 255)
YELLOW = Color(255, 255, 0)
MAGENTA = Color(255, 0, 255)
CYAN = Color(0, 255, 255)
PINK = Color(255, 175, 175)
ORANGE = Color(255, 200, 0)
| 21.625 | 64 | 0.656647 | 143 | 865 | 3.839161 | 0.398601 | 0.087432 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129944 | 0.181503 | 865 | 39 | 65 | 22.179487 | 0.64548 | 0.10289 | 0 | 0 | 0 | 0 | 0.057516 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.038462 | 0.038462 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a0561dbd562b6a25ca87c6e16b3708235133cdb | 6,377 | py | Python | utils_eval.py | wellowdata/pytorch-layoutnet | 3d4352f94ed00d3c37890e9119452811d4f0893f | [
"MIT"
] | 155 | 2018-08-13T05:26:35.000Z | 2022-03-26T13:13:24.000Z | utils_eval.py | wellowdata/pytorch-layoutnet | 3d4352f94ed00d3c37890e9119452811d4f0893f | [
"MIT"
] | 25 | 2018-08-17T08:39:51.000Z | 2022-01-11T01:08:37.000Z | utils_eval.py | wellowdata/pytorch-layoutnet | 3d4352f94ed00d3c37890e9119452811d4f0893f | [
"MIT"
] | 44 | 2018-08-17T19:53:05.000Z | 2022-03-31T06:25:37.000Z | import numpy as np
from scipy.spatial import HalfspaceIntersection, ConvexHull
from pano import pano_connect_points
def np_coorx2u(coorx, coorW=1024):
return ((coorx + 0.5) / coorW - 0.5) * 2 * np.pi
def np_coory2v(coory, coorH=512):
return -((coory + 0.5) / coorH - 0.5) * np.pi
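# u in [-pi, pi) is the longitude and v in (-pi/2, pi/2) the latitude of an
# equirectangular pixel; the two helpers above map image column/row indices
# to these angles.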
def np_coor2xy(coor, z=50, coorW=1024, coorH=512):
'''
coor: N x 2, index of array in (col, row) format
'''
coor = np.array(coor)
u = np_coorx2u(coor[:, 0], coorW)
v = np_coory2v(coor[:, 1], coorH)
c = z / np.tan(v)
x = c * np.sin(u)
y = -c * np.cos(u)
return np.hstack([x[:, None], y[:, None]])
def tri2halfspace(pa, pb, p):
v1 = pa - p
v2 = pb - p
vn = np.cross(v1, v2)
if -vn @ p > 0:
vn = -vn
return [*vn, -vn @ p]
def xyzlst2halfspaces(xyz_floor, xyz_ceil):
'''
return halfspace enclose (0, 0, 0)
'''
N = xyz_floor.shape[0]
halfspaces = []
for i in range(N):
last_i = (i - 1 + N) % N
next_i = (i + 1) % N
p_floor_a = xyz_floor[last_i]
p_floor_b = xyz_floor[next_i]
p_floor = xyz_floor[i]
p_ceil_a = xyz_ceil[last_i]
p_ceil_b = xyz_ceil[next_i]
p_ceil = xyz_ceil[i]
halfspaces.append(tri2halfspace(p_floor_a, p_floor_b, p_floor))
halfspaces.append(tri2halfspace(p_floor_a, p_ceil, p_floor))
halfspaces.append(tri2halfspace(p_ceil, p_floor_b, p_floor))
halfspaces.append(tri2halfspace(p_ceil_a, p_ceil_b, p_ceil))
halfspaces.append(tri2halfspace(p_ceil_a, p_floor, p_ceil))
halfspaces.append(tri2halfspace(p_floor, p_ceil_b, p_ceil))
return np.array(halfspaces)
def eval_3diou(dt_floor_coor, dt_ceil_coor, gt_floor_coor, gt_ceil_coor,
ch=-1.6, coorW=1024, coorH=512):
'''
Evaluate 3D IoU of "convex layout".
Instead of voxelization, this function use halfspace intersection
to evaluate the volume.
Input parameters:
dt_ceil_coor, dt_floor_coor, gt_ceil_coor, gt_floor_coor
have to be in shape [N, 2] and in the format of:
[[x, y], ...]
listing the corner position from left to right on the equirect image.
'''
dt_floor_coor = np.array(dt_floor_coor)
dt_ceil_coor = np.array(dt_ceil_coor)
gt_floor_coor = np.array(gt_floor_coor)
gt_ceil_coor = np.array(gt_ceil_coor)
assert (dt_floor_coor[:, 0] != dt_ceil_coor[:, 0]).sum() == 0
assert (gt_floor_coor[:, 0] != gt_ceil_coor[:, 0]).sum() == 0
N = len(dt_floor_coor)
dt_floor_xyz = np.hstack([
np_coor2xy(dt_floor_coor, ch, coorW, coorH),
np.zeros((N, 1)) + ch,
])
gt_floor_xyz = np.hstack([
np_coor2xy(gt_floor_coor, ch, coorW, coorH),
np.zeros((N, 1)) + ch,
])
dt_c = np.sqrt((dt_floor_xyz[:, :2] ** 2).sum(1))
gt_c = np.sqrt((gt_floor_xyz[:, :2] ** 2).sum(1))
dt_v2 = np_coory2v(dt_ceil_coor[:, 1], coorH)
gt_v2 = np_coory2v(gt_ceil_coor[:, 1], coorH)
dt_ceil_z = dt_c * np.tan(dt_v2)
gt_ceil_z = gt_c * np.tan(gt_v2)
dt_ceil_xyz = dt_floor_xyz.copy()
dt_ceil_xyz[:, 2] = dt_ceil_z
gt_ceil_xyz = gt_floor_xyz.copy()
gt_ceil_xyz[:, 2] = gt_ceil_z
dt_halfspaces = xyzlst2halfspaces(dt_floor_xyz, dt_ceil_xyz)
gt_halfspaces = xyzlst2halfspaces(gt_floor_xyz, gt_ceil_xyz)
in_halfspaces = HalfspaceIntersection(np.concatenate([dt_halfspaces, gt_halfspaces]),
np.zeros(3))
dt_halfspaces = HalfspaceIntersection(dt_halfspaces, np.zeros(3))
gt_halfspaces = HalfspaceIntersection(gt_halfspaces, np.zeros(3))
in_volume = ConvexHull(in_halfspaces.intersections).volume
dt_volume = ConvexHull(dt_halfspaces.intersections).volume
gt_volume = ConvexHull(gt_halfspaces.intersections).volume
un_volume = dt_volume + gt_volume - in_volume
return in_volume / un_volume
def eval_PE(dt_ceil_coor, dt_floor_coor, gt_ceil_coor, gt_floor_coor, H=512, W=1024):
'''
Evaluate pixel surface error (3 labels: ceiling, wall, floor)
Input parameters:
dt_ceil_coor, dt_floor_coor, gt_ceil_coor, gt_floor_coor
have to be in shape [N, 2] and in the format of:
[[x, y], ...]
listing the corner position from left to right on the equirect image.
'''
y0 = np.zeros(W)
y1 = np.zeros(W)
y0_gt = np.zeros(W)
y1_gt = np.zeros(W)
    n_corners = dt_ceil_coor.shape[0]
    for j in range(n_corners):
        # wrap with the actual corner count instead of a hardcoded 4
        coorxy = pano_connect_points(dt_ceil_coor[j], dt_ceil_coor[(j + 1) % n_corners], -50)
        y0[np.round(coorxy[:, 0]).astype(int)] = coorxy[:, 1]
        coorxy = pano_connect_points(dt_floor_coor[j], dt_floor_coor[(j + 1) % n_corners], 50)
        y1[np.round(coorxy[:, 0]).astype(int)] = coorxy[:, 1]
        coorxy = pano_connect_points(gt_ceil_coor[j], gt_ceil_coor[(j + 1) % n_corners], -50)
        y0_gt[np.round(coorxy[:, 0]).astype(int)] = coorxy[:, 1]
        coorxy = pano_connect_points(gt_floor_coor[j], gt_floor_coor[(j + 1) % n_corners], 50)
        y1_gt[np.round(coorxy[:, 0]).astype(int)] = coorxy[:, 1]
surface = np.zeros((H, W), dtype=np.int32)
surface[np.round(y0).astype(int), np.arange(W)] = 1
surface[np.round(y1).astype(int), np.arange(W)] = 1
surface = np.cumsum(surface, axis=0)
surface_gt = np.zeros((H, W), dtype=np.int32)
surface_gt[np.round(y0_gt).astype(int), np.arange(W)] = 1
surface_gt[np.round(y1_gt).astype(int), np.arange(W)] = 1
surface_gt = np.cumsum(surface_gt, axis=0)
return (surface != surface_gt).sum() / (H * W)
def augment(x_img, flip, rotate):
aug_type = ['']
x_imgs_augmented = [x_img]
if flip:
aug_type.append('flip')
x_imgs_augmented.append(np.flip(x_img, axis=-1))
    for rot in rotate:  # avoid shadowing the 'rotate' argument inside the loop
        shift = int(round(rot * x_img.shape[-1]))
        aug_type.append('rotate %d' % shift)
        x_imgs_augmented.append(np.roll(x_img, shift, axis=-1))
return np.array(x_imgs_augmented), aug_type
def augment_undo(x_imgs_augmented, aug_type):
x_imgs = []
for x_img, aug in zip(x_imgs_augmented, aug_type):
if aug == 'flip':
x_imgs.append(np.flip(x_img, axis=-1))
elif aug.startswith('rotate'):
shift = int(aug.split()[-1])
x_imgs.append(np.roll(x_img, -shift, axis=-1))
elif aug == '':
x_imgs.append(x_img)
else:
raise NotImplementedError()
return np.array(x_imgs) | 35.427778 | 89 | 0.631331 | 1,019 | 6,377 | 3.689892 | 0.155054 | 0.050266 | 0.032181 | 0.047872 | 0.42633 | 0.363564 | 0.304255 | 0.243351 | 0.199734 | 0.167819 | 0 | 0.031872 | 0.227536 | 6,377 | 180 | 90 | 35.427778 | 0.731425 | 0.11024 | 0 | 0.031746 | 0 | 0 | 0.004132 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 1 | 0.071429 | false | 0 | 0.02381 | 0.015873 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a072f65ffbf646328f7f88131efc498b7381e02 | 3,341 | py | Python | python/sdm/houdini/properties.py | sashaouellet/SDMTools | edb529398b07a577a5492887fe840c6cfd891551 | [
"MIT"
] | 7 | 2017-11-27T20:51:11.000Z | 2020-07-18T22:51:46.000Z | python/sdm/houdini/properties.py | tws0002/SDMTools | edb529398b07a577a5492887fe840c6cfd891551 | [
"MIT"
] | 7 | 2017-12-03T21:25:19.000Z | 2018-02-12T08:03:29.000Z | python/sdm/houdini/properties.py | tws0002/SDMTools | edb529398b07a577a5492887fe840c6cfd891551 | [
"MIT"
] | 3 | 2018-04-27T02:45:28.000Z | 2020-02-15T14:12:45.000Z | """Utility functions regarding property and parameter interfaces
__author__ = Sasha Ouellet (www.sashaouellet.com)
__version__ = 1.0.0
__date__ = 12/10/17
"""
import hou
def initRopNotificationProperty(node):
"""Given a ROP node, replaces the execute button with an identical
button that launches a notification after the cache/render process
if the newly added checkbox is checked.
Args:
node (hou.Node): The ROP node to add the new button and notifications
checkbox to
"""
parmTemplate = node.parmTemplateGroup()
notifyParm = hou.ToggleParmTemplate('notify', 'Notify on Completion', help='Receive a notification when this ROP output operation completes. Notifications are based on settings in SDMTools > Preferences.')
notifyScript = "; from sdm.houdini.notifications import notifyUser, NotificationType;" \
"seconds = time.time() - start;" \
"m, s = divmod(seconds, 60);" \
"h, m = divmod(m, 60);" \
"p = hou.pwd().parm('notify');" \
"data = {'Node':hou.pwd().name(), 'Duration':'%d:%02d:%02d' % (h, m, s)};" \
"notifyUser(NotificationType.ROP_COMPLETE if p and p.eval() else None, data=data)"
    if node.type().name() == 'filecache':  # node.type() returns a hou.NodeType, so compare its name
folderParm = parmTemplate.containingFolder('execute')
allParms = folderParm.parmTemplates()
newParms = ()
for parm in allParms:
if parm.name() == 'renderdialog':
parm.setJoinWithNext(True)
newParms += (parm,)
newParms += (notifyParm,)
elif parm.name() == 'execute':
execCache = parm.clone()
parm.hide(True)
newParms += (parm,)
callback = execCache.scriptCallback()
callback = 'import time; start = time.time(); ' + callback
callback += notifyScript
execCache.setScriptCallback(callback)
execCache.setName('executeWithNotification')
newParms += (execCache,)
else:
newParms += (parm,)
folderParm.setParmTemplates(newParms)
parmTemplate.replace(folderParm.name(), folderParm)
else:
renderButton = parmTemplate.find('execute')
if renderButton:
renderNotify = renderButton.clone()
renderButton.hide(True)
callback = renderButton.scriptCallback() or 'import hou; hou.pwd().render()'
callback = 'import time; start = time.time(); ' + callback
callback += notifyScript
renderNotify.setScriptCallback(callback)
renderNotify.setName('executeWithNotification')
parmTemplate.replace('execute', renderButton)
parmTemplate.insertAfter('execute', renderNotify)
parmTemplate.insertAfter('renderdialog', notifyParm)
node.setParmTemplateGroup(parmTemplate)
def getMainParm(node, parm):
"""Given a node and a name of a parameter on the node,
returns the parameter at the end of any potential reference
chains.
Args:
node (hou.Node): The node to search for the parameter on
parm (str): The name of the parameter on the given node to
find the final referenced parameter of
Returns:
hou.Parm: The final parm in the chain of referenced parms
Raises:
        ValueError: When the named parm is not present on the given node
"""
    assert isinstance(node, hou.Node), 'Must specify a node (hou.Node)'
    parmName = parm
    parm = node.parm(parmName)
    if not parm:
        raise ValueError('Specified parm: {} is not present on given node {}'.format(parmName, node))
    # Follow the chain of parameter references until it terminates, i.e.
    # until getReferencedParm() returns the parm itself
    otherParm = parm.getReferencedParm()
    while otherParm != parm:
        parm = otherParm
        otherParm = parm.getReferencedParm()
return otherParm | 30.651376 | 206 | 0.70877 | 396 | 3,341 | 5.94697 | 0.396465 | 0.014862 | 0.018684 | 0.012739 | 0.065393 | 0.050106 | 0.050106 | 0.050106 | 0.050106 | 0 | 0 | 0.006177 | 0.176295 | 3,341 | 109 | 207 | 30.651376 | 0.849564 | 0.258306 | 0 | 0.155172 | 0 | 0.017241 | 0.315639 | 0.075949 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.034483 | false | 0 | 0.086207 | 0 | 0.155172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a08b8e7d4b8cfaa372b5a9eb53f76c5bc2b10cc | 14,066 | py | Python | buildchain/buildchain/packaging.py | n1603/metalk8s | 2f337a435380102055d3725f0cc2b6165818e880 | [
"Apache-2.0"
] | null | null | null | buildchain/buildchain/packaging.py | n1603/metalk8s | 2f337a435380102055d3725f0cc2b6165818e880 | [
"Apache-2.0"
] | null | null | null | buildchain/buildchain/packaging.py | n1603/metalk8s | 2f337a435380102055d3725f0cc2b6165818e880 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""Tasks to put repositories on the ISO.
This module provides several services:
- build a unique container image for all the build tasks
- downloading packages and repositories
- building local packages from sources
- building local repositories from local packages
Note that for now, it only works for CentOS 7 x86_64.
Overview:
(e.g.: base, …)
┌─────────┐ ┌──────────┐ ┌──────────────┐
│ builder │──────>│ │ download │ │ build │
│ image │ │──────>│ packages │──────>│ repositories │
└─────────┘ │ └──────────┘ └──────────────┘
│ ┌──────────┐ ┌──────────────┐
┌─────────┐ │──────>│ build │──────>│ build │
│ mkdir │──────>│ │ packages │ │ repositories │
└─────────┘ └──────────┘ └──────────────┘
(e.g: calico) (e.g.: scality)
"""
from pathlib import Path
from typing import (
Dict, FrozenSet, Iterator, List, Mapping, Optional, Sequence, Tuple
)
import doit # type: ignore
from buildchain import builder
from buildchain import constants
from buildchain import coreutils
from buildchain import docker_command
from buildchain import targets
from buildchain import types
from buildchain import utils
from buildchain import versions
# Utilities {{{
def _list_packages_to_build(
pkg_cats: Mapping[str, Tuple[targets.Package, ...]]
) -> List[str]:
return [
pkg.name for pkg_list in pkg_cats.values() for pkg in pkg_list
]
def _list_packages_to_download(
package_versions: Tuple[versions.PackageVersion, ...],
packages_to_build: List[str]
) -> Dict[str, Optional[str]]:
return {
pkg.name: pkg.full_version
for pkg in package_versions
if pkg.name not in packages_to_build
}
# }}}
# Tasks {{{
def task_packaging() -> types.TaskDict:
"""Build the packages and repositories."""
return {
'actions': None,
'task_dep': [
'_package_mkdir_root',
'_package_mkdir_iso_root',
'_download_rpm_packages',
'_build_rpm_packages',
'_build_rpm_repositories',
'_build_deb_packages',
'_download_deb_packages',
'_build_deb_repositories',
],
}
def task__package_mkdir_root() -> types.TaskDict:
"""Create the packages root directory."""
return targets.Mkdir(
directory=constants.PKG_ROOT, task_dep=['_build_root']
).task
def task__package_mkdir_rpm_root() -> types.TaskDict:
"""Create the RedHat packages root directory."""
return targets.Mkdir(
directory=constants.PKG_RPM_ROOT, task_dep=['_package_mkdir_root']
).task
def task__package_mkdir_deb_root() -> types.TaskDict:
"""Create the Debian packages root directory."""
return targets.Mkdir(
directory=constants.PKG_DEB_ROOT, task_dep=['_package_mkdir_root']
).task
def task__package_mkdir_iso_root() -> types.TaskDict:
"""Create the packages root directory on the ISO."""
return targets.Mkdir(
directory=constants.REPO_ROOT, task_dep=['_iso_mkdir_root']
).task
def task__package_mkdir_rpm_iso_root() -> types.TaskDict:
"""Create the RedHat packages root directory on the ISO."""
return targets.Mkdir(
directory=constants.REPO_RPM_ROOT, task_dep=['_package_mkdir_iso_root']
).task
def task__package_mkdir_deb_iso_root() -> types.TaskDict:
"""Create the Debian packages root directory on the ISO."""
return targets.Mkdir(
directory=constants.REPO_DEB_ROOT, task_dep=['_package_mkdir_iso_root']
).task
def task__download_rpm_packages() -> types.TaskDict:
"""Download packages locally."""
def clean() -> None:
"""Delete cache and repositories on the ISO."""
coreutils.rm_rf(constants.PKG_RPM_ROOT/'var')
for repository in RPM_REPOSITORIES:
# Repository with an explicit list of packages are created by a
# dedicated task that will also handle their cleaning, so we skip
# them here.
if repository.packages:
continue
coreutils.rm_rf(repository.rootdir)
mounts = [
utils.bind_mount(
source=constants.PKG_RPM_ROOT, target=Path('/install_root')
),
utils.bind_mount(
source=constants.REPO_RPM_ROOT, target=Path('/repositories')
),
]
dl_packages_callable = docker_command.DockerRun(
command=['/entrypoint.sh', 'download_packages', *RPM_TO_DOWNLOAD],
builder=builder.RPM_BUILDER,
mounts=mounts,
environment={'RELEASEVER': 7},
run_config=docker_command.default_run_config(
constants.REDHAT_ENTRYPOINT
)
)
return {
'title': utils.title_with_target1('GET RPM PKGS'),
'actions': [dl_packages_callable],
'targets': [constants.PKG_RPM_ROOT/'var'],
'task_dep': [
'_package_mkdir_rpm_root',
'_package_mkdir_rpm_iso_root',
'_build_builder:{}'.format(builder.RPM_BUILDER.name),
],
'clean': [clean],
'uptodate': [doit.tools.config_changed(_TO_DOWNLOAD_RPM_CONFIG)],
# Prevent Docker from polluting our output.
'verbosity': 0,
}
def task__download_deb_packages() -> types.TaskDict:
"""Download Debian packages locally."""
witness = constants.PKG_DEB_ROOT/'.witness'
def clean() -> None:
"""Delete downloaded Debian packages."""
for repository in DEB_REPOSITORIES:
# Repository with an explicit list of packages are created by a
# dedicated task that will also handle their cleaning, so we skip
# them here.
if repository.packages:
continue
coreutils.rm_rf(repository.pkgdir)
utils.unlink_if_exist(witness)
def mkdirs() -> None:
"""Create directories for the repositories."""
for repository in DEB_REPOSITORIES:
repository.pkgdir.mkdir(exist_ok=True)
mounts = [
utils.bind_ro_mount(
source=constants.ROOT/'packages'/'debian'/'download_packages.py',
target=Path('/download_packages.py'),
),
utils.bind_mount(
source=constants.PKG_DEB_ROOT,
target=Path('/repositories')
),
]
dl_packages_callable = docker_command.DockerRun(
command=['/download_packages.py', *DEB_TO_DOWNLOAD],
builder=builder.DEB_BUILDER,
mounts=mounts,
environment={'SALT_VERSION': versions.SALT_VERSION},
run_config=docker_command.default_run_config(
constants.DEBIAN_ENTRYPOINT
)
)
return {
'title': utils.title_with_target1('GET DEB PKGS'),
'actions': [mkdirs, dl_packages_callable],
'targets': [constants.PKG_DEB_ROOT/'.witness'],
'task_dep': [
'_package_mkdir_deb_root',
'_package_mkdir_deb_iso_root',
'_build_builder:{}'.format(builder.DEB_BUILDER.name),
],
'clean': [clean],
'uptodate': [doit.tools.config_changed(_TO_DOWNLOAD_DEB_CONFIG)],
# Prevent Docker from polluting our output.
'verbosity': 0,
}
def task__build_rpm_packages() -> Iterator[types.TaskDict]:
"""Build a RPM package."""
for repo_pkgs in RPM_TO_BUILD.values():
for package in repo_pkgs:
yield from package.execution_plan
def task__build_deb_packages() -> Iterator[types.TaskDict]:
"""Build Debian packages"""
for repo_pkgs in DEB_TO_BUILD.values():
for package in repo_pkgs:
yield from package.execution_plan
def task__build_rpm_repositories() -> Iterator[types.TaskDict]:
"""Build a RPM repository."""
for repository in RPM_REPOSITORIES:
yield from repository.execution_plan
@doit.create_after(executed='_download_deb_packages') # type: ignore
def task__build_deb_repositories() -> Iterator[types.TaskDict]:
"""Build a DEB repository."""
for repository in DEB_REPOSITORIES:
if next(repository.pkgdir.glob('*.deb'), False):
yield from repository.execution_plan
# }}}
# RPM packages and repository {{{
# Packages to build, per repository.
def _rpm_package(name: str, sources: List[Path]) -> targets.RPMPackage:
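    """Return an RPM package target built locally from the given sources."""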
try:
pkg_info = versions.RPM_PACKAGES_MAP[name]
except KeyError as exc:
raise ValueError(
'Missing version for package "{}"'.format(name)
) from exc
# In case the `release` is of form "{build_id}.{os}", which is standard
build_id_str, _, _ = pkg_info.release.partition('.')
return targets.RPMPackage(
basename='_build_rpm_packages',
name=name,
version=pkg_info.version,
build_id=int(build_id_str),
sources=sources,
builder=builder.RPM_BUILDER,
task_dep=[
'_package_mkdir_rpm_root',
'_build_builder:{}'.format(builder.RPM_BUILDER.name)
],
)
def _rpm_repository(
name: str, packages: Optional[Sequence[targets.RPMPackage]]=None
) -> targets.RPMRepository:
"""Return a RPM repository object.
Arguments:
name: repository name
packages: list of locally built packages
"""
mkdir_task = '_package_mkdir_rpm_iso_root'
download_task = '_download_rpm_packages'
return targets.RPMRepository(
basename='_build_rpm_repositories',
name=name,
builder=builder.RPM_BUILDER,
packages=packages,
task_dep=[download_task if packages is None else mkdir_task],
)
# Calico Container Network Interface Plugin.
CALICO_RPM = _rpm_package(
name='calico-cni-plugin',
sources=[
Path('calico-amd64'),
Path('calico-ipam-amd64'),
Path('v{}.tar.gz'.format(versions.CALICO_VERSION)),
],
)
CONTAINERD_RPM = _rpm_package(
name='containerd',
sources=[
Path('0001-Revert-commit-for-Windows-metrics.patch'),
Path('containerd.service'),
Path('containerd.toml'),
Path('containerd-{}.tar.gz'.format(versions.CONTAINERD_VERSION)),
],
)
RPM_TO_BUILD : Dict[str, Tuple[targets.RPMPackage, ...]] = {
'scality': (
# SOS report custom plugins.
_rpm_package(
name='metalk8s-sosreport',
sources=[
Path('metalk8s.py'),
Path('containerd.py'),
],
),
CALICO_RPM,
CONTAINERD_RPM,
),
}
_RPM_TO_BUILD_PKG_NAMES : List[str] = _list_packages_to_build(RPM_TO_BUILD)
# All packages not referenced in `RPM_TO_BUILD` but listed in
# `versions.RPM_PACKAGES` are supposed to be downloaded.
RPM_TO_DOWNLOAD : FrozenSet[str] = frozenset(
package.rpm_full_name
for package in versions.RPM_PACKAGES
if package.name not in _RPM_TO_BUILD_PKG_NAMES
)
# Store these versions in a dict to use with doit.tools.config_changed
_TO_DOWNLOAD_RPM_CONFIG: Dict[str, Optional[str]] = \
_list_packages_to_download(versions.RPM_PACKAGES, _RPM_TO_BUILD_PKG_NAMES)
SCALITY_RPM_REPOSITORY : targets.RPMRepository = _rpm_repository(
name='scality', packages=RPM_TO_BUILD['scality']
)
RPM_REPOSITORIES : Tuple[targets.RPMRepository, ...] = (
SCALITY_RPM_REPOSITORY,
_rpm_repository(name='epel'),
_rpm_repository(name='kubernetes'),
_rpm_repository(name='saltstack'),
)
# }}}
# Debian packages and repositories {{{
def _deb_package(name: str, sources: Path) -> targets.DEBPackage:
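    """Return a DEB package target built locally from the given sources."""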
try:
pkg_info = versions.DEB_PACKAGES_MAP[name]
except KeyError as exc:
raise ValueError(
'Missing version for package "{}"'.format(name)
) from exc
return targets.DEBPackage(
basename='_build_deb_packages',
name=name,
version=pkg_info.version,
build_id=int(pkg_info.release),
sources=sources,
builder=builder.DEB_BUILDER,
task_dep=[
'_package_mkdir_deb_root',
'_build_builder:{}'.format(builder.DEB_BUILDER.name)
],
)
def _deb_repository(
name: str, packages: Optional[Sequence[targets.DEBPackage]]=None
) -> targets.DEBRepository:
"""Return a DEB repository object.
Arguments:
name: repository name
packages: list of locally built packages
"""
mkdir_task = '_package_mkdir_deb_iso_root'
download_task = '_download_deb_packages'
return targets.DEBRepository(
basename='_build_deb_repositories',
name=name,
builder=builder.DEB_BUILDER,
packages=packages,
task_dep=[download_task if packages is None else mkdir_task],
)
DEB_TO_BUILD : Dict[str, Tuple[targets.DEBPackage, ...]] = {
'scality': (
# SOS report custom plugins.
_deb_package(
name='metalk8s-sosreport',
sources=constants.ROOT/'packages/common/metalk8s-sosreport',
),
_deb_package(
name='calico-cni-plugin',
sources=SCALITY_RPM_REPOSITORY.get_rpm_path(CALICO_RPM)
),
)
}
_DEB_TO_BUILD_PKG_NAMES : List[str] = _list_packages_to_build(DEB_TO_BUILD)
# Store these versions in a dict to use with doit.tools.config_changed
_TO_DOWNLOAD_DEB_CONFIG: Dict[str, Optional[str]] = \
_list_packages_to_download(versions.DEB_PACKAGES, _DEB_TO_BUILD_PKG_NAMES)
DEB_TO_DOWNLOAD : FrozenSet[str] = frozenset(
package.deb_full_name
for package in versions.DEB_PACKAGES
if package.name not in _DEB_TO_BUILD_PKG_NAMES
)
DEB_REPOSITORIES : Tuple[targets.DEBRepository, ...] = (
_deb_repository(name='scality', packages=DEB_TO_BUILD['scality']),
_deb_repository(name='bionic'),
_deb_repository(name='bionic-backports'),
_deb_repository(name='bionic-security'),
_deb_repository(name='bionic-updates'),
_deb_repository(name='kubernetes-xenial'),
_deb_repository(name='salt_ubuntu1804'),
)
# }}}
__all__ = utils.export_only_tasks(__name__)
| 31.18847 | 79 | 0.640623 | 1,634 | 14,066 | 5.354345 | 0.160343 | 0.016802 | 0.014402 | 0.019545 | 0.515716 | 0.443708 | 0.352955 | 0.332495 | 0.287004 | 0.2518 | 0 | 0.00253 | 0.241362 | 14,066 | 450 | 80 | 31.257778 | 0.796458 | 0.194369 | 0 | 0.370861 | 0 | 0 | 0.135762 | 0.052891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072848 | false | 0 | 0.036424 | 0.006623 | 0.15894 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a0c66cf1da5d3b37007d5317166f8bfc40e6fc7 | 13,464 | py | Python | optOutSendgridGlobalSuppressions.py | MutedJam/crossengage-python-snippets | 6c3d7c93b43f2b001824a6c862f7befba7b4a3f2 | [
"MIT"
] | 1 | 2021-02-11T16:13:51.000Z | 2021-02-11T16:13:51.000Z | optOutSendgridGlobalSuppressions.py | that-one-tom/crossengage-python-snippets | 6c3d7c93b43f2b001824a6c862f7befba7b4a3f2 | [
"MIT"
] | null | null | null | optOutSendgridGlobalSuppressions.py | that-one-tom/crossengage-python-snippets | 6c3d7c93b43f2b001824a6c862f7befba7b4a3f2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import dotenv, os, datetime, time, traceback, sys, json, requests, uuid
# Load environment variables
dotenv.load_dotenv()
API_KEY = os.getenv('XNG_MASTER_API_KEY')
XNG_USER = os.getenv('XNG_APP_USER')
XNG_PASS = os.getenv('XNG_APP_PASSWORD')
WEB_TRACKING_KEY = os.getenv('XNG_WEB_TRACKING_KEY')
SG_KEY = os.getenv('SENDGRID_API_KEY')
print('CrossEngage API key (last 3 characters):', API_KEY[-3:])
print('CrossEngage User:', XNG_USER)
print('Sendgrid API Key (last 3 characters):', SG_KEY[-3:])
# Configuration
TIMEOUT = 60
MAX_USERS_PER_SEGMENT = 100
# Create re-usable session
session = requests.Session()
retries = requests.adapters.HTTPAdapter(max_retries=3)
session.mount('https://', retries)
API_BASE_URL = 'https://api.crossengage.io'
UI_BASE_URL = 'https://ui-api.crossengage.io/ui'
SENDGRID_API_BASE_URL = 'https://api.sendgrid.com/v3'
# Sendgrid API Headers
SENDGRID_API_HEADERS = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + SG_KEY
}
# Fetch global unsubscribes (paginated, Sendgrid only provides a maximum of 500 entries per request)
SENDGRID_UNSUBSCRIBES = []
globalUnsubscribesUrl = SENDGRID_API_BASE_URL + '/suppression/unsubscribes?limit=500&offset=0'
moreUnsubscribes = True
while moreUnsubscribes == True:
try:
globalUnsubscribesResponse = session.get(globalUnsubscribesUrl, headers=SENDGRID_API_HEADERS, timeout=TIMEOUT)
if globalUnsubscribesResponse.status_code == 200:
globalUnsubscribes = json.loads(globalUnsubscribesResponse.text)
for globalUnsubscribe in globalUnsubscribes:
SENDGRID_UNSUBSCRIBES.append(globalUnsubscribe['email'])
print('Retrieved', len(globalUnsubscribes), 'global unsubscribes from Sendgrid')
            # membership test avoids a KeyError when Sendgrid omits the 'next' Link header on the last page
            if 'next' in globalUnsubscribesResponse.links and globalUnsubscribesUrl != globalUnsubscribesResponse.links['next']['url']:
globalUnsubscribesUrl = globalUnsubscribesResponse.links['next']['url']
moreUnsubscribes = True
else:
moreUnsubscribes = False
break
else:
raise ValueError('Unexpected response code ' + str(globalUnsubscribesResponse.status_code) + ' when fetching global unsubscribes')
except Exception:
print('Fetching global unsubscribes failed')
traceback.print_exc()
sys.exit(1)
print('Fetched a total of', len(SENDGRID_UNSUBSCRIBES), 'global unsubscribes from Sendgrid')
SENDGRID_UNSUBSCRIBES = list(set(SENDGRID_UNSUBSCRIBES))
print(len(SENDGRID_UNSUBSCRIBES),'unsubscribes remaining after deduplication')
# Identify the CrossEngage Company ID
companyIdurl = UI_BASE_URL + '/managers/companies'
companyIdPayload = {
'email': XNG_USER
}
try:
companyIdResponse = session.post(companyIdurl, data=json.dumps(companyIdPayload), timeout=TIMEOUT)
if companyIdResponse.status_code == 200:
companyIds = json.loads(companyIdResponse.text)
if len(companyIds) != 1:
raise ValueError('Unexpected number of company IDs returned: ' + companyIdResponse.text)
else:
companyId = companyIds[0]
print('Found CrossEngage Company ID', companyId)
else:
raise ValueError('Unexpected response code ' + str(companyIdResponse.status_code) + ' when fetching company ID')
except Exception:
print('Fetching company ID failed')
traceback.print_exc()
sys.exit(1)
# Getting UI API token
uiTokenUrl = UI_BASE_URL + '/managers/login'
uiTokenPayload = {
"email": XNG_USER,
"password": XNG_PASS
}
uiTokenHeaders = {
'content-type': 'application/json',
'company-id': str(companyId)
}
try:
uiTokenResponse = session.post(uiTokenUrl, data=json.dumps(uiTokenPayload), headers=uiTokenHeaders, timeout=TIMEOUT)
if uiTokenResponse.status_code == 200:
uiToken = json.loads(uiTokenResponse.text)['token']
        print('Retrieved UI token (last 3 characters):', uiToken[-3:])
else:
raise ValueError('Unexpected response code ' + str(uiTokenResponse.status_code) + ' when fetching UI token')
except Exception:
print('Fetching UI token failed')
traceback.print_exc()
sys.exit(1)
# Define CrossEngage Headers
API_HEADERS = {
'Content-Type': 'application/json',
'X-XNG-ApiVersion': str(1),
'X-XNG-AuthToken': API_KEY
}
UI_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Company-ID': str(companyId),
'X-XNG-ApiVersion': str(2),
'Authorization': 'Bearer ' + uiToken
}
### Identify the correct attribute ID for traits.email (this is required for the segment creation)
print('Searching attribute ID for traits.email')
getAttributesUrl = UI_BASE_URL + '/campaigns/event-classes'
getAttributesResponse = session.get(getAttributesUrl, headers=UI_HEADERS, timeout=TIMEOUT)
try:
if getAttributesResponse.status_code == 200:
attributeDetails = json.loads(getAttributesResponse.text)
print('Retrieved attribute details')
else:
raise ValueError('Unexpected response code ' + str(getAttributesResponse.status_code) + ' when fetching attribute details')
except Exception:
print('Fetching attributes failed')
traceback.print_exc()
sys.exit(1)
try:
properties = attributeDetails['properties']
except (KeyError, TypeError):
print('Fetching attribute details failed')
traceback.print_exc()
sys.exit(1)
ID_EMAIL_ATTRIBUTE = None
for propertyDetail in properties:
if propertyDetail['label'] == 'traits.email':
ID_EMAIL_ATTRIBUTE = propertyDetail['id']
print('Identified ID of traits.email: ' + str(propertyDetail['id']))
if not ID_EMAIL_ATTRIBUTE:
print('Could not find ID of traits.email')
sys.exit(1)
### Build segments
SENDGRID_UNSUBSCRIBE_CHUNKS = [SENDGRID_UNSUBSCRIBES[x:x+MAX_USERS_PER_SEGMENT] for x in range(0, len(SENDGRID_UNSUBSCRIBES), MAX_USERS_PER_SEGMENT)]
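# Each chunk becomes a throw-away CrossEngage segment built from OR-ed
# 'traits.email == <address>' conditions; MAX_USERS_PER_SEGMENT caps the
# number of conditions per filter payload.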
for i, UNSUBSCRIBE_CHUNK in enumerate(SENDGRID_UNSUBSCRIBE_CHUNKS):
SEGMENT_NAME = '[Sendgrid Opt-Out Sync] ' + str(uuid.uuid4())[:8]
print('Creating segment',i+1,'of',len(SENDGRID_UNSUBSCRIBE_CHUNKS),'as',SEGMENT_NAME,'with',len(UNSUBSCRIBE_CHUNK),'emails')
SEGMENT_PAYLOAD = {
'label': SEGMENT_NAME,
'type': 'CONTAINER',
'operator': 'OR',
'subFilters': [],
'justCreated': False,
'$$hashKey': 'object:1340'
}
for EMAIL in UNSUBSCRIBE_CHUNK:
SUBFILTER = {
'type': 'ATTRIBUTE',
'label': '_gen:_' + str(time.time_ns()),
'operator': None,
'justCreated': False,
'subFilters': [],
'conditions': [{
'values': [EMAIL],
'conditions': [{
'values': [''],
'$$hashKey': 'object:1874'
}],
'valueIdList': [0],
'attributeId': ID_EMAIL_ATTRIBUTE,
'operator': '=='
}],
'id': None
}
SEGMENT_PAYLOAD['subFilters'].append(SUBFILTER)
createSegmentUrl = UI_BASE_URL + '/campaigns/filters'
createSegmentResponse = session.post(createSegmentUrl, data=json.dumps(SEGMENT_PAYLOAD), headers=UI_HEADERS, timeout=TIMEOUT)
try:
if createSegmentResponse.status_code == 200:
newSegment = json.loads(createSegmentResponse.text)
print('New segment created with ID', newSegment['id'])
else:
raise ValueError('Unexpected response code ' + str(createSegmentResponse.status_code) + ' creating segment')
except Exception:
print('Creating segment failed')
traceback.print_exc()
sys.exit(1)
#### Trigger user count to populate segment
print('Trigger user count for segment with ID ' + str(newSegment['id']))
refreshSegmentUrl = UI_BASE_URL + '/filters/' + str(newSegment['id']) + '/count'
refreshSegmentResponse = session.get(refreshSegmentUrl, headers=UI_HEADERS, timeout=TIMEOUT)
try:
if refreshSegmentResponse.status_code == 200:
refreshResult = json.loads(refreshSegmentResponse.text)
print('User Count:', refreshResult['total'])
else:
raise ValueError('Unexpected response code ' + str(createSegmentResponse.status_code) + ' for user count')
except Exception:
print('User count failed')
traceback.print_exc()
sys.exit(1)
#### Fetch users in segment
print('Retrieving users in segment with ID', str(newSegment['id']))
fetchSegmentUrl = UI_BASE_URL + '/userexplorer/' + str(newSegment['id']) + '?offset=0&limit=' + str(MAX_USERS_PER_SEGMENT)
fetchNewSegmentResponse = session.get(fetchSegmentUrl, headers=UI_HEADERS, timeout=TIMEOUT)
try:
if fetchNewSegmentResponse.status_code == 200:
fetchNewSegmentResult = json.loads(fetchNewSegmentResponse.text)
newSegmentPart = fetchNewSegmentResult['part']
else:
raise ValueError('Unexpected response code ' + str(fetchNewSegmentResponse.status_code) + ' when fetching segment users')
except Exception:
print('Fetching segment users failed')
traceback.print_exc()
sys.exit(1)
#### Check opt-out status for each user in segment and opt out if required
for xngUser in newSegmentPart:
if xngUser['externalId']:
print('Fetching opt out status for user with xngGlobalUserId', xngUser['xngGlobalUserId'], 'and email', xngUser['email'])
optOutStatusUrl = API_BASE_URL + '/users/' + xngUser['externalId'] + '/recipient-status'
optOutStatusResponse = session.get(optOutStatusUrl, headers=API_HEADERS, timeout=TIMEOUT)
try:
if optOutStatusResponse.status_code == 200:
optOutStatusResult = json.loads(optOutStatusResponse.text)
optedOut = optOutStatusResult['optOutAll']
else:
raise ValueError('Unexpected response code ' + str(optOutStatusResponse.status_code) + ' when fetching opt out status')
except Exception:
print('Fetching opt out status failed')
traceback.print_exc()
sys.exit(1)
if optedOut == True:
print('User with xngGlobalUserId', xngUser['xngGlobalUserId'], 'and email', xngUser['email'], 'is already opted out')
else:
print('Opting out user with xngGlobalUserId', xngUser['xngGlobalUserId'], 'and email', xngUser['email'])
optOutUrl = API_BASE_URL + '/users/' + xngUser['externalId'] + '/optout-status'
optOutPayload = {
'optOut': True
}
optOutResponse = session.put(optOutUrl, data=json.dumps(optOutPayload), headers=API_HEADERS, timeout=TIMEOUT)
try:
if optOutResponse.status_code == 200:
optOutResult = json.loads(optOutResponse.text)
newOptOutStatus = optOutResult['optOut']
if newOptOutStatus != True:
raise ValueError('Unexpected opt out status after update: ' + str(newOptOutStatus))
else:
raise ValueError('Unexpected response code ' + str(optOutStatusResponse.status_code) + ' when opting out')
except Exception:
print('Opting out failed')
traceback.print_exc()
sys.exit(1)
print('User with xngGlobalUserId', xngUser['xngGlobalUserId'], 'and email', xngUser['email'], 'opted out successfully')
else:
print('No external ID found for user with xngGlobalUserId', xngUser['xngGlobalUserId'], 'and email', xngUser['email'])
print('Attempting to opt out user through opt out link workaround')
optOutLinkUrl = 'https://trk-api.crossengage.io/optout/inbound/webhook/' + WEB_TRACKING_KEY + '/' + xngUser['xngGlobalUserId'] + '?channelType=all'
optOutLinkResponse = session.get(optOutLinkUrl, timeout=TIMEOUT)
try:
if optOutLinkResponse.status_code == 200:
print('User with xngGlobalUserId', xngUser['xngGlobalUserId'], 'and email', xngUser['email'], 'was opted out successfully through opt out link method (Response:', optOutLinkResponse.text.replace('\n', ' ').replace('\r', ''), ')')
else:
raise ValueError('Unexpected response code ' + str(fetchNewSegmentResponse.status_code) + ' when using opt out link workaround')
except Exception:
print('Opt out link workaround failed')
traceback.print_exc()
sys.exit(1)
#### Our work is done, now deleting segment
print('Deleting segment with ID ' + str(newSegment['id']))
deleteSegmentUrl = UI_BASE_URL + '/filters/' + str(newSegment['id'])
deleteSegmentResponse = session.delete(deleteSegmentUrl, headers=UI_HEADERS, timeout=TIMEOUT)
try:
if deleteSegmentResponse.status_code == 204:
print('Segment with ID', newSegment['id'], 'deleted')
else:
raise ValueError('Unexpected response code ' + str(createSegmentResponse.status_code) + ' for segment deletion')
except Exception:
        print('Segment deletion failed')
traceback.print_exc()
sys.exit(1)
| 46.427586 | 249 | 0.6582 | 1,382 | 13,464 | 6.303184 | 0.212735 | 0.025255 | 0.037309 | 0.027551 | 0.28332 | 0.244518 | 0.216393 | 0.128688 | 0.110205 | 0.102744 | 0 | 0.008193 | 0.229427 | 13,464 | 289 | 250 | 46.588235 | 0.831422 | 0.042484 | 0 | 0.293436 | 0 | 0 | 0.251536 | 0.005287 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.007722 | 0.003861 | 0 | 0.003861 | 0.19305 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a0c97de5f64f2277f854de55da1ded524b56492 | 1,743 | py | Python | kepler/custom_traits.py | jaidevd/kepler | 9b8de57dd85d706fc9aa707d02810403ff39bfd6 | [
"MIT"
] | 7 | 2018-11-23T10:19:36.000Z | 2020-05-15T09:25:23.000Z | kepler/custom_traits.py | jaidevd/kepler | 9b8de57dd85d706fc9aa707d02810403ff39bfd6 | [
"MIT"
] | 8 | 2019-12-16T21:17:47.000Z | 2021-11-10T19:43:36.000Z | kepler/custom_traits.py | jaidevd/kepler | 9b8de57dd85d706fc9aa707d02810403ff39bfd6 | [
"MIT"
] | 1 | 2019-10-08T08:58:03.000Z | 2019-10-08T08:58:03.000Z | import os.path as op
from traitlets import Unicode, TraitError, List
from keras.models import Model
from h5py import File as H5File
class File(Unicode):
"""Trait representing a file."""
def validate(self, obj, value):
super(File, self).validate(obj, value)
if value:
if not op.isfile(value):
raise TraitError('File {} does not exist.'.format(value))
return value
class Directory(Unicode):
"""Trait representing a directory."""
def validate(self, obj, value):
super(Directory, self).validate(obj, value)
if value:
if not op.isdir(value):
raise TraitError('Directory {} does not exist.'.format(value))
return value
class KerasModelWeights(File):
"""A file containing Keras model weights."""
def validate(self, obj, value):
"""Overwritten from parent to ensure that the string is path to a
valid keras model.
"""
super(KerasModelWeights, self).validate(obj, value)
if value:
with H5File(value, 'r') as f_in:
if 'model_config' not in f_in.attrs:
raise TraitError(
'{} does not contain a valid keras model.'.format(
value))
return value
class KerasModelMethods(List):
"""List trait containing keras model method names."""
def validate(self, obj, values):
super(KerasModelMethods, self).validate(obj, values)
for method_name in values:
func = getattr(Model, method_name, None)
if not callable(func):
raise TraitError(method_name + ' is not a keras Model method.')
return values
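# Example usage (a minimal sketch; the ``TrainConfig`` class below is
# hypothetical and not part of kepler itself). Each trait validates on
# assignment and raises TraitError for invalid values:
#
#     from traitlets import HasTraits
#
#     class TrainConfig(HasTraits):
#         weights = KerasModelWeights()
#         log_dir = Directory()
#         methods = KerasModelMethods()
#
#     cfg = TrainConfig(methods=['fit', 'predict'])   # OK
#     cfg.log_dir = '/no/such/dir'                    # raises TraitError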
| 30.578947 | 79 | 0.589214 | 202 | 1,743 | 5.054455 | 0.331683 | 0.047013 | 0.058766 | 0.070519 | 0.225269 | 0.202742 | 0.066601 | 0.066601 | 0.066601 | 0 | 0 | 0.002527 | 0.31899 | 1,743 | 56 | 80 | 31.125 | 0.857624 | 0.130235 | 0 | 0.236842 | 0 | 0 | 0.090292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a0cb78cc44a366a9496d0d8cb23cbae27381802 | 1,738 | py | Python | raiden/encoding/signing.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | raiden/encoding/signing.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | raiden/encoding/signing.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | from coincurve import PublicKey
import structlog
from raiden.utils import sha3, publickey_to_address
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
def recover_publickey(messagedata, signature, hasher=sha3):
if len(signature) != 65:
raise ValueError('invalid signature')
signature = signature[:-1] + chr(signature[-1] - 27).encode()  # shift the Ethereum-style v value (27/28) down to the 0/1 recovery id coincurve expects
publickey = PublicKey.from_signature_and_message(
signature,
messagedata,
hasher=hasher,
)
return publickey.format(compressed=False)
def recover_publickey_safe(messagedata, signature, hasher=sha3):
publickey = None
try:
publickey = recover_publickey(messagedata, signature, hasher)
except ValueError:
# raised if the signature has the wrong length
log.error('invalid signature')
except TypeError as e:
# raised if the PublicKey instantiation failed
log.error('invalid key data: {}'.format(e))
except Exception as e: # pylint: disable=broad-except
# secp256k1 is using bare Exception classes: raised if the recovery failed
log.error('error while recovering pubkey: {}'.format(e))
return publickey
def recover_address(messagedata, signature, hasher=sha3):
public_key = recover_publickey_safe(messagedata, signature, hasher)
if public_key is None:
return None
return publickey_to_address(public_key)
def sign(messagedata, private_key, hasher=sha3):
signature = private_key.sign_recoverable(messagedata, hasher=hasher)
if len(signature) != 65:
raise ValueError('invalid signature')
return signature[:-1] + chr(signature[-1] + 27).encode()  # shift the recovery id back up to the Ethereum-style 27/28 range
def address_from_key(key):
# drop the first (format prefix) byte, hash the remainder, keep the last 20 bytes (the address)
return sha3(key[1:])[-20:]
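# Round-trip sketch (assumes a coincurve.PrivateKey; not part of this module):
#
#     from coincurve import PrivateKey
#     key = PrivateKey()
#     sig = sign(b'payload', key)
#     assert recover_address(b'payload', sig) == publickey_to_address(
#         key.public_key.format(compressed=False))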
| 29.965517 | 82 | 0.705409 | 207 | 1,738 | 5.792271 | 0.338164 | 0.083403 | 0.108424 | 0.075063 | 0.276897 | 0.206839 | 0.130108 | 0.078399 | 0 | 0 | 0 | 0.017986 | 0.20023 | 1,738 | 57 | 83 | 30.491228 | 0.844604 | 0.126582 | 0 | 0.108108 | 0 | 0 | 0.068783 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.081081 | 0.027027 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a0d9e640eb5c7d69dbd25d71bca37fe3d5e7a33 | 898 | py | Python | Settings.py | altaha/ArgoJsonRDBMS | 93640b8b832eed854fc8e77dcdd3459c1304955e | [
"Apache-2.0"
] | null | null | null | Settings.py | altaha/ArgoJsonRDBMS | 93640b8b832eed854fc8e77dcdd3459c1304955e | [
"Apache-2.0"
] | null | null | null | Settings.py | altaha/ArgoJsonRDBMS | 93640b8b832eed854fc8e77dcdd3459c1304955e | [
"Apache-2.0"
] | null | null | null | __author__ = 'Ahmed'
def _add_extra_to_name(file_name):
file_name = file_name.split('.')
file_name[0] += '_extra'
return '.'.join(file_name)
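# A quick example of the helper above:
#     _add_extra_to_name('nobench_data.json') -> 'nobench_data_extra.json'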
FILES_DIR = '/home/vagrant/workspace/ArgoBench/ArgoJsonRDBMS/'
ARGO_FILENAME = 'nobench_data_argo.json'
GENERIC_FILENAME = 'nobench_data.json'
MONGO_FILENAME = 'nobench_data_mongo.json'
PJSON_FILENAME = 'nobench_data_mongo.json'
ARGO_EXTRA_FILENAME = _add_extra_to_name(ARGO_FILENAME)
GENERIC_EXTRA_FILENAME = _add_extra_to_name(GENERIC_FILENAME)
MONGO_EXTRA_FILENAME = _add_extra_to_name(MONGO_FILENAME)
PJSON_EXTRA_FILENAME = _add_extra_to_name(PJSON_FILENAME)
PSQL_USER = 'vagrant'
MONGO_USER = 'vagrant'
RESULTS_FILENAME = 'results.csv'
ARGO_PICKLE_FILENAME = 'rec_strings_argo'
MONGO_PICKLE_FILENAME = 'rec_strings_mongo'
PJSON_PICKLE_FILENAME = 'rec_strings_mongo'
DATA_SIZE = 4000000
NUM_BENCH_ITERATIONS = 10
DEEPLY_NESTED = True
| 28.967742 | 62 | 0.81069 | 126 | 898 | 5.198413 | 0.34127 | 0.061069 | 0.076336 | 0.10687 | 0.381679 | 0.164886 | 0 | 0 | 0 | 0 | 0 | 0.012315 | 0.095768 | 898 | 30 | 63 | 29.933333 | 0.794335 | 0 | 0 | 0 | 0 | 0 | 0.246102 | 0.129176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a109802b86212ed3546cb25faf516d7541dff90 | 3,856 | py | Python | engine/src/valet/engine/search/filters/filter_utils.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | engine/src/valet/engine/search/filters/filter_utils.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | engine/src/valet/engine/search/filters/filter_utils.py | onap/optf-fgps | 1494071d0329698297c5d78ee0799dbff0b57e43 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-15T18:54:03.000Z | 2021-10-15T18:54:03.000Z | #
# -------------------------------------------------------------------------
# Copyright (c) 2019 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import collections
import operator
# 1. The following operations are supported:
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <all-in>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
# it is ignored.
op_methods = {'=': lambda x, y: float(x) >= float(y),
'<in>': lambda x, y: y in x,
'<all-in>': lambda x, y: all(val in x for val in y),
'==': lambda x, y: float(x) == float(y),
'!=': lambda x, y: float(x) != float(y),
'>=': lambda x, y: float(x) >= float(y),
'<=': lambda x, y: float(x) <= float(y),
's==': operator.eq,
's!=': operator.ne,
's<': operator.lt,
's<=': operator.le,
's>': operator.gt,
's>=': operator.ge}
def match(value, req):
words = req.split()
op = method = None
if words:
op = words.pop(0)
method = op_methods.get(op)
if op != '<or>' and not method:
return value == req
if value is None:
return False
if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3
while True:
if words.pop(0) == value:
return True
if not words:
break
words.pop(0) # remove a keyword <or>
if not words:
break
return False
if words:
if op == '<all-in>': # requires a list not a string
return method(value, words)
return method(value, words[0])
return False
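# Quick examples of the grammar match() accepts (a sketch):
#
#     match('4', '>= 3')           -> True   (numeric comparison)
#     match('ssd', 's== ssd')      -> True   (string comparison)
#     match('a', '<or> a <or> b')  -> True   (alternatives)
#     match('exact', 'exact')      -> True   (no operator: plain equality)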
def aggregate_metadata_get_by_host(_level, _host, _key=None):
"""Returns a dict of all metadata based on a metadata key for a specific host.
If the key is not provided, returns a dict of all metadata.
"""
metadatas = {}
groups = _host.get_memberships(_level)
for gk, g in groups.items():
if g.group_type == "aggr":
if _key is None or _key in g.metadata:
metadata = collections.defaultdict(set)
for k, v in g.metadata.items():
if k != "prior_metadata":
metadata[k].update(x.strip() for x in v.split(','))
else:
# metadata[k] = v
if isinstance(g.metadata["prior_metadata"], dict):
for ik, iv in g.metadata["prior_metadata"].items():
metadata[ik].update(y.strip() for y in iv.split(','))
metadatas[gk] = metadata
return metadatas
def availability_zone_get_by_host(_level, _host):
availability_zone_list = []
groups = _host.get_memberships(_level)
for gk, g in groups.items():
if g.group_type == "az":
g_name_elements = gk.split(':', 1)
if len(g_name_elements) > 1:
g_name = g_name_elements[1]
else:
g_name = gk
availability_zone_list.append(g_name)
return availability_zone_list
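# e.g. a host whose "az"-type group key is "az:nova" yields the zone name
# "nova"; a group key without ':' is returned unchanged.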
| 32.677966 | 85 | 0.521006 | 494 | 3,856 | 3.977733 | 0.331984 | 0.024936 | 0.028499 | 0.033079 | 0.159288 | 0.137913 | 0.112468 | 0.10229 | 0.10229 | 0.10229 | 0 | 0.008055 | 0.323911 | 3,856 | 117 | 86 | 32.957265 | 0.745685 | 0.316909 | 0 | 0.220588 | 0 | 0 | 0.040108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.029412 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a10da99f50e1202c69ab30d398c0a3e0db4ca15 | 9,993 | py | Python | ptb.py | cjratcliff/adaptive-regularization | 4aef9029b2452911fac4cb40ac951a1168e611b7 | [
"Apache-2.0"
] | 4 | 2017-11-21T16:31:57.000Z | 2020-05-29T07:15:15.000Z | ptb.py | cjratcliff/adaptive-regularization | 4aef9029b2452911fac4cb40ac951a1168e611b7 | [
"Apache-2.0"
] | null | null | null | ptb.py | cjratcliff/adaptive-regularization | 4aef9029b2452911fac4cb40ac951a1168e611b7 | [
"Apache-2.0"
] | 1 | 2018-04-14T10:08:05.000Z | 2018-04-14T10:08:05.000Z | from __future__ import division
from __future__ import print_function
import argparse
import time
import random
import copy
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
from ptb_reader import ptb_raw_data
from custom_lstm import CustomLSTMCell, CustomMultiRNNCell
# Adapted from https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
class SmallConfig(object):
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
#max_epochs = 13
keep_prob = 1.0
batch_size = 20
decay_lr_at = 4
lr_decay = 0.5
vocab_size = 10000
wd_lr = 0.001
wd_clipping = 0.1
class MediumConfig(object):
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
#max_epochs = 39
keep_prob = 0.5
batch_size = 20
decay_lr_at = 6
lr_decay = 0.8
vocab_size = 10000
wd_lr = 0.001
wd_clipping = 0.01
class LargeConfig(object):
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
#max_epochs = 55
keep_prob = 0.35
batch_size = 20
decay_lr_at = 14
lr_decay = 1 / 1.15
vocab_size = 10000
wd_lr = 0.001 # Needs to be tuned
wd_clipping = 0.002 # Needs to be tuned
class PTBModel(object):
def __init__(self,config):
self.c = c = config
self.x = tf.placeholder(tf.int32, [None, None], 'x')
self.y = tf.placeholder(tf.int32, [None, None], 'y')
if c.reg_type == 'adaptive':
self.val_x = tf.placeholder(tf.int32, [None, None], 'val_x')
self.val_y = tf.placeholder(tf.int32, [None, None], 'val_y')
self.batch_size = tf.placeholder(tf.int32, [], name='batch_size')
if c.reg_type == 'static':
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
else:
self.keep_prob = None
cells = [tf.contrib.rnn.LSTMCell(num_units = c.hidden_size) for i in range(c.num_layers)]
if c.reg_type == 'static':
cells = [tf.contrib.rnn.DropoutWrapper(i, output_keep_prob=self.keep_prob) for i in cells]
cells = tf.contrib.rnn.MultiRNNCell(cells)
self.initial_state = cells.zero_state(self.batch_size, tf.float32)
logits,self.final_state = self.forward_prop(self.x, cells, self.initial_state, keep_prob=self.keep_prob)
self.loss = self.loss_fn(logits,self.y)
main_params = tf.trainable_variables()
if c.reg_type == 'adaptive':
self.l2_weight_decay_coef = tf.Variable(tf.constant(0.0), name="l2_weight_decay")
l2_weight_decay = sum([tf.reduce_sum(tf.square(i)) for i in main_params])
self.loss_reg = self.loss + tf.maximum(0.0,self.l2_weight_decay_coef)*l2_weight_decay
self.lr = tf.Variable(tf.constant(1.0),trainable=False)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
if c.reg_type == 'adaptive':
grads = tf.gradients(self.loss_reg, main_params)
else:
grads = tf.gradients(self.loss, main_params)
grads,_ = tf.clip_by_global_norm(grads, c.max_grad_norm)
gv = list(zip(grads,main_params))
self.train_step = optimizer.apply_gradients(gv)
if c.reg_type == 'adaptive':
# Compute gradients on the training sample with weight decay
train_update = [tf.multiply(-self.lr,g) for (g,v) in gv]
new_params = [v+u for (v,u) in zip(main_params,train_update)]
cells = [CustomLSTMCell(num_units = c.hidden_size) for i in range(c.num_layers)]
cells = CustomMultiRNNCell(cells)
self.val_initial_state = cells.zero_state(self.batch_size, tf.float32)
val_logits,self.val_final_state = self.forward_prop(self.val_x, cells, self.val_initial_state, new_params)
self.val_loss = self.loss_fn(val_logits,self.val_y)
term1 = tf.gradients([self.val_loss],train_update)
term2 = [-2*self.lr*v for v in main_params]
reg_grad = 0.0
for (v1,v2) in zip(term1,term2):
reg_grad += tf.reduce_sum(v1*v2)
reg_grad = tf.clip_by_value(reg_grad, -c.wd_clipping, c.wd_clipping)
gv = [(reg_grad,self.l2_weight_decay_coef)]
self.reg_train_step = tf.train.GradientDescentOptimizer(c.wd_lr).apply_gradients(gv)
def loss_fn(self,logits,y):
loss = tf.contrib.seq2seq.sequence_loss(
logits,
y,
tf.ones([self.c.batch_size, self.c.num_steps], dtype=tf.float32),
average_across_timesteps=False,
average_across_batch=True
)
return tf.reduce_sum(loss)
def forward_prop(self, x, cells, state, params=None, keep_prob=None,):
c = self.c
with tf.device("/cpu:0"):
if params is None:
embedding = tf.get_variable("embedding", [c.vocab_size, c.hidden_size], dtype=tf.float32)
else:
embedding = params[0]
h = tf.nn.embedding_lookup(embedding, x)
if c.reg_type == 'static':
h = tf.nn.dropout(h, keep_prob)
h = tf.unstack(h, num=c.num_steps, axis=1)
outputs = []
with tf.variable_scope("RNN"):
for time_step in range(c.num_steps):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
if params is None:
(cell_output, state) = cells(h[time_step], state)
else:
(cell_output, state) = cells(h[time_step], state, [params[1:3],params[3:5]])
outputs.append(cell_output)
h = tf.reshape(tf.stack(axis=1, values=outputs), [-1, c.hidden_size])
if params is None:
logits = tf.contrib.layers.fully_connected(h, c.vocab_size, activation_fn=tf.identity)
else:
logits = tf.matmul(h,params[5]) + params[6]
# Reshape logits to be 3-D tensor for sequence loss
return tf.reshape(logits, [c.batch_size, c.num_steps, c.vocab_size]), state
def fit(self, train_data, val_data, sess):
c = self.c
results = []
for epoch in range(100):
print("\nEpoch %d" % (epoch+1))
start = time.time()
# Decay the learning rate
if epoch >= c.decay_lr_at:
sess.run(tf.assign(self.lr,c.lr_decay*self.lr))
print("Learning rate set to: %f" % sess.run(self.lr))
if c.reg_type == 'adaptive':
train_perplexity = self.run_epoch(train_data, True, False, sess, val_data=val_data)
else:
train_perplexity = self.run_epoch(train_data, True, False, sess)
print("Train perplexity: %.3f" % train_perplexity)
val_perplexity = self.run_epoch(val_data, False, False, sess)
print("Val perplexity: %.3f" % val_perplexity)
print("Time taken: %.3f" % (time.time() - start))
results.append([train_perplexity,val_perplexity])
np.savetxt('results.csv', np.array(results), fmt='%5.5f', delimiter=',')
def reshape_data(self,data):
c = self.c
num_batches = len(data) // c.batch_size
data = data[0 : c.batch_size * num_batches]
data = np.reshape(data,[c.batch_size, num_batches])
return data
def run_epoch(self, data, is_training, full_eval, sess, val_data=None):
assert not(is_training and full_eval)
c = copy.deepcopy(self.c)
if full_eval: # Very slow so only used for the test set
c.batch_size = 1
c.num_steps = 1
data = self.reshape_data(data)
if c.reg_type == 'adaptive' and is_training:
val_data = self.reshape_data(val_data)
total_loss = 0.0
total_iters = 0.0
state = sess.run(self.initial_state, feed_dict={self.batch_size: c.batch_size})
if self.c.reg_type == 'adaptive' and is_training:
val_state = sess.run(self.val_initial_state, feed_dict={self.batch_size: c.batch_size})
for idx in range(0,data.shape[1],c.num_steps):
batch_x = data[:, idx:idx+c.num_steps]
batch_y = data[:, idx+1:idx+c.num_steps+1]
if batch_x.shape != (c.batch_size,c.num_steps) or \
batch_y.shape != (c.batch_size,c.num_steps):
#print(batch_x.shape,batch_y.shape)
continue
feed_dict = {self.x: batch_x,
self.y: batch_y,
self.batch_size: c.batch_size}
if c.reg_type == 'adaptive' and is_training:
val_idx = random.choice(range(0,val_data.shape[1],c.num_steps))
batch_val_x = val_data[:, val_idx:val_idx+c.num_steps]
batch_val_y = val_data[:, val_idx+1:val_idx+c.num_steps+1]
if batch_val_x.shape != (c.batch_size,c.num_steps) or \
batch_val_y.shape != (c.batch_size,c.num_steps):
continue
feed_dict[self.val_x] = batch_val_x
feed_dict[self.val_y] = batch_val_y
for i, (c_state,h_state) in enumerate(self.initial_state):
feed_dict[c_state] = state[i].c
feed_dict[h_state] = state[i].h
if self.c.reg_type == 'adaptive' and is_training:
for i, (c_state,h_state) in enumerate(self.val_initial_state):
feed_dict[c_state] = val_state[i].c
feed_dict[h_state] = val_state[i].h
if c.reg_type == 'static':
if is_training:
feed_dict[self.keep_prob] = c.keep_prob
else:
feed_dict[self.keep_prob] = 1.0
if is_training:
if c.reg_type == 'adaptive':
_,_,loss,state,val_state,wd = sess.run([self.train_step, self.reg_train_step, self.loss, self.final_state, self.val_final_state, self.l2_weight_decay_coef], feed_dict)
else:
_,loss,state = sess.run([self.train_step, self.loss, self.final_state], feed_dict)
else:
loss,state = sess.run([self.loss,self.final_state], feed_dict)
total_loss += loss
total_iters += c.num_steps
return np.exp(total_loss/total_iters)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--reg', type=str, help='none, static or adaptive', required=True)
parser.add_argument('--size', type=str, help='small, medium or large', required=True)
args = parser.parse_args()
X_train, X_valid, X_test, vocab = ptb_raw_data()
print("\nData loaded")
print("Training set: %d words" % len(X_train))
print("Validation set: %d words" % len(X_valid))
print("Test set: %d words" % len(X_test))
print("Vocab size: %d words\n" % len(vocab))
if args.size == 'small':
c = SmallConfig()
elif args.size == 'medium':
c = MediumConfig()
elif args.size == 'large':
c = LargeConfig()
else:
raise ValueError("Invalid value for size argument")
assert args.reg in ['none','static','adaptive']
c.reg_type = args.reg
m = PTBModel(c)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
m.fit(X_train,X_valid,sess)
m.run_epoch(X_test, False, True, sess)
if __name__ == "__main__":
main()
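# Example invocation (a sketch; --reg is one of none/static/adaptive and
# --size is one of small/medium/large, per the argparse setup above):
#     python ptb.py --reg adaptive --size small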
| 30.747692 | 172 | 0.691884 | 1,640 | 9,993 | 3.980488 | 0.170122 | 0.03171 | 0.022059 | 0.018382 | 0.34038 | 0.261642 | 0.217831 | 0.151501 | 0.116728 | 0.086091 | 0 | 0.021088 | 0.174322 | 9,993 | 324 | 173 | 30.842593 | 0.770088 | 0.038227 | 0 | 0.208333 | 0 | 0 | 0.052725 | 0 | 0 | 0 | 0 | 0 | 0.008333 | 1 | 0.029167 | false | 0 | 0.045833 | 0 | 0.245833 | 0.045833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a128c7b70345a573d23b4516326b45f39fef4d1 | 3,846 | py | Python | MSE430Funcs/DOSFuncs.py | KCMak653/MSE430 | 4f2ecfff557447de141121bbafbe5aa6bd60753b | [
"MIT"
] | null | null | null | MSE430Funcs/DOSFuncs.py | KCMak653/MSE430 | 4f2ecfff557447de141121bbafbe5aa6bd60753b | [
"MIT"
] | null | null | null | MSE430Funcs/DOSFuncs.py | KCMak653/MSE430 | 4f2ecfff557447de141121bbafbe5aa6bd60753b | [
"MIT"
] | null | null | null | """Contains functions used for DOS module"""
def DOS(dim):
import numpy as np
Es = np.linspace(-2, 10, 600)
m_e = 0.8 * 9.11e-31 #kg
hbar = 1.054e-34 # m^2kg/s
if dim == 3:
gE = 1/(2*np.pi**2)*(2*m_e/hbar**2)**(3/2)*np.sqrt(abs(Es))
if dim == 2:
gE = m_e/(np.pi*hbar**2)*np.ones(len(Es))
if dim == 1:
gE = m_e/(np.pi*hbar)*np.sqrt(m_e/(2*abs(Es)))
gE[Es < 0] = np.nan  # mask energies below the band edge for plotting
return gE, Es
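# Quick usage sketch (free-electron-like density of states; dim is 1, 2 or 3):
#     gE, Es = DOS(3)   # g(E) ~ sqrt(E) in 3D, constant in 2D, ~ 1/sqrt(E) in 1D
#     plt.plot(Es, gE)  # assumes matplotlib.pyplot imported as plt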
def center_axis(ax):
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
#Redraw axis
labls = [r'$k_x$', r'$k_y$', r'$k_z$']
val = [1,0, 0]
a1 =[-12, 12]
a2 = [0, 0]
ax.plot(a1, a2, a2, color = 'black', lw = 3, alpha=0.8)
ax.plot(a2, a1, a2, color = 'black', lw = 3, alpha =0.8)
ax.plot(a2, a2, a1, color = 'black', lw = 3, alpha = 0.8)
ax.text(-14, 0, 0, labls[0])
ax.text(0, -14, 0, labls[1])
ax.text(0, 0, -14, labls[2])
def k_diag(dim):
import numpy as np
pts = np.linspace(-10, 10, 11)
if dim == 3:
#plot grid points
(kx, ky, kz) = np.meshgrid(pts, pts, pts)
#ax1.scatter(kx, ky, kz, s=0.7)
#make surface plot
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = 10 * np.outer(np.cos(u), np.sin(v))
y = 10 * np.outer(np.sin(u), np.sin(v))
z = 10 * np.outer(np.ones(np.size(u)), np.cos(v))
#vector
k_vec=np.sqrt(100/dim)
(k_vec_x, k_vec_y, k_vec_z)=[[0, k_vec], [0,k_vec], [0,k_vec]]
(kx_t, ky_t, kz_t) = [k_vec, k_vec/2 +2, k_vec/2]
if dim ==2:
#plot grid points
(kx, ky, kz) = np.meshgrid(pts, pts, np.zeros(len(pts)))
#ax1.scatter(kx,ky, kz, s=0.7)
#make surface plot
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = 10 * np.outer(np.cos(u), np.sin(v))
y = 10 * np.outer(np.sin(u), np.sin(v))
z = 0 * np.outer(np.ones(np.size(u)), np.cos(v))
#ax1.plot_surface(x, y, z, rstride=4, cstride=4, color='b', linewidth=0, alpha=0.3)
#add vector
k_vec=np.sqrt(100/dim)
print(k_vec)
(k_vec_x, k_vec_y, k_vec_z)=[[0, k_vec], [0,k_vec], [0,0]]
(kx_t, ky_t, kz_t) = [k_vec/2, k_vec/2+2, 0]
if dim==1:
#plot grid points
kx = pts
ky = np.ones(len(pts))
kz = np.zeros(len(pts))
#ax1.scatter(kx,ky, s=0.7)
x=np.zeros([2,2])
y=x
z=x
#add vector
(k_vec_x, k_vec_y, k_vec_z)=[[0, 10], [1,1], [0,0]]
(kx_t, ky_t, kz_t) = [5, 3, 0]
return(kx, ky, kz, k_vec_x, k_vec_y, k_vec_z, kx_t, ky_t, kz_t, x, y, z)
def makeVector(k_vec_x, k_vec_y, k_vec_z,ax):
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d  # provides proj_transform used in draw()
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
a = Arrow3D(k_vec_x, k_vec_y, k_vec_z, mutation_scale=20,
lw=1, arrowstyle="-|>", color="r")
ax.add_artist(a)
def graphProp(ax1, ax2, dim):
ymx = ax2.get_ylim()
multy =[1,2,1]
ax2.set(title = 'Density of States in {}D'.format(dim), xlabel = 'E', ylabel = r'$\rho (E)$',
yticks =[], xticks=[0], xticklabels=[r'$E_C$'], xlim=[-2,10], ylim=[0, ymx[1]*multy[dim-1]])
dep = ['E^{-1/2}', 'E^0', 'E^{1/2}']
ax2.text(6, ymx[1]/2, r'$\propto~~{}$'.format(dep[dim-1]))
| 32.871795 | 100 | 0.518981 | 699 | 3,846 | 2.725322 | 0.216023 | 0.065092 | 0.028346 | 0.018898 | 0.457218 | 0.376378 | 0.356955 | 0.333858 | 0.28294 | 0.259318 | 0 | 0.074901 | 0.277951 | 3,846 | 116 | 101 | 33.155172 | 0.611091 | 0.087103 | 0 | 0.240964 | 0 | 0 | 0.030077 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0 | 0.048193 | 0 | 0.144578 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a12fba164e644eb46b2c6794568015cbeb50478 | 1,207 | py | Python | scripts/publish_models.py | hanseungwook/BasicSR | 3dda59f179e19bd9e5741299d373a138c501485a | [
"Apache-2.0",
"MIT"
] | null | null | null | scripts/publish_models.py | hanseungwook/BasicSR | 3dda59f179e19bd9e5741299d373a138c501485a | [
"Apache-2.0",
"MIT"
] | null | null | null | scripts/publish_models.py | hanseungwook/BasicSR | 3dda59f179e19bd9e5741299d373a138c501485a | [
"Apache-2.0",
"MIT"
] | 1 | 2021-01-29T05:48:24.000Z | 2021-01-29T05:48:24.000Z | import glob
import subprocess
import torch
from os import path as osp
paths = glob.glob('experiments/pretrained_models/*.pth')
for idx, path in enumerate(paths):
print(f'{idx+1:03d}: Processing {path}')
net = torch.load(path, map_location=torch.device('cpu'))
basename = osp.basename(path)
if 'params' not in net and 'params_ema' not in net:
raise ValueError(f'Please check! Model {basename} does not '
f"have 'params'/'params_ema' key.")
else:
if '-' in basename:
# check whether the sha is the latest
old_sha = basename.split('-')[1].split('.')[0]
new_sha = subprocess.check_output(['sha256sum', path]).decode()[:8]
if old_sha != new_sha:
final_file = path.split('-')[0] + f'-{new_sha}.pth'
print(f'\t Save from {path} to {final_file}')
subprocess.Popen(['mv', path, final_file])
else:
sha = subprocess.check_output(['sha256sum', path]).decode()[:8]
final_file = path.split('.pth')[0] + f'-{sha}.pth'
print(f'\t Save from {path} to {final_file}')
subprocess.Popen(['mv', path, final_file])
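# Net effect (a sketch): 'net_g.pth' becomes 'net_g-<sha>.pth', where <sha> is
# the first 8 hex chars of its sha256sum; an existing '-<oldsha>' suffix is
# refreshed whenever the file content has changed.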
| 41.62069 | 79 | 0.579122 | 160 | 1,207 | 4.2625 | 0.4 | 0.079179 | 0.02346 | 0.070381 | 0.322581 | 0.322581 | 0.322581 | 0.322581 | 0.193548 | 0.193548 | 0 | 0.016968 | 0.267606 | 1,207 | 28 | 80 | 43.107143 | 0.754525 | 0.028998 | 0 | 0.24 | 0 | 0 | 0.238462 | 0.047863 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a13d1545b26c46e28d8b7f042e02285f87a18b6 | 975 | py | Python | src/adonis_blue/adonis_blue.py | butterflysky/butterfly_bots | 6bc282044dcbee9701aa1c45ee492a76eb1be81e | [
"Apache-2.0",
"MIT"
] | null | null | null | src/adonis_blue/adonis_blue.py | butterflysky/butterfly_bots | 6bc282044dcbee9701aa1c45ee492a76eb1be81e | [
"Apache-2.0",
"MIT"
] | null | null | null | src/adonis_blue/adonis_blue.py | butterflysky/butterfly_bots | 6bc282044dcbee9701aa1c45ee492a76eb1be81e | [
"Apache-2.0",
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import os
import butterfly_bot.cogs
import discord
from discord.ext import commands
from discord_slash import SlashCommand
from dotenv import load_dotenv
from version import get_bot_version
load_dotenv()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("adonis_blue")
intents = discord.Intents.default()
intents.members = True
intents.typing = False
intents.presences = False
description = "butterfly bot alpha"
adonis_blue = commands.Bot(
command_prefix=commands.when_mentioned_or("!"),
description=description,
intents=intents,
strip_after_prefix=True,
)
slash = SlashCommand(adonis_blue, sync_commands=True, sync_on_cog_reload=True)
@adonis_blue.command()
async def version(ctx):
await ctx.send(get_bot_version())
adonis_blue.add_cog(butterfly_bot.cogs.OpenAIBot(adonis_blue))
adonis_blue.add_cog(butterfly_bot.cogs.UtilityBot(adonis_blue))
adonis_blue.run(os.getenv("DISCORD_API_KEY"))
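# Runtime note (a sketch of the requirements): DISCORD_API_KEY must be set in
# the environment, e.g. via a .env file picked up by load_dotenv() above.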
| 24.375 | 78 | 0.803077 | 135 | 975 | 5.562963 | 0.437037 | 0.11984 | 0.063915 | 0.04261 | 0.08522 | 0.08522 | 0.08522 | 0 | 0 | 0 | 0 | 0.001142 | 0.101538 | 975 | 39 | 79 | 25 | 0.856164 | 0.021538 | 0 | 0 | 0 | 0 | 0.048269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.275862 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a17a1ae5e3722183d1d9d75f4231d3bd69a6b5c | 1,053 | py | Python | rgw/standalone/s3_versioned.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | rgw/standalone/s3_versioned.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | rgw/standalone/s3_versioned.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | """
This script creates 10 random files sized between 64 KB and 20 MB and
uploads them to the specified versioned bucket.
Install the boto package on the machine to run this script.
"""
import boto.s3.connection
import boto
import os
from random import randint
access_key = '<s3 access key>'
secret_key = '<s3 secret key>'
conn = boto.connect_s3(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
host='<Hostname or IP>',
port=8080,
is_secure=False, # Change it to True if RGW running using SSL
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
for i in range(1, 11):
r = randint(64, 20240)
cmd = 'dd if=/dev/zero of=testfile{i} bs=1024 count={r}'.format(i=i, r=r)
os.system(cmd)
bucket = conn.create_bucket('<Bucket name>')
bucket.configure_versioning(versioning=True)
print("creating objects")
for i in range(1000):
r = randint(1, 10)
t = 'testfile' + str(r)
name = '<Bucket name>' + str(i)
key = bucket.new_key(name)
key.set_contents_from_filename(t)
print(name)
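# Note: the key names repeat across runs, so rerunning this script against the
# same versioned bucket stores new versions of the same 1000 objects.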
| 26.325 | 77 | 0.698955 | 166 | 1,053 | 4.319277 | 0.542169 | 0.062762 | 0.04463 | 0.050209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041958 | 0.185185 | 1,053 | 39 | 78 | 27 | 0.793706 | 0.19943 | 0 | 0 | 0 | 0 | 0.172455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a1af3fec816e2958fb884c9301676ff44e0a800 | 1,929 | py | Python | tests/test_response.py | altescy/queuery_client_python | 0cc05f193feeacbe826127bcd4f9fc817d2474d3 | [
"MIT"
] | 4 | 2021-12-09T07:58:18.000Z | 2021-12-09T11:09:05.000Z | tests/test_response.py | altescy/queuery_client_python | 0cc05f193feeacbe826127bcd4f9fc817d2474d3 | [
"MIT"
] | null | null | null | tests/test_response.py | altescy/queuery_client_python | 0cc05f193feeacbe826127bcd4f9fc817d2474d3 | [
"MIT"
] | 1 | 2021-12-15T02:56:53.000Z | 2021-12-15T02:56:53.000Z | import gzip
import json
from typing import Any, Dict
from unittest import mock
from queuery_client.response import Response, ResponseBody
class MockResponse:
def __init__(self, content: bytes, status_code: int) -> None:
self.content = content
self.status_code = status_code
def json(self) -> Dict[str, Any]:
data = json.loads(self.content)
assert isinstance(data, dict)
return data
def test_response() -> None:
response_body = ResponseBody(
id=1,
data_file_urls=["https://queuery.example.com"],
error=None,
status="success",
)
response = Response(response_body)
mock_response = MockResponse(gzip.compress(b'"1","test_recipe1"\n"2","test_recipe2"'), 200)
with mock.patch("requests.Session.get", return_value=mock_response):
data = response.read()
assert data == [["1", "test_recipe1"], ["2", "test_recipe2"]]
def test_response_with_type_cast() -> None:
response_body = ResponseBody(
id=1,
data_file_urls=["https://queuery.example.com/data"],
error=None,
status="success",
manifest_file_url="https://queuery.example.com/manifest",
)
response = Response(response_body, enable_cast=True)
manifest_response = MockResponse(
b"""
{"schema": {
"elements": [
{"name": "id", "type": {"base": "integer"}},
{"name": "title", "type": {"base": "character varying"}}
]
}}
""",
200,
)
with mock.patch("requests.Session.get", return_value=manifest_response):
response.fetch_manifest()
data_response = MockResponse(gzip.compress(b'"1","test_recipe1"\n"2","test_recipe2"'), 200)
with mock.patch("requests.Session.get", return_value=data_response):
data = response.read()
assert data == [[1, "test_recipe1"], [2, "test_recipe2"]]
| 30.619048 | 95 | 0.615863 | 220 | 1,929 | 5.209091 | 0.318182 | 0.069808 | 0.041885 | 0.057592 | 0.433682 | 0.433682 | 0.433682 | 0.433682 | 0.433682 | 0.394415 | 0 | 0.01838 | 0.238466 | 1,929 | 62 | 96 | 31.112903 | 0.761743 | 0 | 0 | 0.227273 | 0 | 0 | 0.172717 | 0.044496 | 0 | 0 | 0 | 0 | 0.068182 | 1 | 0.090909 | false | 0 | 0.113636 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a1d297d58101879710bf75f5abe734e8ecc4f07 | 3,668 | py | Python | cal_tools/test/ijconvert_test.py | JasonCozens/CalTools | 595e338faec3275481d5c0d39f95fd2783b58913 | [
"BSD-2-Clause"
] | null | null | null | cal_tools/test/ijconvert_test.py | JasonCozens/CalTools | 595e338faec3275481d5c0d39f95fd2783b58913 | [
"BSD-2-Clause"
] | null | null | null | cal_tools/test/ijconvert_test.py | JasonCozens/CalTools | 595e338faec3275481d5c0d39f95fd2783b58913 | [
"BSD-2-Clause"
] | null | null | null | __author__ = 'Jason'
import unittest
import icalendar
import icalendar.cal
import icalendar.parser_tools
import icalendar.parser
import icalendar.prop
import cal_tools.ijconvert
import json
import yaml
class IJConvertTest(unittest.TestCase):
def test_empty_vcalendar(self):
# Arrange.
expected_result = '["vcalendar"]'
cal = icalendar.Calendar()
i_cal = cal.to_ical()
# Act.
j_cal = cal_tools.ijconvert.ICalJCalConverter().convert(i_cal)
# Assert.
self.assertEqual(j_cal, expected_result)
def test_example(self):
ical_str = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20080205T191224Z
DTSTART:20081006
SUMMARY:Planning meeting
UID:4088E990AD89CB3DBB484909
END:VEVENT
END:VCALENDAR"""
cal = icalendar.Calendar()
cal.add('prodid', '-//Example Inc.//Example Calendar//EN')
cal.add('version', '2.0')
c = icalendar.cal.Component()
c.name = 'VEVENT'
cal.add_component(c)
component_name = "vcalendar"
properties = []
for skey in cal.singletons:
properties.append([skey, cal[skey]])
subcomponents = []
for sub in cal.subcomponents:
subcomponents.append(sub)
jcal = [component_name, properties, subcomponents]
print(str(jcal))
def test_json(self):
self.assertEqual(json.dumps([]), '[]')
self.assertEqual(json.dumps(['vcalendar']), '["vcalendar"]')
expected = """[
"vcalendar",
[],
[]
]"""
self.assertEqual(json.dumps(['vcalendar',[],[]],indent=2), expected)
expected = """[
"vcalendar",
[
[
"calscale",
{},
"text",
"GREGORIAN"
],
[
"prodid",
{},
"text",
"-//Example Inc.//Example Calendar//EN"
],
[
"version",
{},
"text",
"2.0"
]
],
[
[
"vevent",
[
[
"dtstamp",
{},
"date-time",
"2008-02-05T19:12:24Z"
],
[
"dtstart",
{},
"date",
"2008-10-06"
],
[
"summary",
{},
"text",
"Planning meeting"
],
[
"uid",
{},
"text",
"4088E990AD89CB3DBB484909"
]
],
[]
]
]
]"""
input = ['vcalendar', [
['calscale', {}, 'text', 'GREGORIAN'],
['prodid', {}, 'text', '-//Example Inc.//Example Calendar//EN'],
['version', {}, 'text', '2.0']],
[['vevent',
[
['dtstamp', {}, 'date-time', '2008-02-05T19:12:24Z'],
['dtstart', {}, 'date', '2008-10-06'],
['summary', {}, 'text', 'Planning meeting'],
['uid', {}, 'text', '4088E990AD89CB3DBB484909']
],
[]
]]
]
jcal = json.dumps(input)
self.assertEqual(json.dumps(input, indent=2), expected)
print(jcal)
print(yaml.safe_dump(json.loads(jcal)))
def test_cal_from_ical(self):
ical_str = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20080205T191224Z
DTSTART:20081006
SUMMARY:Planning meeting
UID:4088E990AD89CB3DBB484909
END:VEVENT
END:VCALENDAR"""
cal = icalendar.Calendar.from_ical(ical_str.encode().replace(b'\n',b'\r\n'))
for c in cal.walk('VEVENT'):
print(c.decoded('DTSTART')) | 23.664516 | 84 | 0.523446 | 338 | 3,668 | 5.594675 | 0.278107 | 0.039662 | 0.04495 | 0.066103 | 0.492332 | 0.45743 | 0.439979 | 0.439979 | 0.439979 | 0.439979 | 0 | 0.065626 | 0.318702 | 3,668 | 155 | 85 | 23.664516 | 0.691076 | 0.005725 | 0 | 0.373239 | 0 | 0 | 0.431669 | 0.048299 | 0 | 0 | 0 | 0 | 0.035211 | 1 | 0.028169 | false | 0 | 0.06338 | 0 | 0.098592 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a1df4cc18dbad2d16c9befd8339ef7e41e5ee6f | 642 | py | Python | tests/unit/test_cli.py | jeklein/rubicon-ml | a4a3c4d3504cb251597890dbfa8302b0bde06f30 | [
"Apache-2.0"
] | 42 | 2021-02-23T23:30:49.000Z | 2021-05-01T02:54:03.000Z | tests/unit/test_cli.py | jeklein/rubicon-ml | a4a3c4d3504cb251597890dbfa8302b0bde06f30 | [
"Apache-2.0"
] | 56 | 2021-05-13T13:47:50.000Z | 2022-03-24T13:46:49.000Z | tests/unit/test_cli.py | jeklein/rubicon-ml | a4a3c4d3504cb251597890dbfa8302b0bde06f30 | [
"Apache-2.0"
] | 9 | 2021-02-23T23:30:51.000Z | 2021-04-24T16:42:28.000Z | from unittest.mock import patch
import click
from click.testing import CliRunner
from rubicon_ml.cli import cli
def mock_click_output(**server_args):
click.echo("Running the mock server")
@patch("rubicon_ml.ui.dashboard.Dashboard.run_server")
@patch("rubicon_ml.ui.dashboard.Dashboard.__init__")
def test_cli(mock_init, mock_run_server):
mock_init.return_value = None
mock_run_server.side_effect = mock_click_output
runner = CliRunner()
result = runner.invoke(
cli,
["ui", "--root-dir", "/path/to/root"],
)
assert result.exit_code == 0
assert "Running the mock server" in result.output
| 23.777778 | 54 | 0.721184 | 91 | 642 | 4.835165 | 0.43956 | 0.061364 | 0.068182 | 0.090909 | 0.181818 | 0.181818 | 0.181818 | 0 | 0 | 0 | 0 | 0.001876 | 0.169782 | 642 | 26 | 55 | 24.692308 | 0.82364 | 0 | 0 | 0 | 0 | 0 | 0.244548 | 0.133956 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a22e57638609a095e34a2850bb278bc95d7cb54 | 1,110 | py | Python | s05_estruturas_logicas_e_condicionais/s05_exercicios/s05_exercicio_13.py | adeogliari/GeekUniversity_Python | 1b6badc45ca1dfbaa2f42196fb2dedac417b866e | [
"MIT"
] | null | null | null | s05_estruturas_logicas_e_condicionais/s05_exercicios/s05_exercicio_13.py | adeogliari/GeekUniversity_Python | 1b6badc45ca1dfbaa2f42196fb2dedac417b866e | [
"MIT"
] | null | null | null | s05_estruturas_logicas_e_condicionais/s05_exercicios/s05_exercicio_13.py | adeogliari/GeekUniversity_Python | 1b6badc45ca1dfbaa2f42196fb2dedac417b866e | [
"MIT"
] | null | null | null | """
13) Write an algorithm that computes the weighted average of the grades of 3 tests. The first and second tests have weight 1 and the third has weight 2. At the end, show the student's average and state whether the student passed or failed. The passing grade must be greater than or equal to 60 points.
"""
nota_prova01 = float(input('Enter the grade of test 01 \n'))
if (nota_prova01 >= 0) and (nota_prova01 <= 100):
nota_prova02 = float(input('Enter the grade of test 02 \n'))
if (nota_prova02 >= 0) and (nota_prova02 <= 100):
nota_prova03 = float(input('Enter the grade of test 03 \n'))
if (nota_prova03 >= 0) and (nota_prova03 <= 100):
media_ponderada = (nota_prova01 + nota_prova02 + 2 * nota_prova03) / 4
if media_ponderada >= 60:  # passing requires at least 60 points, per the statement
print(f'The student passed with a weighted average of: {media_ponderada} \n')
else:
print(f'The student failed with a weighted average of: {media_ponderada} \n')
else:
print('Invalid grade')
else:
print('Invalid grade')
else:
print('Invalid grade')
| 37 | 284 | 0.63964 | 169 | 1,110 | 4.106509 | 0.39645 | 0.028818 | 0.038905 | 0.073487 | 0.364553 | 0.32853 | 0.32853 | 0.207493 | 0.129683 | 0.129683 | 0 | 0.064792 | 0.263063 | 1,110 | 29 | 285 | 38.275862 | 0.783619 | 0.255856 | 0 | 0.411765 | 0 | 0 | 0.318627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.294118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a23aa0782780444d4e0b738172a4658e24c57ce | 1,796 | py | Python | Naive Algo/Complete Set/treutoy2.py | Suvoo/Formal-Concept-Analysis | 53e895b7112a741af352fc60869e689324c75557 | [
"MIT"
] | 4 | 2021-07-14T16:53:15.000Z | 2021-07-15T04:03:27.000Z | Naive Algo/Complete Set/treutoy2.py | Suvoo/Formal-Concept-Analysis | 53e895b7112a741af352fc60869e689324c75557 | [
"MIT"
] | null | null | null | Naive Algo/Complete Set/treutoy2.py | Suvoo/Formal-Concept-Analysis | 53e895b7112a741af352fc60869e689324c75557 | [
"MIT"
] | null | null | null | # set representation to bit representation and then return answer
# to read input form file
a,arr= [],[]
start,end = 4,5
with open(r'Naive Algo\Input\demo2') as file:
for line in file:
line = line.strip()
for c in line:
if c != ' ':
# print(int(c))
a.append(int(c))
# print(a)
arr.append(a)
a = []
# print(arr)
# modify the 2d array
for i in range(len(arr)):
arr[i] = arr[i][1:]
# print(arr[i])
# find and store Gr in a set s
s = set()
for st in range(start, end+1):
for i in range(len(arr)):
if st in arr[i]:
s.add(i+1)
print('Gr is',s)
s1 = set()
coun1,coun2 = 0,0
for r in range(start):
for i in range(len(arr)):
if r in arr[i]:
coun1+=1
if i+1 in s:
coun2+=1
ans = r
#print(r,i+1,arr[i])
if coun1 == coun2 and coun1 !=0 and coun2 !=0:
s1.add(ans)
coun1,coun2 = 0,0
print('Attributes are',s1) #attributes
# For similarity:
s1dash = []
for i in range(1,start):
s1dash.append(i)
# print(s1dash)
ansDict = {}
grDash = []
for atrans in s1dash:
for i in range(len(arr)):
if atrans in arr[i]:
grDash.append(i+1)
# print(grDash)
num = []
for gtrans in grDash:
if gtrans in s:
num.append(gtrans)
# print(len(num)/len(grDash))
ansDict[atrans] = len(num)/len(grDash)
grDash = []
print(ansDict)
'''
s2 = set()
for atrans in s1:
for i in range(len(arr)):
if atrans in arr[i]:
s2.add(i+1)
print('new gr is ',s2)
flag = 0
for i in s2:
if i in s:
print('sfb',i) #glitch
else:
print('bad')
flag = 1
break
print('verify ans is ',flag)''' | 21.129412 | 65 | 0.51392 | 284 | 1,796 | 3.25 | 0.253521 | 0.026002 | 0.045504 | 0.071506 | 0.151679 | 0.126761 | 0.108342 | 0.067172 | 0.067172 | 0.067172 | 0 | 0.037131 | 0.3402 | 1,796 | 85 | 66 | 21.129412 | 0.741772 | 0.159243 | 0 | 0.170213 | 0 | 0 | 0.035146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.06383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a24602b090b912c81940709fe611c02407a806f | 3,275 | py | Python | ALRA/sparseutils.py | pavlin-policar/ALRA | fab8d7661bf2a2179b40e68fb4f022015c700252 | [
"BSD-3-Clause"
] | 3 | 2018-12-13T06:10:25.000Z | 2020-06-19T01:53:00.000Z | ALRA/sparseutils.py | pavlin-policar/ALRA | fab8d7661bf2a2179b40e68fb4f022015c700252 | [
"BSD-3-Clause"
] | null | null | null | ALRA/sparseutils.py | pavlin-policar/ALRA | fab8d7661bf2a2179b40e68fb4f022015c700252 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import scipy.sparse as sp
def nonzero_mean(X, axis=0):
"""Compute the mean of non-zero values in a given matrix.
Parameters
----------
X: array_like
axis: int
Returns
-------
np.ndarray
"""
if sp.issparse(X):
if axis == 0:
X = X.tocsc()
elif axis == 1:
X = X.tocsr()
else:
raise NotImplementedError(
f"`axis={axis}` is not implemented for " f"sparse matrices."
)
counts = np.diff(X.indptr)
sums = X.sum(axis=axis)
sums = np.asarray(sums).ravel()
with np.errstate(invalid="ignore"):
return sums / counts
else:
X = np.ma.masked_array(X, mask=X <= 0)
means = X.mean(axis=axis)
return means.filled(0)
def nonzero_var(X, axis=0, ddof=0):
"""Compute the variance of non-zero values in a given matrix.
Parameters
----------
X: array_like
axis: int
ddof: int
Returns
-------
np.ndarray
"""
if sp.issparse(X):
# We'll modify X inplace so we need to create a copy
if axis == 0:
X = X.tocsc(copy=True)
elif axis == 1:
X = X.tocsr(copy=True)
else:
raise NotImplementedError(
f"`axis={axis}` is not implemented " f"sparse matrices."
)
X = X.astype(float)
means = nonzero_mean(X, axis=axis)
i, j, v = sp.find(X)
if axis == 0:
X[i, j] = v - means[j]
elif axis == 1:
X[i, j] = v - means[i]
X.data = X.data ** 2
counts = np.diff(X.indptr) - ddof
sums = X.sum(axis=axis)
sums = np.asarray(sums).ravel()
with np.errstate(invalid="ignore"):
return sums / counts
else:
X = np.ma.masked_array(X, mask=X <= 0)
variances = X.var(axis=axis, ddof=ddof)
return variances.filled(0)
def nonzero_std(X, axis=0, ddof=0):
"""Compute the standard deviation of non-zero values in a given matrix.
Parameters
----------
X: array_like
axis: int
ddof: int
Returns
-------
np.ndarray
"""
return np.sqrt(nonzero_var(X, axis=axis, ddof=ddof))
def find_zeroed_indices(adjusted, original):
"""Find the indices of the values present in ``original`` but missing in ``adjusted``.
Parameters
----------
adjusted: np.array
original: array_like
Returns
-------
Tuple[np.ndarray]
Indices of the values present in ``original`` but missing in ``adjusted``.
"""
if sp.issparse(original):
i, j, v = sp.find(original)
# Use hash maps to figure out which indices have been lost in the original
original_indices = set(zip(i, j))
adjusted_indices = set(zip(*np.where(~adjusted.mask)))
zeroed_indices = original_indices - adjusted_indices
# Convert our hash map of coords into the standard numpy indices format
indices = list(zip(*zeroed_indices))
indices = tuple(map(np.array, indices))
return indices
else:
original = np.ma.masked_array(original, mask=original <= 0)
return np.where(adjusted.mask & ~original.mask)
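# Illustrative doctest-style sketch of the helpers above (values follow from
# the definitions; not executed as part of the module):
#
#     >>> X = sp.csr_matrix(np.array([[1, 0], [3, 4]]))
#     >>> nonzero_mean(X, axis=0)
#     array([2., 4.])
#     >>> nonzero_std(X, axis=0)
#     array([1., 0.])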
| 24.259259 | 90 | 0.550229 | 432 | 3,275 | 4.125 | 0.263889 | 0.035915 | 0.006734 | 0.025253 | 0.489338 | 0.443322 | 0.409652 | 0.386083 | 0.362514 | 0.30303 | 0 | 0.007644 | 0.320916 | 3,275 | 134 | 91 | 24.440299 | 0.793615 | 0.269618 | 0 | 0.403226 | 0 | 0 | 0.051328 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.032258 | 0 | 0.209677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a26cb14793cecba07e1be7ff374b0ceebd1c498 | 5,733 | py | Python | 01_mediapipe/03_feach_data_from_holistic.py | lingwsh/hand_track_mediapipe | 89dfe647cef52957b72f9a2b05b7f2cd04ba330e | [
"MIT"
] | 1 | 2022-01-16T21:23:06.000Z | 2022-01-16T21:23:06.000Z | 01_mediapipe/03_feach_data_from_holistic.py | lingwsh/hand_track_mediapipe | 89dfe647cef52957b72f9a2b05b7f2cd04ba330e | [
"MIT"
] | null | null | null | 01_mediapipe/03_feach_data_from_holistic.py | lingwsh/hand_track_mediapipe | 89dfe647cef52957b72f9a2b05b7f2cd04ba330e | [
"MIT"
] | null | null | null | import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandas as pd
import time
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
count = 0
alldata = []
fps_time = 0
pose_tubuh = ['NOSE', 'LEFT_EYE_INNER', 'LEFT_EYE', 'LEFT_EYE_OUTER', 'RIGHT_EYE_INNER', 'RIGHT_EYE', 'RIGHT_EYE_OUTER', 'LEFT_EAR', 'RIGHT_EAR', 'MOUTH_LEFT', 'MOUTH_RIGHT',
'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW', 'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_PINKY', 'RIGHT_PINKY', 'LEFT_INDEX', 'RIGHT_INDEX', 'LEFT_THUMB',
'RIGHT_THUMB', 'LEFT_HIP', 'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE', 'RIGHT_ANKLE', 'LEFT_HEEL', 'RIGHT_HEEL', 'LEFT_FOOT_INDEX', 'RIGHT_FOOT_INDEX']
pose_tangan = ['WRIST', 'THUMB_CPC', 'THUMB_MCP', 'THUMB_IP', 'THUMB_TIP', 'INDEX_FINGER_MCP', 'INDEX_FINGER_PIP', 'INDEX_FINGER_DIP', 'INDEX_FINGER_TIP', 'MIDDLE_FINGER_MCP',
'MIDDLE_FINGER_PIP', 'MIDDLE_FINGER_DIP', 'MIDDLE_FINGER_TIP', 'RING_FINGER_PIP', 'RING_FINGER_DIP', 'RING_FINGER_TIP',
'RING_FINGER_MCP', 'PINKY_MCP', 'PINKY_PIP', 'PINKY_DIP', 'PINKY_TIP']
pose_tangan_2 = ['WRIST2', 'THUMB_CPC2', 'THUMB_MCP2', 'THUMB_IP2', 'THUMB_TIP2', 'INDEX_FINGER_MCP2', 'INDEX_FINGER_PIP2', 'INDEX_FINGER_DIP2', 'INDEX_FINGER_TIP2', 'MIDDLE_FINGER_MCP2',
'MIDDLE_FINGER_PIP2', 'MIDDLE_FINGER_DIP2', 'MIDDLE_FINGER_TIP2', 'RING_FINGER_PIP2', 'RING_FINGER_DIP2', 'RING_FINGER_TIP2',
'RING_FINGER_MCP2', 'PINKY_MCP2', 'PINKY_PIP2', 'PINKY_DIP2', 'PINKY_TIP2']
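# The three name lists above mirror MediaPipe's pose/hand landmark ordering;
# the "*2" variants label the second (left) hand's columns in the export.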
cap = cv2.VideoCapture(0)
#suc,frame_video = cap.read()
#vid_writer = cv2.VideoWriter('pose.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_video.shape[1], frame_video.shape[0]))
with mp_holistic.Holistic(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as holistic:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = holistic.process(image)
# Draw landmark annotation on the image.
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image_asli = np.copy(image)
image = np.zeros(image.shape)
mp_drawing.draw_landmarks(
image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
# if(results.pose_landmarks is not None and results.left_hand_landmarks is not None and results.right_hand_landmarks is not None):
# Collect every landmark detected in this frame into a single record.
# (The original appended the same dict once per detected part, producing
# duplicate rows, and swapped the width/height scaling factors.)
data_tubuh = {}
if results.pose_landmarks:
for i in range(len(pose_tubuh)):
results.pose_landmarks.landmark[i].x = results.pose_landmarks.landmark[i].x * image.shape[1]
results.pose_landmarks.landmark[i].y = results.pose_landmarks.landmark[i].y * image.shape[0]
data_tubuh[pose_tubuh[i]] = results.pose_landmarks.landmark[i]
if results.right_hand_landmarks:
for i in range(len(pose_tangan)):
results.right_hand_landmarks.landmark[i].x = results.right_hand_landmarks.landmark[i].x * image.shape[1]
results.right_hand_landmarks.landmark[i].y = results.right_hand_landmarks.landmark[i].y * image.shape[0]
data_tubuh[pose_tangan[i]] = results.right_hand_landmarks.landmark[i]
if results.left_hand_landmarks:
for i in range(len(pose_tangan)):
results.left_hand_landmarks.landmark[i].x = results.left_hand_landmarks.landmark[i].x * image.shape[1]
results.left_hand_landmarks.landmark[i].y = results.left_hand_landmarks.landmark[i].y * image.shape[0]
data_tubuh[pose_tangan_2[i]] = results.left_hand_landmarks.landmark[i]
if data_tubuh:
alldata.append(data_tubuh)
#cv2.namedWindow('MediaPipe Holistic', cv2.WND_PROP_FULLSCREEN)
#cv2.setWindowProperty('MediaPipe Holistic', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2,)
cv2.imshow('MediaPipe Holistic', image)  # already shows the black background plus the skeleton
cv2.imshow('Original image', image_asli)
count = count + 1
print(count)
fps_time = time.time()
#vid_writer.write(image)
#plt.imshow((image*255).astype(np.uint8))
#plt.savefig("image-frame/" + str(count) + ".jpg")
if cv2.waitKey(5) & 0xFF == 27:
df = pd.DataFrame(alldata)
df.to_excel("koordinat.xlsx")
break
cap.release()
| 52.59633 | 188 | 0.637188 | 726 | 5,733 | 4.749311 | 0.258953 | 0.060325 | 0.078306 | 0.063805 | 0.323956 | 0.313225 | 0.25348 | 0.185035 | 0.142401 | 0.090197 | 0 | 0.019617 | 0.2442 | 5,733 | 108 | 189 | 53.083333 | 0.776137 | 0.153497 | 0 | 0.160494 | 0 | 0 | 0.209514 | 0 | 0 | 0 | 0.000846 | 0 | 0 | 1 | 0 | false | 0 | 0.08642 | 0 | 0.08642 | 0.024691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a27506c8cbed51d51e030e2eeb8c6e021a5a351 | 1,881 | py | Python | easyrec/models/fnn.py | xu-zhiwei/easyrec | 4e42a356efe799bcd469a568d356852e4230bbc8 | [
"MIT"
] | 5 | 2021-08-12T22:54:07.000Z | 2022-03-27T11:46:48.000Z | easyrec/models/fnn.py | xu-zhiwei/pyrec | 4e42a356efe799bcd469a568d356852e4230bbc8 | [
"MIT"
] | null | null | null | easyrec/models/fnn.py | xu-zhiwei/pyrec | 4e42a356efe799bcd469a568d356852e4230bbc8 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.activations import sigmoid
from tensorflow.keras.layers import Dense, Flatten
from easyrec import blocks
class FNN(tf.keras.Model):
"""
Factorization-machine supported Neural Network (FNN).
Reference: Weinan Zhang. Deep Learning over Multi-field Categorical Data – A Case Study on User Response
Prediction. ECIR. 2016.
"""
def __init__(self,
one_hot_feature_columns,
k=16,
units_list=None,
activation='tanh'
):
"""
fm: Pretrained Factorization Machines.
one_hot_feature_columns: List[CategoricalColumn] encodes one hot feature fields, such as sex_id.
units_list: Dimension of fully connected stack outputs.
activation: Activation to use.
"""
super(FNN, self).__init__()
if units_list is None:
units_list = [256, 128]
self.fm = blocks.FM(one_hot_feature_columns, k=k)
self.num_fields = len(one_hot_feature_columns)
self.dense_block = blocks.DenseBlock(units_list, activation)
self.score = Dense(1, activation='sigmoid')
self.flatten = Flatten()
def call(self, inputs, pretraining=True, training=None, mask=None):
if pretraining:
logits = self.fm(inputs)
return sigmoid(logits)
else:
self._freeze_fm()
ws = tf.concat([self.fm.w[i](inputs) for i in range(self.num_fields)], axis=1)
vs = tf.concat([self.fm.v[i](inputs) for i in range(self.num_fields)], axis=1)
x = tf.concat((ws, vs), axis=1)
x = self.dense_block(x)
x = self.score(x)
return x
def _freeze_fm(self):
self.fm.trainable = False
for layer in self.fm.layers:
layer.trainable = False
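# Two-stage usage sketch (``columns`` and ``features`` below are assumed
# inputs, not part of this module):
#
#     model = FNN(columns, k=16)
#     probs = model(features, pretraining=True)    # stage 1: pretrain the FM
#     probs = model(features, pretraining=False)   # stage 2: freeze the FM and
#                                                  # train the MLP on its params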
| 34.2 | 108 | 0.60925 | 237 | 1,881 | 4.691983 | 0.451477 | 0.032374 | 0.058453 | 0.071942 | 0.102518 | 0.064748 | 0.064748 | 0.064748 | 0.064748 | 0.064748 | 0 | 0.012075 | 0.295587 | 1,881 | 54 | 109 | 34.833333 | 0.826415 | 0.215311 | 0 | 0 | 0 | 0 | 0.007829 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.114286 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a2781c28d90f13e079dd5c334d7fcfa39d8b7eb | 1,340 | py | Python | scripts/python3/latencies_to_csv.py | naderalfares/cloudping.co | 89f956bac79a78e4f712c4f9969eaacff5e00d1f | [
"MIT"
] | null | null | null | scripts/python3/latencies_to_csv.py | naderalfares/cloudping.co | 89f956bac79a78e4f712c4f9969eaacff5e00d1f | [
"MIT"
] | null | null | null | scripts/python3/latencies_to_csv.py | naderalfares/cloudping.co | 89f956bac79a78e4f712c4f9969eaacff5e00d1f | [
"MIT"
] | null | null | null | import requests
import json
import sys
if __name__ == "__main__":
try:
REGIONS_NAMES = json.load(open("regions.json", "r"))
except Exception as e:
print(e)
REGIONS_NAMES = {}  # fall back to an empty mapping so "-s" fails cleanly below
URL = "https://api.cloudping.co/averages"
r = requests.get(URL)
regions = []
b_selected = False
for reg in r.json():
regions.append(reg["region"])
if len(sys.argv) == 1:
selected_regions = regions
elif len(sys.argv) == 2 and sys.argv[1] == "-s":
selected_regions = list(REGIONS_NAMES.keys())
print(">>", selected_regions)
b_selected = True
else:
print("Usage: python3 " + sys.argv[0] + "[-s]\n\t-s: selected regions")
sys.exit(1)
with open("latencies.csv", "w") as fd:
for region in regions:
if region not in selected_regions:
continue
if b_selected:
fd.write("," + REGIONS_NAMES[region])
else:
fd.write("," + region)
fd.write("\n")
for index1, reg in enumerate(r.json()):
if reg["region"] not in selected_regions:
continue
assert(reg["region"] == regions[index1])
if b_selected:
fd.write(REGIONS_NAMES[regions[index1]])
else:
fd.write(regions[index1])
for index2 , avg in enumerate(reg["averages"]):
if avg["regionTo"] not in selected_regions:
continue
assert(avg["regionTo"] == regions[index2])
fd.write("," + str(int(avg["average"])))
fd.write("\n")
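# Illustration (added; not part of the original script): latencies.csv is a
# square matrix of rounded inter-region averages, with one header column and
# one row per selected region, e.g. (values made up):
#
#   ,us-east-1,us-west-1
#   us-east-1,6,62
#   us-west-1,62,3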
| 20.9375 | 73 | 0.636567 | 189 | 1,340 | 4.396825 | 0.354497 | 0.126354 | 0.046931 | 0.072202 | 0.202166 | 0.202166 | 0.072202 | 0 | 0 | 0 | 0 | 0.011163 | 0.197761 | 1,340 | 63 | 74 | 21.269841 | 0.76186 | 0 | 0 | 0.217391 | 0 | 0 | 0.127612 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0 | false | 0 | 0.065217 | 0 | 0.065217 | 0.065217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a27bf98ef639173dc873224e8779bef520c5079 | 1,828 | py | Python | Simply_Teasure_Game.py | royukira/Adaptive_ER | 317316ef03e579377b3001620c473fa04b68b134 | [
"MIT"
] | 2 | 2019-10-08T06:54:44.000Z | 2020-02-22T08:42:11.000Z | Simply_Teasure_Game.py | royukira/Adaptive_ER | 317316ef03e579377b3001620c473fa04b68b134 | [
"MIT"
] | null | null | null | Simply_Teasure_Game.py | royukira/Adaptive_ER | 317316ef03e579377b3001620c473fa04b68b134 | [
"MIT"
] | null | null | null | """
A simple example of Reinforcement Learning using the table-lookup Q-learning method.
An agent "o" starts on the left of a one-dimensional world; the treasure is at the rightmost location.
Run this program to see how the agent improves its strategy for finding the treasure.
The initial position is random in my version.
"""
import time


def update_env(S, episode, step_counter, numState):
    env_list = ['-'] * numState + ['T']  # example: '---------T' is our environment
    if S == -1:
        interaction = '==> Episode %s: total_steps = %s' % (episode + 1, step_counter)
        print('\r{}'.format(interaction), end='')
        # time.sleep(2)
        # print('\r ', end='')
    else:
        env_list[S] = 'O'
        interaction = ''.join(env_list)
        print('\r{}'.format(interaction), end='')
        time.sleep(0.3)
    return interaction


def get_env_feedback(S, A, numState):
    """
    The reward rule.
    :param S: current state
    :param A: action
    :return: next state S_ (according to the action taken), reward R
    """
    # This is how the agent interacts with the environment.
    if A == 1:  # move right
        if S == numState - 1:  # numState - 1 is the treasure's position;
            S_ = -1            # one more step right reaches the terminal state, encoded as -1
            R = 1              # reaching the terminal means the treasure is found: reward R = 1
            return S_, R
        else:
            S_ = S + 1  # not at the terminal yet: move one step right to the next state
            R = 0       # no reward
            return S_, R
    elif A == 2:  # move left (away from the treasure)
        R = 0  # the treasure is at the far right, so any left move earns no reward
        if S == 0:
            S_ = S  # reached the wall
        else:
            S_ = S - 1  # move one step left
        # S_ = random.randint(-1, S)
        # S_ = -1
        return S_, R | 33.851852 | 96 | 0.543764 | 240 | 1,828 | 4.0625 | 0.466667 | 0.010256 | 0.024615 | 0.047179 | 0.071795 | 0.071795 | 0.071795 | 0 | 0 | 0 | 0 | 0.018425 | 0.346827 | 1,828 | 54 | 97 | 33.851852 | 0.798157 | 0.472101 | 0 | 0.344828 | 0 | 0 | 0.046995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.241379 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
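# ---------------------------------------------------------------------------
# Minimal random-policy rollout using the two helpers above (added for
# illustration; the Q-learning training loop itself is not shown in this file):
#
# import random
# num_state, S, steps = 6, 0, 0
# while S != -1:
#     update_env(S, 0, steps, num_state)
#     S, R = get_env_feedback(S, random.choice([1, 2]), num_state)
#     steps += 1
# update_env(S, 0, steps, num_state)  # prints the episode summary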
2a2806808bf6c908b14845bb57e11c90940c3f9b | 14,082 | py | Python | tools/perf/page_sets/tough_video_cases.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/page_sets/tough_video_cases.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/page_sets/tough_video_cases.py | metux/chromium-deb | 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.page import page as page_module
from telemetry import story

_PAGE_TAGS_LIST = [
    # Audio codecs:
    'pcm',
    'mp3',
    'aac',
    'vorbis',
    'opus',
    # Video codecs:
    'h264',
    'vp8',
    'vp9',
    # Test types:
    'audio_video',
    'audio_only',
    'video_only',
    # Other filter tags:
    'is_50fps',
    'is_4k',
    # Play action:
    'seek',
    'normal_play',
    'background',
]


class ToughVideoCasesPage(page_module.Page):

  def __init__(self, url, page_set, tags):
    if tags:
      for t in tags:
        assert t in _PAGE_TAGS_LIST
    super(ToughVideoCasesPage, self).__init__(
        url=url, page_set=page_set, tags=tags, name=url.split('/')[-1])

  def PlayAction(self, action_runner):
    # Play the media until it has finished or it times out.
    action_runner.PlayMedia(playing_event_timeout_in_seconds=60,
                            ended_event_timeout_in_seconds=60)
    # Generate memory dump for memoryMetric.
    if self.page_set.measure_memory:
      action_runner.MeasureMemory()

  def SeekBeforeAndAfterPlayhead(self, action_runner,
                                 action_timeout_in_seconds=60):
    timeout = action_timeout_in_seconds
    # Start the media playback.
    action_runner.PlayMedia(
        playing_event_timeout_in_seconds=timeout)
    # Wait for 1 second so that we know the play-head is at ~1s.
    action_runner.Wait(1)
    # Seek to before the play-head location.
    action_runner.SeekMedia(seconds=0.5, timeout_in_seconds=timeout,
                            label='seek_warm')
    # Seek to after the play-head location.
    action_runner.SeekMedia(seconds=9, timeout_in_seconds=timeout,
                            label='seek_cold')
    # Generate memory dump for memoryMetric.
    if self.page_set.measure_memory:
      action_runner.MeasureMemory()

  def PlayInBackgroundTab(self, action_runner, background_time=10):
    # Steps:
    # 1. Play a video
    # 2. Open new tab overtop to obscure the video
    # 3. Close the tab to go back to the tab that is playing the video.
    # This test case will work differently depending on whether the platform is
    # desktop or Android and whether the video has sound or not. For example,
    # the current Chrome video implementation (as of July 2017) pauses video on
    # Android when the tab is backgrounded, but on desktop the video is not
    # paused.
    # TODO(crouleau): Use --disable-media-suspend flag to enable Android to
    # play video in the background.
    # The motivation for this test case is crbug.com/678663.
    action_runner.PlayMedia(
        playing_event_timeout_in_seconds=60)
    action_runner.Wait(.5)
    new_tab = action_runner.tab.browser.tabs.New()
    new_tab.Activate()
    action_runner.Wait(background_time)
    new_tab.Close()
    action_runner.Wait(.5)
    # Generate memory dump for memoryMetric.
    if self.page_set.measure_memory:
      action_runner.MeasureMemory()


class Page2(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page2, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd.ogg&type=audio',
        page_set=page_set,
        tags=['vorbis', 'audio_only'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page4(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page4, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd1080.webm',
        page_set=page_set,
        tags=['is_50fps', 'vp8', 'vorbis', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page7(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page7, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.ogg&type=audio',
        page_set=page_set,
        tags=['vorbis', 'audio_only', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page8(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page8, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.wav&type=audio',
        page_set=page_set,
        tags=['pcm', 'audio_only', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page11(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page11, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd1080.mp4',
        page_set=page_set,
        tags=['is_50fps', 'h264', 'aac', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page12(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page12, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd2160.mp4',
        page_set=page_set,
        tags=['is_4k', 'is_50fps', 'h264', 'aac', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page13(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page13, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.mp3&type=audio',
        page_set=page_set,
        tags=['mp3', 'audio_only', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page14(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page14, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.mp4',
        page_set=page_set,
        tags=['h264', 'aac', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page15(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page15, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.m4a&type=audio',
        page_set=page_set,
        tags=['aac', 'audio_only', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page16(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page16, self).__init__(
        url='file://tough_video_cases/video.html?src=garden2_10s.webm',
        page_set=page_set,
        tags=['is_4k', 'vp8', 'vorbis', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page17(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page17, self).__init__(
        url='file://tough_video_cases/video.html?src=garden2_10s.mp4',
        page_set=page_set,
        tags=['is_4k', 'h264', 'aac', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page19(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page19, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.ogg&type=audio&seek',
        page_set=page_set,
        tags=['vorbis', 'audio_only', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page20(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page20, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.wav&type=audio&seek',
        page_set=page_set,
        tags=['pcm', 'audio_only', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page23(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page23, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.mp3&type=audio&seek',
        page_set=page_set,
        tags=['mp3', 'audio_only', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page24(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page24, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.mp4&seek',
        page_set=page_set,
        tags=['h264', 'aac', 'audio_video', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page25(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page25, self).__init__(
        url='file://tough_video_cases/video.html?src=garden2_10s.webm&seek',
        page_set=page_set,
        tags=['is_4k', 'vp8', 'vorbis', 'audio_video', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page26(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page26, self).__init__(
        url='file://tough_video_cases/video.html?src=garden2_10s.mp4&seek',
        page_set=page_set,
        tags=['is_4k', 'h264', 'aac', 'audio_video', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page30(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page30, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.vp9.webm',
        page_set=page_set,
        tags=['vp9', 'opus', 'audio_video', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page31(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page31, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.vp9.webm&seek',
        page_set=page_set,
        tags=['vp9', 'opus', 'audio_video', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page32(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page32, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd1080_vp9.webm',
        page_set=page_set,
        tags=['vp9', 'video_only', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page33(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page33, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd1080_vp9.webm&seek',
        page_set=page_set,
        tags=['vp9', 'video_only', 'seek'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner)


class Page34(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page34, self).__init__(
        url='file://tough_video_cases/video.html?src=crowd720_vp9.webm',
        page_set=page_set,
        tags=['vp9', 'video_only', 'normal_play'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayAction(action_runner)


class Page36(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page36, self).__init__(
        url=('file://tough_video_cases/video.html?src='
             'smpte_3840x2160_60fps_vp9.webm&seek'),
        page_set=page_set,
        tags=['is_4k', 'vp9', 'video_only', 'seek'])
    self.add_browser_metrics = True

  def RunPageInteractions(self, action_runner):
    self.SeekBeforeAndAfterPlayhead(action_runner,
                                    action_timeout_in_seconds=120)


class Page37(ToughVideoCasesPage):

  def __init__(self, page_set):
    super(Page37, self).__init__(
        url='file://tough_video_cases/video.html?src=tulip2.vp9.webm&background',
        page_set=page_set,
        tags=['vp9', 'opus', 'audio_video', 'background'])
    self.skip_basic_metrics = True

  def RunPageInteractions(self, action_runner):
    self.PlayInBackgroundTab(action_runner)


class ToughVideoCasesPageSet(story.StorySet):
  """
  Description: Video Stack Perf pages that report time_to_play, seek time and
  many other media-specific and generic metrics.
  """

  def __init__(self, measure_memory=False):
    super(ToughVideoCasesPageSet, self).__init__(
        cloud_storage_bucket=story.PARTNER_BUCKET)
    self.measure_memory = measure_memory

    # Normal play tests:
    self.AddStory(Page2(self))
    self.AddStory(Page4(self))
    self.AddStory(Page7(self))
    self.AddStory(Page8(self))
    self.AddStory(Page11(self))
    self.AddStory(Page12(self))
    self.AddStory(Page13(self))
    self.AddStory(Page14(self))
    self.AddStory(Page15(self))
    self.AddStory(Page16(self))
    self.AddStory(Page17(self))
    self.AddStory(Page30(self))
    self.AddStory(Page32(self))
    self.AddStory(Page34(self))

    # Seek tests:
    self.AddStory(Page19(self))
    self.AddStory(Page20(self))
    self.AddStory(Page23(self))
    self.AddStory(Page24(self))
    self.AddStory(Page25(self))
    self.AddStory(Page26(self))
    self.AddStory(Page31(self))
    self.AddStory(Page33(self))
    self.AddStory(Page36(self))

    # Background playback tests:
    self.AddStory(Page37(self))


class ToughVideoCasesDesktopStoryExpectations(
    story.expectations.StoryExpectations):
  def SetExpectations(self):
    self.PermanentlyDisableBenchmark(
        [story.expectations.ALL_MOBILE], 'Desktop Benchmark')


class ToughVideoCasesAndroidStoryExpectations(
    story.expectations.StoryExpectations):
  def SetExpectations(self):
    self.PermanentlyDisableBenchmark(
        [story.expectations.ALL_DESKTOP], 'Android Benchmark')
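# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file).
# The page set is normally consumed by a Telemetry benchmark; instantiating it
# directly and filtering by tag might look like this (iteration over a
# StorySet is assumed to yield the stories added above):
#
# page_set = ToughVideoCasesPageSet(measure_memory=True)
# seek_stories = [s for s in page_set if 'seek' in s.tags]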
| 29.035052 | 79 | 0.706079 | 1,771 | 14,082 | 5.295313 | 0.13834 | 0.058221 | 0.046065 | 0.037321 | 0.705054 | 0.698976 | 0.68618 | 0.570058 | 0.538068 | 0.507571 | 0 | 0.025407 | 0.175472 | 14,082 | 484 | 80 | 29.095041 | 0.782275 | 0.094376 | 0 | 0.435897 | 0 | 0 | 0.174207 | 0.112755 | 0 | 0 | 0 | 0.002066 | 0.003205 | 1 | 0.176282 | false | 0 | 0.00641 | 0 | 0.272436 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a2884d1b927eac2716292f8d01b5195fe627cac | 5,262 | py | Python | camera_mgmt.py | agnes-yang/firecam | 9282d1b5b83be3abf6a137f7a72c090a9eca05f6 | [
"Apache-2.0"
] | 10 | 2019-12-19T02:37:33.000Z | 2021-12-07T04:47:08.000Z | camera_mgmt.py | agnes-yang/firecam | 9282d1b5b83be3abf6a137f7a72c090a9eca05f6 | [
"Apache-2.0"
] | 5 | 2019-10-27T23:22:52.000Z | 2020-02-13T23:08:15.000Z | camera_mgmt.py | agnes-yang/firecam | 9282d1b5b83be3abf6a137f7a72c090a9eca05f6 | [
"Apache-2.0"
] | 13 | 2019-09-24T18:53:24.000Z | 2021-07-16T05:57:18.000Z | # Copyright 2018 The Fuego Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
@author: Kinshuk Govil
add, delete, enable, disable, stats, or list cameras in detection system
"""
import os
import sys
fuegoRoot = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(fuegoRoot, 'lib'))
sys.path.insert(0, fuegoRoot)
import settings
settings.fuegoRoot = fuegoRoot
import collect_args
import db_manager
import logging
import random
import datetime
def execCameraSql(dbManager, sqlTemplate, cameraID, isQuery):
    sqlStr = sqlTemplate % cameraID
    logging.warning('SQL str: %s', sqlStr)
    if isQuery:
        dbResult = dbManager.query(sqlStr)
        logging.warning('dbr %d: %s', len(dbResult), dbResult)
    else:
        dbManager.execute(sqlStr)
        dbResult = None
    return dbResult


def getTime(dbResult):
    if len(dbResult) != 1:
        return None
    timeVal = dbResult[0]['maxtime']
    if not timeVal:
        return None
    return datetime.datetime.fromtimestamp(timeVal).isoformat()


def main():
    reqArgs = [
        ["m", "mode", "add, delete, enable, disable, stats, or list"],
    ]
    optArgs = [
        ["c", "cameraID", "ID of the camera (e.g., mg-n-mobo-c)"],
        ["u", "url", "url to get images from camera"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)

    cameraInfos = dbManager.get_sources(activeOnly=False)
    logging.warning('Num all cameras: %d', len(cameraInfos))
    logging.warning('Num active cameras: %d', len(list(filter(lambda x: x['dormant'] == 0, cameraInfos))))
    if args.mode == 'list':
        logging.warning('All cameras: %s', list(map(lambda x: x['name'], cameraInfos)))
        return

    matchingCams = list(filter(lambda x: x['name'] == args.cameraID, cameraInfos))
    logging.warning('Found %d matching cams for ID %s', len(matchingCams), args.cameraID)

    if args.mode == 'add':
        if len(matchingCams) != 0:
            logging.error('Camera with ID %s already exists: %s', args.cameraID, matchingCams)
            exit(1)
        dbRow = {
            'name': args.cameraID,
            'url': args.url,
            'dormant': 0,
            'randomID': random.random(),
            'last_date': datetime.datetime.now().isoformat()
        }
        dbManager.add_data('sources', dbRow)
        logging.warning('Successfully added camera %s', args.cameraID)
        return

    if len(matchingCams) != 1:
        logging.error('Cannot find camera with ID %s: %s', args.cameraID, matchingCams)
        exit(1)
    camInfo = matchingCams[0]
    logging.warning('Cam details: %s', camInfo)

    if args.mode in ('del', 'delete'):  # accept both spellings; the help text says "delete"
        sqlTemplate = """DELETE FROM sources WHERE name = '%s' """
        execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False)
        return

    if args.mode == 'enable':
        if camInfo['dormant'] == 0:
            logging.error('Camera already enabled: dormant=%d', camInfo['dormant'])
            exit(1)
        sqlTemplate = """UPDATE sources SET dormant=0 WHERE name = '%s' """
        execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False)
        return

    if args.mode == 'disable':
        if camInfo['dormant'] == 1:
            logging.error('Camera already disabled: dormant=%d', camInfo['dormant'])
            exit(1)
        sqlTemplate = """UPDATE sources SET dormant=1 WHERE name = '%s' """
        execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False)
        return

    if args.mode == 'stats':
        sqlTemplate = """SELECT max(timestamp) as maxtime FROM scores WHERE CameraName = '%s' """
        dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True)
        logging.warning('Most recent image scanned: %s', getTime(dbResult))
        sqlTemplate = """SELECT max(timestamp) as maxtime FROM detections WHERE CameraName = '%s' """
        dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True)
        logging.warning('Most recent smoke detection: %s', getTime(dbResult))
        sqlTemplate = """SELECT max(timestamp) as maxtime FROM alerts WHERE CameraName = '%s' """
        dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True)
        logging.warning('Most recent smoke alert: %s', getTime(dbResult))
        return

    logging.error('Unexpected mode: %s', args.mode)
    exit(1)


if __name__ == "__main__":
    main()
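# ---------------------------------------------------------------------------
# Example invocations (added for illustration; not part of the original file):
#
#   python camera_mgmt.py -m list                          # list all cameras
#   python camera_mgmt.py -m add -c mg-n-mobo-c -u <url>   # register a camera
#   python camera_mgmt.py -m disable -c mg-n-mobo-c        # mark it dormant
#   python camera_mgmt.py -m stats -c mg-n-mobo-c          # last scan/detection/alert times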
| 38.130435 | 106 | 0.6374 | 612 | 5,262 | 5.447712 | 0.326797 | 0.043191 | 0.069286 | 0.066587 | 0.303839 | 0.293041 | 0.275045 | 0.242651 | 0.242651 | 0.242651 | 0 | 0.00638 | 0.22558 | 5,262 | 137 | 107 | 38.408759 | 0.811779 | 0.139111 | 0 | 0.188119 | 0 | 0 | 0.220275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029703 | false | 0.009901 | 0.079208 | 0 | 0.207921 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a28fc2262a9b7b229c39ae50f40d559af0cddfc | 922 | py | Python | Timelapser(no gui).py | mcsim415/Timelapser | 7cb9bcc03e69f124bf1722f250a4d5dad135b109 | [
"MIT"
] | null | null | null | Timelapser(no gui).py | mcsim415/Timelapser | 7cb9bcc03e69f124bf1722f250a4d5dad135b109 | [
"MIT"
] | null | null | null | Timelapser(no gui).py | mcsim415/Timelapser | 7cb9bcc03e69f124bf1722f250a4d5dad135b109 | [
"MIT"
] | null | null | null | import os
import sys
import cv2
import glob
import numpy as np


def img2mp4(paths, pathOut, fps):
    frame_array = []
    for idx, path in enumerate(paths):
        # np.fromfile + imdecode handles non-ASCII paths that cv2.imread chokes on.
        ff = np.fromfile(path, np.uint8)
        img = cv2.imdecode(ff, 1)
        height, width, layers = img.shape
        size = (width, height)
        frame_array.append(img)
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'mp4v'), fps, size)
    for i in range(len(frame_array)):
        out.write(frame_array[i])
    out.release()


input_path = 'C:\\Users\\sgh20\\OneDrive\\바탕 화면\\motion'
output_path = 'C:\\Users\\sgh20\\OneDrive\\바탕 화면\\motion\\results.mp4'
set_fps = 60

paths = sorted(glob.glob(input_path + '\\*.jpg'))
paths = [os.path.join(input_path, path) for path in paths]  # no-op: glob already returns absolute paths
print(str(len(paths)))
print('[Timelapser] Start!...')
if len(paths) == 0:
    print('[Timelapser] No File. Drop.')
else:
    img2mp4(paths, output_path, set_fps)
    print('[Timelapser] Done!') | 28.8125 | 75 | 0.67462 | 139 | 922 | 4.388489 | 0.489209 | 0.065574 | 0.032787 | 0.04918 | 0.108197 | 0.108197 | 0.108197 | 0.108197 | 0 | 0 | 0 | 0.024485 | 0.158351 | 922 | 32 | 76 | 28.8125 | 0.761598 | 0 | 0 | 0 | 0 | 0 | 0.193946 | 0.093049 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.206897 | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a294378fdcd058a83bdfc9c1da261a068763c78 | 1,834 | py | Python | tests/objects/test_contact.py | VunkLai/ndk | 76894d2b81297ed0b7b48a35227d919d50e8fb64 | [
"MIT"
] | 1 | 2020-10-23T07:02:52.000Z | 2020-10-23T07:02:52.000Z | tests/objects/test_contact.py | VunkLai/ndk | 76894d2b81297ed0b7b48a35227d919d50e8fb64 | [
"MIT"
] | null | null | null | tests/objects/test_contact.py | VunkLai/ndk | 76894d2b81297ed0b7b48a35227d919d50e8fb64 | [
"MIT"
] | null | null | null | import unittest

import attr

from ndk.objects.command import Email as SendByEmail
from ndk.objects.contact import Contact, Email
from ndk.objects.timeperiod import TwentyFourSeven
from ndk.options import contact as options
from ndk.stack import Stack

# import HostNotifications, ServiceNotifications


class ContactTestCase(unittest.TestCase):

    def setUp(self):
        self.stack = Stack('ContactTesting')

    def test_contact(self):
        tp = TwentyFourSeven(self.stack)
        cmd = SendByEmail(self.stack)
        contact = Contact(self.stack, contact_name='foo',
                          host_notifications_period=tp,
                          service_notifications_period=tp,
                          host_notification_commands=cmd,
                          service_notification_commands=cmd)
        assert contact.contact_name == 'foo'
        assert contact.host_notifications_enabled
        assert 'service_notifications_options w,u,c,r,f,s' in contact.synth()


class EmailTestCase(unittest.TestCase):

    def setUp(self):
        self.stack = Stack('EmailTesting')

    def test_email(self):
        tp = TwentyFourSeven(self.stack)
        cmd = SendByEmail(self.stack)
        with self.assertRaises(TypeError):
            contact = Email(self.stack, contact_name='foo',
                            host_notifications_period=tp,
                            service_notifications_period=tp,
                            host_notification_commands=cmd,
                            service_notification_commands=cmd)
        assert Email(self.stack, contact_name='foo', email='foo@bar.baz',
                     host_notifications_period=tp,
                     service_notifications_period=tp,
                     host_notification_commands=cmd,
                     service_notification_commands=cmd)
| 36.68 | 80 | 0.635224 | 186 | 1,834 | 6.080645 | 0.27957 | 0.071618 | 0.111406 | 0.05305 | 0.54023 | 0.54023 | 0.511052 | 0.511052 | 0.436782 | 0.343059 | 0 | 0 | 0.292803 | 1,834 | 49 | 81 | 37.428571 | 0.872012 | 0.025082 | 0 | 0.473684 | 0 | 0 | 0.052072 | 0.016237 | 0 | 0 | 0 | 0 | 0.131579 | 1 | 0.105263 | false | 0 | 0.184211 | 0 | 0.342105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a2f5a1fd484897984f460a3291638b731d0469e | 1,119 | py | Python | app/utils/svgo.py | grigala/codestats-profile-readme | 80bae7ee882e8c009f059b004c2fa0587df8929f | [
"MIT"
] | 14 | 2020-07-22T22:23:23.000Z | 2022-02-20T15:26:01.000Z | app/utils/svgo.py | grigala/codestats-profile-readme | 80bae7ee882e8c009f059b004c2fa0587df8929f | [
"MIT"
] | null | null | null | app/utils/svgo.py | grigala/codestats-profile-readme | 80bae7ee882e8c009f059b004c2fa0587df8929f | [
"MIT"
] | 6 | 2020-08-18T17:17:12.000Z | 2021-08-28T21:24:25.000Z | # -*- coding: utf-8 -*-
import subprocess
from flask import current_app
def try_optimize_svg(original: str) -> str:
    if not current_app.config['SVG_OPTIMIZE_ENABLE']:
        return original
    args = [
        current_app.config['SVGO_PATH'],
        '--input', '-',
        '--output', '-',
        '--config', current_app.config['SVGO_CONFIG_PATH']
    ]
    try:
        with subprocess.Popen(args, encoding='utf-8',
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
            try:
                (stdout, stderr) = proc.communicate(original, timeout=3)
                if stderr:
                    current_app.logger.warning('svg optimize error: %s', stderr)
                    return original
                current_app.logger.info('svg optimized %d bytes -> %d bytes', len(original), len(stdout))
                return stdout
            except subprocess.TimeoutExpired as err:
                current_app.logger.warning('svg optimize timeout')
    except Exception as err:
        current_app.logger.exception(err)
    return original
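# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file).
# try_optimize_svg reads Flask config via current_app, so it must run inside
# an application context; `app` and the SVG string below are hypothetical.
#
# with app.app_context():
#     optimized = try_optimize_svg('<svg xmlns="http://www.w3.org/2000/svg"></svg>')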
| 36.096774 | 109 | 0.585344 | 121 | 1,119 | 5.289256 | 0.413223 | 0.125 | 0.1 | 0.0625 | 0.146875 | 0.10625 | 0 | 0 | 0 | 0 | 0 | 0.003841 | 0.302055 | 1,119 | 30 | 110 | 37.3 | 0.815621 | 0.018767 | 0 | 0.192308 | 0 | 0 | 0.136861 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a369e185a47283fb392237ce8b328b1bd4127f0 | 533 | py | Python | Aula 12/Desafios/051.py | mateuschaves/curso-python | 53b2f3b4bf083ae2ce7ea19dd358f49a36becd9d | [
"MIT"
] | 1 | 2018-07-23T04:03:35.000Z | 2018-07-23T04:03:35.000Z | Aula 12/Desafios/051.py | mateuschaves/curso-python | 53b2f3b4bf083ae2ce7ea19dd358f49a36becd9d | [
"MIT"
] | null | null | null | Aula 12/Desafios/051.py | mateuschaves/curso-python | 53b2f3b4bf083ae2ce7ea19dd358f49a36becd9d | [
"MIT"
] | null | null | null | """
Write a program that reads the first term
and the common difference of an arithmetic
progression (AP). At the end, show the first
10 terms of that progression.

Get up on the floor
Dancin' all night long
Get up on the floor
Dancin' till the break of dawn
Get up on the floor
Dancin' till the break of dawn
Get up on the floor
Dancin'
Dancin - Aaron Smith ♪♫
"""
r = int(input('Enter the common difference of the AP: '))
p = int(input('Enter the first term of the AP: '))
for c in range(p, p + (10 * r), r):
    print(c)
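# Sample run (illustrative): with first term p = 2 and common difference r = 3,
# the loop prints the terms a(n) = p + (n - 1) * r for n = 1..10:
# 2 5 8 11 14 17 20 23 26 29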
| 20.5 | 50 | 0.624765 | 92 | 533 | 3.641304 | 0.543478 | 0.059701 | 0.083582 | 0.119403 | 0.358209 | 0.358209 | 0.295522 | 0.295522 | 0.295522 | 0.295522 | 0 | 0.010638 | 0.294559 | 533 | 25 | 51 | 21.32 | 0.875 | 0.611632 | 0 | 0 | 0 | 0 | 0.381944 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a37cf5c60e5b9941cca33dae62739a1a31215b9 | 1,284 | py | Python | AsciiImageProcess.py | jcarrete5/ImageASCII | 98ea2749c17fc12d8db20baa072d97cd2c4118d0 | [
"MIT"
] | 1 | 2017-10-18T22:11:10.000Z | 2017-10-18T22:11:10.000Z | AsciiImageProcess.py | jcarrete5/ImageASCII | 98ea2749c17fc12d8db20baa072d97cd2c4118d0 | [
"MIT"
] | null | null | null | AsciiImageProcess.py | jcarrete5/ImageASCII | 98ea2749c17fc12d8db20baa072d97cd2c4118d0 | [
"MIT"
] | 1 | 2019-06-30T20:21:44.000Z | 2019-06-30T20:21:44.000Z | import numpy
from PIL import Image

charScale = "MNBKVFT|;:."  # List of characters from light to dark by apparent shading


def imgToAscii(imgPath):
    # Read image
    img = Image.open(imgPath, "r")
    # Reduce image size; keep twice the horizontal resolution since characters
    # are taller than they are wide
    scaleSize = (300, int(img.size[1] * (150 / img.size[0])))
    img = img.resize(scaleSize, Image.ANTIALIAS)
    # Convert to greyscale
    img = img.convert('L')
    # Get pixel data; band 0 suffices since, in a greyscale image, r = g = b
    pixels = list(img.getdata(0))
    width, height = img.size
    pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]  # reformatting into a 2D array
    # Store the corresponding ascii arrangement in an array to be returned
    asciiImgChars = []
    for i in range(height):
        asciiImgChars.append([])
        for j in range(width):
            val = int(pixels[i][j] / (256 / len(charScale)))
            asciiImgChars[i].append(charScale[val])
    return asciiImgChars


# Turn an ascii character array into an html string for use in a webpage
def asciiArrayToHtml(ascii):
    html = "<div id=\"asciiArt\" style = \"font-size: 0.3%;\">\n"
    for i in range(len(ascii)):
        for j in range(len(ascii[0])):
            html += ascii[i][j]
        html += "<br>\n"
    html += "</div>"
    return html
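# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; 'photo.jpg' is a placeholder path):
#
# if __name__ == "__main__":
#     chars = imgToAscii("photo.jpg")
#     with open("ascii.html", "w") as f:
#         f.write(asciiArrayToHtml(chars))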
| 33.789474 | 115 | 0.669782 | 197 | 1,284 | 4.365482 | 0.472081 | 0.040698 | 0.02093 | 0.038372 | 0.039535 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016489 | 0.197041 | 1,284 | 37 | 116 | 34.702703 | 0.817653 | 0.316199 | 0 | 0 | 0 | 0 | 0.054147 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a3a214b5c46cd54b9af169a87aed4e01a6f3801 | 16,564 | py | Python | setup.py | podgorskiy/DareBlopy | 5f7eb0db0d5e02b9465f41d9d737a6f207d328f0 | [
"Apache-2.0"
] | 99 | 2020-04-16T15:56:30.000Z | 2022-03-30T20:42:46.000Z | setup.py | podgorskiy/DareBlopy | 5f7eb0db0d5e02b9465f41d9d737a6f207d328f0 | [
"Apache-2.0"
] | 10 | 2020-05-02T16:03:06.000Z | 2021-12-25T02:40:03.000Z | setup.py | podgorskiy/DareBlopy | 5f7eb0db0d5e02b9465f41d9d737a6f207d328f0 | [
"Apache-2.0"
] | 7 | 2020-05-01T13:20:10.000Z | 2021-07-10T01:33:26.000Z | # Copyright 2019-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import setup, Extension
from distutils.errors import *
from distutils.dep_util import newer_group
from distutils import log
from distutils.command.build_ext import build_ext
from codecs import open
import os
import sys
import platform
import re
import glob
from distutils.ccompiler import CCompiler
from multiprocessing.pool import ThreadPool as Pool
def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    def f(x):
        try:
            src, ext = build[x]
        except KeyError:
            return
        self._compile(x, src, ext, cc_args, extra_postargs, pp_opts)

    pool = Pool(processes=6)
    pool.map(f, objects)
    return objects
# Overwrite to enable multiprocess compilation
CCompiler.compile = compile
target_os = 'none'
if sys.platform == 'darwin':
    target_os = 'darwin'
elif os.name == 'posix':
    target_os = 'posix'
elif platform.system() == 'Windows':
    target_os = 'win32'

here = os.path.abspath(os.path.dirname(__file__))

with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
def filter_sources(sources):
    """Filters sources into c, cpp, objc and asm"""
    cpp_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
    c_ext_match = re.compile(r'.*[.](c|C)\Z', re.I).match
    objc_ext_match = re.compile(r'.*[.]m\Z', re.I).match
    asm_ext_match = re.compile(r'.*[.](asm|s|S)\Z', re.I).match

    c_sources = []
    cpp_sources = []
    objc_sources = []
    asm_sources = []
    other_sources = []
    for source in sources:
        if c_ext_match(source):
            c_sources.append(source)
        elif cpp_ext_match(source):
            cpp_sources.append(source)
        elif objc_ext_match(source):
            objc_sources.append(source)
        elif asm_ext_match(source):
            asm_sources.append(source)
        else:
            other_sources.append(source)
    return c_sources, cpp_sources, objc_sources, asm_sources, other_sources
def build_extension(self, ext):
    """Modified version of build_extension method from distutils.
       Can handle compiler args for different files"""
    sources = ext.sources
    if sources is None or not isinstance(sources, (list, tuple)):
        raise DistutilsSetupError(
            "in 'ext_modules' option (extension '%s'), "
            "'sources' must be present and must be "
            "a list of source filenames" % ext.name)
    sources = list(sources)

    ext_path = self.get_ext_fullpath(ext.name)
    depends = sources + ext.depends
    if not (self.force or newer_group(depends, ext_path, 'newer')):
        log.debug("skipping '%s' extension (up-to-date)", ext.name)
        return
    else:
        log.info("building '%s' extension", ext.name)

    sources = self.swig_sources(sources, ext)

    extra_args = ext.extra_compile_args or []
    extra_c_args = getattr(ext, "extra_compile_c_args", [])
    extra_cpp_args = getattr(ext, "extra_compile_cpp_args", [])
    extra_objc_args = getattr(ext, "extra_compile_objc_args", [])
    extra_asm_args = getattr(ext, "extra_compile_asm_args", [])
    file_specific_definitions = getattr(ext, "file_specific_definitions", {})
    asm_include = getattr(ext, "asm_include", [])
    macros = ext.define_macros[:]
    for undef in ext.undef_macros:
        macros.append((undef,))

    c_sources, cpp_sources, objc_sources, asm_sources, other_sources = filter_sources(sources)

    self.compiler.src_extensions += ['.asm']
    self.compiler.set_executable('assembler', ['nasm'])

    def _compile(src, args):
        obj = []
        for s in src:
            additional_macros = []
            if s in file_specific_definitions.keys():
                additional_macros += file_specific_definitions[s]
            obj += self.compiler.compile([s],
                                         output_dir=self.build_temp,
                                         macros=macros + additional_macros,
                                         include_dirs=ext.include_dirs,
                                         debug=self.debug,
                                         extra_postargs=extra_args + args,
                                         depends=ext.depends)
        return obj

    def _compile_asm(src):
        obj = []
        for s in src:
            additional_macros = []
            if s in file_specific_definitions.keys():
                additional_macros += file_specific_definitions[s]
            macros_, objects, extra_postargs, asm_args, build = \
                self.compiler._setup_compile(self.build_temp, macros + additional_macros, asm_include, [s],
                                             depends, extra_asm_args)
            for o in objects:
                try:
                    src, ext = build[o]
                except KeyError:
                    continue
                try:
                    self.spawn(self.compiler.assembler + extra_postargs + asm_args + ['-o', o, src])
                except DistutilsExecError as msg:
                    raise CompileError(msg)
            obj += objects
        return obj

    objects = []
    objects += _compile_asm(asm_sources)
    objects += _compile(c_sources, extra_c_args)
    objects += _compile(cpp_sources, extra_cpp_args)
    objects += _compile(objc_sources, extra_objc_args)
    objects += _compile(other_sources, [])
    self._built_objects = objects[:]

    if ext.extra_objects:
        objects.extend(ext.extra_objects)
    extra_args = ext.extra_link_args or []

    language = ext.language or self.compiler.detect_language(sources)
    self.compiler.link_shared_object(
        objects, ext_path,
        libraries=self.get_libraries(ext),
        library_dirs=ext.library_dirs,
        runtime_library_dirs=ext.runtime_library_dirs,
        extra_postargs=extra_args,
        export_symbols=self.get_export_symbols(ext),
        debug=self.debug,
        build_temp=self.build_temp,
        target_lang=language)
# patching
build_ext.build_extension = build_extension
fsal = list(glob.glob('libs/fsal/sources/*.cpp'))
zlib = list(glob.glob('libs/zlib/*.c'))
lz4 = list(glob.glob('libs/lz4/lib/*.c'))
dareblopy = list(glob.glob('sources/*.c*')) + list(glob.glob('sources/protobuf/*.c*'))
crc32c = """crc32c.cc crc32c_arm64.cc crc32c_portable.cc crc32c_sse42.cc"""
crc32c = ['libs/crc32c/src/' + x for x in crc32c.split()]
protobuf = """any_lite.cc arena.cc extension_set.cc generated_enum_util.cc
generated_message_table_driven_lite.cc generated_message_util.cc implicit_weak_message.cc
io/coded_stream.cc io/io_win32.cc io/strtod.cc io/zero_copy_stream.cc io/zero_copy_stream_impl.cc
io/zero_copy_stream_impl_lite.cc message_lite.cc parse_context.cc repeated_field.cc stubs/bytestream.cc
stubs/common.cc stubs/int128.cc stubs/status.cc stubs/statusor.cc stubs/stringpiece.cc stubs/stringprintf.cc
stubs/structurally_valid.cc stubs/strutil.cc stubs/time.cc wire_format_lite.cc
any.cc any.pb.cc api.pb.cc compiler/importer.cc compiler/parser.cc descriptor.cc descriptor.pb.cc
descriptor_database.cc duration.pb.cc dynamic_message.cc empty.pb.cc extension_set_heavy.cc
field_mask.pb.cc generated_message_reflection.cc generated_message_table_driven.cc io/gzip_stream.cc
io/printer.cc io/tokenizer.cc map_field.cc message.cc reflection_ops.cc service.cc source_context.pb.cc
struct.pb.cc stubs/mathlimits.cc stubs/substitute.cc text_format.cc timestamp.pb.cc type.pb.cc
unknown_field_set.cc util/delimited_message_util.cc util/field_comparator.cc util/field_mask_util.cc
util/internal/datapiece.cc util/internal/default_value_objectwriter.cc util/internal/error_listener.cc
util/internal/field_mask_utility.cc util/internal/json_escaping.cc util/internal/json_objectwriter.cc
util/internal/json_stream_parser.cc util/internal/object_writer.cc util/internal/proto_writer.cc
util/internal/protostream_objectsource.cc util/internal/protostream_objectwriter.cc
util/internal/type_info.cc util/internal/type_info_test_helper.cc util/internal/utility.cc
util/json_util.cc util/message_differencer.cc util/time_util.cc util/type_resolver_util.cc
wire_format.cc wrappers.pb.cc"""
protobuf = ['libs/protobuf/src/google/protobuf/' + x for x in protobuf.split()]
jpeg_turbo = """jcapimin.c jcapistd.c jccoefct.c jccolor.c jcdctmgr.c jchuff.c
jcicc.c jcinit.c jcmainct.c jcmarker.c jcmaster.c jcomapi.c jcparam.c
jcphuff.c jcprepct.c jcsample.c jctrans.c jdapimin.c jdapistd.c jdatadst.c
jdatasrc.c jdcoefct.c jdcolor.c jddctmgr.c jdhuff.c jdicc.c jdinput.c
jdmainct.c jdmarker.c jdmaster.c jdmerge.c jdphuff.c jdpostct.c jdsample.c
jdtrans.c jerror.c jfdctflt.c jfdctfst.c jfdctint.c jidctflt.c jidctfst.c
jidctint.c jidctred.c jquant1.c jquant2.c jutils.c jmemmgr.c jmemnobs.c
jaricom.c jcarith.c jdarith.c"""
jpeg_turbo = ['libs/libjpeg-turbo/' + x for x in jpeg_turbo.split()]
p64 = sys.maxsize > 2**32
jpeg_turbo_simd_64 = """x86_64/jsimdcpu.asm x86_64/jfdctflt-sse.asm
x86_64/jccolor-sse2.asm x86_64/jcgray-sse2.asm x86_64/jchuff-sse2.asm
x86_64/jcphuff-sse2.asm x86_64/jcsample-sse2.asm x86_64/jdcolor-sse2.asm
x86_64/jdmerge-sse2.asm x86_64/jdsample-sse2.asm x86_64/jfdctfst-sse2.asm
x86_64/jfdctint-sse2.asm x86_64/jidctflt-sse2.asm x86_64/jidctfst-sse2.asm
x86_64/jidctint-sse2.asm x86_64/jidctred-sse2.asm x86_64/jquantf-sse2.asm
x86_64/jquanti-sse2.asm
x86_64/jccolor-avx2.asm x86_64/jcgray-avx2.asm x86_64/jcsample-avx2.asm
x86_64/jdcolor-avx2.asm x86_64/jdmerge-avx2.asm x86_64/jdsample-avx2.asm
x86_64/jfdctint-avx2.asm x86_64/jidctint-avx2.asm x86_64/jquanti-avx2.asm x86_64/jsimd.c"""
jpeg_turbo_simd_86 = """i386/jccolor-avx2.asm i386/jccolor-mmx.asm i386/jccolor-sse2.asm
i386/jcgray-avx2.asm i386/jcgray-mmx.asm i386/jcgray-sse2.asm i386/jchuff-sse2.asm
i386/jcphuff-sse2.asm i386/jcsample-avx2.asm i386/jcsample-mmx.asm i386/jcsample-sse2.asm
i386/jdcolor-avx2.asm i386/jdcolor-mmx.asm i386/jdcolor-sse2.asm i386/jdmerge-avx2.asm
i386/jdmerge-mmx.asm i386/jdmerge-sse2.asm i386/jdsample-avx2.asm i386/jdsample-mmx.asm
i386/jdsample-sse2.asm i386/jfdctflt-3dn.asm i386/jfdctflt-sse.asm i386/jfdctfst-mmx.asm
i386/jfdctfst-sse2.asm i386/jfdctint-avx2.asm i386/jfdctint-mmx.asm i386/jfdctint-sse2.asm
i386/jidctflt-3dn.asm i386/jidctflt-sse.asm i386/jidctflt-sse2.asm i386/jidctfst-mmx.asm
i386/jidctfst-sse2.asm i386/jidctint-avx2.asm i386/jidctint-mmx.asm i386/jidctint-sse2.asm
i386/jidctred-mmx.asm i386/jidctred-sse2.asm i386/jquant-3dn.asm i386/jquant-mmx.asm
i386/jquant-sse.asm i386/jquantf-sse2.asm i386/jquanti-avx2.asm i386/jquanti-sse2.asm
i386/jsimd.c i386/jsimdcpu.asm"""
jpeg_turbo_simd = ['libs/libjpeg-turbo/simd/' + x for x in (jpeg_turbo_simd_64 if p64 else jpeg_turbo_simd_86).split()]
jpeg_vanila = """jmemnobs.c jaricom.c jcapimin.c jcapistd.c jcarith.c jccoefct.c jccolor.c
jcdctmgr.c jchuff.c jcinit.c jcmainct.c jcmarker.c jcmaster.c jcomapi.c jcparam.c
jcprepct.c jcsample.c jctrans.c jdapimin.c jdapistd.c jdarith.c jdatadst.c jdatasrc.c
jdcoefct.c jdcolor.c jddctmgr.c jdhuff.c jdinput.c jdmainct.c jdmarker.c jdmaster.c
jdmerge.c jdpostct.c jdsample.c jdtrans.c jerror.c jfdctflt.c jfdctfst.c jfdctint.c
jidctflt.c jidctfst.c jidctint.c jquant1.c jquant2.c jutils.c jmemmgr.c"""
jpeg_vanila = ['libs/libjpeg/' + x for x in jpeg_vanila.split()]
definitions = {
    'darwin': [('HAVE_SSE42', 0), ('HAVE_PTHREAD', 0)],
    'posix': [('HAVE_SSE42', 0), ('HAVE_PTHREAD', 0)],
    'win32': [('HAVE_SSE42', 0)],
}

file_specific_definitions = {}
for file in jpeg_turbo:
    file_specific_definitions[file] = [('TURBO', 0)]
for file in jpeg_turbo_simd:
    file_specific_definitions[file] = [('TURBO', 0)]
for file in jpeg_vanila:
    file_specific_definitions[file] = [('VANILA', 0)]

libs = {
    'darwin': [],
    'posix': ["rt", "m", "stdc++fs", "gomp"],
    'win32': ["ole32", "shell32"],
}

extra_link = {
    'darwin': [],
    'posix': ['-static-libstdc++', '-static-libgcc', '-flto'],
    'win32': [],
}

extra_compile_args = {
    'darwin': ['-fPIC', '-msse2', '-msse3', '-msse4', '-funsafe-math-optimizations'],
    'posix': ['-fPIC', '-msse2', '-msse3', '-msse4', '-funsafe-math-optimizations'],
    'win32': ['/MT', '/fp:fast', '/GL', '/GR-'],
}

extra_compile_cpp_args = {
    'darwin': ['-std=c++14', '-lstdc++fs', '-Ofast', '-flto', '-fopenmp'],
    'posix': ['-std=c++14', '-lstdc++fs', '-Ofast', '-flto', '-fopenmp'],
    'win32': [],
}

extra_compile_c_args = {
    'darwin': ['-std=c99', '-Ofast', '-flto'],
    'posix': ['-std=c99', '-Ofast', '-flto'],
    'win32': [],
}

extra_compile_asm_args = {
    'darwin': ['-DMACHO', '-D__x86_64__' if p64 else '', '-DPIC', '-DTURBO', '-f macho', '-Ox'],
    'posix': ['-DELF', '-D__x86_64__' if p64 else '', '-DPIC', '-DTURBO', '-f elf64' if p64 else '-f elf', '-Ox'],
    'win32': ['-DWIN64' if p64 else '-DWIN32', '-D__x86_64__' if p64 else '', '-DPIC', '-DTURBO', '-f win64' if p64 else '-f win32', '-Ox'],
}
extension = Extension("_dareblopy",
                      jpeg_turbo + jpeg_vanila + jpeg_turbo_simd + dareblopy + fsal + crc32c + zlib + protobuf + lz4,
                      define_macros=definitions[target_os],
                      include_dirs=[
                          "libs/zlib",
                          "libs/fsal/sources",
                          "libs/lz4/lib",
                          "libs/pybind11/include",
                          "libs/crc32c/include",
                          "libs/protobuf/src",
                          "sources",
                          "configs"
                      ],
                      extra_compile_args=extra_compile_args[target_os],
                      extra_link_args=extra_link[target_os],
                      libraries=libs[target_os])
extension.extra_compile_cpp_args = extra_compile_cpp_args[target_os]
extension.extra_compile_c_args = extra_compile_c_args[target_os]
extension.file_specific_definitions = file_specific_definitions
extension.extra_compile_asm_args = extra_compile_asm_args[target_os]
extension.asm = 'nasm'
extension.asm_include = ['libs/libjpeg-turbo/simd/nasm/', 'libs/libjpeg-turbo/simd/x86_64/' if p64 else 'libs/libjpeg-turbo/simd/i386/']
setup(
    name='dareblopy',
    version='0.0.5',
    description='dareblopy',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/podgorskiy/dareblopy',
    author='Stanislav Pidhorskyi',
    author_email='stpidhorskyi@mix.wvu.edu',
    license='Apache 2.0 License',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='dareblopy',
    packages=['dareblopy'],
    ext_modules=[extension],
    install_requires=['numpy']
)
| 42.040609 | 141 | 0.651775 | 2,226 | 16,564 | 4.663073 | 0.206649 | 0.028998 | 0.020809 | 0.018497 | 0.191908 | 0.141908 | 0.130347 | 0.122062 | 0.116474 | 0.096435 | 0 | 0.037049 | 0.221082 | 16,564 | 393 | 142 | 42.147583 | 0.767478 | 0.050652 | 0 | 0.107383 | 0 | 0.134228 | 0.436082 | 0.167793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020134 | false | 0 | 0.04698 | 0 | 0.087248 | 0.006711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a3ac21658ac9695b5fecda9001a487e08821284 | 2,402 | py | Python | src/gocept/webtoken/tests/test_token.py | gocept/gocept.webtoken | 55207d401d27617dc7ae3e3628d6c9dd2734ec40 | [
"MIT"
] | null | null | null | src/gocept/webtoken/tests/test_token.py | gocept/gocept.webtoken | 55207d401d27617dc7ae3e3628d6c9dd2734ec40 | [
"MIT"
] | null | null | null | src/gocept/webtoken/tests/test_token.py | gocept/gocept.webtoken | 55207d401d27617dc7ae3e3628d6c9dd2734ec40 | [
"MIT"
] | null | null | null | import pytest


def test_token__decode_web_token__1(token):
    """Raises ValueError on invalid token."""
    with pytest.raises(ValueError) as err:
        token.decode({'token': 'asdf'}, 'jwt-application-public', 'asdf')
    assert 'Not enough segments' == str(err.value)


def test_token__decode_web_token__2(token):
    """Raises ValueError on wrong cryptographic key."""
    token_dict = token.create('jwt-application-private', 'app')
    with pytest.raises(ValueError) as err:
        token.decode(token_dict, 'jwt-access-public', 'app')
    assert 'Signature verification failed' == str(err.value)


def test_token__decode_web_token__3(token):
    """Raises ValueError on expired token."""
    token_dict = token.create('jwt-access-private', 'app', expires_in=-1)
    with pytest.raises(ValueError) as err:
        token.decode(token_dict, 'jwt-access-public', 'app')
    assert 'Signature has expired' == str(err.value)


def test_token__decode_web_token__4(token):
    """Raises ValueError on invalid subject."""
    token_dict = token.create('jwt-access-private', 'app')
    with pytest.raises(ValueError) as err:
        token.decode(token_dict, 'jwt-access-public', 'access')
    assert "Subject mismatch 'access' != 'app'" == str(err.value)


def test_token__decode_web_token__5(token):
    """Returns decoded token content if valid."""
    token_dict = token.create('jwt-access-private', 'app', data={'foo': 'bar'})
    decoded = token.decode(token_dict, 'jwt-access-public', 'app')
    assert (
        sorted([u'iss', u'iat', u'data', u'sub', u'nbf']) ==
        sorted(decoded.keys()))
    assert 'issuer' == decoded['iss']
    assert {u'foo': u'bar'} == decoded['data']
    # iat, nbf and exp have been checked implicitly by validation upon
    # decoding


def test_token__decode_web_token__6(token):
    """Subject matching is optional."""
    token_dict = token.create('jwt-access-private', 'app', data={'foo': 'bar'})
    decoded = token.decode(token_dict, 'jwt-access-public', None)
    assert (
        sorted([u'iss', u'iat', u'data', u'sub', u'nbf']) ==
        sorted(decoded.keys()))


def test_token__create_web_token__1(token):
    """Create web token returns encoded token and token contents."""
    token_dict = token.create('jwt-access-private', 'app', data={'foo': 'bar'})
    assert token_dict['data'] == token.decode(
        token_dict, 'jwt-access-public', 'app')
| 38.741935 | 79 | 0.671107 | 326 | 2,402 | 4.751534 | 0.242331 | 0.092318 | 0.054229 | 0.069722 | 0.661072 | 0.607489 | 0.573919 | 0.573919 | 0.505487 | 0.377663 | 0 | 0.004008 | 0.169026 | 2,402 | 61 | 80 | 39.377049 | 0.772044 | 0.149459 | 0 | 0.394737 | 0 | 0 | 0.230309 | 0.022433 | 0 | 0 | 0 | 0 | 0.236842 | 1 | 0.184211 | false | 0 | 0.026316 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a4037e6458b2052d39e07a53f191e81d52120f2 | 3,512 | py | Python | test.py | mivade/permadict | 472953d5dfb5997f398ddec1538b19b1eba719d1 | [
"Unlicense"
] | 4 | 2017-12-02T15:36:52.000Z | 2022-01-28T17:19:55.000Z | test.py | mivade/permadict | 472953d5dfb5997f398ddec1538b19b1eba719d1 | [
"Unlicense"
] | 1 | 2018-08-14T01:34:35.000Z | 2018-08-14T01:34:35.000Z | test.py | mivade/permadict | 472953d5dfb5997f398ddec1538b19b1eba719d1 | [
"Unlicense"
] | null | null | null | from tempfile import gettempdir
import os
import os.path as osp
import sqlite3

import pytest

from permadict import Permadict


@pytest.fixture
def db_filename():
    filename = osp.join(gettempdir(), "database.sqlite")
    yield filename
    try:
        os.remove(filename)
    except:  # noqa
        pass


@pytest.mark.parametrize("journal_mode,synchronous", [
    ("WAL", True),
    ("WAL", False),
    ("OFF", True),
    ("OFF", False),
])
def test_create(journal_mode, synchronous, tmpdir):
    Permadict(journal_mode=journal_mode, synchronous=synchronous)
    Permadict(key="value", otherkey=1)

    path = str(tmpdir.join("test.sqlite"))
    db = Permadict(path, journal_mode=journal_mode, synchronous=synchronous)
    cur = db.conn.cursor()
    cur.execute("PRAGMA journal_mode")
    mode = cur.fetchone()[0]
    assert mode.lower() == journal_mode.lower()
    cur.execute("PRAGMA synchronous")
    sync = cur.fetchone()[0]
    if not synchronous:
        assert sync == 0
    else:
        assert sync == 2  # default mode (FULL)


def test_len():
    d = Permadict(thing=1, other=2)
    assert len(d) == 2


def test_set_and_get():
    d = Permadict()
    d["key"] = "value"
    assert d["key"] == "value"
    with pytest.raises(KeyError):
        print(d["nosuchkey"])


def test_del():
    d = Permadict()
    d["one"] = 1
    d["two"] = 2
    assert len(d) == 2
    assert d["one"] == 1
    del d["one"]
    with pytest.raises(KeyError):
        d["one"]
    assert d["two"] == 2
    assert len(d) == 1


def test_clear():
    d = Permadict()
    d["one"] = 1
    d["two"] = 2
    assert len(d) == 2
    d.clear()
    assert len(d) == 0


def test_keys():
    d = Permadict(key="value")
    assert len(list(d.keys())) == 1
    assert list(d.keys()) == ["key"]


def test_in():
    d = Permadict(key="value")
    assert "key" in d
    assert "nope" not in d


def test_iterator():
    d = Permadict(one=1, two=2)
    x = [d[key] for key in d]
    assert len(x) == 2
    assert sorted(x) == [1, 2]


def test_items():
    items = dict(a=1, b=2, c=3)
    d = Permadict(**items)
    for key, value in d.items():
        assert key in items
        assert key in d
        assert items[key] == d[key]


def test_values():
    values = [1, 2, 3]
    items = {str(v): v for v in values}
    d = Permadict(**items)
    for v in d.values():
        assert v in values


def test_get():
    d = Permadict(**dict(a=1, b=2, c=3))
    value = d.get("a")
    assert value == 1
    value = d.get("nonexistant")
    assert value is None
    value = d.get("other", "one")
    assert value == "one"


def test_pop():
    d = Permadict(**dict(a=1))
    value = d.pop("a")
    assert value == 1
    with pytest.raises(KeyError):
        d.pop("a")


def test_update():
    other = dict(one=1, two=2)
    d = Permadict(**other)
    assert len(d) == 2
    assert d["one"] == 1
    assert d["two"] == 2

    d.update({"one": 3, "three": 1})
    assert len(d) == 3
    assert d["one"] == 3
    assert d["two"] == 2
    assert d["three"] == 1

    pairs = [("one", 1), ("three", 3), ("four", 4)]
    res = d.update(pairs)
    assert res is None
    assert len(d) == 4
    assert d["one"] == 1
    assert d["two"] == 2
    assert d["three"] == 3
    assert d["four"] == 4


def test_context(db_filename):
    with Permadict(db_filename) as d:
        d["key"] = "value"

    with pytest.raises(sqlite3.ProgrammingError):
        d["key"]

    with Permadict(db_filename) as d:
        assert d["key"] == "value"
| 20.068571 | 76 | 0.571469 | 514 | 3,512 | 3.850195 | 0.196498 | 0.04952 | 0.040424 | 0.036382 | 0.286003 | 0.192521 | 0.080344 | 0.052552 | 0.030318 | 0.030318 | 0 | 0.022093 | 0.265376 | 3,512 | 174 | 77 | 20.183908 | 0.744961 | 0.006834 | 0 | 0.25 | 0 | 0 | 0.077188 | 0.006887 | 0 | 0 | 0 | 0 | 0.304688 | 1 | 0.117188 | false | 0.007813 | 0.046875 | 0 | 0.164063 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a4091166653e0e606aa8215c1e93151d20d7fa1 | 6,262 | py | Python | checkers/CheckersGame.py | as1067/Connect4AlphaZero | 1566a3e2cc055ab0d86e75c16c422196dbdb236a | [
"MIT"
] | null | null | null | checkers/CheckersGame.py | as1067/Connect4AlphaZero | 1566a3e2cc055ab0d86e75c16c422196dbdb236a | [
"MIT"
] | null | null | null | checkers/CheckersGame.py | as1067/Connect4AlphaZero | 1566a3e2cc055ab0d86e75c16c422196dbdb236a | [
"MIT"
] | null | null | null | from .CheckersLogic import Board
import numpy as np
class CheckersGame():
"""
This class specifies the base Game class. To define your own game, subclass
this class and implement the functions below. This works when the game is
two-player, adversarial and turn-based.
Use 1 for player1 and -1 for player2.
See othello/OthelloGame.py for an example implementation.
"""
def __init__(self,n):
self.b = Board(n)
self.n = n
self.startingBoard = [1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]
def getInitBoard(self):
"""
Returns:
startBoard: a representation of the board (ideally this is the form
that will be the input to your neural network)
"""
b = np.zeros((8,8))
for i in range(len(self.startingBoard)):
if not self.startingBoard[i] == 0:
coord = self.posToCoord(i)
#print(coord)
b[coord[0]][coord[1]] = self.startingBoard[i]
return b
def getBoardSize(self):
"""
Returns:
(x,y): a tuple of board dimensions
"""
return self.n
def getActionSize(self):
"""
Returns:
actionSize: number of all possible actions
"""
return 32*32
def getNextState(self, board, player, action):
"""
Input:
board: current board
player: current player (1 or -1)
action: action taken by current player
Returns:
nextBoard: board after applying action
nextPlayer: player who plays in the next turn (should be -player)
"""
if player == -1:
player == 2
self.b.boardToGame(board,player)
move = self.actionToMove(action)
self.b.executeMove(move)
return self.b.getBoard(),self.b.curPlayer()
def getValidMoves(self, board, player):
"""
Input:
board: current board
player: current player
Returns:
validMoves: a binary vector of length self.getActionSize(), 1 for
moves that are valid from the current board and player,
0 for invalid moves
"""
#print("Current player for board is:"+str(self.curPlayer()))
#print("Current player given is:"+str(player))
#print(str(self.b.getBoard()))
if player == -1:
player = 2
self.b.boardToGame(board,player)
valids = [0]*self.getActionSize()
moves = self.b.getLegalMoves()
#print(str(moves))
for move in moves:
#print(move)
valids[self.moveToAction(move)] = 1
return valids
def getGameEnded(self, board, player):
"""
Input:
board: current board
player: current player (1 or -1)
Returns:
r: 0 if game has not ended. 1 if player won, -1 if player lost,
small non-zero value for draw.
"""
self.b.boardToGame(board,player)
return self.b.getWinner(player)
def getCanonicalForm(self, board, player):
"""
Input:
board: current board
player: current player (1 or -1)
Returns:
canonicalBoard: returns canonical form of board. The canonical form
should be independent of the player. For example, in chess,
the canonical form can be chosen to be from the pov
of white. When the player is white, we can return
board as is. When the player is black, we can invert
the colors and return the board.
"""
#print(board)
if player == -1:
board = np.asarray(board)
board = np.flipud(board)
board = np.fliplr(board)
return list(board)
else:
return board
def getSymmetries(self, board, pi):
"""
Input:
board: current board
pi: policy vector of size self.getActionSize()
Returns:
symmForms: a list of [(board,pi)] where each tuple is a symmetrical
form of the board and the corresponding pi vector. This
is used when training the neural network from examples.
"""
return [(board,pi)]
def stringRepresentation(self, board):
"""
Input:
board: current board
Returns:
boardString: a quick conversion of board to a string format.
Required by MCTS for hashing.
"""
return str(board)
def moveToAction(self, move):
return (move[0]-1)*32+(move[1]-1)
def actionToMove(self,action):
start = int(action/32)
end = action - 32*(start)
start+=1
end+=1
return [start,end]
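# Worked example of the 32x32 action encoding above (values checked by hand):
# moveToAction([1, 5]) == (1-1)*32 + (5-1) == 4, and actionToMove(4) recovers
# [1, 5], so the two helpers are inverses over the 32 playable squares.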
def coordToPos(self, coord):
r = coord[0]
c = coord[1]
if not c==0:
c = int(c/2)
return r*4+c
def posToCoord(self, pos):
r = int(pos/4)
if r%2==0:
c = 2*(pos%4)+1
else:
c = 2*(pos%4)
return (r,c)
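# Worked example of the position-to-coordinate mapping above: checkers pieces
# occupy only the dark squares, so pos 0 (row 0, even) maps to (0, 1), while
# pos 5 (row 1, odd) maps to (1, 2) -- the column offset alternates per row.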
def moveToCoords(self,move):
start = move[0]
end = move[1]
return (self.posToCoord(start),self.posToCoord(end))
def curPlayer(self):
return self.b.curPlayer()
def display(board):
n = 8
king = 2
for x in range(n):
print(x, "|", end="")  # column indices header
print("")
print(" -----------------------")
for y in range(n):
print(y, "|",end="") # print the row #
for x in range(n):
piece = board[y][x]
if piece == -1: print("R ",end="")
elif piece == 1: print("B ",end="")
elif piece == -1*king: print("RK",end="")
elif piece == king: print("BK",end="")
else:
if x==n-1:  # last column prints without the trailing space
print("-",end="")
else:
print("- ",end="")
print("|")
print(" -----------------------")
| 28.724771 | 106 | 0.504471 | 740 | 6,262 | 4.263514 | 0.258108 | 0.01458 | 0.019017 | 0.022821 | 0.136926 | 0.128368 | 0.128368 | 0.125832 | 0.125832 | 0.10935 | 0 | 0.024541 | 0.381827 | 6,262 | 217 | 107 | 28.857143 | 0.790493 | 0.372565 | 0 | 0.18 | 0 | 0 | 0.019087 | 0.013719 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 | false | 0 | 0.02 | 0.02 | 0.36 | 0.14 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a43d19348986e3c1c65fbf4c94c24cdc6afc564 | 792 | py | Python | app.py | danemacaulay/trending-topics-app | 8097414b6e7fd93e92ba2fe7f2a6046be0520d35 | [
"MIT"
] | null | null | null | app.py | danemacaulay/trending-topics-app | 8097414b6e7fd93e92ba2fe7f2a6046be0520d35 | [
"MIT"
] | null | null | null | app.py | danemacaulay/trending-topics-app | 8097414b6e7fd93e92ba2fe7f2a6046be0520d35 | [
"MIT"
] | null | null | null | import os
import json
from flask import Flask, render_template, request, redirect, url_for, Response
from db import Tweet
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/services/tweets', methods=['GET'])
def get_tweets():
page = int(request.args.get('page'))
limit = int(request.args.get('limit'))
tweets_models = Tweet.select().order_by(Tweet.date.desc()).paginate(page, limit)
tweets = [Tweet.to_dict(a)['tweet_data'] for a in tweets_models]
body = {
'tweets': tweets,
}
return Response(json.dumps(body), mimetype='application/json')
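# Example request against the endpoint above (host/port are whatever the app
# runs on; both query parameters are required, since no defaults are set):
#   GET /services/tweets?page=1&limit=20
# returns a JSON body of the form {"tweets": [...]}.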
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
print('http://0.0.0.0:' + str(port))
app.run(host='0.0.0.0', port=port, debug=True)
| 28.285714 | 82 | 0.683081 | 118 | 792 | 4.40678 | 0.491525 | 0.023077 | 0.023077 | 0.065385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017417 | 0.130051 | 792 | 27 | 83 | 29.333333 | 0.7373 | 0 | 0 | 0 | 0 | 0 | 0.136536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0.045455 | 0.363636 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a498052d3719344055b6f13d42d40b6fa1ffabe | 1,088 | py | Python | uk_postcode_validator/tests.py | jayakrishnandingit/uk_postcode_validator | edae0a464708cba340cd9f4834ba25e093ee8398 | [
"MIT"
] | null | null | null | uk_postcode_validator/tests.py | jayakrishnandingit/uk_postcode_validator | edae0a464708cba340cd9f4834ba25e093ee8398 | [
"MIT"
] | null | null | null | uk_postcode_validator/tests.py | jayakrishnandingit/uk_postcode_validator | edae0a464708cba340cd9f4834ba25e093ee8398 | [
"MIT"
] | null | null | null | import unittest
from uk_postcode_validator.client import PostCodeClient, ValidationError
class PostCodeTest(unittest.TestCase):
def test_invalid_postcode_validation_fails(self):
client = PostCodeClient()
with self.assertRaises(ValidationError) as e:
client.validate('695001')
def test_valid_postcode_is_success(self):
client = PostCodeClient()
response = client.validate('M1 1AE')
self.assertEqual(response, 'M1 1AE')
def test_case_insensitive_validation_is_success(self):
client = PostCodeClient()
response = client.validate('ox495nu')
self.assertEqual(response, 'OX49 5NU')
def test_formatting_invalid_postcode_fails(self):
client = PostCodeClient()
with self.assertRaises(ValidationError) as e:
client.format_code('695001')
def test_formatting_valid_postcode_is_success(self):
client = PostCodeClient()
response = client.format_code('OX495NU')
self.assertEqual(response, 'OX49 5NU')
if __name__ == '__main__':
unittest.main()
| 32.969697 | 72 | 0.701287 | 114 | 1,088 | 6.394737 | 0.368421 | 0.048011 | 0.164609 | 0.078189 | 0.552812 | 0.552812 | 0.451303 | 0.451303 | 0.364883 | 0.200274 | 0 | 0.03252 | 0.20864 | 1,088 | 32 | 73 | 34 | 0.81417 | 0 | 0 | 0.36 | 0 | 0 | 0.056985 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.2 | false | 0 | 0.08 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a49806f9aa16f33f9b25eccb101ab24853f3c82 | 7,764 | py | Python | Samsung/benchmarks/bert/implementations/pytorch/schedulers.py | gglin001/training_results_v1.1 | 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24 | [
"Apache-2.0"
] | 6 | 2021-10-23T00:34:22.000Z | 2022-02-10T09:33:59.000Z | schedulers.py | SAITPublic/MLPerf_Training_v1.1 | 3f00b82dcaa1c42078c547e0f2ed4aecbcad3277 | [
"Apache-2.0"
] | null | null | null | schedulers.py | SAITPublic/MLPerf_Training_v1.1 | 3f00b82dcaa1c42078c547e0f2ed4aecbcad3277 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
import mlperf_logger
class LRScheduler(_LRScheduler):
def __init__(self, optimizer, last_epoch=-1):
# Check if using mixed precision training
self.mixed_training = False
base_optimizer = optimizer
# Check that optimizer param is valid
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
super(LRScheduler, self).__init__(base_optimizer, last_epoch)
def step(self, epoch=None):
# Set the current training step
# ('epoch' is used to be consistent with _LRScheduler)
if self.mixed_training:
# The assumption is that the step will be constant
state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]]
if 'step' in state_dict:
self.last_epoch = state_dict['step'] + 1
else:
self.last_epoch = 1
else:
self.last_epoch = epoch if epoch is not None else self.last_epoch + 1
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class LinearWarmUpScheduler(LRScheduler):
"""
Applies a warm up period to the learning rate.
"""
def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
self.warmup = warmup
self.total_steps = total_steps
super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch)
mlperf_logger.log_event(key=mlperf_logger.constants.OPT_LR_WARMUP_STEPS, value=total_steps * warmup, sync=False)
def get_lr(self):
progress = self.last_epoch / self.total_steps
if progress < self.warmup:
return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
else:
return [base_lr * max((progress - 1.0) / (self.warmup - 1.0), 0.) for base_lr in self.base_lrs]
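# A worked example of the schedule above (hypothetical numbers): with
# base_lr=0.01, warmup=0.1 and total_steps=100, the LR ramps linearly from 0
# up to 0.01 over the first 10 steps, then decays linearly back to 0 by step
# 100, since (progress - 1.0) / (warmup - 1.0) reaches 0 at progress == 1.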
class LinearWarmupPolyDecayScheduler(LRScheduler):
"""
Applies a warm up period to the learning rate.
"""
def __init__(self, optimizer, start_warmup_steps, warmup_steps, total_steps, end_learning_rate=0.0, degree=1.0,
last_epoch=-1):
self.num_warmup_updates = warmup_steps
self.start_warmup_steps = start_warmup_steps
self.total_steps = total_steps
self.end_learning_rate = end_learning_rate
self.degree = degree
self.last_epoch = 0
super(LinearWarmupPolyDecayScheduler, self).__init__(optimizer, last_epoch)
mlperf_logger.log_event(key=mlperf_logger.constants.OPT_LR_WARMUP_STEPS, value=self.num_warmup_updates,
sync=False)
mlperf_logger.log_event(key='opt_lamb_learning_rate_decay_poly_power', value=degree, sync=False)
mlperf_logger.log_event(key='start_warmup_step', value=self.start_warmup_steps, sync=False)
def step(self, epoch=None):
param_group = self.optimizer.param_groups[0]
if 'step' in param_group:
self.last_epoch = param_group['step'] + 1
else:
self.last_epoch += 1  # fallback: the optimizer state may not contain a 'step' entry yet
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
def get_lr(self):
if self.last_epoch > self.total_steps:
return [self.end_learning_rate for base_lr in self.base_lrs]
else:
mod_step = self.last_epoch - self.start_warmup_steps
if mod_step < self.num_warmup_updates:
progress = mod_step / self.num_warmup_updates
return [(base_lr * progress) for base_lr in self.base_lrs]
else:
progress = min((self.last_epoch - self.num_warmup_updates) / (self.total_steps - self.num_warmup_updates), 1.0)
return [(base_lr - self.end_learning_rate) * (1 - progress) ** self.degree + self.end_learning_rate
for base_lr in self.base_lrs]
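# A worked example of the decay branch above (hypothetical numbers): with
# base_lr=0.01, end_learning_rate=0.0 and degree=1.0, the LR falls linearly
# from 0.01 to 0.0 as progress goes from 0 to 1; degree=2.0 would instead
# give a quadratic (1 - progress)**2 falloff toward end_learning_rate.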
class LinearWarmupPolyDecayScheduler2(object):
def __init__(self, optimizer, start_warmup_steps, warmup_steps, total_steps, end_learning_rate=0.0, degree=1.0,
last_epoch=-1):
super(LinearWarmupPolyDecayScheduler2, self).__init__()
self.num_warmup_updates = warmup_steps
self.start_warmup_steps = start_warmup_steps
self.total_steps = total_steps
self.end_learning_rate = end_learning_rate
self.degree = degree
mlperf_logger.log_event(key=mlperf_logger.constants.OPT_LR_WARMUP_STEPS, value=self.num_warmup_updates,
sync=False)
mlperf_logger.log_event(key='opt_lamb_learning_rate_decay_poly_power', value=degree, sync=False)
mlperf_logger.log_event(key='start_warmup_step', value=self.start_warmup_steps, sync=False)
self.optimizer = optimizer
self.last_epoch = 0
self.base_lrs = [group['lr'] for group in optimizer.param_groups]
def step(self, epoch=None):
self.last_epoch = self.last_epoch + 1
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
def get_lr(self):
mod_step = self.last_epoch - self.start_warmup_steps
if mod_step < self.num_warmup_updates:
progress = mod_step / self.num_warmup_updates
return [(base_lr * progress) for base_lr in self.base_lrs]
else:
progress = min((self.last_epoch - self.num_warmup_updates)
/ (self.total_steps - self.num_warmup_updates), 1.0)
return [(base_lr - self.end_learning_rate) * (1 - progress) ** self.degree + self.end_learning_rate
for base_lr in self.base_lrs]
if __name__ == "__main__":
import torch
import matplotlib.pyplot as plt
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(3, 1)
def forward(self, x):
return self.linear(x)
model = Model()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = LinearWarmupPolyDecayScheduler(optimizer, start_warmup_steps=-34, warmup_steps=100, total_steps=500,
end_learning_rate=0.0, degree=1.0)
scheduler_name = scheduler.__class__.__name__
num_epoch = 500
lr_list = []
optimizer.param_groups[0]['step'] = 0
for epoch in range(num_epoch):
lr = optimizer.param_groups[0]['lr']
print('epoch: {:3d}, lr: {:.8f}'.format(epoch, lr))
lr_list.append(lr)
scheduler.step()
optimizer.param_groups[0]['step'] += 1
plt.title(scheduler_name)
plt.xticks(range(0, num_epoch, 1))
plt.ylim(0, optimizer.defaults['lr'] + optimizer.defaults['lr'] / 10)
plt.plot(lr_list)
# plt.autoscale()
plt.savefig('lr_scheduler.png')
| 41.297872 | 127 | 0.661257 | 1,030 | 7,764 | 4.705825 | 0.193204 | 0.042707 | 0.042913 | 0.049515 | 0.518052 | 0.475758 | 0.464617 | 0.455127 | 0.437178 | 0.437178 | 0 | 0.013659 | 0.245621 | 7,764 | 187 | 128 | 41.518717 | 0.813898 | 0.131504 | 0 | 0.453125 | 0 | 0 | 0.034065 | 0.011654 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.054688 | 0.007813 | 0.25 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a4a281b70d3d12924dcef9d93aee0ffda8acbdb | 2,061 | py | Python | routelift_api/push_notifications/views.py | BitMask-Technologies/route-lift-api | 7ac78c6cce523fc5a3852dca7b289fe3a5f3afa8 | [
"MIT"
] | null | null | null | routelift_api/push_notifications/views.py | BitMask-Technologies/route-lift-api | 7ac78c6cce523fc5a3852dca7b289fe3a5f3afa8 | [
"MIT"
] | 7 | 2021-06-24T16:12:09.000Z | 2021-08-05T16:09:22.000Z | routelift_api/push_notifications/views.py | BitMask-Technologies/route-lift-api | 7ac78c6cce523fc5a3852dca7b289fe3a5f3afa8 | [
"MIT"
] | null | null | null | import time
import requests
from django.conf import settings
from fcm_django.models import FCMDevice
from api_utils.views import (sms_notifier, successResponse, )
# import the logging library
import logging
from firebase_admin.messaging import Message, Notification
logger = logging.getLogger(__name__)  # get an instance of a logger
# Create your views here.
def send_push_notifications(title, short_text, image, full_text, device_id):
"""send push notification to all, single or list of devices"""
try:
if device_id:
if len(device_id) > 1:
device = FCMDevice.objects.filter(device_id__in=device_id)
else:
device = FCMDevice.objects.filter(device_id=device_id)
else:
device = FCMDevice.objects.all()
message = Message(
notification=Notification(title=title, body=short_text, image=image),
topic=full_text,
)
send = device.send_message(message)  # send the built message, not the Message class
return send.response.responses
except Exception:
time.sleep(3)
return send_push_notifications(title, short_text, image, full_text, device_id)
def sendPushNotification(title, msg, uniqueId):
try:
req = requests.post("https://fcm.googleapis.com/fcm/send", headers={'Authorization': f'{settings.CLOUD_MESSAGE_KEY}'},
json={"to": f"/topics/{uniqueId}", "notification": {"body": f"{msg}", "title": f"{title}"}})
response = req.json()
if req.status_code != 200:
logger.error(req.status_code)
return response.get('error'), 'failed'
return response['message_id'], 'success'
except requests.exceptions.RequestException:
time.sleep(3)
return sendPushNotification(title, msg, uniqueId)
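# e.g. sendPushNotification("Hello", "Test body", "order-42") publishes to the
# FCM topic "/topics/order-42" and returns (message_id, 'success') on HTTP 200,
# or (error, 'failed') otherwise.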
def test_sms(request):
res = sms_notifier(['08111333971', '08170838856', '08034279434', '08137363621', '+2348076464134', '08065064924'],
"TESTING BULK SMS BY FEMI third sms y me")
return successResponse(message="DONE", body=res)
| 34.932203 | 126 | 0.657933 | 240 | 2,061 | 5.5 | 0.454167 | 0.048485 | 0.031818 | 0.039394 | 0.174242 | 0.174242 | 0.084848 | 0.084848 | 0.084848 | 0.084848 | 0 | 0.046717 | 0.231441 | 2,061 | 58 | 127 | 35.534483 | 0.786616 | 0.052402 | 0 | 0.142857 | 0 | 0 | 0.140617 | 0.014637 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.166667 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a4b2ec41462983c82bebfc79eda1bd4d17fa3cc | 509 | py | Python | solutions/Palindrome Linked List/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | 3 | 2021-06-06T22:03:15.000Z | 2021-06-08T08:49:04.000Z | solutions/Palindrome Linked List/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | null | null | null | solutions/Palindrome Linked List/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
node = head
n = 0
ans = []
while node is not None:
n += 1
ans.append(node.val)
node = node.next
for i in range(n//2):
if ans[i] != ans[n - i - 1]:
return False
return True
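# Example trace (hypothetical list): for 1 -> 2 -> 2 -> 1, ans becomes
# [1, 2, 2, 1]; the loop compares ans[0] with ans[3] and ans[1] with ans[2],
# so isPalindrome returns True in O(n) time with O(n) extra space.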
| 25.45 | 51 | 0.473477 | 62 | 509 | 3.822581 | 0.516129 | 0.059072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 0.410609 | 509 | 19 | 52 | 26.789474 | 0.773333 | 0.275049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a4c5517106d23699ccd8e477b8c26f81d1b7c90 | 2,274 | py | Python | airbyte-integrations/connectors/source-paystack/source_paystack/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2022-03-02T13:46:05.000Z | 2022-03-05T12:31:28.000Z | airbyte-integrations/connectors/source-paystack/source_paystack/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 29 | 2021-10-07T17:20:29.000Z | 2021-12-27T13:07:09.000Z | airbyte-integrations/connectors/source-paystack/source_paystack/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2021-04-28T15:15:37.000Z | 2022-03-28T17:32:15.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Tuple
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from source_paystack.streams import Customers, Disputes, Invoices, Refunds, Settlements, Subscriptions, Transactions, Transfers
class SourcePaystack(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, any]:
"""
Check connection by fetching customers
:param config: the user-input config object conforming to the connector's spec.json
:param logger: logger object
:return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
"""
try:
authenticator = TokenAuthenticator(token=config["secret_key"])
stream = Customers(authenticator=authenticator, start_date=config["start_date"])
records = stream.read_records(sync_mode=SyncMode.full_refresh)
next(records)
return True, None
except StopIteration:
# there are no records, but connection was fine
return True, None
except Exception as e:
return False, repr(e)
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""
Returns list of streams output by the Paystack source connector
:param config: A Mapping of the user input configuration as defined in the connector spec.
"""
authenticator = TokenAuthenticator(config["secret_key"])
args = {"authenticator": authenticator, "start_date": config["start_date"]}
incremental_args = {**args, "lookback_window_days": config.get("lookback_window_days")}
return [
Customers(**incremental_args),
Disputes(**incremental_args),
Invoices(**incremental_args),
Refunds(**incremental_args),
Settlements(**incremental_args),
Subscriptions(**incremental_args),
Transactions(**incremental_args),
Transfers(**incremental_args),
]
| 40.607143 | 140 | 0.671064 | 247 | 2,274 | 6.064777 | 0.445344 | 0.09012 | 0.037383 | 0.042056 | 0.104139 | 0.066756 | 0.066756 | 0 | 0 | 0 | 0 | 0.002316 | 0.240545 | 2,274 | 55 | 141 | 41.345455 | 0.865084 | 0.239666 | 0 | 0.0625 | 0 | 0 | 0.062538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a4f7367f94953de35fe5e3d42c522bd77c27dd3 | 2,962 | py | Python | tests/unit/conversation/attachments/test_cache.py | OneRainbowDev/django-machina | 7354cc50f58dcbe49eecce7e1f019f6fff21d690 | [
"BSD-3-Clause"
] | 1 | 2019-01-07T19:18:56.000Z | 2019-01-07T19:18:56.000Z | tests/unit/conversation/attachments/test_cache.py | OneRainbowDev/django-machina | 7354cc50f58dcbe49eecce7e1f019f6fff21d690 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/conversation/attachments/test_cache.py | OneRainbowDev/django-machina | 7354cc50f58dcbe49eecce7e1f019f6fff21d690 | [
"BSD-3-Clause"
] | 1 | 2019-04-20T05:26:27.000Z | 2019-04-20T05:26:27.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.utils.encoding import force_bytes
from machina.apps.forum_conversation.forum_attachments.cache import cache
from machina.conf import settings as machina_settings
@pytest.mark.django_db
class TestAttachmentCache(object):
def test_should_raise_at_import_if_the_cache_backend_is_not_configured(self):
# Run & check
machina_settings.ATTACHMENT_CACHE_NAME = 'dummy'
with pytest.raises(ImproperlyConfigured):
from machina.apps.forum_conversation.forum_attachments.cache import AttachmentCache
AttachmentCache()
machina_settings.ATTACHMENT_CACHE_NAME = 'machina_attachments'
def test_is_able_to_store_the_state_of_request_files(self):
# Setup
f1 = SimpleUploadedFile('file1.txt', force_bytes('file_content_1'))
f2 = SimpleUploadedFile('file2.txt', force_bytes('file_content_2_long'))
f2.charset = 'iso-8859-1'
files = {'f1': f1, 'f2': f2}
real_cache = cache.get_backend()
# Run
cache.set('mykey', files)
states = real_cache.get('mykey')
# Check
assert states['f1']['name'] == 'file1.txt'
assert states['f1']['content'] == force_bytes('file_content_1')
assert states['f1']['charset'] is None
assert states['f1']['content_type'] == 'text/plain'
assert states['f1']['size'] == 14
assert states['f2']['name'] == 'file2.txt'
assert states['f2']['content'] == force_bytes('file_content_2_long')
assert states['f2']['charset'] == 'iso-8859-1'
assert states['f2']['content_type'] == 'text/plain'
assert states['f2']['size'] == 19
def test_is_able_to_regenerate_the_request_files_dict(self):
# Setup
original_f1 = SimpleUploadedFile('file1.txt', force_bytes('file_content_1'))
original_f2 = SimpleUploadedFile('file2.txt', force_bytes('file_content_2_long' * 300000))
original_f2.charset = 'iso-8859-1'
original_files = {'f1': original_f1, 'f2': original_f2}
cache.set('mykey', original_files)
# Run
files = cache.get('mykey')
assert 'f1' in files
assert 'f2' in files
f1 = files['f1']
f2 = files['f2']
assert isinstance(f1, InMemoryUploadedFile)
assert f1.name == 'file1.txt'
assert f1.file.read() == force_bytes('file_content_1')
assert isinstance(f2, TemporaryUploadedFile) # because of the size of the content of f2
assert f2.name == 'file2.txt'
assert f2.file.read() == force_bytes('file_content_2_long' * 300000)
assert f2.charset == 'iso-8859-1'
| 43.558824 | 98 | 0.675557 | 364 | 2,962 | 5.258242 | 0.266484 | 0.062696 | 0.058516 | 0.087774 | 0.468652 | 0.332811 | 0.190178 | 0.170324 | 0.170324 | 0.056426 | 0 | 0.038625 | 0.204591 | 2,962 | 67 | 99 | 44.208955 | 0.773769 | 0.033761 | 0 | 0 | 0 | 0 | 0.144409 | 0 | 0 | 0 | 0 | 0 | 0.358491 | 1 | 0.056604 | false | 0 | 0.207547 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a531ffff6a435822d64c896900ea509b9532c87 | 7,656 | py | Python | blackboxopt/optimizers/testing.py | boschresearch/blackboxopt | 85abea86f01a4a9d50f05d15e7d850e3288baafd | [
"ECL-2.0",
"Apache-2.0"
] | 8 | 2021-07-05T13:37:22.000Z | 2022-03-11T12:23:27.000Z | blackboxopt/optimizers/testing.py | boschresearch/blackboxopt | 85abea86f01a4a9d50f05d15e7d850e3288baafd | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2021-07-07T13:55:23.000Z | 2022-02-07T13:09:01.000Z | blackboxopt/optimizers/testing.py | boschresearch/blackboxopt | 85abea86f01a4a9d50f05d15e7d850e3288baafd | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/blackboxopt
#
# SPDX-License-Identifier: Apache-2.0
"""Tests that can be imported and used to test optimizer implementations against this
packages blackbox optimizer interface."""
from typing import List
import parameterspace as ps
from blackboxopt import Objective, ObjectivesError, OptimizationComplete, Optimizer
from blackboxopt.base import (
EvaluationsError,
MultiObjectiveOptimizer,
SingleObjectiveOptimizer,
)
def _initialize_optimizer(
optimizer_class,
optimizer_kwargs: dict,
objective: Objective,
objectives: List[Objective],
seed=42,
) -> Optimizer:
space = ps.ParameterSpace()
space.add(ps.IntegerParameter("p1", bounds=[1, 32], transformation="log"))
space.add(ps.ContinuousParameter("p2", [-2, 2]))
space.add(ps.ContinuousParameter("p3", [0, 1]))
space.add(ps.CategoricalParameter("p4", [True, False]))
if issubclass(optimizer_class, MultiObjectiveOptimizer):
return optimizer_class(space, objectives, seed=seed, **optimizer_kwargs)
if issubclass(optimizer_class, SingleObjectiveOptimizer):
return optimizer_class(space, objective, seed=seed, **optimizer_kwargs)
return optimizer_class(space, seed=seed, **optimizer_kwargs)
def optimize_single_parameter_sequentially_for_n_max_evaluations(
optimizer_class, optimizer_kwargs: dict, n_max_evaluations: int = 20
) -> bool:
"""[summary]
Args:
optimizer_class: [description]
optimizer_kwargs: [description]
n_max_evaluations: [description]
Returns:
[description]
"""
def quadratic_function(p1):
return p1 ** 2
assert issubclass(optimizer_class, Optimizer), (
"The default test suite is only applicable for implementations of "
"blackboxopt.base.Optimizer"
)
optimizer = _initialize_optimizer(
optimizer_class,
optimizer_kwargs,
objective=Objective("loss", False),
objectives=[Objective("loss", False), Objective("score", True)],
)
eval_spec = optimizer.generate_evaluation_specification()
if issubclass(optimizer_class, MultiObjectiveOptimizer):
evaluation = eval_spec.create_evaluation(
objectives={"loss": None, "score": None}
)
else:
evaluation = eval_spec.create_evaluation(objectives={"loss": None})
optimizer.report(evaluation)
for _ in range(n_max_evaluations):
try:
eval_spec = optimizer.generate_evaluation_specification()
except OptimizationComplete:
break
loss = quadratic_function(p1=eval_spec.configuration["p1"])
if issubclass(optimizer_class, MultiObjectiveOptimizer):
evaluation_result = {"loss": loss, "score": -loss}
else:
evaluation_result = {"loss": loss}
evaluation = eval_spec.create_evaluation(objectives=evaluation_result)
optimizer.report(evaluation)
return True
def is_deterministic_with_fixed_seed(optimizer_class, optimizer_kwargs: dict) -> bool:
"""Check if optimizer is deterministic.
Repeatedly initialize the optimizer with the same parameter space and a fixed seed,
get an evaluation specification, report a placeholder result and get another
evaluation specification. The configuration of all final evaluation specifications
should be equal.
Args:
optimizer_class: Optimizer to test.
optimizer_kwargs: Expected to contain additional arguments for initializing
the optimizer. (`search_space` and `objective(s)` are set automatically
by the test.)
Returns:
`True` if the test is passed.
"""
final_configurations = []
for _ in range(2):
opt = _initialize_optimizer(
optimizer_class,
optimizer_kwargs,
objective=Objective("loss", False),
objectives=[Objective("loss", False)],
)
es1 = opt.generate_evaluation_specification()
evaluation1 = es1.create_evaluation(objectives={"loss": 0.42})
opt.report(evaluation1)
es2 = opt.generate_evaluation_specification()
final_configurations.append(es2.configuration.copy())
assert final_configurations[0] == final_configurations[1]
return True
def handles_reporting_evaluations_list(optimizer_class, optimizer_kwargs: dict) -> bool:
"""Check if optimizer's report method can process an iterable of evalutions.
All optimizers should be able to allow reporting batches of evalutions. It's up to
the optimizer's implementation, if evaluations in a batch are processed
one by one like if they were reported individually, or if a batch is handled
differently.
Args:
optimizer_class: Optimizer to test.
optimizer_kwargs: Expected to contain additional arguments for initializing
the optimizer. (`search_space` and `objective(s)` are set automatically
by the test.)
Returns:
`True` if the test is passed.
"""
opt = _initialize_optimizer(
optimizer_class,
optimizer_kwargs,
objective=Objective("loss", False),
objectives=[Objective("loss", False)],
)
evaluations = []
for _ in range(3):
es = opt.generate_evaluation_specification()
evaluation = es.create_evaluation(objectives={"loss": 0.42})
evaluations.append(evaluation)
opt.report(evaluations)
return True
def raises_evaluation_error_when_reporting_unknown_objective(
optimizer_class, optimizer_kwargs: dict
) -> bool:
"""Check if optimizer's report method raises exception in case objective is unknown.
Also make sure that the faulty evaluations (and only those) are included in the
exception.
Args:
optimizer_class: Optimizer to test.
optimizer_kwargs: Expected to contain additional arguments for initializing
the optimizer. (`search_space` and `objective(s)` are set automatically
by the test.)
Returns:
`True` if the test is passed.
"""
opt = _initialize_optimizer(
optimizer_class,
optimizer_kwargs,
objective=Objective("loss", False),
objectives=[Objective("loss", False)],
)
es_1 = opt.generate_evaluation_specification()
es_2 = opt.generate_evaluation_specification()
es_3 = opt.generate_evaluation_specification()
# NOTE: The following is not using pytest.raises because this would add pytest as
# a regular dependency to blackboxopt.
try:
evaluation_1 = es_1.create_evaluation(objectives={"loss": 1})
evaluation_2 = es_2.create_evaluation(objectives={"unknown_objective": 2})
evaluation_3 = es_3.create_evaluation(objectives={"loss": 4})
opt.report([evaluation_1, evaluation_2, evaluation_3])
raise AssertionError(
f"Optimizer {optimizer_class} did not raise an ObjectivesError when a "
+ "result including an unknown objective name was reported."
)
except EvaluationsError as exception:
invalid_evaluations = [e for e, _ in exception.evaluations_with_errors]
assert len(invalid_evaluations) == 1
assert evaluation_2 in invalid_evaluations
return True
ALL_REFERENCE_TESTS = [
optimize_single_parameter_sequentially_for_n_max_evaluations,
is_deterministic_with_fixed_seed,
handles_reporting_evaluations_list,
raises_evaluation_error_when_reporting_unknown_objective,
]
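# A minimal usage sketch (MyOptimizer and its kwargs are hypothetical): run
# every reference test against an optimizer implementation, e.g.
#
#   for reference_test in ALL_REFERENCE_TESTS:
#       assert reference_test(MyOptimizer, {})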
| 33.876106 | 88 | 0.699451 | 844 | 7,656 | 6.154028 | 0.257109 | 0.059299 | 0.057566 | 0.05025 | 0.398729 | 0.357528 | 0.287832 | 0.268579 | 0.228148 | 0.217944 | 0 | 0.00987 | 0.219175 | 7,656 | 225 | 89 | 34.026667 | 0.858983 | 0.288662 | 0 | 0.292683 | 0 | 0 | 0.06175 | 0.004955 | 0 | 0 | 0 | 0 | 0.04065 | 1 | 0.04878 | false | 0 | 0.03252 | 0.00813 | 0.146341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a542dda121b25fca9d2bfa2ec0a112af0a94fc3 | 3,108 | py | Python | recoverGoogAuth.py | ClashTheBunny/googAuthRecover | 44618fc4f562d1f999b5e1cfb568ac53415ee8aa | [
"MIT"
] | 1 | 2021-01-23T15:21:34.000Z | 2021-01-23T15:21:34.000Z | recoverGoogAuth.py | ClashTheBunny/googAuthRecover | 44618fc4f562d1f999b5e1cfb568ac53415ee8aa | [
"MIT"
] | null | null | null | recoverGoogAuth.py | ClashTheBunny/googAuthRecover | 44618fc4f562d1f999b5e1cfb568ac53415ee8aa | [
"MIT"
] | 1 | 2021-01-23T15:21:36.000Z | 2021-01-23T15:21:36.000Z | #!/usr/bin/python3
import json
import base64
import xmltodict as xd
import qrcode
from termcolor import colored, cprint
import binascii
import sys
import tarfile
import io
import pprint as pp
import subprocess
import urllib
import os
filenameSwitcher = {".gz": "data/data/org.fedorahosted.freeotp/./shared_prefs/tokens.xml",
".ab": "apps/org.fedorahosted.freeotp/sp/tokens.xml"} # I'm actually not sure what it is in the android backup, probably needs a prefix.
def getTarObjectFromABBackup(filename):
abHeaderReplacement = b"\x1f\x8b\x08\x00\x00\x00\x00\x00"
ab = open(filename, 'rb')
ab.seek(24)
abf = abHeaderReplacement + ab.read()
tar = open(filename[:-3] + ".tar.gz", 'wb')
tar.write(abf)
tar.close()
return tarfile.open(filename[:-3] + ".tar.gz")
def getTarObjectFromTarBackup(filename):
print(filename)
return tarfile.open(filename)
def extractXMLFileFromTarObject(tarObj,backupExtension):
return tarObj.extractfile(filenameSwitcher[backupExtension])
backupFile = sys.argv[1]
backupExtension = sys.argv[1][-3:]
extensionSwitcher = {".ab": getTarObjectFromABBackup,
".gz": getTarObjectFromTarBackup}
xmlFH = extractXMLFileFromTarObject(extensionSwitcher[backupExtension](backupFile), backupExtension)
xmldata = xd.parse(xmlFH)
data = [json.loads(x['#text']) for x in xmldata['map']['string']]
for datum in data:
pp.pprint(datum)
if type(datum) == list:
continue
# issuerExt = ''
# issuerExtTemplate = ''
# if 'issuerExt' in datum and len(datum['issuerExt']) > 0:
# issuerExt=datum['issuerExt']
# issuerExtTemplate = '?issuer={issuerExt}'
issuerInt = ''
issuerIntTemplate = ''
if 'issuerInt' in datum and len(datum['issuerInt']) > 0:
issuerInt=datum['issuerInt']
issuerIntTemplate = '{issuerInt}:'
secret=str(base64.b32encode( b''.join([x.to_bytes(1,'big',signed=True) for x in datum['secret']])))[2:-1]
codeTextTempl = "otpauth://{type}/" + issuerIntTemplate + "{label}?secret={secret}" # + issuerExtTemplate
codeText = codeTextTempl.format(type=datum['type'].lower(),label=datum['label'],secret=secret,issuerInt=issuerInt) #,issuerExt=issuerExt)
qrfactory = qrcode.QRCode(box_size=1)
qrfactory.add_data(codeText)
qrfactory.make(fit=True)
img = qrfactory.make_image()
pixels = list(img.getdata())
pixMap = {0: "", 255: "reverse"}
for pixel, value in enumerate(pixels):
if pixel % img.height == 0:
print()
if value == 0:
text = colored(" ", "white", attrs=[])
else:
text = colored(" ", "white", attrs=["reverse"])
print(text,end="")
print()
print(codeText.replace(" ", "%20"))
if os.environ.get('PASSWORD_STORE_DIR'):
env={"PASSWORD_STORE_DIR": os.environ.get('PASSWORD_STORE_DIR')}
else:
env={}
subprocess.run("pass otp insert " + issuerIntTemplate.format(issuerInt=issuerInt).replace(":","/") + datum['label'], env=env, shell=True, input=codeText.replace(" ", "%20"), encoding='ascii')
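# Usage sketch (file names are hypothetical): the script takes one argument,
# either an adb backup ("python3 recoverGoogAuth.py backup.ab") or an
# already-extracted tarball ("python3 recoverGoogAuth.py tokens.tar.gz"),
# prints each token as a scannable QR code, and pipes the otpauth:// URI
# into "pass otp insert".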
| 35.724138 | 195 | 0.661519 | 356 | 3,108 | 5.744382 | 0.44382 | 0.011736 | 0.013203 | 0.011736 | 0.062592 | 0.027384 | 0 | 0 | 0 | 0 | 0 | 0.017364 | 0.184685 | 3,108 | 86 | 196 | 36.139535 | 0.789661 | 0.100708 | 0 | 0.057971 | 0 | 0 | 0.141421 | 0.056712 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.043478 | 0.188406 | 0.014493 | 0.275362 | 0.115942 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a57bfd87267a69d044a651546d34c067716feab | 501 | py | Python | ispyb/exception.py | rjgildea/ispyb-api | 24e76c79ef06c7cc8edc35066aa1021dee20d67a | [
"Apache-2.0"
] | null | null | null | ispyb/exception.py | rjgildea/ispyb-api | 24e76c79ef06c7cc8edc35066aa1021dee20d67a | [
"Apache-2.0"
] | null | null | null | ispyb/exception.py | rjgildea/ispyb-api | 24e76c79ef06c7cc8edc35066aa1021dee20d67a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
import warnings
import ispyb
warnings.warn(
"ispyb.exceptions is deprecated and will be removed in the next release. Use the exceptions underneath ispyb. instead.",
DeprecationWarning,
)
ISPyBException = ispyb.ISPyBException
ISPyBConnectionException = ispyb.ConnectionError
ISPyBNoResultException = ispyb.NoResult
ISPyBWriteFailed = ispyb.ReadWriteError
ISPyBRetrieveFailed = ispyb.ReadWriteError
ISPyBKeyProblem = KeyError
| 27.833333 | 124 | 0.832335 | 50 | 501 | 8.22 | 0.7 | 0.092457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11976 | 501 | 17 | 125 | 29.470588 | 0.931973 | 0 | 0 | 0 | 0 | 0.076923 | 0.233533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a59ddd3dfb2d4bff530b79bbfdc4faea8f9b142 | 8,122 | py | Python | src/swell/tasks/base/config.py | danholdaway/swell | 6817b70686d777cb88e418ec01885a7df2788e67 | [
"Apache-2.0"
] | 4 | 2022-02-08T01:24:05.000Z | 2022-03-08T19:14:18.000Z | src/swell/tasks/base/config.py | danholdaway/swell | 6817b70686d777cb88e418ec01885a7df2788e67 | [
"Apache-2.0"
] | 8 | 2022-02-01T16:36:14.000Z | 2022-03-31T19:41:06.000Z | src/swell/tasks/base/config.py | danholdaway/swell | 6817b70686d777cb88e418ec01885a7df2788e67 | [
"Apache-2.0"
] | 1 | 2022-03-06T22:47:58.000Z | 2022-03-06T22:47:58.000Z | # (C) Copyright 2021-2022 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# --------------------------------------------------------------------------------------------------
import copy
import datetime as pydatetime
import glob
import isodate
import os
import string
import re
import json
import yaml
from swell.utilities.string_utils import replace_vars
# --------------------------------------------------------------------------------------------------
# @package configuration
#
# Class containing the configuration. This is a dictionary that is converted from
# an input yaml configuration file. Various function are included for interacting with the
# dictionary.
#
# --------------------------------------------------------------------------------------------------
class Config(dict):
"""Provides methods for reading YAML files and managing configuration
parameters.
Attributes
----------
self : dict
YAML definitions
defs : dict
Root-level YAML, environment and cycle dependent parameters
Methods
-------
__init__(inputs):
Reads in YAML files.
define(cycle_dt):
Defines cycle/time dependent parameters.
"""
# ----------------------------------------------------------------------------------------------
def __init__(self, input, logger):
"""Reads YAML file(s) as a dictionary.
Environment definitions and root-level YAML parameters are extracted to be
used for variable interpolation within strings (see replace_vars()).
Parameters
----------
input : string, required Name of YAML file(s)
Returns
-------
config : Config, dict
Config object
"""
# Keep track of the input config file
self.input = input
# Read the configuration yaml file(s)
with open(self.input, 'r') as ymlfile:
config = yaml.safe_load(ymlfile)
# Initialize the parent class with the config
super().__init__(config)
# Standard datetime format for config
self.dt_format = "%Y-%m-%dT%H:%M:%SZ"
# Create list of definitions from top level of dictionary
self.defs = {}
self.defs.update({k: str(v) for k, v in iter(self.items())
if not isinstance(v, dict) and not isinstance(v, list)})
# Keep copy of logger
self.logger = logger
# ----------------------------------------------------------------------------------------------
def merge(self, other):
""" Merge another dictionary with self
Parameters
----------
other : dictionary, required
other dictionary to merge
"""
# Merge the other dictionary into self
self.update(other)
# Overwrite the top level definitions
self.defs.update({k: str(v) for k, v in iter(self.items())
if not isinstance(v, dict) and not isinstance(v, list)})
# ----------------------------------------------------------------------------------------------
def add_cyle_time_parameter(self, cycle_dt):
""" Add cyle time to the configuration
Parameters
----------
cycle_dt : datetime, required
Current cycle date/time as datetime object
"""
# Create new dictionary to hold cycle time
cycle_dict = {}
cycle_dict['current_cycle'] = cycle_dt.strftime(self.dt_format)
# Merge with self
self.merge(cycle_dict)
# --------------------------------------------------------------------------------------------------
def add_data_assimilation_window_parameters(self):
""" Defines cycle dependent parameters for the data assimilation window
Parameters defined by this method are needed for resolving
time-dependent variables using the replace_vars() method.
Parameters
----------
None; the current cycle time is read from the 'current_cycle' entry
previously stored by add_cyle_time_parameter().
"""
# Current cycle datetime object
current_cycle_dto = pydatetime.datetime.strptime(self.get('current_cycle'), self.dt_format)
# Type of data assimilation window (3D or 4D)
window_type = self.get('window_type', '4D')
# Extract window information and convert to duration
window_length = self.get('window_length', 'PT6H')
window_offset = self.get('window_offset', 'PT3H')
window_offset_dur = isodate.parse_duration(window_offset)
# Compute window beginning time
window_begin_dto = current_cycle_dto - window_offset_dur
# Background time for satbias files
background_time_offset = self.get('background_time_offset', 'PT9H')
background_time_offset_dur = isodate.parse_duration(background_time_offset)
background_time_dto = current_cycle_dto - background_time_offset_dur
# Background time for the window
if window_type == '4D':
local_background_time = window_begin_dto
elif window_type == '3D':
local_background_time = current_cycle_dto
else:
self.logger.abort("add_data_assimilation_window_parameters: window type must be " +
"either 4D or 3D")
# Create new dictionary with these items
window_dict = {}
window_dict['window_type'] = window_type
window_dict['window_length'] = window_length
window_dict['window_offset'] = window_offset
window_dict['window_begin'] = window_begin_dto.strftime(self.dt_format)
window_dict['background_time'] = background_time_dto.strftime(self.dt_format)
window_dict['local_background_time'] = local_background_time.strftime(self.dt_format)
# Merge with self
self.merge(window_dict)
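# A worked example of the window arithmetic above (hypothetical cycle): with
# current_cycle = 2020-01-01T12:00:00Z, window_offset = PT3H and
# background_time_offset = PT9H, window_begin resolves to 09:00Z and
# background_time to 03:00Z on the same day.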
# --------------------------------------------------------------------------------------------------
def resolve_config_file(self):
"""Resolves/interpolates all defined variables in the base configuration.
Returns
-------
d: dict
YAML dictionary with all defined variables interpolated.
"""
# Read input file as text file
with open(self.input) as f:
text = f.read()
# Replace any unresolved variables in the file
text = replace_vars(text, **self.defs)
# Return a yaml
resolved_dict = yaml.safe_load(text)
# Merge dictionary
self.merge(resolved_dict)
# ----------------------------------------------------------------------------------------------
def overlay(self, hash, override=False, root=None):
"""Combines two dictionaries.
This method recursively traverses the nodes of the dictionaries to
locate the appropriate insertion point at the leaf-nodes.
Parameters
----------
hash : dict, required
New dictionary to be added
root : dict, private
Root node to add new values. This is set during recursion.
override : boolean, optional
Indicates whether existing dictionary entries should be overwritten.
"""
if root is None:
root = self
for key in hash:
if key not in root:
if isinstance(hash[key], dict):
root[key] = copy.deepcopy(hash[key])
else:
root[key] = hash[key]
elif isinstance(hash[key], dict) and isinstance(root[key], dict):
self.overlay(hash[key], override, root[key])
else:
if override:
root[key] = hash[key]
# ----------------------------------------------------------------------------------------------
| 33.15102 | 104 | 0.54691 | 842 | 8,122 | 5.134204 | 0.288599 | 0.045339 | 0.016655 | 0.018506 | 0.145269 | 0.103632 | 0.103632 | 0.088365 | 0.088365 | 0.068934 | 0 | 0.003677 | 0.263359 | 8,122 | 244 | 105 | 33.286885 | 0.71887 | 0.486333 | 0 | 0.121622 | 0 | 0 | 0.077641 | 0.022771 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.135135 | 0 | 0.22973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a5e830fee42f76090ac917578b53f890498155d | 414 | py | Python | tests/deep-learning-from-scratch/relu.py | Fryguy/py2rb | 0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8 | [
"MIT"
] | 124 | 2017-08-19T05:37:16.000Z | 2022-03-08T18:24:18.000Z | tests/deep-learning-from-scratch/relu.py | JeMaMokuma/py2rb | 0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8 | [
"MIT"
] | 15 | 2017-12-16T05:59:31.000Z | 2022-02-08T02:51:17.000Z | tests/deep-learning-from-scratch/relu.py | JeMaMokuma/py2rb | 0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8 | [
"MIT"
] | 18 | 2017-09-25T11:57:04.000Z | 2022-02-19T17:33:48.000Z | # coding: utf-8
import numpy as np
def relu(x):
return np.maximum(0, x)
x = np.arange(-5.0, 5.0, 0.1)
y = relu(x)
def print_array(data):
datas = []
for i in data:
if float("%.3f" % abs(i)) == 0:
datas.append(float("%.3f" % abs(i)))
else:
datas.append(float("%.3f" % i))
print(datas)
print(len(x))
print_array(list(x))
print(len(y))
print_array(list(y))
| 17.25 | 48 | 0.538647 | 70 | 414 | 3.142857 | 0.457143 | 0.136364 | 0.090909 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 0.26087 | 414 | 23 | 49 | 18 | 0.679739 | 0.031401 | 0 | 0 | 0 | 0 | 0.030151 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0.058824 | 0.235294 | 0.352941 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a5ed2540e8c9ed153fae4cd52343b140239b019 | 207 | py | Python | interview-questions-python/numbers/isPalindrome.py | hassonor/core-python | 92672aa72c1474061df5247a2dd4dfd9fab1642a | [
"MIT"
] | 1 | 2022-03-09T20:58:33.000Z | 2022-03-09T20:58:33.000Z | interview-questions-python/numbers/isPalindrome.py | hassonor/core-python | 92672aa72c1474061df5247a2dd4dfd9fab1642a | [
"MIT"
] | null | null | null | interview-questions-python/numbers/isPalindrome.py | hassonor/core-python | 92672aa72c1474061df5247a2dd4dfd9fab1642a | [
"MIT"
] | null | null | null | def is_palindrome(num):
if num < 0:
return False
str_1 = str(abs(num))
str_2 = str_1[::-1] # save the reverse num
print(str_2)
return str_1 == str_2
print(is_palindrome(7107))
| 18.818182 | 47 | 0.608696 | 35 | 207 | 3.371429 | 0.485714 | 0.101695 | 0.118644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07947 | 0.270531 | 207 | 10 | 48 | 20.7 | 0.701987 | 0.096618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a609f8528aa6fa173c15d3547a798c83fb7d430 | 530 | py | Python | tests/unit/objects/test_purchaseorder.py | bayusantoso/python-intuitquickbooks | a501fd86b6ba59f5a614f36951fa08bde0d2d24a | [
"MIT"
] | null | null | null | tests/unit/objects/test_purchaseorder.py | bayusantoso/python-intuitquickbooks | a501fd86b6ba59f5a614f36951fa08bde0d2d24a | [
"MIT"
] | null | null | null | tests/unit/objects/test_purchaseorder.py | bayusantoso/python-intuitquickbooks | a501fd86b6ba59f5a614f36951fa08bde0d2d24a | [
"MIT"
] | 1 | 2020-12-07T22:21:35.000Z | 2020-12-07T22:21:35.000Z | import unittest
from intuitquickbooks import QuickBooks
from intuitquickbooks.objects.purchaseorder import PurchaseOrder
class PurchaseOrderTests(unittest.TestCase):
def test_unicode(self):
purchase_order = PurchaseOrder()
purchase_order.TotalAmt = 1000
self.assertEqual(str(purchase_order), '1000')
def test_valid_object_name(self):
obj = PurchaseOrder()
client = QuickBooks()
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
| 26.5 | 64 | 0.730189 | 55 | 530 | 6.836364 | 0.527273 | 0.103723 | 0.074468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018779 | 0.196226 | 530 | 19 | 65 | 27.894737 | 0.86385 | 0 | 0 | 0 | 0 | 0 | 0.007547 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a62c5bcbe661e679333599a17ac8df985de1b3d | 3,850 | py | Python | tests/test_proposal/test_rejection.py | mj-will/nessai | e1ccc791a332565af372d14e5986920d552e1294 | [
"MIT"
] | 16 | 2021-02-18T00:04:54.000Z | 2021-09-01T03:25:45.000Z | tests/test_proposal/test_rejection.py | mj-will/nessai | e1ccc791a332565af372d14e5986920d552e1294 | [
"MIT"
] | 59 | 2021-03-09T11:05:37.000Z | 2022-03-30T14:21:14.000Z | tests/test_proposal/test_rejection.py | mj-will/nessai | e1ccc791a332565af372d14e5986920d552e1294 | [
"MIT"
] | 1 | 2022-03-25T12:28:16.000Z | 2022-03-25T12:28:16.000Z | # -*- coding: utf-8 -*-
"""
Test the rejection proposal class.
"""
import numpy as np
import pytest
from unittest.mock import Mock, create_autospec, patch
from nessai.livepoint import numpy_array_to_live_points
from nessai.proposal import RejectionProposal
@pytest.fixture
def proposal():
return create_autospec(RejectionProposal)
def test_init(proposal):
"""Test the init method."""
with patch('nessai.proposal.rejection.AnalyticProposal.__init__') \
as mock_super:
RejectionProposal.__init__(proposal, 'model', poolsize=10, test=True)
mock_super.assert_called_once_with('model', poolsize=10, test=True)
assert proposal._checked_population is True
assert proposal.population_acceptance is None
@pytest.mark.parametrize('N', [None, 5])
def test_draw_proposal(proposal, N):
"""Assert `model.new_point` is called with the corred number of samples."""
points = np.array([1, 2])
proposal.poolsize = 10
proposal.model = Mock()
proposal.model.new_point = Mock(return_value=points)
samples = RejectionProposal.draw_proposal(proposal, N=N)
np.testing.assert_array_equal(samples, points)
if N is None:
proposal.model.new_point.assert_called_once_with(N=10)
else:
proposal.model.new_point.assert_called_once_with(N=5)
def test_log_proposal(proposal):
"""Assert the correct method from the model is called"""
x = np.array([3, 4])
log_prob = np.array([1, 2])
proposal.model = Mock()
proposal.model.new_point_log_prob = Mock(return_value=log_prob)
out = RejectionProposal.log_proposal(proposal, x)
proposal.model.new_point_log_prob.assert_called_once_with(x)
np.testing.assert_array_equal(out, log_prob)
def test_compute_weights(proposal):
"""Test the compute weights method"""
x = numpy_array_to_live_points(np.array([[1], [2], [3]]), 'x')
proposal.model = Mock()
proposal.model.log_prior = Mock(return_value=np.array([6, 6, 6]))
proposal.log_proposal = Mock(return_value=np.array([3, 4, np.nan]))
log_w = np.array([0, -1, np.nan])
out = RejectionProposal.compute_weights(proposal, x)
proposal.model.log_prior.assert_called_once_with(x)
proposal.log_proposal.assert_called_once_with(x)
np.testing.assert_array_equal(out, log_w)
@pytest.mark.parametrize('pool', [None, True])
@pytest.mark.parametrize('N', [None, 4])
def test_populate(proposal, pool, N):
"""Test the populate method"""
poolsize = 8
if N is None:
log_w = np.arange(poolsize)
else:
log_w = np.arange(N)
x = np.random.randn(log_w.size)
u = np.exp(log_w.copy() + 1)
# These points will have log_u ~ -inf so corresponding samples will be
# accepted.
u[::2] = 1e-10
samples = x[::2]
proposal.poolsize = poolsize
proposal.populated = False
proposal.draw_proposal = Mock(return_value=x)
proposal.compute_weights = Mock(return_value=log_w)
proposal.evaluate_likelihoods = Mock()
proposal.pool = pool
with patch('numpy.random.rand', return_value=u):
RejectionProposal.populate(proposal, N=N)
assert proposal.population_acceptance == 0.5
assert proposal.populated is True
np.testing.assert_array_equal(proposal.samples, samples)
if N is None:
N = poolsize
proposal.draw_proposal.assert_called_once_with(N=N)
if pool is not None:
proposal.evaluate_likelihoods.assert_called_once()
else:
proposal.evaluate_likelihoods.assert_not_called()
assert sorted(proposal.indices) == list(range(samples.size))
@pytest.mark.integration_test
def test_populate_integration(model):
"""Integration test for the populate method"""
proposal = RejectionProposal(model)
N = 500
proposal.populate(N=N)
assert proposal.samples.size == N
assert proposal.populated is True
| 33.189655 | 79 | 0.710649 | 535 | 3,850 | 4.91028 | 0.203738 | 0.054435 | 0.048725 | 0.053293 | 0.274838 | 0.113818 | 0.100495 | 0.071565 | 0.071565 | 0.039589 | 0 | 0.012579 | 0.174026 | 3,850 | 115 | 80 | 33.478261 | 0.813522 | 0.097922 | 0 | 0.13253 | 0 | 0 | 0.02476 | 0.014856 | 0 | 0 | 0 | 0 | 0.240964 | 1 | 0.084337 | false | 0 | 0.060241 | 0.012048 | 0.156627 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a63d771aef0fba54551647094cacfd2311913dd | 29,007 | py | Python | networking_oneview/ml2/drivers/oneview/common.py | HewlettPackard/networking-oneview | 21881596eeb39565a027022921d4ec4f71e3278b | [
"Apache-2.0"
] | 6 | 2018-02-02T17:37:15.000Z | 2018-09-20T14:06:16.000Z | networking_oneview/ml2/drivers/oneview/common.py | HewlettPackard/networking-oneview | 21881596eeb39565a027022921d4ec4f71e3278b | [
"Apache-2.0"
] | 18 | 2018-02-06T14:54:22.000Z | 2018-02-27T13:34:37.000Z | networking_oneview/ml2/drivers/oneview/common.py | HewlettPackard/networking-oneview | 21881596eeb39565a027022921d4ec4f71e3278b | [
"Apache-2.0"
] | 2 | 2020-04-28T14:36:12.000Z | 2020-07-22T13:09:44.000Z | # Copyright (2016-2017) Hewlett Packard Enterprise Development LP.
# Copyright (2016-2017) Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from itertools import chain
import six
from hpOneView.oneview_client import OneViewClient
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import strutils
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from networking_oneview.conf import CONF
from networking_oneview.ml2.drivers.oneview import database_manager
from networking_oneview.ml2.drivers.oneview import exceptions
MAPPING_TYPE_NONE = 0
FLAT_NET_MAPPINGS_TYPE = 1
UPLINKSET_MAPPINGS_TYPE = 2
NETWORK_TYPE_TAGGED = 'tagged'
NETWORK_TYPE_UNTAGGED = 'untagged'
ETHERNET_NETWORK_PREFIX = '/rest/ethernet-networks/'
LOG = log.getLogger(__name__)
oneview_exceptions = importutils.try_import('hpOneView.exceptions')
def get_oneview_conf():
"""Get OneView Access Configuration."""
insecure = CONF.oneview.allow_insecure_connections
ssl_certificate = CONF.oneview.tls_cacert_file
if not (insecure or ssl_certificate):
raise oneview_exceptions.HPOneViewException(
"Failed to start Networking OneView. Attempting to open secure "
"connection to OneView but CA certificate file is missing. Please "
"check your configuration file.")
if insecure:
LOG.info("Networking OneView is opening an insecure connection to "
"HPE OneView. We recommend you to configure secure "
"connections with a CA certificate file.")
if ssl_certificate:
LOG.info("Insecure connection to OneView, the CA certificate: %s "
"will be ignored." % ssl_certificate)
ssl_certificate = None
oneview_conf = {
"ip": CONF.oneview.oneview_host,
"credentials": {
"userName": CONF.oneview.username,
"password": CONF.oneview.password
},
"ssl_certificate": ssl_certificate
}
return oneview_conf
def get_oneview_client():
"""Get the OneView Client."""
LOG.debug("Creating a new OneViewClient instance.")
try:
client = OneViewClient(get_oneview_conf())
except oneview_exceptions.HPOneViewException as ex:
LOG.info("Networking OneView could not open a connection to "
"HPE OneView. Check credentials and/or CA certificate file. "
"See details on error below:\n")
raise ex
return client
def oneview_reauth(f):
def wrapper(self, *args, **kwargs):
try:
self.oneview_client.connection.get('/rest/logindomains')
except oneview_exceptions.HPOneViewException:
LOG.debug("Reauthenticating to OneView.")
oneview_conf = get_oneview_conf()
self.oneview_client.connection.login(oneview_conf["credentials"])
return f(self, *args, **kwargs)
return wrapper
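# Illustrative sketch of how the decorator is meant to be used (the class
# below is assumed, not part of this module):
#
#   class Driver(object):
#       def __init__(self):
#           self.oneview_client = get_oneview_client()
#
#       @oneview_reauth
#       def list_networks(self):
#           return self.oneview_client.ethernet_networks.get_all()
#
# Any decorated method transparently re-logs-in when the session expired.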
# Utils
def id_from_uri(uri):
if not uri:
return None
return uri.split("/")[-1]
def id_list_from_uri_list(uri_list):
return [id_from_uri(uri) for uri in uri_list]
def uplinksets_id_from_network_uplinkset_list(net_uplink_list):
return [net_uplink.oneview_uplinkset_id for net_uplink in net_uplink_list]
def get_uplinkset_by_name_from_list(uplinkset_list, uplinkset_name):
"""Get the first uplinkset from a list that matches the name.
Given a list of uplinksets, it retrieves the first uplinkset with
the same name as uplinkset_name.
:param uplinkset_list: a list of uplinksets;
:param uplinkset_name: the name of the desired uplinkset;
    :returns: An uplinkset with name uplinkset_name
:raise ElementNotFoundException: Uplinkset name not found in
Uplinkset list;
"""
try:
uplinkset_obj = next(
uplinkset for uplinkset in uplinkset_list if uplinkset.get(
'name') == uplinkset_name)
except Exception:
err_msg = (
"Uplinkset '%s' is not found in the Uplinkset List '%s'"
) % (uplinkset_name, uplinkset_list)
LOG.error(err_msg)
raise exceptions.ElementNotFoundException(err_msg)
return uplinkset_obj
def get_uplinkset_by_name_in_lig(oneview_client, lig_id, uplinkset_name):
"""Get the uplinkset in a Logical Interconnect Group with that name.
    :param oneview_client: An instantiated oneview_client
:param lig_id: The logical Interconnect Group ID
:param uplinkset_name: The name of the uplinkset to be retrieved.
:returns: The uplinkset from LIG
"""
lig = oneview_client.logical_interconnect_groups.get(lig_id)
    # Python 3 compatibility: use the next() builtin; the generator
    # .next() method was removed in Python 3.
    uplinkset = next(
        uls for uls in lig.get('uplinkSets')
        if uls.get('name') == uplinkset_name)
return uplinkset
def get_logical_interconnect_group_by_id(lig_id):
"""Get a Logical Interconnect Group Object to a given LIG id.
:param lig_id: the id of the Logical Interconnect Group;
:returns: the Logical Interconnect Group object
:raise OneViewResourceNotFoundException: If it was not possible
to retrieve LIG;
"""
oneview_client = get_oneview_client()
try:
return oneview_client.logical_interconnect_groups.get(lig_id)
except oneview_exceptions.HPOneViewException:
err_msg = (
"Could not find a 'Logical Interconnect Group' with the id '%s'"
) % lig_id
LOG.error(err_msg)
raise exceptions.OneViewResourceNotFoundException(err_msg)
def get_ethernet_network_by_id(oneview_network_id):
"""Get a Ethernet Network Object to a given Network id.
:param oneview_network_id: the id of the Ethernet Network;
:returns: the Ethernet Network object;
:raise OneViewResourceNotFoundException: If it was not possible
to retrieve the Network;
"""
oneview_client = get_oneview_client()
try:
return oneview_client.ethernet_networks.get(oneview_network_id)
except oneview_exceptions.HPOneViewException:
err_msg = (
"Could not find an 'Ethernet Network' with the id '%s'"
) % oneview_network_id
LOG.error(err_msg)
raise exceptions.OneViewResourceNotFoundException(err_msg)
def get_uplink_port_group_uris_for_ethernet_network_by_id(oneview_network_id):
"""Get Uplink Port Group URIs for a Ethernet Network by id.
:param oneview_network_id: the id of the Ethernet Network;
:returns: a list of Uplink Port Group URIs;
:raise OneViewResourceNotFoundException: If it was not possible
to retrieve the list;
"""
oneview_client = get_oneview_client()
try:
return oneview_client.ethernet_networks.get_associated_uplink_groups(
oneview_network_id)
except oneview_exceptions.HPOneViewException:
err_msg = (
"Could not find an 'Ethernet Network' with the id '%s'"
) % oneview_network_id
LOG.error(err_msg)
raise exceptions.OneViewResourceNotFoundException(err_msg)
def get_logical_interconnect_group_from_uplink(oneview_client,
uplinkset_id):
"""Get Logical Interconnect Group Object to a given uplinkset id.
:param oneview_client: a instance of the OneView Client;
:param uplinkset_id: the id of the Uplinkset;
:returns: the Logical Interconnect Group object
"""
uplinkset = oneview_client.uplink_sets.get(uplinkset_id)
logical_interconnect = oneview_client.logical_interconnects.get(
uplinkset.get('logicalInterconnectUri'))
logical_interconnect_group = (
oneview_client.logical_interconnect_groups.get(
logical_interconnect.get('logicalInterconnectGroupUri')))
return logical_interconnect_group
def load_conf_option_to_dict(key_value_option):
"""Convert the uplinkset and flat_net mappings value to a dict.
It converts the value from the Config fields uplinkset_mappings
    and/or flat_net_mappings to a dict object. The object returned
is in the format:
{
provider_from_uplinkset_mapping: ["lig_id", "uplinkset_name"],
provider_flat_net_mapping: ["oneview_network_id"]
}
:param key_value_option: A string with the mappings, in the format
provider:lig_id:uplinkset_name for uplinkset_mappings, and
provider:oneview_network_id for flat_net_mappings;
    :returns: a dict mapping each provider to its list of values
"""
key_value_dict = {}
if not key_value_option:
return key_value_dict
key_value_list = key_value_option.split(',')
for key_value in key_value_list:
values = key_value.split(':')
provider = values[0]
key_value_dict.setdefault(provider, []).extend(values[1:])
return key_value_dict
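# A minimal worked example of the parsing above (values are illustrative):
#   load_conf_option_to_dict(
#       'physnet1:lig-uuid-1:uplink-a,physnet1:lig-uuid-2:uplink-b,'
#       'public:net-uuid-1')
#   => {'physnet1': ['lig-uuid-1', 'uplink-a', 'lig-uuid-2', 'uplink-b'],
#       'public': ['net-uuid-1']}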
def network_uri_from_id(network_id):
return ETHERNET_NETWORK_PREFIX + network_id
def network_dict_for_network_creation(
physical_network, network_type, neutron_net_id, segmentation_id=None):
return {
'provider:physical_network': physical_network,
'provider:network_type': network_type,
'provider:segmentation_id': segmentation_id,
'id': neutron_net_id,
}
def port_dict_for_port_creation(
network_id, vnic_type, mac_address, profile, host_id='host_id'):
return {
'network_id': network_id,
'binding:vnic_type': vnic_type,
'binding:host_id': host_id,
'mac_address': mac_address,
'binding:profile': profile
}
def session_from_context(context):
"""Get the Session from a Neutron Context.
:param context: a Neutron Context;
:return: the session;
"""
plugin_context = getattr(context, '_plugin_context', None)
return getattr(plugin_context, '_session', None)
def network_from_context(context):
"""Get the Network from a Neutron Context.
:param context: a Neutron Context;
:return: the network;
"""
return getattr(context, '_network', None)
def port_from_context(context):
"""Get the Port from a Neutron Context.
:param context: a Neutron Context;
:return: the port;
"""
return getattr(context, '_port', None)
def local_link_information_from_port(port_dict):
"""Get the Local Link Information from a port.
:param port_dict: a Neutron port object;
:return: the local link information;
"""
binding_profile_dict = port_dict.get('binding:profile')
return binding_profile_dict.get(
'local_link_information') if binding_profile_dict else None
def is_local_link_information_valid(local_link_information_list):
"""Verify if a local link information list is valid.
A local link information list is valid if:
1 - the list has only one local link information
2 - It has switch info defined
3 - The switch info has a server_hardware_id
4 - The switch info has information about being bootable
5 - The switch info's bootable value is boolean
"""
if len(local_link_information_list) != 1:
return False
local_link_information = local_link_information_list[0]
switch_info = local_link_information.get('switch_info')
if not switch_info:
return False
server_hardware_uuid = switch_info.get('server_hardware_id')
bootable = switch_info.get('bootable')
if not server_hardware_uuid:
return False
return isinstance(bootable, bool)
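# Illustrative shape of a valid input (values are assumed):
#   [{'switch_info': {'server_hardware_id': 'some-uuid', 'bootable': True}}]
# i.e. exactly one entry whose switch_info is a dict carrying a
# server_hardware_id and a boolean bootable flag.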
def server_hardware_from_local_link_information_list(
oneview_client, local_link_information_list):
"""Get the Server Hardware from Local Link Information.
    :param oneview_client: an instance of the OneView Client;
    :param local_link_information_list: a list of local link information;
:return: server_hardware;
"""
switch_info = local_link_information_list[0].get('switch_info')
if isinstance(switch_info, six.text_type):
switch_info = jsonutils.loads(switch_info)
server_hardware_id = switch_info.get('server_hardware_id')
server_hardware = oneview_client.server_hardware.get(
server_hardware_id
)
return server_hardware
def switch_info_from_local_link_information_list(local_link_information_list):
"""Get the switch_info from Local Link Information.
    :param local_link_information_list: a list of local link information;
:return: switch_info;
"""
switch_info = local_link_information_list[0].get('switch_info')
if isinstance(switch_info, six.text_type):
switch_info = jsonutils.loads(switch_info)
return switch_info
def is_rack_server(server_hardware):
"""Verify if Server Hardware is a Rack Server.
:param server_hardware: a server hardware object;
:return: True or False;
"""
    return not server_hardware.get('locationUri')
def check_oneview_entities_availability(oneview_client, server_hardware):
_check_server_hardware_availability(server_hardware)
_check_server_profile_availability(oneview_client, server_hardware)
def _check_server_hardware_availability(server_hardware):
max_number_of_attempts = CONF.DEFAULT.retries_to_lock_sh
interval = CONF.DEFAULT.retries_to_lock_sh_interval
for _ in range(max_number_of_attempts):
if not server_hardware.get('powerLock'):
return True
time.sleep(interval)
return False
def _check_server_profile_availability(oneview_client, server_hardware):
max_number_of_attempts = CONF.DEFAULT.retries_to_lock_sp
interval = CONF.DEFAULT.retries_to_lock_sp_interval
for _ in range(max_number_of_attempts):
        # OneViewClient has no get_server_profile_state method; call the
        # module-level helper defined below instead
        if _get_server_profile_state(oneview_client, server_hardware):
return True
time.sleep(interval)
return False
def _get_server_profile_state(oneview_client, server_hardware):
server_profile_dict = server_profile_from_server_hardware(
oneview_client, server_hardware
)
return server_profile_dict.get('status')
def server_profile_from_server_hardware(oneview_client, server_hardware):
server_profile_uri = server_hardware.get('serverProfileUri')
if not server_profile_uri:
LOG.warning("There is no Server Profile available on "
"Server Hardware: %s." % server_hardware.get('uuid'))
return None
LOG.info("There is Server Profile %s available.", server_profile_uri)
return oneview_client.server_profiles.get(server_profile_uri)
def get_server_hardware_power_state(server_hardware):
return server_hardware.get('powerState')
def is_lig_id_uplink_name_mapped(lig_bd_entry, mappings):
mapped_lig_id = lig_bd_entry.get('oneview_lig_id')
mapped_uplink_name = lig_bd_entry.get('oneview_uplinkset_name')
for lig_id, uplinkset_name in zip(mappings[0::2], mappings[1::2]):
if lig_id == mapped_lig_id and (
uplinkset_name == mapped_uplink_name):
return True
return False
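# The mappings argument alternates lig ids and uplinkset names, e.g.
# ['lig-1', 'uplink-a', 'lig-2', 'uplink-b'] (illustrative), so
# zip(mappings[0::2], mappings[1::2]) walks it pairwise as
# ('lig-1', 'uplink-a'), ('lig-2', 'uplink-b').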
def get_boot_priority(server_profile, bootable):
if bootable:
connections = server_profile.get('connections')
if _is_boot_priority_available(connections, 'Primary'):
return 'Primary'
elif _is_boot_priority_available(connections, 'Secondary'):
return 'Secondary'
return None
return 'NotBootable'
def _is_boot_priority_available(connections, boot_priority):
for connection in connections:
if connection.get('boot').get('priority') == boot_priority:
return False
return True
def port_id_from_mac(server_hardware, mac_address):
port_info = _get_port_info(server_hardware, mac_address)
if not port_info:
return None
return (
str(port_info.get('device_slot_location')) + " " +
str(port_info.get('device_slot_port_number')) + ":" +
str(port_info.get('physical_port_number')) + "-" +
str(port_info.get('virtual_port_function'))
)
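# Illustrative result (all values assumed): a device slot located at
# 'Flb 1' with slot number 1, physical port 1 and virtual port function
# 'a' yields the port id 'Flb 1 1:1-a'.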
def _get_port_info(server_hardware, mac_address):
port_map = server_hardware.get('portMap')
device_slots = port_map.get('deviceSlots')
try:
for device_slot in device_slots:
physical_ports = device_slot.get('physicalPorts')
for physical_port in physical_ports:
virtual_ports = physical_port.get('virtualPorts')
for virtual_port in virtual_ports:
mac = virtual_port.get('mac')
if mac.upper() == mac_address.upper():
return {
'virtual_port_function': virtual_port.get(
'portFunction'
),
'physical_port_number': physical_port.get(
'portNumber'
),
'device_slot_port_number': device_slot.get(
'slotNumber'
),
'device_slot_location': device_slot.get(
'location'
),
}
return None
except oneview_exceptions.HPOneViewException as ex:
LOG.warning("Could not get port information on the Server "
"Hardware: %s" % server_hardware.get('uuid'))
raise ex
def connection_with_mac_address(connections, mac_address):
for connection in connections:
if connection.get('mac') == mac_address:
return connection
return None
def is_port_valid_to_reflect_on_oneview(
session, port_dict, local_link_information):
vnic_type = port_dict.get('binding:vnic_type')
port_id = port_dict.get("id")
if vnic_type != 'baremetal':
LOG.warning("'vnic_type' of the port %s must be baremetal" %
port_id)
return False
network_id = port_dict.get('network_id')
neutron_oneview_network = database_manager.get_neutron_oneview_network(
session, network_id
)
if not neutron_oneview_network:
LOG.warning(
"There is no network created for the port %s" % port_id)
return False
return _is_local_link_information_valid(port_id, local_link_information)
def _is_local_link_information_valid(port_id, local_link_information):
if not local_link_information:
LOG.warning(
"The port %s must have 'local_link_information'" % port_id)
return False
if len(local_link_information) > 1:
LOG.warning(
"'local_link_information' must have only one value")
return False
switch_info = switch_info_from_local_link_information_list(
local_link_information)
if not switch_info:
LOG.warning(
"'local_link_information' must contain 'switch_info'.")
return False
server_hardware_id = switch_info.get('server_hardware_id')
try:
strutils.bool_from_string(
subject=switch_info.get('bootable'),
strict=True)
except ValueError:
LOG.warning("'bootable' must be a boolean.")
return False
if not server_hardware_id:
LOG.warning(
"'local_link_information' must contain `server_hardware_id`.")
return False
return True
def remove_inconsistence_from_db(
session, neutron_network_id, oneview_network_id):
database_manager.delete_neutron_oneview_network(
session, neutron_network_id=neutron_network_id
)
database_manager.delete_oneview_network_lig(
session, oneview_network_id=oneview_network_id
)
def check_valid_resources():
"""Verify if the OneView resources exist.
Verify if the resources described on the configuration file
exist on OneView.
:raise OneViewResourceNotFoundException: If any of the OneView
resources does not exist.
:raise ElementNotFoundException: If the UplinkSet name is not
in the LIG's UplinkSets list. If a Network is not associated
to any UplinkSet.
"""
LOG.info("Checking if resources in mappings exist in OneView.")
check_uplinkset_mappings_resources()
check_flat_net_mappings_resources()
def check_uplinkset_mappings_resources():
"""Verify if the Logical Interconnect Groups and UplinkSets exist.
    :raise ClientException: If a Logical Interconnect Group does not exist
or if a UplinkSet name is not in the LIG's UplinkSets list.
"""
mappings = load_conf_option_to_dict(CONF.DEFAULT.uplinkset_mappings)
errors = {"ligs": [], "uplinksets": []}
for physnet in mappings:
provider = zip(
mappings.get(physnet)[0::2],
mappings.get(physnet)[1::2])
# Check if Logical Interconnect Groups and UplinkSets exist
for lig_id, uplinkset_name in provider:
try:
lig = get_logical_interconnect_group_by_id(lig_id)
except exceptions.OneViewResourceNotFoundException:
errors["ligs"].append(lig_id)
continue
uplinksets = lig.get('uplinkSets')
try:
get_uplinkset_by_name_from_list(uplinksets, uplinkset_name)
except exceptions.ElementNotFoundException:
errors["uplinksets"].append(
"%s in the lig %s" % (uplinkset_name, lig_id))
if errors["ligs"] or errors["uplinksets"]:
err_msg = (
'There are invalid values in the UplinkSet mappings '
'within the OneView configuration file:')
if errors["ligs"]:
err_msg += (
"\nThose Logical Interconnect Groups "
"could not be found: {err[ligs]}")
if errors["uplinksets"]:
err_msg += (
'\nThose UplinkSets could not be found: {err[uplinksets]}')
err_msg = err_msg.format(err=errors)
raise exceptions.ClientException(err_msg)
def check_flat_net_mappings_resources():
"""Verify if the Ethernet Networks exist.
:raise ClientException: If an Ethernet Network does not exist
        or if there is no UplinkSet associated with the Network.
"""
mappings = load_conf_option_to_dict(CONF.DEFAULT.flat_net_mappings)
errors = {"networks": [], "no_uplinkset": []}
for physnet in mappings:
oneview_network_ids = mappings.get(physnet)
for oneview_network_id in oneview_network_ids:
try:
get_ethernet_network_by_id(oneview_network_id)
except exceptions.OneViewResourceNotFoundException:
errors["networks"].append(oneview_network_id)
continue
if not get_uplink_port_group_uris_for_ethernet_network_by_id(
oneview_network_id):
errors["no_uplinkset"].append(oneview_network_id)
if errors["networks"] or errors["no_uplinkset"]:
err_msg = (
'There are invalid values in the Flat net mappings '
'within the OneView configuration file:')
if errors["networks"]:
err_msg += (
"\nThose Networks could not be found: {err[networks]}")
if errors["no_uplinkset"]:
err_msg += (
'\nThose Networks are not associated '
'to any Uplinkset: {err[no_uplinkset]}')
err_msg = err_msg.format(err=errors)
raise exceptions.ClientException(err_msg)
def uplinkset_mappings_by_type(uplinkset_mappings):
uplinkset_by_type = {}
uplinkset_by_type[NETWORK_TYPE_TAGGED] = (
get_uplinkset_by_type(
uplinkset_mappings, NETWORK_TYPE_TAGGED
)
)
uplinkset_by_type[NETWORK_TYPE_UNTAGGED] = (
get_uplinkset_by_type(
uplinkset_mappings, NETWORK_TYPE_UNTAGGED
)
)
return uplinkset_by_type
def get_uplinkset_by_type(uplinkset_mappings, net_type):
uplinksets_by_type = {}
for physnet in uplinkset_mappings:
provider = uplinkset_mappings.get(physnet)
for lig_id, uplinkset_name in zip(provider[0::2], provider[1::2]):
lig = get_logical_interconnect_group_by_id(lig_id)
lig_uplinksets = lig.get('uplinkSets')
uplinkset = get_uplinkset_by_name_from_list(
lig_uplinksets, uplinkset_name
)
if uplinkset.get('ethernetNetworkType').lower() == net_type:
uplinksets_by_type.setdefault(physnet, []).extend(
[lig_id, uplinkset_name]
)
return uplinksets_by_type
def check_uplinkset_types_constraint(oneview_client, uplinkset_mappings):
"""Check the number of uplinkset types for a provider in a LIG.
    A provider can be mapped to at most one uplinkset of each type
    within a given Logical Interconnect Group.
"""
LOG.info("Checking if a provider has two mappings for the same LIG with "
"different uplinksets of the same type.")
for provider in uplinkset_mappings:
provider_mapping = zip(
uplinkset_mappings.get(provider)[::2],
uplinkset_mappings.get(provider)[1::2])
uplinksets_type = {}
for lig_id, ups_name in provider_mapping:
lig_mappings = uplinksets_type.setdefault(lig_id, [])
lig = oneview_client.logical_interconnect_groups.get(
lig_id
)
uplinkset = get_uplinkset_by_name_from_list(
lig.get('uplinkSets'), ups_name)
lig_mappings.append(uplinkset.get('ethernetNetworkType'))
if len(lig_mappings) != len(set(lig_mappings)):
err = (
"The provider %(provider)s has more than one "
"uplinkset of the same type in the logical "
"interconnect group %(lig_id)s."
) % {"provider": provider, "lig_id": lig_id}
LOG.error(err)
raise Exception(err)
def check_unique_lig_per_provider_constraint(uplinkset_mappings):
LOG.info("Checking if different providers have the same mapping.")
for provider in uplinkset_mappings:
for provider2 in uplinkset_mappings:
if provider != provider2:
                provider_lig_mapping_tuples = zip(
                    uplinkset_mappings.get(provider)[::2],
                    uplinkset_mappings.get(provider)[1::2])
                provider2_lig_mapping_tuples = zip(
                    uplinkset_mappings.get(provider2)[::2],
                    uplinkset_mappings.get(provider2)[1::2])
                identical_mappings = (set(provider_lig_mapping_tuples) &
                                      set(provider2_lig_mapping_tuples))
if identical_mappings:
err_message_attrs = {
"prov1": provider,
"prov2": provider2,
"identical_mappings": "\n".join(
(", ".join(mapping)
for mapping in identical_mappings)
)
}
err = (
"The providers %(prov1)s and %(prov2)s are being "
"mapped to the same Logical Interconnect Group "
"and the same Uplinkset.\n"
"The LIG ids and Uplink names are: "
"%(identical_mappings)s"
) % err_message_attrs
LOG.error(err)
raise Exception(err)
def delete_outdated_flat_mapped_networks(flat_net_mappings):
LOG.info("Synchronizing flat network mappings.")
session = get_database_session()
mappings = flat_net_mappings.values()
mapped_networks_uuids = list(chain.from_iterable(mappings))
oneview_networks_uuids = (
network.oneview_network_id for network
in database_manager.list_neutron_oneview_network(session)
if not network.manageable)
unmapped_networks_uuids = (
uuid for uuid
in oneview_networks_uuids
if uuid not in mapped_networks_uuids)
for uuid in unmapped_networks_uuids:
database_manager.delete_neutron_oneview_network(
session, oneview_network_id=uuid)
def get_database_session():
connection = CONF.database.connection
Session = sessionmaker(bind=create_engine(connection),
autocommit=True)
return Session()
| 35.032609 | 79 | 0.671148 | 3,482 | 29,007 | 5.30672 | 0.113441 | 0.036368 | 0.04113 | 0.018184 | 0.376881 | 0.307934 | 0.258037 | 0.202241 | 0.162896 | 0.140546 | 0 | 0.00324 | 0.255111 | 29,007 | 827 | 80 | 35.07497 | 0.851946 | 0.176302 | 0 | 0.24952 | 0 | 0 | 0.148873 | 0.017701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09405 | false | 0.001919 | 0.026871 | 0.011516 | 0.243762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a663ec88245df01b3d73a61e4196afea2ecc96d | 1,972 | py | Python | get_compiled_model.py | launis/areadata | 8cf0e30ec489ce9655fcd9829284d1ec70e7360d | [
"BSD-3-Clause"
] | null | null | null | get_compiled_model.py | launis/areadata | 8cf0e30ec489ce9655fcd9829284d1ec70e7360d | [
"BSD-3-Clause"
] | null | null | null | get_compiled_model.py | launis/areadata | 8cf0e30ec489ce9655fcd9829284d1ec70e7360d | [
"BSD-3-Clause"
] | null | null | null |
def get_compiled_model(X, target, log_dir):
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
    # note the trailing comma: Keras expects shape to be a tuple
    input_layer = Input(shape=(X.values.shape[1],), name='Areadata')
dense_layer_1 = Dense(64, activation='relu', name='Dense_1')(input_layer)
dense_layer_2 = Dense(64, activation='relu', name='Dense_2')(dense_layer_1)
dense_layer_3 = Dense(64, activation='relu', name='Dense_3')(dense_layer_2)
dense_layer_4 = Dense(64, activation='relu', name='Dense_4')(dense_layer_3)
dense_layer_5 = Dense(64, activation='relu', name='Dense_5')(dense_layer_4)
dense_layer_6 = Dense(64, activation='relu', name='Dense_6')(dense_layer_5)
out = Dense(len(target), activation='linear', name='Party_shares')(dense_layer_6)
model = Model(inputs=input_layer, outputs=[out], name="areadata_model")
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=10000,
decay_rate=0.96,
staircase=True)
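    # with staircase=True this is lr = 0.0001 * 0.96 ** (step // 10000),
    # i.e. the learning rate drops by 4% every 10000 optimizer steps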
optimizer=tf.optimizers.Adam(learning_rate=lr_schedule)
model.compile(
optimizer=optimizer,
loss=['mean_squared_error'],
metrics=["mean_squared_error"])
earlystopping_callback = keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor="val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=0.001,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=50,
verbose=1)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
callbacks = [earlystopping_callback, tensorboard_callback]
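    # Illustrative usage (caller-side names are assumed):
    #   model, callbacks = get_compiled_model(X, target, "logs/run1")
    #   model.fit(X.values, y.values, epochs=500, validation_split=0.2,
    #             callbacks=callbacks)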
return(model, callbacks) | 41.957447 | 93 | 0.6714 | 252 | 1,972 | 5.011905 | 0.380952 | 0.095012 | 0.08076 | 0.099762 | 0.142518 | 0.142518 | 0 | 0 | 0 | 0 | 0 | 0.035807 | 0.221095 | 1,972 | 47 | 94 | 41.957447 | 0.786458 | 0.095842 | 0 | 0 | 0 | 0 | 0.086555 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.121212 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a66bed8025b5762839fbf814242ae6c19d9b073 | 1,712 | py | Python | src/rhml_client/rhml_client/UI/helpers/restore_file.py | PycT/RhythmicML | abf3eea273dcaa97b9308772c8054cfc60b77a4f | [
"Apache-2.0"
] | null | null | null | src/rhml_client/rhml_client/UI/helpers/restore_file.py | PycT/RhythmicML | abf3eea273dcaa97b9308772c8054cfc60b77a4f | [
"Apache-2.0"
] | null | null | null | src/rhml_client/rhml_client/UI/helpers/restore_file.py | PycT/RhythmicML | abf3eea273dcaa97b9308772c8054cfc60b77a4f | [
"Apache-2.0"
] | null | null | null | from rhythmic import rhythmicDB, faultReturnHandler;
from . import configuration, scanFolder, unpackSingleFile;
def restoreFile(data):
"""
var data =
{
"file_absolute_path": absolute_path,
"model_path": window.model_path,
"model_id": window.the_model_id,
"version_number": window.active_version_number,
"version_id": window.active_version_id
}
"""
model_version_archive_file_name = "{}/{}/model_{}_ver{}.zip".\
format(
data["model_path"],
configuration.storage_folder_name,
data["model_id"],
data["version_number"]
);
unpackSingleFile(model_version_archive_file_name, data["file_absolute_path"], data["model_path"]);
    # after unpacking, update last_modified_time so the UI does not mark the file as modified
base_index = data["file_absolute_path"].rfind("/");
file_containing_folder = data["file_absolute_path"][: base_index + 1]
folder_contents = scanFolder(file_containing_folder);
if data["file_absolute_path"] in folder_contents:
with rhythmicDB(configuration.db_name, configuration.db_file_name) as db:
db.execute(
"""
UPDATE files_table SET last_modified_time ='{}' WHERE model_version_id = '{}' AND absolute_path = '{}';
""".format(
folder_contents[ data["file_absolute_path"] ]["last_modified_time"],
data["version_id"],
data["file_absolute_path"]
)
);
return "Success";
| 36.425532 | 119 | 0.586449 | 173 | 1,712 | 5.427746 | 0.364162 | 0.115016 | 0.119276 | 0.149095 | 0.057508 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000846 | 0.309579 | 1,712 | 46 | 120 | 37.217391 | 0.79357 | 0.188084 | 0 | 0.083333 | 0 | 0 | 0.174564 | 0.01995 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a68db339fbb6f0118a461ed74c8244c0b67b502 | 4,652 | py | Python | episode-4/flask/src/watson.py | chughts/python-primer-companion-code | 3a147616183932d52714373b68054c212a040dc9 | [
"Apache-2.0"
] | 18 | 2016-03-30T14:55:28.000Z | 2019-01-01T12:41:27.000Z | episode-4/flask/src/watson.py | chughts/python-primer-companion-code | 3a147616183932d52714373b68054c212a040dc9 | [
"Apache-2.0"
] | 5 | 2016-02-22T20:12:33.000Z | 2018-11-19T15:33:46.000Z | episode-4/flask/src/watson.py | chughts/python-primer-companion-code | 3a147616183932d52714373b68054c212a040dc9 | [
"Apache-2.0"
] | 21 | 2016-02-22T19:22:59.000Z | 2020-12-02T14:46:36.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from flask import Flask, jsonify, render_template, redirect, session, url_for
from flask import request
from flask_wtf import Form  # the flask.ext.* import style is deprecated
from wtforms import TextAreaField, SubmitField
from wtforms.validators import Required
from watson_developer_cloud import WatsonException
from languagetranslation import LanguageTranslationUtils
from naturallanguageclassification import NaturalLanguageClassifierUtils
app = Flask(__name__)
app.config['SECRET_KEY'] = 'please substitute this string with something hard to guess'
class LangForm(Form):
txtdata = TextAreaField('Text to process', validators=[Required()])
submit = SubmitField('Process')
@app.route('/wl/lang', methods=['GET', 'POST'])
def wlhome():
app.logger.info('wlhome page requested')
allinfo = {}
outputTxt = "TBD"
targetlang = 'en'
lang = "TBD"
txt = None
form = LangForm()
if form.validate_on_submit():
lang = "TBC"
txt = form.txtdata.data
form.txtdata.data = ''
        classification = None  # stays None when translation is unavailable
        try:
ltu = LanguageTranslationUtils(app)
nlcu = NaturalLanguageClassifierUtils(app)
lang = ltu.identifyLanguage(txt)
primarylang = lang["language"]
confidence = lang["confidence"]
outputTxt = "I am %s confident that the language is %s" % (confidence, primarylang)
if targetlang != primarylang:
supportedModels = ltu.checkForTranslation(primarylang, targetlang)
if supportedModels:
englishTxt = ltu.performTranslation(txt, primarylang, targetlang)
outputTxt += ", which in english is %s" % englishTxt
classification = nlcu.classifyTheText(englishTxt)
else:
outputTxt += ", which unfortunately we can't translate into English"
else:
classification = nlcu.classifyTheText(txt)
if classification:
outputTxt += "(and %s confident that it is %s classification)" \
% (classification['confidence'],
classification['className'])
session['langtext'] = outputTxt
allinfo['lang'] = lang
allinfo['form'] = form
return redirect(url_for('wlhome'))
except WatsonException as err:
allinfo['error'] = err
allinfo['lang'] = session.get('langtext')
allinfo['form'] = form
return render_template('watson/wlindex.html', info=allinfo)
@app.route('/api/process/', methods=['POST'])
def apiprocess():
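    # Illustrative client call (host/port assumed, not part of this file):
    #   curl -X POST -F 'txtdata=Bonjour tout le monde' \
    #        http://localhost:5000/api/process/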
app.logger.info('REST API for process has been invoked')
targetlang = 'en'
classification = {"className":"unknown"}
results = {}
theData = {"error":"If you see this message then something has gone badly wrong"}
    app.logger.info(request.form.get('txtdata'))
    if 'txtdata' not in request.form:
theData = {"error":"Text to be processed must not be blank"}
else:
del theData["error"]
try:
data = request.form['txtdata']
ltu = LanguageTranslationUtils(app)
nlcu = NaturalLanguageClassifierUtils(app)
primarylang = theData['language'] = ltu.identifyLanguage(data)["language"]
if targetlang != primarylang:
supportedModels = ltu.checkForTranslation(primarylang, targetlang)
if supportedModels:
englishTxt = ltu.performTranslation(data, primarylang, targetlang)
classification = nlcu.classifyTheText(englishTxt)
else:
classification = nlcu.classifyTheText(data)
theData['classification'] = classification['className']
except WatsonException as err:
            theData['error'] = str(err)  # stringify so jsonify can serialize it
results["results"] = theData
return jsonify(results), 201
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port), debug=True)
| 36.920635 | 95 | 0.63693 | 481 | 4,652 | 6.116424 | 0.413721 | 0.020394 | 0.044867 | 0.010877 | 0.165194 | 0.133243 | 0.087695 | 0.087695 | 0.087695 | 0.087695 | 0 | 0.005845 | 0.264402 | 4,652 | 125 | 96 | 37.216 | 0.853887 | 0.125967 | 0 | 0.266667 | 0 | 0 | 0.16786 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a6af1690d6232cd49d86ec0f6d169063cae363f | 13,288 | py | Python | tests/test_gen.py | OrquestraDigital/aboutcode-toolkit | d9ff859735a72635563fb5a9e265ecd7023d401a | [
"Apache-2.0"
] | 1 | 2021-08-31T10:58:29.000Z | 2021-08-31T10:58:29.000Z | tests/test_gen.py | sthagen/aboutcode-toolkit | cd74f15bcc223c7e1b7424f169481af8e55e0f38 | [
"Apache-2.0"
] | null | null | null | tests/test_gen.py | sthagen/aboutcode-toolkit | cd74f15bcc223c7e1b7424f169481af8e55e0f38 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import unittest
from testing_utils import get_temp_dir
from testing_utils import get_test_loc
from attributecode import ERROR
from attributecode import INFO
from attributecode import CRITICAL
from attributecode import Error
from attributecode import gen
from unittest.case import skip
class GenTest(unittest.TestCase):
def test_check_duplicated_columns(self):
test_file = get_test_loc('test_gen/dup_keys.csv')
expected = [Error(ERROR, 'Duplicated column name(s): copyright with copyright\nPlease correct the input and re-run.')]
result = gen.check_duplicated_columns(test_file)
assert expected == result
def test_check_duplicated_columns_handles_lower_upper_case(self):
test_file = get_test_loc('test_gen/dup_keys_with_diff_case.csv')
expected = [Error(ERROR, 'Duplicated column name(s): copyright with Copyright\nPlease correct the input and re-run.')]
result = gen.check_duplicated_columns(test_file)
assert expected == result
def test_check_duplicated_about_resource(self):
arp_list = ['/test/test.c', 'test/test1.h']
arp1 = '/test/test.c'
arp2 = '/test/tmp/test.c'
expected = Error(CRITICAL,
"The input has duplicated values in 'about_resource' field: " + arp1)
result1 = gen.check_duplicated_about_resource(arp1, arp_list)
result2 = gen.check_duplicated_about_resource(arp2, arp_list)
assert result1 == expected
assert result2 == ''
def test_check_newline_in_file_field(self):
test_dict1 = {'about_resource': '/test/test.c', 'name': 'test.c', 'notice_file': 'NOTICE\nNOTICE2'}
test_dict2 = {'about_resource': '/test/test.c', 'name': 'test.c', 'notice_file': 'NOTICE, NOTICE2'}
expected = [
Error(CRITICAL,
"New line character detected in 'notice_file' for '/test/test.c' which is not supported."
"\nPlease use ',' to declare multiple files.")]
result1 = gen.check_newline_in_file_field(test_dict1)
result2 = gen.check_newline_in_file_field(test_dict2)
assert result1 == expected
assert result2 == []
def test_check_about_resource_filename(self):
arp1 = '/test/t@est.c'
arp2 = '/test/t!est.c'
msg = ("Invalid characters present in 'about_resource' "
"field: " + arp2)
expected2 = Error(CRITICAL, msg)
result1 = gen.check_about_resource_filename(arp1)
result2 = gen.check_about_resource_filename(arp2)
assert result1 == ''
assert result2 == expected2
def test_load_inventory(self):
location = get_test_loc('test_gen/inv.csv')
base_dir = get_temp_dir()
errors, abouts = gen.load_inventory(location, base_dir)
expected_errors = [
Error(INFO, 'Field custom1 is a custom field.'),
Error(INFO, 'Field about_resource: Path')
]
for exp, err in zip(expected_errors, errors):
assert exp.severity == err.severity
assert err.message.startswith(exp.message)
expected = (
'''about_resource: .
name: AboutCode
version: 0.11.0
description: |
multi
line
custom1: |
multi
line
'''
)
result = [a.dumps() for a in abouts]
assert expected == result[0]
def test_load_inventory_with_errors(self):
location = get_test_loc('test_gen/inv4.csv')
base_dir = get_temp_dir()
errors, abouts = gen.load_inventory(location, base_dir)
expected_errors = [
Error(CRITICAL, "Field name: 'confirmed copyright' contains illegal name characters: 0 to 9, a to z, A to Z and _."),
Error(INFO, 'Field resource is a custom field.'),
Error(INFO, 'Field test is a custom field.'),
Error(INFO, 'Field about_resource: Path')
]
# assert [] == errors
for exp, err in zip(expected_errors, errors):
assert exp.severity == err.severity
assert err.message.startswith(exp.message)
expected = (
'about_resource: .\n'
'name: AboutCode\n'
'version: 0.11.0\n'
'description: |\n'
' multi\n'
' line\n'
# 'confirmed copyright: Copyright (c) nexB, Inc.\n'
'resource: this.ABOUT\n'
'test: This is a test\n'
)
result = [a.dumps() for a in abouts]
assert expected == result[0]
def test_generation_dir_endswith_space(self):
location = get_test_loc('test_gen/inventory/complex/about_file_path_dir_endswith_space.csv')
base_dir = get_temp_dir()
errors, _abouts = gen.generate(location, base_dir)
expected_errors_msg1 = 'contains directory name ends with spaces which is not allowed. Generation skipped.'
expected_errors_msg2 = 'Field about_resource'
assert errors
assert len(errors) == 2
assert expected_errors_msg1 in errors[0].message or expected_errors_msg1 in errors[1].message
assert expected_errors_msg2 in errors[0].message or expected_errors_msg2 in errors[1].message
def test_generation_with_no_about_resource(self):
location = get_test_loc('test_gen/inv2.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
expected = dict([('.', None)])
assert abouts[0].about_resource.value == expected
assert len(errors) == 1
def test_generation_with_no_about_resource_reference(self):
location = get_test_loc('test_gen/inv3.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
expected = dict([('test.tar.gz', None)])
assert abouts[0].about_resource.value == expected
assert len(errors) == 1
msg = 'Field about_resource'
assert msg in errors[0].message
def test_generation_with_no_about_resource_reference_no_resource_validation(self):
location = get_test_loc('test_gen/inv3.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
expected = dict([('test.tar.gz', None)])
assert abouts[0].about_resource.value == expected
assert len(errors) == 1
def test_generate(self):
location = get_test_loc('test_gen/inv.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
msg1 = 'Field custom1 is a custom field.'
msg2 = 'Field about_resource'
assert msg1 in errors[0].message
assert msg2 in errors[1].message
result = [a.dumps() for a in abouts][0]
expected = (
'''about_resource: .
name: AboutCode
version: 0.11.0
description: |
multi
line
custom1: |
multi
line
'''
)
assert expected == result
def test_generate_multi_lic_issue_443(self):
location = get_test_loc('test_gen/multi_lic_issue_443/test.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
result = [a.dumps() for a in abouts][0]
expected = (
'''about_resource: test
name: test
version: '1.5'
licenses:
- key: License1
name: License1
file: LIC1.LICENSE
- key: License2
name: License2
file: LIC2.LICENSE
- key: License3
name: License3
file: LIC3.LICENSE
'''
)
assert expected == result
def test_generate_multi_lic_issue_444(self):
location = get_test_loc('test_gen/multi_lic_issue_444/test1.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
result = [a.dumps() for a in abouts][0]
expected = (
'''about_resource: test.c
name: test.c
licenses:
- key: License1
name: License1
file: LIC1.LICENSE, LIC2.LICENSE
'''
)
assert expected == result
def test_generate_license_key_with_custom_file_450_no_fetch(self):
location = get_test_loc('test_gen/lic_issue_450/custom_and_valid_lic_key_with_file.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
result = [a.dumps() for a in abouts][0]
expected = (
'''about_resource: test.c
name: test.c
license_expression: public-domain AND custom
licenses:
- file: custom.txt
'''
)
assert expected == result
def test_generate_license_key_with_custom_file_450_with_fetch(self):
location = get_test_loc('test_gen/lic_issue_450/custom_and_valid_lic_key_with_file.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
lic_dict = {u'public-domain': [u'Public Domain',
u'This component is released to the public domain by the author.',
u'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:public-domain'
]}
a = abouts[0]
a.license_key.value.append('public-domain')
a.license_key.value.append('custom')
result = a.dumps(lic_dict)
expected = (
'''about_resource: test.c
name: test.c
license_expression: public-domain AND custom
licenses:
- key: public-domain
name: Public Domain
file: public-domain.LICENSE
url: https://enterprise.dejacode.com/urn/?urn=urn:dje:license:public-domain
- key: custom
name: custom
file: custom.txt
'''
)
assert expected == result
def test_generate_license_key_with_custom_file_450_with_fetch_with_order(self):
location = get_test_loc('test_gen/lic_issue_450/custom_and_valid_lic_key_with_file.csv')
base_dir = get_temp_dir()
errors, abouts = gen.generate(location, base_dir)
lic_dict = {u'public-domain': [u'Public Domain',
u'This component is released to the public domain by the author.',
u'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:public-domain'
]}
# The first row from the test file
a = abouts[0]
a.license_key.value.append('public-domain')
a.license_key.value.append('custom')
result1 = a.dumps(lic_dict)
# The second row from the test file
b = abouts[1]
b.license_key.value.append('custom')
b.license_key.value.append('public-domain')
result2 = b.dumps(lic_dict)
expected1 = (
'''about_resource: test.c
name: test.c
license_expression: public-domain AND custom
licenses:
- key: public-domain
name: Public Domain
file: public-domain.LICENSE
url: https://enterprise.dejacode.com/urn/?urn=urn:dje:license:public-domain
- key: custom
name: custom
file: custom.txt
'''
)
expected2 = (
'''about_resource: test.h
name: test.h
license_expression: custom AND public-domain
licenses:
- key: custom
name: custom
file: custom.txt
- key: public-domain
name: Public Domain
file: public-domain.LICENSE
url: https://enterprise.dejacode.com/urn/?urn=urn:dje:license:public-domain
'''
)
assert expected1 == result1
assert expected2 == result2
@skip('FIXME: this test is making a failed, live API call')
def test_generate_not_overwrite_original_license_file(self):
location = get_test_loc('test_gen/inv5.csv')
base_dir = get_temp_dir()
reference_dir = None
fetch_license = ['url', 'lic_key']
_errors, abouts = gen.generate(
location, base_dir, reference_dir, fetch_license)
result = [a.dumps()for a in abouts][0]
expected = (
'about_resource: .\n'
'name: AboutCode\n'
'version: 0.11.0\n'
'licenses:\n'
' - file: this.LICENSE\n')
assert expected == result
def test_boolean_value_not_lost(self):
location = get_test_loc('test_gen/inv6.csv')
base_dir = get_temp_dir()
_errors, abouts = gen.generate(location, base_dir)
in_mem_result = [a.dumps() for a in abouts][0]
expected = (u'about_resource: .\n'
u'name: AboutCode\n'
u'version: 0.11.0\n'
u'redistribute: yes\n'
u'attribute: yes\n'
u'modified: no\n')
assert expected == in_mem_result
| 35.060686 | 129 | 0.628989 | 1,693 | 13,288 | 4.724158 | 0.152392 | 0.052013 | 0.021255 | 0.028007 | 0.702926 | 0.641535 | 0.630158 | 0.569767 | 0.530133 | 0.514004 | 0 | 0.015913 | 0.257526 | 13,288 | 378 | 130 | 35.153439 | 0.79475 | 0.070138 | 0 | 0.44 | 0 | 0.017778 | 0.218192 | 0.035524 | 0 | 0 | 0 | 0 | 0.16 | 1 | 0.084444 | false | 0 | 0.04 | 0 | 0.128889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a6bab5a4f71c5cc1a73bd0a17487ba725836903 | 795 | py | Python | src/removeDupsList/rem.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | null | null | null | src/removeDupsList/rem.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | null | null | null | src/removeDupsList/rem.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | 1 | 2021-04-28T18:17:55.000Z | 2021-04-28T18:17:55.000Z | # Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def getList(self):
ans = []
while self:
ans.append(str(self.val))
self = self.next
return "->".join(ans)
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
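        # Assumes a sorted list (equal values are adjacent), as in
        # LeetCode 83; runs in O(n) time with O(1) extra space.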
curr = head
while curr and curr.next:
if curr.val == curr.next.val:
curr.next = curr.next.next
else:
curr = curr.next
return head
if __name__ == '__main__':
head = ListNode(1, ListNode(1, ListNode(1, ListNode(2))))
print(f"Input: {head.getList()}")
Solution().deleteDuplicates(head)
print(f"Output: {head.getList()}")
| 25.645161 | 61 | 0.555975 | 94 | 795 | 4.574468 | 0.382979 | 0.093023 | 0.118605 | 0.083721 | 0.081395 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009174 | 0.314465 | 795 | 30 | 62 | 26.5 | 0.779817 | 0.042767 | 0 | 0 | 0 | 0 | 0.075099 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.291667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a723b358d7f83c67e86b4116405a9e819b1a93e | 1,015 | py | Python | src/ensae_teaching_cs/automation_students/quick_tasks.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 73 | 2015-05-12T13:12:11.000Z | 2021-12-21T11:44:29.000Z | src/ensae_teaching_cs/automation_students/quick_tasks.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 90 | 2015-06-23T11:11:35.000Z | 2021-03-31T22:09:15.000Z | src/ensae_teaching_cs/automation_students/quick_tasks.py | Jerome-maker/ensae_teaching_cs | 43ea044361ee60c00c85aea354a7b25c21c0fd07 | [
"MIT"
] | 65 | 2015-01-13T08:23:55.000Z | 2022-02-11T22:42:07.000Z | # -*- coding: utf-8 -*-
"""
@file
@brief Some automation helpers to grab mails from students about projects.
"""
def build_mailing_list(names, domain, format="{first}.{last}@{domain}"):
"""
Infers mails from a list of names.
@param names list of strings
@param domain something like ``ensae.fr``.
@param format mail format
@return list of mails
Examples :
::
DUPRE Xavier
Everything upper case is the last name,
everything lower case is the first name.
"""
mails = []
for name in names:
words = name.split()
first = []
last = []
for w in words:
if w.upper() == w:
last.append(w)
else:
first.append(w)
first = ".".join(s.lower() for s in first)
last = ".".join(s.lower() for s in last)
mail = format.format(first=first, last=last, domain=domain)
mails.append(mail)
return mails
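    # Worked example of the convention above (input is illustrative):
    # >>> build_mailing_list(["DUPRE Xavier"], "ensae.fr")
    # ['xavier.dupre@ensae.fr']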
| 24.756098 | 74 | 0.534975 | 123 | 1,015 | 4.398374 | 0.447154 | 0.066543 | 0.033272 | 0.048059 | 0.05915 | 0.05915 | 0 | 0 | 0 | 0 | 0 | 0.001517 | 0.350739 | 1,015 | 40 | 75 | 25.375 | 0.819423 | 0.415764 | 0 | 0 | 0 | 0 | 0.046816 | 0.043071 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a75b1892ff23f756dabba12dd76d620d96637a5 | 10,329 | py | Python | raipy/GUI.py | threemeninaboat3247/raipy | b924a950b40874107b6974c5e49eb24e3c97bf90 | [
"MIT"
] | null | null | null | raipy/GUI.py | threemeninaboat3247/raipy | b924a950b40874107b6974c5e49eb24e3c97bf90 | [
"MIT"
] | null | null | null | raipy/GUI.py | threemeninaboat3247/raipy | b924a950b40874107b6974c5e49eb24e3c97bf90 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 08:52:15 2017
@author: Yuki
"""
import sys
from PyQt5.QtGui import QFont,QIcon
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QToolBar,QAction,QMainWindow,QVBoxLayout,QWidget,QHBoxLayout,QTabWidget,QStatusBar,QTextEdit,QApplication,QWidgetAction,QMenuBar,QMenu,QTextBrowser
from raipy.Constant import *
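# The INITIAL/READY/MISTAKE/RUNNING state constants used below are assumed
# to come from raipy.Constant via the wildcard import above.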
from raipy.Controller import *
from raipy.FileManager import *
from raipy.GraphManager import *
from raipy.LCD_Display import *
from raipy.MyPyqtGraph import *
from raipy.Time import *
from raipy.Help import helpText
from raipy.Example import ExampleMenu
ICON='Icons//logo.png' #the relative path of the logo image
class MyMenuBar(QMenuBar):
    '''Custom menu bar. It holds a state that controls which actions are
    enabled; the state mirrors the QMainWindow's state and is kept up to
    date by connecting to the QMainWindow's stateSignal.'''
def __init__(self,ref):
super().__init__()
self.loadAction=QAction('Load a program', self)
self.loadAction.setShortcut('Ctrl+L')
self.tempAction = QAction('Output a template', self)
self.tempAction.setShortcut('Ctrl+T')
self.explaAction = QAction('About this program', self)
self.explaAction.setShortcut('Ctrl+H')
self.fileMenu =self.addMenu('File')
self.fileMenu.addAction(self.loadAction)
self.fileMenu.addAction(self.tempAction)
helpMenu=self.addMenu('Help')
helpMenu.addAction(self.explaAction)
self.exampleMenu=ExampleMenu('Examples',self)
helpMenu.addMenu(self.exampleMenu)
self.setState(ref.state)
ref.stateSignal.connect(self.setState)
def setState(self,end):
        '''All state transitions go through this method.'''
if end==RUNNING:
self.loadAction.setEnabled(False)
else:
self.loadAction.setEnabled(True)
class MyToolBar(QToolBar):
    '''Custom tool bar. It holds a state that controls which actions are
    enabled; the state mirrors the QMainWindow's state and is kept up to
    date by connecting to the QMainWindow's stateSignal.'''
def __init__(self,ref):
super().__init__()
self.exeAction=QAction('Run', self)
self.stopAction = QAction('Stop', self)
self.reloadAction = QAction('Reload', self)
self.addAction(self.exeAction)
self.addAction(self.stopAction)
self.addAction(self.reloadAction)
self.setState(ref.state)
ref.stateSignal.connect(self.setState)
def setState(self,end):
        '''All state transitions go through this method.'''
if end==INITIAL:
self.exeAction.setEnabled(False)
self.stopAction.setEnabled(False)
self.reloadAction.setEnabled(False)
elif end==READY:
self.exeAction.setEnabled(True)
self.stopAction.setEnabled(False)
self.reloadAction.setEnabled(True)
elif end==MISTAKE:
self.exeAction.setEnabled(False)
self.stopAction.setEnabled(False)
self.reloadAction.setEnabled(True)
elif end==RUNNING:
self.exeAction.setEnabled(False)
self.stopAction.setEnabled(True)
self.reloadAction.setEnabled(False)
class GUIWindow(QMainWindow):
stateSignal=pyqtSignal(int)
def __init__(self):
super().__init__()
self.state=INITIAL
self.setWindowIcon(QIcon('.\\icon\\python_logo.png'))
self.initUI()
self.setState(INITIAL)
        self.params = {}  # passed to programThread at creation so the controller can drive it
def initUI(self):
#add Menubar
menubar=MyMenuBar(self)
menubar.explaAction.triggered.connect(self.showExplanation)
self.setMenuBar(menubar)
        # attach the toolbar
toolbar=MyToolBar(self)
toolbar.exeAction.triggered.connect(self.exePressed)
toolbar.stopAction.triggered.connect(self.stopPressed)
self.addToolBar(toolbar)
#tab1
self.pathbox=MyPathBox()
self.pathbox.importSig.connect(self.fileAppointed)
menubar.exampleMenu.setFileManager(self.pathbox)
menubar.loadAction.triggered.connect(self.pathbox.showDialog)
menubar.tempAction.triggered.connect(self.pathbox.tempPressed)
toolbar.reloadAction.triggered.connect(self.pathbox.rePressed)
self.gm=MyGraphManager('Graphs',self.pathbox.get_output_labels,self.pathbox.get_output_units,\
self.pathbox.outputLabelChangeSig,self.pathbox.outputUnitChangeSig,self.pathbox.graphSettingSig,self)
vbox=QVBoxLayout()
vbox.addWidget(self.pathbox)
vbox.addWidget(self.gm)
setTab=QWidget()
setTab.setLayout(vbox)
#tab2
self.lcdContainer=MyLCDContainer(self.pathbox.get_output_labels,self.pathbox.get_output_units,\
self.pathbox.outputLabelChangeSig,self.pathbox.outputUnitChangeSig,self)
self.time=MyTimeBox()
        # size adjustment
sizePolicyTime=self.time.sizePolicy()
sizePolicyLcd=self.lcdContainer.sizePolicy()
sizePolicyTime.setHorizontalStretch(1)
sizePolicyLcd.setHorizontalStretch(1)
self.time.setSizePolicy(sizePolicyTime)
self.lcdContainer.setSizePolicy(sizePolicyLcd)
hbox=QHBoxLayout()
hbox.addWidget(self.lcdContainer)
hbox.addWidget(self.time)
displayTab=QWidget()
displayTab.setLayout(hbox)
#tab3
self.sContainer=MyContainer(self.pathbox.getSliders,self.pathbox.sliderChangeSig,MySlider,self)
self.bContainer=MyContainer(self.pathbox.getBools,self.pathbox.boolChangeSig,MyBool,self)
self.dContainer=MyContainer(self.pathbox.getDials,self.pathbox.dialChangeSig,MyDial,self)
self.fContainer=MyContainer(self.pathbox.getFloats,self.pathbox.floatChangeSig,MyFloat,self)
self.sContainer.valueChanged.connect(self.updateParam)
self.bContainer.valueChanged.connect(self.updateParam)
self.dContainer.valueChanged.connect(self.updateParam)
self.fContainer.valueChanged.connect(self.updateParam)
hbox2=QHBoxLayout()
hbox2.addWidget(self.sContainer)
hbox2.addWidget(self.bContainer)
hbox2.addWidget(self.dContainer)
hbox2.addWidget(self.fContainer)
controlTab=QWidget()
controlTab.setLayout(hbox2)
        # gather the tabs
self.qTab=QTabWidget()
        self.qTab.setStyleSheet('QTabWidget::tab-bar{alignment: center;}')  # center the tab bar to avoid mis-presses of the toolbar buttons
self.qTab.addTab(setTab,'setting')
self.qTab.addTab(displayTab,'display')
self.qTab.addTab(controlTab,'control')
self.setCentralWidget(self.qTab)
        # attach the status bar
self.status_bar = QStatusBar(self)
self.setStatusBar(self.status_bar)
        # draw the window
self.setGeometry(10, 60, 960,900)
self.setWindowTitle(' raipy')
import raipy
import os
path=os.path.dirname(os.path.abspath(raipy.__file__))+'\\'+ICON
self.setWindowIcon(QIcon(path))
self.show()
def exePressed(self):
if self.state==READY:
if self.pathbox.showDataDialog():
self.gm.initData()
self.initParams(self.sContainer.getInits(),self.bContainer.getInits(),self.dContainer.getInits(),self.fContainer.getInits())
self.thread=self.pathbox.program.programThread(self.params)
self.thread.outputSignal.connect(self.lcdContainer.update_data)
                self.thread.finished.connect(self.program_exit)  # for the case where all processing finishes without the stop button being pressed
self.thread.outputSignal.connect(self.gm.updateData)
self.thread.outputSignal.connect(self.pathbox.write_data)
self.thread.start()
self.time.startTimer()
self.setState(RUNNING)
self.qTab.setCurrentIndex(1)
def stopPressed(self):
if self.state==RUNNING:
            self.thread.stop()  # a stop-button stop and an end-of-processing stop are handled together, so the state is not changed here
self.thread.wait()
def showExplanation(self):
try:
self.text.showNormal()
except:
self.text=QTextBrowser()
self.text.setOpenExternalLinks(True)
self.text.setGeometry(20, 120, 800, 700)
self.text.setWindowTitle('Help')
self.text.setFont(QFont('TimesNewRoman',12))
self.text.setHtml(helpText)
self.text.setReadOnly(True)
self.text.show()
def demoPressed(self):
pass
def initParams(self,*mydicts):
self.params={}
for mydict in mydicts:
for key in mydict:
self.params[key]=mydict[key]
print('----------------control parameters---------------------\n')
print(self.params)
def updateParam(self,mydict):
for key in mydict:
self.params[key]=mydict[key]
def fileAppointed(self,success):
#switch the state based on whether import succeeded or not
if success:
self.setState(READY)
else:
self.setState(MISTAKE)
    # @classmethod
    # def instSearch(cls, mylist):
    #     # check whether the specified instruments are connected
    #     # currently not implemented. Might be removed.
    #     return True
def program_exit(self):
self.pathbox.data_file_close()
self.time.stopTimer()
self.setState(READY)
def setState(self,state):
        '''The state must always be changed through this method.'''
self.state=state
if self.state==INITIAL:
self.status_bar.showMessage('choose file')
elif self.state==READY:
self.status_bar.showMessage('ready to start the program')
elif self.state==MISTAKE:
            self.status_bar.showMessage('your program has at least one mistake; see the prompt window')
elif self.state==RUNNING:
self.status_bar.showMessage('program is running')
self.stateSignal.emit(self.state)
def closeEvent(self,event):
#to be called when the window is closed
self.pathbox.clean_up()
# Main entry point
if __name__ == '__main__':
app = QApplication(sys.argv)
ex=GUIWindow()
sys.exit(app.exec_()) | 38.255556 | 175 | 0.642173 | 1,011 | 10,329 | 6.501484 | 0.30366 | 0.048532 | 0.015974 | 0.012171 | 0.187433 | 0.155028 | 0.155028 | 0.147117 | 0.147117 | 0.136163 | 0 | 0.006616 | 0.253655 | 10,329 | 270 | 176 | 38.255556 | 0.846024 | 0.080163 | 0 | 0.150485 | 0 | 0 | 0.042673 | 0.011648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07767 | false | 0.004854 | 0.072816 | 0 | 0.169903 | 0.009709 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a7686edaf10f36226325c544e2b1973bd57d0ed | 1,215 | py | Python | screen.py | princesinghtomar/Classic-Brick-Breaker | acf339a1e242342f04ff27d5b0b508a87cf88014 | [
"MIT"
] | null | null | null | screen.py | princesinghtomar/Classic-Brick-Breaker | acf339a1e242342f04ff27d5b0b508a87cf88014 | [
"MIT"
] | null | null | null | screen.py | princesinghtomar/Classic-Brick-Breaker | acf339a1e242342f04ff27d5b0b508a87cf88014 | [
"MIT"
] | null | null | null | import numpy as np
import sys
class screen:
'''
    This class handles creating and displaying the game screen
'''
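    # Illustrative usage (the frame dimensions are arbitrary):
    #
    #   scr = screen(20, 40)
    #   scr.create_scenery()
    #   scr.showscreen()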
def __init__(self,HEIGHT,WIDTH):
self.width = WIDTH
self.height = HEIGHT
self.screenarray = np.full((HEIGHT,WIDTH),' ', dtype='<U25')
def showscreen(self):
'''
        Print the current screen buffer to stdout
'''
for i in range(0,self.height):
for j in range(0,self.width):
sys.stdout.write(self.screenarray[i][j])
sys.stdout.write('\n')
def create_scenery(self):
'''
Used to create walls and top regions of the game screen
'''
        for i in range(0, self.height):
            for j in range(0, self.width):
                # Horizontal borders: top edge, header divider (row 4), bottom edge
                if(not i or i == 4 or i == self.height-1):
                    self.screenarray[i][j] = '-'
                # Vertical borders: the left and right walls
                if(not j or j == self.width - 1):
                    self.screenarray[i][j] = '|'
                # Corners where a horizontal border meets a wall
                if((not i or i == 4 or i == self.height-1) and (not j or j == self.width-1)):
                    self.screenarray[i][j] = '*'
def return_screenarray(self):
'''
Return : self.screenarray
'''
return self.screenarray | 31.153846 | 91 | 0.512757 | 158 | 1,215 | 3.905063 | 0.322785 | 0.170178 | 0.051864 | 0.077796 | 0.377634 | 0.377634 | 0.377634 | 0.34684 | 0.34684 | 0.34684 | 0 | 0.015365 | 0.357202 | 1,215 | 39 | 92 | 31.153846 | 0.774648 | 0.137449 | 0 | 0.173913 | 0 | 0 | 0.010384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.086957 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a76e4f2e1c66a78ebd44d4fa3dc886e8da75a78 | 8,061 | py | Python | mall/apps/oauth/views.py | xxbsg/meiduo | 0e82628833c4b482884cd392b8d22cb8558f1ffd | [
"MIT"
] | null | null | null | mall/apps/oauth/views.py | xxbsg/meiduo | 0e82628833c4b482884cd392b8d22cb8558f1ffd | [
"MIT"
] | null | null | null | mall/apps/oauth/views.py | xxbsg/meiduo | 0e82628833c4b482884cd392b8d22cb8558f1ffd | [
"MIT"
] | null | null | null | import webbrowser
from django.shortcuts import render
# Create your views here.
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from QQLoginTool.QQtool import OAuthQQ
from libs import sinaweibopy3
from mall import settings
from oauth.models import OAuthQQUser, OAuthSinaUser
from oauth.serializers import OAuthQQUserSerializer, OAuthSinaUserSerializer
from oauth.utils import generic_open_id
"""
当用户点击qq按钮的时候,回发送一个请求
我们后端返回给他一个url(url是根据文档拼接出来的)
GET /oauth/qq/status/
"""
class OAuthQQURLAPIView(APIView):
    def get(self, request):
        state = '/'
        # 1. Create an OAuthQQ instance
        oauth = OAuthQQ(
            client_id=settings.QQ_CLIENT_ID,
            client_secret=settings.QQ_CLIENT_SECRET,
            redirect_uri=settings.QQ_REDIRECT_URI,
            state=state
        )
        # 2. Get the redirect URL
        auth_url = oauth.get_qq_url()
        return Response({'auth_url': auth_url})
        # return Response({'auth_url': 'http://www.itcast.cn'})
"""
1.当用户同意授权登录这个时候会返回一个code
2.我们用code换取token
3.有了token 我们再获取openid
"""
"""
1.分析需求 (到底要干什么)
2.把需要做的事情写下来(把思路梳理清楚)
3.路由和请求方式
4.确定视图
5.按照步骤实现功能
前段接受到用户的同意之后,前端应该奖这个code发送给后端
1.后端接受数据
2.用code换token
3.用token 换openid
GET /oauth/qq/users/?code=xxxx
"""
class OAuthQQUserAPIView(APIView):
    def get(self, request):
        # 1. Receive the data
        params = request.query_params
        code = params.get('code')
        if code is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # 2. Exchange the code for a token
        oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID,
                        client_secret=settings.QQ_CLIENT_SECRET,
                        redirect_uri=settings.QQ_REDIRECT_URI)
        token = oauth.get_access_token(code)
        # 'EDBEC8459930A5A697736542BDC820FB'
        # https://graph.qq.com/oauth2.0/me?access_token=EDBEC8459930A5A697736542BDC820FB
        # 3. Exchange the token for an openid
        openid = oauth.get_open_id(token)
        """
        The openid uniquely identifies the user on this site; the site can
        store this ID to recognize the user at the next login.
        There are two cases when fetching the openid:
        1. the user has bound this account before
        2. the user has not bound this account before
        """
        # Case 1: look up the record by openid
        try:
            qquser = OAuthQQUser.objects.get(openid=openid)
        except OAuthQQUser.DoesNotExist:
            # Not found: the user has not bound this account yet.
            # The openid is sensitive, so it is wrapped before being sent out,
            # and the binding token should have an expiry.
            """
            Steps for extracting code into a helper:
            1. Define a function
            2. Copy the code over, fix what breaks, and turn missing variables into parameters
            3. Verify
            """
            # s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
            #
            # # 2. Assemble the data
            # data = {
            #     'openid': openid
            # }
            #
            # # 3. Let the serializer process the data
            # token = s.dumps(data)
            token = generic_open_id(openid)
            return Response({'access_token': token})
        else:
            # Found: log the user in directly
            from rest_framework_jwt.settings import api_settings
            jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
            jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
            payload = jwt_payload_handler(qquser.user)
            token = jwt_encode_handler(payload)
            return Response({
                'token': token,
                'username': qquser.user.username,
                'user_id': qquser.user.id
            })
"""
第二种
绑定:
当用户点击绑定的时候,我们需要将手机号,密码,短信验证码和加密的openid传递过来
1.接受数据
2.对数据进行效验
3.保存数据
4.返回响应
POST /oauth/qq/users/
"""
    def post(self, request):
        # 1. Receive the data
        data = request.data
        # 2. Validate the data
        serializer = OAuthQQUserSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        # 3. Save the data
        qquser = serializer.save()
        # 4. Return the response, which should include the token
        from rest_framework_jwt.settings import api_settings
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(qquser.user)
        token = jwt_encode_handler(payload)
        return Response({
            'token': token,
            'username': qquser.user.username,
            'user_id': qquser.user.id
        })
class OAuthSinaUrlAPIView(APIView):
    def get(self, request):
        # Step 1: register an app with Weibo, then define the app key, app secret and redirect URL
        APP_KEY = '1084785763'
        APP_SECRET = '49cbfb0acc4dc49b71e4db88c78c9585'
        REDIRECT_URL = 'http://www.meiduo.site:8080/sina_callback.html'
        # Step 2: build the client and get the authorization URL
        global client  # shared with OAuthSinaUserAPIView below
        client = sinaweibopy3.APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=REDIRECT_URL)
        url = client.get_authorize_url()
        return Response({'auth_url': url})
        # print(url)
class OAuthSinaUserAPIView(APIView):
    def get(self, request):
        # 1. Receive the data
        params = request.query_params
        code = params.get('code')
        if code is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # 2. Exchange the code for a token
        try:
            # APP_KEY = '1084785763'
            # APP_SECRET = '49cbfb0acc4dc49b71e4db88c78c9585'
            # REDIRECT_URL = 'http://www.meiduo.site:8080/sina_callback.html'
            # global client = sinaweibopy3.APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=REDIRECT_URL)
            global client
            result = client.request_access_token(code)  # pass in the CODE obtained from the authorization redirect
            openid = result.access_token
            # At this point, the access_token and expires_in should be saved,
            # because there is a validity period.
            # If you need to post to Weibo multiple times in a short period,
            # the token can be reused without having to acquire it every time.
            # client.set_access_token(result.access_token, result.expires_in)
            #
            # openid = client.get.account__get_uid()
        except ValueError:
            return Response(status=400)
        try:
            sinauser = OAuthSinaUser.objects.get(access_token=openid)
        except OAuthSinaUser.DoesNotExist:
            # Not found: the user has not bound this account yet.
            # The openid is sensitive, so it is wrapped before being sent out,
            # and the binding token should have an expiry.
            """
            Steps for extracting code into a helper:
            1. Define a function
            2. Copy the code over, fix what breaks, and turn missing variables into parameters
            3. Verify
            """
            # s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
            #
            # # 2. Assemble the data
            # data = {
            #     'openid': openid
            # }
            #
            # # 3. Let the serializer process the data
            # token = s.dumps(data)
            token = generic_open_id(openid)
            return Response({'access_token': token})
        else:
            # Found: log the user in directly
            from rest_framework_jwt.settings import api_settings
            jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
            jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
            payload = jwt_payload_handler(sinauser.user)
            token = jwt_encode_handler(payload)
            return Response({
                'token': token,
                'username': sinauser.user.username,
                'user_id': sinauser.user.id
            })
    def post(self, request):
        # 1. Receive the data
        data = request.data
        # 2. Validate the data
        serializer = OAuthSinaUserSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        # 3. Save the data
        sinauser = serializer.save()
        # 4. Return the response, which should include the token
        from rest_framework_jwt.settings import api_settings
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(sinauser.user)
        token = jwt_encode_handler(payload)
        return Response({
            'token': token,
            'username': sinauser.user.username,
            'user_id': sinauser.user.id
| 30.534091 | 119 | 0.608485 | 854 | 8,061 | 5.548009 | 0.263466 | 0.035458 | 0.035458 | 0.035458 | 0.596876 | 0.571549 | 0.571549 | 0.571549 | 0.571549 | 0.571549 | 0 | 0.030254 | 0.311128 | 8,061 | 263 | 120 | 30.65019 | 0.822979 | 0.190919 | 0 | 0.612069 | 0 | 0 | 0.03891 | 0.005738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.12931 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a77fbc4decf7e17c5742e14d9eb6b9e49ab0a14 | 2,434 | py | Python | zci_bio/alignments/run_muscle.py | CroP-BioDiv/zcitools | 52332b4013486e983f962c085236189292228a02 | [
"MIT"
] | null | null | null | zci_bio/alignments/run_muscle.py | CroP-BioDiv/zcitools | 52332b4013486e983f962c085236189292228a02 | [
"MIT"
] | null | null | null | zci_bio/alignments/run_muscle.py | CroP-BioDiv/zcitools | 52332b4013486e983f962c085236189292228a02 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import yaml
import shutil
from concurrent.futures import ThreadPoolExecutor
import multiprocessing
from zipfile import ZipFile
_DEFAULT_EXE_NAME = 'muscle'
_ENV_VAR = 'MUSCLE_EXE'
# From my observations ...
#
# Calculation strategy:
# - First run the short sequences, one thread each, sorted from the longest of the short ones down to the shortest.
# - Then run the long sequences sequentially.
_install_instructions = """
MUSCLE is not installed!
Check web page http://www.drive5.com/muscle/downloads.htm for installation instructions.
There are two ways for this script to locate executable to run:
- environment variable {env_var} points to executable location,
- or executable is called {exe} and placed on the PATH.
"""
# Note: it would be good if all scripts accepted the same environment-variable format
def _find_exe(default_exe, env_var):
exe = os.getenv(env_var, default_exe)
if not shutil.which(exe):
print(_install_instructions.format(exe=default_exe, env_var=env_var))
raise ValueError(f'No MUSCLE installed! Tried {exe}')
return exe
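# Illustrative: point the script at a specific binary before running
# (the path here is hypothetical):
#
#   MUSCLE_EXE=/opt/muscle/bin/muscle python run_muscle.py 4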
def _alignment_file(f):
return os.path.join(os.path.dirname(f), 'alignment.fa')
def _run_single(muscle_exe, filename, output_file):
    # Fastest would be: -maxiters 1 -diags
    # -maxiters 2 is a compromise between speed and accuracy
cmd = f"{muscle_exe} -in {filename} -out {output_file} -maxiters 2"
print(f"Command: {cmd}")
os.system(cmd)
def run(locale=True, threads=None):
# Note: run from step's directory!!!
muscle_exe = _find_exe(_DEFAULT_EXE_NAME, _ENV_VAR)
threads = threads or multiprocessing.cpu_count()
outputs = []
# Files to run
with open('finish.yml', 'r') as r:
seq_files = yaml.load(r, Loader=yaml.CLoader) # dict with attrs: filename, short, max_seq_length
# if short_files:
# with ThreadPoolExecutor(max_workers=threads) as executor:
# for d in short_files:
# outputs.append(_alignment_file(d['filename']))
# executor.submit(_run_single, muscle_exe, d['filename'], outputs[-1], 1)
for d in seq_files:
outputs.append(_alignment_file(d['filename']))
_run_single(muscle_exe, d['filename'], outputs[-1])
# Zip files
if not locale:
with ZipFile('output.zip', 'w') as output:
for f in outputs:
output.write(f)
if __name__ == '__main__':
import sys
run(locale=False, threads=int(sys.argv[1]) if len(sys.argv) > 1 else None)
| 30.425 | 105 | 0.686935 | 343 | 2,434 | 4.693878 | 0.44898 | 0.026087 | 0.024224 | 0.03354 | 0.11677 | 0.093168 | 0.093168 | 0.043478 | 0 | 0 | 0 | 0.005168 | 0.205012 | 2,434 | 79 | 106 | 30.810127 | 0.826873 | 0.266639 | 0 | 0 | 0 | 0 | 0.271647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.162791 | 0.023256 | 0.302326 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a78400249697ab0ee51adf0fdb97c8a799f6570 | 7,255 | py | Python | tests/test_serialization.py | andriyor/pylibmc | 4cda09a3caac391c41d8231618170d606fb1527a | [
"BSD-3-Clause"
] | 226 | 2015-01-04T03:25:22.000Z | 2022-03-08T21:10:03.000Z | tests/test_serialization.py | andriyor/pylibmc | 4cda09a3caac391c41d8231618170d606fb1527a | [
"BSD-3-Clause"
] | 119 | 2015-01-04T15:23:45.000Z | 2022-03-29T16:38:58.000Z | tests/test_serialization.py | andriyor/pylibmc | 4cda09a3caac391c41d8231618170d606fb1527a | [
"BSD-3-Clause"
] | 53 | 2015-01-15T19:25:50.000Z | 2022-03-02T20:47:01.000Z | import datetime
import json
import pickle
from nose.tools import eq_
import pylibmc
import _pylibmc
from pylibmc.test import make_test_client
from tests import PylibmcTestCase
from tests import get_refcounts
f_none = 0
f_pickle, f_int, f_long, f_zlib, f_text = (1 << i for i in range(5))
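# Single-bit flag values: f_pickle=1, f_int=2, f_long=4, f_zlib=8, f_text=16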
class SerializationMethodTests(PylibmcTestCase):
"""Coverage tests for serialize and deserialize."""
def test_integers(self):
c = make_test_client(binary=True)
eq_(c.serialize(1), (b'1', f_long))
eq_(c.serialize(2**64), (b'18446744073709551616', f_long))
eq_(c.deserialize(b'18446744073709551616', f_long), 2**64)
eq_(c.deserialize(b'1', f_long), 1)
def test_nonintegers(self):
# tuples (python_value, (expected_bytestring, expected_flags))
SERIALIZATION_TEST_VALUES = [
# booleans are just ints
(True, (b'1', f_int)),
(False, (b'0', f_int)),
# bytestrings
(b'asdf', (b'asdf', f_none)),
(b'\xb5\xb1\xbf\xed\xa9\xc2{8', (b'\xb5\xb1\xbf\xed\xa9\xc2{8', f_none)),
(b'', (b'', f_none)),
# unicode objects
('åäö', ('åäö'.encode(), f_text)),
('', (b'', f_text)),
# objects
(datetime.date(2015, 12, 28), (pickle.dumps(datetime.date(2015, 12, 28),
protocol=-1), f_pickle)),
]
c = make_test_client(binary=True)
for value, serialized_value in SERIALIZATION_TEST_VALUES:
eq_(c.serialize(value), serialized_value)
eq_(c.deserialize(*serialized_value), value)
class SerializationTests(PylibmcTestCase):
"""Test coverage for overriding serialization behavior in subclasses."""
def test_override_deserialize(self):
class MyClient(pylibmc.Client):
ignored = []
def deserialize(self, bytes_, flags):
try:
return super().deserialize(bytes_, flags)
except Exception as error:
self.ignored.append(error)
raise pylibmc.CacheMiss
global MyObject # Needed by the pickling system.
class MyObject:
def __getstate__(self):
return dict(a=1)
def __eq__(self, other):
return type(other) is type(self)
def __setstate__(self, d):
assert d['a'] == 1
c = make_test_client(MyClient, behaviors={'cas': True})
eq_(c.get('notathing'), None)
refcountables = ['foo', 'myobj', 'noneobj', 'myobj2', 'cachemiss']
initial_refcounts = get_refcounts(refcountables)
c['foo'] = 'foo'
c['myobj'] = MyObject()
c['noneobj'] = None
c['myobj2'] = MyObject()
# Show that everything is initially regular.
eq_(c.get('myobj'), MyObject())
eq_(get_refcounts(refcountables), initial_refcounts)
eq_(c.get_multi(['foo', 'myobj', 'noneobj', 'cachemiss']),
dict(foo='foo', myobj=MyObject(), noneobj=None))
eq_(get_refcounts(refcountables), initial_refcounts)
eq_(c.gets('myobj2')[0], MyObject())
eq_(get_refcounts(refcountables), initial_refcounts)
# Show that the subclass can transform unpickling issues into a cache miss.
del MyObject # Break unpickling
eq_(c.get('myobj'), None)
eq_(get_refcounts(refcountables), initial_refcounts)
eq_(c.get_multi(['foo', 'myobj', 'noneobj', 'cachemiss']),
dict(foo='foo', noneobj=None))
eq_(get_refcounts(refcountables), initial_refcounts)
eq_(c.gets('myobj2'), (None, None))
eq_(get_refcounts(refcountables), initial_refcounts)
# The ignored errors are "AttributeError: test.test_client has no MyObject"
eq_(len(MyClient.ignored), 3)
assert all(isinstance(error, AttributeError) for error in MyClient.ignored)
def test_refcounts(self):
SENTINEL = object()
DUMMY = b"dummy"
KEY = b"fwLiDZKV7IlVByM5bVDNkg"
VALUE = "PVILgNVNkCfMkQup5vkGSQ"
class MyClient(_pylibmc.client):
"""Always serialize and deserialize to the same constants."""
def serialize(self, value):
return DUMMY, 1
def deserialize(self, bytes_, flags):
return SENTINEL
refcountables = [1, SENTINEL, DUMMY, KEY, VALUE]
c = make_test_client(MyClient)
initial_refcounts = get_refcounts(refcountables)
c.set(KEY, VALUE)
eq_(get_refcounts(refcountables), initial_refcounts)
assert c.get(KEY) is SENTINEL
eq_(get_refcounts(refcountables), initial_refcounts)
eq_(c.get_multi([KEY]), {KEY: SENTINEL})
eq_(get_refcounts(refcountables), initial_refcounts)
c.set_multi({KEY: True})
eq_(get_refcounts(refcountables), initial_refcounts)
def test_override_serialize(self):
class MyClient(pylibmc.Client):
def serialize(self, value):
return json.dumps(value).encode('utf-8'), 0
def deserialize(self, bytes_, flags):
assert flags == 0
return json.loads(bytes_.decode('utf-8'))
c = make_test_client(MyClient)
c['foo'] = (1, 2, 3, 4)
# json turns tuples into lists:
eq_(c['foo'], [1, 2, 3, 4])
raised = False
try:
c['bar'] = object()
except TypeError:
raised = True
assert raised
def _assert_set_raises(self, client, key, value):
"""Assert that set operations raise a ValueError when appropriate.
This is in a separate method to avoid confusing the reference counts.
"""
raised = False
try:
client[key] = value
except ValueError:
raised = True
assert raised
def test_invalid_flags_returned(self):
# test that nothing bad (memory leaks, segfaults) happens
# when subclasses implement `deserialize` incorrectly
DUMMY = b"dummy"
BAD_FLAGS = object()
KEY = 'foo'
VALUE = object()
refcountables = [KEY, DUMMY, VALUE, BAD_FLAGS]
class MyClient(pylibmc.Client):
def serialize(self, value):
return DUMMY, BAD_FLAGS
c = make_test_client(MyClient)
initial_refcounts = get_refcounts(refcountables)
self._assert_set_raises(c, KEY, VALUE)
eq_(get_refcounts(refcountables), initial_refcounts)
def test_invalid_flags_returned_2(self):
DUMMY = "ab"
KEY = "key"
VALUE = 123456
refcountables = [DUMMY, KEY, VALUE]
class MyClient(pylibmc.Client):
def serialize(self, value):
return DUMMY
c = make_test_client(MyClient)
initial_refcounts = get_refcounts(refcountables)
self._assert_set_raises(c, KEY, VALUE)
eq_(get_refcounts(refcountables), initial_refcounts)
try:
c.set_multi({KEY: DUMMY})
except ValueError:
raised = True
assert raised
eq_(get_refcounts(refcountables), initial_refcounts)
| 34.383886 | 85 | 0.596003 | 813 | 7,255 | 5.121771 | 0.234932 | 0.051873 | 0.102065 | 0.084294 | 0.413064 | 0.346782 | 0.284342 | 0.244717 | 0.209174 | 0.195485 | 0 | 0.022261 | 0.294142 | 7,255 | 210 | 86 | 34.547619 | 0.790861 | 0.110407 | 0 | 0.355263 | 0 | 0 | 0.052155 | 0.014991 | 0 | 0 | 0 | 0 | 0.065789 | 1 | 0.118421 | false | 0 | 0.059211 | 0.046053 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a79201a327607a64ac87de9dd8647d537e216f6 | 20,200 | py | Python | visualization/views.py | FSavoy/visuo-server | d9c93ec7ae9dd033f3f0290381ddbac413bb6f9a | [
"BSD-3-Clause"
] | 2 | 2017-11-16T08:32:46.000Z | 2018-04-02T13:36:42.000Z | visualization/views.py | FSavoy/visuo-server | d9c93ec7ae9dd033f3f0290381ddbac413bb6f9a | [
"BSD-3-Clause"
] | null | null | null | visualization/views.py | FSavoy/visuo-server | d9c93ec7ae9dd033f3f0290381ddbac413bb6f9a | [
"BSD-3-Clause"
] | 2 | 2017-11-16T08:33:52.000Z | 2021-05-12T06:31:54.000Z | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from data.models import SkyPicture, WeatherMeasurement, RadiosondeMeasurement, MeasuringDevice
from forms import DateStationForm, DateForm
from django.shortcuts import redirect
from django.conf import settings
import numpy as np
import json
@login_required
def show_image(request, shift=0):
"""
Generates the visualization page, fetching all related data (images, weather and radiosonde)
"""
# If there is no sky imager device installed, redirect towards the images part
if not SkyPicture.objects.exists():
return redirect('pictures/')
# Check if the weather station form should be shown
if WeatherMeasurement.objects.exists():
weather = True
else:
weather = False
args = {}
args["title"] = "Data visualization"
if request.POST or 'POST_map' in request.session:
if request.POST:
form_values = request.POST
request.session['POST_map'] = form_values
else:
form_values = request.session['POST_map']
if weather:
form = DateStationForm(form_values)
else:
form = DateForm(form_values)
if form.is_valid():
queryDate = form.cleaned_data['date']
device = MeasuringDevice.objects.get(id = form.cleaned_data['imager'])
            # Fall back to the first whole-sky imager that actually has pictures
            if not device.skypicture_set.exists():
                device = [device for device in MeasuringDevice.objects.all() if device.type == 'W' and device.skypicture_set.exists()][0]
queryDate = SkyPicture.objects.get_closest_to(queryDate, device)
if shift == '1':
queryDateNew = SkyPicture.objects.get_next(queryDate, device)
if queryDateNew:
queryDate = queryDateNew
elif shift == '-1':
queryDateNew = SkyPicture.objects.get_previous(queryDate, device)
if queryDateNew:
queryDate = queryDateNew
if weather:
station = MeasuringDevice.objects.get(id = form.cleaned_data['station'])
else:
            device = [device for device in MeasuringDevice.objects.all() if device.type == 'W' and device.skypicture_set.exists()][0]
firstImage = SkyPicture.objects.filter(device = device).order_by('-date','-time')[0]
queryDate = firstImage.date
if weather:
station = [station for station in MeasuringDevice.objects.all() if station.type == 'S'][0]
    else:
        # No POST data yet: default to a form matching the available devices
        form = DateStationForm() if weather else DateForm()
        device = [device for device in MeasuringDevice.objects.all() if device.type == 'W' and device.skypicture_set.exists()][0]
firstImage = SkyPicture.objects.filter(device = device).order_by('-date','-time')[0]
queryDate = firstImage.date
if weather:
station = [station for station in MeasuringDevice.objects.all() if station.type == 'S'][0]
# Generating data for the new form with the actual retrieved data
if weather:
new_data = {'date': unicode(queryDate), 'station': station.id, 'imager': device.id}
form = DateStationForm(new_data)
else:
new_data = {'date': unicode(queryDate), 'imager': device.id}
form = DateForm(new_data)
request.session['POST_map'] = new_data
args['form'] = form
args['longCtr'] = device.location.x
args['latCtr'] = device.location.y
# Check if there are images before or after to show Next or Previous buttons
if SkyPicture.objects.get_next(queryDate, device):
args['next'] = 1
if SkyPicture.objects.get_previous(queryDate, device):
args['previous'] = 1
#
# Fetching images
#
imagesData = SkyPicture.objects.filter(device = device, date = queryDate).order_by('time')
images = []
for im in imagesData:
thisIm = {}
thisIm['url'] = im.image.url
thisIm['url_tn'] = im.image.url.replace('.jpg', '.125x125.jpg')
thisIm['undistorted'] = im.undistorted.url
thisIm['time'] = im.time.strftime("%H:%M:%S")
thisIm['id'] = str(im.id)
images.append(thisIm)
args['images'] = images
#
# Fetching weather data
#
temperature = []
humidity = []
dew_point = []
wind_speed = []
wind_direction = []
pressure = []
rainfall_rate = []
solar_radiation = []
uv_index = []
cloud_height = []
if not weather:
args['boolWeather'] = False
else:
measurementsData = WeatherMeasurement.objects.filter(date = queryDate, device = station).order_by('time')
if not measurementsData:
args['boolWeather'] = False
else:
args['boolWeather'] = True
            # Collect each available field as a list of {time, value} points
            weather_fields = [
                ('temperature', temperature), ('humidity', humidity),
                ('dew_point', dew_point), ('wind_speed', wind_speed),
                ('wind_direction', wind_direction), ('pressure', pressure),
                ('rainfall_rate', rainfall_rate), ('solar_radiation', solar_radiation),
                ('uv_index', uv_index),
            ]
            for meas in measurementsData:
                time_str = meas.time.strftime("%H:%M:%S")
                for field_name, series in weather_fields:
                    value = getattr(meas, field_name)
                    if value is not None:
                        series.append({'time': time_str, 'value': value})
                # Approximate cloud base height in metres from the spread between
                # temperature and dew point (about 125 m per degree C)
                if meas.temperature is not None and meas.dew_point is not None:
                    cloud_height.append({'time': time_str, 'value': round(125 * (meas.temperature - meas.dew_point), 1)})
# Giving weather data (potentially empty) to the template
args['temperature'] = temperature
args['humidity'] = humidity
args['dew_point'] = dew_point
args['wind_speed'] = wind_speed
args['wind_direction'] = wind_direction
args['pressure'] = pressure
args['rainfall_rate'] = rainfall_rate
args['solar_radiation'] = solar_radiation
args['uv_index'] = uv_index
args['cloud_height'] = cloud_height
    # Items for the form: only offer series that actually contain data
    item_labels = [
        (temperature, 'temperature', 'Temperature (deg. C)'),
        (humidity, 'humidity', 'Humidity (%)'),
        (dew_point, 'dew_point', 'Dew point (deg. C)'),
        (cloud_height, 'cloud_height', 'Cloud base height (m.)'),
        (wind_speed, 'wind_speed', 'Wind speed (m/s)'),
        (wind_direction, 'wind_direction', 'Wind direction azimuth (deg.)'),
        (pressure, 'pressure', 'Pressure (hPa)'),
        (rainfall_rate, 'rainfall_rate', 'Rainfall rate (mm/hr)'),
        (solar_radiation, 'solar_radiation', 'Solar radiation (W/m2)'),
        (uv_index, 'uv_index', 'UV index'),
    ]
    items = [(key, label) for series, key, label in item_labels if series]
    args['items'] = json.dumps(items)
#
# Fetching radiosonde data
#
    altitudes = np.arange(0, 18, 0.01)

    def radiosonde_profile(queryset):
        '''Interpolate each radiosonde variable onto the regular altitude grid
        (km). Returns (values, samples): values maps variable codes to the
        interpolated series, samples maps codes to the raw sample heights (m).'''
        variables = [
            ('PRES', 'pressure'),
            ('TEMP', 'temperature'),
            ('DWPT', 'dew_point'),
            ('RELH', 'rel_humidity'),
            ('DRCT', 'wind_direction'),
            ('SKNT', 'wind_speed'),
        ]
        values = {}
        samples = {}
        for code, attr in variables:
            sample_heights = []
            heights_km = []
            raw_values = []
            for rad in queryset:
                value = getattr(rad, attr)
                if rad.height and value:
                    sample_heights.append(rad.height)
                    heights_km.append(rad.height / 1000)
                    raw_values.append(value)
            samples[code] = sample_heights
            values[code] = np.interp(altitudes, heights_km, raw_values).tolist()
        # Critical humidity profile: altitudes where the relative humidity
        # exceeds the critical value are flagged as cloud layers
        alpha = 1.0
        beta = np.sqrt(3)
        sigma = np.array(values['PRES']) / values['PRES'][0]
        chum = 1 - alpha * sigma * (1 - sigma) * (1 + beta * (sigma - 0.5))
        values['clouds'] = np.int_(np.array(values['RELH']) > chum * 100).tolist()
        values['HGHT'] = altitudes.tolist()
        return values, samples

    # The same processing applies to the morning (AM) and evening (PM) soundings
    for sounding in ('AM', 'PM'):
        radiosondeData = RadiosondeMeasurement.objects.filter(date=queryDate, time=sounding).order_by('height')
        if not radiosondeData:
            args['boolRadiosonde' + sounding] = False
            args['allValuesRad' + sounding] = []
            args['allSamplesRad' + sounding] = []
        else:
            args['boolRadiosonde' + sounding] = True
            values, samples = radiosonde_profile(radiosondeData)
            args['allValuesRad' + sounding] = values
            args['allSamplesRad' + sounding] = samples
args['itemsRad'] = {'Pressure (hPa)': 'PRES', 'Temperature (deg. C)': 'TEMP', 'Dew point (deg. C)': 'DWPT', 'Rel. humidity (%)': 'RELH', 'Wind direction (deg.)': 'DRCT', 'Wind speed (knot)': 'SKNT'}
args['googleKey'] = settings.GOOGLE_MAPS_API_KEY
return render(request, 'visualization/index.html', args)
| 44.395604 | 202 | 0.592574 | 2,242 | 20,200 | 5.095897 | 0.099911 | 0.023632 | 0.009453 | 0.026258 | 0.532254 | 0.374267 | 0.169716 | 0.133304 | 0.127177 | 0.127177 | 0 | 0.009115 | 0.299406 | 20,200 | 454 | 203 | 44.493392 | 0.798191 | 0.030297 | 0 | 0.183727 | 0 | 0 | 0.068682 | 0.001227 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002625 | false | 0 | 0.020997 | 0 | 0.028871 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a7cc214e2f4821ed704a062a0b2c147e5cd7967 | 830 | py | Python | 2021_05_07_ProblemSolving/nextkhansole.py | lnugraha/code-in-place-sp21-extra | f70059749b597f56ec355a3725b864c12c8d6444 | [
"MIT"
] | 1 | 2021-05-19T22:58:10.000Z | 2021-05-19T22:58:10.000Z | 2021_05_07_ProblemSolving/nextkhansole.py | lnugraha/code-in-place-sp21-extra | f70059749b597f56ec355a3725b864c12c8d6444 | [
"MIT"
] | null | null | null | 2021_05_07_ProblemSolving/nextkhansole.py | lnugraha/code-in-place-sp21-extra | f70059749b597f56ec355a3725b864c12c8d6444 | [
"MIT"
] | 1 | 2021-05-13T15:26:14.000Z | 2021-05-13T15:26:14.000Z | import random
def main():
    # Count how many questions have been answered correctly in a row
counter = 0
while True:
if counter != 3:
numOne = random.randint(0, 10)
numTwo = random.randint(0, 10)
print("What is the addition of {} and {}".format(numOne, numTwo))
answer = input("Your answer is ")
answer = int(answer)
if answer == (numOne + numTwo):
counter = counter + 1
print("Correct answer! You have answered {} questions correctly".format(counter))
else:
counter = 0
print("Wrong answer, the game is reset")
else:
print("The game ends here since you have answered 3 questions correctly")
break
if __name__ == "__main__":
main()
| 34.583333 | 97 | 0.546988 | 92 | 830 | 4.847826 | 0.5 | 0.035874 | 0.06278 | 0.071749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020755 | 0.361446 | 830 | 23 | 98 | 36.086957 | 0.820755 | 0.068675 | 0 | 0.190476 | 0 | 0 | 0.268482 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.095238 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a7f1b31a9abb71bba063204bbd924f084e3517c | 1,213 | py | Python | freeze_builds.py | tzaeru/NWNLauncher | 1de07b4c503805134112c9e2010aa85b447eaf03 | [
"WTFPL"
] | null | null | null | freeze_builds.py | tzaeru/NWNLauncher | 1de07b4c503805134112c9e2010aa85b447eaf03 | [
"WTFPL"
] | null | null | null | freeze_builds.py | tzaeru/NWNLauncher | 1de07b4c503805134112c9e2010aa85b447eaf03 | [
"WTFPL"
] | null | null | null | import sys, os
from cx_Freeze import setup, Executable
import subprocess
subprocess.call("set TCL_LIBRARY=C:\Program Files (x86)\Python35-32\tcl\tcl8.6", shell=True)
subprocess.call("set TK_LIBRARY=C:\Program Files (x86)\Python35-32\tcl\tk8.6", shell=True)
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"build_exe":"build",
"packages": ["os"],
"include_files":[("config/potm", "config/potm"),
("config/main_config.toml", "config/main_config.toml"),
("winmtr", "winmtr")]}
#("windows_manifest.xml", "NWN Launcher.xml.manifest")
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup( name = "guifoo",
version = "0.1",
description = "My GUI application!",
options = {"build_exe": build_exe_options},
executables = [Executable("main.py", base=base, targetName="NWN Launcher.exe", icon="ICO/nwn_composite_RA8_icon.ico")])
subprocess.call('mt -manifest windows_manifest.xml -outputresource:"build/NWN Launcher.exe"', shell=True)
os.remove("build/tk/images")
os.remove("build/tk/demos")
os.remove("build/tcl/tzdata") | 36.757576 | 127 | 0.71723 | 169 | 1,213 | 5.047337 | 0.52071 | 0.037515 | 0.045721 | 0.046893 | 0.084408 | 0.084408 | 0.084408 | 0.084408 | 0 | 0 | 0 | 0.021576 | 0.121187 | 1,213 | 33 | 128 | 36.757576 | 0.778612 | 0.182193 | 0 | 0 | 0 | 0 | 0.464575 | 0.202429 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a80d4a9f3eb8283798035348bd287f228b0488d | 1,074 | py | Python | plot.py | sahilm75/EEG-based-classification | 4555f56f22596836c28a47d9694be9a3d23dfce1 | [
"MIT"
] | null | null | null | plot.py | sahilm75/EEG-based-classification | 4555f56f22596836c28a47d9694be9a3d23dfce1 | [
"MIT"
] | null | null | null | plot.py | sahilm75/EEG-based-classification | 4555f56f22596836c28a47d9694be9a3d23dfce1 | [
"MIT"
] | 1 | 2021-11-14T09:42:33.000Z | 2021-11-14T09:42:33.000Z | import matplotlib.pyplot as plt
import numpy as np
# Laplacian prior
t1 = 44.3
x1 = np.linspace(0, 5 , 5)
y1 = np.array([142 , 10, 10, 10 ,10])
# Gaussian prior, lambda = 0.5
t2 = 2.67
x2 = np.linspace(0, 5 , 5)
y2 = np.array([142 , 14, 14, 14 ,14])
# Gaussian prior, lambda = 2
t3 = 1.8
x3 = np.linspace(0, 5 , 5)
y3 = np.array([142 , 20, 20, 20 ,20])
# Gaussian prior, lambda = 10
t4 = 2
x4 = np.linspace(0, 5 , 5)
y4 = np.array([142 , 33, 33, 33 ,33])
# Gaussian prior, lambda = 0.05
t5 = 3
x5 = np.linspace(0, 5 , 5)
y5 = np.array([142 , 10, 10, 10 ,10])
plt.plot(x1, y1, marker='x', label='Laplacian Prior (Time taken = {}s)'.format(t1))
plt.plot(x5, y5, marker='x', label='Gaussian Prior, lambda = 0.05 (Time taken = {}s)'.format(t5))
plt.plot(x2, y2, marker='x', label='Gaussian Prior, lambda = 0.5 (Time taken = {}s)'.format(t2))
plt.plot(x3, y3, marker='x', label='Gaussian Prior, lambda = 2 (Time taken = {}s)'.format(t3))
plt.plot(x4, y4, marker='x', label='Gaussian Prior, lambda = 10 (Time taken = {}s)'.format(t4))
plt.xlabel('Iterations')
plt.ylabel('Mean Squared Error')
plt.title('Different prior for unknown inputs')
plt.legend()
plt.savefig('Different_priors.png')
| 26.85 | 91 | 0.613594 | 195 | 1,074 | 3.374359 | 0.333333 | 0.021277 | 0.083587 | 0.091185 | 0.399696 | 0.246201 | 0.151976 | 0 | 0 | 0 | 0 | 0.147465 | 0.191806 | 1,074 | 39 | 92 | 27.538462 | 0.610599 | 0.045624 | 0 | 0 | 0 | 0 | 0.29902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a824b560bd298afc6f2190f9d5184fad3b40bd5 | 1,671 | py | Python | create_anomaly_model.py | PandaMia/Smart_City | 62ca71bb4db64925f800a12cce1c3d5959039360 | [
"MIT"
] | 1 | 2021-12-04T00:56:32.000Z | 2021-12-04T00:56:32.000Z | create_anomaly_model.py | PandaMia/Smart_City | 62ca71bb4db64925f800a12cce1c3d5959039360 | [
"MIT"
] | null | null | null | create_anomaly_model.py | PandaMia/Smart_City | 62ca71bb4db64925f800a12cce1c3d5959039360 | [
"MIT"
] | 1 | 2022-02-18T01:54:24.000Z | 2022-02-18T01:54:24.000Z | import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.keras.layers import LayerNormalization
from tensorflow.keras.models import load_model
def get_anomaly_model():
MODEL_PATH = "weights/anomaly_model.hdf5"
return load_model(MODEL_PATH, custom_objects={'LayerNormalization': LayerNormalization})
def get_single_test(images_path):
    # Load the greyscale test frames, resized to 256x256 and scaled to [0, 1].
    # Sizing the array from the actual file count avoids both zero-padding
    # (fewer frames) and overflow (more frames) of a hard-coded length.
    files = [f for f in sorted(os.listdir(images_path)) if f.endswith("jpg")]
    test = np.zeros(shape=(len(files), 256, 256, 1))
    for cnt, f in enumerate(files):
        img = Image.open(os.path.join(images_path, f)).convert('L').resize((256, 256))
        test[cnt, :, :, 0] = np.array(img, dtype=np.float32) / 256.0
    return test
def evaluate(model, images_path):
    # Score every sliding window of 10 consecutive frames with the
    # reconstruction error of the spatiotemporal autoencoder
    test = get_single_test(images_path)
    sz = test.shape[0] - 10 + 1
    sequences = np.zeros((sz, 10, 256, 256, 1))
    for i in range(0, sz):
        clip = np.zeros((10, 256, 256, 1))
        for j in range(0, 10):
            clip[j] = test[i + j, :, :, :]
        sequences[i] = clip
    reconstructed_sequences = model.predict(sequences, batch_size=4)
    sequences_reconstruction_cost = np.array([np.linalg.norm(np.subtract(sequences[i],
                                                                         reconstructed_sequences[i])) for i in range(0, sz)])
    # Normalise the cost into an abnormality score sa(t); the regularity
    # score sr(t) is its complement, so low values flag anomalous frames
    sa = (sequences_reconstruction_cost - np.min(sequences_reconstruction_cost)) / np.max(sequences_reconstruction_cost)
    sr = 1.0 - sa
    plt.plot(sr)
    plt.ylabel('regularity score Sr(t)')
    plt.xlabel('frame t')
    #plt.savefig('source/saved_figure.png')
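# Illustrative end-to-end run (the frames directory is hypothetical):
#
#   model = get_anomaly_model()
#   evaluate(model, 'data/test_frames')
#   plt.show()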
| 34.8125 | 125 | 0.626571 | 233 | 1,671 | 4.364807 | 0.381974 | 0.058997 | 0.106195 | 0.085546 | 0.138643 | 0.117994 | 0 | 0 | 0 | 0 | 0 | 0.045095 | 0.243567 | 1,671 | 47 | 126 | 35.553191 | 0.759494 | 0.022741 | 0 | 0 | 0 | 0 | 0.047181 | 0.015931 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.162162 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a83358d18467513a3e40882dbd273ee017c5a04 | 5,009 | py | Python | ingest/prepare_scripts/sentinel_1/prep_s1a.py | ChetanKhanna/NE-GeoCloud | bad907045729cd9ffd086ede034ef1805eeecc8b | [
"Apache-2.0"
] | 1 | 2019-07-22T05:24:40.000Z | 2019-07-22T05:24:40.000Z | ingest/prepare_scripts/sentinel_1/prep_s1a.py | SivaramakrishnanKN/NE-GeoCloud | affcae49e0ccd7d29360a2771a9517147ed56590 | [
"Apache-2.0"
] | 1 | 2019-06-06T18:31:29.000Z | 2019-06-06T18:31:29.000Z | ingest/prepare_scripts/sentinel_1/prep_s1a.py | SivaramakrishnanKN/NE-GeoCloud | affcae49e0ccd7d29360a2771a9517147ed56590 | [
"Apache-2.0"
] | 5 | 2019-06-05T07:26:13.000Z | 2019-06-08T06:53:11.000Z | """
Prepare a dataset (specifically an orthorectified Sentinel 1 scene in BEAM-DIMAP format) for datacube indexing.
Note, this script is only an example. For production purposes, more metadata would be harvested.
The BEAM-DIMAP format (output by Sentinel Toolbox/SNAP) consists of an XML header file (.dim)
and a directory (.data) which stores different polarisations (different raster bands) separately,
each as ENVI format, that is, raw binary (.img) with ascii header (.hdr). GDAL can read ENVI
format (that is, when provided an img it checks for an accompanying hdr).
"""
# get corner coords in crs of source datafile,
# transform into crs of datacube index.
#
# TODO: datacube could perform this transformation itself rather than entrusting yamls.
# This may support more careful consideration of datums, and issues such as the corner
# coords failing to enclose the area due to curvature of the projected border segments.
import rasterio.warp
from osgeo import osr
import sys
import click
import yaml
from pathlib import Path
def get_geometry(path):
with rasterio.open(path) as img:
left, bottom, right, top = img.bounds
        crs = str(getattr(img, 'crs_wkt', None) or img.crs.wkt)
corners = {
'ul': {
'x': left,
'y': top
},
'ur': {
'x': right,
'y': top
},
'll': {
'x': left,
'y': bottom
},
'lr': {
'x': right,
'y': bottom
}
}
projection = {'spatial_reference': crs, 'geo_ref_points': corners}
spatial_ref = osr.SpatialReference(crs)
t = osr.CoordinateTransformation(spatial_ref, spatial_ref.CloneGeogCS())
def transform(p):
lon, lat, z = t.TransformPoint(p['x'], p['y'])
return {'lon': lon, 'lat': lat}
extent = {key: transform(p) for key, p in corners.items()}
return projection, extent
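# Illustrative: get_geometry('scene.data/Gamma0_VV.img') returns the corner
# coordinates in the image CRS plus their lon/lat equivalents (the path is
# hypothetical).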
# Construct metadata dict
import uuid
from xml.etree import ElementTree  # the C accelerator is used automatically on Python 3
from dateutil import parser
import os
bands = ['vh', 'vv']
def band_name(path):
    # Map an ENVI image filename to the polarisation layer it holds
    if 'VH' in str(path):
        return 'vh'
    if 'VV' in str(path):
        return 'vv'
    raise ValueError('Unrecognised polarisation in {}'.format(path))
def prep_dataset(path):
# input: path = .dim filename
# Read in the XML header
xml_path = path.joinpath(path.stem + '.dim')
xml = ElementTree.parse(
str(xml_path)).getroot().find("Dataset_Sources/MDElem[@name='metadata']/MDElem[@name='Abstracted_Metadata']")
scene_name = xml.find("MDATTR[@name='PRODUCT']").text
platform = xml.find("MDATTR[@name='MISSION']").text.replace('-', '_')
t0 = parser.parse(xml.find("MDATTR[@name='first_line_time']").text)
t1 = parser.parse(xml.find("MDATTR[@name='last_line_time']").text)
# TODO: which time goes where in what format?
# could also read processing graph, or
# could read production/productscenerasterstart(stop)time
# get bands
# TODO: verify band info from xml
images = {band_name(im_path): {'path': str(im_path.relative_to(path))} for im_path in path.glob('*.data/*.img')}
# trusting bands coaligned, use one to generate spatial bounds for all
projection, extent = get_geometry('/'.join([str(path), images['vv']['path']]))
# format metadata (i.e. construct hashtable tree for syntax of file interface)
return {
'id': str(uuid.uuid4()),
'processing_level': "CEOS_ARD",
'product_type': "gamma0",
'creation_dt': t0,
'platform': {
'code': 'SENTINEL_1'
},
'instrument': {
'name': 'SAR'
},
'extent': {
'coord': extent,
'from_dt': str(t0),
'to_dt': str(t1),
'center_dt': str(t0 + (t1 - t0) / 2)
},
'format': {
'name': 'ENVI'
}, # ENVI or BEAM-DIMAP ?
'grid_spatial': {
'projection': projection
},
'image': {
'bands': images
},
'lineage': {
'source_datasets': {},
'ga_label': scene_name
} # TODO!
# C band, etc...
}
@click.command(
help="Prepare S1A/B data processed with GPT in BEAM-DIMAP format dataset for ingestion into the Data Cube.")
@click.argument('datasets', type=click.Path(exists=True, readable=True, writable=True), nargs=-1)
def main(datasets):
for dataset in datasets:
path = Path(dataset)
assert path.glob('*.dim'), "Expect a directory with a BEAM-DIMAP header file as input"
print("Starting for dataset " + dataset)
metadata = prep_dataset(path)
yaml_path = str(path.joinpath('agdc-metadata.yaml'))
with open(yaml_path, 'w') as stream:
yaml.dump(metadata, stream)
if __name__ == "__main__":
main()
| 31.503145 | 117 | 0.598323 | 620 | 5,009 | 4.753226 | 0.427419 | 0.01527 | 0.017645 | 0.023074 | 0.019002 | 0.019002 | 0 | 0 | 0 | 0 | 0 | 0.004137 | 0.276103 | 5,009 | 158 | 118 | 31.702532 | 0.808605 | 0.280295 | 0 | 0.07767 | 0 | 0.009709 | 0.197372 | 0.05116 | 0 | 0 | 0 | 0.006329 | 0.009709 | 1 | 0.048544 | false | 0 | 0.097087 | 0 | 0.184466 | 0.009709 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a834d2d699da4978950b74bdcb9d457d00cc6de | 1,727 | py | Python | micro-services/src/micro_services/utils/yaml_types/timedelta_type.py | rob-blackbourn/micro-site-server | 7ac3ccba75789771f1e6149074947685ea3da809 | [
"Apache-2.0"
] | 1 | 2019-05-30T21:37:58.000Z | 2019-05-30T21:37:58.000Z | micro-services/src/micro_services/utils/yaml_types/timedelta_type.py | rob-blackbourn/micro-site-server | 7ac3ccba75789771f1e6149074947685ea3da809 | [
"Apache-2.0"
] | 8 | 2021-03-09T05:07:34.000Z | 2022-02-26T11:21:00.000Z | micro-services/src/micro_services/utils/yaml_types/timedelta_type.py | rob-blackbourn/micro-site-server | 7ac3ccba75789771f1e6149074947685ea3da809 | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
import re
from typing import Optional, Type
import yaml
from .common import add_custom_type
TIMEDELTA_TAG = '!timedelta'
TIMEDELTA_REGEX = re.compile(
r'^((?P<weeks>\d+?)w)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?$'
)
def format_timedelta(value: timedelta) -> str:
seconds = value.seconds
minutes = seconds // 60
hours = minutes // 60
weeks = value.days // 7
days = value.days % 7
hours %= 24
minutes %= 60
seconds %= 60
s = ''
if weeks:
s += str(weeks) + 'w'
if days:
s += str(days) + 'd'
if hours:
s += str(hours) + 'h'
if minutes:
s += str(minutes) + 'm'
if seconds:
s += str(seconds) + 's'
return s
def timedelta_representer(dumper: yaml.Dumper, data: timedelta) -> yaml.ScalarNode:
return dumper.represent_scalar(TIMEDELTA_TAG, format_timedelta(data))
def parse_timedelta(value: str) -> Optional[timedelta]:
parts = TIMEDELTA_REGEX.match(value)
if not parts:
return None
parts = parts.groupdict()
time_params = {}
for (name, param) in parts.items():
if param:
time_params[name] = int(param)
return timedelta(**time_params)
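# Illustrative round trip (values chosen arbitrarily):
#
#   parse_timedelta('1w2d3h') == timedelta(weeks=1, days=2, hours=3)
#   format_timedelta(timedelta(weeks=1, days=2, hours=3)) == '1w2d3h'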
def timedelta_constructor(loader: yaml.Loader, node: yaml.ScalarNode) -> Optional[timedelta]:
value = loader.construct_scalar(node)
data = parse_timedelta(value)
return data
def add_custom_type_timedelta(loader: Type[yaml.Loader], dumper: Type[yaml.Dumper]):
add_custom_type(
loader,
dumper,
timedelta,
TIMEDELTA_TAG,
timedelta_representer,
timedelta_constructor,
TIMEDELTA_REGEX
)
| 25.397059 | 107 | 0.623625 | 216 | 1,727 | 4.865741 | 0.277778 | 0.01903 | 0.037108 | 0.041865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009174 | 0.242617 | 1,727 | 67 | 108 | 25.776119 | 0.794343 | 0 | 0 | 0 | 0 | 0.017857 | 0.066589 | 0.057904 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089286 | false | 0 | 0.089286 | 0.017857 | 0.267857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a84910832fc55b9383b2d731c05c53f42368631 | 6,619 | py | Python | src/finn/transformation/merge_onnx_models.py | alinavalinav/finn | e443a5859066a410a63c08dcfec4a90527ca24be | [
"BSD-3-Clause"
] | 1 | 2020-12-21T07:37:57.000Z | 2020-12-21T07:37:57.000Z | src/finn/transformation/merge_onnx_models.py | alinavalinav/finn | e443a5859066a410a63c08dcfec4a90527ca24be | [
"BSD-3-Clause"
] | null | null | null | src/finn/transformation/merge_onnx_models.py | alinavalinav/finn | e443a5859066a410a63c08dcfec4a90527ca24be | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import warnings
from onnx import helper
from finn.transformation import Transformation
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.infer_data_layouts import InferDataLayouts
from finn.transformation.general import (
GiveReadableTensorNames,
GiveRandomTensorNames,
GiveUniqueNodeNames,
GiveUniqueParameterTensors,
)
class MergeONNXModels(Transformation):
"""Merges two models. The model passed in the transformation will be inserted before
the model the transformation is applied on, the resulting model is returned.
This transformation will try to connect graph.output[0] of the pre model and
graph.input[0] of the post model.
If more than one input or output exists, a warning is raised."""
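    # Minimal usage sketch (the model handles are illustrative):
    #
    #   model = model.transform(MergeONNXModels(pre_model))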
def __init__(self, pre_model):
super().__init__()
# use deep copy of model that should be inserted in the beginning of
# the other model to ensure that it stays unchanged
self.pre_model = copy.deepcopy(pre_model)
def apply(self, model):
graph_modified = False
pre_model = self.pre_model
post_model = copy.deepcopy(model)
# to avoid mix-ups, start by giving all tensors random names
pre_model = pre_model.transform(GiveRandomTensorNames())
post_model = post_model.transform(GiveRandomTensorNames())
# check for dynamic outputs of pre model
dyn_outp = []
for outp in pre_model.graph.output:
init_val = pre_model.get_initializer(outp.name)
if init_val is None:
dyn_outp.append(outp)
if len(dyn_outp) != 1:
warnings.warn(
"The pre model has more than one dynamic output! The transformation "
"tries to connect the first dynamic output to the first dynamic input "
"of the post model."
)
# check for dynamic inputs of post model
dyn_inp = []
for inp in post_model.graph.input:
init_val = post_model.get_initializer(inp.name)
if init_val is None:
dyn_inp.append(inp)
if len(dyn_inp) != 1:
warnings.warn(
"The post model has more than one dynamic input! The transformation "
"tries to connect the first dynamic input to the first dynamic output "
"of the pre model."
)
# erase all node names to avoid conflict
for n in pre_model.graph.node:
n.name = ""
for n in post_model.graph.node:
n.name = ""
# check if models can be merged
output_model_a = dyn_outp[0].name
input_model_b = dyn_inp[0].name
output_a_shape = pre_model.get_tensor_shape(output_model_a)
input_b_shape = post_model.get_tensor_shape(input_model_b)
assert (
output_a_shape == input_b_shape
), "Models can't be merged! Shapes don't match."
# connect output of one model to input of the other
for n in pre_model.graph.node:
if output_model_a == n.output[0]:
n.output[0] = input_model_b
# extract information for new model
# nodes
node_pre = [node for node in pre_model.graph.node]
node_post = [node for node in post_model.graph.node]
node_new = node_pre + node_post
# in and output
inp = pre_model.graph.input[0]
outp = post_model.graph.output[0]
vi_pre = [x for x in pre_model.graph.value_info]
out_pre = [x for x in pre_model.graph.output]
qa_pre = [x for x in pre_model.graph.quantization_annotation]
init_pre = [x for x in pre_model.graph.initializer]
vi_post = [x for x in post_model.graph.value_info]
qa_post = [x for x in post_model.graph.quantization_annotation]
init_post = [x for x in post_model.graph.initializer]
vi_new = vi_pre + vi_post + out_pre
qa_new = qa_pre + qa_post
init_new = init_pre + init_post
# create new graph and model
new_graph = helper.make_graph(
nodes=node_new,
name="fuse-graph",
inputs=[inp],
outputs=[outp],
value_info=vi_new,
)
new_model = helper.make_model(new_graph, producer_name="fuse_model")
new_model = ModelWrapper(new_model)
for i in init_new:
new_model.graph.initializer.append(i)
for qa in qa_new:
new_model.graph.quantization_annotation.append(qa)
# tidy-up new model
model = new_model
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
model = model.transform(InferDataLayouts())
model = model.transform(GiveUniqueNodeNames())
model = model.transform(GiveUniqueParameterTensors())
model = model.transform(GiveReadableTensorNames())
return (model, graph_modified)
| 40.115152 | 88 | 0.676537 | 892 | 6,619 | 4.878924 | 0.272422 | 0.040441 | 0.026884 | 0.027574 | 0.168428 | 0.123392 | 0.111443 | 0.090763 | 0.03125 | 0.03125 | 0 | 0.002856 | 0.259405 | 6,619 | 164 | 89 | 40.359756 | 0.884945 | 0.342499 | 0 | 0.083333 | 0 | 0 | 0.086227 | 0 | 0 | 0 | 0 | 0 | 0.010417 | 1 | 0.020833 | false | 0 | 0.09375 | 0 | 0.135417 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |