hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0bb049244e583c008dcf23ea943a3167788ea1
| 931
|
py
|
Python
|
ckanext/ottawa/listing.py
|
CityofOttawa/Ottawa-ckan
|
0405b27ab18a66b4052eb11f27527d7fc8fa2f37
|
[
"MIT"
] | null | null | null |
ckanext/ottawa/listing.py
|
CityofOttawa/Ottawa-ckan
|
0405b27ab18a66b4052eb11f27527d7fc8fa2f37
|
[
"MIT"
] | 12
|
2015-06-05T15:52:40.000Z
|
2017-04-25T15:17:47.000Z
|
ckanext/ottawa/listing.py
|
CityofOttawa/Ottawa-ckan
|
0405b27ab18a66b4052eb11f27527d7fc8fa2f37
|
[
"MIT"
] | null | null | null |
import os
import logging
from ckan.plugins.interfaces import IPackageController
from ckan.plugins import implements, SingletonPlugin
from genshi import Stream
class OttawaPackageListing(SingletonPlugin):
    """CKAN plugin that copies the French 'titre' and 'resume' package
    extras onto each search result dict so templates can render them
    directly.
    """
    implements(IPackageController, inherit=True)

    @staticmethod
    def _extra_value(result, key):
        """Return the value of the extra named *key* in *result*, or None.

        Uses next() over a generator instead of the original
        len(filter(...)) idiom: on Python 3 filter() returns a lazy
        iterator with no len(), so the old pattern raised TypeError.
        This form behaves identically on Python 2 and 3 and stops at
        the first match.
        """
        return next((extra['value'] for extra in result.get('extras', [])
                     if extra['key'] == key), None)

    def after_search(self, search_results, search_params):
        """IPackageController hook: decorate search results in place.

        For every result that carries a 'titre' or 'resume' extra, strip
        the surrounding quote characters and decode the unicode-escape
        sequences, storing the readable text under the same key on the
        result itself. Returns the (mutated) search_results dict.
        """
        if search_results['count'] > 0:
            for result in search_results['results']:
                for key in ('titre', 'resume'):
                    value = self._extra_value(result, key)
                    if value is not None:
                        # Stored values look like '"..."' with embedded
                        # \uXXXX escapes: drop the outer characters, then
                        # decode the escapes (Python 2 str -> unicode).
                        result[key] = value[1:-1].decode('unicode-escape')
        return search_results
| 40.478261
| 90
| 0.584318
|
4a0bb08047aaad084d0aba8d923e786a366b18bb
| 760
|
py
|
Python
|
AzureCli/azext_bot/azext_bot/_client_factory.py
|
mgbennet/botbuilder-tools
|
c16b6350e3575b642c0447781dc163f8251bde43
|
[
"MIT"
] | null | null | null |
AzureCli/azext_bot/azext_bot/_client_factory.py
|
mgbennet/botbuilder-tools
|
c16b6350e3575b642c0447781dc163f8251bde43
|
[
"MIT"
] | null | null | null |
AzureCli/azext_bot/azext_bot/_client_factory.py
|
mgbennet/botbuilder-tools
|
c16b6350e3575b642c0447781dc163f8251bde43
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
def get_botservice_management_client(cli_ctx, *_):
    """Build the Azure Bot Service management client for *cli_ctx*.

    Extra positional arguments from the CLI command loader are ignored.
    """
    # Imports are local so the azure SDK is only loaded when a bot
    # command actually runs.
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azext_bot.botservice import AzureBotService
    client = get_mgmt_service_client(cli_ctx, AzureBotService)
    return client
def get_botOperations_client(cli_ctx, *_):
    """Return the 'bots' operations group of the management client."""
    client = get_botservice_management_client(cli_ctx)
    return client.bots
def get_botServices_client(cli_ctx, *_):
    """Return the 'bot_services' operations group of the management client."""
    client = get_botservice_management_client(cli_ctx)
    return client.bot_services
def get_botChannels_client(cli_ctx, *_):
    """Return the 'channels' operations group of the management client."""
    client = get_botservice_management_client(cli_ctx)
    return client.channels
def get_operations_client(cli_ctx, *_):
    """Return the generic 'operations' group of the management client."""
    client = get_botservice_management_client(cli_ctx)
    return client.operations
| 36.190476
| 78
| 0.822368
|
4a0bb0ca8f8cda4337ac98ed95bb43e8ce909c40
| 2,859
|
py
|
Python
|
tolua.py
|
lzubiaur/debugconsole
|
b049e23a4a806332e59970825c6c1e9adce52403
|
[
"MIT"
] | 2
|
2015-08-31T06:28:55.000Z
|
2015-11-06T06:12:27.000Z
|
tolua.py
|
lzubiaur/debugconsole
|
b049e23a4a806332e59970825c6c1e9adce52403
|
[
"MIT"
] | null | null | null |
tolua.py
|
lzubiaur/debugconsole
|
b049e23a4a806332e59970825c6c1e9adce52403
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
" Copyright (c) 2013 Laurent Zubiaur
"
" http://www.pix2d.com/
"
" Permission is hereby granted, free of charge, to any person obtaining a copy
" of this software and associated documentation files (the "Software"), to deal
" in the Software without restriction, including without limitation the rights
" to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
" copies of the Software, and to permit persons to whom the Software is
" furnished to do so, subject to the following conditions:
"
" The above copyright notice and this permission notice shall be included in
" all copies or substantial portions of the Software.
"
" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
" IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
" FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
" AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
" LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
" OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
" THE SOFTWARE.
"""
import os,sys,glob
import argparse
from subprocess import call
def main(argv=None):
    """Parse command-line arguments and run tolua++ over a package directory.

    argv -- full argument vector including the program name; defaults to
    sys.argv. Exits via parser.error() (status 2) when the directory or
    the tolua++ executable cannot be found. Returns None on success
    (reported as exit status 0 by the sys.exit(main()) entry point).

    NOTE(review): this is Python 2 code -- `unicode` is the py2 builtin.
    """
    argv = (argv or sys.argv)[1:]
    parser = argparse.ArgumentParser(usage=("%(prog)s [--tolua|-t] directory"))
    parser.add_argument("directory",
                        type=unicode,  # Python 2 builtin type
                        help="tolua++ packages directory")
    parser.add_argument("--tolua","-t",
                        dest="tolua",
                        type=unicode,
                        help="tolua++ executable path")
    # parse_known_args tolerates extra arguments; `args` is unused.
    options, args = parser.parse_known_args(argv)
    if not os.path.isdir(options.directory):
        parser.error("Directory not found: '{0}'".format(options.directory))
    if options.tolua:
        if not os.path.isfile(options.tolua):
            parser.error("tolua++ executable not found: '{0}'".format(options.tolua))
    else:
        # No explicit path given: fall back to ./tolua++ in the cwd.
        if not os.path.isfile('./tolua++'):
            parser.error("tolua++ not found in the current directory. Please provide the path of the tolua++ executable.")
        else:
            options.tolua = './tolua++'
    if options.directory:
        tolua(options.tolua,os.path.abspath(options.directory))
def tolua(tolua_exec,directory):
    """
    Run tolua++ on every .pkg file found in *directory*.

    For each foo.pkg the generated header/source are written next to it
    as foo.hpp / foo.cpp. tolua command line:
    ./tolua++ -n "basename" -H filename.hpp -o filename.cpp filename.pkg

    NOTE(review): Python 2 code (bare `print` statement below). The
    local `basename` shadows nothing here but mirrors os.path.basename.
    """
    for filename in glob.glob(directory + '/*.pkg'):
        # Strip the extension once; reuse the stem for all output names.
        filename, fileExt = os.path.splitext(filename)
        basename = os.path.basename(filename)
        arg1 = basename
        arg2 = filename + ".hpp"
        arg3 = filename + ".cpp"
        arg4 = filename + ".pkg"
        sys.stdout.write("processing " + filename + ".pkg... ")
        # Exit status of tolua++ is ignored; failures only show in output.
        call([tolua_exec,"-n",arg1,"-H",arg2,"-o",arg3,arg4])
        print "done"
# Script entry point: exit status is main()'s return value (None -> 0).
if __name__ == "__main__":
    sys.exit(main())
| 35.7375
| 122
| 0.671913
|
4a0bb0dbbb635d05574b9a709b7444f4b08066e8
| 57,125
|
py
|
Python
|
qiita_db/test/test_util.py
|
unique-identifier/qiita
|
6f7a2471b96d65ab05fce55d1e5fdf35b4cae374
|
[
"BSD-3-Clause"
] | null | null | null |
qiita_db/test/test_util.py
|
unique-identifier/qiita
|
6f7a2471b96d65ab05fce55d1e5fdf35b4cae374
|
[
"BSD-3-Clause"
] | null | null | null |
qiita_db/test/test_util.py
|
unique-identifier/qiita
|
6f7a2471b96d65ab05fce55d1e5fdf35b4cae374
|
[
"BSD-3-Clause"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from tempfile import mkstemp, mkdtemp, NamedTemporaryFile, TemporaryFile
from os import close, remove, mkdir
from os.path import join, exists, basename
from shutil import rmtree
from datetime import datetime
from functools import partial
from string import punctuation
import h5py
from six import StringIO, BytesIO
import pandas as pd
from qiita_core.util import qiita_test_checker
import qiita_db as qdb
@qiita_test_checker()
class DBUtilTestsBase(TestCase):
    """Shared fixture for qdb.util tests.

    Provides the 'study' table name with its required columns for the
    column-checking tests, plus a list of temporary files that is
    cleaned up after every test.
    """

    def setUp(self):
        # Table/columns exercised by check_required_columns and
        # check_table_cols tests.
        self.table = 'study'
        self.required = [
            'study_title', 'mixs_compliant',
            'metadata_complete', 'study_description', 'first_contact',
            'reprocess', 'timeseries_type_id', 'study_alias',
            'study_abstract', 'principal_investigator_id', 'email']
        self.files_to_remove = []

    def tearDown(self):
        # Best-effort cleanup: unlink only the registered paths that
        # still exist on disk.
        for path in filter(exists, self.files_to_remove):
            remove(path)
class DBUtilTests(DBUtilTestsBase):
def test_max_preparation_samples(self):
"""Test that we get the correct max_preparation_samples"""
obs = qdb.util.max_preparation_samples()
self.assertEqual(obs, 800)
def test_filepath_id_to_object_id(self):
# filepaths 1, 2 belongs to artifact 1
self.assertEqual(qdb.util.filepath_id_to_object_id(1), 1)
self.assertEqual(qdb.util.filepath_id_to_object_id(2), 1)
# filepaths 3, 4 belongs to artifact 2
self.assertEqual(qdb.util.filepath_id_to_object_id(3), 2)
self.assertEqual(qdb.util.filepath_id_to_object_id(4), 2)
# filepaths 9 belongs to artifact 4
self.assertEqual(qdb.util.filepath_id_to_object_id(9), 4)
# filepath 16 belongs to anlaysis 1
self.assertEqual(qdb.util.filepath_id_to_object_id(16), 1)
# filepath 18 belongs to study 1
self.assertIsNone(qdb.util.filepath_id_to_object_id(18))
# filepath 22 belongs to analysis/artifact 7
self.assertEqual(qdb.util.filepath_id_to_object_id(22), 7)
def test_check_required_columns(self):
# Doesn't do anything if correct info passed, only errors if wrong info
qdb.util.check_required_columns(self.required, self.table)
def test_check_required_columns_fail(self):
self.required.remove('study_title')
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.util.check_required_columns(self.required, self.table)
def test_check_table_cols(self):
# Doesn't do anything if correct info passed, only errors if wrong info
qdb.util.check_table_cols(self.required, self.table)
def test_check_table_cols_fail(self):
self.required.append('BADTHINGNOINHERE')
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.util.check_table_cols(self.required, self.table)
def test_get_table_cols(self):
obs = qdb.util.get_table_cols("qiita_user")
exp = {"email", "user_level_id", "password", "name", "affiliation",
"address", "phone", "user_verify_code", "pass_reset_code",
"pass_reset_timestamp"}
self.assertEqual(set(obs), exp)
def test_exists_table(self):
"""Correctly checks if a table exists"""
# True cases
self.assertTrue(qdb.util.exists_table("filepath"))
self.assertTrue(qdb.util.exists_table("qiita_user"))
self.assertTrue(qdb.util.exists_table("analysis"))
self.assertTrue(qdb.util.exists_table("prep_1"))
self.assertTrue(qdb.util.exists_table("sample_1"))
# False cases
self.assertFalse(qdb.util.exists_table("sample_2"))
self.assertFalse(qdb.util.exists_table("prep_3"))
self.assertFalse(qdb.util.exists_table("foo_table"))
self.assertFalse(qdb.util.exists_table("bar_table"))
def test_convert_to_id(self):
"""Tests that ids are returned correctly"""
self.assertEqual(
qdb.util.convert_to_id("directory", "filepath_type"), 8)
self.assertEqual(
qdb.util.convert_to_id("private", "visibility", "visibility"), 3)
self.assertEqual(
qdb.util.convert_to_id("EMP", "portal_type", "portal"), 2)
def test_convert_to_id_bad_value(self):
"""Tests that ids are returned correctly"""
with self.assertRaises(qdb.exceptions.QiitaDBLookupError):
qdb.util.convert_to_id("FAKE", "filepath_type")
def test_get_artifact_types(self):
obs = qdb.util.get_artifact_types()
exp = {'SFF': 1, 'FASTA_Sanger': 2, 'FASTQ': 3, 'FASTA': 4,
'per_sample_FASTQ': 5, 'Demultiplexed': 6, 'BIOM': 7,
'beta_div_plots': 8, 'rarefaction_curves': 9,
'taxa_summary': 10}
self.assertEqual(obs, exp)
obs = qdb.util.get_artifact_types(key_by_id=True)
exp = {v: k for k, v in exp.items()}
self.assertEqual(obs, exp)
def test_get_filepath_types(self):
"""Tests that get_filepath_types works with valid arguments"""
obs = qdb.util.get_filepath_types()
exp = {'raw_forward_seqs': 1, 'raw_reverse_seqs': 2,
'raw_barcodes': 3, 'preprocessed_fasta': 4,
'preprocessed_fastq': 5, 'preprocessed_demux': 6, 'biom': 7,
'directory': 8, 'plain_text': 9, 'reference_seqs': 10,
'reference_tax': 11, 'reference_tree': 12, 'log': 13,
'sample_template': 14, 'prep_template': 15, 'qiime_map': 16,
}
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT filepath_type,filepath_type_id "
"FROM qiita.filepath_type")
exp = dict(qdb.sql_connection.TRN.execute_fetchindex())
self.assertEqual(obs, exp)
obs = qdb.util.get_filepath_types(key='filepath_type_id')
exp = {v: k for k, v in exp.items()}
self.assertEqual(obs, exp)
def test_get_filepath_types_fail(self):
"""Tests that get_Filetypes fails with invalid argument"""
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.util.get_filepath_types(key='invalid')
def test_get_data_types(self):
"""Tests that get_data_types works with valid arguments"""
obs = qdb.util.get_data_types()
exp = {'16S': 1, '18S': 2, 'ITS': 3, 'Proteomic': 4, 'Metabolomic': 5,
'Metagenomic': 6, 'Multiomic': 7, 'Metatranscriptomics': 8,
'Viromics': 9, 'Genomics': 10, 'Transcriptomics': 11}
self.assertEqual(obs, exp)
obs = qdb.util.get_data_types(key='data_type_id')
exp = {v: k for k, v in exp.items()}
self.assertEqual(obs, exp)
def test_create_rand_string(self):
set_punct = set(punctuation)
obs = qdb.util.create_rand_string(200)
self.assertEqual(len(obs), 200)
self.assertTrue(set_punct.intersection(set(obs)))
obs = qdb.util.create_rand_string(400, punct=False)
self.assertEqual(len(obs), 400)
self.assertFalse(set_punct.intersection(set(obs)))
def test_get_count(self):
"""Checks that get_count retrieves proper count"""
self.assertEqual(qdb.util.get_count('qiita.study_person'), 3)
def test_check_count(self):
"""Checks that check_count returns True and False appropriately"""
self.assertTrue(qdb.util.check_count('qiita.study_person', 3))
self.assertFalse(qdb.util.check_count('qiita.study_person', 2))
def test_insert_filepaths(self):
fd, fp = mkstemp()
close(fd)
with open(fp, "w") as f:
f.write("\n")
self.files_to_remove.append(fp)
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT last_value FROM qiita.filepath_filepath_id_seq")
exp_new_id = 1 + qdb.sql_connection.TRN.execute_fetchflatten()[0]
obs = qdb.util.insert_filepaths([(fp, 1)], 2, "raw_data")
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.assertFalse(exists(fp))
self.files_to_remove.append(exp_fp)
# Check that the filepaths have been added to the DB
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.filepath "
"WHERE filepath_id=%d" % exp_new_id)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp_fp = "2_%s" % basename(fp)
exp = [[exp_new_id, exp_fp, 1, '852952723', 1, 5, 1]]
self.assertEqual(obs, exp)
qdb.util.purge_filepaths()
def test_insert_filepaths_copy(self):
fd, fp = mkstemp()
close(fd)
with open(fp, "w") as f:
f.write("\n")
self.files_to_remove.append(fp)
# The id's in the database are bigserials, i.e. they get
# autoincremented for each element introduced.
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT last_value FROM qiita.filepath_filepath_id_seq")
exp_new_id = 1 + qdb.sql_connection.TRN.execute_fetchflatten()[0]
obs = qdb.util.insert_filepaths([(fp, 1)], 2, "raw_data",
move_files=False, copy=True)
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.assertTrue(exists(fp))
self.files_to_remove.append(exp_fp)
# Check that the filepaths have been added to the DB
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.filepath "
"WHERE filepath_id=%d" % exp_new_id)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp_fp = "2_%s" % basename(fp)
exp = [[exp_new_id, exp_fp, 1, '852952723', 1, 5, 1]]
self.assertEqual(obs, exp)
# let's do that again but with move_files = True
exp_new_id += 1
obs = qdb.util.insert_filepaths([(fp, 1)], 2, "raw_data",
move_files=True, copy=True)
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.assertTrue(exists(fp))
self.files_to_remove.append(exp_fp)
qdb.util.purge_filepaths()
def test_insert_filepaths_string(self):
fd, fp = mkstemp()
close(fd)
with open(fp, "w") as f:
f.write("\n")
self.files_to_remove.append(fp)
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT last_value FROM qiita.filepath_filepath_id_seq")
exp_new_id = 1 + qdb.sql_connection.TRN.execute_fetchflatten()[0]
obs = qdb.util.insert_filepaths(
[(fp, "raw_forward_seqs")], 2, "raw_data")
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.files_to_remove.append(exp_fp)
# Check that the filepaths have been added to the DB
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.filepath "
"WHERE filepath_id=%d" % exp_new_id)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp_fp = "2_%s" % basename(fp)
exp = [[exp_new_id, exp_fp, 1, '852952723', 1, 5, 1]]
self.assertEqual(obs, exp)
qdb.util.purge_filepaths()
def test_retrieve_filepaths(self):
obs = qdb.util.retrieve_filepaths('artifact_filepath',
'artifact_id', 1)
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 1,
'fp': path_builder("1_s_G1_L001_sequences.fastq.gz"),
'fp_type': "raw_forward_seqs",
'checksum': '2125826711',
'fp_size': 58},
{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
def test_retrieve_filepaths_sort(self):
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, sort='descending')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58},
{'fp_id': 1,
'fp': path_builder("1_s_G1_L001_sequences.fastq.gz"),
'fp_type': "raw_forward_seqs",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
def test_retrieve_filepaths_type(self):
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, sort='descending',
fp_type='raw_barcodes')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, fp_type='raw_barcodes')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, fp_type='biom')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
self.assertEqual(obs, [])
def test_retrieve_filepaths_error(self):
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.util.retrieve_filepaths('artifact_filepath', 'artifact_id', 1,
sort='Unknown')
def test_empty_trash_upload_folder(self):
# creating file to delete so we know it actually works
study_id = '1'
uploads_fp = join(qdb.util.get_mountpoint("uploads")[0][1], study_id)
trash = join(uploads_fp, 'trash')
if not exists(trash):
mkdir(trash)
fp = join(trash, 'my_file_to_delete.txt')
open(fp, 'w').close()
self.assertTrue(exists(fp))
qdb.util.empty_trash_upload_folder()
self.assertFalse(exists(fp))
def test_move_filepaths_to_upload_folder(self):
# we are going to test the move_filepaths_to_upload_folder indirectly
# by creating an artifact and deleting it. To accomplish this we need
# to create a new prep info file, attach a biom with html_summary and
# then deleting it. However, we will do this twice to assure that
# there are no conflicts with this
study_id = 1
# creating the 2 sets of files for the 2 artifacts
fd, seqs_fp1 = mkstemp(suffix='_seqs.fastq')
close(fd)
html_fp1 = mkdtemp()
html_fp1 = join(html_fp1, 'support_files')
mkdir(html_fp1)
with open(join(html_fp1, 'index.html'), 'w') as fp:
fp.write(">AAA\nAAA")
fd, seqs_fp2 = mkstemp(suffix='_seqs.fastq')
close(fd)
html_fp2 = mkdtemp()
html_fp2 = join(html_fp2, 'support_files')
mkdir(html_fp2)
with open(join(html_fp2, 'index.html'), 'w') as fp:
fp.write(">AAA\nAAA")
# creating new prep info file
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'Illumina',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}}
metadata = pd.DataFrame.from_dict(
metadata_dict, orient='index', dtype=str)
pt1 = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(study_id), "16S")
pt2 = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(study_id), "16S")
# inserting artifact 1
artifact1 = qdb.artifact.Artifact.create(
[(seqs_fp1, 1), (html_fp1, 'html_summary')], "FASTQ",
prep_template=pt1)
# inserting artifact 2
artifact2 = qdb.artifact.Artifact.create(
[(seqs_fp2, 1), (html_fp2, 'html_summary')], "FASTQ",
prep_template=pt2)
# retrieving filepaths
filepaths = artifact1.filepaths
filepaths.extend(artifact2.filepaths)
# delete artifacts
qdb.artifact.Artifact.delete(artifact1.id)
qdb.artifact.Artifact.delete(artifact2.id)
# now let's create another artifact with the same filenames that
# artifact1 so we can test successfull overlapping of names
with open(seqs_fp1, 'w') as fp:
fp.write(">AAA\nAAA")
mkdir(html_fp1)
with open(join(html_fp1, 'index.html'), 'w') as fp:
fp.write(">AAA\nAAA")
artifact3 = qdb.artifact.Artifact.create(
[(seqs_fp1, 1), (html_fp1, 'html_summary')], "FASTQ",
prep_template=pt1)
filepaths.extend(artifact2.filepaths)
qdb.artifact.Artifact.delete(artifact3.id)
# check that they do not exist in the old path but do in the new one
path_for_removal = join(qdb.util.get_mountpoint("uploads")[0][1],
str(study_id))
for x in filepaths:
self.assertFalse(exists(x['fp']))
new_fp = join(path_for_removal, basename(x['fp']))
if x['fp_type'] == 'html_summary':
# The html summary gets removed, not moved
self.assertFalse(exists(new_fp))
else:
self.assertTrue(exists(new_fp))
self.files_to_remove.append(new_fp)
def test_get_mountpoint(self):
exp = [(5, join(qdb.util.get_db_files_base_dir(), 'raw_data'))]
obs = qdb.util.get_mountpoint("raw_data")
self.assertEqual(obs, exp)
exp = [(1, join(qdb.util.get_db_files_base_dir(), 'analysis'))]
obs = qdb.util.get_mountpoint("analysis")
self.assertEqual(obs, exp)
exp = [(2, join(qdb.util.get_db_files_base_dir(), 'job'))]
obs = qdb.util.get_mountpoint("job")
self.assertEqual(obs, exp)
# inserting new ones so we can test that it retrieves these and
# doesn't alter other ones
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.data_directory SET active=false WHERE "
"data_directory_id=1")
count = qdb.util.get_count('qiita.data_directory')
sql = """INSERT INTO qiita.data_directory (data_type, mountpoint,
subdirectory, active)
VALUES ('analysis', 'analysis_tmp', true, true),
('raw_data', 'raw_data_tmp', true, false)"""
qdb.sql_connection.perform_as_transaction(sql)
# this should have been updated
exp = [(count + 1, join(qdb.util.get_db_files_base_dir(),
'analysis_tmp'))]
obs = qdb.util.get_mountpoint("analysis")
self.assertEqual(obs, exp)
# these 2 shouldn't
exp = [(5, join(qdb.util.get_db_files_base_dir(), 'raw_data'))]
obs = qdb.util.get_mountpoint("raw_data")
self.assertEqual(obs, exp)
exp = [(2, join(qdb.util.get_db_files_base_dir(), 'job'))]
obs = qdb.util.get_mountpoint("job")
self.assertEqual(obs, exp)
# testing multi returns
exp = [(5, join(qdb.util.get_db_files_base_dir(), 'raw_data')),
(count + 2, join(qdb.util.get_db_files_base_dir(),
'raw_data_tmp'))]
obs = qdb.util.get_mountpoint("raw_data", retrieve_all=True)
self.assertEqual(obs, exp)
# testing retrieve subdirectory
exp = [
(5, join(qdb.util.get_db_files_base_dir(), 'raw_data'), False),
(count + 2, join(qdb.util.get_db_files_base_dir(), 'raw_data_tmp'),
True)]
obs = qdb.util.get_mountpoint("raw_data", retrieve_all=True,
retrieve_subdir=True)
self.assertEqual(obs, exp)
def test_get_mountpoint_path_by_id(self):
exp = join(qdb.util.get_db_files_base_dir(), 'raw_data')
obs = qdb.util.get_mountpoint_path_by_id(5)
self.assertEqual(obs, exp)
exp = join(qdb.util.get_db_files_base_dir(), 'analysis')
obs = qdb.util.get_mountpoint_path_by_id(1)
self.assertEqual(obs, exp)
exp = join(qdb.util.get_db_files_base_dir(), 'job')
obs = qdb.util.get_mountpoint_path_by_id(2)
self.assertEqual(obs, exp)
# inserting new ones so we can test that it retrieves these and
# doesn't alter other ones
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.data_directory SET active=false WHERE "
"data_directory_id=1")
count = qdb.util.get_count('qiita.data_directory')
sql = """INSERT INTO qiita.data_directory (data_type, mountpoint,
subdirectory, active)
VALUES ('analysis', 'analysis_tmp', true, true),
('raw_data', 'raw_data_tmp', true, false)"""
qdb.sql_connection.perform_as_transaction(sql)
# this should have been updated
exp = join(qdb.util.get_db_files_base_dir(), 'analysis_tmp')
obs = qdb.util.get_mountpoint_path_by_id(count + 1)
self.assertEqual(obs, exp)
# these 2 shouldn't
exp = join(qdb.util.get_db_files_base_dir(), 'raw_data')
obs = qdb.util.get_mountpoint_path_by_id(5)
self.assertEqual(obs, exp)
exp = join(qdb.util.get_db_files_base_dir(), 'job')
obs = qdb.util.get_mountpoint_path_by_id(2)
self.assertEqual(obs, exp)
def test_get_files_from_uploads_folders(self):
# something has been uploaded and ignoring hidden files/folders
# and folders
exp = (7, 'uploaded_file.txt', '0B')
obs = qdb.util.get_files_from_uploads_folders("1")
self.assertIn(exp, obs)
# nothing has been uploaded
exp = []
obs = qdb.util.get_files_from_uploads_folders("2")
self.assertEqual(obs, exp)
def test_move_upload_files_to_trash(self):
test_filename = 'this_is_a_test_file.txt'
# create file to move to trash
fid, folder = qdb.util.get_mountpoint("uploads")[0]
test_fp = join(folder, '1', test_filename)
with open(test_fp, 'w') as f:
f.write('test')
self.files_to_remove.append(test_fp)
exp = (fid, 'this_is_a_test_file.txt', '4B')
obs = qdb.util.get_files_from_uploads_folders("1")
self.assertIn(exp, obs)
# move file
qdb.util.move_upload_files_to_trash(1, [(fid, test_filename)])
obs = qdb.util.get_files_from_uploads_folders("1")
self.assertNotIn(obs, exp)
# if the file doesn't exist, don't raise any errors
qdb.util.move_upload_files_to_trash(1, [(fid, test_filename)])
# testing errors
# - study doesn't exist
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.util.move_upload_files_to_trash(100, [(fid, test_filename)])
# - fid doen't exist
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.util.move_upload_files_to_trash(1, [(10, test_filename)])
# removing trash folder
rmtree(join(folder, '1', 'trash'))
def test_get_environmental_packages(self):
obs = qdb.util.get_environmental_packages()
exp = [['air', 'ep_air'],
['built environment', 'ep_built_environment'],
['host-associated', 'ep_host_associated'],
['human-amniotic-fluid', 'ep_human_amniotic_fluid'],
['human-associated', 'ep_human_associated'],
['human-blood', 'ep_human_blood'],
['human-gut', 'ep_human_gut'],
['human-oral', 'ep_human_oral'],
['human-skin', 'ep_human_skin'],
['human-urine', 'ep_human_urine'],
['human-vaginal', 'ep_human_vaginal'],
['microbial mat/biofilm', 'ep_microbial_mat_biofilm'],
['miscellaneous natural or artificial environment',
'ep_misc_artif'],
['plant-associated', 'ep_plant_associated'],
['sediment', 'ep_sediment'],
['soil', 'ep_soil'],
['wastewater/sludge', 'ep_wastewater_sludge'],
['water', 'ep_water']]
self.assertEqual(sorted(obs), sorted(exp))
def test_get_timeseries_types(self):
obs = qdb.util.get_timeseries_types()
exp = [[1, 'None', 'None'],
[2, 'real', 'single intervention'],
[3, 'real', 'multiple intervention'],
[4, 'real', 'combo intervention'],
[5, 'pseudo', 'single intervention'],
[6, 'pseudo', 'multiple intervention'],
[7, 'pseudo', 'combo intervention'],
[8, 'mixed', 'single intervention'],
[9, 'mixed', 'multiple intervention'],
[10, 'mixed', 'combo intervention']]
self.assertEqual(obs, exp)
def test_get_filepath_information(self):
obs = qdb.util.get_filepath_information(1)
# This path is machine specific. Just checking that is not empty
self.assertIsNotNone(obs.pop('fullpath'))
exp = {'filepath_id': 1, 'filepath': '1_s_G1_L001_sequences.fastq.gz',
'filepath_type': 'raw_forward_seqs', 'checksum': '2125826711',
'data_type': 'raw_data', 'mountpoint': 'raw_data',
'subdirectory': False, 'active': True}
self.assertEqual(obs, exp)
def test_filepath_id_to_rel_path(self):
obs = qdb.util.filepath_id_to_rel_path(1)
exp = 'raw_data/1_s_G1_L001_sequences.fastq.gz'
self.assertEqual(obs, exp)
obs = qdb.util.filepath_id_to_rel_path(3)
exp = 'preprocessed_data/1_seqs.fna'
self.assertEqual(obs, exp)
fd, fp = mkstemp()
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self.files_to_remove.append(fp)
test = qdb.util.insert_filepaths(
[(fp, "raw_forward_seqs")], 2, "FASTQ")[0]
sql = """INSERT INTO qiita.artifact_filepath
(artifact_id, filepath_id)
VALUES (%s, %s)"""
qdb.sql_connection.perform_as_transaction(sql, [2, test])
obs = qdb.util.filepath_id_to_rel_path(test)
exp = 'FASTQ/2/%s' % basename(fp)
self.assertEqual(obs, exp)
def test_filepath_ids_to_rel_paths(self):
fd, fp = mkstemp()
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self.files_to_remove.append(fp)
test = qdb.util.insert_filepaths(
[(fp, "raw_forward_seqs")], 2, "FASTQ")[0]
sql = """INSERT INTO qiita.artifact_filepath
(artifact_id, filepath_id)
VALUES (%s, %s)"""
qdb.sql_connection.perform_as_transaction(sql, [2, test])
obs = qdb.util.filepath_ids_to_rel_paths([1, 3, test])
exp = {1: 'raw_data/1_s_G1_L001_sequences.fastq.gz',
3: 'preprocessed_data/1_seqs.fna',
test: 'FASTQ/2/%s' % basename(fp)}
self.assertEqual(obs, exp)
def test_add_message(self):
count = qdb.util.get_count('qiita.message') + 1
user = qdb.user.User.create('new@test.bar', 'password')
users = [user]
qdb.util.add_message("TEST MESSAGE", users)
obs = [[x[0], x[1]] for x in user.messages()]
exp = [[count, 'TEST MESSAGE']]
self.assertEqual(obs, exp)
def test_add_system_message(self):
count = qdb.util.get_count('qiita.message') + 1
qdb.util.add_system_message("SYS MESSAGE",
datetime(2015, 8, 5, 19, 41))
obs = [[x[0], x[1]]
for x in qdb.user.User('shared@foo.bar').messages()]
exp = [[count, 'SYS MESSAGE'], [1, 'message 1']]
self.assertEqual(obs, exp)
obs = [[x[0], x[1]] for x in qdb.user.User('admin@foo.bar').messages()]
exp = [[count, 'SYS MESSAGE']]
self.assertEqual(obs, exp)
sql = "SELECT expiration from qiita.message WHERE message_id = %s"
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql, [count])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[datetime(2015, 8, 5, 19, 41)]]
self.assertEqual(obs, exp)
    def test_clear_system_messages(self):
        # A fresh user starts with no messages; after a system broadcast the
        # message appears, and clear_system_messages() removes it again.
        message_id = qdb.util.get_count('qiita.message') + 1
        user = qdb.user.User.create('csm@test.bar', 'password')
        obs = [[x[0], x[1]] for x in user.messages()]
        exp = []
        self.assertEqual(obs, exp)
        qdb.util.add_system_message("SYS MESSAGE",
                                    datetime(2015, 8, 5, 19, 41))
        obs = [[x[0], x[1]] for x in user.messages()]
        exp = [[message_id, 'SYS MESSAGE']]
        self.assertCountEqual(obs, exp)
        qdb.util.clear_system_messages()
        obs = [[x[0], x[1]] for x in user.messages()]
        exp = []
        self.assertEqual(obs, exp)
        # Run again with no system messages to make sure no errors
        qdb.util.clear_system_messages()
    def test_supported_filepath_types(self):
        # Each entry is a [filepath_type, required] pair for the given
        # artifact type.
        obs = qdb.util.supported_filepath_types("FASTQ")
        exp = [["raw_forward_seqs", True], ["raw_reverse_seqs", False],
               ["raw_barcodes", True]]
        self.assertCountEqual(obs, exp)
        obs = qdb.util.supported_filepath_types("BIOM")
        exp = [["biom", True], ["directory", False], ["log", False]]
        self.assertCountEqual(obs, exp)
    def test_generate_analysis_list(self):
        """Listing metadata for a set of analysis ids."""
        self.assertEqual(qdb.util.generate_analysis_list([]), [])
        # NOTE(review): ids 3 and 5 are absent from the expected output —
        # presumably filtered out by the function; confirm against its docs.
        obs = qdb.util.generate_analysis_list([1, 2, 3, 5])
        exp = [{'mapping_files': [
                (16, qdb.util.get_filepath_information(16)['fullpath'])],
                'description': 'A test analysis', 'artifacts': [9], 'name':
                'SomeAnalysis', 'analysis_id': 1, 'visibility': 'private'},
               {'mapping_files': [], 'description': 'Another test analysis',
                'artifacts': [], 'name': 'SomeSecondAnalysis',
                'analysis_id': 2, 'visibility': 'private'}]
        # removing timestamp for testing
        for i in range(len(obs)):
            del obs[i]['timestamp']
        self.assertEqual(obs, exp)
        # with the second argument True, no analyses are returned here
        self.assertEqual(
            qdb.util.generate_analysis_list([1, 2, 3, 5], True), [])
@qiita_test_checker()
class UtilTests(TestCase):
    """Tests for the util functions that do not need to access the DB"""
    def setUp(self):
        # Temp file with known content so checksums are deterministic.
        fh, self.filepath = mkstemp()
        close(fh)
        with open(self.filepath, "w") as f:
            f.write("Some text so we can actually compute a checksum")
    def test_compute_checksum(self):
        """Correctly returns the file checksum"""
        obs = qdb.util.compute_checksum(self.filepath)
        exp = 1719580229
        self.assertEqual(obs, exp)
    def test_scrub_data_nothing(self):
        """Returns the same string without changes"""
        self.assertEqual(qdb.util.scrub_data("nothing_changes"),
                         "nothing_changes")
    def test_scrub_data_semicolon(self):
        """Correctly removes the semicolon from the string"""
        self.assertEqual(qdb.util.scrub_data("remove_;_char"), "remove__char")
    def test_scrub_data_single_quote(self):
        """Correctly removes single quotes from the string"""
        self.assertEqual(qdb.util.scrub_data("'quotes'"), "quotes")
    def test_get_visibilities(self):
        """All visibility values known to the system are returned."""
        obs = qdb.util.get_visibilities()
        exp = ['awaiting_approval', 'sandbox', 'private', 'public']
        self.assertEqual(obs, exp)
    def test_infer_status(self):
        # Precedence demonstrated by the asserts below:
        # public > private > awaiting_approval > sandbox; empty -> sandbox.
        obs = qdb.util.infer_status([])
        self.assertEqual(obs, 'sandbox')
        obs = qdb.util.infer_status([['private']])
        self.assertEqual(obs, 'private')
        obs = qdb.util.infer_status([['private'], ['public']])
        self.assertEqual(obs, 'public')
        obs = qdb.util.infer_status([['sandbox'], ['awaiting_approval']])
        self.assertEqual(obs, 'awaiting_approval')
        obs = qdb.util.infer_status([['sandbox'], ['sandbox']])
        self.assertEqual(obs, 'sandbox')
    def test_get_pubmed_ids_from_dois(self):
        # Empty strings are skipped; result maps DOI -> pubmed id.
        exp = {'10.100/123456': '123456'}
        obs = qdb.util.get_pubmed_ids_from_dois(['', '10.100/123456'])
        self.assertEqual(obs, exp)
    def test_generate_study_list(self):
        """End-to-end check of generate_study_list visibility handling."""
        USER = qdb.user.User
        STUDY = qdb.study.Study
        PREP = qdb.metadata_template.prep_template.PrepTemplate
        UTIL = qdb.util
        # testing owner email as name
        user = USER('test@foo.bar')
        username = user.info['name']
        # test without changes
        self.assertDictEqual(
            STUDY_INFO, UTIL.generate_study_list(user, 'user')[0])
        # change user's name to None and test again: the listing falls back
        # to the owner's email address
        user.info = {'name': None}
        exp = STUDY_INFO.copy()
        exp['owner'] = 'test@foo.bar'
        self.assertDictEqual(
            exp, qdb.util.generate_study_list(user, 'user')[0])
        # returning original name
        user.info = {'name': username}
        # creating a new study to make sure that empty studies are also
        # returned
        info = {"timeseries_type_id": 1, "metadata_complete": True,
                "mixs_compliant": True, "study_alias": "TST",
                "study_description": "Some description of the study goes here",
                "study_abstract": "Some abstract goes here",
                "principal_investigator_id": qdb.study.StudyPerson(1),
                "lab_person_id": qdb.study.StudyPerson(1)}
        new_study = STUDY.create(
            USER('shared@foo.bar'), 'test_study_1', info=info)
        snew_info = {
            'status': 'sandbox', 'study_title': 'test_study_1',
            'metadata_complete': True, 'publication_pid': [],
            'artifact_biom_ids': [], 'autoloaded': False,
            'ebi_submission_status': 'not submitted',
            'study_id': new_study.id, 'ebi_study_accession': None,
            'owner': 'Shared', 'shared': [],
            'study_abstract': 'Some abstract goes here',
            'pi': ('lab_dude@foo.bar', 'LabDude'), 'publication_doi': [],
            'study_alias': 'TST', 'study_tags': None,
            'preparation_data_types': [], 'number_samples_collected': 0}
        exp1 = [STUDY_INFO]
        exp2 = [snew_info]
        exp_both = [STUDY_INFO, snew_info]
        # let's make sure that everything is private for study 1
        for a in STUDY(1).artifacts():
            a.visibility = 'private'
        # owner of study
        obs = UTIL.generate_study_list(USER('test@foo.bar'), 'user')
        self.assertEqual(len(obs), 1)
        self.assertDictEqual(obs[0], exp1[0])
        # shared with
        obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'user')
        self.assertEqual(len(obs), 2)
        self.assertDictEqual(obs[0], exp_both[0])
        self.assertDictEqual(obs[1], exp_both[1])
        # admin
        obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'user')
        self.assertEqual(obs, exp_both)
        # no access/hidden
        obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'user')
        self.assertEqual(obs, [])
        # public - none for everyone
        obs = UTIL.generate_study_list(USER('test@foo.bar'), 'public')
        self.assertEqual(obs, [])
        obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'public')
        self.assertEqual(obs, [])
        obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'public')
        self.assertEqual(obs, [])
        obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'public')
        self.assertEqual(obs, [])
        def _avoid_duplicated_tests(all_artifacts=False):
            # shared helper re-run after each visibility change below;
            # nothing should change for owner, shared
            obs = UTIL.generate_study_list(USER('test@foo.bar'), 'user')
            self.assertEqual(obs, exp1)
            obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'user')
            self.assertEqual(obs, exp_both)
            # for admin it should be shown in public and user cause there are
            # 2 preps and only one is public
            obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'user')
            if not all_artifacts:
                self.assertEqual(obs, exp_both)
            else:
                self.assertEqual(obs, exp2)
            obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'user')
            self.assertEqual(obs, [])
            # for the public query, everything should be same for owner, share
            # and admin but demo should now see it as public but with limited
            # artifacts
            obs = UTIL.generate_study_list(USER('test@foo.bar'), 'public')
            self.assertEqual(obs, [])
            obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'public')
            self.assertEqual(obs, [])
            obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'public')
            if not all_artifacts:
                exp1[0]['artifact_biom_ids'] = [7]
            self.assertEqual(obs, exp1)
            obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'public')
            self.assertEqual(obs, exp1)
            # restoring the full artifact list for the next invocation
            exp1[0]['artifact_biom_ids'] = [4, 5, 6, 7]
        # make artifacts of prep 2 public
        PREP(2).artifact.visibility = 'public'
        exp1[0]['status'] = 'public'
        exp_both[0]['status'] = 'public'
        _avoid_duplicated_tests()
        # make artifacts of prep 1 awaiting_approval
        PREP(1).artifact.visibility = 'awaiting_approval'
        _avoid_duplicated_tests()
        # making all studies public
        PREP(1).artifact.visibility = 'public'
        _avoid_duplicated_tests(True)
        # deleting the new study and restoring artifact status
        qdb.study.Study.delete(new_study.id)
        PREP(1).artifact.visibility = 'private'
        PREP(2).artifact.visibility = 'private'
    def test_generate_study_list_errors(self):
        # An unknown visibility keyword must raise ValueError.
        with self.assertRaises(ValueError):
            qdb.util.generate_study_list(qdb.user.User('test@foo.bar'), 'bad')
    def test_generate_study_list_without_artifacts(self):
        """Study listing sans artifact data, with and without a portal."""
        # creating a new study to make sure that empty studies are also
        # returned
        info = {"timeseries_type_id": 1, "metadata_complete": True,
                "mixs_compliant": True, "study_alias": "TST",
                "study_description": "Some description of the study goes here",
                "study_abstract": "Some abstract goes here",
                "principal_investigator_id": qdb.study.StudyPerson(1),
                "lab_person_id": qdb.study.StudyPerson(1)}
        new_study = qdb.study.Study.create(
            qdb.user.User('shared@foo.bar'), 'test_study_1', info=info)
        exp_info = [
            {'status': 'private', 'study_title': (
                'Identification of the Microbiomes for Cannabis Soils'),
             'metadata_complete': True, 'publication_pid': [
                '123456', '7891011'], 'ebi_submission_status': 'submitted',
             'study_id': 1, 'ebi_study_accession': 'EBI123456-BB',
             'autoloaded': False,
             'study_abstract': (
                 'This is a preliminary study to examine the microbiota '
                 'associated with the Cannabis plant. Soils samples from '
                 'the bulk soil, soil associated with the roots, and the '
                 'rhizosphere were extracted and the DNA sequenced. Roots '
                 'from three independent plants of different strains were '
                 'examined. These roots were obtained November 11, 2011 from '
                 'plants that had been harvested in the summer. Future studies '
                 'will attempt to analyze the soils and rhizospheres from the '
                 'same location at different time points in the plant '
                 'lifecycle.'), 'pi': ('PI_dude@foo.bar', 'PIDude'),
             'publication_doi': ['10.100/123456', '10.100/7891011'],
             'study_alias': 'Cannabis Soils', 'number_samples_collected': 27},
            {'status': 'sandbox', 'study_title': 'test_study_1',
             'metadata_complete': True, 'publication_pid': [],
             'ebi_submission_status': 'not submitted', 'autoloaded': False,
             'study_id': new_study.id, 'ebi_study_accession': None,
             'study_abstract': 'Some abstract goes here',
             'pi': ('lab_dude@foo.bar', 'LabDude'), 'publication_doi': [],
             'study_alias': 'TST', 'number_samples_collected': 0}]
        obs_info = qdb.util.generate_study_list_without_artifacts([1, 2, 3, 4])
        self.assertEqual(obs_info, exp_info)
        # restricting to the 'EMP' portal yields nothing for these studies
        obs_info = qdb.util.generate_study_list_without_artifacts(
            [1, 2, 3, 4], 'EMP')
        self.assertEqual(obs_info, [])
        # deleting the newly created study
        qdb.study.Study.delete(new_study.id)
    def test_get_artifacts_information(self):
        """get_artifacts_information for BIOMs, honoring check_biom_merge."""
        # we are going to test that it ignores 1 and 2 cause they are not biom,
        # 4 has all information and 7 and 8 don't
        obs = qdb.util.get_artifacts_information([1, 2, 4, 6, 7, 8])
        # not testing timestamp
        for i in range(len(obs)):
            del obs[i]['timestamp']
        exp = [
            {'artifact_id': 6, 'target_subfragment': ['V4'],
             'prep_samples': 27, 'platform': 'Illumina',
             'target_gene': '16S rRNA', 'name': 'BIOM', 'data_type': '16S',
             'parameters': {'reference': '2', 'similarity': '0.97',
                            'sortmerna_e_value': '1',
                            'sortmerna_max_pos': '10000', 'threads': '1',
                            'sortmerna_coverage': '0.97'},
             'algorithm': 'Pick closed-reference OTUs | Split libraries FASTQ',
             'algorithm_az': 'd480799a0a7a2fbe0e9022bc9c602018',
             'deprecated': False, 'active': True,
             'files': ['1_study_1001_closed_reference_otu_table_Silva.biom']},
            {'artifact_id': 4, 'target_subfragment': ['V4'],
             'prep_samples': 27, 'platform': 'Illumina',
             'target_gene': '16S rRNA', 'name': 'BIOM', 'data_type': '18S',
             'parameters': {'reference': '1', 'similarity': '0.97',
                            'sortmerna_e_value': '1',
                            'sortmerna_max_pos': '10000', 'threads': '1',
                            'sortmerna_coverage': '0.97'},
             'algorithm': 'Pick closed-reference OTUs | Split libraries FASTQ',
             'algorithm_az': 'd480799a0a7a2fbe0e9022bc9c602018',
             'deprecated': False, 'active': True,
             'files': ['1_study_1001_closed_reference_otu_table.biom']},
            {'artifact_id': 7, 'target_subfragment': ['V4'],
             'prep_samples': 27, 'platform': 'Illumina',
             'target_gene': '16S rRNA', 'name': 'BIOM', 'data_type': '16S',
             'parameters': {}, 'algorithm': '', 'algorithm_az': '',
             'deprecated': False, 'active': True,
             'files': ['biom_table.biom']},
            {'artifact_id': 8, 'target_subfragment': [], 'prep_samples': 0,
             'platform': 'not provided', 'target_gene': 'not provided', 'name':
             'noname', 'data_type': '18S', 'parameters': {}, 'algorithm': '',
             'algorithm_az': '', 'deprecated': False, 'active': True,
             'files': ['biom_table.biom']}]
        self.assertCountEqual(obs, exp)
        exp = exp[1:]
        # now let's test that the order given by the commands actually give the
        # correct results
        with qdb.sql_connection.TRN:
            # setting up database changes for just checking commands
            qdb.sql_connection.TRN.add(
                """UPDATE qiita.command_parameter SET check_biom_merge = True
                   WHERE parameter_name = 'reference'""")
            qdb.sql_connection.TRN.execute()
            # testing that it works as expected: the flagged parameter is now
            # embedded in the algorithm string (and changes its hash)
            obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
            # not testing timestamp
            for i in range(len(obs)):
                del obs[i]['timestamp']
            exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1) '
                                   '| Split libraries FASTQ')
            exp[0]['algorithm_az'] = '33fed1b35728417d7ba4139b8f817d44'
            self.assertCountEqual(obs, exp)
            # setting up database changes for also command output
            qdb.sql_connection.TRN.add(
                "UPDATE qiita.command_output SET check_biom_merge = True")
            qdb.sql_connection.TRN.execute()
            obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
            # not testing timestamp
            for i in range(len(obs)):
                del obs[i]['timestamp']
            exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1, '
                                   'BIOM: 1_study_1001_closed_reference_'
                                   'otu_table.biom) | Split libraries FASTQ')
            exp[0]['algorithm_az'] = 'de5b794a2cacd428f36fea86df196bfd'
            self.assertCountEqual(obs, exp)
            # let's test that we ignore the parent_info
            qdb.sql_connection.TRN.add("""UPDATE qiita.software_command
                                       SET ignore_parent_command = True""")
            qdb.sql_connection.TRN.execute()
            obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
            # not testing timestamp
            for i in range(len(obs)):
                del obs[i]['timestamp']
            exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1, '
                                   'BIOM: 1_study_1001_closed_reference_'
                                   'otu_table.biom)')
            exp[0]['algorithm_az'] = '7f59a45b2f0d30cd1ed1929391c26e07'
            self.assertCountEqual(obs, exp)
            # let's test that we ignore the parent_info
            qdb.sql_connection.TRN.add("""UPDATE qiita.software_command
                                       SET ignore_parent_command = True""")
            qdb.sql_connection.TRN.execute()
            obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
            # not testing timestamp
            for i in range(len(obs)):
                del obs[i]['timestamp']
            exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1, '
                                   'BIOM: 1_study_1001_closed_reference_'
                                   'otu_table.biom)')
            exp[0]['algorithm_az'] = '7f59a45b2f0d30cd1ed1929391c26e07'
            self.assertCountEqual(obs, exp)
            # returning database as it was
            qdb.sql_connection.TRN.add(
                "UPDATE qiita.command_output SET check_biom_merge = False")
            qdb.sql_connection.TRN.add("""UPDATE qiita.software_command
                                       SET ignore_parent_command = False""")
            qdb.sql_connection.TRN.add(
                """UPDATE qiita.command_parameter SET check_biom_merge = False
                   WHERE parameter_name = 'reference'""")
            qdb.sql_connection.TRN.execute()
class TestFilePathOpening(TestCase):
    """Tests adapted from scikit-bio's skbio.io.util tests"""
    def test_is_string_or_bytes(self):
        # str, unicode literals and bytes are "string-like"; file objects
        # and other types are not.
        self.assertTrue(qdb.util._is_string_or_bytes('foo'))
        self.assertTrue(qdb.util._is_string_or_bytes(u'foo'))
        self.assertTrue(qdb.util._is_string_or_bytes(b'foo'))
        self.assertFalse(qdb.util._is_string_or_bytes(StringIO('bar')))
        self.assertFalse(qdb.util._is_string_or_bytes([1]))
    def test_file_closed(self):
        """File gets closed in decorator"""
        f = NamedTemporaryFile('r')
        filepath = f.name
        with qdb.util.open_file(filepath) as fh:
            pass
        self.assertTrue(fh.closed)
    def test_file_closed_harder(self):
        """File gets closed in decorator, even if exceptions happen."""
        f = NamedTemporaryFile('r')
        filepath = f.name
        try:
            with qdb.util.open_file(filepath) as fh:
                raise TypeError
        except TypeError:
            self.assertTrue(fh.closed)
        else:
            # If we're here, no exceptions have been raised inside the
            # try clause, so the context manager swallowed them. No
            # good.
            raise Exception("`open_file` didn't propagate exceptions")
    def test_filehandle(self):
        """Filehandles slip through untouched"""
        with TemporaryFile('r') as fh:
            with qdb.util.open_file(fh) as ffh:
                self.assertTrue(fh is ffh)
            # And it doesn't close the file-handle
            self.assertFalse(fh.closed)
    def test_StringIO(self):
        """StringIO (useful e.g. for testing) slips through."""
        f = StringIO("File contents")
        with qdb.util.open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_BytesIO(self):
        """BytesIO (useful e.g. for testing) slips through."""
        f = BytesIO(b"File contents")
        with qdb.util.open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_hdf5IO(self):
        """This tests that if we send a file handler it returns it"""
        f = h5py.File('test', driver='core', backing_store=False, mode='w')
        with qdb.util.open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_hdf5IO_open(self):
        # A path to an existing HDF5 file is opened as an h5py.File.
        with NamedTemporaryFile(delete=False) as fh:
            name = fh.name
            fh.close()
            h5file = h5py.File(name, 'w')
            h5file.close()
            with qdb.util.open_file(name) as fh_inner:
                self.assertTrue(isinstance(fh_inner, h5py.File))
        remove(name)
class PurgeFilepathsTests(DBUtilTestsBase):
    """Tests that purge_filepaths removes orphan DB rows and folders."""
    def _get_current_filepaths(self):
        # Full paths for every row currently in qiita.filepath.
        sql_fp = "SELECT filepath_id FROM qiita.filepath"
        with qdb.sql_connection.TRN:
            qdb.sql_connection.TRN.add(sql_fp)
            results = qdb.sql_connection.TRN.execute_fetchflatten()
        return [qdb.util.get_filepath_information(_id)['fullpath']
                for _id in results]
    def _create_files(self, files):
        # format is: [mp_id, fp_type_id, file_name]
        sql = """INSERT INTO qiita.filepath (
                    data_directory_id, filepath_type_id, filepath, checksum,
                    checksum_algorithm_id)
                 VALUES (%s, %s, %s, '852952723', 1) RETURNING filepath_id"""
        with qdb.sql_connection.TRN:
            for f in files:
                qdb.sql_connection.TRN.add(sql, tuple(f))
                fid = qdb.sql_connection.TRN.execute_fetchflatten()[0]
                qdb.util.get_filepath_information(fid)
    def test_purge_filepaths_test(self):
        # Get all the filepaths so we can test if they've been removed or not
        fps_expected = self._get_current_filepaths()
        # Make sure that the files exist - specially for travis
        for fp in fps_expected:
            if not exists(fp):
                with open(fp, 'w') as f:
                    f.write('\n')
                self.files_to_remove.append(fp)
        # nothing should be removed
        qdb.util.purge_filepaths()
        fps_viewed = self._get_current_filepaths()
        self.assertCountEqual(fps_expected, fps_viewed)
        # testing study filepath delete by inserting a new study sample info
        # and make sure it gets deleted
        mp_id, mp = qdb.util.get_mountpoint('templates')[0]
        txt_id = qdb.util.convert_to_id('sample_template', "filepath_type")
        self._create_files([[mp_id, txt_id, '100_filepath.txt']])
        qdb.util.purge_filepaths()
        fps_viewed = self._get_current_filepaths()
        self.assertCountEqual(fps_expected, fps_viewed)
        # testing artifact [A], creating a folder with an artifact that
        # doesn't exist
        _, mp = qdb.util.get_mountpoint('per_sample_FASTQ')[0]
        not_an_artifact_fp = join(mp, '10000')
        mkdir(not_an_artifact_fp)
        # now let's add test for [B] by creating 2 filepaths without a
        # link to the artifacts tables
        mp_id, mp = qdb.util.get_mountpoint('BIOM')[0]
        biom_id = qdb.util.convert_to_id('biom', "filepath_type")
        self._create_files([
            [mp_id, txt_id, 'artifact_filepath.txt'],
            [mp_id, biom_id, 'my_biom.biom']
        ])
        # adding files to tests
        qdb.util.purge_filepaths()
        fps_viewed = self._get_current_filepaths()
        self.assertCountEqual(fps_expected, fps_viewed)
        self.assertFalse(exists(not_an_artifact_fp))
        # testing analysis filepath delete by filepaths for 2 different files
        # and making sure they get deleted
        mp_id, mp = qdb.util.get_mountpoint('analysis')[0]
        biom_id = qdb.util.convert_to_id('biom', "filepath_type")
        self._create_files([
            [mp_id, txt_id, '10000_my_analysis_map.txt'],
            [mp_id, biom_id, '10000_my_analysis_biom.biom']
        ])
        qdb.util.purge_filepaths()
        fps_viewed = self._get_current_filepaths()
        self.assertCountEqual(fps_expected, fps_viewed)
# Expected `generate_study_list` entry for study 1, shared by the study
# listing tests above (note: some tests mutate copies of this dict).
STUDY_INFO = {
    'study_id': 1,
    'owner': 'Dude',
    'study_alias': 'Cannabis Soils',
    'status': 'private',
    'study_abstract':
        'This is a preliminary study to examine the microbiota '
        'associated with the Cannabis plant. Soils samples '
        'from the bulk soil, soil associated with the roots, '
        'and the rhizosphere were extracted and the DNA '
        'sequenced. Roots from three independent plants of '
        'different strains were examined. These roots were '
        'obtained November 11, 2011 from plants that had been '
        'harvested in the summer. Future studies will attempt '
        'to analyze the soils and rhizospheres from the same '
        'location at different time points in the plant '
        'lifecycle.',
    'metadata_complete': True,
    'autoloaded': False,
    'ebi_study_accession': 'EBI123456-BB',
    'ebi_submission_status': 'submitted',
    'study_title':
        'Identification of the Microbiomes for Cannabis Soils',
    'number_samples_collected': 27,
    'shared': [('shared@foo.bar', 'Shared')],
    'publication_doi': ['10.100/123456', '10.100/7891011'],
    'publication_pid': ['123456', '7891011'],
    'pi': ('PI_dude@foo.bar', 'PIDude'),
    'artifact_biom_ids': [4, 5, 6, 7],
    'preparation_data_types': ['18S'],
    'study_tags': None,
}
# Allow running this test module directly.
if __name__ == '__main__':
    main()
| 43.342185
| 79
| 0.594433
|
4a0bb1163e03f7f0af9a868079320000f6c2c5e8
| 5,905
|
py
|
Python
|
nn_optimizers.py
|
Sentimentron/Dracula
|
878f81c1c56a8ac12cf02d8f15bd93c544e29611
|
[
"BSD-3-Clause"
] | 100
|
2016-04-25T17:48:50.000Z
|
2021-02-11T04:11:54.000Z
|
nn_optimizers.py
|
Sentimentron/Dracula
|
878f81c1c56a8ac12cf02d8f15bd93c544e29611
|
[
"BSD-3-Clause"
] | 1
|
2017-02-25T21:26:40.000Z
|
2017-02-27T18:04:51.000Z
|
nn_optimizers.py
|
Sentimentron/Dracula
|
878f81c1c56a8ac12cf02d8f15bd93c544e29611
|
[
"BSD-3-Clause"
] | 25
|
2016-07-27T23:11:44.000Z
|
2020-01-21T01:55:36.000Z
|
"""
This file contains implementations of the various methods
used to train stuff.
"""
import theano
from theano import tensor
from util import numpy_floatX
def sgd(lr, tparams, grads, x_c, x_w, mask, wmask, y, cost):
    """Stochastic Gradient Descent.

    Parameters
    ----------
    lr : Theano SharedVariable
        Learning rate.
    tparams : OrderedDict
        Mapping of parameter name -> Theano shared variable.
    grads : list of Theano variables
        Gradients of ``cost`` w.r.t. the parameters, in ``tparams`` order.
    x_c, x_w, mask, wmask, y : Theano variables
        Mini-batch inputs/targets.
    cost : Theano variable
        Objective function to minimize.

    Returns
    -------
    f_grad_shared : theano function
        Computes the mini-batch cost and stores the gradients in shared
        variables, without touching the weights.
    f_update : theano function
        Applies one SGD step (p <- p - lr * g) from the stored gradients.

    :note: A more complicated version of sgd than needed. This is
        done like that for adadelta and rmsprop.
    """
    # New set of shared variables that will contain the gradient
    # for a mini-batch.  dict.items() replaces the Python-2-only
    # dict.iteritems() so this also runs under Python 3.
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.items()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    # Function that computes gradients for a mini-batch, but does not
    # update the weights.
    f_grad_shared = theano.function([x_c, x_w, mask, wmask, y], cost, updates=gsup,
                                    name='sgd_f_grad_shared')
    pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
    # Function that updates the weights from the previously computed
    # gradient.
    f_update = theano.function([lr], [], updates=pup,
                               name='sgd_f_update')
    return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x_c, mask, y_mask, y, cost):
    """
    An adaptive learning rate optimizer

    Parameters
    ----------
    lr : Theano SharedVariable
        Initial learning rate (unused by the update itself; kept for a
        uniform optimizer interface)
    tparams : OrderedDict
        Mapping of parameter name -> Theano shared variable
    grads : list of Theano variables
        Gradients of cost w.r.t. the parameters
    x_c, mask, y_mask, y : Theano variables
        Model inputs, sequence mask, label mask and targets
    cost : Theano variable
        Objective function to minimize

    Returns
    -------
    f_grad_shared, f_update : theano functions
        Gradient computation/storage and parameter update, respectively.

    Notes
    -----
    For more information, see [ADADELTA]_.

    .. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
       Rate Method*, arXiv:1212.5701.
    """
    # Shared state: raw gradients, running average of squared updates
    # (rup2) and running average of squared gradients (rgrad2).
    # dict.items() replaces the Python-2-only dict.iteritems() so the
    # optimizer also works under Python 3.
    zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.items()]
    running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.items()]
    running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.items()]
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function([x_c, mask, y_mask, y], cost, updates=zgup + rg2up,
                                    name='adadelta_f_grad_shared', on_unused_input='warn')
    # ADADELTA direction: -RMS(previous updates) / RMS(gradients) * grad
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads,
                                     running_up2,
                                     running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
    f_update = theano.function([lr], [], updates=ru2up + param_up,
                               on_unused_input='ignore',
                               name='adadelta_f_update')
    return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x_c, x_w, mask, wmask, y, cost):
    """
    A variant of SGD that scales the step size by a running average of the
    recent gradient norms.

    Parameters
    ----------
    lr : Theano SharedVariable
        Initial learning rate (unused by the update itself; kept for a
        uniform optimizer interface)
    tparams : OrderedDict
        Mapping of parameter name -> Theano shared variable
    grads : list of Theano variables
        Gradients of cost w.r.t. the parameters
    x_c, x_w, mask, wmask, y : Theano variables
        Model inputs, masks and targets
    cost : Theano variable
        Objective function to minimize

    Returns
    -------
    f_grad_shared, f_update : theano functions
        Gradient computation/storage and parameter update, respectively.

    Notes
    -----
    For more information, see [Hint2014]_.

    .. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
       lecture 6a,
       http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
    """
    # Shared state: raw gradients plus running averages of the gradient
    # and of its square.  dict.items() replaces the Python-2-only
    # dict.iteritems() so this also runs under Python 3.
    zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.items()]
    running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
                                   name='%s_rgrad' % k)
                     for k, p in tparams.items()]
    running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.items()]
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function([x_c, x_w, mask, wmask, y], cost,
                                    updates=zgup + rgup + rg2up,
                                    name='rmsprop_f_grad_shared')
    # Momentum-style update direction, scaled by the centered RMS of the
    # recent gradients (rg2 - rg**2 estimates the gradient variance).
    updir = [theano.shared(p.get_value() * numpy_floatX(0.),
                           name='%s_updir' % k)
             for k, p in tparams.items()]
    updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
                 for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
                                            running_grads2)]
    param_up = [(p, p + udn[1])
                for p, udn in zip(tparams.values(), updir_new)]
    f_update = theano.function([lr], [], updates=updir_new + param_up,
                               on_unused_input='ignore',
                               name='rmsprop_f_update')
    return f_grad_shared, f_update
| 35.787879
| 90
| 0.552752
|
4a0bb159bfef66ad0815de7a7501169455fd5041
| 53
|
py
|
Python
|
software/python/simple_pendulum/controllers/energy_shaping/__init__.py
|
alopezrivera/torque_limited_simple_pendulum
|
2164a41d65c16743ba260a79a04a04cdd72c3903
|
[
"BSD-3-Clause"
] | 15
|
2021-10-16T04:50:34.000Z
|
2022-03-26T23:54:19.000Z
|
software/python/simple_pendulum/controllers/energy_shaping/__init__.py
|
alopezrivera/torque_limited_simple_pendulum
|
2164a41d65c16743ba260a79a04a04cdd72c3903
|
[
"BSD-3-Clause"
] | 17
|
2021-11-30T22:17:28.000Z
|
2022-03-21T12:28:45.000Z
|
software/python/simple_pendulum/controllers/energy_shaping/__init__.py
|
alopezrivera/torque_limited_simple_pendulum
|
2164a41d65c16743ba260a79a04a04cdd72c3903
|
[
"BSD-3-Clause"
] | 13
|
2021-10-18T07:45:29.000Z
|
2022-03-22T12:56:33.000Z
|
"""
Energy Shaping Control
======================
"""
| 13.25
| 22
| 0.377358
|
4a0bb168c595382337f9aa0844fd3c05d267b5eb
| 2,699
|
py
|
Python
|
Chapter15/c15_17_GIR_GARCH_result.py
|
John-ye666/Python-for-Finance-Second-Edition
|
dabef09bcdd7b0ec2934774741bd0a7e1950de73
|
[
"MIT"
] | 236
|
2017-07-02T03:06:54.000Z
|
2022-03-31T03:15:33.000Z
|
Chapter15/c15_17_GIR_GARCH_result.py
|
John-ye666/Python-for-Finance-Second-Edition
|
dabef09bcdd7b0ec2934774741bd0a7e1950de73
|
[
"MIT"
] | null | null | null |
Chapter15/c15_17_GIR_GARCH_result.py
|
John-ye666/Python-for-Finance-Second-Edition
|
dabef09bcdd7b0ec2934774741bd0a7e1950de73
|
[
"MIT"
] | 139
|
2017-06-30T10:28:16.000Z
|
2022-01-19T19:43:34.000Z
|
"""
Name : c15_17_gir_GARCH_result.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import numpy as np
import scipy as sp
from numpy.linalg import inv
import matplotlib.pyplot as plt
from matplotlib.mlab import csv2rec
from scipy.optimize import fmin_slsqp
from numpy import size, log, pi, sum, diff, array, zeros, diag, dot, mat, asarray, sqrt
#
def gjr_garch_likelihood(parameters, data, sigma2, out=None):
    """Negative log-likelihood of a GJR-GARCH(1,1,1) model.

    Parameters
    ----------
    parameters : sequence of 5 floats
        (mu, omega, alpha, gamma, beta).
    data : ndarray
        Return series.
    sigma2 : ndarray
        Work array for the conditional variances; sigma2[0] must hold the
        starting variance.  Modified in place.
    out : object, optional
        If not None, the per-observation likelihoods and a copy of the
        variance path are returned as well.

    Returns
    -------
    float, or (loglik, logliks, sigma2_copy) when ``out`` is given.
    """
    mu = parameters[0]
    omega = parameters[1]
    alpha = parameters[2]
    gamma = parameters[3]
    beta = parameters[4]
    T = size(data, 0)
    eps = data - mu
    # GJR recursion: the gamma (leverage) term only loads on negative
    # shocks.  range() replaces the Python-2-only xrange().
    for t in range(1, T):
        sigma2[t] = (omega + alpha * eps[t - 1] ** 2 +
                     gamma * eps[t - 1] ** 2 * (eps[t - 1] < 0) +
                     beta * sigma2[t - 1])
    logliks = 0.5 * (log(2 * pi) + log(sigma2) + eps ** 2 / sigma2)
    loglik = sum(logliks)
    if out is None:
        return loglik
    else:
        # the original called the undefined name `copy` (never imported,
        # NameError); use ndarray.copy() instead
        return loglik, logliks, sigma2.copy()
#
def gjr_constraint(parameters, data, sigma2, out=None):
    """Inequality constraint for the SLSQP optimizer.

    Non-negative exactly when alpha + gamma/2 + beta <= 1, i.e. when the
    GJR-GARCH process is covariance stationary.  ``data``, ``sigma2`` and
    ``out`` are accepted only so the signature matches the likelihood.
    """
    alpha, gamma, beta = parameters[2], parameters[3], parameters[4]
    return array([1 - alpha - gamma / 2 - beta])
#
def hessian_2sided(fun, theta, args):
    """Two-sided finite-difference Hessian of ``fun`` at ``theta``.

    Parameters
    ----------
    fun : callable
        Objective, called as ``fun(theta, *args)`` and returning a float.
    theta : ndarray, shape (K,)
        Evaluation point.  Elements should be non-zero, since the step
        size is 1e-5 * |theta| (a zero element gives a zero step and a
        division by zero below).
    args : tuple
        Extra positional arguments forwarded to ``fun``.

    Returns
    -------
    H : ndarray, shape (K, K)
        Symmetric numerical Hessian.
    """
    f = fun(theta, *args)
    h = 1e-5 * np.abs(theta)
    # Re-derive h so that (theta + h) - theta is exactly representable,
    # reducing rounding error in the difference quotients.
    thetah = theta + h
    h = thetah - theta
    K = size(theta, 0)
    h = np.diag(h)
    fp = zeros(K)
    fm = zeros(K)
    # range() replaces the Python-2-only xrange() throughout.
    for i in range(K):
        fp[i] = fun(theta + h[i], *args)
        fm[i] = fun(theta - h[i], *args)
    fpp = zeros((K, K))
    fmm = zeros((K, K))
    # Only the upper triangle is evaluated; the matrices are symmetric.
    for i in range(K):
        for j in range(i, K):
            fpp[i, j] = fun(theta + h[i] + h[j], *args)
            fpp[j, i] = fpp[i, j]
            fmm[i, j] = fun(theta - h[i] - h[j], *args)
            fmm[j, i] = fmm[i, j]
    hh = (diag(h))
    hh = hh.reshape((K, 1))
    hh = dot(hh, hh.T)
    H = zeros((K, K))
    for i in range(K):
        for j in range(i, K):
            # standard two-sided second difference / (2 * h_i * h_j)
            H[i, j] = (fpp[i, j] - fp[i] - fp[j] + f
                       + f - fm[i] - fm[j] + fmm[i, j]) / hh[i, j] / 2
            H[j, i] = H[i, j]
    return H
#
#
def GJR_GARCH(ret):
    """Fit a GJR-GARCH(1,1,1) model to the return series ``ret``.

    Maximizes the likelihood with SLSQP under the stationarity
    constraint ``gjr_constraint`` and returns the parameter estimate
    (mu, omega, alpha, gamma, beta).
    """
    import numpy as np
    import scipy.optimize as op
    mean_ret = ret.mean()
    var_ret = ret.var()
    # starting values for (mu, omega, alpha, gamma, beta)
    start_values = np.array([mean_ret, var_ret * 0.01, 0.03, 0.09, 0.90])
    unit = (0.0, 1.0)
    machine_eps = np.finfo(np.float64).eps
    bounds = [(-10 * mean_ret, 10 * mean_ret),
              (machine_eps, 2 * var_ret),
              unit, unit, unit]
    # constant-variance starting path for the GARCH recursion
    sigma2 = np.repeat(var_ret, np.size(ret, 0))
    return op.fmin_slsqp(gjr_garch_likelihood, start_values,
                         f_ieqcons=gjr_constraint,
                         bounds=bounds, args=(ret, sigma2))
#
# Demo: fit the model to 100 uniform pseudo-returns (seed fixed for
# reproducibility) and print the estimated parameters.
sp.random.seed(12345)
returns=sp.random.uniform(-0.2,0.3,100)
tt=GJR_GARCH(returns)
print(tt)
| 30.325843
| 101
| 0.575028
|
4a0bb21c2a91bd576269156f2d30f322e84c462a
| 2,456
|
py
|
Python
|
insta/models.py
|
DorcasWanjiku/Instagram-page
|
e99b8ac27467ff95d82d01f47ded99faa71866b8
|
[
"RSA-MD"
] | null | null | null |
insta/models.py
|
DorcasWanjiku/Instagram-page
|
e99b8ac27467ff95d82d01f47ded99faa71866b8
|
[
"RSA-MD"
] | null | null | null |
insta/models.py
|
DorcasWanjiku/Instagram-page
|
e99b8ac27467ff95d82d01f47ded99faa71866b8
|
[
"RSA-MD"
] | null | null | null |
# Create your models here.
from django.db import models
from django.utils import timezone
from datetime import datetime
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# from pyuploadcare.dj.models import ImageField
# Create your models here.
class Profile(models.Model):
    """Per-user profile data (avatar + bio), one-to-one with auth User."""
    profile_pic = CloudinaryField('image')
    bio = models.CharField(max_length=50, blank=True)
    user = models.OneToOneField(User, blank=True, on_delete=models.CASCADE, related_name="profile")

    def __str__(self):
        return self.bio

    # Save profile
    def profile_save(self):
        self.save()

    # delete profile
    def delete_profile(self):
        self.delete()

    # Get profile
    @classmethod
    def get_by_id(cls, id):
        """Queryset of profiles whose user matches ``id``."""
        profile = Profile.objects.filter(user=id)
        return profile

    @classmethod
    def get_profile_by_username(cls, owner):
        """Profiles whose user's username contains ``owner``.

        Bug fix: the original body referenced the undefined name ``user``
        (the parameter is ``owner``) and applied ``__contains`` directly to
        the FK, which raises at runtime; filter on the related username.
        """
        profiles = cls.objects.filter(user__username__contains=owner)
        return profiles
# Image posted by a profile, with optional caption/message.
class Image(models.Model):
    # default=datetime.now passes the callable, so the timestamp is taken
    # at save time (not at module import time).
    time_created= models.DateTimeField(default=datetime.now, blank=True)
    image = CloudinaryField('image')
    message = models.CharField(max_length=80, blank=True)
    name = models.CharField(max_length=80)
    caption = models.TextField(blank=True)
    profile = models.ForeignKey(Profile, blank=True,on_delete=models.CASCADE)
    # profile_details = models.ForeignKey(Profile)
    def __str__(self):
        return self.name
    #Save image
    def save_image(self):
        self.save()
    #Delete image
    def delete_image(self):
        self.delete()
    #Get image
    @classmethod
    def get_profile_images(cls, profile):
        # All images attached to the profile with primary key ``profile``.
        images = Image.objects.filter(profile__pk=profile)
        return images
# Likes
class Likes(models.Model):
    # One row per (user, image) like; no DB-level uniqueness constraint,
    # so duplicate likes are possible.
    likes = models.ForeignKey(User, blank=True,on_delete=models.CASCADE)
    image = models.ForeignKey(Image,blank=True,on_delete=models.CASCADE)
class Comment(models.Model):
    """A comment left by a user on an image."""
    image = models.ForeignKey(Image,blank=True, on_delete=models.CASCADE,related_name='comment')
    # NOTE(review): despite the name, comment_title is the commenting user, not a title.
    comment_title = models.ForeignKey(User, blank=True,on_delete=models.CASCADE)
    comment= models.TextField()
    def save_comment(self):
        """Persist this comment."""
        self.save()
    def delete_comment(self):
        """Remove this comment from the database."""
        self.delete()
    @classmethod
    def get_image_comments(cls, id):
        """Return all comments attached to the image with primary key *id*."""
        comments = Comment.objects.filter(image__pk=id)
        return comments
    def __str__(self):
        return str(self.comment)
| 27.288889
| 98
| 0.702769
|
4a0bb360aee5b3fe8469ddadec20c64615b0828b
| 80
|
py
|
Python
|
run.py
|
sinivaal/Flask-API
|
a237fab61024c8717c0de7983216e69fa3c8f4ed
|
[
"MIT"
] | null | null | null |
run.py
|
sinivaal/Flask-API
|
a237fab61024c8717c0de7983216e69fa3c8f4ed
|
[
"MIT"
] | null | null | null |
run.py
|
sinivaal/Flask-API
|
a237fab61024c8717c0de7983216e69fa3c8f4ed
|
[
"MIT"
] | null | null | null |
from application import app

# Entry point: start Flask's built-in development server, bound to all
# interfaces (0.0.0.0) so the API is reachable from outside localhost.
if __name__ == "__main__":
    app.run(host='0.0.0.0')
| 20
| 27
| 0.7
|
4a0bb3acb0fd0ad2596d2149f6e3f60a57987dff
| 4,269
|
py
|
Python
|
speccer/ops.py
|
bensimner/speccer
|
85cd4fa72e87b104b072663dfe373eeb47d44b61
|
[
"MIT"
] | 1
|
2022-02-09T01:39:33.000Z
|
2022-02-09T01:39:33.000Z
|
speccer/ops.py
|
bensimner/speccer
|
85cd4fa72e87b104b072663dfe373eeb47d44b61
|
[
"MIT"
] | null | null | null |
speccer/ops.py
|
bensimner/speccer
|
85cd4fa72e87b104b072663dfe373eeb47d44b61
|
[
"MIT"
] | null | null | null |
# ops.py - Operations over strategies
# author: Ben Simner
from __future__ import generator_stop
import typing
import logging
import contextlib
import collections
from . import strategy
from . import typeable
from . import _errors
log = logging.getLogger('ops')
__all__ = [
'value_args',
'values',
'mapS',
'implies',
'assume',
]
def assume(b):
    """Silently require *b* to hold.

    A no-op when ``b`` is truthy. Otherwise raises FailedAssumption,
    which the strategy machinery catches — the test case is skipped
    rather than reported as a failure (unlike an assertion).
    """
    if b:
        return
    raise _errors.FailedAssumption
def implies(f, t: type):
    '''f => t: build the subtype of `t` whose values satisfy predicate `f`.

    Returns a freshly created type named "<f.__name__>-><pretty(t)>",
    registered with a strategy that generates values of `t` and yields
    only those for which `f(v)` holds; rejected values are counted on
    the new type's `_failed_implications` attribute.
    '''
    impl_name = f.__name__
    # generate a new type which is t[f]
    typ = typeable.from_type(t)
    t_pretty = typ.pretty()
    t_name = '{}->{}'.format(impl_name, t_pretty)
    t_new = type(t_name, (typ.typ,), {})
    t_new._failed_implications = 0  # running count of values rejected by f
    @mapS(strategy.Strategy[typ.typ], register_type=t_new)
    def newStrat(d, v, *args):
        # A value is rejected either when f(v) returns False explicitly or
        # when f itself raises AssertionError (e.g. it uses assert internally).
        try:
            if f(v) is False:
                raise AssertionError('{}[{}] failed'.format(impl_name, t_pretty))
        except AssertionError:
            t_new._failed_implications += 1
        else:
            yield v
    newStrat.__name__ = t_name
    newStrat.__qualname__ = t_name
    return t_new
def values(depth, t, **kwargs):
    """Yield every value that the registered Strategy for type *t* produces at *depth*."""
    strat_instance = strategy.Strategy.get_strat_instance(t)
    yield from strat_instance(depth, **kwargs)
def value_args(depth, *types, **kwargs):
    '''Create a `Strategy' generating every tuple of values drawn from *types.

    Example:
        value_args(1, str, int) ->
            ('a', 0), ('a', 1), ('a', -1),
            ('b', 0), ('b', 1), ('b', -1),
            ...
            ('bb', 0), ('bb', 1), ('bb', -1)

    If a given type has no Strategy instance, a MissingStrategyError is
    put in its position instead, e.g.:
        value_args(1, int, MyTypeWithNoStratInstance) ->
            (0, MissingStrategyError), (1, MissingStrategyError), ...
    '''
    per_type_streams = (values(depth, t, **kwargs) for t in types)
    yield from strategy.generate_args_from_strategies(*per_type_streams)
def mapS(strat, register_type=None, autoregister=False, **kwargs):
    '''
    Maps some function over a Strategy _class_.
    To automatically register the new strategy either set
        autoregister=True overwrite the old strategy with this one
        register_type=t register this strategy for type 't'
    i.e.
    @mapS(Strategy[int])
    def NewIntStrat(depth, value):
        yield 'new({})'.format(value)
    NewIntStrat(3) ~= ['new(0)', 'new(1)', 'new(-1)', 'new(2)', 'new(-2)', 'new(3)', 'new(-3)']
    '''
    def decorator(f):
        class MapStrat(strat, autoregister=autoregister, **kwargs):
            def generate(self, depth, *args):
                # Round-robin queue of generators: one f(depth, v, ...) per
                # value v produced by the underlying strategy so far.
                val_gens = collections.deque()
                def _yield_one():
                    # Take one value from the next live generator, rotating it
                    # to the back; exhausted generators are dropped (recursive
                    # retry). Raises StopIteration when all are exhausted —
                    # suppressed by the caller below.
                    if not val_gens:
                        raise StopIteration
                    g = val_gens.popleft()
                    try:
                        v = next(g)
                    except StopIteration:
                        return _yield_one()
                    val_gens.append(g)
                    return v
                s = strat(depth, *args)
                gen = iter(s)
                while True:
                    try:
                        v = next(gen)
                    except StopIteration:
                        # TODO(BenSimner) this seems horribly wrong
                        # except that the below code needs to access the generator
                        # and so no for loop allowed
                        return
                    val_gens.append(f(depth, v, *args))
                    with contextlib.suppress(StopIteration):
                        yield _yield_one()
        if register_type:
            strategy.register(register_type, MapStrat)
        MapStrat.__name__ = f.__name__
        MapStrat.__qualname__ = f.__qualname__
        MapStrat.__module__ = strat.__module__
        return MapStrat
    return decorator
| 30.71223
| 115
| 0.532677
|
4a0bb46513fade2530013618ab7cd1fe558f87cb
| 5,819
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_network_interface_load_balancers_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_network_interface_load_balancers_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_network_interface_load_balancers_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations(object):
    """NetworkInterfaceLoadBalancersOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2017_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated code (see file header) — manual edits will be
    # lost when the client is regenerated.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceLoadBalancerListResult"]
        """List all load balancers in a network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_09_01.models.NetworkInterfaceLoadBalancerListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceLoadBalancerListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the HTTP request for either the first page (format the
            # operation's URL template) or a continuation page (next_link is
            # already a fully-formed URL returned by the service).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is raised as an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'}  # type: ignore
| 47.696721
| 196
| 0.668843
|
4a0bb623c2f5a320a5a650ca1bb494fc567341f6
| 946
|
py
|
Python
|
api/resources/admin.py
|
xmetadata/smart-campus
|
84fa5eaafda6f875aaf3d7b53248f941ad1506d9
|
[
"MIT"
] | 1
|
2018-02-27T01:23:48.000Z
|
2018-02-27T01:23:48.000Z
|
api/resources/admin.py
|
xmetadata/smart-campus
|
84fa5eaafda6f875aaf3d7b53248f941ad1506d9
|
[
"MIT"
] | null | null | null |
api/resources/admin.py
|
xmetadata/smart-campus
|
84fa5eaafda6f875aaf3d7b53248f941ad1506d9
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource, request
from flask_jwt import jwt_required, current_identity
from sqlalchemy.exc import SQLAlchemyError
import json
from common.satree import TreeManager
from common.database import db
from models.nodetree import NodeTree, InNodeSchema
class AdminData(Resource):
    """Admin endpoint for inserting nodes into the NodeTree."""
    @jwt_required()
    def post(self):
        """Create a NodeTree node from the request JSON.

        Expects a body matching InNodeSchema: ``title``, ``is_student``
        and an optional ``node_uuid`` identifying the parent node (an
        empty/falsy node_uuid inserts a root-level node).

        Returns the new node's id on success, validation errors with a
        400 on bad input, or the database error text with a 500.
        """
        json_data = json.dumps(request.get_json())
        load_data, errors = InNodeSchema().loads(json_data)
        if errors:
            return errors, 400
        user_data = NodeTree(title=load_data['title'], is_student=load_data['is_student'])
        tm = TreeManager(NodeTree, db.session)
        try:
            if load_data['node_uuid']:
                tm.add_node(load_data['node_uuid'], user_data)
            else:
                tm.add_node(node=user_data)
        except SQLAlchemyError as e:
            # Bug fix: SQLAlchemyError has no `.message` attribute on
            # Python 3 (it was a Python 2 BaseException relic), so the
            # original `e.message` raised AttributeError and masked the
            # real database error. str(e) is the portable spelling.
            return str(e), 500
        return user_data.node_id
| 35.037037
| 90
| 0.658562
|
4a0bb7e9be01bbacf756d56caa45d02afb03c8ea
| 12,118
|
py
|
Python
|
core/player.py
|
JoshuaSkelly/TroubleInCloudLand
|
2642124f7549c91b89060d424524f69bb7edc169
|
[
"MIT"
] | 2
|
2017-04-13T09:59:10.000Z
|
2017-04-13T21:07:22.000Z
|
core/player.py
|
JoshuaSkelly/TroubleInCloudLand
|
2642124f7549c91b89060d424524f69bb7edc169
|
[
"MIT"
] | 2
|
2017-04-14T15:33:19.000Z
|
2017-04-21T19:55:01.000Z
|
core/player.py
|
JoshuaSkelly/TroubleInCloudLand
|
2642124f7549c91b89060d424524f69bb7edc169
|
[
"MIT"
] | 4
|
2019-02-12T05:48:17.000Z
|
2020-10-15T23:12:45.000Z
|
import copy
import random
from core import actor, bullet, particle
from core.actor import *
from ui import text, infobubble
from utils import utility
def load_data():
    """Populate Player's class-level animations and sound effects.

    Must run once before any Player is constructed so that the shared
    animation list and sound lists are filled in.
    """
    anims = Player.master_animation_list
    anims.build_animation('Idle', ['kuunIdle'])
    anims.build_animation('Fire', ['kuunShoot'])
    anims.build_animation('HurtIdle', ['kuunIdle', 'blank'])
    anims.build_animation('HurtFire', ['kuunShoot', 'blank'])
    anims.build_animation('Die', ['kuunDie'])
    Player.NUM_OW_SOUNDS = 2  # highest valid index (3 sounds in total)
    for sound_name in ('ow1', 'ow2', 'ow3'):
        Player.lose_life_sound.append(utility.load_sound(sound_name))
    Player.NUM_FIRE_SOUNDS = 2  # highest valid index (3 sounds in total)
    for sound_name in ('shot1', 'shot2', 'shot3'):
        Player.fire_sound.append(utility.load_sound(sound_name))
    for sound_name in ('playerDeath1', 'playerDeath2', 'playerDeath3'):
        Player.death_sound.append(utility.load_sound(sound_name))
    Player.extraLifeSound = utility.load_sound('extraLife')
class Player(actor.Actor):
    """The player-controlled character.

    Handles per-frame update (bonus timers, combo payout, fire cooldown),
    firing, taking damage, scoring with extra-life thresholds, and the
    death sequence. Class-level sound/animation lists are filled in by
    this module's load_data().
    """
    death_sound = []
    fire_sound = []
    lose_life_sound = []
    master_animation_list = animation.Animation()
    def __init__(self, bullet_group, effects_group, life_board, score_board):
        """Create a player with 3 lives, wired to sprite groups and HUD text boards."""
        actor.Actor.__init__(self)
        # COMMON VARIABLES
        self.actor_type = ACTOR_PLAYER
        self.animation_list = copy.copy(self.master_animation_list)
        self.animation_list.set_parent(self)
        self.animation_list.play('Idle')
        self.rect = self.image.get_rect()
        self.bound_style = BOUND_STYLE_REFLECT
        self.bounds = 0 + 46, 0 + 60, SCREEN_WIDTH - 46, SCREEN_HEIGHT - 32
        self.can_collide = True
        self.hitrect = pygame.Rect(0, 0, 80, 90)
        self.position = vector.Vector2d((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4))
        self.velocity = vector.Vector2d.zero
        # UNIQUE VARIABLES
        self.bullet_speed = BULLET_SPEED
        self.default_fire_timer = 2
        self.reset_fire_timer = self.default_fire_timer
        self.fire_timer = self.reset_fire_timer
        self.max_speed = 54
        self.hitrect_offset_y = -15
        self.score = 0
        self.lives = 3
        self.stun_timer = 0
        self.life_board = life_board
        self.score_board = score_board
        self.life_board.set_text('x' + str(self.lives))
        self.score_board.set_text(self.score)
        self.next_bonus = 50000  # score threshold for the next extra life
        self.dying = 0
        self.dead = False
        # BONUS VARIABLES
        self.damage_bonus = 0
        self.bullet_bonus = 0
        self.reflect_bonus = 0
        self.dual_shot = 0
        self.fast_shot = 0
        self.point_bonus = 0
        self.combo_bonus = 0
        self.combo_kills = 0
        # BULLET VARIABLES
        self.bullet_damage = 1
        self.bullet_bound_style = BOUND_STYLE_KILL
        self.bullet_collide_style = COLLIDE_STYLE_HURT
        self.bullet_group = bullet_group
        self.effects_group = effects_group
        # SOUND VARIABLES
        self.current_sound = 0
    def actor_update(self):
        """Per-frame update: death handling, bonus countdowns, combo payout,
        fire cooldown, and velocity damping."""
        if self.lives <= 0:
            # Out of lives: drift downward and play out the death sequence.
            self.active = False
            self.velocity -= vector.Vector2d(0.0, -0.3)
            self.die()
            return
        # Revert bullet properties once their bonus timers have run out.
        if not self.damage_bonus:
            self.bullet_damage = 1
        if not self.reflect_bonus:
            self.bullet_bound_style = BOUND_STYLE_KILL
            self.bullet_collide_style = COLLIDE_STYLE_HURT
        if not self.fast_shot:
            self.reset_fire_timer = self.default_fire_timer
        # Tick down every active bonus/stun timer.
        if self.point_bonus:
            self.point_bonus -= 1
        if self.damage_bonus:
            self.damage_bonus -= 1
        if self.reflect_bonus:
            self.reflect_bonus -= 1
        if self.dual_shot:
            self.dual_shot -= 1
        if self.stun_timer:
            self.stun_timer -= 1
        if self.fast_shot:
            self.fast_shot -= 1
        if self.combo_bonus:
            self.combo_bonus -= 1
            if not self.combo_bonus:
                # Combo window just expired: pay out escalating bonus points
                # (25 + 50 + ... per chained kill) and announce the total.
                combo_counter = 0
                bonus_points = 0
                while combo_counter <= self.combo_kills:
                    combo_counter += 1
                    bonus_points += combo_counter * 25
                self.increment_score_no_text(bonus_points)
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Combo Points:' + str(bonus_points) + '!').image
                help_bubble = infobubble.InfoBubble(temp_image, self, 1.5 * FRAMES_PER_SECOND)
                help_bubble.offset = vector.Vector2d(0.0, -100.0)
                self.bullet_group.add(help_bubble)
                self.combo_kills = 0
        self.fire_timer -= 1
        self.velocity *= .95  # friction-style damping
        if not self.active:
            self.active = True
        if not self.fire_timer:
            # Cooldown just elapsed: fall back to the idle pose.
            self.animation_list.stop('Idle', self.animation_list.current_frame)
            if self.stun_timer:
                self.animation_list.play('HurtIdle', self.animation_list.current_frame)
    def die(self):
        """Run the death sequence: one-time announcement/sound, then a
        smoke-puff trail while the body falls off screen."""
        if self.dying == 0:
            # First frame of death: pick a random exclamation + sound.
            death_type = int(random.random() * 3)
            if death_type == 0:
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Blast!').image
                utility.play_sound(self.death_sound[0], OW_CHANNEL)
            elif death_type == 1:
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Oh No!').image
                utility.play_sound(self.death_sound[1], OW_CHANNEL)
            elif death_type == 2:
                temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Bother!').image
                utility.play_sound(self.death_sound[2], OW_CHANNEL)
            self.animation_list.play('Die')
            # Widen the bounds so the corpse can fall past the arena edge;
            # leaving them triggers custom_bounds() below.
            self.bounds = -1000, -1000, SCREEN_WIDTH + 1000, SCREEN_HEIGHT + 32
            self.bound_style = BOUND_STYLE_CUSTOM
            help_bubble = infobubble.InfoBubble(temp_image, self, 5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            self.bullet_group.add(help_bubble)
        self.dying += 1
        if settings_list[PARTICLES] and not self.dying % 2:
            # Every other frame, emit a burst of smoke puffs in random directions.
            puffs_to_create = 4
            while puffs_to_create:
                puffs_to_create -= 1
                temp_puff = particle.SmokeParticle(self.position, (1, 0))
                temp_puff.velocity.set_angle(359 * random.random())
                self.effects_group.add(temp_puff)
    def custom_bounds(self):
        """Bounds callback after the death fall leaves the arena: mark fully dead."""
        self.dead = True
    def hurt(self, value):
        """Lose *value* lives (unless invulnerable) and start the stun window."""
        if self.stun_timer <= 0:
            self.animation_list.play('HurtIdle', self.animation_list.current_frame)
            self.lives -= value
            sound_to_play = random.randint(0, 2)
            if self.lives != 0:
                utility.play_sound(self.lose_life_sound[sound_to_play], OW_CHANNEL)
            self.life_board.set_text('x' + str(self.lives))
            self.stun_timer = 1.5 * FRAMES_PER_SECOND
    def increment_score_no_text(self, value):
        """Add *value* to the score without a floating score label;
        grants an extra life past each 50000-point threshold."""
        self.score += value
        self.score_board.set_text(self.score)
        # NOTE(review): uses '>' here but increment_score() uses '>=' for the
        # same threshold test — confirm which comparison is intended.
        if self.score > self.next_bonus:
            utility.play_sound(self.extraLifeSound, OW_CHANNEL)
            temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Extra Life!').image
            help_bubble = infobubble.InfoBubble(temp_image, self, 1.5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            self.effects_group.add(help_bubble)
            self.lives += 1
            self.life_board.set_text('x' + str(self.lives))
            self.next_bonus += 50000
    def increment_score(self, value, textPosition, text_group):
        """Add *value* to the score with a floating score label at
        *textPosition*; extends the combo window on small kills, doubles
        points during a point bonus, and grants extra lives."""
        if self.combo_bonus and value <= 250:
            # Chained kill inside the combo window: extend it and show the tally.
            self.combo_bonus += int(.2 * FRAMES_PER_SECOND)
            self.combo_kills += 1
            # NOTE(review): elsewhere bubbles use text.TextSurface(...).image;
            # text.Text(...).image here looks inconsistent — confirm.
            temp_image = text.Text(FONT_PATH, 30, FONT_COLOR, 'x' + str(self.combo_kills) + '!').image
            help_bubble = infobubble.InfoBubble(temp_image, self, 0.5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            self.bullet_group.add(help_bubble)
        if self.point_bonus:
            value *= 2
        temp_text = text.Text(FONT_PATH, 36, FONT_COLOR, str(value), 15)
        temp_text.set_alignment(CENTER_MIDDLE)
        temp_text.position = vector.Vector2d(textPosition)
        text_group.add(temp_text)
        self.score += value
        self.score_board.set_text(self.score)
        if self.score >= self.next_bonus:
            utility.play_sound(self.extraLifeSound, OW_CHANNEL)
            temp_image = text.TextSurface(FONT_PATH, 30, FONT_COLOR, 'Extra Life!').image
            help_bubble = infobubble.InfoBubble(temp_image, self, 1.5 * FRAMES_PER_SECOND)
            help_bubble.offset = vector.Vector2d(0.0, -100.0)
            text_group.add(help_bubble)
            self.lives += 1
            self.life_board.set_text('x' + str(self.lives))
            self.next_bonus += 50000
    def fire(self):
        """Fire along the current movement direction once the cooldown has
        elapsed; dual-shot also fires the opposite direction."""
        if self.stun_timer:
            self.animation_list.play('HurtFire', self.animation_list.current_frame)
        else:
            self.animation_list.play('Fire')
        if self.fire_timer <= 0:
            utility.play_sound(self.fire_sound[random.randint(0, 2)], PLAYER_CHANNEL)
            # Bullets inherit the player's heading; no shot while standing still.
            if self.velocity[:] != vector.Vector2d.zero:
                bullet_velocity = vector.Vector2d(self.velocity)
                bullet_velocity.set_magnitude(self.bullet_speed)
                new_bullet = bullet.Bullet(self.position,
                                           bullet_velocity,
                                           self.effects_group,
                                           self.bullet_damage,
                                           self.bullet_bound_style,
                                           self.bullet_collide_style)
                new_bullet.set_owner(self)
                # Pick the bullet skin matching the active bonus combination.
                if self.reflect_bonus and self.damage_bonus:
                    new_bullet.animation_list.play('DamageReflect')
                elif self.bullet_collide_style == COLLIDE_STYLE_REFLECT:
                    new_bullet.animation_list.play('Reflect')
                elif self.bullet_damage > 1:
                    new_bullet.animation_list.play('Damage')
                self.bullet_group.add(new_bullet)
                self.fire_timer = self.reset_fire_timer
            if self.dual_shot:
                if self.velocity:
                    # Second bullet in the exact opposite direction.
                    bullet_velocity = vector.Vector2d(self.velocity * -1)
                    bullet_velocity.set_magnitude(self.bullet_speed)
                    new_bullet = bullet.Bullet((self.position),
                                               (bullet_velocity),
                                               self.effects_group,
                                               self.bullet_damage,
                                               self.bullet_bound_style,
                                               self.bullet_collide_style)
                    new_bullet.set_owner(self)
                    if self.reflect_bonus and self.damage_bonus:
                        new_bullet.animation_list.play('DamageReflect')
                    elif self.bullet_collide_style == COLLIDE_STYLE_REFLECT: new_bullet.animation_list.play('Reflect')
                    elif self.bullet_damage > 1: new_bullet.animation_list.play('Damage')
                    self.bullet_group.add(new_bullet)
    def set_velocity(self, new_velocity):
        """Set the player's velocity, clamped to max_speed."""
        self.velocity = new_velocity
        if new_velocity.get_magnitude() > self.max_speed:
            self.velocity.set_magnitude(self.max_speed)
| 38.348101
| 121
| 0.592177
|
4a0bb812c581ccc4f479b1ff4bafba0abceaee63
| 19,085
|
py
|
Python
|
sequence/BioReaders.py
|
dpryan79/cDNA_Cupcake
|
8d3fb3ad087cdc70e2e8ce98c815d118b2bc5987
|
[
"BSD-3-Clause-Clear"
] | 60
|
2016-01-22T19:09:05.000Z
|
2022-01-21T02:38:46.000Z
|
sequence/BioReaders.py
|
dpryan79/cDNA_Cupcake
|
8d3fb3ad087cdc70e2e8ce98c815d118b2bc5987
|
[
"BSD-3-Clause-Clear"
] | 93
|
2016-03-03T04:54:40.000Z
|
2022-03-31T18:43:28.000Z
|
sequence/BioReaders.py
|
dpryan79/cDNA_Cupcake
|
8d3fb3ad087cdc70e2e8ce98c815d118b2bc5987
|
[
"BSD-3-Clause-Clear"
] | 22
|
2015-12-13T14:49:00.000Z
|
2021-11-22T09:08:07.000Z
|
"""
Should always be faithful duplicate of sequence/BioReaders.py
Duplicated here for tofu installation. This one is called via cupcake.io.BioReaders.
"""
import re, sys
from collections import namedtuple
Interval = namedtuple('Interval', ['start', 'end'])
class SimpleSAMReader:
    """Fast, minimal SAM reader.

    Skips CIGAR/FLAG interpretation and identity/coverage calculation;
    each iteration yields a SimpleSAMRecord built from one alignment line.
    """
    SAMheaders = ['@HD', '@SQ', '@RG', '@PG', '@CO']

    def __init__(self, filename, has_header):
        self.filename = filename
        self.f = open(filename)
        self.header = ''
        if has_header:
            # Accumulate header lines, then rewind to the first alignment line.
            pos = self.f.tell()
            row = self.f.readline()
            while row[:3] in SimpleSAMReader.SAMheaders:
                self.header += row
                pos = self.f.tell()
                row = self.f.readline()
            self.f.seek(pos)

    def __iter__(self):
        return self

    def __next__(self):
        row = self.f.readline().strip()
        if not row:
            raise StopIteration
        return SimpleSAMRecord(row)
class SimpleSAMRecord:
    """Bare-bones SAM alignment record: IDs, coordinates and CIGAR only.

    Simplifying assumptions: the alignment is end-to-end (qStart is
    always 0) and unspliced (no 'N' expected in the CIGAR).
    """
    # Fix: raw strings — '\d'/'\S' are invalid escape sequences in plain
    # string literals (DeprecationWarning, SyntaxWarning on newer Pythons).
    cigar_rex = re.compile(r'(\d+)([MIDSHN])')
    SAMflag = namedtuple('SAMflag', ['is_paired', 'strand', 'PE_read_num'])

    def __init__(self, record_line):
        """Parse one tab-separated SAM line.

        Fills qID, sID, sStart/sEnd (0-based), qStart/qEnd and cigar.
        For unmapped records (sID == '*') only qID/sID are set.
        """
        self.qID = None
        self.sID = None
        self.sStart = None
        self.sEnd = None
        self.qStart = 0
        self.qEnd = None  # length of SEQ
        self.cigar = None
        self.process(record_line)

    def __str__(self):
        msg = \
"""
        qID: {q}
        sID: {s}
        sStart-sEnd: {ss}-{se}
        qStart-qEnd: {qs}-{qe}
        cigar: {c}
        """.format(q=self.qID, s=self.sID, \
        ss=self.sStart, se=self.sEnd, qs=self.qStart, qe=self.qEnd, c=self.cigar)
        return msg

    def parse_cigar(self, cigar, start):
        """Walk *cigar* and set qEnd and sEnd, using *start* as the
        reference offset.

        M - match
        I - insertion w.r.t. to ref
        D - deletion w.r.t. to ref
        N - skipped (which means splice junction)
        S - soft clipped
        H - hard clipped (not shown in SEQ)
        = - read match
        X - read mismatch
        ex: 50M43N3D

        NOTE: qStart/qEnd set here are often incorrect because of
        different ways to write CIGAR strings; rely on XS/XE flags (from
        blasr or pbalign.py) to overwrite this later!!!
        """
        cur_end = start
        q_aln_len = 0
        for (num, op) in re.findall(r'(\d+)(\S)', cigar):
            num = int(num)
            if op == 'I':
                q_aln_len += num
            elif op in ('M', '=', 'X'):
                cur_end += num
                q_aln_len += num
            elif op == 'D':
                cur_end += num
        self.qEnd = self.qStart + q_aln_len
        self.sEnd = cur_end

    def process(self, record_line):
        """Split the SAM columns; only the CIGAR is interpreted (to get
        qEnd and sEnd)."""
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*':  # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1  # SAM POS is 1-based
        self.cigar = raw[5]
        self.parse_cigar(self.cigar, self.sStart)
        #self.flag = SimpleSAMRecord.parse_sam_flag(int(raw[1]))
class SAMReader:
    """Iterator over a SAM file yielding fully-parsed SAMRecord objects.

    Optional ref/query length dicts enable coverage calculation on the
    records produced.
    """
    SAMheaders = ['@HD', '@SQ', '@RG', '@PG', '@CO']

    def __init__(self, filename, has_header, ref_len_dict=None, query_len_dict=None):
        self.filename = filename
        self.f = open(filename)
        self.header = ''
        self.ref_len_dict = ref_len_dict
        self.query_len_dict = query_len_dict
        if has_header:
            # Accumulate header lines, then rewind to the first alignment line.
            pos = self.f.tell()
            row = self.f.readline()
            while row[:3] in SAMReader.SAMheaders:
                self.header += row
                pos = self.f.tell()
                row = self.f.readline()
            self.f.seek(pos)

    def __iter__(self):
        return self

    def __next__(self):
        row = self.f.readline().strip()
        if not row:
            raise StopIteration
        return SAMRecord(row, self.ref_len_dict, self.query_len_dict)
class SAMRecord:
    """A parsed SAM alignment record; handles spliced alignments ('N' ops)."""
    SAMflag = namedtuple('SAMflag', ['is_paired', 'strand', 'PE_read_num'])

    def __init__(self, record_line=None, ref_len_dict=None, query_len_dict=None):
        """
        Designed to handle BowTie SAM output for unaligned reads (PE read not yet supported)
        Can handle map to transfrag (no splicing) and genome (splicing)
        """
        self.qID = None
        self.sID = None
        self.sStart = None
        self.sEnd = None
        self.segments = None
        self.num_nonmatches = None
        self.num_ins = None
        self.num_del = None
        self.num_mat_or_sub = None
        self.qCoverage = None
        self.sCoverage = None
        self.sLen = None
        self.qLen = None
        # qStart, qEnd might get changed in parse_cigar
        self.qStart = 0
        self.qEnd = None  # length of SEQ
        self.cigar = None
        self.flag = None
        self.identity = None
        self.record_line = record_line
        if record_line is not None:
            self.process(record_line, ref_len_dict, query_len_dict)

    def __str__(self):
        msg =\
"""
        qID: {q}
        sID: {s}
        cigar: {c}
        sStart-sEnd: {ss}-{se}
        qStart-qEnd: {qs}-{qe}
        segments: {seg}
        flag: {f}
        coverage (of query): {qcov}
        coverage (of subject): {scov}
        alignment identity: {iden}
        """.format(q=self.qID, s=self.sID, seg=self.segments, c=self.cigar, f=self.flag,\
        ss=self.sStart, se=self.sEnd, qs=self.qStart, qe=self.qEnd, iden=self.identity,\
        qcov=self.qCoverage, scov=self.sCoverage)
        return msg

    def __eq__(self, other):
        return self.qID == other.qID and self.sID == other.sID and\
            self.sStart == other.sStart and self.sEnd == other.sEnd and\
            self.segments == other.segments and self.qCoverage == other.qCoverage and\
            self.sCoverage == other.sCoverage and self.qLen == other.qLen and\
            self.sLen == other.sLen and self.qStart == other.qStart and\
            self.cigar == other.cigar and self.flag == other.flag and self.identity == other.identity

    def process(self, record_line, ref_len_dict, query_len_dict):
        """
        If SAM is from pbalign.py output, then have flags:
        XS: 1-based qStart, XE: 1-based qEnd, XQ: query length, NM: number of non-matches
        ignore_XQ should be False for BLASR/pbalign.py's SAM, True for GMAP's SAM
        0. qID
        1. flag
        2. sID
        3. 1-based offset sStart
        4. mapping quality (ignore)
        5. cigar
        6. name of ref of mate alignment (ignore)
        7. 1-based offset sStart of mate (ignore)
        8. inferred fragment length (ignore)
        9. sequence (ignore)
        10. read qual (ignore)
        11. optional fields
        """
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*':  # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1  # SAM POS is 1-based
        self.cigar = raw[5]
        self.segments = self.parse_cigar(self.cigar, self.sStart)
        self.sEnd = self.segments[-1].end
        self.flag = SAMRecord.parse_sam_flag(int(raw[1]))
        # process optional fields
        # XM: number of mismatches
        # NM: edit distance (sub/ins/del)
        for x in raw[11:]:
            if x.startswith('NM:i:'):
                self.num_nonmatches = int(x[5:])
        if ref_len_dict is not None:
            self.sCoverage = (self.sEnd - self.sStart) * 1. / ref_len_dict[self.sID]
            self.sLen = ref_len_dict[self.sID]
        if self.flag.strand == '-' and self.qLen is not None:
            self.qStart, self.qEnd = self.qLen - self.qEnd, self.qLen - self.qStart
        if query_len_dict is not None:  # over write qLen and qCoverage, should be done LAST
            self.qLen = query_len_dict[self.qID]
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
        if self.num_nonmatches is not None:
            self.identity = 1. - (self.num_nonmatches * 1. / (self.num_del + self.num_ins + self.num_mat_or_sub))

    def parse_cigar(self, cigar, start):
        """
        M - match
        I - insertion w.r.t. to ref
        D - deletion w.r.t. to ref
        N - skipped (which means splice junction)
        S - soft clipped
        H - hard clipped (not shown in SEQ)
        = - read match
        X - read mismatch
        ex: 50M43N3D

        NOTE: sets qStart & qEnd, which are often incorrect because of
        different ways to write CIGAR strings.

        Returns: genomic segment locations (using <start> as offset)
        """
        segments = []
        cur_start = start
        cur_end = start
        first_thing = True
        q_aln_len = 0
        self.num_del = 0
        self.num_ins = 0
        self.num_mat_or_sub = 0
        # Fix: raw string — '\d'/'\S' are invalid escapes in a plain literal.
        for (num, op) in re.findall(r'(\d+)(\S)', cigar):
            num = int(num)
            if op == 'H' or op == 'S':
                # Clipping only shifts qStart when it leads the CIGAR.
                if first_thing:
                    self.qStart += num
            elif op == 'I':
                q_aln_len += num
                self.num_ins += num
            elif op in ('M', '=', 'X'):
                cur_end += num
                q_aln_len += num
                self.num_mat_or_sub += num
            elif op == 'D':
                cur_end += num
                self.num_del += num
            elif op == 'N':  # junction, make a new segment
                segments.append(Interval(cur_start, cur_end))
                cur_start = cur_end + num
                cur_end = cur_start
            else:
                raise Exception("Unrecognized cigar character {0}!".format(op))
            first_thing = False
        if cur_start != cur_end:
            segments.append(Interval(cur_start, cur_end))
        self.qEnd = self.qStart + q_aln_len
        return segments

    @classmethod
    def parse_sam_flag(cls, flag):
        """
        Heng Li's SAM https://samtools.github.io/hts-specs/SAMv1.pdf
        1 -- read is one of a pair
        2 -- alignment is one end of proper PE alignment (IGNORE)
        4 -- read has no reported alignments (IGNORE)
        8 -- read is one of a pair and has no reported alignments (IGNORE)
        16 -- reverse ref strand
        32 -- other mate is aligned to ref strand
        64 -- first mate in pair
        128 -- second mate in pair
        256 -- not primary alignment
        512 -- not passing filters
        1024 -- PCR or optical duplicate
        2048 -- supplementary alignment
        Return: SAMflag

        (Fix: first parameter renamed self -> cls; this is a classmethod.)
        """
        PE_read_num = 0
        strand = '+'
        # Strip bits from the high end down; only strand/pairing bits matter.
        if flag >= 2048:  # supplementary alignment
            flag -= 2048
        if flag >= 1024:  # PCR or optical duplicate, should never see this...
            flag -= 1024
        if flag >= 512:  # not passing QC, should never see this
            flag -= 512
        if flag >= 256:  # secondary alignment, OK to see this if option given in BowTie
            flag -= 256
        if flag >= 128:
            PE_read_num = 2
            flag -= 128
        elif flag >= 64:
            PE_read_num = 1
            flag -= 64
        if flag >= 32:
            flag -= 32
        if flag >= 16:
            strand = '-'
            flag -= 16
        if flag >= 8:
            flag -= 8
        if flag >= 4:
            flag -= 4
        if flag >= 2:
            flag -= 2
        assert flag == 0 or flag == 1
        is_paired = flag == 1
        return SAMRecord.SAMflag(is_paired, strand, PE_read_num)
class BLASRSAMReader(SAMReader):
    """SAMReader variant that yields BLASRSAMRecord objects (pbalign/BLASR SAM)."""
    def __next__(self):
        row = self.f.readline().strip()
        if not row:
            raise StopIteration
        return BLASRSAMRecord(row, self.ref_len_dict, self.query_len_dict)
class BLASRSAMRecord(SAMRecord):
    """A BLASR/pbalign-generated SAM alignment record.

    Besides the standard SAM columns, pbalign output carries the optional
    fields XS:i (1-based qStart), XE:i (1-based qEnd), XQ:i (query length)
    and NM:i (number of non-matches); see
    https://github.com/PacificBiosciences/blasr/blob/master/common/datastructures/alignmentset/SAMAlignment.h

    Fixes over the previous revision:
    * removed a leftover debug print of qStart;
    * the fallback lookup for blasr's extended qIDs no longer slices with
      rfind('/') == -1 (which silently dropped the last character of a qID
      containing no '/', guaranteeing a confusing KeyError); such a qID now
      raises an informative Exception, matching GMAPSAMRecord.
    """
    def process(self, record_line, ref_len_dict=None, query_len_dict=None):
        """
        Parse one tab-separated SAM line into this record's fields.

        0. qID
        1. flag
        2. sID
        3. 1-based offset sStart
        4. mapping quality (ignore)
        5. cigar
        6. name of ref of mate alignment (ignore)
        7. 1-based offset sStart of mate (ignore)
        8. inferred fragment length (ignore)
        9. sequence (ignore)
        10. read qual (ignore)
        11. optional fields (XS/XE/XQ/NM consumed; XS must precede XE)
        """
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*':  # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1  # SAM POS is 1-based; store 0-based
        self.cigar = raw[5]
        self.segments = self.parse_cigar(self.cigar, self.sStart)
        self.sEnd = self.segments[-1].end
        self.flag = SAMRecord.parse_sam_flag(int(raw[1]))
        # In Yuan Li's BLASR-to-SAM, XQ:i:<subread length>
        for x in raw[11:]:
            if x.startswith('XQ:i:'):  # XQ should come last, after XS and XE
                _qLen = int(x[5:])
                if _qLen > 0:  # this is for GMAP's SAM, which has XQ:i:0
                    self.qLen = _qLen
            elif x.startswith('XS:i:'):  # must be PacBio's SAM, need to update qStart
                qs = int(x[5:]) - 1  # XS is 1-based
                if qs > 0:
                    assert self.qStart == 0
                    self.qStart = qs
                    self.qEnd += qs
            elif x.startswith('XE:i:'):  # must be PacBio's SAM and comes after XS:i:
                qe = int(x[5:])  # XE is 1-based
                assert self.qEnd - self.qStart == qe - 1  # qEnd should've been updated already
            elif x.startswith('NM:i:'):  # number of non-matches
                self.num_nonmatches = int(x[5:])
                self.identity = 1. - (self.num_nonmatches * 1. / (self.num_del + self.num_ins + self.num_mat_or_sub))
        if ref_len_dict is not None:
            self.sCoverage = (self.sEnd - self.sStart) * 1. / ref_len_dict[self.sID]
            self.sLen = ref_len_dict[self.sID]
        if self.flag.strand == '-' and self.qLen is not None:
            # convert query coordinates to the forward-read orientation
            self.qStart, self.qEnd = self.qLen - self.qEnd, self.qLen - self.qStart
        if self.qLen is not None:
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
        if query_len_dict is not None:  # overwrite qLen and qCoverage, must be done LAST
            try:
                self.qLen = query_len_dict[self.qID]
            except KeyError:  # HACK for blasr's extended qID (trailing '/start_end' suffix)
                k = self.qID.rfind('/')
                if k >= 0:
                    self.qLen = query_len_dict[self.qID[:k]]
                else:
                    raise Exception("Unable to find qID {0} in the input fasta/fastq!".format(self.qID))
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
class GMAPSAMReader(SAMReader):
    """Iterator over a GMAP-produced SAM file, yielding GMAPSAMRecord objects."""
    def __next__(self):
        # Header ('@'-prefixed) lines may appear anywhere if the SAM was
        # sorted, so skip them wherever they occur; an empty read means EOF.
        while True:
            line = self.f.readline().strip()
            if not line:
                raise StopIteration
            if not line.startswith('@'):
                return GMAPSAMRecord(line, self.ref_len_dict, self.query_len_dict)
class GMAPSAMRecord(SAMRecord):
    """A GMAP-generated SAM alignment record."""
    def process(self, record_line, ref_len_dict=None, query_len_dict=None):
        """
        Parse one tab-separated SAM line into this record's fields.

        0. qID
        1. flag
        2. sID
        3. 1-based offset sStart
        4. mapping quality (ignore)
        5. cigar
        6. name of ref of mate alignment (ignore)
        7. 1-based offset sStart of mate (ignore)
        8. inferred fragment length (ignore)
        9. sequence (ignore)
        10. read qual (ignore)
        11. optional fields: only NM:i (non-matches) and XS:A (strand) are used

        ref_len_dict: optional {sID: reference length}, enables sLen/sCoverage.
        query_len_dict: optional {qID: query length}; overwrites qLen/qCoverage last.
        """
        raw = record_line.split('\t')
        self.qID = raw[0]
        self.sID = raw[2]
        if self.sID == '*':  # means no match! STOP here
            return
        self.sStart = int(raw[3]) - 1  # SAM POS is 1-based; store 0-based
        self.cigar = raw[5]
        self.segments = self.parse_cigar(self.cigar, self.sStart)
        self.sEnd = self.segments[-1].end
        self.flag = SAMRecord.parse_sam_flag(int(raw[1]))  # strand can be overwritten by XS:A flag below
        self._flag_strand = self.flag.strand  # serve as backup for debugging
        for x in raw[11:]:
            if x.startswith('NM:i:'):  # number of non-matches
                self.num_nonmatches = int(x[5:])
                # identity = matched fraction of all aligned columns (ins + del + match/sub)
                self.identity = 1. - (self.num_nonmatches * 1. / (self.num_del + self.num_ins + self.num_mat_or_sub))
            elif x.startswith('XS:A:'):  # strand information
                _s = x[5:]
                if _s!='?':  # '?' = strand undetermined; keep the FLAG-derived strand
                    self._flag_strand = self.flag.strand  # serve as backup for debugging
                    self.flag = SAMRecord.SAMflag(self.flag.is_paired, _s, self.flag.PE_read_num)
        if ref_len_dict is not None:
            self.sCoverage = (self.sEnd - self.sStart) * 1. / ref_len_dict[self.sID]
            self.sLen = ref_len_dict[self.sID]
        if self.flag.strand == '-' and self.qLen is not None:
            # convert query coordinates to the forward-read orientation
            self.qStart, self.qEnd = self.qLen - self.qEnd, self.qLen - self.qStart
        if self.qLen is not None:
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
        if query_len_dict is not None:  # over write qLen and qCoverage, should be done LAST
            try:
                self.qLen = query_len_dict[self.qID]
            except KeyError:  # HACK for blasr's extended qID (trailing '/start_end' suffix)
                k = self.qID.rfind('/')
                if k >= 0:
                    try:
                        self.qLen = query_len_dict[self.qID[:self.qID.rfind('/')]]
                    except KeyError:
                        # NOTE(review): this fallback re-tries the full qID, which
                        # already raised KeyError above, so it re-raises — presumably
                        # intended to surface the original key; confirm before changing.
                        self.qLen = query_len_dict[self.qID]
                else:
                    raise Exception("Unable to find qID {0} in the input fasta/fastq!".format(self.qID))
            self.qCoverage = (self.qEnd - self.qStart) * 1. / self.qLen
| 36.701923
| 119
| 0.543149
|
4a0bb8878c8edb86ad22b36a4219949fb6ce1604
| 1,951
|
py
|
Python
|
src/datasets/eeg_epilepsy.py
|
ajrcampbell/early-exit-ensembles
|
361b232f0e18cf1c6f8f49d70f8b630eeb1e2b49
|
[
"MIT"
] | 11
|
2021-11-14T10:47:11.000Z
|
2022-01-05T12:56:38.000Z
|
src/datasets/eeg_epilepsy.py
|
ajrcampbell/early-exit-ensembles
|
361b232f0e18cf1c6f8f49d70f8b630eeb1e2b49
|
[
"MIT"
] | 2
|
2022-01-07T09:18:31.000Z
|
2022-01-10T03:13:08.000Z
|
src/datasets/eeg_epilepsy.py
|
ajrcampbell/early-exit-ensembles
|
361b232f0e18cf1c6f8f49d70f8b630eeb1e2b49
|
[
"MIT"
] | 3
|
2021-12-04T07:32:44.000Z
|
2021-12-23T12:42:37.000Z
|
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from src.transforms import Compose, FlipTime, Shift, FlipPolarity, GuassianNoise
from src.datasets.utils import calculate_sample_weights
def get_eeg_epilepsy(data_dir, valid_prop=0.10, test_prop=0.10, seed=1234):
    """Load the EEG epilepsy csv and return train/valid/test datasets.

    The test split is carved off first, then the validation split is taken
    from the remainder. Waveform augmentations are applied to the training
    split only, and the training dataset carries per-sample class weights.
    """
    frame = pd.read_csv(data_dir / "eeg_epilepsy/dataset.csv")
    features, targets = frame.drop(columns=["Unnamed: 0", "y"]), frame["y"]

    rest_x, x_test, rest_y, y_test = train_test_split(
        features, targets, test_size=test_prop, shuffle=True, random_state=seed)
    x_train, x_valid, y_train, y_valid = train_test_split(
        rest_x, rest_y, test_size=valid_prop, shuffle=True, random_state=seed)

    # train-only augmentation pipeline
    transforms = Compose([
        FlipTime(p=0.5),
        FlipPolarity(p=0.5),
        Shift(p=0.5),
        GuassianNoise(min_amplitude=0.01, max_amplitude=1.0, p=0.5),
    ])

    splits = {
        "train": (x_train, y_train),
        "valid": (x_valid, y_valid),
        "test": (x_test, y_test),
    }
    datasets = {}
    for stage, (x_split, y_split) in splits.items():
        dataset = EEGEpilepsyDataset(
            x_split, y_split, transforms=transforms if stage == "train" else None)
        if stage == "train":
            dataset.sample_weights = calculate_sample_weights(y_train)
        datasets[stage] = dataset
    return datasets
class EEGEpilepsyDataset(Dataset):
    """Torch dataset over tabular EEG epilepsy signals.

    Each item is a (1, num_features) float32 tensor plus a zero-based long
    class index (the csv labels run 1..5). Optional transforms are applied
    to the raw numpy signal before tensor conversion.
    """
    def __init__(self, data, label, transforms=None):
        # Work on the underlying numpy arrays; shift labels from 1..5 to 0..4.
        self.data = data.values
        self.label = label.values - 1
        self.transforms = transforms
        self.num_classes = 5

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        signal, target = self.data[idx], self.label[idx]
        if self.transforms:
            # transforms operate on raw 1-D numpy signals; no sample rate available
            signal = self.transforms(signal, sample_rate=None)
        signal = torch.from_numpy(signal).unsqueeze(0).float()
        return signal, torch.tensor(target).long()
| 32.516667
| 118
| 0.662737
|
4a0bb8beac20af54ac02e661bb96cb744fc88085
| 4,836
|
py
|
Python
|
vae_ld/models/decoders.py
|
bonheml/VAE_learning_dynamics
|
2bdbc421a5e80841e753c6ee0986dd4670284dde
|
[
"Apache-2.0"
] | 3
|
2021-03-13T15:25:02.000Z
|
2021-11-19T09:53:58.000Z
|
vae_ld/models/decoders.py
|
bonheml/VAE_learning_dynamics
|
2bdbc421a5e80841e753c6ee0986dd4670284dde
|
[
"Apache-2.0"
] | 4
|
2020-11-13T19:07:39.000Z
|
2021-09-04T15:50:00.000Z
|
vae_ld/models/decoders.py
|
bonheml/VAE_learning_dynamics
|
2bdbc421a5e80841e753c6ee0986dd4670284dde
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
class DeconvolutionalDecoder(tf.keras.Model):
    """ Deconvolutional decoder initially used in beta-VAE [1]. Based on Locatello et al. [2] implementation
    (https://github.com/google-research/disentanglement_lib)

    Maps a latent vector through two dense layers, a reshape to a 4x4x64
    feature map, and four transposed convolutions. One activation per layer
    is returned so layer representations can be inspected; the reconstruction
    is the last element.

    [1] Higgins, I. et al. (2017). β-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework.
    In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France.
    [2] Locatello, F. et al. (2019). Challenging Common Assumptions in the Unsupervised Learning of Disentangled
    Representations. In K. Chaudhuri and R. Salakhutdinov, eds., Proceedings of the 36th International Conference
    on Machine Learning, Proceedings of Machine Learning Research, vol. 97, Long Beach, California, USA: PMLR,
    pp. 4114–4124.
    """

    def __init__(self, input_shape, output_shape):
        super(DeconvolutionalDecoder, self).__init__()
        self.d1 = layers.Dense(256, activation="relu", name="decoder/1", input_shape=(input_shape,))
        self.d2 = layers.Dense(1024, activation="relu", name="decoder/2")
        self.d3 = layers.Reshape((4, 4, 64), name="decoder/reshape")
        self.d4 = layers.Conv2DTranspose(filters=64, kernel_size=4, strides=2, activation="relu", padding="same",
                                         name="decoder/3")
        self.d5 = layers.Conv2DTranspose(filters=32, kernel_size=4, strides=2, activation="relu", padding="same",
                                         name="decoder/4")
        self.d6 = layers.Conv2DTranspose(filters=32, kernel_size=4, strides=2, activation="relu", padding="same",
                                         name="decoder/5")
        self.d7 = layers.Conv2DTranspose(filters=output_shape[2], kernel_size=4, strides=2, padding="same",
                                         name="decoder/6")
        self.d8 = layers.Reshape(output_shape, name="decoder/output")

    def call(self, inputs):
        # Run the latent code through every layer, collecting each activation.
        activations = []
        hidden = inputs
        for layer in (self.d1, self.d2, self.d3, self.d4, self.d5, self.d6, self.d7, self.d8):
            hidden = layer(hidden)
            activations.append(hidden)
        return tuple(activations)
class FullyConnectedDecoder(tf.keras.Model):
    """ Fully connected decoder initially used in beta-VAE [1]. Based on Locatello et al. [2] implementation
    (https://github.com/google-research/disentanglement_lib)

    Three tanh dense layers followed by a linear projection to the flattened
    output size and a reshape. One activation per layer is returned; the
    reconstruction is the last element.

    [1] Higgins, I. et al. (2017). β-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework.
    In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France.
    [2] Locatello, F. et al. (2019). Challenging Common Assumptions in the Unsupervised Learning of Disentangled
    Representations. In K. Chaudhuri and R. Salakhutdinov, eds., Proceedings of the 36th International Conference
    on Machine Learning, Proceedings of Machine Learning Research, vol. 97, Long Beach, California, USA: PMLR,
    pp. 4114–4124.
    """

    def __init__(self, input_shape, output_shape):
        super(FullyConnectedDecoder, self).__init__()
        self.d1 = layers.Dense(1200, activation="tanh", name="decoder/1", input_shape=(input_shape,))
        self.d2 = layers.Dense(1200, activation="tanh", name="decoder/2")
        self.d3 = layers.Dense(1200, activation="tanh", name="decoder/3")
        self.d4 = layers.Dense(np.prod(output_shape), activation=None, name="decoder/4")
        self.d5 = layers.Reshape(output_shape, name="decoder/output")

    def call(self, inputs):
        # Run the latent code through every layer, collecting each activation.
        activations = []
        hidden = inputs
        for layer in (self.d1, self.d2, self.d3, self.d4, self.d5):
            hidden = layer(hidden)
            activations.append(hidden)
        return tuple(activations)
class MnistDecoder(tf.keras.Model):
    """ Deconvolutional decoder initially used in Keras VAE tutorial for mnist data.
    (https://keras.io/examples/generative/vae/#define-the-vae-as-a-model-with-a-custom-trainstep)

    Bug fix: call() previously applied self.d2 (the Reshape layer) four times
    in a row and never ran the Conv2DTranspose stack d3-d5 nor the final
    reshape d6, so the "reconstruction" was just the reshaped dense
    activation. Each layer is now applied exactly once, in order, and one
    activation per layer is returned — matching the other decoders in this
    module (reconstruction is the last element).
    """

    def __init__(self, input_shape, output_shape):
        super(MnistDecoder, self).__init__()
        self.d1 = layers.Dense(7 * 7 * 64, activation="relu", name="decoder/1", input_shape=(input_shape,))
        self.d2 = layers.Reshape((7, 7, 64), name="decoder/2")
        self.d3 = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same", name="decoder/3")
        self.d4 = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same", name="decoder/4")
        self.d5 = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same", name="decoder/5")
        self.d6 = layers.Reshape(output_shape, name="decoder/output")

    def call(self, inputs):
        x1 = self.d1(inputs)
        x2 = self.d2(x1)
        x3 = self.d3(x2)
        x4 = self.d4(x3)
        x5 = self.d5(x4)
        x6 = self.d6(x5)
        return x1, x2, x3, x4, x5, x6
| 52.565217
| 114
| 0.651365
|
4a0bb8f34e13d5eb4296bd7a476d88e4840aa26f
| 2,202
|
py
|
Python
|
dfs_sweeper.py
|
dungba88/cleaner_robot
|
ce2b21e40ce6f3db9ae007c5ceb7c1cb911b225f
|
[
"Unlicense"
] | 55
|
2017-10-06T17:59:34.000Z
|
2022-03-08T07:03:50.000Z
|
dfs_sweeper.py
|
dungba88/cleaner_robot
|
ce2b21e40ce6f3db9ae007c5ceb7c1cb911b225f
|
[
"Unlicense"
] | 1
|
2019-07-30T14:14:36.000Z
|
2019-08-22T03:45:38.000Z
|
dfs_sweeper.py
|
dungba88/cleaner_robot
|
ce2b21e40ce6f3db9ae007c5ceb7c1cb911b225f
|
[
"Unlicense"
] | 19
|
2018-07-12T12:48:36.000Z
|
2021-12-05T14:06:34.000Z
|
class DFSSweeper(object):
    """Sweep an unknown grid with a robot via recursive depth-first search.

    The robot supports move() (returns falsy when the cell ahead is blocked),
    turn_right() and turn_left() (chainable). Visited cells are tracked in
    observed_map keyed by "x_y". Headings `dir` are 0..3, where a right turn
    is (dir + 1) % 4; per the next_* formulas, heading 0 steps +x, 1 steps
    +y, 2 steps -x, and 3 steps -y.
    """
    def __init__(self, robot):
        self.observed_map = {}  # "x_y" -> 1 for every cell visited so far
        self.robot = robot
        self.loggable = True  # NOTE(review): unused in this class; presumably read elsewhere
    def sweep(self):
        """Run a full sweep starting from the origin, facing direction 0."""
        self.move({'x': 0, 'y': 0}, 0)
    def move(self, cur, dir):
        """Visit `cur` and recursively sweep all reachable unvisited neighbors.

        Precondition: the robot stands on `cur` facing `dir`. Each recursive
        call returns with the robot back on `cur`, facing the direction it
        departed in. `turn_taken` counts net right turns away from `dir`.
        On exit the robot backtracks one cell to the caller, re-facing `dir`.
        """
        self.observed_map[str(cur['x'])+'_'+str(cur['y'])] = 1
        # 1) straight ahead: no turn needed
        straight = self.next_straight(cur, dir)
        if not self.visited(straight) and self.robot.move():
            self.move(straight, dir)
        turn_taken = 0
        # 2) right neighbor: one right turn from `dir`
        right = self.next_right(cur, dir)
        if not self.visited(right):
            self.robot.turn_right()
            turn_taken += 1
            if self.robot.move():
                self.move(right, (dir + 1) % 4)
        # 3) cell behind: complete the rotation to two right turns total
        down = self.next_down(cur, dir)
        if not self.visited(down):
            for _ in range(2 - turn_taken):
                self.robot.turn_right()
                turn_taken += 1
            if self.robot.move():
                self.move(down, (dir + 2) % 4)
        # 4) left neighbor: three right turns total
        left = self.next_left(cur, dir)
        if not self.visited(left):
            for _ in range(3 - turn_taken):
                self.robot.turn_right()
                turn_taken += 1
            if self.robot.move():
                self.move(left, (dir + 3) % 4)
        # Face the about-turn direction (dir + 2): we are `turn_taken` right
        # turns past `dir`, so undo the difference with the cheaper rotation.
        left_turns = turn_taken - 2
        if left_turns < 0:
            for _ in range(abs(left_turns)):
                self.robot.turn_right()
        else:
            for _ in range(left_turns):
                self.robot.turn_left()
        # Backtrack one cell into the caller's position, then restore heading `dir`.
        self.robot.move()
        self.robot.turn_left().turn_left()
    def next_straight(self, cur, dir):
        # Cell one step ahead when facing `dir`.
        return {'x': cur['x'] - ((dir + 1) % 2) * (dir - 1), 'y': cur['y'] - (dir % 2) * (dir - 2)}
    def next_right(self, cur, dir):
        # Cell one step to the right of heading `dir`.
        return {'x': cur['x'] + (dir % 2) * (dir - 2), 'y': cur['y'] - ((dir + 1) % 2) * (dir - 1)}
    def next_left(self, cur, dir):
        # Cell one step to the left of heading `dir`.
        return {'x': cur['x'] - (dir % 2) * (dir - 2), 'y': cur['y'] + ((dir + 1) % 2) * (dir - 1)}
    def next_down(self, cur, dir):
        # Cell directly behind heading `dir`.
        return {'x': cur['x'] + ((dir + 1) % 2) * (dir - 1), 'y': cur['y'] + (dir % 2) * (dir - 2)}
    def visited(self, node):
        # A cell counts as visited iff its "x_y" key is present.
        return (str(node['x'])+'_'+str(node['y'])) in self.observed_map
| 33.876923
| 99
| 0.485468
|
4a0bb992fd9897c6d6b9c148e794f6e4c7510fb6
| 25,290
|
py
|
Python
|
src/etc/unicode.py
|
eyolfson/rust
|
458a6a2f6e9dfb6ed3d76f14418ff1f2f5e97f86
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
src/etc/unicode.py
|
eyolfson/rust
|
458a6a2f6e9dfb6ed3d76f14418ff1f2f5e97f86
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
src/etc/unicode.py
|
eyolfson/rust
|
458a6a2f6e9dfb6ed3d76f14418ff1f2f5e97f86
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - EastAsianWidth.txt
# - PropList.txt
# - Scripts.txt
# - UnicodeData.txt
#
# Since this should not require frequent updates, we just store this
# out-of-line and check the unicode.rs file into git.
import fileinput, re, os, sys, operator
# License/header text emitted verbatim at the top of the generated tables.rs,
# including the do-not-edit marker and lint allowances for generated code.
preamble = '''// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly
#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''
# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
# Maps each concrete General_Category to the grouped categories it belongs to.
expanded_categories = {
    'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
    'Lm': ['L'], 'Lo': ['L'],
    'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    # BUG FIX: 'No' belongs to the grouped category 'N' (like Nd and Nl),
    # not to a non-existent group 'No'.
    'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
    'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
    'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
    'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
    'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
    'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}
# Grapheme cluster data
# taken from UAX29, http://www.unicode.org/reports/tr29/
# these code points are excluded from the Control category
# NOTE: CR and LF are also technically excluded, but for
# the sake of convenience we leave them in the Control group
# and manually check them in the appropriate place. This is
# still compliant with the implementation requirements.
# (U+200C ZERO WIDTH NON-JOINER, U+200D ZERO WIDTH JOINER)
grapheme_control_exceptions = set([0x200c, 0x200d])

# the Regional_Indicator category (U+1F1E6..U+1F1FF)
grapheme_regional_indicator = [(0x1f1e6, 0x1f1ff)]

# "The following ... are specifically excluded" from the SpacingMark category
# http://www.unicode.org/reports/tr29/#SpacingMark
grapheme_spacingmark_exceptions = [(0x102b, 0x102c), (0x1038, 0x1038),
    (0x1062, 0x1064), (0x1067, 0x106d), (0x1083, 0x1083), (0x1087, 0x108c),
    (0x108f, 0x108f), (0x109a, 0x109c), (0x19b0, 0x19b4), (0x19b8, 0x19b9),
    (0x19bb, 0x19c0), (0x19c8, 0x19c9), (0x1a61, 0x1a61), (0x1a63, 0x1a64),
    (0xaa7b, 0xaa7b), (0xaa7d, 0xaa7d)]

# these are included in the SpacingMark category (U+0E33, U+0EB3)
grapheme_spacingmark_extra = set([0xe33, 0xeb3])
def fetch(f):
    """Download Unicode data file `f` from unicode.org if not already present.

    Terminates the process with status 1 when the file still does not exist
    after the download attempt (e.g. curl missing or network failure).
    """
    if not os.path.exists(f):
        os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
                  % f)

    if not os.path.exists(f):
        # was: exit(1) — the bare builtin is injected by the `site` module and
        # not guaranteed; sys.exit is the supported API. Also terminate the
        # message with a newline so it doesn't run into later output.
        sys.stderr.write("cannot load %s\n" % f)
        sys.exit(1)
def is_valid_unicode(n):
    """True iff `n` is a Unicode scalar value: in range and not a surrogate."""
    if n < 0 or n > 0x10FFFF:
        return False
    return not (0xD800 <= n <= 0xDFFF)
def load_unicode_data(f):
    """Parse UnicodeData.txt into lookup structures.

    Returns (canon_decomp, compat_decomp, gencats, combines, lowerupper,
    upperlower): decomposition maps {code: [codes]}, gencats as
    {category: [(lo, hi)]} ranges (including grouped and the synthesized Cn
    category), combines as [(lo, hi, class)], and simple case maps
    {code: mapped_code} in both directions.
    """
    fetch(f)
    gencats = {}
    upperlower = {}
    lowerupper = {}
    combines = {}
    canon_decomp = {}
    compat_decomp = {}

    for line in fileinput.input(f):
        fields = line.split(";")
        if len(fields) != 15:
            continue  # not a data row (UnicodeData.txt rows have 15 fields)
        [code, name, gencat, combine, bidi,
         decomp, deci, digit, num, mirror,
         old, iso, upcase, lowcase, titlecase ] = fields

        code_org = code
        code = int(code, 16)

        if not is_valid_unicode(code):
            continue  # skip surrogates / out-of-range values

        # generate char to char direct common and simple conversions
        # uppercase to lowercase
        if gencat == "Lu" and lowcase != "" and code_org != lowcase:
            upperlower[code] = int(lowcase, 16)

        # lowercase to uppercase
        if gencat == "Ll" and upcase != "" and code_org != upcase:
            lowerupper[code] = int(upcase, 16)

        # store decomposition, if given
        if decomp != "":
            if decomp.startswith('<'):
                # "<tag> XXXX YYYY" form marks a compatibility decomposition
                seq = []
                for i in decomp.split()[1:]:
                    seq.append(int(i, 16))
                compat_decomp[code] = seq
            else:
                seq = []
                for i in decomp.split():
                    seq.append(int(i, 16))
                canon_decomp[code] = seq

        # place letter in categories as appropriate
        for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []):
            if cat not in gencats:
                gencats[cat] = []
            gencats[cat].append(code)

        # record combining class, if any
        if combine != "0":
            if combine not in combines:
                combines[combine] = []
            combines[combine].append(code)

    # generate Not_Assigned from Assigned
    gencats["Cn"] = gen_unassigned(gencats["Assigned"])
    # Assigned is not a real category
    del(gencats["Assigned"])
    # Other contains Not_Assigned
    gencats["C"].extend(gencats["Cn"])

    gencats = group_cats(gencats)
    combines = to_combines(group_cats(combines))

    return (canon_decomp, compat_decomp, gencats, combines, lowerupper, upperlower)
def group_cats(cats):
    """Apply group_cat to every category: {name: [codes]} -> {name: [(lo, hi)]}."""
    return dict((name, group_cat(codes)) for name, codes in cats.items())
def group_cat(cat):
    """Collapse a list of code points into sorted, inclusive (lo, hi) ranges.

    Duplicates are ignored. Returns [] for empty input (the previous version
    crashed with `pop from empty list` there).
    """
    letters = sorted(set(cat))
    if not letters:
        return []
    ranges = []
    cur_start = cur_end = letters[0]
    for letter in letters[1:]:
        # `letters` is sorted and de-duplicated, so letter > cur_end always holds
        if letter == cur_end + 1:
            cur_end = letter  # extend the current run
        else:
            ranges.append((cur_start, cur_end))
            cur_start = cur_end = letter
    ranges.append((cur_start, cur_end))
    return ranges
def ungroup_cat(cat):
    """Expand inclusive (lo, hi) ranges back into a flat list of code points."""
    points = []
    for lo, hi in cat:
        points.extend(range(lo, hi + 1))
    return points
def gen_unassigned(assigned):
    """List every valid (non-surrogate) code point missing from `assigned`."""
    taken = set(assigned)
    # All scalar values: BMP below the surrogate block, then everything above it.
    candidates = list(range(0, 0xd800)) + list(range(0xe000, 0x110000))
    return [cp for cp in candidates if cp not in taken]
def to_combines(combs):
    """Flatten {class: [(lo, hi), ...]} into (lo, hi, class) triples sorted by lo."""
    triples = [(lo, hi, cls) for cls in combs for (lo, hi) in combs[cls]]
    triples.sort(key=operator.itemgetter(0))
    return triples
def format_table_content(f, content, indent):
    """Write comma-separated `content` to `f`, wrapped near column 98.

    Each output line starts with `indent` spaces; elements on the same line
    are joined with ", ", and a trailing "," ends every line but the last.
    """
    prefix = " "*indent
    line = prefix
    first = True
    for chunk in content.split(","):
        if len(line) + len(chunk) >= 98:
            # current line is full: flush it and start a fresh indented line
            f.write(line + ",\n")
            line = prefix + chunk
        elif first:
            line += chunk
            first = False
        else:
            line += ", " + chunk
            first = False
    f.write(line)
def load_properties(f, interestingprops):
    """Parse a UCD property file into {property_name: [(lo, hi), ...]}.

    Handles both single-codepoint lines ("0041 ; Prop") and range lines
    ("0041..005A ; Prop"). If `interestingprops` is non-empty, properties
    not listed there are skipped.
    """
    fetch(f)
    props = {}
    # single code point:  XXXX ; PropName
    re1 = re.compile("^([0-9A-F]+) +; (\w+)")
    # code point range:   XXXX..YYYY ; PropName
    re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+) +; (\w+)")

    for line in fileinput.input(f):
        prop = None
        d_lo = 0
        d_hi = 0
        m = re1.match(line)
        if m:
            d_lo = m.group(1)
            d_hi = m.group(1)
            prop = m.group(2)
        else:
            m = re2.match(line)
            if m:
                d_lo = m.group(1)
                d_hi = m.group(2)
                prop = m.group(3)
            else:
                continue  # comment / blank / unrecognized line
        if interestingprops and prop not in interestingprops:
            continue
        d_lo = int(d_lo, 16)
        d_hi = int(d_hi, 16)
        if prop not in props:
            props[prop] = []
        props[prop].append((d_lo, d_hi))
    return props
# load all widths of want_widths, except those in except_cats
def load_east_asian_width(want_widths, except_cats):
    """Parse EastAsianWidth.txt into {width_class: [(lo, hi), ...]}.

    Only entries whose width class is in `want_widths` are kept, and entries
    whose trailing general-category comment is in `except_cats` are dropped.
    """
    f = "EastAsianWidth.txt"
    fetch(f)
    widths = {}
    # single code point:  XXXX;W  # Cat
    re1 = re.compile("^([0-9A-F]+);(\w+) +# (\w+)")
    # code point range:   XXXX..YYYY;W  # Cat
    re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+);(\w+) +# (\w+)")

    for line in fileinput.input(f):
        width = None
        d_lo = 0
        d_hi = 0
        cat = None
        m = re1.match(line)
        if m:
            d_lo = m.group(1)
            d_hi = m.group(1)
            width = m.group(2)
            cat = m.group(3)
        else:
            m = re2.match(line)
            if m:
                d_lo = m.group(1)
                d_hi = m.group(2)
                width = m.group(3)
                cat = m.group(4)
            else:
                continue  # comment / blank / unrecognized line
        if cat in except_cats or width not in want_widths:
            continue
        d_lo = int(d_lo, 16)
        d_hi = int(d_hi, 16)
        if width not in widths:
            widths[width] = []
        widths[width].append((d_lo, d_hi))
    return widths
def escape_char(c):
    """Render code point `c` as a Rust char literal, e.g. 0x61 -> '\\u{61}'."""
    return "'\\u{{{0:x}}}'".format(c)
def emit_bsearch_range_table(f):
    # Emit the shared Rust helper that binary-searches a sorted slice of
    # inclusive (lo, hi) char ranges for membership; every property module
    # emitted later calls it via super::bsearch_range_table.
    f.write("""
fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
r.binary_search(|&(lo,hi)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}).found().is_some()
}\n
""")
def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
         pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
    """Write one Rust static array named `name` of type `t_type` to `f`.

    Every element of `t_data` is rendered by `pfun`; the comma-joined result
    is line-wrapped by format_table_content.
    """
    visibility = "pub " if is_pub else ""
    f.write(" %sstatic %s: %s = &[\n" % (visibility, name, t_type))
    format_table_content(f, ",".join(pfun(entry) for entry in t_data), 8)
    f.write("\n ];\n\n")
def emit_property_module(f, mod, tbl, emit_fn):
    """Emit a Rust module `mod` with one static range table per category in
    `tbl`, plus a membership function for each category listed in `emit_fn`."""
    f.write("pub mod %s {\n" % mod)
    # NOTE: Python 2 idiom — dict.keys() returns a list there; under Python 3
    # .sort() would fail on the view object.
    keys = tbl.keys()
    keys.sort()
    for cat in keys:
        emit_table(f, "%s_table" % cat, tbl[cat])
        if cat in emit_fn:
            f.write(" pub fn %s(c: char) -> bool {\n" % cat)
            f.write(" super::bsearch_range_table(c, %s_table)\n" % cat)
            f.write(" }\n\n")
    f.write("}\n\n")
def emit_regex_module(f, cats, w_data):
    """Emit the Rust `regex` module: the \\p{} class lookup table plus the
    Perl character classes \\d (Nd), \\s (White_Space) and \\w (`w_data`)."""
    f.write("pub mod regex {\n")
    regex_class = "&'static [(char, char)]"
    class_table = "&'static [(&'static str, &'static %s)]" % regex_class

    # (class name, defining module) pairs -> references to the per-module tables
    emit_table(f, "UNICODE_CLASSES", cats, class_table,
        pfun=lambda x: "(\"%s\",&super::%s::%s_table)" % (x[0], x[1], x[0]))

    f.write(" pub static PERLD: &'static %s = &super::general_category::Nd_table;\n\n"
        % regex_class)
    f.write(" pub static PERLS: &'static %s = &super::property::White_Space_table;\n\n"
        % regex_class)

    emit_table(f, "PERLW", w_data, regex_class)

    f.write("}\n\n")
def emit_conversions_module(f, lowerupper, upperlower):
    # Emit the Rust `conversions` module: to_lower/to_upper implemented as a
    # binary search over sorted (from_char, to_char) simple case-map tables.
    f.write("pub mod conversions {")
    f.write("""
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
use core::option::Option;
use core::option::Option::{Some, None};
use core::slice;
pub fn to_lower(c: char) -> char {
match bsearch_case_table(c, LuLl_table) {
None => c,
Some(index) => LuLl_table[index].1
}
}
pub fn to_upper(c: char) -> char {
match bsearch_case_table(c, LlLu_table) {
None => c,
Some(index) => LlLu_table[index].1
}
}
fn bsearch_case_table(c: char, table: &'static [(char, char)]) -> Option<uint> {
match table.binary_search(|&(key, _)| {
if c == key { Equal }
else if key < c { Less }
else { Greater }
}) {
slice::BinarySearchResult::Found(i) => Some(i),
slice::BinarySearchResult::NotFound(_) => None,
}
}
""")
    # Tables sorted by source char so the Rust binary search works.
    # NOTE: .iteritems() is Python 2 only.
    emit_table(f, "LuLl_table",
        sorted(upperlower.iteritems(), key=operator.itemgetter(0)), is_pub=False)
    emit_table(f, "LlLu_table",
        sorted(lowerupper.iteritems(), key=operator.itemgetter(0)), is_pub=False)
    f.write("}\n\n")
def emit_grapheme_module(f, grapheme_table, grapheme_cats):
    # Emit the Rust `grapheme` module: a GraphemeCat enum (one variant per
    # grapheme-cluster-break category, plus catch-all GC_Any) and a
    # range-to-category table queried by binary search.
    f.write("""pub mod grapheme {
use core::kinds::Copy;
use core::slice::SliceExt;
pub use self::GraphemeCat::*;
use core::slice;
#[allow(non_camel_case_types)]
#[derive(Clone)]
pub enum GraphemeCat {
""")
    for cat in grapheme_cats + ["Any"]:
        f.write(" GC_" + cat + ",\n")
    f.write(""" }
impl Copy for GraphemeCat {}
fn bsearch_range_value_table(c: char, r: &'static [(char, char, GraphemeCat)]) -> GraphemeCat {
use core::cmp::Ordering::{Equal, Less, Greater};
match r.binary_search(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
slice::BinarySearchResult::Found(idx) => {
let (_, _, cat) = r[idx];
cat
}
slice::BinarySearchResult::NotFound(_) => GC_Any
}
}
pub fn grapheme_category(c: char) -> GraphemeCat {
bsearch_range_value_table(c, grapheme_cat_table)
}
""")
    emit_table(f, "grapheme_cat_table", grapheme_table, "&'static [(char, char, GraphemeCat)]",
        pfun=lambda x: "(%s,%s,GC_%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]),
        is_pub=False)
    f.write("}\n")
def emit_charwidth_module(f, width_table):
    # Emit the Rust `charwidth` module: per-char display width (wcwidth-style)
    # with a separate value for East Asian (CJK) contexts.
    f.write("pub mod charwidth {\n")
    f.write(" use core::option::Option;\n")
    f.write(" use core::option::Option::{Some, None};\n")
    f.write(" use core::slice::SliceExt;\n")
    f.write(" use core::slice;\n")
    f.write("""
fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
match r.binary_search(|&(lo, hi, _, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
slice::BinarySearchResult::Found(idx) => {
let (_, _, r_ncjk, r_cjk) = r[idx];
if is_cjk { r_cjk } else { r_ncjk }
}
slice::BinarySearchResult::NotFound(_) => 1
}
}
""")
    f.write("""
pub fn width(c: char, is_cjk: bool) -> Option<uint> {
match c as uint {
_c @ 0 => Some(0), // null is zero width
cu if cu < 0x20 => None, // control sequences have no width
cu if cu < 0x7F => Some(1), // ASCII
cu if cu < 0xA0 => None, // more control sequences
_ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as uint)
}
}
""")
    f.write(" // character width table. Based on Markus Kuhn's free wcwidth() implementation,\n")
    f.write(" // http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c\n")
    emit_table(f, "charwidth_table", width_table, "&'static [(char, char, u8, u8)]", is_pub=False,
        pfun=lambda x: "(%s,%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2], x[3]))
    f.write("}\n\n")
def emit_norm_module(f, canon, compat, combine, norm_props):
    # Emit the Rust `normalization` module: canonical and compatibility
    # decomposition tables, the canonical composition table (the inverse of
    # all 2-char canonical decompositions, minus Full_Composition_Exclusion),
    # and canonical combining class lookup.
    # NOTE: Python 2 only — keys().sort(), has_key, tuple-unpacking lambda,
    # and cmp-style list.sort are all used below.
    canon_keys = canon.keys()
    canon_keys.sort()

    compat_keys = compat.keys()
    compat_keys.sort()

    canon_comp = {}
    comp_exclusions = norm_props["Full_Composition_Exclusion"]
    for char in canon_keys:
        # skip characters excluded from composition
        if True in map(lambda (lo, hi): lo <= char <= hi, comp_exclusions):
            continue
        decomp = canon[char]
        if len(decomp) == 2:
            # invert: first decomposed char -> [(second char, composed char)]
            if not canon_comp.has_key(decomp[0]):
                canon_comp[decomp[0]] = []
            canon_comp[decomp[0]].append( (decomp[1], char) )
    canon_comp_keys = canon_comp.keys()
    canon_comp_keys.sort()

    f.write("pub mod normalization {\n")

    def mkdata_fun(table):
        # Build a pfun rendering one "(char, &[decomposition chars])" entry.
        def f(char):
            data = "(%s,&[" % escape_char(char)
            first = True
            for d in table[char]:
                if not first:
                    data += ","
                first = False
                data += escape_char(d)
            data += "])"
            return data
        return f

    f.write(" // Canonical decompositions\n")
    emit_table(f, "canonical_table", canon_keys, "&'static [(char, &'static [char])]",
        pfun=mkdata_fun(canon))

    f.write(" // Compatibility decompositions\n")
    emit_table(f, "compatibility_table", compat_keys, "&'static [(char, &'static [char])]",
        pfun=mkdata_fun(compat))

    def comp_pfun(char):
        # Render one "(char, &[(second, composed), ...])" composition entry,
        # with the pair list sorted by second char for the Rust binary search.
        data = "(%s,&[" % escape_char(char)
        canon_comp[char].sort(lambda x, y: x[0] - y[0])
        first = True
        for pair in canon_comp[char]:
            if not first:
                data += ","
            first = False
            data += "(%s,%s)" % (escape_char(pair[0]), escape_char(pair[1]))
        data += "])"
        return data

    f.write(" // Canonical compositions\n")
    emit_table(f, "composition_table", canon_comp_keys,
        "&'static [(char, &'static [(char, char)])]", pfun=comp_pfun)

    f.write("""
fn bsearch_range_value_table(c: char, r: &'static [(char, char, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
use core::slice;
match r.binary_search(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
slice::BinarySearchResult::Found(idx) => {
let (_, _, result) = r[idx];
result
}
slice::BinarySearchResult::NotFound(_) => 0
}
}\n
""")

    emit_table(f, "combining_class_table", combine, "&'static [(char, char, u8)]", is_pub=False,
        pfun=lambda x: "(%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]))

    f.write(" pub fn canonical_combining_class(c: char) -> u8 {\n"
        + " bsearch_range_value_table(c, combining_class_table)\n"
        + " }\n")

    f.write("""
}
""")
def remove_from_wtable(wtable, val):
    """Return a copy of width table `wtable` with code point `val` removed.

    `wtable` is a list of (lo, hi, width, width_cjk) entries sorted by lo and
    non-overlapping. The entry containing `val` is dropped, shrunk, or split
    into two as needed.

    Unlike the previous version this does not consume (mutate) its input and
    runs in O(n) instead of O(n^2) (the old one did list.pop(0) per entry).
    """
    out = []
    for (wt_lo, wt_hi, width, width_cjk) in wtable:
        if wt_hi < val or wt_lo > val:
            # entry does not contain val: keep unchanged
            out.append((wt_lo, wt_hi, width, width_cjk))
        elif wt_lo == wt_hi == val:
            # single-point entry equal to val: drop it
            continue
        elif wt_lo == val:
            out.append((wt_lo + 1, wt_hi, width, width_cjk))
        elif wt_hi == val:
            out.append((wt_lo, wt_hi - 1, width, width_cjk))
        else:
            # val strictly inside the range: split into two entries
            out.append((wt_lo, val - 1, width, width_cjk))
            out.append((val + 1, wt_hi, width, width_cjk))
    return out
def optimize_width_table(wtable):
    """Merge adjacent width-table entries that share BOTH width values.

    Entries are (lo, hi, width, width_cjk), sorted and non-overlapping. Two
    neighbors merge only when hi + 1 == next lo AND both the regular and the
    CJK widths match.

    Fixes: the previous version compared `[2:3]` — the regular width only —
    so ranges whose CJK widths differed were merged, corrupting the emitted
    charwidth table. It also crashed (pop from empty list) on empty input.
    """
    if not wtable:
        return []
    out = []
    current = wtable.pop(0)
    while wtable:
        nxt = wtable[0]
        if current[1] == nxt[0] - 1 and current[2:4] == nxt[2:4]:
            # contiguous and identical widths: extend the current run
            wtable.pop(0)
            current = (current[0], nxt[1], nxt[2], nxt[3])
        else:
            out.append(current)
            current = wtable.pop(0)
    out.append(current)
    return out
if __name__ == "__main__":
    # Regenerates tables.rs from the Unicode Character Database (UCD) files.
    # NOTE(review): this script is Python 2 only — it uses tuple-unpacking
    # lambda parameters (`lambda (lo, hi): ...` below), a syntax error on
    # Python 3.
    r = "tables.rs"
    if os.path.exists(r):
        os.remove(r)
    with open(r, "w") as rf:
        # write the file's preamble
        rf.write(preamble)
        # download and parse all the data
        fetch("ReadMe.txt")
        with open("ReadMe.txt") as readme:
            # NOTE(review): should ideally be a raw string (r"..."); it works
            # only because \d and \. are not recognized string escapes.
            pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
            unicode_version = re.search(pattern, readme.read()).groups()
        rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the `UnicodeChar` and `UnicodeStrPrelude` traits are based on.
pub const UNICODE_VERSION: (uint, uint, uint) = (%s, %s, %s);
""" % unicode_version)
        # Parse the core UCD tables: decompositions, general categories,
        # combining classes and case mappings.
        (canon_decomp, compat_decomp, gencats, combines,
         lowerupper, upperlower) = load_unicode_data("UnicodeData.txt")
        want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase"]
        other_derived = ["Default_Ignorable_Code_Point", "Grapheme_Extend"]
        derived = load_properties("DerivedCoreProperties.txt", want_derived + other_derived)
        scripts = load_properties("Scripts.txt", [])
        props = load_properties("PropList.txt",
                ["White_Space", "Join_Control", "Noncharacter_Code_Point"])
        norm_props = load_properties("DerivedNormalizationProps.txt",
                     ["Full_Composition_Exclusion"])
        # grapheme cluster category from DerivedCoreProperties
        # the rest are defined below
        grapheme_cats = {}
        grapheme_cats["Extend"] = derived["Grapheme_Extend"]
        del(derived["Grapheme_Extend"])
        # bsearch_range_table is used in all the property modules below
        emit_bsearch_range_table(rf)
        # all of these categories will also be available as \p{} in libregex
        allcats = []
        for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
                                  ("derived_property", derived, want_derived), \
                                  ("script", scripts, []), \
                                  ("property", props, ["White_Space"]):
            emit_property_module(rf, name, cat, pfuns)
            allcats.extend(map(lambda x: (x, name), cat))
        allcats.sort(key=lambda c: c[0])
        # the \w regex corresponds to Alphabetic + Mark + Decimal_Number +
        # Connector_Punctuation + Join-Control according to UTS#18
        # http://www.unicode.org/reports/tr18/#Compatibility_Properties
        perl_words = []
        for cat in derived["Alphabetic"], gencats["M"], gencats["Nd"], \
                   gencats["Pc"], props["Join_Control"]:
            perl_words.extend(ungroup_cat(cat))
        perl_words = group_cat(perl_words)
        # emit lookup tables for \p{}, along with \d, \w, and \s for libregex
        emit_regex_module(rf, allcats, perl_words)
        # normalizations and conversions module
        emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props)
        emit_conversions_module(rf, lowerupper, upperlower)
        ### character width module
        # Build (lo, hi, width, width_cjk) entries; start with zero-width
        # categories: enclosing/nonspacing marks and format characters.
        width_table = []
        for zwcat in ["Me", "Mn", "Cf"]:
            width_table.extend(map(lambda (lo, hi): (lo, hi, 0, 0), gencats[zwcat]))
        # U+1160..U+11FF (Hangul jamo vowels/finals) are zero width as well.
        width_table.append((4448, 4607, 0, 0))
        # get widths, except those that are explicitly marked zero-width above
        ea_widths = load_east_asian_width(["W", "F", "A"], ["Me", "Mn", "Cf"])
        # these are doublewidth
        for dwcat in ["W", "F"]:
            width_table.extend(map(lambda (lo, hi): (lo, hi, 2, 2), ea_widths[dwcat]))
        # "Ambiguous" characters: width 1 normally, 2 in CJK context.
        width_table.extend(map(lambda (lo, hi): (lo, hi, 1, 2), ea_widths["A"]))
        width_table.sort(key=lambda w: w[0])
        # soft hyphen is not zero width in preformatted text; it's used to indicate
        # a hyphen inserted to facilitate a linebreak.
        width_table = remove_from_wtable(width_table, 173)
        # optimize the width table by collapsing adjacent entities when possible
        width_table = optimize_width_table(width_table)
        emit_charwidth_module(rf, width_table)
        ### grapheme cluster module
        # from http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Break_Property_Values
        # Hangul syllable categories
        want_hangul = ["L", "V", "T", "LV", "LVT"]
        grapheme_cats.update(load_properties("HangulSyllableType.txt", want_hangul))
        # Control
        # This category also includes Cs (surrogate codepoints), but Rust's `char`s are
        # Unicode Scalar Values only, and surrogates are thus invalid `char`s.
        grapheme_cats["Control"] = set()
        for cat in ["Zl", "Zp", "Cc", "Cf"]:
            grapheme_cats["Control"] |= set(ungroup_cat(gencats[cat]))
        # Per UAX#29: Control = (Zl|Zp|Cc|Cf) minus a few exceptions, plus the
        # unassigned default-ignorable codepoints.
        grapheme_cats["Control"] = group_cat(list(
            grapheme_cats["Control"]
            - grapheme_control_exceptions
            | (set(ungroup_cat(gencats["Cn"]))
               & set(ungroup_cat(derived["Default_Ignorable_Code_Point"])))))
        # Regional Indicator
        grapheme_cats["RegionalIndicator"] = grapheme_regional_indicator
        # Prepend - "Currently there are no characters with this value"
        # (from UAX#29, Unicode 7.0)
        # SpacingMark
        grapheme_cats["SpacingMark"] = group_cat(list(
            set(ungroup_cat(gencats["Mc"]))
            - set(ungroup_cat(grapheme_cats["Extend"]))
            | grapheme_spacingmark_extra
            - set(ungroup_cat(grapheme_spacingmark_exceptions))))
        # Flatten {category: ranges} into a single sorted (lo, hi, cat) table.
        grapheme_table = []
        for cat in grapheme_cats:
            grapheme_table.extend([(x, y, cat) for (x, y) in grapheme_cats[cat]])
        grapheme_table.sort(key=lambda w: w[0])
        emit_grapheme_module(rf, grapheme_table, grapheme_cats.keys())
| 35.027701
| 100
| 0.564966
|
4a0bbace6820dd52b39e5209a482aa500b14ec7d
| 5,856
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/globals/topology/pce/pce_5defd13c57ea406c73fd4b2cb010a30f.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/globals/topology/pce/pce_5defd13c57ea406c73fd4b2cb010a30f.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/globals/topology/pce/pce_5defd13c57ea406c73fd4b2cb010a30f.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Pce(Base):
    """PCE port specific data.

    `pce` is a required resource: reading it (or any accessor below) always
    re-fetches the current state from the server.
    """
    __slots__ = ()
    _SDM_NAME = 'pce'
    _SDM_ATT_MAP = {
        'BindingSIDDraftVersion': 'bindingSIDDraftVersion',
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'Name': 'name',
        'RowNames': 'rowNames',
    }

    def __init__(self, parent):
        # All REST bookkeeping lives in the Base class.
        super(Pce, self).__init__(parent)

    @property
    def StartRate(self):
        """Server-selected instance of the StartRate sub-resource.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ethernet.startrate.startrate_2bc83a4fb9730935e8259bdb40af2dc0 import StartRate
        resource = StartRate(self)
        return resource._select()

    @property
    def StopRate(self):
        """Server-selected instance of the StopRate sub-resource.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.ethernet.stoprate.stoprate_4ea9a1b38960d2b21012777131469a04 import StopRate
        resource = StopRate(self)
        return resource._select()

    @property
    def BindingSIDDraftVersion(self):
        """Multivalue selecting Binding SID draft-version compatibility.

        Draft versions before the IETF draft keep the legacy implementation;
        the IETF draft uses the new implementation and TLV structure.
        """
        from uhd_restpy.multivalue import Multivalue
        raw = self._get_attribute(self._SDM_ATT_MAP['BindingSIDDraftVersion'])
        return Multivalue(self, raw)

    @property
    def Count(self):
        """number: element count inside the multiplier-scaled parent container
        (e.g. number of devices inside a Device Group)."""
        return self._get_attribute(self._SDM_ATT_MAP['Count'])

    @property
    def DescriptiveName(self):
        """str: longer, more descriptive name; unlike Name it is not
        guaranteed to be unique, but may offer more context."""
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])

    @property
    def Name(self):
        """str: name of the NGPF element, guaranteed unique in the Scenario."""
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    @property
    def RowNames(self):
        """list(str): names of the rows."""
        return self._get_attribute(self._SDM_ATT_MAP['RowNames'])

    def update(self, Name=None):
        """Update the pce resource on the server.

        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # locals() is mapped through _SDM_ATT_MAP, so only named args go out.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def get_device_ids(self, PortNames=None, BindingSIDDraftVersion=None):
        """Return the pce device ids matching the optional regex filters.

        Args
        ----
        - PortNames (str): optional regex of port names
        - BindingSIDDraftVersion (str): optional regex of bindingSIDDraftVersion

        Returns
        -------
        - list(int): device ids that meet the regex criteria

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())
| 37.538462
| 251
| 0.676059
|
4a0bbb7bd6d1db31b95975c167d4543d43d005e1
| 4,415
|
py
|
Python
|
app/emr_autoscaling/scaler.py
|
ImmobilienScout24/emr-autoscaling
|
f32e2e9e72306927ae28d4bff98c2bc5bf8eec66
|
[
"Apache-2.0"
] | 11
|
2016-05-13T09:01:06.000Z
|
2017-05-10T08:47:06.000Z
|
app/emr_autoscaling/scaler.py
|
ImmobilienScout24/emr-autoscaling
|
f32e2e9e72306927ae28d4bff98c2bc5bf8eec66
|
[
"Apache-2.0"
] | null | null | null |
app/emr_autoscaling/scaler.py
|
ImmobilienScout24/emr-autoscaling
|
f32e2e9e72306927ae28d4bff98c2bc5bf8eec66
|
[
"Apache-2.0"
] | null | null | null |
import boto3
from datetime import datetime
from app.pytz import timezone
from app.emr_autoscaling.utils import get_logger
from app.emr_autoscaling.constants import UP, DOWN
class EmrScaler:
    """Decides when an EMR cluster should scale up, scale down, or shut down."""

    def __init__(self, emr, min_instances=0, max_instances=20, office_hours_start=7, office_hours_end=18,
                 shutdown_time=23, parent_stack=None, stack_deletion_role=None):
        self.min_instances = min_instances
        self.max_instances = max_instances
        self.office_hours_start = office_hours_start
        self.office_hours_end = office_hours_end
        self.logger = get_logger('EMRScaler')
        self.emr = emr
        self.time_zone = timezone('Europe/Berlin')
        # The shutdown hour is given in local (Berlin) time; convert it back
        # by subtracting the current UTC offset in whole hours.
        utc_offset = datetime.now(self.time_zone).utcoffset()
        self.time_offset = int(utc_offset.total_seconds() / (60 * 60))
        self.shutdown_time = datetime.now(self.time_zone).replace(
            hour=shutdown_time - self.time_offset, minute=0, second=0, microsecond=0)
        self.parent_stack = parent_stack
        self.cloud_formation = boto3.client('cloudformation')
        self.stack_deletion_role = stack_deletion_role

    def is_in_office_hours(self, curr_time):
        """True on weekdays between office_hours_start and office_hours_end,
        where the closing hour itself only counts at exactly HH:00:00.000."""
        self.logger.info("it is now {HOUR}:{MINUTE} on {WEEKDAY} ({DAY_NUMBER})"
                         .format(HOUR=curr_time.hour, MINUTE=curr_time.minute,
                                 WEEKDAY=curr_time.strftime("%A"), DAY_NUMBER=curr_time.weekday()))
        self.logger.info("office hours are from {OFFICE_HOURS_START} until {OFFICE_HOURS_END}"
                         .format(OFFICE_HOURS_START=self.office_hours_start, OFFICE_HOURS_END=self.office_hours_end))
        if curr_time.weekday() > 4:
            # Saturday or Sunday.
            return False
        if curr_time.hour < self.office_hours_start:
            return False
        if curr_time.hour < self.office_hours_end:
            return True
        # Exactly the closing hour: only the very first instant counts.
        return (curr_time.hour == self.office_hours_end
                and curr_time.minute == 0
                and curr_time.second == 0
                and curr_time.microsecond == 0)

    def should_scale_down(self, threshold):
        """Scale down when memory usage is at or below threshold — but never
        during office hours."""
        allocated = self.emr.get_average_of_last_hour("MemoryAllocatedMB")
        total = self.emr.get_average_of_last_hour("MemoryTotalMB")
        memory_used_ratio = allocated / total
        if memory_used_ratio > threshold:
            return False
        if self.is_in_office_hours(datetime.now(self.time_zone)):
            self.logger.info (
                "Memory used ratio {} is below threshold of {}, but won't scale down due to office hours.".format (
                    memory_used_ratio, threshold
                )
            )
            return False
        self.logger.info (
            "Memory used ratio {} is below threshold of {}, should scale down.".format (
                memory_used_ratio, threshold
            )
        )
        return True

    def should_scale_up(self):
        """Scale up as soon as any YARN containers are waiting."""
        container_pending = self.emr.get_pending_containers()
        if container_pending <= 0:
            return False
        self.logger.info("{} containers are waiting, should scale up.".format(container_pending))
        return True

    def maybe_scale(self, threshold):
        """Trigger at most one scaling action, unless a scaling is in flight."""
        if self.emr.scaling_in_progress():
            self.logger.info("Scaling is already running, doing nothing.")
            return
        # Evaluate both checks up front so their metrics/logging always run.
        scale_up_needed = self.should_scale_up()
        scale_down_needed = self.should_scale_down(threshold)
        if scale_up_needed:
            self.emr.scale(UP)
        elif scale_down_needed:
            self.emr.scale(DOWN)
        else:
            self.logger.info("Nothing to do, going back to sleep.")

    def maybe_shutdown(self):
        """Delete the parent stack once past shutdown time, unless the cluster
        is termination protected or no parent stack is configured."""
        self.logger.info("Parent stack: %s" % self.parent_stack)
        if self.is_after_shutdown_time() and not self.emr.is_termination_protected() and self.parent_stack:
            self.shutdown()

    def shutdown(self):
        # Deleting the parent stack tears down the whole cluster.
        self.cloud_formation.delete_stack(StackName=self.parent_stack, RoleARN=self.stack_deletion_role)

    def is_after_shutdown_time(self, time=None):
        """Compare the localized wall-clock time of day against shutdown_time
        (dates are deliberately ignored)."""
        reference = self.time_zone.localize(time or datetime.now())
        self.logger.info("Current time: %s, shutdown time %s" % (reference, self.shutdown_time))
        return self.shutdown_time.time() <= reference.time()
| 42.047619
| 135
| 0.62333
|
4a0bbb8e5217530d0fa580c7cdaea79743b9d29a
| 10,533
|
py
|
Python
|
trainer.py
|
Edresson/deepspeech-playground
|
9af761b7729ae44d0f53b6d55a404a8ee497bf95
|
[
"Apache-2.0"
] | null | null | null |
trainer.py
|
Edresson/deepspeech-playground
|
9af761b7729ae44d0f53b6d55a404a8ee497bf95
|
[
"Apache-2.0"
] | null | null | null |
trainer.py
|
Edresson/deepspeech-playground
|
9af761b7729ae44d0f53b6d55a404a8ee497bf95
|
[
"Apache-2.0"
] | null | null | null |
import os
import logging
import numpy as np
from utils import conv_chain_output_length, word_error_rate, save_model
# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger(__name__)
def _last_of_list_or_none(l):
return None if len(l) == 0 else l[-1]
class Trainer(object):
    """
    Training and validation routines for a CTC speech model.

    Properties:
        best_cost (float)
        last_cost (float)
        best_val_cost (float)
        last_val_cost (float)
        wers (list(float))
        val_wers (list(float))
    """
    def __init__(self, model, train_fn, val_fn, on_text=True, on_phoneme=False):
        # train_fn / val_fn are compiled backend functions (Keras-style):
        # called with a flat list of inputs, they return a list of outputs
        # whose layout depends on the on_text/on_phoneme combination.
        self.model = model
        self.train_fn = train_fn
        self.val_fn = val_fn
        self.on_text = on_text
        self.on_phoneme = on_phoneme
        # Per-minibatch training history. `wers` only grows on iterations
        # where the WER is actually computed (every 10th minibatch in run()),
        # so its length differs from `text_costs`.
        self.wers, self.text_costs, self.phoneme_costs = [], [], []
        self.val_wers, self.val_text_costs, self.val_phoneme_costs = [], [], []
        # These go through the best_cost / best_val_cost property setters
        # below, initializing best_{text,phoneme}[_val]_cost to "infinity".
        self.best_cost = np.iinfo(np.int32).max
        self.best_val_cost = np.iinfo(np.int32).max
        if not (on_text or on_phoneme):
            raise ValueError("Model should train against at least text or "
                             "phoneme")
    @property
    def last_text_cost(self):
        return _last_of_list_or_none(self.text_costs)
    @property
    def last_phoneme_cost(self):
        return _last_of_list_or_none(self.phoneme_costs)
    @property
    def last_val_text_cost(self):
        return _last_of_list_or_none(self.val_text_costs)
    @property
    def last_val_phoneme_cost(self):
        return _last_of_list_or_none(self.val_phoneme_costs)
    @property
    def last_wer(self):
        return _last_of_list_or_none(self.wers)
    @property
    def last_val_wer(self):
        return _last_of_list_or_none(self.val_wers)
    @property
    def last_cost(self):
        """ Cost of last minibatch on train """
        # Text cost takes precedence when training on both targets.
        if self.on_text:
            return self.last_text_cost
        if self.on_phoneme:
            return self.last_phoneme_cost
    @property
    def last_val_cost(self):
        """ Last cost on whole validation set """
        if self.on_text:
            return self.last_val_text_cost
        if self.on_phoneme:
            return self.last_val_phoneme_cost
    @property
    def best_cost(self):
        """ Best cost among minibatchs of training set """
        if self.on_text:
            return self.best_text_cost
        if self.on_phoneme:
            return self.best_phoneme_cost
    @best_cost.setter
    def best_cost(self, val):
        # Only one of the backing attributes is kept in sync, mirroring the
        # getter's precedence.
        if self.on_text:
            self.best_text_cost = val
        elif self.on_phoneme:
            self.best_phoneme_cost = val
    @property
    def best_val_cost(self):
        """ Best cost on whole validation set so far """
        if self.on_text:
            return self.best_text_val_cost
        if self.on_phoneme:
            return self.best_phoneme_val_cost
    @best_val_cost.setter
    def best_val_cost(self, val):
        if self.on_text:
            self.best_text_val_cost = val
        elif self.on_phoneme:
            self.best_phoneme_val_cost = val
    def run(self, datagen, save_dir, epochs=10, mb_size=16, do_sortagrad=False,
            stateful=False, save_best_weights=False, save_best_val_weights=True,
            iters_to_valid=100, iters_to_checkout=500):
        """ Run trainig loop
        Args:
            datagen (DataGenerator)
            save_dir (str): directory path that will contain the model
            epochs (int): number of epochs
            mb_size (int): mini-batch size
            do_sortagrad (bool): sort dataset by duration on first epoch
                (an int works too: sortagrad is applied for that many epochs)
            stateful (bool): is model stateful or not
            save_best_weights (bool): save weights whenever cost over
                training mini-batch reduced
            save_best_val_weights (bool): save weights whenever cost over
                validation set reduced
            iters_to_valid (int): after this amount of iterations validate
                model by whole validation set
            iters_to_checkout (int): after this amount of iterations save
                model
        """
        logger.info("Training model..")
        iters = 0
        for e in range(epochs):
            # do_sortagrad may be a bool or an epoch count; an int means
            # "sort by duration for the first `do_sortagrad` epochs".
            if not isinstance(do_sortagrad, bool):
                sortagrad = e < do_sortagrad
                shuffle = not sortagrad
            elif do_sortagrad:
                shuffle = False
                sortagrad = True
            else:
                shuffle = True
                sortagrad = False
            train_iter = datagen.iterate_train(mb_size, shuffle=shuffle,
                                               sort_by_duration=sortagrad)
            for i, batch in enumerate(train_iter):
                # Stateful RNNs need constant batch size; drop the remainder.
                if stateful and batch['x'].shape[0] != mb_size:
                    break
                # WER is only computed every 10th minibatch (it is costly).
                self.train_minibatch(batch, i % 10 == 0)
                if i % 10 == 0:
                    logger.info("Epoch: {} Iteration: {}({}) TextLoss: {}"
                                " PhonemeLoss: {} WER: {}"
                                .format(e, i, iters, self.last_text_cost,
                                        self.last_phoneme_cost,
                                        self.last_wer))
                iters += 1
                # NOTE(review): this condition looks inverted — it saves when
                # the best cost is *smaller* than the latest (i.e. when there
                # was NO improvement), and best_cost is never updated here.
                # Compare with validate(), which checks last < best and then
                # updates best_val_cost. Confirm intent before changing.
                if save_best_weights and self.best_cost < self.last_cost:
                    self.save_weights(save_dir, 'best-weights.h5')
                if iters_to_valid is not None and iters % iters_to_valid == 0:
                    self.validate(datagen, mb_size, stateful,
                                  save_best_val_weights, save_dir)
                if i and i % iters_to_checkout == 0:
                    self.save_model(save_dir, iters)
            # End-of-epoch validation/checkpoint if not just done above.
            if iters_to_valid is not None and iters % iters_to_valid != 0:
                self.validate(datagen, mb_size, stateful, save_best_val_weights,
                              save_dir)
            # NOTE(review): `i` leaks out of the inner loop — this raises
            # NameError if the generator yielded no batches at all.
            if i % iters_to_checkout != 0:
                self.save_model(save_dir, iters)
    def train_minibatch(self, batch, compute_wer=False):
        # Run one backend training step; output layout depends on targets.
        inputs = batch['x']
        input_lengths = batch['input_lengths']
        # CTC needs the post-convolution sequence lengths, not raw lengths.
        ctc_input_lens = self.ctc_input_length(input_lengths)
        if self.on_text and self.on_phoneme:
            _, ctc_phoneme, pred_texts, ctc_text = self.train_fn([
                inputs, ctc_input_lens, batch['phonemes'],
                batch['phoneme_lengths'], batch['y'], batch['label_lengths'],
                True])
        elif self.on_text:
            pred_texts, ctc_text = self.train_fn([inputs, ctc_input_lens,
                                                  batch['y'],
                                                  batch['label_lengths'], True])
        elif self.on_phoneme:
            _, ctc_phoneme = self.train_fn([inputs, ctc_input_lens,
                                            batch['phonemes'],
                                            batch['phoneme_lengths'],
                                            True])
        if self.on_text:
            if compute_wer:
                wer = word_error_rate(batch['texts'], pred_texts).mean()
                self.wers.append(wer)
            self.text_costs.append(ctc_text)
        if self.on_phoneme:
            self.phoneme_costs.append(ctc_phoneme)
    def validate(self, datagen, mb_size, stateful, save_best_weights, save_dir):
        # Average the per-minibatch costs over the whole validation set.
        text_avg_cost, phoneme_avg_cost = 0.0, 0.0
        total_wers = []
        i = 0
        for batch in datagen.iterate_validation(mb_size):
            if stateful and batch['x'].shape[0] != mb_size:
                break
            text_cost, phoneme_cost, wers = self.validate_minibatch(batch)
            if self.on_text:
                text_avg_cost += text_cost
                total_wers.append(wers)
            if self.on_phoneme:
                phoneme_avg_cost += phoneme_cost
            i += 1
        if i != 0:
            text_avg_cost /= i
            phoneme_avg_cost /= i
        # NOTE(review): with an empty validation set, np.concatenate([])
        # below raises — the i != 0 guard above does not protect this branch.
        if self.on_text:
            self.val_wers.append(np.concatenate(total_wers).mean())
            self.val_text_costs.append(text_avg_cost)
        if self.on_phoneme:
            self.val_phoneme_costs.append(phoneme_avg_cost)
        logger.info("Validation TextLoss: {} Validation PhonemeLoss: {} "
                    "Validation WER: {}".format(self.last_val_text_cost,
                                                self.last_val_phoneme_cost,
                                                self.last_val_wer))
        # Checkpoint when the full-set validation cost improved.
        if save_best_weights and self.last_val_cost < self.best_val_cost:
            self.best_val_cost = self.last_val_cost
            self.save_weights(save_dir, 'best-val-weights.h5')
    def validate_minibatch(self, batch):
        # Same input layout as train_minibatch, but through val_fn
        # (no weight updates).
        inputs = batch['x']
        input_lengths = batch['input_lengths']
        ctc_input_lens = self.ctc_input_length(input_lengths)
        text_ctc, phoneme_ctc, wers = None, None, None
        if self.on_text and self.on_phoneme:
            _, phoneme_ctc, pred_text, text_ctc = self.val_fn([
                inputs, ctc_input_lens, batch['phonemes'],
                batch['phoneme_lengths'], batch['y'], batch['label_lengths'],
                True])
        elif self.on_text:
            pred_text, text_ctc = self.val_fn([
                inputs, ctc_input_lens, batch['y'], batch['label_lengths'],
                True])
        elif self.on_phoneme:
            _, phoneme_ctc = self.val_fn([
                inputs, ctc_input_lens, batch['phonemes'],
                batch['phoneme_lengths'], True
            ])
        if self.on_text:
            wers = word_error_rate(batch['texts'], pred_text)
        return text_ctc, phoneme_ctc, wers
    def ctc_input_length(self, input_lengths):
        # Map raw feature lengths to the lengths the CTC loss sees, i.e.
        # after every Conv1D layer has shortened the time axis.
        import keras.layers
        # Keras 2 calls it Conv1D; fall back to Keras 1's Convolution1D.
        conv_class = (getattr(keras.layers, 'Conv1D', None) or
                      keras.layers.Convolution1D)
        conv_lays = [l for l in self.model.layers if isinstance(l, conv_class)]
        # NOTE(review): `l` in this comprehension is a *length*, shadowing the
        # layer variable above — confusing but correct.
        return [conv_chain_output_length(l, conv_lays) for l in input_lengths]
    def save_weights(self, save_dir, filename):
        self.model.save_weights(os.path.join(save_dir, filename),
                                overwrite=True)
    def save_model(self, save_dir, index):
        # Persist model plus the full training/validation history.
        save_model(save_dir, self.model, self.text_costs, self.val_text_costs,
                   wer=self.wers, val_wer=self.val_wers,
                   phoneme=self.phoneme_costs,
                   val_phoneme=self.val_phoneme_costs, index=index)
| 39.597744
| 80
| 0.568974
|
4a0bbd3e001a062ddc9e3be6bd3b4bedc2c70776
| 65,938
|
py
|
Python
|
myvenv/lib/python3.5/site-packages/babel/dates.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | 35
|
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
myvenv/lib/python3.5/site-packages/babel/dates.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | 28
|
2020-03-04T22:01:48.000Z
|
2022-03-12T00:59:47.000Z
|
myvenv/lib/python3.5/site-packages/babel/dates.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | 88
|
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
# -*- coding: utf-8 -*-
"""
babel.dates
~~~~~~~~~~~
Locale dependent formatting and parsing of dates and times.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_TIME``,
* ``LC_ALL``, and
* ``LANG``
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division
import re
import warnings
import pytz as _pytz
from datetime import date, datetime, time, timedelta
from bisect import bisect_right
from babel.core import default_locale, get_global, Locale
from babel.util import UTC, LOCALTZ
from babel._compat import string_types, integer_types, number_types
# Default locale for this module, resolved from LC_TIME / LC_ALL / LANG.
LC_TIME = default_locale('LC_TIME')

# Aliases for use in scopes where the modules are shadowed by local variables
# (several functions below take parameters named `date`, `datetime` or `time`).
date_ = date
datetime_ = datetime
time_ = time
def _get_dt_and_tzinfo(dt_or_tzinfo):
    """
    Parse a `dt_or_tzinfo` value into a (datetime, tzinfo) pair.

    See the docs for this function's callers for semantics.

    :rtype: tuple[datetime, tzinfo]
    """
    if dt_or_tzinfo is None:
        # No hint at all: local "now" in the local timezone.
        return datetime.now(), LOCALTZ
    if isinstance(dt_or_tzinfo, string_types):
        # A timezone name; there is no datetime component.
        return None, get_timezone(dt_or_tzinfo)
    if isinstance(dt_or_tzinfo, integer_types):
        # A numeric offset-like value: treated as UTC, no datetime.
        return None, UTC
    if isinstance(dt_or_tzinfo, (datetime, time)):
        dt = _get_datetime(dt_or_tzinfo)
        tzinfo = dt.tzinfo if dt.tzinfo is not None else UTC
        return dt, tzinfo
    # Anything else is assumed to already be a tzinfo object.
    return None, dt_or_tzinfo
def _get_datetime(instant):
"""
Get a datetime out of an "instant" (date, time, datetime, number).
.. warning:: The return values of this function may depend on the system clock.
If the instant is None, the current moment is used.
If the instant is a time, it's augmented with today's date.
Dates are converted to naive datetimes with midnight as the time component.
>>> _get_datetime(date(2015, 1, 1))
datetime.datetime(2015, 1, 1, 0, 0)
UNIX timestamps are converted to datetimes.
>>> _get_datetime(1400000000)
datetime.datetime(2014, 5, 13, 16, 53, 20)
Other values are passed through as-is.
>>> x = datetime(2015, 1, 1)
>>> _get_datetime(x) is x
True
:param instant: date, time, datetime, integer, float or None
:type instant: date|time|datetime|int|float|None
:return: a datetime
:rtype: datetime
"""
if instant is None:
return datetime_.utcnow()
elif isinstance(instant, integer_types) or isinstance(instant, float):
return datetime_.utcfromtimestamp(instant)
elif isinstance(instant, time):
return datetime_.combine(date.today(), instant)
elif isinstance(instant, date) and not isinstance(instant, datetime):
return datetime_.combine(instant, time())
# TODO (3.x): Add an assertion/type check for this fallthrough branch:
return instant
def _ensure_datetime_tzinfo(datetime, tzinfo=None):
    """
    Ensure the datetime passed has an attached tzinfo.

    If the datetime is tz-naive to begin with, UTC is attached.
    If a tzinfo is passed in, the datetime is normalized to that timezone.

    >>> _ensure_datetime_tzinfo(datetime(2015, 1, 1)).tzinfo.zone
    'UTC'

    >>> tz = get_timezone("Europe/Stockholm")
    >>> _ensure_datetime_tzinfo(datetime(2015, 1, 1, 13, 15, tzinfo=UTC), tzinfo=tz).hour
    14

    :param datetime: Datetime to augment.
    :param tzinfo: Optional tzinfo.
    :return: datetime with tzinfo
    :rtype: datetime
    """
    # Naive datetimes are assumed to be in UTC.
    if datetime.tzinfo is None:
        datetime = datetime.replace(tzinfo=UTC)
    if tzinfo is None:
        return datetime
    datetime = datetime.astimezone(get_timezone(tzinfo))
    if hasattr(tzinfo, 'normalize'):  # pytz needs an explicit DST fix-up
        datetime = tzinfo.normalize(datetime)
    return datetime
def _get_time(time, tzinfo=None):
    """
    Get a timezoned time from a given instant.

    .. warning:: The return values of this function may depend on the system clock.

    :param time: time, datetime or None
    :rtype: time
    """
    # Normalize the instant into a tz-aware time/datetime first.
    t = time
    if t is None:
        t = datetime.utcnow()
    elif isinstance(t, number_types):
        t = datetime.utcfromtimestamp(t)
    if t.tzinfo is None:
        t = t.replace(tzinfo=UTC)
    if not isinstance(t, datetime):
        # A bare time object: just attach the requested tzinfo, if any.
        if tzinfo is not None:
            t = t.replace(tzinfo=tzinfo)
        return t
    # A datetime: convert into the target zone, then strip to a time-of-day.
    if tzinfo is not None:
        t = t.astimezone(tzinfo)
        if hasattr(tzinfo, 'normalize'):  # pytz DST fix-up
            t = tzinfo.normalize(t)
    return t.timetz()
def get_timezone(zone=None):
    """Looks up a timezone by name and returns it. The timezone object
    returned comes from ``pytz`` and corresponds to the `tzinfo` interface and
    can be used with all of the functions of Babel that operate with dates.

    If a timezone is not known a :exc:`LookupError` is raised. If `zone`
    is ``None`` a local zone object is returned.

    :param zone: the name of the timezone to look up. If a timezone object
                 itself is passed in, it's returned unchanged.
    """
    if zone is None:
        return LOCALTZ
    # Non-string values are assumed to already be tzinfo objects.
    if not isinstance(zone, string_types):
        return zone
    try:
        return _pytz.timezone(zone)
    except _pytz.UnknownTimeZoneError:
        raise LookupError('Unknown timezone %s' % zone)
def get_next_timezone_transition(zone=None, dt=None):
    """Given a timezone it will return a :class:`TimezoneTransition` object
    that holds the information about the next timezone transition that's going
    to happen. For instance this can be used to detect when the next DST
    change is going to happen and how it looks like.

    The transition is calculated relative to the given datetime object. The
    next transition that follows the date is used. If a transition cannot
    be found the return value will be `None`.

    Transition information can only be provided for timezones returned by
    the :func:`get_timezone` function.

    :param zone: the timezone for which the transition should be looked up.
                 If not provided the local timezone is used.
    :param dt: the date after which the next transition should be found.
               If not given the current time is assumed.
    """
    zone = get_timezone(zone)
    # Work with a naive UTC datetime to match pytz's _utc_transition_times.
    dt = _get_datetime(dt).replace(tzinfo=None)
    if not hasattr(zone, '_utc_transition_times'):
        raise TypeError('Given timezone does not have UTC transition '
                        'times. This can happen because the operating '
                        'system fallback local timezone is used or a '
                        'custom timezone object')
    try:
        # NOTE(review): relies on pytz private attributes
        # (_utc_transition_times / _transition_info / _tzinfos); may break if
        # pytz internals change.
        idx = max(0, bisect_right(zone._utc_transition_times, dt))
        # NOTE(review): when idx == 0, `idx - 1` wraps around to the *last*
        # transition entry — confirm this is intended for dates before the
        # first recorded transition.
        old_trans = zone._transition_info[idx - 1]
        new_trans = zone._transition_info[idx]
        old_tz = zone._tzinfos[old_trans]
        new_tz = zone._tzinfos[new_trans]
    except (LookupError, ValueError):
        # No further transition on record (e.g. fixed-offset zones).
        return None
    return TimezoneTransition(
        activates=zone._utc_transition_times[idx],
        from_tzinfo=old_tz,
        to_tzinfo=new_tz,
        reference_date=dt
    )
class TimezoneTransition(object):
    """Describes one timezone transition, as returned by
    :func:`get_next_timezone_transition`.
    """

    def __init__(self, activates, from_tzinfo, to_tzinfo, reference_date=None):
        #: the time of the activation of the timezone transition in UTC.
        self.activates = activates
        #: the timezone from where the transition starts.
        self.from_tzinfo = from_tzinfo
        #: the timezone for after the transition.
        self.to_tzinfo = to_tzinfo
        #: the reference date that was provided. This is the `dt` parameter
        #: to the :func:`get_next_timezone_transition`.
        self.reference_date = reference_date

    @property
    def from_tz(self):
        """The name of the timezone before the transition."""
        return self.from_tzinfo._tzname

    @property
    def to_tz(self):
        """The name of the timezone after the transition."""
        return self.to_tzinfo._tzname

    @property
    def from_offset(self):
        """The UTC offset in seconds before the transition."""
        offset = self.from_tzinfo._utcoffset
        return int(offset.total_seconds())

    @property
    def to_offset(self):
        """The UTC offset in seconds after the transition."""
        offset = self.to_tzinfo._utcoffset
        return int(offset.total_seconds())

    def __repr__(self):
        return '<TimezoneTransition %s -> %s (%s)>' % (
            self.from_tz,
            self.to_tz,
            self.activates,
        )
def get_period_names(width='wide', context='stand-alone', locale=LC_TIME):
    """Return the names for day periods (AM/PM) used by the locale.

    >>> get_period_names(locale='en_US')['am']
    u'AM'

    :param width: the width to use, one of "abbreviated", "narrow", or "wide"
    :param context: the context, either "format" or "stand-alone"
    :param locale: the `Locale` object, or a locale string
    """
    periods = Locale.parse(locale).day_periods
    return periods[context][width]
def get_day_names(width='wide', context='format', locale=LC_TIME):
    """Return the day names used by the locale for the specified format.

    >>> get_day_names('wide', locale='en_US')[1]
    u'Tuesday'
    >>> get_day_names('short', locale='en_US')[1]
    u'Tu'
    >>> get_day_names('abbreviated', locale='es')[1]
    u'mar.'
    >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1]
    u'D'

    :param width: the width to use, one of "wide", "abbreviated", "short" or "narrow"
    :param context: the context, either "format" or "stand-alone"
    :param locale: the `Locale` object, or a locale string
    """
    days = Locale.parse(locale).days
    return days[context][width]
def get_month_names(width='wide', context='format', locale=LC_TIME):
    """Return the month names used by the locale for the specified format.

    >>> get_month_names('wide', locale='en_US')[1]
    u'January'
    >>> get_month_names('abbreviated', locale='es')[1]
    u'ene.'
    >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1]
    u'J'

    :param width: the width to use, one of "wide", "abbreviated", or "narrow"
    :param context: the context, either "format" or "stand-alone"
    :param locale: the `Locale` object, or a locale string
    """
    months = Locale.parse(locale).months
    return months[context][width]
def get_quarter_names(width='wide', context='format', locale=LC_TIME):
    """Return the quarter names used by the locale for the specified format.
    >>> get_quarter_names('wide', locale='en_US')[1]
    u'1st quarter'
    >>> get_quarter_names('abbreviated', locale='de_DE')[1]
    u'Q1'
    >>> get_quarter_names('narrow', locale='de_DE')[1]
    u'1'
    :param width: the width to use, one of "wide", "abbreviated", or "narrow"
    :param context: the context, either "format" or "stand-alone"
    :param locale: the `Locale` object, or a locale string
    """
    parsed = Locale.parse(locale)
    return parsed.quarters[context][width]
def get_era_names(width='wide', locale=LC_TIME):
    """Return the era names used by the locale for the specified format.
    >>> get_era_names('wide', locale='en_US')[1]
    u'Anno Domini'
    >>> get_era_names('abbreviated', locale='de_DE')[1]
    u'n. Chr.'
    :param width: the width to use, either "wide", "abbreviated", or "narrow"
    :param locale: the `Locale` object, or a locale string
    """
    parsed = Locale.parse(locale)
    return parsed.eras[width]
def get_date_format(format='medium', locale=LC_TIME):
    """Return the date formatting patterns used by the locale for the specified
    format.
    >>> get_date_format(locale='en_US')
    <DateTimePattern u'MMM d, y'>
    >>> get_date_format('full', locale='de_DE')
    <DateTimePattern u'EEEE, d. MMMM y'>
    :param format: the format to use, one of "full", "long", "medium", or
                   "short"
    :param locale: the `Locale` object, or a locale string
    """
    parsed = Locale.parse(locale)
    return parsed.date_formats[format]
def get_datetime_format(format='medium', locale=LC_TIME):
    """Return the datetime formatting patterns used by the locale for the
    specified format.
    >>> get_datetime_format(locale='en_US')
    u'{1}, {0}'
    :param format: the format to use, one of "full", "long", "medium", or
                   "short"
    :param locale: the `Locale` object, or a locale string
    """
    patterns = Locale.parse(locale).datetime_formats
    # Unknown widths fall back to the locale's default (None) entry.
    key = format if format in patterns else None
    return patterns[key]
def get_time_format(format='medium', locale=LC_TIME):
    """Return the time formatting patterns used by the locale for the specified
    format.
    >>> get_time_format(locale='en_US')
    <DateTimePattern u'h:mm:ss a'>
    >>> get_time_format('full', locale='de_DE')
    <DateTimePattern u'HH:mm:ss zzzz'>
    :param format: the format to use, one of "full", "long", "medium", or
                   "short"
    :param locale: the `Locale` object, or a locale string
    """
    parsed = Locale.parse(locale)
    return parsed.time_formats[format]
def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME, return_z=False):
    """Return the timezone associated with the given `datetime` object formatted
    as string indicating the offset from GMT.
    >>> dt = datetime(2007, 4, 1, 15, 30)
    >>> get_timezone_gmt(dt, locale='en')
    u'GMT+00:00'
    >>> get_timezone_gmt(dt, locale='en', return_z=True)
    'Z'
    >>> get_timezone_gmt(dt, locale='en', width='iso8601_short')
    u'+00'
    >>> tz = get_timezone('America/Los_Angeles')
    >>> dt = tz.localize(datetime(2007, 4, 1, 15, 30))
    >>> get_timezone_gmt(dt, locale='en')
    u'GMT-07:00'
    >>> get_timezone_gmt(dt, 'short', locale='en')
    u'-0700'
    >>> get_timezone_gmt(dt, locale='en', width='iso8601_short')
    u'-07'
    The long format depends on the locale, for example in France the acronym
    UTC string is used instead of GMT:
    >>> get_timezone_gmt(dt, 'long', locale='fr_FR')
    u'UTC-07:00'
    .. versionadded:: 0.9
    :param datetime: the ``datetime`` object; if `None`, the current date and
                     time in UTC is used
    :param width: either "long" or "short" or "iso8601" or "iso8601_short"
    :param locale: the `Locale` object, or a locale string
    :param return_z: True or False; Function returns indicator "Z"
                     when local time offset is 0
    """
    datetime = _ensure_datetime_tzinfo(_get_datetime(datetime))
    locale = Locale.parse(locale)
    # Decompose the UTC offset into signed hours and a sub-hour remainder.
    offset = datetime.tzinfo.utcoffset(datetime)
    total_seconds = offset.days * 86400 + offset.seconds
    hours, remainder = divmod(total_seconds, 3600)
    if return_z and hours == 0 and remainder == 0:
        return 'Z'
    if remainder == 0 and width == 'iso8601_short':
        return u'%+03d' % hours
    if width in ('short', 'iso8601_short'):
        pattern = u'%+03d%02d'
    elif width == 'iso8601':
        pattern = u'%+03d:%02d'
    else:
        # Wrap the numeric offset in the locale's GMT format (e.g. "GMT%s").
        pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
    return pattern % (hours, remainder // 60)
def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME, return_city=False):
    u"""Return a representation of the given timezone using "location format".
    The result depends on both the local display name of the country and the
    city associated with the time zone:
    >>> tz = get_timezone('America/St_Johns')
    >>> print(get_timezone_location(tz, locale='de_DE'))
    Kanada (St. John’s) Zeit
    >>> print(get_timezone_location(tz, locale='en'))
    Canada (St. John’s) Time
    >>> print(get_timezone_location(tz, locale='en', return_city=True))
    St. John’s
    >>> tz = get_timezone('America/Mexico_City')
    >>> get_timezone_location(tz, locale='de_DE')
    u'Mexiko (Mexiko-Stadt) Zeit'
    If the timezone is associated with a country that uses only a single
    timezone, just the localized country name is returned:
    >>> tz = get_timezone('Europe/Berlin')
    >>> get_timezone_name(tz, locale='de_DE')
    u'Mitteleurop\\xe4ische Zeit'
    .. versionadded:: 0.9
    :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
                         the timezone; if `None`, the current date and time in
                         UTC is assumed
    :param locale: the `Locale` object, or a locale string
    :param return_city: True or False, if True then return exemplar city (location)
                        for the time zone
    :return: the localized timezone name using location format
    """
    dt, tzinfo = _get_dt_and_tzinfo(dt_or_tzinfo)
    locale = Locale.parse(locale)
    if hasattr(tzinfo, 'zone'):
        # pytz-style tzinfo objects carry the Olson zone ID directly.
        zone = tzinfo.zone
    else:
        # Fall back to the (possibly ambiguous) tzname for plain tzinfo.
        zone = tzinfo.tzname(dt or datetime.utcnow())
    # Get the canonical time-zone code
    zone = get_global('zone_aliases').get(zone, zone)
    info = locale.time_zones.get(zone, {})
    # Otherwise, if there is only one timezone for the country, return the
    # localized country name
    region_format = locale.zone_formats['region']
    territory = get_global('zone_territories').get(zone)
    if territory not in locale.territories:
        territory = 'ZZ'  # invalid/unknown
    territory_name = locale.territories[territory]
    if not return_city and territory and len(get_global('territory_zones').get(territory, [])) == 1:
        return region_format % (territory_name)
    # Otherwise, include the city in the output
    fallback_format = locale.zone_formats['fallback']
    if 'city' in info:
        city_name = info['city']
    else:
        # No explicit exemplar city for this zone in the locale data: try the
        # metazone, then derive a readable name from the zone ID itself.
        metazone = get_global('meta_zones').get(zone)
        metazone_info = locale.meta_zones.get(metazone, {})
        if 'city' in metazone_info:
            city_name = metazone_info['city']
        elif '/' in zone:
            city_name = zone.split('/', 1)[1].replace('_', ' ')
        else:
            city_name = zone.replace('_', ' ')
    if return_city:
        return city_name
    # Interpolate city ('0') and territory ('1') into the fallback pattern,
    # then wrap the result in the region format (e.g. "%s Time").
    return region_format % (fallback_format % {
        '0': city_name,
        '1': territory_name
    })
def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False,
                      locale=LC_TIME, zone_variant=None, return_zone=False):
    r"""Return the localized display name for the given timezone. The timezone
    may be specified using a ``datetime`` or `tzinfo` object.
    >>> dt = time(15, 30, tzinfo=get_timezone('America/Los_Angeles'))
    >>> get_timezone_name(dt, locale='en_US')
    u'Pacific Standard Time'
    >>> get_timezone_name(dt, locale='en_US', return_zone=True)
    'America/Los_Angeles'
    >>> get_timezone_name(dt, width='short', locale='en_US')
    u'PST'
    If this function gets passed only a `tzinfo` object and no concrete
    `datetime`, the returned display name is independent of daylight savings
    time. This can be used for example for selecting timezones, or to set the
    time of events that recur across DST changes:
    >>> tz = get_timezone('America/Los_Angeles')
    >>> get_timezone_name(tz, locale='en_US')
    u'Pacific Time'
    >>> get_timezone_name(tz, 'short', locale='en_US')
    u'PT'
    If no localized display name for the timezone is available, and the timezone
    is associated with a country that uses only a single timezone, the name of
    that country is returned, formatted according to the locale:
    >>> tz = get_timezone('Europe/Berlin')
    >>> get_timezone_name(tz, locale='de_DE')
    u'Mitteleurop\xe4ische Zeit'
    >>> get_timezone_name(tz, locale='pt_BR')
    u'Hor\xe1rio da Europa Central'
    On the other hand, if the country uses multiple timezones, the city is also
    included in the representation:
    >>> tz = get_timezone('America/St_Johns')
    >>> get_timezone_name(tz, locale='de_DE')
    u'Neufundland-Zeit'
    Note that short format is currently not supported for all timezones and
    all locales. This is partially because not every timezone has a short
    code in every locale. In that case it currently falls back to the long
    format.
    For more information see `LDML Appendix J: Time Zone Display Names
    <http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_
    .. versionadded:: 0.9
    .. versionchanged:: 1.0
       Added `zone_variant` support.
    :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
                         the timezone; if a ``tzinfo`` object is used, the
                         resulting display name will be generic, i.e.
                         independent of daylight savings time; if `None`, the
                         current date in UTC is assumed
    :param width: either "long" or "short"
    :param uncommon: deprecated and ignored
    :param zone_variant: defines the zone variation to return. By default the
                         variation is defined from the datetime object
                         passed in. If no datetime object is passed in, the
                         ``'generic'`` variation is assumed. The following
                         values are valid: ``'generic'``, ``'daylight'`` and
                         ``'standard'``.
    :param locale: the `Locale` object, or a locale string
    :param return_zone: True or False. If true then function
                        returns long time zone ID
    """
    dt, tzinfo = _get_dt_and_tzinfo(dt_or_tzinfo)
    locale = Locale.parse(locale)
    if hasattr(tzinfo, 'zone'):
        # pytz-style tzinfo objects carry the Olson zone ID directly.
        zone = tzinfo.zone
    else:
        zone = tzinfo.tzname(dt)
    if zone_variant is None:
        # Derive the variant from the instant: generic when no instant is
        # given, otherwise standard/daylight depending on DST at that instant.
        if dt is None:
            zone_variant = 'generic'
        else:
            dst = tzinfo.dst(dt)
            if dst:
                zone_variant = 'daylight'
            else:
                zone_variant = 'standard'
    else:
        if zone_variant not in ('generic', 'standard', 'daylight'):
            raise ValueError('Invalid zone variation')
    # Get the canonical time-zone code
    zone = get_global('zone_aliases').get(zone, zone)
    if return_zone:
        return zone
    info = locale.time_zones.get(zone, {})
    # Try explicitly translated zone names first
    if width in info:
        if zone_variant in info[width]:
            return info[width][zone_variant]
    # Then try the metazone (a group of zones sharing display names).
    metazone = get_global('meta_zones').get(zone)
    if metazone:
        metazone_info = locale.meta_zones.get(metazone, {})
        if width in metazone_info:
            if zone_variant in metazone_info[width]:
                return metazone_info[width][zone_variant]
    # If we have a concrete datetime, we assume that the result can't be
    # independent of daylight savings time, so we return the GMT offset
    if dt is not None:
        return get_timezone_gmt(dt, width=width, locale=locale)
    # Last resort: the "location format" (country/city based) name.
    return get_timezone_location(dt_or_tzinfo, locale=locale)
def format_date(date=None, format='medium', locale=LC_TIME):
    """Return a date formatted according to the given pattern.
    >>> d = date(2007, 4, 1)
    >>> format_date(d, locale='en_US')
    u'Apr 1, 2007'
    >>> format_date(d, format='full', locale='de_DE')
    u'Sonntag, 1. April 2007'
    If you don't want to use the locale default formats, you can specify a
    custom date pattern:
    >>> format_date(d, "EEE, MMM d, ''yy", locale='en')
    u"Sun, Apr 1, '07"
    :param date: the ``date`` or ``datetime`` object; if `None`, the current
                 date is used
    :param format: one of "full", "long", "medium", or "short", or a custom
                   date/time pattern
    :param locale: a `Locale` object or a locale identifier
    """
    # Normalize the input to a plain ``date``.
    if date is None:
        date = date_.today()
    elif isinstance(date, datetime):
        date = date.date()
    locale = Locale.parse(locale)
    # Named widths resolve to the locale's predefined date pattern.
    if format in ('full', 'long', 'medium', 'short'):
        format = get_date_format(format, locale=locale)
    return parse_pattern(format).apply(date, locale)
def format_datetime(datetime=None, format='medium', tzinfo=None,
                    locale=LC_TIME):
    r"""Return a date formatted according to the given pattern.
    >>> dt = datetime(2007, 4, 1, 15, 30)
    >>> format_datetime(dt, locale='en_US')
    u'Apr 1, 2007, 3:30:00 PM'
    For any pattern requiring the display of the time-zone, the third-party
    ``pytz`` package is needed to explicitly specify the time-zone:
    >>> format_datetime(dt, 'full', tzinfo=get_timezone('Europe/Paris'),
    ...                 locale='fr_FR')
    u'dimanche 1 avril 2007 \xe0 17:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
    >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
    ...                 tzinfo=get_timezone('US/Eastern'), locale='en')
    u'2007.04.01 AD at 11:30:00 EDT'
    :param datetime: the `datetime` object; if `None`, the current date and
                     time is used
    :param format: one of "full", "long", "medium", or "short", or a custom
                   date/time pattern
    :param tzinfo: the timezone to apply to the time for display
    :param locale: a `Locale` object or a locale identifier
    """
    datetime = _ensure_datetime_tzinfo(_get_datetime(datetime), tzinfo)
    locale = Locale.parse(locale)
    if format not in ('full', 'long', 'medium', 'short'):
        return parse_pattern(format).apply(datetime, locale)
    # Named widths combine the separately formatted date and time via the
    # locale's datetime template, whose {1} slot is the date and {0} the time.
    template = get_datetime_format(format, locale=locale).replace("'", "")
    time_part = format_time(datetime, format, tzinfo=None, locale=locale)
    return template \
        .replace('{0}', time_part) \
        .replace('{1}', format_date(datetime, format, locale=locale))
def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME):
    r"""Return a time formatted according to the given pattern.
    >>> t = time(15, 30)
    >>> format_time(t, locale='en_US')
    u'3:30:00 PM'
    >>> format_time(t, format='short', locale='de_DE')
    u'15:30'
    If you don't want to use the locale default formats, you can specify a
    custom time pattern:
    >>> format_time(t, "hh 'o''clock' a", locale='en')
    u"03 o'clock PM"
    For any pattern requiring the display of the time-zone a
    timezone has to be specified explicitly:
    >>> t = datetime(2007, 4, 1, 15, 30)
    >>> tzinfo = get_timezone('Europe/Paris')
    >>> t = tzinfo.localize(t)
    >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
    u'15:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
    >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=get_timezone('US/Eastern'),
    ...             locale='en')
    u"09 o'clock AM, Eastern Daylight Time"
    As that example shows, when this function gets passed a
    ``datetime.datetime`` value, the actual time in the formatted string is
    adjusted to the timezone specified by the `tzinfo` parameter. If the
    ``datetime`` is "naive" (i.e. it has no associated timezone information),
    it is assumed to be in UTC.
    These timezone calculations are **not** performed if the value is of type
    ``datetime.time``, as without date information there's no way to determine
    what a given time would translate to in a different timezone without
    information about whether daylight savings time is in effect or not. This
    means that time values are left as-is, and the value of the `tzinfo`
    parameter is only used to display the timezone name if needed:
    >>> t = time(15, 30)
    >>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'),
    ...             locale='fr_FR')
    u'15:30:00 heure normale d\u2019Europe centrale'
    >>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'),
    ...             locale='en_US')
    u'3:30:00 PM Eastern Standard Time'
    :param time: the ``time`` or ``datetime`` object; if `None`, the current
                 time in UTC is used
    :param format: one of "full", "long", "medium", or "short", or a custom
                   date/time pattern
    :param tzinfo: the time-zone to apply to the time for display
    :param locale: a `Locale` object or a locale identifier
    """
    time = _get_time(time, tzinfo)
    locale = Locale.parse(locale)
    # Named widths resolve to the locale's predefined time pattern;
    # anything else is treated as a custom pattern string.
    pattern = (get_time_format(format, locale=locale)
               if format in ('full', 'long', 'medium', 'short')
               else format)
    return parse_pattern(pattern).apply(time, locale)
def format_skeleton(skeleton, datetime=None, tzinfo=None, fuzzy=True, locale=LC_TIME):
    r"""Return a time and/or date formatted according to the given pattern.
    The skeletons are defined in the CLDR data and provide more flexibility
    than the simple short/long/medium formats, but are a bit harder to use.
    The are defined using the date/time symbols without order or punctuation
    and map to a suitable format for the given locale.
    >>> t = datetime(2007, 4, 1, 15, 30)
    >>> format_skeleton('MMMEd', t, locale='fr')
    u'dim. 1 avr.'
    >>> format_skeleton('MMMEd', t, locale='en')
    u'Sun, Apr 1'
    >>> format_skeleton('yMMd', t, locale='fi')  # yMMd is not in the Finnish locale; yMd gets used
    u'1.4.2007'
    >>> format_skeleton('yMMd', t, fuzzy=False, locale='fi')  # yMMd is not in the Finnish locale, an error is thrown
    Traceback (most recent call last):
        ...
    KeyError: yMMd
    After the skeleton is resolved to a pattern `format_datetime` is called so
    all timezone processing etc is the same as for that.
    :param skeleton: A date time skeleton as defined in the cldr data.
    :param datetime: the ``time`` or ``datetime`` object; if `None`, the current
                     time in UTC is used
    :param tzinfo: the time-zone to apply to the time for display
    :param fuzzy: If the skeleton is not found, allow choosing a skeleton that's
                  close enough to it.
    :param locale: a `Locale` object or a locale identifier
    """
    locale = Locale.parse(locale)
    skeletons = locale.datetime_skeletons
    if fuzzy and skeleton not in skeletons:
        # Fall back to the closest skeleton this locale actually defines.
        skeleton = match_skeleton(skeleton, skeletons)
    pattern = skeletons[skeleton]
    return format_datetime(datetime, pattern, tzinfo, locale)
#: Timedelta unit names paired with their approximate length in seconds,
#: ordered from largest to smallest.  ``format_timedelta`` iterates this in
#: order, so the ordering is significant.
TIMEDELTA_UNITS = (
    ('year', 3600 * 24 * 365),
    ('month', 3600 * 24 * 30),
    ('week', 3600 * 24 * 7),
    ('day', 3600 * 24),
    ('hour', 3600),
    ('minute', 60),
    ('second', 1)
)
def format_timedelta(delta, granularity='second', threshold=.85,
                     add_direction=False, format='long',
                     locale=LC_TIME):
    """Return a time delta according to the rules of the given locale.
    >>> format_timedelta(timedelta(weeks=12), locale='en_US')
    u'3 months'
    >>> format_timedelta(timedelta(seconds=1), locale='es')
    u'1 segundo'
    The granularity parameter can be provided to alter the lowest unit
    presented, which defaults to a second.
    >>> format_timedelta(timedelta(hours=3), granularity='day',
    ...                  locale='en_US')
    u'1 day'
    The threshold parameter can be used to determine at which value the
    presentation switches to the next higher unit. A higher threshold factor
    means the presentation will switch later. For example:
    >>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US')
    u'1 day'
    >>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US')
    u'23 hours'
    In addition directional information can be provided that informs
    the user if the date is in the past or in the future:
    >>> format_timedelta(timedelta(hours=1), add_direction=True, locale='en')
    u'in 1 hour'
    >>> format_timedelta(timedelta(hours=-1), add_direction=True, locale='en')
    u'1 hour ago'
    The format parameter controls how compact or wide the presentation is:
    >>> format_timedelta(timedelta(hours=3), format='short', locale='en')
    u'3 hr'
    >>> format_timedelta(timedelta(hours=3), format='narrow', locale='en')
    u'3h'
    :param delta: a ``timedelta`` object representing the time difference to
                  format, or the delta in seconds as an `int` value
    :param granularity: determines the smallest unit that should be displayed,
                        the value can be one of "year", "month", "week", "day",
                        "hour", "minute" or "second"
    :param threshold: factor that determines at which point the presentation
                      switches to the next higher unit
    :param add_direction: if this flag is set to `True` the return value will
                          include directional information. For instance a
                          positive timedelta will include the information about
                          it being in the future, a negative will be information
                          about the value being in the past.
    :param format: the format, can be "narrow", "short" or "long". (
                   "medium" is deprecated, currently converted to "long" to
                   maintain compatibility)
    :param locale: a `Locale` object or a locale identifier
    """
    if format not in ('narrow', 'short', 'medium', 'long'):
        raise TypeError('Format must be one of "narrow", "short" or "long"')
    if format == 'medium':
        warnings.warn('"medium" value for format param of format_timedelta'
                      ' is deprecated. Use "long" instead',
                      category=DeprecationWarning)
        format = 'long'
    # Normalize the delta to a signed number of whole seconds.
    if isinstance(delta, timedelta):
        seconds = int((delta.days * 86400) + delta.seconds)
    else:
        seconds = delta
    locale = Locale.parse(locale)
    def _iter_patterns(a_unit):
        # Yield candidate CLDR pattern dicts for the unit: directional
        # relative-time patterns first (when requested), then the plain
        # duration patterns for the chosen format width.
        if add_direction:
            unit_rel_patterns = locale._data['date_fields'][a_unit]
            if seconds >= 0:
                yield unit_rel_patterns['future']
            else:
                yield unit_rel_patterns['past']
        a_unit = 'duration-' + a_unit
        yield locale._data['unit_patterns'].get(a_unit, {}).get(format)
    # Walk units largest-to-smallest; pick the first whose magnitude reaches
    # the threshold, or stop at the requested granularity.
    for unit, secs_per_unit in TIMEDELTA_UNITS:
        value = abs(seconds) / secs_per_unit
        if value >= threshold or unit == granularity:
            if unit == granularity and value > 0:
                # Never report "0 <granularity>" for a non-zero delta.
                value = max(1, value)
            value = int(round(value))
            plural_form = locale.plural_form(value)
            pattern = None
            for patterns in _iter_patterns(unit):
                if patterns is not None:
                    pattern = patterns[plural_form]
                    break
            # This really should not happen
            if pattern is None:
                return u''
            return pattern.replace('{0}', str(value))
    return u''
def _format_fallback_interval(start, end, skeleton, tzinfo, locale):
    """Format *start* and *end* individually and join them with the
    locale's fallback interval pattern.
    """
    def _plain_date(d):
        return isinstance(d, date) and not isinstance(d, datetime)

    def _plain_time(d):
        return isinstance(d, time) and not isinstance(d, date)

    # Pick one formatter for both endpoints based on their common type.
    if skeleton in locale.datetime_skeletons:  # Use the given skeleton
        fmt = lambda dt: format_skeleton(skeleton, dt, tzinfo, locale=locale)
    elif _plain_date(start) and _plain_date(end):  # Both are just dates
        fmt = lambda dt: format_date(dt, locale=locale)
    elif _plain_time(start) and _plain_time(end):  # Both are times
        fmt = lambda dt: format_time(dt, tzinfo=tzinfo, locale=locale)
    else:
        fmt = lambda dt: format_datetime(dt, tzinfo=tzinfo, locale=locale)

    formatted_start = fmt(start)
    formatted_end = fmt(end)
    if formatted_start == formatted_end:
        return fmt(start)

    pattern = locale.interval_formats.get(None, "{0}-{1}")
    return pattern.replace("{0}", formatted_start).replace("{1}", formatted_end)
def format_interval(start, end, skeleton=None, tzinfo=None, fuzzy=True, locale=LC_TIME):
    """
    Format an interval between two instants according to the locale's rules.
    >>> format_interval(date(2016, 1, 15), date(2016, 1, 17), "yMd", locale="fi")
    u'15.\u201317.1.2016'
    >>> format_interval(time(12, 12), time(16, 16), "Hm", locale="en_GB")
    '12:12 \u2013 16:16'
    >>> format_interval(time(5, 12), time(16, 16), "hm", locale="en_US")
    '5:12 AM \u2013 4:16 PM'
    >>> format_interval(time(16, 18), time(16, 24), "Hm", locale="it")
    '16:18\u201316:24'
    If the start instant equals the end instant, the interval is formatted like the instant.
    >>> format_interval(time(16, 18), time(16, 18), "Hm", locale="it")
    '16:18'
    Unknown skeletons fall back to "default" formatting.
    >>> format_interval(date(2015, 1, 1), date(2017, 1, 1), "wzq", locale="ja")
    '2015/01/01\uff5e2017/01/01'
    >>> format_interval(time(16, 18), time(16, 24), "xxx", locale="ja")
    '16:18:00\uff5e16:24:00'
    >>> format_interval(date(2016, 1, 15), date(2016, 1, 17), "xxx", locale="de")
    '15.01.2016 \u2013 17.01.2016'
    :param start: First instant (datetime/date/time)
    :param end: Second instant (datetime/date/time)
    :param skeleton: The "skeleton format" to use for formatting.
    :param tzinfo: tzinfo to use (if none is already attached)
    :param fuzzy: If the skeleton is not found, allow choosing a skeleton that's
                  close enough to it.
    :param locale: A locale object or identifier.
    :return: Formatted interval
    """
    locale = Locale.parse(locale)
    # NB: The quote comments below are from the algorithm description in
    # http://www.unicode.org/reports/tr35/tr35-dates.html#intervalFormats
    # > Look for the intervalFormatItem element that matches the "skeleton",
    # > starting in the current locale and then following the locale fallback
    # > chain up to, but not including root.
    interval_formats = locale.interval_formats
    if skeleton not in interval_formats or not skeleton:
        # > If no match was found from the previous step, check what the closest
        # > match is in the fallback locale chain, as in availableFormats. That
        # > is, this allows for adjusting the string value field's width,
        # > including adjusting between "MMM" and "MMMM", and using different
        # > variants of the same field, such as 'v' and 'z'.
        if skeleton and fuzzy:
            skeleton = match_skeleton(skeleton, interval_formats)
        else:
            skeleton = None
        if not skeleton:  # Still no match whatsoever?
            # > Otherwise, format the start and end datetime using the fallback pattern.
            return _format_fallback_interval(start, end, skeleton, tzinfo, locale)
    # Per-field interval patterns for this skeleton.
    skel_formats = interval_formats[skeleton]
    if start == end:
        # Identical endpoints collapse to a single formatted instant.
        return format_skeleton(skeleton, start, tzinfo, fuzzy=fuzzy, locale=locale)
    start = _ensure_datetime_tzinfo(_get_datetime(start), tzinfo=tzinfo)
    end = _ensure_datetime_tzinfo(_get_datetime(end), tzinfo=tzinfo)
    start_fmt = DateTimeFormat(start, locale=locale)
    end_fmt = DateTimeFormat(end, locale=locale)
    # > If a match is found from previous steps, compute the calendar field
    # > with the greatest difference between start and end datetime. If there
    # > is no difference among any of the fields in the pattern, format as a
    # > single date using availableFormats, and return.
    for field in PATTERN_CHAR_ORDER:  # These are in largest-to-smallest order
        if field in skel_formats:
            if start_fmt.extract(field) != end_fmt.extract(field):
                # > If there is a match, use the pieces of the corresponding pattern to
                # > format the start and end datetime, as above.
                return "".join(
                    parse_pattern(pattern).apply(instant, locale)
                    for pattern, instant
                    in zip(skel_formats[field], (start, end))
                )
    # > Otherwise, format the start and end datetime using the fallback pattern.
    return _format_fallback_interval(start, end, skeleton, tzinfo, locale)
def get_period_id(time, tzinfo=None, type=None, locale=LC_TIME):
    """
    Get the day period ID for a given time.
    This ID can be used as a key for the period name dictionary.
    >>> get_period_names(locale="de")[get_period_id(time(7, 42), locale="de")]
    u'Morgen'
    :param time: The time to inspect.
    :param tzinfo: The timezone for the time. See ``format_time``.
    :param type: The period type to use. Either "selection" or None.
                 The selection type is used for selecting among phrases such as
                 “Your email arrived yesterday evening” or “Your email arrived last night”.
    :param locale: the `Locale` object, or a locale string
    :return: period ID. Something is always returned -- even if it's just "am" or "pm".
    """
    time = _get_time(time, tzinfo)
    seconds_past_midnight = int(time.hour * 3600 + time.minute * 60 + time.second)
    locale = Locale.parse(locale)
    # The LDML rules state that the rules may not overlap, so iterating in
    # arbitrary order should be alright.
    for rule_id, rules in locale.day_period_rules.get(type, {}).items():
        for rule in rules:
            # Exact-instant rules match a single second of the day.
            if "at" in rule and rule["at"] == seconds_past_midnight:
                return rule_id
            # Range rules: the period starts at "from" (inclusive) or after
            # "after" (exclusive), and ends at "to" (inclusive; this rule
            # type does not exist in the present CLDR data) or before
            # "before" (exclusive).
            start_ok = (
                ("from" in rule and seconds_past_midnight >= rule["from"]) or
                ("after" in rule and seconds_past_midnight > rule["after"])
            )
            end_ok = (
                ("to" in rule and seconds_past_midnight <= rule["to"]) or
                ("before" in rule and seconds_past_midnight < rule["before"])
            )
            if start_ok and end_ok:
                return rule_id
    # No locale rule matched; fall back to the universal am/pm split.
    return "am" if seconds_past_midnight < 43200 else "pm"
def parse_date(string, locale=LC_TIME):
    """Parse a date from a string.

    This function uses the date format for the locale as a hint to determine
    the order in which the date fields appear in the string.

    >>> parse_date('4/1/04', locale='en_US')
    datetime.date(2004, 4, 1)
    >>> parse_date('01.04.2004', locale='de_DE')
    datetime.date(2004, 4, 1)

    :param string: the string containing the date
    :param locale: a `Locale` object or a locale identifier
    """
    # TODO: try ISO format first?
    format = get_date_format(locale=locale).pattern.lower()
    year_idx = format.index('y')
    # str.find() (not str.index(), which raises ValueError) so the
    # stand-alone month symbol 'L' can actually act as a fallback.
    month_idx = format.find('m')
    if month_idx < 0:
        month_idx = format.index('l')
    day_idx = format.index('d')
    # Rank the fields by their position in the locale's pattern; the same
    # ranks index into the numbers found in the input string.
    indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')]
    indexes.sort()
    indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
    # FIXME: this currently only supports numbers, but should also support month
    #        names, both in the requested locale, and english
    numbers = re.findall(r'(\d+)', string)
    year = numbers[indexes['Y']]
    if len(year) == 2:
        # Two-digit years are assumed to be in the 21st century.
        year = 2000 + int(year)
    else:
        year = int(year)
    month = int(numbers[indexes['M']])
    day = int(numbers[indexes['D']])
    if month > 12:
        # The value in the month position can only be a day, so the locale
        # hint must have been wrong; swap the two.
        month, day = day, month
    return date(year, month, day)
def parse_time(string, locale=LC_TIME):
    """Parse a time from a string.

    This function uses the time format for the locale as a hint to determine
    the order in which the time fields appear in the string.

    >>> parse_time('15:30:00', locale='en_US')
    datetime.time(15, 30)

    :param string: the string containing the time
    :param locale: a `Locale` object or a locale identifier
    :return: the parsed time
    :rtype: `time`
    """
    # TODO: try ISO format first?
    format = get_time_format(locale=locale).pattern.lower()
    # str.find() (not str.index(), which raises ValueError) so the 24-hour
    # symbol 'k' can actually act as a fallback when 'h' is absent.
    hour_idx = format.find('h')
    if hour_idx < 0:
        hour_idx = format.index('k')
    min_idx = format.index('m')
    sec_idx = format.index('s')
    # Rank the fields by their position in the locale's pattern; the same
    # ranks index into the numbers found in the input string.
    indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')]
    indexes.sort()
    indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
    # FIXME: support 12 hour clock, and 0-based hour specification
    #        and seconds should be optional, maybe minutes too
    #        oh, and time-zones, of course
    numbers = re.findall(r'(\d+)', string)
    hour = int(numbers[indexes['H']])
    minute = int(numbers[indexes['M']])
    second = int(numbers[indexes['S']])
    return time(hour, minute, second)
class DateTimePattern(object):
    """A parsed date/time pattern paired with its %-style format string.

    Instances format a value by interpolating a ``DateTimeFormat`` wrapper
    through the ``%`` operator (see :meth:`apply`).
    """

    def __init__(self, pattern, format):
        self.pattern = pattern  # the original LDML pattern text
        self.format = format    # %-style template driven by DateTimeFormat

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.pattern)

    def __str__(self):
        # str() of a pattern is the raw pattern text.  Previously only
        # __unicode__ was defined, which Python 3 ignores, so str() fell
        # back to repr(); define __str__ and keep the Python 2 alias.
        return self.pattern

    __unicode__ = __str__

    def __mod__(self, other):
        # Only DateTimeFormat instances know how to fill the template.
        if type(other) is not DateTimeFormat:
            return NotImplemented
        return self.format % other

    def apply(self, datetime, locale):
        """Format *datetime* with this pattern for *locale*."""
        return self % DateTimeFormat(datetime, locale)
class DateTimeFormat(object):
def __init__(self, value, locale):
assert isinstance(value, (date, datetime, time))
if isinstance(value, (datetime, time)) and value.tzinfo is None:
value = value.replace(tzinfo=UTC)
self.value = value
self.locale = Locale.parse(locale)
    def __getitem__(self, name):
        """Format the field selected by pattern chunk *name*.

        *name* is a run of one repeated LDML pattern character (e.g.
        ``'yyyy'``): the character selects the field and the run length
        the width.  Raises `KeyError` for unsupported field characters.
        """
        char = name[0]
        num = len(name)
        if char == 'G':
            return self.format_era(char, num)
        elif char in ('y', 'Y', 'u'):
            return self.format_year(char, num)
        elif char in ('Q', 'q'):
            return self.format_quarter(char, num)
        elif char in ('M', 'L'):
            return self.format_month(char, num)
        elif char in ('w', 'W'):
            return self.format_week(char, num)
        elif char == 'd':
            return self.format(self.value.day, num)
        elif char == 'D':
            return self.format_day_of_year(num)
        elif char == 'F':
            return self.format_day_of_week_in_month()
        elif char in ('E', 'e', 'c'):
            return self.format_weekday(char, num)
        elif char == 'a':
            # TODO: Add support for the rest of the period formats (a*, b*, B*)
            return self.format_period(char)
        elif char == 'h':
            # 1-12 hour clock: midnight/noon display as 12
            if self.value.hour % 12 == 0:
                return self.format(12, num)
            else:
                return self.format(self.value.hour % 12, num)
        elif char == 'H':
            # 0-23 hour clock
            return self.format(self.value.hour, num)
        elif char == 'K':
            # 0-11 hour clock
            return self.format(self.value.hour % 12, num)
        elif char == 'k':
            # 1-24 hour clock: midnight displays as 24
            if self.value.hour == 0:
                return self.format(24, num)
            else:
                return self.format(self.value.hour, num)
        elif char == 'm':
            return self.format(self.value.minute, num)
        elif char == 's':
            return self.format(self.value.second, num)
        elif char == 'S':
            return self.format_frac_seconds(num)
        elif char == 'A':
            return self.format_milliseconds_in_day(num)
        elif char in ('z', 'Z', 'v', 'V', 'x', 'X', 'O'):
            return self.format_timezone(char, num)
        else:
            raise KeyError('Unsupported date/time field %r' % char)
def extract(self, char):
char = str(char)[0]
if char == 'y':
return self.value.year
elif char == 'M':
return self.value.month
elif char == 'd':
return self.value.day
elif char == 'H':
return self.value.hour
elif char == 'h':
return (self.value.hour % 12 or 12)
elif char == 'm':
return self.value.minute
elif char == 'a':
return int(self.value.hour >= 12) # 0 for am, 1 for pm
else:
raise NotImplementedError("Not implemented: extracting %r from %r" % (char, self.value))
    def format_era(self, char, num):
        """Format the era field ('G'): widths 1-3 map to the abbreviated
        name, 4 to wide and 5 to narrow.

        NOTE(review): a width above 5 would raise ``KeyError`` here; LDML
        defines era widths only up to 5 — confirm callers never exceed it.
        """
        width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
        # Era index: 0 for years < 0, 1 otherwise.
        era = int(self.value.year >= 0)
        return get_era_names(width, self.locale)[era]
def format_year(self, char, num):
value = self.value.year
if char.isupper():
week = self.get_week_number(self.get_day_of_year())
if week == 0:
value -= 1
year = self.format(value, num)
if num == 2:
year = year[-2:]
return year
def format_quarter(self, char, num):
quarter = (self.value.month - 1) // 3 + 1
if num <= 2:
return ('%%0%dd' % num) % quarter
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'Q': 'format', 'q': 'stand-alone'}[char]
return get_quarter_names(width, context, self.locale)[quarter]
def format_month(self, char, num):
if num <= 2:
return ('%%0%dd' % num) % self.value.month
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'M': 'format', 'L': 'stand-alone'}[char]
return get_month_names(width, context, self.locale)[self.value.month]
    def format_week(self, char, num):
        """Format a week field: lower-case 'w' is week of year, upper-case
        'W' is week of month.

        A computed week number of 0 means the day counts as part of the
        last week of the previous period, so the week number is
        recomputed relative to that previous period.
        """
        if char.islower():  # week of year
            day_of_year = self.get_day_of_year()
            week = self.get_week_number(day_of_year)
            if week == 0:
                # Belongs to the final week of the previous year.
                date = self.value - timedelta(days=day_of_year)
                week = self.get_week_number(self.get_day_of_year(date),
                                            date.weekday())
            return self.format(week, num)
        else:  # week of month
            week = self.get_week_number(self.value.day)
            if week == 0:
                # Belongs to the final week of the previous month.
                date = self.value - timedelta(days=self.value.day)
                week = self.get_week_number(date.day, date.weekday())
            return '%d' % week
def format_weekday(self, char='E', num=4):
"""
Return weekday from parsed datetime according to format pattern.
>>> format = DateTimeFormat(date(2016, 2, 28), Locale.parse('en_US'))
>>> format.format_weekday()
u'Sunday'
'E': Day of week - Use one through three letters for the abbreviated day name, four for the full (wide) name,
five for the narrow name, or six for the short name.
>>> format.format_weekday('E',2)
u'Sun'
'e': Local day of week. Same as E except adds a numeric value that will depend on the local starting day of the
week, using one or two letters. For this example, Monday is the first day of the week.
>>> format.format_weekday('e',2)
'01'
'c': Stand-Alone local day of week - Use one letter for the local numeric value (same as 'e'), three for the
abbreviated day name, four for the full (wide) name, five for the narrow name, or six for the short name.
>>> format.format_weekday('c',1)
'1'
:param char: pattern format character ('e','E','c')
:param num: count of format character
"""
if num < 3:
if char.islower():
value = 7 - self.locale.first_week_day + self.value.weekday()
return self.format(value % 7 + 1, num)
num = 3
weekday = self.value.weekday()
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow', 6: 'short'}[num]
if char == 'c':
context = 'stand-alone'
else:
context = 'format'
return get_day_names(width, context, self.locale)[weekday]
def format_day_of_year(self, num):
return self.format(self.get_day_of_year(), num)
def format_day_of_week_in_month(self):
return '%d' % ((self.value.day - 1) // 7 + 1)
def format_period(self, char):
period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)]
for width in ('wide', 'narrow', 'abbreviated'):
period_names = get_period_names(context='format', width=width, locale=self.locale)
if period in period_names:
return period_names[period]
raise ValueError('Could not format period %s in %s' % (period, self.locale))
def format_frac_seconds(self, num):
""" Return fractional seconds.
Rounds the time's microseconds to the precision given by the number \
of digits passed in.
"""
value = self.value.microsecond / 1000000
return self.format(round(value, num) * 10**num, num)
def format_milliseconds_in_day(self, num):
msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
self.value.minute * 60000 + self.value.hour * 3600000
return self.format(msecs, num)
    def format_timezone(self, char, num):
        """Format a timezone field ('z', 'Z', 'O', 'v', 'V', 'x', 'X').

        The repetition count selects the name style used by the helper
        functions: 3 -> 'short', 4 -> 'long', 5 -> 'iso8601' (counts
        below 3 share the short form).  Char/num combinations with no
        branch below fall through and return None.
        """
        width = {3: 'short', 4: 'long', 5: 'iso8601'}[max(3, num)]
        if char == 'z':
            return get_timezone_name(self.value, width, locale=self.locale)
        elif char == 'Z':
            # 'ZZZZZ' is the ISO 8601 form where UTC renders as 'Z'.
            if num == 5:
                return get_timezone_gmt(self.value, width, locale=self.locale, return_z=True)
            return get_timezone_gmt(self.value, width, locale=self.locale)
        elif char == 'O':
            if num == 4:
                return get_timezone_gmt(self.value, width, locale=self.locale)
            # TODO: To add support for O:1
        elif char == 'v':
            return get_timezone_name(self.value.tzinfo, width,
                                     locale=self.locale)
        elif char == 'V':
            if num == 1:
                return get_timezone_name(self.value.tzinfo, width,
                                         uncommon=True, locale=self.locale)
            elif num == 2:
                return get_timezone_name(self.value.tzinfo, locale=self.locale, return_zone=True)
            elif num == 3:
                return get_timezone_location(self.value.tzinfo, locale=self.locale, return_city=True)
            return get_timezone_location(self.value.tzinfo, locale=self.locale)
        # Included additional elif condition to add support for 'Xx' in timezone format
        elif char == 'X':
            # 'X' variants render UTC as 'Z'; 'x' variants never do.
            if num == 1:
                return get_timezone_gmt(self.value, width='iso8601_short', locale=self.locale,
                                        return_z=True)
            elif num in (2, 4):
                return get_timezone_gmt(self.value, width='short', locale=self.locale,
                                        return_z=True)
            elif num in (3, 5):
                return get_timezone_gmt(self.value, width='iso8601', locale=self.locale,
                                        return_z=True)
        elif char == 'x':
            if num == 1:
                return get_timezone_gmt(self.value, width='iso8601_short', locale=self.locale)
            elif num in (2, 4):
                return get_timezone_gmt(self.value, width='short', locale=self.locale)
            elif num in (3, 5):
                return get_timezone_gmt(self.value, width='iso8601', locale=self.locale)
def format(self, value, length):
return ('%%0%dd' % length) % value
def get_day_of_year(self, date=None):
if date is None:
date = self.value
return (date - date.replace(month=1, day=1)).days + 1
    def get_week_number(self, day_of_period, day_of_week=None):
        """Return the number of the week of a day within a period. This may be
        the week number in a year or the week number in a month.
        Usually this will return a value equal to or greater than 1, but if the
        first week of the period is so short that it actually counts as the last
        week of the previous period, this function will return 0.
        >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
        >>> format.get_week_number(6)
        1
        >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
        >>> format.get_week_number(6)
        2
        :param day_of_period: the number of the day in the period (usually
                              either the day of month or the day of year)
        :param day_of_week: the week day; if ommitted, the week day of the
                            current date is assumed
        """
        if day_of_week is None:
            day_of_week = self.value.weekday()
        # Offset (0-6) of the period's first day relative to the locale's
        # first day of the week.
        first_day = (day_of_week - self.locale.first_week_day -
                     day_of_period + 1) % 7
        if first_day < 0:
            first_day += 7
        week_number = (day_of_period + first_day - 1) // 7
        # The partial first week only counts as week 1 if it contains at
        # least ``min_week_days`` days; otherwise it is week 0 (the last
        # week of the previous period).
        if 7 - first_day >= self.locale.min_week_days:
            week_number += 1
        return week_number
#: Mapping of pattern character -> list of valid repetition counts,
#: or None when any count is accepted.
PATTERN_CHARS = {
    'G': [1, 2, 3, 4, 5],                                           # era
    'y': None, 'Y': None, 'u': None,                                # year
    'Q': [1, 2, 3, 4, 5], 'q': [1, 2, 3, 4, 5],                     # quarter
    'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5],                     # month
    'w': [1, 2], 'W': [1],                                          # week
    'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None,               # day
    'E': [1, 2, 3, 4, 5, 6], 'e': [1, 2, 3, 4, 5, 6], 'c': [1, 3, 4, 5, 6],  # week day
    'a': [1],                                                       # period
    'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2],             # hour
    'm': [1, 2],                                                    # minute
    's': [1, 2], 'S': None, 'A': None,                              # second
    'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4, 5], 'O': [1, 4], 'v': [1, 4],  # zone
    'V': [1, 2, 3, 4], 'x': [1, 2, 3, 4, 5], 'X': [1, 2, 3, 4, 5]   # zone
}
#: The pattern characters declared in the Date Field Symbol Table
#: (http://www.unicode.org/reports/tr35/tr35-dates.html#Date_Field_Symbol_Table)
#: in order of decreasing magnitude.
PATTERN_CHAR_ORDER = "GyYuUQqMLlwWdDFgEecabBChHKkjJmsSAzZOvVXx"
# Cache of parsed patterns, keyed by the original pattern string.
_pattern_cache = {}
def parse_pattern(pattern):
    """Parse date, time, and datetime format patterns.
    >>> parse_pattern("MMMMd").format
    u'%(MMMM)s%(d)s'
    >>> parse_pattern("MMM d, yyyy").format
    u'%(MMM)s %(d)s, %(yyyy)s'
    Pattern can contain literal strings in single quotes:
    >>> parse_pattern("H:mm' Uhr 'z").format
    u'%(H)s:%(mm)s Uhr %(z)s'
    An actual single quote can be used by using two adjacent single quote
    characters:
    >>> parse_pattern("hh' o''clock'").format
    u"%(hh)s o'clock"
    :param pattern: the formatting pattern to parse
    """
    # Already-parsed patterns pass straight through; string patterns are
    # cached by their literal text.
    if type(pattern) is DateTimePattern:
        return pattern
    try:
        return _pattern_cache[pattern]
    except KeyError:
        pass
    pieces = []
    for tok_type, tok_value in tokenize_pattern(pattern):
        if tok_type == "chars":
            # Literal text: escape '%' so it survives %-formatting.
            pieces.append(tok_value.replace('%', '%%'))
        elif tok_type == "field":
            fieldchar, fieldnum = tok_value
            limit = PATTERN_CHARS[fieldchar]
            if limit and fieldnum not in limit:
                raise ValueError('Invalid length for field: %r'
                                 % (fieldchar * fieldnum))
            pieces.append('%%(%s)s' % (fieldchar * fieldnum))
        else:
            raise NotImplementedError("Unknown token type: %s" % tok_type)
    parsed = DateTimePattern(pattern, u''.join(pieces))
    _pattern_cache[pattern] = parsed
    return parsed
def tokenize_pattern(pattern):
    """
    Tokenize date format patterns.
    Returns a list of (token_type, token_value) tuples.
    ``token_type`` may be either "chars" or "field".
    For "chars" tokens, the value is the literal value.
    For "field" tokens, the value is a tuple of (field character, repetition count).
    :param pattern: Pattern string
    :type pattern: str
    :rtype: list[tuple]
    """
    result = []
    quotebuf = None  # None while outside quotes; a list of chars inside
    charbuf = []     # pending literal characters
    # One-element lists act as mutable closure cells (Python 2 has no
    # ``nonlocal``): the current field character and its repeat count.
    fieldchar = ['']
    fieldnum = [0]
    def append_chars():
        # NUL below is the placeholder for an escaped ('') single quote.
        result.append(('chars', ''.join(charbuf).replace('\0', "'")))
        del charbuf[:]
    def append_field():
        result.append(('field', (fieldchar[0], fieldnum[0])))
        fieldchar[0] = ''
        fieldnum[0] = 0
    # Doubled quotes are replaced with NUL first so the quote state
    # machine below only ever sees real quote delimiters.
    for idx, char in enumerate(pattern.replace("''", '\0')):
        if quotebuf is None:
            if char == "'":  # quote started
                if fieldchar[0]:
                    append_field()
                elif charbuf:
                    append_chars()
                quotebuf = []
            elif char in PATTERN_CHARS:
                if charbuf:
                    append_chars()
                if char == fieldchar[0]:
                    fieldnum[0] += 1
                else:
                    if fieldchar[0]:
                        append_field()
                    fieldchar[0] = char
                    fieldnum[0] = 1
            else:
                if fieldchar[0]:
                    append_field()
                charbuf.append(char)
        elif quotebuf is not None:
            if char == "'":  # end of quote
                charbuf.extend(quotebuf)
                quotebuf = None
            else:  # inside quote
                quotebuf.append(char)
    # Flush whatever state is still pending at end of input.
    if fieldchar[0]:
        append_field()
    elif charbuf:
        append_chars()
    return result
def untokenize_pattern(tokens):
    """
    Turn a date format pattern token stream back into a string.
    This is the reverse operation of ``tokenize_pattern``.
    :type tokens: Iterable[tuple]
    :rtype: str
    """
    pieces = []
    for tok_type, tok_value in tokens:
        if tok_type == "field":
            char, count = tok_value
            pieces.append(char * count)
        elif tok_type == "chars":
            if any(ch in PATTERN_CHARS for ch in tok_value):
                # Literal text containing pattern letters must be quoted,
                # with embedded quotes doubled.
                pieces.append("'%s'" % tok_value.replace("'", "''"))
            else:
                pieces.append(tok_value)
    return "".join(pieces)
def split_interval_pattern(pattern):
    """
    Split an interval-describing datetime pattern into multiple pieces.
    > The pattern is then designed to be broken up into two pieces by determining the first repeating field.
    - http://www.unicode.org/reports/tr35/tr35-dates.html#intervalFormats
    >>> split_interval_pattern(u'E d.M. \u2013 E d.M.')
    [u'E d.M. \u2013 ', 'E d.M.']
    >>> split_interval_pattern("Y 'text' Y 'more text'")
    ["Y 'text '", "Y 'more text'"]
    >>> split_interval_pattern(u"E, MMM d \u2013 E")
    [u'E, MMM d \u2013 ', u'E']
    >>> split_interval_pattern("MMM d")
    ['MMM d']
    >>> split_interval_pattern("y G")
    ['y G']
    >>> split_interval_pattern(u"MMM d \u2013 d")
    [u'MMM d \u2013 ', u'd']
    :param pattern: Interval pattern string
    :return: list of "subpatterns"
    """
    subpatterns = [[]]
    fields_in_current = set()
    for token in tokenize_pattern(pattern):
        tok_type, tok_value = token
        if tok_type == "field":
            field_char = tok_value[0]
            if field_char in fields_in_current:
                # First repeated field: a new subpattern begins here.
                subpatterns.append([])
                fields_in_current = set()
            fields_in_current.add(field_char)
        subpatterns[-1].append(token)
    return [untokenize_pattern(tokens) for tokens in subpatterns]
def match_skeleton(skeleton, options, allow_different_fields=False):
    """
    Find the closest match for the given datetime skeleton among the options given.
    This uses the rules outlined in the TR35 document.
    >>> match_skeleton('yMMd', ('yMd', 'yMMMd'))
    'yMd'
    >>> match_skeleton('yMMd', ('jyMMd',), allow_different_fields=True)
    'jyMMd'
    >>> match_skeleton('yMMd', ('qyMMd',), allow_different_fields=False)
    >>> match_skeleton('hmz', ('hmv',))
    'hmv'
    :param skeleton: The skeleton to match
    :type skeleton: str
    :param options: An iterable of other skeletons to match against
    :type options: Iterable[str]
    :param allow_different_fields: whether an option may contain fields
                                   absent from the skeleton (and vice versa)
    :return: The closest skeleton match, or if no match was found, None.
    :rtype: str|None
    """
    # TODO: maybe implement pattern expansion?
    # Based on the implementation in
    # http://source.icu-project.org/repos/icu/icu4j/trunk/main/classes/core/src/com/ibm/icu/text/DateIntervalInfo.java
    # Filter out falsy values and sort for stability; when `interval_formats` is passed in, there may be a None key.
    options = sorted(option for option in options if option)
    if 'z' in skeleton and not any('z' in option for option in options):
        # No option has a specific zone ('z'); fall back to generic ('v').
        skeleton = skeleton.replace('z', 'v')
    get_input_field_width = dict(t[1] for t in tokenize_pattern(skeleton) if t[0] == "field").get
    best_skeleton = None
    best_distance = None
    for option in options:
        get_opt_field_width = dict(t[1] for t in tokenize_pattern(option) if t[0] == "field").get
        distance = 0
        for field in PATTERN_CHARS:
            input_width = get_input_field_width(field, 0)
            opt_width = get_opt_field_width(field, 0)
            if input_width == opt_width:
                continue
            if opt_width == 0 or input_width == 0:
                if not allow_different_fields:  # This one is not okay
                    option = None
                    break
                distance += 0x1000  # Magic weight constant for "entirely different fields"
            elif field == 'M' and ((input_width > 2 and opt_width <= 2) or (input_width <= 2 and opt_width > 2)):
                distance += 0x100  # Magic weight for "text turns into a number"
            else:
                distance += abs(input_width - opt_width)
        if not option:  # We lost the option along the way (probably due to "allow_different_fields")
            continue
        if not best_skeleton or distance < best_distance:
            best_skeleton = option
            best_distance = distance
        if distance == 0:  # Found a perfect match!
            break
    return best_skeleton
| 37.57151
| 119
| 0.613546
|
4a0bbddbc112272baa309235058082b5e2be9ab0
| 3,519
|
py
|
Python
|
gpgraph/tests/test_base.py
|
lperezmo/gpgraph
|
7c62a8181734be8a6bc46a434839a06808736a2c
|
[
"Unlicense"
] | null | null | null |
gpgraph/tests/test_base.py
|
lperezmo/gpgraph
|
7c62a8181734be8a6bc46a434839a06808736a2c
|
[
"Unlicense"
] | 1
|
2020-07-02T21:55:17.000Z
|
2020-07-02T21:55:17.000Z
|
gpgraph/tests/test_base.py
|
lperezmo/gpgraph
|
7c62a8181734be8a6bc46a434839a06808736a2c
|
[
"Unlicense"
] | 3
|
2019-04-24T20:42:43.000Z
|
2020-06-25T04:12:37.000Z
|
import pytest
from gpmap.gpm import GenotypePhenotypeMap
from gpgraph.base import get_neighbors, GenotypePhenotypeGraph
import numpy as np
import time
@pytest.fixture
def gpmap_base():
    """Fixture: a small three-site genotype-phenotype map."""
    wt = "AAA"
    genos = ["AAA", "AAT", "ATA", "TAA", "ATT", "TAT", "TTA", "TTT"]
    phenos = [0.1, 0.2, 0.2, 0.6, 0.4, 0.6, 1.0, 1.1]
    errs = [0.05] * 8
    return GenotypePhenotypeMap(wt, genos, phenos, stdeviations=errs)
@pytest.fixture
def gpgraph_test(gpmap_base):
    """Fixture: graph built from the base genotype-phenotype map."""
    return GenotypePhenotypeGraph(gpmap_base)
@pytest.fixture
def binary_gpgraph():
    # Neighbors of '000' in a three-site binary genotype space.
    gpbinary = get_neighbors('000', {0: ['0', '1'], 1: ['0', '1'], 2: ['0', '1']})
    # NOTE(review): ndarray.sort() sorts in place and returns None, so this
    # fixture actually yields None, not a sorted array.  The comparisons in
    # test_get_neighbors therefore compare None to None and pass vacuously.
    # Consider ``np.sort(np.array(gpbinary))`` here and on the expected side.
    gpbinary = np.array(gpbinary).sort()
    return gpbinary
def test_attributes(gpmap_base):
    """Data stored on the map matches what was passed to the constructor."""
    assert gpmap_base.wildtype == "AAA"
    expected_genotypes = np.array(["AAA", "AAT", "ATA", "TAA", "ATT", "TAT", "TTA", "TTT"])
    np.testing.assert_array_equal(gpmap_base.genotypes, expected_genotypes)
    expected_phenotypes = np.array([0.1, 0.2, 0.2, 0.6, 0.4, 0.6, 1.0, 1.1])
    np.testing.assert_array_equal(gpmap_base.phenotypes, expected_phenotypes)
    expected_stdeviations = np.array([0.05] * 8)
    np.testing.assert_array_equal(gpmap_base.stdeviations, expected_stdeviations)
def test_get_neighbors(gpmap_base, binary_gpgraph):
    """Test if the function can find all neighboring nodes."""
    # NOTE(review): ndarray.sort() is in-place and returns None, so both
    # sides of every comparison below are None and the assertions pass
    # vacuously.  Use ``np.sort(...)`` on both sides (and fix the
    # binary_gpgraph fixture accordingly) to make this test meaningful.
    found_neighbors = np.array(get_neighbors(gpmap_base.genotypes[1], gpmap_base.mutations)).sort()
    desired_neighbors = np.array(['TAT', 'TTT', 'TTA']).sort()
    np.testing.assert_array_equal(found_neighbors,
                                  desired_neighbors)
    np.testing.assert_array_equal(binary_gpgraph, np.array(['100', '110', '111']).sort())
def test_type(gpgraph_test):
    """The fixture yields a GenotypePhenotypeGraph instance."""
    graph = gpgraph_test
    assert isinstance(graph, GenotypePhenotypeGraph)
def test_add_gpm(gpgraph_test, gpmap_base):
    """The graph retains the genotype-phenotype map it was built from."""
    attached = gpgraph_test.gpm
    assert attached == gpmap_base
def test_read_json(gpgraph_test):
    """A graph can be reconstructed from a JSON file."""
    loaded = GenotypePhenotypeGraph.read_json("data/test_data.json")
    # An instance was created ...
    assert isinstance(loaded, GenotypePhenotypeGraph)
    # ... and its nodes match the in-memory graph.
    np.testing.assert_array_equal(gpgraph_test.nodes, loaded.nodes)
def test_read_csv(gpgraph_test):
    """A graph can be reconstructed from an external CSV file."""
    loaded = GenotypePhenotypeGraph.read_csv("data/test_data_external.csv", wildtype='RDDWKAQ')
    assert isinstance(loaded, GenotypePhenotypeGraph)
def test_data_integrity_csv(gpmap_base):
    """Round-trip the base map through CSV and compare the stored data.

    The map is exported first because its phenotypes are randomly
    generated; the short pause gives the file time to appear on disk.
    """
    gpmap_base.to_csv(filename="data/test_data.csv")
    time.sleep(1)
    reloaded = GenotypePhenotypeMap.read_csv(fname="data/test_data.csv", wildtype='AAA')
    np.testing.assert_array_equal(gpmap_base.genotypes, reloaded.genotypes)
    np.testing.assert_array_equal(gpmap_base.phenotypes, reloaded.phenotypes)
    np.testing.assert_array_equal(gpmap_base.mutations, reloaded.mutations)
    np.testing.assert_array_equal(gpmap_base.binary, reloaded.binary)
| 37.43617
| 101
| 0.677749
|
4a0bbe5cce2650418164ab3c34905870982aca19
| 947
|
py
|
Python
|
reqs/pk_raffle_handler.py
|
paipaitou/bili2.0
|
73dbe4738706b05fce57106544e4a784ccb52760
|
[
"MIT"
] | null | null | null |
reqs/pk_raffle_handler.py
|
paipaitou/bili2.0
|
73dbe4738706b05fce57106544e4a784ccb52760
|
[
"MIT"
] | null | null | null |
reqs/pk_raffle_handler.py
|
paipaitou/bili2.0
|
73dbe4738706b05fce57106544e4a784ccb52760
|
[
"MIT"
] | null | null | null |
from bili_global import API_LIVE
from json_rsp_ctrl import ZERO_ONLY_CTRL
class PkRaffleHandlerReq:
    """HTTP requests for bilibili live-room PK raffles."""

    @staticmethod
    async def check(user, real_roomid):
        """Query the lottery state of a room; returns the parsed JSON."""
        url = f'{API_LIVE}/xlive/lottery-interface/v1/lottery/Check?roomid={real_roomid}'
        json_rsp = await user.bililive_session.request_json('GET', url, ctrl=ZERO_ONLY_CTRL)
        print('PK', json_rsp)
        return json_rsp

    @staticmethod
    async def join(user, real_roomid, raffle_id):
        """Join a PK raffle in a room; returns the parsed JSON response."""
        url = f"{API_LIVE}/xlive/lottery-interface/v1/pk/join"
        csrf = user.dict_bili['csrf']
        payload = {
            'roomid': real_roomid,
            'id': raffle_id,
            'csrf_token': csrf,
            'csrf': csrf,
        }
        return await user.bililive_session.request_json(
            'POST', url, data=payload, headers=user.dict_bili['pcheaders'])
| 35.074074
| 97
| 0.594509
|
4a0bbee905688be1158f8769495dae907375dba0
| 514
|
py
|
Python
|
src/application/forms.py
|
yakudzam/flask-gae-blog
|
ab422a72f93ddc838dca9bc139ddcac978306937
|
[
"MIT",
"CC-BY-3.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
src/application/forms.py
|
yakudzam/flask-gae-blog
|
ab422a72f93ddc838dca9bc139ddcac978306937
|
[
"MIT",
"CC-BY-3.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
src/application/forms.py
|
yakudzam/flask-gae-blog
|
ab422a72f93ddc838dca9bc139ddcac978306937
|
[
"MIT",
"CC-BY-3.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
forms.py
Web forms based on Flask-WTForms
See: http://flask.pocoo.org/docs/patterns/wtforms/
http://wtforms.simplecodes.com/
"""
from lib.wtforms import Form
from lib.wtforms import validators
from lib.wtforms.ext.appengine.ndb import model_form
from models import Post
# App Engine ndb model form example
# PostForm is generated from the Post ndb model; both the title and body
# fields are made mandatory via WTForms validators.
PostForm = model_form(Post, Form, field_args={'title': {"validators": [validators.Required()]},
                                              'body': {"validators": [validators.Required()]}})
| 24.47619
| 95
| 0.673152
|
4a0bbf9c91dce0dba9876e54704308da4fa921ad
| 11,491
|
py
|
Python
|
salt/modules/npm.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:21.000Z
|
2020-01-02T09:03:21.000Z
|
salt/modules/npm.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/npm.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:24.000Z
|
2020-01-02T09:03:24.000Z
|
# -*- coding: utf-8 -*-
'''
Manage and query NPM packages.
'''
from __future__ import absolute_import
try:
from shlex import quote as _cmd_quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _cmd_quote
# Import python libs
import json
import logging
# Import salt libs
import salt.utils
import salt.utils.path
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Module-level logger (standard salt execution-module convention).
log = logging.getLogger(__name__)
# Function alias to make sure not to shadow built-in's
__func_alias__ = {
    'list_': 'list'
}
def __virtual__():
    '''
    Only work when npm is installed.
    '''
    try:
        if salt.utils.path.which('npm') is None:
            return (False, 'npm execution module could not be loaded '
                    'because the npm binary could not be located')
        # Raises CommandExecutionError when npm is too old.
        _check_valid_version()
        return True
    except CommandExecutionError as exc:
        return (False, str(exc))
def _check_valid_version():
    '''
    Check the version of npm to ensure this module will work. Currently
    npm must be at least version 1.2.
    '''
    # pylint: disable=no-member
    installed = _LooseVersion(
        salt.modules.cmdmod.run('npm --version', output_loglevel='quiet'))
    minimum = _LooseVersion('1.2')
    # pylint: enable=no-member
    if installed < minimum:
        raise CommandExecutionError(
            '\'npm\' is not recent enough({0} < {1}). Please Upgrade.'.format(
                installed, minimum
            )
        )
def install(pkg=None,
            pkgs=None,
            dir=None,
            runas=None,
            registry=None,
            env=None,
            dry_run=False,
            silent=True):
    '''
    Install an NPM package.
    If no directory is specified, the package will be installed globally. If
    no package is specified, the dependencies (from package.json) of the
    package in the given directory will be installed.
    pkg
        A package name in any format accepted by NPM, including a version
        identifier
    pkgs
        A list of package names in the same format as the ``name`` parameter
        .. versionadded:: 2014.7.0
    dir
        The target directory in which to install the package, or None for
        global installation
    runas
        The user to run NPM with
    registry
        The NPM registry to install the package from.
        .. versionadded:: 2014.7.0
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
        .. versionadded:: 2014.7.0
    silent
        Whether or not to run NPM install with --silent flag.
        .. versionadded:: 2015.8.5
    dry_run
        Whether or not to run NPM install with --dry-run flag.
        .. versionadded:: 2015.8.4
    CLI Example:
    .. code-block:: bash
        salt '*' npm.install coffee-script
        salt '*' npm.install coffee-script@1.0.1
    '''
    # Protect against injection
    if pkg:
        pkgs = [_cmd_quote(pkg)]
    elif pkgs:
        pkgs = [_cmd_quote(v) for v in pkgs]
    else:
        pkgs = []
    if registry:
        registry = _cmd_quote(registry)
    # Assemble the npm command line piece by piece.
    cmd = ['npm', 'install', '--json']
    if silent:
        cmd.append('--silent')
    if not dir:
        cmd.append('--global')
    if registry:
        cmd.append('--registry="{0}"'.format(registry))
    if dry_run:
        cmd.append('--dry-run')
    cmd.extend(pkgs)
    env = env or {}
    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            # NOTE(review): ``b'{0}'.format(uid)`` relies on Python 2
            # str/bytes equivalence; on Python 3 bytes has no .format()
            # and this line raises AttributeError -- confirm the intended
            # interpreter before relying on ``runas`` here.
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
    cmd = ' '.join(cmd)
    result = __salt__['cmd.run_all'](cmd, python_shell=True, cwd=dir, runas=runas, env=env)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    # npm >1.2.21 is putting the output to stderr even though retcode is 0
    npm_output = result['stdout'] or result['stderr']
    try:
        return json.loads(npm_output)
    except ValueError:
        pass
    # Fall back to salvaging the JSON document from noisy output.
    json_npm_output = _extract_json(npm_output)
    return json_npm_output or npm_output
def _extract_json(npm_output):
    '''
    Pull the JSON document out of mixed npm output, or return None.
    npm sometimes surrounds its JSON report with extra noise, which is
    stripped here before parsing.
    '''
    lines = npm_output.splitlines()
    log.error(lines)
    # Drop leading lines until the JSON document opens ...
    while lines and not lines[0].startswith(('{', '[')):
        lines = lines[1:]
    # ... and trailing lines after it closes.
    while lines and not lines[-1].startswith(('}', ']')):
        lines = lines[:-1]
    # macOS with fsevents includes the following line in the return
    # when a new module is installed which is invalid JSON:
    # [fsevents] Success: "..."
    while lines and lines[0].startswith(('[fsevents]', 'Pass ')):
        lines = lines[1:]
    try:
        return json.loads(''.join(lines))
    except ValueError:
        return None
def uninstall(pkg, dir=None, runas=None, env=None):
    '''
    Uninstall an NPM package.
    If no directory is specified, the package will be uninstalled globally.
    pkg
        A package name in any format accepted by NPM
    dir
        The target directory from which to uninstall the package, or None for
        global installation
    runas
        The user to run NPM with
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
        .. versionadded:: 2015.5.3
    CLI Example:
    .. code-block:: bash
        salt '*' npm.uninstall coffee-script
    '''
    # Quote the package name to protect against shell injection.
    if pkg:
        pkg = _cmd_quote(pkg)
    env = env or {}
    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
    cmd_parts = ['npm', 'uninstall', '"{0}"'.format(pkg)]
    if not dir:
        cmd_parts.append('--global')
    result = __salt__['cmd.run_all'](' '.join(cmd_parts), python_shell=True,
                                     cwd=dir, runas=runas, env=env)
    if result['retcode'] != 0:
        log.error(result['stderr'])
        return False
    return True
def list_(pkg=None, dir=None, runas=None, env=None, depth=None):
    '''
    List installed NPM packages.
    If no directory is specified, this will return the list of globally-
    installed packages.
    pkg
        Limit package listing by name
    dir
        The directory whose packages will be listed, or None for global
        installation
    runas
        The user to run NPM with
        .. versionadded:: 2014.7.0
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
        .. versionadded:: 2014.7.0
    depth
        Limit the depth of the packages listed
        .. versionadded:: 2016.11.6, 2017.7.0
    CLI Example:
    .. code-block:: bash
        salt '*' npm.list
    '''
    env = env or {}
    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
    cmd_parts = ['npm', 'list', '--json', '--silent']
    if not dir:
        cmd_parts.append('--global')
    if depth is not None:
        if not isinstance(depth, (int, float)):
            raise salt.exceptions.SaltInvocationError('Error: depth {0} must be a number'.format(depth))
        cmd_parts.append('--depth={0}'.format(int(depth)))
    if pkg:
        # Quote the package name to protect against shell injection.
        cmd_parts.append('"{0}"'.format(_cmd_quote(pkg)))
    result = __salt__['cmd.run_all'](
        ' '.join(cmd_parts), cwd=dir, runas=runas, env=env,
        python_shell=True, ignore_retcode=True)
    # npm exits 1 both when no packages are found and on real errors;
    # only a non-empty stderr distinguishes an actual failure.
    if result['retcode'] != 0 and result['stderr']:
        raise CommandExecutionError(result['stderr'])
    return json.loads(result['stdout']).get('dependencies', {})
def cache_clean(path=None, runas=None, env=None, force=False):
    '''
    Clean cached NPM packages.
    If no path for a specific package is provided the entire cache will be cleared.
    path
        The cache subpath to delete, or None to clear the entire cache
    runas
        The user to run NPM with
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
    force
        Force cleaning of cache. Required for npm@5 and greater
        .. versionadded:: 2016.11.6
    CLI Example:
    .. code-block:: bash
        salt '*' npm.cache_clean force=True
    '''
    env = env or {}
    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
    cmd_parts = ['npm', 'cache', 'clean']
    if path:
        cmd_parts.append(path)
    if force is True:
        cmd_parts.append('--force')
    result = __salt__['cmd.run_all'](
        ' '.join(cmd_parts), cwd=None, runas=runas, env=env,
        python_shell=True, ignore_retcode=True)
    if result['retcode'] != 0:
        log.error(result['stderr'])
        return False
    return True
def cache_list(path=None, runas=None, env=None):
    '''
    List NPM cached packages.
    If no path for a specific package is provided this will list all the cached packages.
    path
        The cache subpath to list, or None to list the entire cache
    runas
        The user to run NPM with
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
    CLI Example:
    .. code-block:: bash
        salt '*' npm.cache_clean
    '''
    env = env or {}
    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
    cmd_parts = ['npm', 'cache', 'ls']
    if path:
        cmd_parts.append(path)
    result = __salt__['cmd.run_all'](
        ' '.join(cmd_parts), cwd=None, runas=runas, env=env,
        python_shell=True, ignore_retcode=True)
    # Only a non-empty stderr signals an actual failure.
    if result['retcode'] != 0 and result['stderr']:
        raise CommandExecutionError(result['stderr'])
    return result['stdout']
def cache_path(runas=None, env=None):
    '''
    List path of the NPM cache directory.
    runas
        The user to run NPM with
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
    CLI Example:
    .. code-block:: bash
        salt '*' npm.cache_path
    '''
    env = env or {}
    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
    result = __salt__['cmd.run_all'](
        'npm config get cache', cwd=None, runas=runas, env=env,
        python_shell=True, ignore_retcode=True)
    return result.get('stdout') or result.get('stderr')
| 25.14442
| 104
| 0.606127
|
4a0bc08c7a375c724e5e5f65271a7ec7f48729cf
| 7,572
|
py
|
Python
|
example_app/faceRecognition_notifications.py
|
SIOTLAB/EdgeAP
|
c644aaefe17d9b855c82ed2ccbd4bfd28bc05024
|
[
"Apache-2.0"
] | 1
|
2020-10-12T02:10:46.000Z
|
2020-10-12T02:10:46.000Z
|
example_app/faceRecognition_notifications.py
|
SIOTLAB/EdgeAP
|
c644aaefe17d9b855c82ed2ccbd4bfd28bc05024
|
[
"Apache-2.0"
] | null | null | null |
example_app/faceRecognition_notifications.py
|
SIOTLAB/EdgeAP
|
c644aaefe17d9b855c82ed2ccbd4bfd28bc05024
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
import os
import sys
from twilio.rest import Client
import RPi.GPIO as GPIO
import time
import json
import argparse
# CLI options
argparser = argparse.ArgumentParser(description='Face Detection application with text notifications')
argparser.add_argument('-c', '--config', type=str,
                    default='config',
                    help='configuration file containing user ids (default is \'config\')')
args = argparser.parse_args()
config = args.config
# Fail fast when the config file is missing.
if not os.path.isfile(config):
    print("Error: config file \'{}\' does not exist".format(config), file=sys.stderr)
    sys.exit(-1)
# get names associated with user ids; index 0 ("None") stands for
# unrecognized faces, so recognizer ids map 1:1 onto this list.
names = None
try:
    with open(config, 'r') as f:
        c = json.load(f)
        names = ["None"] + c["ids"]
except Exception as e:
    print(e)
    print("Error parsing config file", file=sys.stderr)
    sys.exit(-1)
# Motion sensor setup.
# FOR EASY HELP: run command "pinout"
GPIO.setmode(GPIO.BOARD)  # numbering pins based on their actual order on the board, not their label
GPIO.setup(11, GPIO.IN)
# Twilio SMS setup: message templates and credentials.
person_recognized = "RPi has detected {0}."
person_unrecognized = "RPi has detected a stranger."
# Your Account Sid and Auth Token from twilio.com/console
# Your recipient's phone number
# Make sure to run `source ./twilio.env` to set env vars
# NOTE(review): these raise KeyError when the env vars are unset --
# consider a friendlier error message like the config check above.
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
outgoing = os.environ['TWILIO_OUTGOING']
recipient = os.environ['TWILIO_RECIPIENT']
client = Client(account_sid, auth_token)
# Face classifier: pre-trained LBPH model plus Haar cascade detector.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath);
font = cv2.FONT_HERSHEY_SIMPLEX
# initiate id counter
# NOTE(review): ``id`` shadows the built-in of the same name.
id = 0
# names related to ids: example ==> Marcelo: id=1, etc
#ames = ['None', "Cyrus","Philip","Jake","Michael","Chris","Justin"]
# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
print("Everything set up and ready to go")
# Per-name flags: whether a notification was sent / a face was detected.
name_notified = {}
name_detected = {}
for id in names:
    name_notified[id] = 0
    name_detected[id] = 0
# Ask once at startup whether frame capture should be gated on the PIR sensor.
use_motion = input("Use motion detector? Input 1 if yes, 0 if no: ")
if use_motion == '1':
    while True:
        motion = GPIO.input(11)  # PIR sensor: 0 = idle, non-zero = motion
        if motion==0:
            print("No motion detected")
        else:
            print("Motion detected")
            # NOTE(review): indentation reconstructed — confirm nesting against original.
            for i in range(0,50): #keep from going indefinitely (needs to trigger according to motion)
                print("i: " + str(i))
                ret, img =cam.read()
                img = cv2.flip(img, 1) # Flip vertically
                gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor = 1.2,
                    minNeighbors = 5,
                    minSize = (int(minW), int(minH)),
                    )
                for(x,y,w,h) in faces:
                    #print("x: {}\ty: {}\tw: {}\th: {}".format(x, y, w, h))
                    cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
                    id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
                    # Check if confidence is less than 100 ==> "0" is perfect match
                    if (confidence < 100):
                        id = names[id]
                        confidence_val = confidence
                        confidence = " {0}%".format(round(100 -confidence))
                        # Only treat matches above 25% confidence as a real sighting.
                        if round(100-confidence_val) > 25:
                            name_detected[id] = 1
                    else:
                        id = "unknown"
                        confidence = " {0}%".format(round(100 -confidence))
                        name_detected["None"] = 1
                    cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
                    cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
                cv2.imshow('camera',img)
                k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
                if k == 27:
                    # NOTE(review): this only exits the 50-frame loop, not the outer
                    # while True — ESC does not terminate the motion-gated mode.
                    break
                # Send at most one SMS per name per program run.
                for name, detected in name_detected.items():
                    if detected == 1 and name_notified[name] == 0:
                        if name == "None":
                            message = client.messages.create(
                                body=person_unrecognized,
                                from_=outgoing,
                                to=recipient
                            )
                        else:
                            message = client.messages.create(
                                body=person_recognized.format(name),
                                from_=outgoing,
                                to=recipient
                            )
                        name_notified[name] = 1
else:
    # Continuous mode: process every frame until ESC is pressed.
    while True:
        ret, img =cam.read()
        img = cv2.flip(img, 1) # Flip vertically
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor = 1.2,
            minNeighbors = 5,
            minSize = (int(minW), int(minH)),
            )
        for(x,y,w,h) in faces:
            #print("x: {}\ty: {}\tw: {}\th: {}".format(x, y, w, h))
            cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
            id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if (confidence < 100):
                id = names[id]
                confidence_val = confidence
                confidence = " {0}%".format(round(100 -confidence))
                # Only treat matches above 25% confidence as a real sighting.
                if round(100-confidence_val) > 25:
                    name_detected[id] = 1
            else:
                id = "unknown"
                confidence = " {0}%".format(round(100 -confidence))
                name_detected["None"] = 1
            cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
            cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
        cv2.namedWindow('camera')
        cv2.moveWindow('camera', 100, 300)
        cv2.imshow('camera',img)
        k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
        if k == 27:
            break
        # Send at most one SMS per name per program run.
        for name, detected in name_detected.items():
            if detected == 1 and name_notified[name] == 0:
                if name == "None":
                    message = client.messages.create(
                        body=person_unrecognized,
                        from_=outgoing,
                        to=recipient
                    )
                else:
                    message = client.messages.create(
                        body=person_recognized.format(name),
                        from_=outgoing,
                        to=recipient
                    )
                name_notified[name] = 1
# Do a bit of cleanup: release the camera, close windows, and free GPIO pins.
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
# The original never released the GPIO pins; without this, re-running the
# script can warn about channels already in use.
GPIO.cleanup()
| 34.108108
| 105
| 0.507528
|
4a0bc0b3a92451de8bbb9c09c86c1695dcd617fa
| 3,687
|
py
|
Python
|
lib/rucio/db/sqla/migrate_repo/versions/45378a1e76a8_create_collection_replica_table.py
|
fno2010/rucio
|
47e93cfbe5887071c70de4ba815c1bbdddfac2ce
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/db/sqla/migrate_repo/versions/45378a1e76a8_create_collection_replica_table.py
|
fno2010/rucio
|
47e93cfbe5887071c70de4ba815c1bbdddfac2ce
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/db/sqla/migrate_repo/versions/45378a1e76a8_create_collection_replica_table.py
|
fno2010/rucio
|
47e93cfbe5887071c70de4ba815c1bbdddfac2ce
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright CERN since 2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' create collection replica table '''
import datetime
import sqlalchemy as sa
from alembic import context
from alembic.op import (create_table, create_primary_key, create_foreign_key,
create_check_constraint, create_index, drop_table)
from rucio.db.sqla.constants import ReplicaState, DIDType
from rucio.db.sqla.types import GUID
from rucio.db.sqla.util import try_drop_constraint
# Alembic revision identifiers
revision = '45378a1e76a8'      # this migration
down_revision = 'a93e4e47bda'  # parent migration in the chain
def upgrade():
    '''
    Upgrade the database to this revision

    Creates the collection_replicas table (one row per dataset/container
    replica per RSE) plus its keys, constraints and index.
    '''
    if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
        create_table('collection_replicas',
                     sa.Column('scope', sa.String(25)),
                     sa.Column('name', sa.String(255)),
                     # DID type restricted by a named CHECK constraint.
                     sa.Column('did_type', sa.Enum(DIDType,
                                                   name='COLLECTION_REPLICAS_TYPE_CHK',
                                                   create_constraint=True,
                                                   values_callable=lambda obj: [e.value for e in obj])),
                     sa.Column('rse_id', GUID()),
                     sa.Column('bytes', sa.BigInteger),
                     sa.Column('length', sa.BigInteger),
                     # New replicas default to UNAVAILABLE until confirmed.
                     sa.Column('state', sa.Enum(ReplicaState,
                                                name='COLLECTION_REPLICAS_STATE_CHK',
                                                create_constraint=True,
                                                values_callable=lambda obj: [e.value for e in obj]),
                               default=ReplicaState.UNAVAILABLE),
                     sa.Column('accessed_at', sa.DateTime),
                     sa.Column('created_at', sa.DateTime, default=datetime.datetime.utcnow),
                     sa.Column('updated_at', sa.DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow))

        # One replica row per (scope, name, rse).
        create_primary_key('COLLECTION_REPLICAS_PK', 'collection_replicas', ['scope', 'name', 'rse_id'])
        create_foreign_key('COLLECTION_REPLICAS_LFN_FK', 'collection_replicas', 'dids', ['scope', 'name'], ['scope', 'name'])
        create_foreign_key('COLLECTION_REPLICAS_RSE_ID_FK', 'collection_replicas', 'rses', ['rse_id'], ['id'])
        create_check_constraint('COLLECTION_REPLICAS_SIZE_NN', 'collection_replicas', 'bytes IS NOT NULL')
        create_check_constraint('COLLECTION_REPLICAS_STATE_NN', 'collection_replicas', 'state IS NOT NULL')
        create_index('COLLECTION_REPLICAS_RSE_ID_IDX', 'collection_replicas', ['rse_id'])
def downgrade():
    '''
    Downgrade the database to the previous revision

    Drops the collection_replicas table; on Oracle the state CHECK
    constraint is removed first.
    '''
    dialect = context.get_context().dialect.name
    if dialect == 'oracle':
        try_drop_constraint('COLLECTION_REPLICAS_STATE_CHK', 'collection_replicas')
    if dialect in ('oracle', 'postgresql', 'mysql'):
        drop_table('collection_replicas')
| 45.518519
| 127
| 0.630594
|
4a0bc0b8ed492c737e54e76fa67d6dfd43eba6fb
| 1,191
|
py
|
Python
|
crawler1.py
|
vsai121/Web-crawler
|
727db4bac41301ecf45889919f0123ad331f4a22
|
[
"MIT"
] | null | null | null |
crawler1.py
|
vsai121/Web-crawler
|
727db4bac41301ecf45889919f0123ad331f4a22
|
[
"MIT"
] | null | null | null |
crawler1.py
|
vsai121/Web-crawler
|
727db4bac41301ecf45889919f0123ad331f4a22
|
[
"MIT"
] | null | null | null |
import os
def create_project_directory(directory):
    """Create the crawl project directory unless it already exists."""
    if os.path.exists(directory):
        return
    print('Creating Project ' + directory)
    os.makedirs(directory)
def write_file(filename, data):
    """Write *data* to *filename*, replacing any existing content."""
    # 'with' guarantees the handle is closed even if the write raises;
    # the original leaked the handle on exceptions.
    with open(filename, 'w') as f:
        f.write(data)
def create_datafiles(directory, url):
    """Seed the crawl data files: the queue starts with the base url,
    the crawled list starts empty. Existing files are left untouched."""
    queue_path = directory + "/queue" + ".txt"      # links waiting to be crawled
    crawled_path = directory + "/crawled" + ".txt"  # links already visited
    for path, initial in ((queue_path, url), (crawled_path, "")):
        if not os.path.isfile(path):
            write_file(path, initial)
def append_to_file(filename, data):
    """Append *data* plus a trailing newline to *filename*."""
    # 'with' closes the handle even on error (original leaked it on exceptions).
    with open(filename, 'a') as f:
        f.write(data + "\n")
def del_file(filename):
    """Truncate *filename* to zero length (creating it if missing)."""
    # Opening in 'w' mode truncates. The original never closed the handle,
    # relying on garbage collection; close it deterministically instead.
    with open(filename, 'w'):
        pass
def file_to_set(filename):
    """Return the set of lines in *filename* with newlines stripped."""
    # 'with' fixes the original's unclosed file handle; the set
    # comprehension mirrors the original add-per-line loop.
    with open(filename, 'rt') as f:
        return {line.replace('\n', '') for line in f}
def set_to_file(result, filename):
    """Overwrite *filename* with one entry of *result* per line."""
    # Single write pass. The original truncated via del_file() and then
    # reopened the file once per link through append_to_file() — O(n) opens.
    with open(filename, 'w') as f:
        for link in result:
            f.write(link + "\n")
| 20.534483
| 78
| 0.546599
|
4a0bc0c41869bcfd8f40b9179ab30b56425f65b4
| 217
|
py
|
Python
|
Agent/tests/functional_test.py
|
a523/storagestatck
|
cea14d5e276af1d90f0fc8c0b00881fa7e2bff9a
|
[
"MIT"
] | 3
|
2019-11-19T09:38:51.000Z
|
2020-12-21T13:23:52.000Z
|
Agent/tests/functional_test.py
|
a523/storagestatck
|
cea14d5e276af1d90f0fc8c0b00881fa7e2bff9a
|
[
"MIT"
] | 7
|
2020-01-20T04:02:23.000Z
|
2021-06-10T17:54:36.000Z
|
Agent/tests/functional_test.py
|
a523/storagestack
|
cea14d5e276af1d90f0fc8c0b00881fa7e2bff9a
|
[
"MIT"
] | null | null | null |
"""功能测试,测试前请先启动服务"""
import requests
def test_server_status():
    """Smoke-test the Agent web server's /hello endpoint."""
    response = requests.get('http://localhost:8600/hello')
    assert response.status_code == 200, 'Agent web server unavailable'
    assert response.text == 'OK'
| 21.7
| 66
| 0.691244
|
4a0bc0e893221a12675380335b725316ccddb270
| 1,938
|
py
|
Python
|
pyhdx/__init__.py
|
sebaztiano/PyHDX
|
12fc2b5f67200885706226823bd8e1f46e3b5db1
|
[
"MIT"
] | null | null | null |
pyhdx/__init__.py
|
sebaztiano/PyHDX
|
12fc2b5f67200885706226823bd8e1f46e3b5db1
|
[
"MIT"
] | null | null | null |
pyhdx/__init__.py
|
sebaztiano/PyHDX
|
12fc2b5f67200885706226823bd8e1f46e3b5db1
|
[
"MIT"
] | null | null | null |
"""Top-level package for PyHDX."""
import setuptools # Import prevents warning
from .models import PeptideMasterTable, PeptideMeasurements, KineticsSeries, Coverage
from .fileIO import read_dynamx
from .output import Output
from pathlib import Path
# Distribution name used for all metadata lookups below.
package_name = 'pyhdx'

# Preferred source: stdlib importlib.metadata (Python >= 3.8).
try:
    import importlib.metadata
    __version__ = importlib.metadata.version(package_name)
    has_importlib_metadata = True
except ModuleNotFoundError:
    __version__ = None
    has_importlib_metadata = False

# Fallback source: pbr (also provides the git short SHA).
try:
    from pbr import version
    from pbr import git
    has_pbr = True
except ModuleNotFoundError:
    has_pbr = False

if not has_importlib_metadata and not has_pbr:
    # This can also happen with py>3.8 on conda editable install
    raise ModuleNotFoundError('Must have pbr for python < 3.8')

git_dir = Path(__file__).parent.parent / '.git'
if has_pbr:
    try:
        info = version.VersionInfo(package_name)
        # Keep the importlib-derived version if one was already found.
        __version__ = __version__ or info.version_string()
        __git_sha__ = git.get_git_short_sha(git_dir)
    # Pbr throws very broad Exception, for some reason DistributionNotFound
    # does not want to be caught
    except Exception:
        git_dir = Path(__file__).parent.parent / '.git'
        try:
            # Derive a semantic version from `git describe --tags`
            # (pbr expects '.'-separated pip-style version strings).
            tagged = git._run_git_command(
                ['describe', '--tags'], git_dir,
                throw_on_error=True).replace('-', '.')
            semantic_version = version.SemanticVersion.from_pip_string(tagged)
            __version__ = __version__ or semantic_version._long_version(None)
            __git_sha__ = git.get_git_short_sha(git_dir)
        except FileNotFoundError:
            # Git not installed
            __git_sha__ = None
else:
    __git_sha__ = None

# Human-readable version banners (long and short forms).
VERSION_STRING = f'pyHDX version {__version__}'
VERSION_STRING_SHORT = f'pyHDX v{__version__}'
if __git_sha__ is not None:
    VERSION_STRING += f', development version {__git_sha__}'
    VERSION_STRING_SHORT += f' ({__git_sha__})'
| 32.847458
| 121
| 0.706914
|
4a0bc1c4f4ec77c2adfe8d07ea7700b0ae9e7572
| 2,258
|
py
|
Python
|
tests/test_utils/salesforce_system_helpers.py
|
gtossou/airflow
|
0314a3a218f864f78ec260cc66134e7acae34bc5
|
[
"Apache-2.0"
] | 2
|
2020-12-03T01:29:54.000Z
|
2020-12-03T01:30:06.000Z
|
tests/test_utils/salesforce_system_helpers.py
|
gtossou/airflow
|
0314a3a218f864f78ec260cc66134e7acae34bc5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils/salesforce_system_helpers.py
|
gtossou/airflow
|
0314a3a218f864f78ec260cc66134e7acae34bc5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
from contextlib import contextmanager
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.utils.process_utils import patch_environ
# Keys that must be present in the credentials JSON file.
CONFIG_REQUIRED_FIELDS = ["host", "login", "password", "security_token"]
# Connection id and type, overridable through the environment.
SALESFORCE_CONNECTION_ID = os.environ.get('SALESFORCE_CONNECTION_ID', 'salesforce_default')
CONNECTION_TYPE = os.environ.get('CONNECTION_TYPE', 'http')
@contextmanager
def provide_salesforce_connection(key_file_path: str):
    """
    Context manager that provides a temporary value of SALESFORCE_DEFAULT connection.

    :param key_file_path: Path to file with SALESFORCE credentials .json file.
    :type key_file_path: str
    :raises AirflowException: if the file is not JSON or required keys are missing.
    """
    if not key_file_path.endswith(".json"):
        raise AirflowException("Use a JSON key file.")
    with open(key_file_path, 'r') as credentials:
        creds = json.load(credentials)
    # Explicit set() conversion: the original relied on list - dict_keys,
    # which only works through KeysView's reflected set operators.
    missing_keys = set(CONFIG_REQUIRED_FIELDS) - creds.keys()
    if missing_keys:
        raise AirflowException(f"{missing_keys} fields are missing")
    conn = Connection(
        conn_id=SALESFORCE_CONNECTION_ID,
        conn_type=CONNECTION_TYPE,
        host=creds["host"],
        login=creds["login"],
        password=creds["password"],
        extra=json.dumps({"security_token": creds["security_token"]}),
    )
    # Expose the connection via the env var Airflow resolves connections from.
    with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
        yield
| 39.614035
| 91
| 0.742693
|
4a0bc23eff2dc97f638c6e4ffd1b8fd49aa94f0b
| 34,413
|
py
|
Python
|
conference.py
|
preveyj/Udacity-FSWDND-Project-4
|
3364ec0cb6e0394370322f3adc74b78fc3894fac
|
[
"Apache-2.0"
] | null | null | null |
conference.py
|
preveyj/Udacity-FSWDND-Project-4
|
3364ec0cb6e0394370322f3adc74b78fc3894fac
|
[
"Apache-2.0"
] | null | null | null |
conference.py
|
preveyj/Udacity-FSWDND-Project-4
|
3364ec0cb6e0394370322f3adc74b78fc3894fac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
from datetime import date
from datetime import timedelta
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionForms
from models import UserWishlist
from models import UserWishlistForm
from models import FeaturedSpeakerMemcacheEntry
from models import FeaturedSpeakerMemcacheEntryForm
from models import FeaturedSpeakerMemcacheEntryForms
from models import FeaturedSpeakerMemcacheKeys
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache key and message template for the "nearly sold out" announcement.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
# Memcache key holding the FeaturedSpeakerMemcacheKeys index of entry keys.
MEMCACHE_FEATURED_SPEAKER_KEY = "FeaturedSpeaker"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Field defaults applied when a create request omits a value.
CONFERENCE_DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": [ "Default", "Topic" ],
}

SESSION_DEFAULTS = {
    "name": "Test Session",
    "highlights": [ "good stuff", "better stuff" ],
    "speaker": "Jesse",
    "duration": 0,
    "typeOfSession": "Test Session",
    "startDate": str(date.today()),
    "startTime": str(datetime.now()),
}

# Query-filter operator names mapped to NDB comparison operators.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}

# Filter field names mapped to Conference model properties.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}

# ResourceContainers: request bodies plus URL/query-string parameters.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1)
)

CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1)
)

CONF_GET_BY_CITY = endpoints.ResourceContainer(
    message_types.VoidMessage,
    conferenceCity=messages.StringField(1)
)

# NOTE(review): field number starts at 2 with no field 1 — legal for
# protorpc, but confirm this is intentional.
CONF_GET_BY_TOPIC = endpoints.ResourceContainer(
    message_types.VoidMessage,
    conferenceTopic=messages.StringField(2)
)

SESS_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1)
)

SESS_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeSessionKey=messages.StringField(1)
)

SESS_GET_BY_TYPE_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
    sessionType=messages.StringField(2)
)

SESS_GET_BY_SPEAKER_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    speaker=messages.StringField(1)
)

SESS_ADD_TO_WISHLIST_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeSessionKey=messages.StringField(1)
)

GET_SESSIONS_BY_NONTYPE_AND_BEFORE_TIME = endpoints.ResourceContainer(
    message_types.VoidMessage,
    sessionType=messages.StringField(1, required=True),
    endTime=messages.StringField(2, required=True)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# Helper functions
def _getLoggedInUser(self):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
return user;
# - - - Conference objects - - - - - - - - - - - - - - - - -
    def _copyConferenceToForm(self, conf, displayName):
        """Copy relevant fields from Conference to ConferenceForm.

        Args:
            conf: Conference entity to copy from.
            displayName: organizer display name to embed, or falsy to skip.
        Returns:
            Initialized ConferenceForm.
        """
        cf = ConferenceForm()
        for field in cf.all_fields():
            if hasattr(conf, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('Date') or field.name.endswith('Time'):
                    setattr(cf, field.name, str(getattr(conf, field.name)))
                else:
                    setattr(cf, field.name, getattr(conf, field.name))
            elif field.name == "websafeKey":
                # websafeKey is derived from the entity key, not a stored field.
                setattr(cf, field.name, conf.key.urlsafe())
        if displayName:
            setattr(cf, 'organizerDisplayName', displayName)
        cf.check_initialized()
        return cf
    def _copySessionToForm(self, theSession):
        """Copy relevant fields from a Session entity into a SessionForm."""
        wl = SessionForm()
        for field in wl.all_fields():
            if hasattr(theSession, field.name):
                # Same as above; convert date or time to string
                if field.name.endswith('Date') or field.name.endswith('Time'):
                    setattr(wl, field.name, str(getattr(theSession, field.name)))
                else:
                    setattr(wl, field.name, getattr(theSession, field.name))
            elif field.name == "websafeKey":
                # websafeKey is derived from the entity key, not a stored field.
                setattr(wl, field.name, theSession.key.urlsafe())
        wl.check_initialized()
        return wl
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request."""
        # preload necessary data items
        user = self._getLoggedInUser()
        user_id = getUserId(user)

        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")

        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # these two are output-only fields, never stored on the entity
        del data['websafeKey']
        del data['organizerDisplayName']

        # add default values for those missing (both data model & outbound Message)
        for df in CONFERENCE_DEFAULTS:
            if data[df] in (None, []):
                data[df] = CONFERENCE_DEFAULTS[df]
                setattr(request, df, CONFERENCE_DEFAULTS[df])

        # convert dates from strings to Date objects; set month based on start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()

        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = ndb.Key(Profile, user_id)
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id

        # create Conference, send email to organizer confirming
        # creation of Conference & return (modified) ConferenceForm
        Conference(**data).put()
        # confirmation e-mail is sent asynchronously via the task queue
        taskqueue.add(params={'email': user.email(),
            'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email'
        )
        return request
    def _getUserWishlistByProfile(self, profile):
        """Given a profile, get its key and return the sessions on the wishlist.

        Each UserWishlist entity stores only a websafe Session key; this
        resolves each key to the Session entity and copies it into a form.
        """
        if not profile:
            raise endpoints.BadRequestException("Invalid profile!")

        # Get the wishlist entries and add them to the wishlist to return.
        wishlistEntries = UserWishlist.query(ancestor=profile.key).fetch(limit=None)
        finishedWishlist = SessionForms()

        if wishlistEntries:
            for entry in wishlistEntries:
                theSession = ndb.Key(urlsafe=getattr(entry, "wishlistedSessionKey")).get()
                sf = self._copySessionToForm(theSession)
                sf.check_initialized()
                finishedWishlist.items.append(sf)

        return finishedWishlist
#TODO
def _addSessionToWishlist(self, request):
#Check if the user is logged in
user = self._getLoggedInUser()
user_id = getUserId(user)
#Check if the theSession exists
theSession = ndb.Key(urlsafe=request.websafeSessionKey).get()
if not theSession:
raise endpoints.BadRequestException("Invalid session key")
#Get user profile
prof = ndb.Key(Profile, user_id).get()
if not prof:
raise endpoints.BadRequestException("Unable to find user profile")
wishlistEntry = UserWishlist.query(ancestor=prof.key).filter(
getattr(UserWishlist, "wishlistedSessionKey") == request.websafeSessionKey
).get()
print wishlistEntry
#If the desired wishlist entry doesn't already exist, create it.
if not wishlistEntry:
wishlistEntry = UserWishlist(parent=prof.key)
setattr(wishlistEntry, "wishlistedSessionKey", request.websafeSessionKey)
wishlistEntry.put()
return self._getUserWishlistByProfile(prof)
#Same as above, but for sessions.
def _createSessionObject(self, request):
self._getLoggedInUser()
if not request.websafeConferenceKey:
raise endpoints.BadRequestException("Websafe Conference Key field required")
theConference = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not theConference:
raise endpoints.BadRequestException("That conference doesn't exist!")
# Same as above, copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
theConferenceWebsafeKey = data['websafeConferenceKey']
del data['websafeKey']
del data['websafeConferenceKey']
# Defaults for missing values
for df in SESSION_DEFAULTS:
if data[df] in (None, []):
data[df] = SESSION_DEFAULTS[df]
setattr(request, df, SESSION_DEFAULTS[df])
# Convert date and time
try:
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
except ValueError:
data['startDate'] = date.today()
try:
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'], "%H:%M:%S")
except ValueError:
data['startTime'] = datetime.now()
# Generate keys
Session(parent=theConference.key, **data).put()
if (data['speaker']):
# Check speaker name at this conference for the Featured Speaker memcaches
speakerSessions = Session.query(ancestor=theConference.key).filter(Session.speaker == data['speaker']).fetch(limit=None)
if (speakerSessions) and (len(speakerSessions) > 1):
#Check if the memcache entry exists for this speaker and this conference.
theEntry = memcache.get(data['speaker'] + "_" + theConferenceWebsafeKey)
theKeys = memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY)
entryKey = key = (data['speaker'] + "_" + theConferenceWebsafeKey)
if theKeys is None:
theKeys = FeaturedSpeakerMemcacheKeys()
if theEntry is None:
theEntry = FeaturedSpeakerMemcacheEntry()
theEntry.speaker = data['speaker']
theEntry.conferenceWebsafeKey = theConferenceWebsafeKey
theEntry.sessions = []
for speakerSession in speakerSessions:
theEntry.sessions.append(speakerSession.name)
memcache.set(key = entryKey,
value = theEntry)
if entryKey not in theKeys.items:
theKeys.items.append(entryKey)
memcache.set(key = MEMCACHE_FEATURED_SPEAKER_KEY, value = theKeys)
return request
    def _getFeaturedSpeakersFromMemcache(self):
        """Return all featured-speaker entries currently in memcache.

        The index of entry keys is stored under MEMCACHE_FEATURED_SPEAKER_KEY;
        each key resolves to one FeaturedSpeakerMemcacheEntry. Entries evicted
        from memcache are silently skipped.
        """
        # inserted via key MEMCACHE_FEATURED_SPEAKER_KEY
        theKeys = memcache.get(key = MEMCACHE_FEATURED_SPEAKER_KEY)
        thingsToReturn = FeaturedSpeakerMemcacheEntryForms()
        thingsToReturn.check_initialized()

        if theKeys:
            for speakerKey in theKeys.items:
                entry = memcache.get(key = speakerKey)
                if entry:
                    thingsToReturn.items.append(self._copyFeaturedSpeakerToForm(entry))

        return thingsToReturn
def _copyFeaturedSpeakerToForm(self, Speaker):
toReturn = FeaturedSpeakerMemcacheEntryForm()
toReturn.check_initialized()
toReturn.speaker = Speaker.speaker
toReturn.conferenceWebsafeKey = Speaker.conferenceWebsafeKey
toReturn.sessions = Speaker.sessions
return toReturn
@ndb.transactional()
def _updateConferenceObject(self, request):
user = self._getLoggedInUser()
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
            http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference owned by the logged-in user."""
        return self._createConferenceObject(request)
# createSession(SessionForm, websafeConferenceKey)
    # createSession(SessionForm, websafeConferenceKey)
    @endpoints.method(SESS_POST_REQUEST, SessionForm, path='session',
            http_method='POST', name='createSession')
    def createSession(self, request):
        """Create new session under the given conference."""
        return self._copySessionToForm(self._createSessionObject(request))
    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info."""
        return self._updateConferenceObject(request)
    @endpoints.method(message_types.VoidMessage,
                      FeaturedSpeakerMemcacheEntryForms,
                      http_method="GET",
                      name="getFeaturedSpeaker",
                      path="getFeaturedSpeaker")
    def getFeaturedSpeaker(self, request):
        """Get the featured speaker for each conference (from memcache)."""
        return self._getFeaturedSpeakersFromMemcache()
    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # the conference's parent entity is the organizer's Profile
        prof = conf.key.parent().get()
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
#Return sessions by conference.
    @endpoints.method(CONF_GET_REQUEST,
        SessionForms, path='getConferenceSessions/{websafeConferenceKey}',
        http_method='GET', name='getConferenceSessions')
    def getConferenceSessions(self, request):
        """Get conference sessions by websafe conference key."""
        self._getLoggedInUser()
        theConference = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        # Sessions are children of their conference, so an ancestor query suffices.
        theSessions = Session.query(ancestor=theConference.key)
        return SessionForms(
            items=[self._copySessionToForm(oneSession) for oneSession in theSessions]
        )
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
            path='getConferencesCreated',
            http_method='POST', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by user."""
        # make sure user is authed
        user = self._getLoggedInUser()
        user_id = getUserId(user)

        # create ancestor query for all key matches for this user
        confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
        prof = ndb.Key(Profile, user_id).get()
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
        )
    @endpoints.method(CONF_GET_BY_CITY, ConferenceForms,
                     path="getConferencesByCity",
                     http_method="POST", name="getConferencesByCity")
    def getConferencesByCity(self, request):
        """Get conferences by city (exact match on the city field)."""
        confs = Conference.query().filter(getattr(Conference, "city") == request.conferenceCity)
        # _getProfileFromUser is defined elsewhere in this class.
        prof = self._getProfileFromUser()
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
        )
    @endpoints.method(CONF_GET_BY_TOPIC, ConferenceForms,
                     path="getConferencesByExactTopic",
                     http_method="POST", name="getConferencesByExactTopic")
    def getConferencesByExactTopic(self, request):
        """Get conferences by topic. Must be a complete match; use getConferencesCreated and copy a topic from there."""
        # topics is a repeated property, so == tests list membership.
        confs = Conference.query(Conference.topics == request.conferenceTopic)
        prof = self._getProfileFromUser()
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
        )
    @endpoints.method(GET_SESSIONS_BY_NONTYPE_AND_BEFORE_TIME, SessionForms,
                     path="getSessionsNotOfTypeAndBeforeTime",
                     http_method="POST", name="getSessionsNotOfTypeAndBeforeTime")
    def getSessionsNotOfTypeAndBeforeTime(self, request):
        """Get sessions that are NOT a given type, and that finish before the given 24H time."""
        sessions = Session.query(Session.typeOfSession != request.sessionType)
        sessionsToReturn = SessionForms()
        sessionsToReturn.check_initialized()
        # strptime with only %H:%M:%S anchors the datetime at 1900-01-01.
        cutoffTime = datetime.strptime(request.endTime, "%H:%M:%S")

        for sess in sessions:
            # For each session that finishes before the cutoff time, add it to the list to return.
            # NOTE(review): sess.startTime may carry a real date (datetime.now()
            # fallback at creation) while cutoffTime is anchored at 1900-01-01 —
            # confirm both use the same base date or this comparison is skewed.
            if (cutoffTime > (sess.startTime + timedelta(minutes = sess.duration))):
                sessionsToReturn.items.append(self._copySessionToForm(sess))

        return sessionsToReturn
@endpoints.method(SESS_GET_BY_TYPE_REQUEST, SessionForms,
path='getConferenceSessionsByType',
http_method='POST', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Get conference sessions by conference and session type."""
self._getLoggedInUser()
theConference = ndb.Key(urlsafe=request.websafeConferenceKey).get()
theSessions = Session.query(ancestor=theConference.key).filter(
getattr(Session, "typeOfSession") == request.sessionType)
return SessionForms(
items=[self._copySessionToForm(oneSession) for oneSession in theSessions]
)
@endpoints.method(SESS_GET_BY_SPEAKER_REQUEST, SessionForms,
path='getConferenceSessionsBySpeaker',
http_method='POST', name='getConferenceSessionsBySpeaker')
def getConferenceSessionsBySpeaker(self, request):
"""Get conference sessions by speaker."""
self._getLoggedInUser()
theSessions = Session.query(getattr(Session, "speaker") == request.speaker)
return SessionForms(
items=[self._copySessionToForm(oneSession) for oneSession in theSessions]
)
    @endpoints.method(SESS_GET_REQUEST, SessionForms,
        path='addSessionToWishlist', http_method="POST",
        name="addSessionToWishlist")
    def addSessionToWishlist(self, request):
        """Add a session to a user's wishlist by session websafe key."""
        # Thin endpoint wrapper; validation and persistence live in the helper.
        return self._addSessionToWishlist(request)
@endpoints.method(message_types.VoidMessage, SessionForms,
path='getSessionsInWishlist', http_method="GET",
name="getSessionsInWishlist")
def getSessionsInWishlist(self, request):
"""Get the sessions on your wishlist."""
# Get user profile, and pass it to _getUserWishlistByProfile()
user = self._getLoggedInUser()
user_id = getUserId(user)
prof = ndb.Key(Profile, user_id).get()
return self._getUserWishlistByProfile(prof)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = self._getLoggedInUser()
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
return self._copyProfileToForm(prof)
    @endpoints.method(message_types.VoidMessage, ProfileForm,
            path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return the signed-in user's profile (created on first access)."""
        # Thin wrapper; _doProfile handles fetch/create and form conversion.
        return self._doProfile()
    @endpoints.method(ProfileMiniForm, ProfileForm,
            path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        # Passing the request makes _doProfile apply the user's edits first.
        return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register (reg=True) or unregister (reg=False) the current user
        for the conference named by request.websafeConferenceKey.

        Runs in a cross-group transaction so the seat count and the user's
        registration list change atomically. Returns BooleanMessage: True on
        success, False when unregistering a conference the user never joined.
        Raises NotFoundException for an unknown key and ConflictException for
        duplicate registration or a sold-out conference.
        """
        retval = None
        prof = self._getProfileFromUser() # get user Profile
        # check if conf exists given websafeConfKey
        # get conference; check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            # check if seats avail
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            # register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        # unregister
        else:
            # check if user already registered
            if wsck in prof.conferenceKeysToAttend:
                # unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/{websafeConferenceKey}',
            http_method='POST', name='registerForConference')
    def registerForConference(self, request):
        """Register user for selected conference."""
        # Delegates to the transactional helper with reg defaulting to True.
        return self._conferenceRegistration(request)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/{websafeConferenceKey}',
            http_method='DELETE', name='unregisterFromConference')
    def unregisterFromConference(self, request):
        """Unregister user for selected conference."""
        # Same transactional helper, in unregister mode.
        return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
api = endpoints.api_server([ConferenceApi]) # register API
| 38.364548
| 132
| 0.628048
|
4a0bc30b730625d6f5618e9e8db9ce6a23133c92
| 148
|
py
|
Python
|
Part 2/Week_4_7/Fatorial.py
|
Arielcarv/Intro-Ciencia-da-Computacao-USP
|
03d69b9e0f3fa5fca053d0fc406aa1a584d33de1
|
[
"MIT"
] | null | null | null |
Part 2/Week_4_7/Fatorial.py
|
Arielcarv/Intro-Ciencia-da-Computacao-USP
|
03d69b9e0f3fa5fca053d0fc406aa1a584d33de1
|
[
"MIT"
] | null | null | null |
Part 2/Week_4_7/Fatorial.py
|
Arielcarv/Intro-Ciencia-da-Computacao-USP
|
03d69b9e0f3fa5fca053d0fc406aa1a584d33de1
|
[
"MIT"
] | null | null | null |
def fatorial(x):
    """Return x! (factorial), computed recursively.

    Any argument below 1 — including 0 and negatives — yields 1, which is
    the recursion's base case.
    """
    if x >= 1:
        return x * fatorial(x - 1)  # recursive step
    return 1  # base case
| 24.666667
| 52
| 0.5
|
4a0bc34866f393ad194c709c22a955355c159ef9
| 311
|
py
|
Python
|
School/Between (1-7).py
|
Bamgm14/My-Random-Work
|
b9678a3a84dd8ff00efd638890cff76eb6967c1b
|
[
"MIT"
] | null | null | null |
School/Between (1-7).py
|
Bamgm14/My-Random-Work
|
b9678a3a84dd8ff00efd638890cff76eb6967c1b
|
[
"MIT"
] | null | null | null |
School/Between (1-7).py
|
Bamgm14/My-Random-Work
|
b9678a3a84dd8ff00efd638890cff76eb6967c1b
|
[
"MIT"
] | null | null | null |
# Accept a single digit n between 1 and 7 and display the 3-digit number
# n*100 + (n+1)*10 + (n+2)  (e.g. n=3 -> 345). Re-prompts until valid.
while True:
    a=int(input("Enter Number(n):"))
    if a>7 or a<1:
        print ("INPUT NUMBER BETWEEN 1 AND 7")
    else:
        # Build the three digits: hundreds = n, tens = n+1, units = n+2.
        b=(a+1)*10
        c=(a+2)
        a=a*100
        print (a+b+c)
        break
| 23.923077
| 93
| 0.514469
|
4a0bc3acb4646d473b50a9a5ba794efa81d2361f
| 8,665
|
py
|
Python
|
docs/source/conf.py
|
jtoD5T/TinCanPython
|
e98b5087cb7f3a29c6a00392b327cd6241c4a354
|
[
"Apache-2.0"
] | 38
|
2015-02-26T12:40:15.000Z
|
2021-08-19T11:19:25.000Z
|
docs/source/conf.py
|
jtoD5T/TinCanPython
|
e98b5087cb7f3a29c6a00392b327cd6241c4a354
|
[
"Apache-2.0"
] | 21
|
2015-01-07T23:12:01.000Z
|
2022-02-27T12:35:49.000Z
|
docs/source/conf.py
|
jtoD5T/TinCanPython
|
e98b5087cb7f3a29c6a00392b327cd6241c4a354
|
[
"Apache-2.0"
] | 27
|
2015-02-09T17:21:44.000Z
|
2022-02-27T12:36:25.000Z
|
# -*- coding: utf-8 -*-
#
# Tin Can Python documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 10 12:52:27 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# Ensure that the __init__ method gets documented.
def skip(app, what, name, obj, skip, options):
    """autodoc-skip-member hook: force ``__init__`` to be documented.

    For every other member, keep whatever decision autodoc already made
    (the incoming ``skip`` flag).
    """
    return False if name == "__init__" else skip
def setup(app):
    # Register the hook above so Sphinx consults it for every member.
    app.connect("autodoc-skip-member", skip)
# If your documentation needs a minimal Sphinx version, state it here.
# #needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# #source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tin Can Python'
copyright = u'2014, Rustici Software'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pkg_resources.require("tincan")[0].version
# The full version, including alpha/beta/rc tags. Identical to `version`
# here; reuse it instead of querying pkg_resources a second time.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# #language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# #today = ''
# Else, today_fmt is used as the format for a strftime call.
# #today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# #default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# #add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# #add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# #show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# #modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# #keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# #html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# #html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# #html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# #html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# #html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# #html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# #html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# #html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# #html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# #html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# #html_additional_pages = {}
# If false, no module index is generated.
# #html_domain_indices = True
# If false, no index is generated.
# #html_use_index = True
# If true, the index is split into individual pages for each letter.
# #html_split_index = False
# If true, links to the reST sources are added to the pages.
# #html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# #html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# #html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# #html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# #html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TinCanPythondoc'
# -- Options for LaTeX output ---------------------------------------------
# The paper size ('letterpaper' or 'a4paper').
# #'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# #'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# #'preamble': '',
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TinCanPython.tex', u'Tin Can Python Documentation',
u'Rustici Software', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# #latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# #latex_use_parts = False
# If true, show page references after internal links.
# #latex_show_pagerefs = False
# If true, show URL addresses after external links.
# #latex_show_urls = False
# Documents to append as an appendix to all manuals.
# #latex_appendices = []
# If false, no module index is generated.
# #latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tincanpython', u'Tin Can Python Documentation',
[u'Rustici Software'], 1)
]
# If true, show URL addresses after external links.
# #man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TinCanPython', u'Tin Can Python Documentation',
u'Rustici Software', 'TinCanPython',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# #texinfo_appendices = []
# If false, no module index is generated.
# #texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# #texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# #texinfo_no_detailmenu = False
| 31.856618
| 79
| 0.709636
|
4a0bc3c06d504a5876d38ff1c06be7d231caee8e
| 2,179
|
py
|
Python
|
Jogo_da_velha/controller/TabuleirosCtrl.py
|
Lucas-Dinelli/Projeto_Como_As_Maquinas_Aprendem
|
5b47cb9b3f237af32c4caac758380f823637d236
|
[
"MIT"
] | null | null | null |
Jogo_da_velha/controller/TabuleirosCtrl.py
|
Lucas-Dinelli/Projeto_Como_As_Maquinas_Aprendem
|
5b47cb9b3f237af32c4caac758380f823637d236
|
[
"MIT"
] | null | null | null |
Jogo_da_velha/controller/TabuleirosCtrl.py
|
Lucas-Dinelli/Projeto_Como_As_Maquinas_Aprendem
|
5b47cb9b3f237af32c4caac758380f823637d236
|
[
"MIT"
] | null | null | null |
class TabuleiroCtrl:
    """Drives a round of the game: renders the board, alternates turns and
    forwards every decision to the injected collaborator interfaces."""

    # Attribute names, in the same order as the constructor parameters.
    _ATTRS = ("IAmbiente", "IEntradas", "IPreJogo", "IInteligencia",
              "ITelaTabuleiro", "IPreJogoCtrl", "IFimDeJogosCtrl",
              "IPecasCtrl", "IAtuadoresCtrl", "ITodosTiposJogadores")

    def __init__(self, ambiente, entradas, preJogo, inteligencia,
                 telaTabuleiro, preJogoCtrl, fimDeJogosCtrl,
                 pecasCtrl, atuadoresCtrl, todosTiposJogadores):
        deps = (ambiente, entradas, preJogo, inteligencia, telaTabuleiro,
                preJogoCtrl, fimDeJogosCtrl, pecasCtrl, atuadoresCtrl,
                todosTiposJogadores)
        # Store every collaborator under its interface-style attribute name.
        for attr, dep in zip(self._ATTRS, deps):
            setattr(self, attr, dep)

    def fluxoDeJogo(self):
        """Main loop: draw the board and play turns until the game ends."""
        tipos = self.ITodosTiposJogadores
        self.ITelaTabuleiro.tabuleiro()
        while True:
            self.extrairDadosCtrl()
            if not self.efeitosDasAcoes():
                self.ITelaTabuleiro.limpaTabuleiro()
                continue
            # Game over: non-human players need one final data extraction.
            if self.IPreJogoCtrl.getTipoJogadorPelaVezCtrl() != tipos[0]:
                self.extrairDadosCtrl()
            break

    def extrairDadosCtrl(self):
        """Extract board data; humans pass False, other players pass 100."""
        tipos = self.ITodosTiposJogadores
        humano = self.IPreJogoCtrl.getTipoJogadorPelaVezCtrl() == tipos[0]
        self.ITelaTabuleiro.extrairDados(False if humano else 100)

    def efeitosDasAcoes(self):
        """Apply the current move to the environment; True when the game ended."""
        jogada, peca = self.jogadaQualquerJogador()
        self.IInteligencia.guardaEstado()
        self.IAmbiente.setCampo(jogada, peca)
        self.IEntradas.setTodosJogadasDaRodada(jogada)
        self.IInteligencia.guardaEstado()
        if self.IFimDeJogosCtrl.fimDeJogoCtrl(peca):
            return True

    def jogadaQualquerJogador(self):
        """Obtain the next move: clicked by a human or produced by an actuator."""
        peca = self.IPecasCtrl.getPecasPelaVezCtrl()
        jogada = self.IAtuadoresCtrl.jogadaJogadoresCtrl()
        if jogada == self.ITodosTiposJogadores[0]:
            # Human turn: read clicks until a valid square is chosen.
            jogada = self.ITelaTabuleiro.insercaoClique()
            while not self.IEntradas.valida(jogada):
                jogada = self.ITelaTabuleiro.jogadaInvalida()
            self.ITelaTabuleiro.preencherClique(peca)
        else:
            self.ITelaTabuleiro.insercaoManual(jogada, peca)
        return jogada, peca
| 36.932203
| 77
| 0.653052
|
4a0bc4a15286267d1623b6fe6af9154794e866e2
| 2,875
|
py
|
Python
|
tests/test_additional_responses_router.py
|
broHeryk/squall
|
47c844d45548ed62745ee8385d6e6bda9e8269ad
|
[
"MIT"
] | 27
|
2021-12-04T15:54:59.000Z
|
2022-02-19T15:37:35.000Z
|
tests/test_additional_responses_router.py
|
broHeryk/squall
|
47c844d45548ed62745ee8385d6e6bda9e8269ad
|
[
"MIT"
] | 21
|
2021-12-04T21:17:54.000Z
|
2022-01-30T23:45:43.000Z
|
tests/test_additional_responses_router.py
|
broHeryk/squall
|
47c844d45548ed62745ee8385d6e6bda9e8269ad
|
[
"MIT"
] | 2
|
2021-12-29T10:53:59.000Z
|
2022-01-12T05:01:02.000Z
|
from squall import Router, Squall
from squall.testclient import TestClient
# Application under test: a Squall app whose routes are mounted via a router.
# Handlers use deliberately varied response keys (int, str, range, default)
# to exercise schema generation. Comments only — a handler docstring could
# leak into the generated OpenAPI description and change the schema.
app = Squall()
router = Router()
# Single extra response keyed by an int status code.
@router.get("/a", responses={501: {"description": "Error 1"}})
async def a():
    return "a"
# Mixes an int status code with an uppercase "4XX" range key.
@router.get(
    "/b",
    responses={
        502: {"description": "Error 2"},
        "4XX": {"description": "Error with range, upper"},
    },
)
async def b():
    return "b"
# Uses a string status code, a range key and the special "default" key.
@router.get(
    "/c",
    responses={
        "400": {"description": "Error with str"},
        "5XX": {"description": "Error with range"},
        "default": {"description": "A default response"},
    },
)
async def c():
    return "c"
# Mount the router so its routes (and responses) appear in the app schema.
app.include_router(router)
# Expected OpenAPI document: each route's custom responses merged with the
# auto-generated 200 "Successful Response" entry.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "Squall", "version": "0.1.0"},
    "paths": {
        "/a": {
            "get": {
                "responses": {
                    "501": {"description": "Error 1"},
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                },
                "summary": "A",
                "operationId": "a_a_get",
            }
        },
        "/b": {
            "get": {
                "responses": {
                    "502": {"description": "Error 2"},
                    "4XX": {"description": "Error with range, upper"},
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                },
                "summary": "B",
                "operationId": "b_b_get",
            }
        },
        "/c": {
            "get": {
                "responses": {
                    "400": {"description": "Error with str"},
                    "5XX": {"description": "Error with range"},
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "default": {"description": "A default response"},
                },
                "summary": "C",
                "operationId": "c_c_get",
            }
        },
    },
}
client = TestClient(app)
def test_openapi_schema():
    """The generated schema matches the expected document exactly."""
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == openapi_schema
def test_a():
    """GET /a returns its JSON-encoded string payload."""
    response = client.get("/a")
    assert response.status_code == 200, response.text
    assert response.json() == "a"
def test_b():
    """GET /b returns its JSON-encoded string payload."""
    response = client.get("/b")
    assert response.status_code == 200, response.text
    assert response.json() == "b"
def test_c():
    """GET /c returns its JSON-encoded string payload."""
    response = client.get("/c")
    assert response.status_code == 200, response.text
    assert response.json() == "c"
| 25.669643
| 72
| 0.446957
|
4a0bc5c96ff510d02c34838ef46a2c2007a3b74f
| 20,969
|
py
|
Python
|
Benchmarks/common/utility/makefile_gen/makegen.py
|
LemonAndRabbit/rodinia-hls
|
097e8cf572a9ab04403c4eb0cfdb042f233f4aea
|
[
"BSD-2-Clause"
] | 364
|
2016-10-18T18:07:32.000Z
|
2022-03-31T05:21:25.000Z
|
Benchmarks/common/utility/makefile_gen/makegen.py
|
LemonAndRabbit/rodinia-hls
|
097e8cf572a9ab04403c4eb0cfdb042f233f4aea
|
[
"BSD-2-Clause"
] | 54
|
2016-09-19T09:11:27.000Z
|
2021-04-19T04:42:58.000Z
|
Benchmarks/common/utility/makefile_gen/makegen.py
|
LemonAndRabbit/rodinia-hls
|
097e8cf572a9ab04403c4eb0cfdb042f233f4aea
|
[
"BSD-2-Clause"
] | 227
|
2016-08-18T23:12:33.000Z
|
2022-01-19T14:06:12.000Z
|
#!/usr/bin/env python
from sys import argv
import json
import glob
import os
import re
import subprocess
def create_params(target,data):
    """Write the Makefile preamble: paths, target/device variables, per-
    container build dirs, toolchain binaries, library includes and base flags.

    ``target`` is a writable file object; ``data`` is the parsed description
    dict. NOTE(review): assumes the current working directory contains an
    "apps" path component (raises ValueError otherwise) — confirm callers
    always run from inside the apps tree.
    """
    target.write("# Points to Utility Directory\n")
    dirName = os.getcwd()
    dirNameList = list(dirName.split("/"))
    dirNameIndex = dirNameList.index("apps")
    # Depth below "apps" determines how many "../" hops reach the repo root.
    diff = len(dirNameList) - dirNameIndex - 1
    target.write("COMMON_REPO = ")
    while diff > 0:
        target.write("../")
        diff -= 1
    target.write("\n")
    target.write("ABS_COMMON_REPO = $(shell readlink -f $(COMMON_REPO))\n")
    target.write("\n")
    target.write("TARGETS := hw\n")
    target.write("TARGET := $(TARGETS)\n")
    target.write("DEVICE := $(DEVICES)\n")
    target.write("XCLBIN := ./xclbin\n")
    target.write("\n")
    target.write("include ./utils.mk\n")
    target.write("\n")
    target.write("DSA := $(call device2dsa, $(DEVICE))\n")
    target.write("BUILD_DIR := ./_x.$(TARGET).$(DSA)\n")
    target.write("\n")
    # One BUILD_DIR_<name> variable per container.
    if "containers" in data:
        for con in data["containers"]:
            target.write("BUILD_DIR_")
            target.write(con["name"])
            target.write(" = $(BUILD_DIR)/")
            target.write(con["name"])
            target.write("\n")
    target.write("\n")
    target.write("CXX := ")
    target.write("$(XILINX_SDX)/bin/xcpp\n")
    target.write("XOCC := ")
    target.write("$(XILINX_SDX)/bin/xocc\n")
    target.write("\n")
    # Library include lines and their flag/source contributions.
    add_libs1(target, data)
    add_libs2(target, data)
    if "config_make" in data:
        target.write("include ")
        target.write(data["config_make"])
        target.write("\n\n")
    target.write("CXXFLAGS += $(opencl_CXXFLAGS) -Wall -O0 -g -std=c++14\n")
    target.write("LDFLAGS += $(opencl_LDFLAGS)\n")
    target.write("\n")
    return
def add_libs1(target, data):
    """Write the include line for the common OpenCL makefile plus one
    include per extra library listed under data["libs"]."""
    target.write("#Include Libraries\n")
    target.write("include $(ABS_COMMON_REPO)/libs/opencl/opencl.mk\n")
    if "libs" in data:
        for lib in data["libs"]:
            target.write("include $(ABS_COMMON_REPO)/libs/" + lib + "/" + lib + ".mk\n")
    return
def add_libs2(target, data):
    """Append per-lib CXXFLAGS/LDFLAGS/HOST_SRCS contributions and any
    explicit linker libraries/options from the config."""
    if "libs" in data:
        # Same pattern for all three variables: " $(<lib><suffix>)" per lib.
        for variable, suffix in (("CXXFLAGS", "_CXXFLAGS"),
                                 ("LDFLAGS", "_LDFLAGS"),
                                 ("HOST_SRCS", "_SRCS")):
            target.write(variable + " +=")
            for lib in data["libs"]:
                target.write(" $(" + lib + suffix + ")")
            target.write("\n")
    if "linker" in data:
        target.write("\nCXXFLAGS +=")
        if "libraries" in data["linker"]:
            for lin in data["linker"]["libraries"]:
                target.write(" -l" + lin)
        if "options" in data["linker"]:
            for lin in data["linker"]["options"]:
                target.write(" " + lin)
        target.write("\n")
    return
def add_host_flags(target, data):
    """Write the host source list, optional header list, and the host
    compiler/linker settings into the generated Makefile."""
    target.write("HOST_SRCS += ")
    # Default host source when the description does not name one.
    target.write(data["host_srcs"] if "host_srcs" in data else "src/host.cpp")
    target.write("\n")
    if "host_hdrs" in data:
        target.write("HOST_HDRS += " + data["host_hdrs"] + "\n")
    target.write("\n# Host compiler global settings\n")
    target.write("CXXFLAGS += -fmessage-length=0")
    if "compiler" in data and "options" in data["compiler"]:
        target.write(data["compiler"]["options"])
    target.write("\n")
    target.write("LDFLAGS += -lrt -lstdc++ \n\n")
    return
def add_kernel_flags(target, data):
    """Write kernel compiler (CLFLAGS), kernel linker (LDCLFLAGS), executable
    name, command-line arguments and emconfig directory variables.

    The literal 'PROJECT' in clflags/ldclflags/cmd_args is rewritten to '.',
    and 'BUILD' in cmd_args to '$(XCLBIN)'.
    """
    target.write("# Kernel compiler global settings\n")
    target.write("CLFLAGS += ")
    target.write("-t $(TARGET) --platform $(DEVICE) --save-temps \n")
    # One --max_memory_ports line per accelerator that requests it.
    if "containers" in data:
        for con in data["containers"]:
            for acc in con["accelerators"]:
                if "max_memory_ports" in acc:
                    target.write("CLFLAGS += ")
                    target.write(" --max_memory_ports ")
                    target.write(acc["name"])
                    target.write("\n")
    # Extra per-accelerator compile flags, with PROJECT -> '.' substitution.
    if "containers" in data:
        for con in data["containers"]:
            for acc in con["accelerators"]:
                if "clflags" in acc:
                    target.write("CLFLAGS +=")
                    flags = acc["clflags"].split(" ")
                    for flg in flags[0:]:
                        target.write(" ")
                        flg = flg.replace('PROJECT', '.')
                        target.write(flg)
                    target.write("\n")
    # Preprocessor symbols become -D defines on the host compile line.
    if "compiler" in data:
        if "symbols" in data["compiler"]:
            target.write("\nCXXFLAGS +=")
            for sym in data["compiler"]["symbols"]:
                target.write(" ")
                target.write("-D")
                target.write(sym)
            target.write("\n")
    # Per-container kernel linker flags, with the same PROJECT substitution.
    if "containers" in data:
        for con in data["containers"]:
            if "ldclflags" in con:
                target.write("\n")
                target.write("# Kernel linker flags\n")
                target.write("LDCLFLAGS +=")
                ldclflags = con["ldclflags"].split(" ")
                for flg in ldclflags[0:]:
                    target.write(" ")
                    flg = flg.replace('PROJECT', '.')
                    target.write(flg)
                target.write("\n")
    target.write("\n")
    target.write("EXECUTABLE = ")
    if "host_exe" in data:
        target.write(data["host_exe"])
    else:
        target.write("host")
    # Rewrite cmd_args; args referencing $(XCLBIN) get the full xclbin suffix.
    if "cmd_args" in data:
        target.write("\n")
        target.write("CMD_ARGS =")
        cmd_args = data["cmd_args"].split(" ")
        for cmdargs in cmd_args[0:]:
            target.write(" ")
            cmdargs = cmdargs.replace('.xclbin', '')
            cmdargs = cmdargs.replace('BUILD', '$(XCLBIN)')
            cmdargs = cmdargs.replace('PROJECT', '.')
            target.write(cmdargs)
            if "$(XCLBIN)" in cmdargs:
                target.write(".$(TARGET).$(DSA).xclbin")
    target.write("\n\n")
    target.write("EMCONFIG_DIR = $(XCLBIN)/$(DSA)")
    target.write("\n\n")
    return
def add_containers(target, data):
    """Declare each .xclbin binary container and the .xo objects it is
    linked from."""
    if "containers" in data:
        for con in data["containers"]:
            target.write("BINARY_CONTAINERS += $(XCLBIN)/%s.$(TARGET).$(DSA).xclbin\n"
                         % con["name"])
            if "accelerators" in con:
                for acc in con["accelerators"]:
                    target.write("BINARY_CONTAINER_%s_OBJS += $(XCLBIN)/%s.$(TARGET).$(DSA).xo\n"
                                 % (con["name"], acc["name"]))
    target.write("\n")
def building_kernel(target, data):
    """Write OpenCL kernel build rules: one xocc -c rule per accelerator
    and one xocc -l link rule per container."""
    target.write("# Building kernel\n")
    containers = data.get("containers", [])
    # Compile rules: <kernel>.cl -> $(XCLBIN)/<kernel>.xo
    for con in containers:
        for acc in con.get("accelerators", []):
            target.write("$(XCLBIN)/%s.$(TARGET).$(DSA).xo: %s\n"
                         % (acc["name"], acc["location"]))
            target.write("\tmkdir -p $(XCLBIN)\n")
            target.write("\t$(XOCC) $(CLFLAGS) --temp_dir $(BUILD_DIR_%s) "
                         "-c -k %s -I'$(<D)' -o'$@' '$<'\n"
                         % (con["name"], acc["name"]))
    # Link rules: .xo objects -> $(XCLBIN)/<container>.xclbin
    for con in containers:
        target.write("$(XCLBIN)/%s.$(TARGET).$(DSA).xclbin: $(BINARY_CONTAINER_%s_OBJS)\n"
                     % (con["name"], con["name"]))
        target.write("\tmkdir -p $(XCLBIN)\n")
        target.write("\t$(XOCC) $(CLFLAGS) --temp_dir $(BUILD_DIR_%s) -l $(LDCLFLAGS)"
                     % con["name"])
        for acc in con["accelerators"]:
            # Default to one compute unit per kernel unless overridden.
            units = acc["num_compute_units"] if "num_compute_units" in acc.keys() else "1"
            target.write(" --nk %s:%s" % (acc["name"], units))
        target.write(" -o'$@' $(+)\n")
    target.write("\n")
def building_kernel_rtl(target, data):
    """Write link-only xclbin rules for RTL kernels (pre-built .xo files)."""
    target.write("# Building kernel\n")
    for con in data.get("containers", []):
        xclbin = "$(XCLBIN)/%s.$(TARGET).$(DSA).xclbin" % con["name"]
        target.write("%s: $(BINARY_CONTAINER_%s_OBJS)\n" % (xclbin, con["name"]))
        target.write("\tmkdir -p $(XCLBIN)\n")
        # RTL flow: xocc only links the finished .xo objects.
        target.write("\t$(XOCC) $(CLFLAGS) $(LDCLFLAGS) -lo %s" % xclbin)
        for acc in con["accelerators"]:
            target.write(" $(XCLBIN)/%s.$(TARGET).$(DSA).xo" % acc["name"])
        target.write("\n\n")
def building_host(target, data):
    """Write the host compile rule and the $(EMCONFIG_DIR)-based emconfig goal.

    `num_devices` in the description adds emconfigutil's --nd switch.
    """
    target.write("# Building Host\n")
    target.write("$(EXECUTABLE): check-xrt $(HOST_SRCS) $(HOST_HDRS)\n")
    target.write("\t$(CXX) $(CXXFLAGS) $(HOST_SRCS) $(HOST_HDRS) -o '$@' $(LDFLAGS)\n")
    target.write("\n")
    target.write("emconfig:$(EMCONFIG_DIR)/emconfig.json\n")
    target.write("$(EMCONFIG_DIR)/emconfig.json:\n")
    emcfg = "\temconfigutil --platform $(DEVICE) --od $(EMCONFIG_DIR)"
    if "num_devices" in data:
        emcfg += " --nd " + data["num_devices"]
    target.write(emcfg + "\n\n")
def building_host_rtl(target, data):
    """Write the host compile rule and a CWD-local emconfig goal (RTL flow)."""
    for line in (
        "# Building Host\n",
        "$(EXECUTABLE): check-xrt $(HOST_SRCS) $(HOST_HDRS)\n",
        "\t$(CXX) $(CXXFLAGS) $(HOST_SRCS) $(HOST_HDRS) -o '$@' $(LDFLAGS)\n",
        "\n",
        "emconfig:emconfig.json\n",
        "emconfig.json:\n",
        "\temconfigutil --platform $(DSA) --nd 1\n\n",
    ):
        target.write(line)
def profile_report(target):
    """Write the xrt.ini contents that turn on profile-summary generation."""
    target.write("[Debug]\nprofile=true\n")
def mk_clean(target, data):
    """Write `clean` and `cleanall` goals; `cleanall` additionally removes
    any output files the example description declares."""
    for line in (
        "# Cleaning stuff\n",
        "clean:\n",
        "\t-$(RMDIR) $(EXECUTABLE) $(XCLBIN)/{*sw_emu*,*hw_emu*} \n",
        "\t-$(RMDIR) profile_* TempConfig system_estimate.xtxt *.rpt *.csv \n",
        "\t-$(RMDIR) src/*.ll _xocc_* .Xil emconfig.json dltmp* xmltmp* *.log *.jou *.wcfg *.wdb\n",
        "\n",
        "cleanall: clean\n",
        "\t-$(RMDIR) $(XCLBIN)\n",
        "\t-$(RMDIR) _x.*\n",
    ):
        target.write(line)
    if "output_files" in data:
        target.write("\t-$(RMDIR) ")
        for name in data["output_files"].split(" "):
            target.write("./" + name + " ")
        target.write("\n")
def mk_build_all(target, data):
    """Write the top-level `all`/`exe`/`build` goals, then delegate to the
    kernel and host rule writers.

    Any accelerator marked kernel_type == "RTL" switches the whole example
    to the RTL link flow.
    """
    target.write("CP = cp -rf\n")
    pieces = data["cmd_args"].split(" ") if "cmd_args" in data else []
    # Examples whose command line references ./data need the DATA variable.
    if any("/data" in piece for piece in pieces):
        target.write("DATA = ./data\n")
    for chunk in (
        "\n",
        ".PHONY: all clean cleanall docs emconfig\n",
        "all: check-devices $(EXECUTABLE) $(BINARY_CONTAINERS) emconfig\n",
        "\n",
        ".PHONY: exe\n",
        "exe: $(EXECUTABLE)\n",
        "\n",
        ".PHONY: build\n",
        "build: $(BINARY_CONTAINERS)\n",
        "\n",
    ):
        target.write(chunk)
    has_rtl = any(
        acc.get("kernel_type") == "RTL"
        for con in data.get("containers", [])
        for acc in con.get("accelerators", [])
    )
    if has_rtl:
        building_kernel_rtl(target, data)
    else:
        building_kernel(target, data)
    building_host(target, data)
def mk_check(target, data):
    """Write the `check` goal: run the example under emulation (or natively),
    guard unsupported boards/targets, and post-process the profile summary."""

    def write_cmd_args(out):
        # Command-line args with BUILD/PROJECT placeholders expanded; bare
        # .xclbin names are rewritten to their $(TARGET).$(DSA) file names.
        if "cmd_args" not in data:
            return
        for raw in data["cmd_args"].split(" "):
            out.write(" ")
            arg = raw.replace('.xclbin', '')
            arg = arg.replace('BUILD', '$(XCLBIN)').replace('PROJECT', '.')
            out.write(arg)
            if "$(XCLBIN)" in arg:
                out.write(".$(TARGET).$(DSA).xclbin")

    target.write("check: all\n")
    # Boards the example is known not to support: make `check` a hard error.
    if "nboard" in data:
        for board in data["nboard"]:
            target.write("ifeq ($(findstring %s, $(DEVICE)), %s)\n" % (board, board))
            target.write("$(error Nothing to be done for make)\n")
            target.write("endif\n")
    target.write("\n")
    target.write("ifeq ($(TARGET),$(filter $(TARGET),sw_emu hw_emu))\n")
    target.write("\t$(CP) $(EMCONFIG_DIR)/emconfig.json .\n")
    target.write("\tXCL_EMULATION_MODE=$(TARGET) ./$(EXECUTABLE)")
    write_cmd_args(target)
    target.write("\nelse\n")
    target.write("\t ./$(EXECUTABLE)")
    write_cmd_args(target)
    target.write("\nendif\n")
    if "targets" in data:
        target.write("ifneq ($(TARGET),$(findstring $(TARGET),")
        for tgt in data["targets"]:
            target.write(" " + tgt)
        target.write("))\n")
        target.write("$(warning WARNING:Application supports only")
        for tgt in data["targets"]:
            target.write(" " + tgt)
        target.write(" TARGET. Please use the target for running the application)\n")
        target.write("endif\n")
        target.write("\n")
    # Every example except the matrix-multiply tutorial converts the profile
    # summary to HTML after the run.
    if data["example"] != "00 Matrix Multiplication":
        target.write("\tsdx_analyze profile -i profile_summary.csv -f html\n")
    target.write("\n")
def run_nimbix(target, data):
    """Write the `run_nimbix` goal; $(CMD_ARGS) is passed through only when
    the example declares command-line arguments."""
    argline = " $(CMD_ARGS)" if "cmd_args" in data else ""
    target.write("run_nimbix: all\n")
    target.write("\t$(COMMON_REPO)/utility/nimbix/run_nimbix.py $(EXECUTABLE)%s $(DSA)\n\n"
                 % argline)
def aws_build(target):
    """Write the `aws_build` goal used to produce AWS F1 binaries."""
    target.write("aws_build: check-aws_repo $(BINARY_CONTAINERS)\n"
                 "\t$(COMMON_REPO)/utility/aws/run_aws.py $(BINARY_CONTAINERS)\n\n")
def mk_help(target):
    """Write the self-documenting `help` goal that echoes Makefile usage."""
    messages = (
        "Makefile Usage:",
        " make all TARGET=<sw_emu/hw_emu/hw> DEVICE=<FPGA platform>",
        " Command to generate the design for specified Target and Device.",
        "",
        " make clean ",
        " Command to remove the generated non-hardware files.",
        "",
        " make cleanall",
        " Command to remove all the generated files.",
        "",
        " make check TARGET=<sw_emu/hw_emu/hw> DEVICE=<FPGA platform>",
        " Command to run application in emulation.",
        "",
        " make build TARGET=<sw_emu/hw_emu/hw> DEVICE=<FPGA platform>",
        " Command to build xclbin application.",
        "",
        " make run_nimbix DEVICE=<FPGA platform>",
        " Command to run application on Nimbix Cloud.",
        "",
        " make aws_build DEVICE=<FPGA platform>",
        " Command to build AWS xclbin application on AWS Cloud.",
        "",
    )
    target.write(".PHONY: help\n")
    target.write("\n")
    target.write("help::\n")
    for msg in messages:
        target.write("\t$(ECHO) \"%s\"\n" % msg)
    target.write("\n")
def report_gen(target, data):
    """Write the PROFILE/DEBUG knobs for utils.mk.

    The PROFILE block is suppressed when the description explicitly sets
    testinfo.profile == "no"; the DEBUG block is always emitted.
    """
    border = "#+-------------------------------------------------------------------------------\n"
    target.write(border)
    target.write("# The following parameters are assigned with default values. These parameters can\n")
    target.write("# be overridden through the make command line\n")
    target.write(border)
    target.write("\n")
    skip_profile = ("testinfo" in data and "profile" in data["testinfo"]
                    and data["testinfo"]["profile"] == "no")
    if not skip_profile:
        for line in (
            "PROFILE := no\n",
            "\n",
            "#Generates profile summary report\n",
            "ifeq ($(PROFILE), yes)\n",
            "LDCLFLAGS += --profile_kernel data:all:all:all\n",
            "endif\n",
            "\n",
        ):
            target.write(line)
    for line in (
        "DEBUG := no\n",
        "\n",
        "#Generates debug summary report\n",
        "ifeq ($(DEBUG), yes)\n",
        "CLFLAGS += --dk protocol:all:all:all\n",
        "endif\n",
        "\n",
    ):
        target.write(line)
def device2dsa_gen(target):
    """Write the device2dsa make macro (device file name -> DSA name)."""
    target.write("# device2dsa - create a filesystem friendly name from device name\n"
                 "# $(1) - full name of device\n"
                 "device2dsa = $(strip $(patsubst %.xpfm, % , $(shell basename $(DEVICE))))\n"
                 "\n")
def util_checks(target):
    """Write environment sanity checks (XILINX_SDX, XILINX_XRT, DEVICE,
    SDACCEL_DIR) as make conditionals."""
    for line in (
        "#Checks for XILINX_SDX\n",
        "ifndef XILINX_SDX\n",
        "$(error XILINX_SDX variable is not set, please set correctly and rerun)\n",
        "endif\n",
        "\n",
        "#Checks for XILINX_XRT\n",
        "check-xrt:\n",
        "ifndef XILINX_XRT\n",
        "\t$(error XILINX_XRT variable is not set, please set correctly and rerun)\n",
        "endif\n",
        "\n",
        "check-devices:\n",
        "ifndef DEVICE\n",
        "\t$(error DEVICE not set. Please set the DEVICE properly and rerun. Run \"make help\" for more details.)\n",
        "endif\n",
        "\n",
        "check-aws_repo:\n",
        "ifndef SDACCEL_DIR\n",
        "\t$(error SDACCEL_DIR not set. Please set it properly and rerun. Run \"make help\" for more details.)\n",
        "endif\n",
        "\n",
    ):
        target.write(line)
def clean_util(target):
    """Declare the rm helper variables and the quiet-echo macro."""
    target.write("# Cleaning stuff\n"
                 "RM = rm -f\n"
                 "RMDIR = rm -rf\n"
                 "\n"
                 "ECHO:= @echo\n"
                 "\n")
def readme_gen(target):
    """Write the `docs` goal that regenerates README.md from description.json."""
    target.write("docs: README.md\n"
                 "\n"
                 "README.md: description.json\n"
                 "\t$(ABS_COMMON_REPO)/utility/readme_gen/readme_gen.py description.json"
                 "\n")
def create_mk(target, data):
    """Write the complete Makefile for one example, section by section.

    The order is significant: later sections reference variables declared
    by earlier ones (e.g. flags before build rules, containers before the
    `check` goal).
    """
    mk_help(target)
    create_params(target,data)
    add_host_flags(target, data)
    add_kernel_flags(target, data)
    add_containers(target, data)
    mk_build_all(target, data)
    mk_check(target, data)
    run_nimbix(target, data)
    aws_build(target)
    mk_clean(target,data)
    return
def create_utils(target, data):
    """Write utils.mk: PROFILE/DEBUG knobs, environment checks, the
    device2dsa macro, rm/echo helpers and the README generation goal."""
    report_gen(target, data)
    util_checks(target)
    device2dsa_gen(target)
    clean_util(target)
    readme_gen(target)
    return
# ---------------------------------------------------------------------------
# Entry point: load the example description JSON and generate xrt.ini,
# Makefile and utils.mk unless the description flags manual edits.
#
# Bug fix: the original ended with `target.close` (attribute access, not a
# call) and re-bound `target` without closing earlier files, so none of the
# generated files were explicitly closed; `with` blocks now guarantee it.
# Prints use the parenthesised form so the script runs on Python 2 and 3.
# ---------------------------------------------------------------------------
script, desc_file = argv
with open(desc_file, 'r') as desc:
    data = json.load(desc)

if "match_ini" in data and data["match_ini"] == "false":
    print("Error:: xrt.ini File Manually Edited:: Auto-file Generator Failed")
else:
    print("Generating xrt.ini file for %s" % data["example"])
    with open("xrt.ini", "w+") as target:
        profile_report(target)

if "match_makefile" in data and data["match_makefile"] == "false":
    print("Error:: Makefile Manually Edited:: AutoMakefile Generator Failed")
else:
    print("Generating Auto-Makefile for %s" % data["example"])
    with open("Makefile", "w") as target:
        create_mk(target, data)

    print("Generating utils.mk file for %s" % data["example"])
    with open("utils.mk", "w+") as target:
        create_utils(target, data)
| 35.420608
| 126
| 0.571033
|
4a0bc7ce96c813eb90cc9895886bc9abf7376ee0
| 2,840
|
py
|
Python
|
jacobi/sharedAlt.py
|
nqiao/Voxel-Modeling
|
ee4cb96037cfde6adc9ad7b719d1ccf1f5ced5ae
|
[
"MIT"
] | null | null | null |
jacobi/sharedAlt.py
|
nqiao/Voxel-Modeling
|
ee4cb96037cfde6adc9ad7b719d1ccf1f5ced5ae
|
[
"MIT"
] | null | null | null |
jacobi/sharedAlt.py
|
nqiao/Voxel-Modeling
|
ee4cb96037cfde6adc9ad7b719d1ccf1f5ced5ae
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
from numba import jit, cuda, float32, int32
'''Create Global Variable that is the block size'''
TPB = 8
RAD = 1
SH_N = 10 # MUST AGREE WITH TPB+2*RAD
'''
stencil = (
(.05, .2, .05),
(.2, 0., .2),
(.05, .2, .05))
See, for example, en.wikipedia.org/wiki/Discrete_Laplace_operator
The -3 in the center cancels out for Laplace's equation.
'''
#@cuda.jit("void(float32[:,:],float32[:,:])")
@cuda.jit
def updateKernel(d_v, d_u):
    """One Jacobi stencil step: d_v[i, j] = weighted sum of d_u's eight
    neighbours, staged through a (TPB + 2*RAD)^2 shared-memory tile.

    Bug fixes vs. the original:
    - the write guard referenced an undefined global ``n``; it now uses the
      actual array dims,
    - the halo-corner stores were each written twice, one copy with the
      wrong column offset (``sh_j + NX`` instead of ``sh_j + NY``),
    - ``stencil`` was re-bound to a nested Python list after being declared
      as a local array (numba cannot index a list with ``[a, b]``); the
      local array is now filled element-wise.
    """
    i, j = cuda.grid(2)
    dims = d_u.shape
    if i >= dims[0] or j >= dims[1]:
        return
    t_i = cuda.threadIdx.x
    t_j = cuda.threadIdx.y
    NX = cuda.blockDim.x
    NY = cuda.blockDim.y
    sh_i = t_i + RAD
    sh_j = t_j + RAD
    sh_u = cuda.shared.array(shape=(SH_N, SH_N), dtype=float32)
    # Load this thread's interior value.
    sh_u[sh_i, sh_j] = d_u[i, j]
    # Halo edge values.  NOTE(review): i - RAD / j - RAD can be negative for
    # boundary blocks, as in the original; interior output cells never read
    # those slots, so behaviour is unchanged -- confirm if grids shrink.
    if t_i < RAD:
        sh_u[sh_i - RAD, sh_j] = d_u[i - RAD, j]
        sh_u[sh_i + NX, sh_j] = d_u[i + NX, j]
    if t_j < RAD:
        sh_u[sh_i, sh_j - RAD] = d_u[i, j - RAD]
        sh_u[sh_i, sh_j + NY] = d_u[i, j + NY]
    # Halo corner values.
    if t_i < RAD and t_j < RAD:
        sh_u[sh_i - RAD, sh_j - RAD] = d_u[i - RAD, j - RAD]  # upper left
        sh_u[sh_i + NX, sh_j - RAD] = d_u[i + NX, j - RAD]    # upper right
        sh_u[sh_i - RAD, sh_j + NY] = d_u[i - RAD, j + NY]    # lower left
        sh_u[sh_i + NX, sh_j + NY] = d_u[i + NX, j + NY]      # lower right
    cuda.syncthreads()
    # 3x3 Jacobi weights (centre zero, edges .25, corners .05).
    stencil = cuda.local.array(shape=(3, 3), dtype=float32)
    stencil[0, 0] = .05
    stencil[0, 1] = .25
    stencil[0, 2] = .05
    stencil[1, 0] = .25
    stencil[1, 1] = 0.
    stencil[1, 2] = .25
    stencil[2, 0] = .05
    stencil[2, 1] = .25
    stencil[2, 2] = .05
    # Only interior cells are updated; the boundary keeps its old value.
    if i > 0 and j > 0 and i < dims[0] - 1 and j < dims[1] - 1:
        d_v[i, j] = \
            sh_u[sh_i-1, sh_j-1]*stencil[0, 0] + \
            sh_u[sh_i, sh_j-1]*stencil[1, 0] + \
            sh_u[sh_i+1, sh_j-1]*stencil[2, 0] + \
            sh_u[sh_i-1, sh_j]*stencil[0, 1] + \
            sh_u[sh_i, sh_j]*stencil[1, 1] + \
            sh_u[sh_i+1, sh_j]*stencil[2, 1] + \
            sh_u[sh_i-1, sh_j+1]*stencil[0, 2] + \
            sh_u[sh_i, sh_j+1]*stencil[1, 2] + \
            sh_u[sh_i+1, sh_j+1]*stencil[2, 2]
def update(u, iter_count):
    """Run 2*iter_count Jacobi steps of updateKernel on a copy of *u*.

    Returns the result as a new host array; *u* itself is not modified.
    Each loop pass ping-pongs between two device buffers (d_u -> d_v, then
    d_v -> d_u), so after an even number of launches the latest values are
    always back in d_u.
    """
    ''' Compute number of entries in shared array'''
    #s_memSize = (TPB + 2*RAD)*(TPB + 2*RAD)# * u.itemsize
    ''' Compute memory needed for dynamic shared array'''
    #s_memSize = dist_array.itemsize*shN
    d_u = cuda.to_device(u)
    # Scratch buffer; seeding it with u keeps boundary cells valid, since
    # the kernel only writes interior cells.
    d_v = cuda.to_device(u)
    dims = u.shape
    gridSize = [(dims[0]+TPB-1)//TPB, (dims[1]+TPB-1)//TPB]
    blockSize = [TPB, TPB]
    '''Launch kernel with optional parameters specifying the stream number
    and the amount of shared memory to allocate'''
    #not using dyn shared due to lack of 2D arrays???
    for k in range(iter_count):
        updateKernel[gridSize, blockSize](d_v, d_u)
        updateKernel[gridSize, blockSize](d_u, d_v)
    return d_u.copy_to_host()
| 27.572816
| 71
| 0.617958
|
4a0bc7f8136f33722818a1aba9b12935ff527f34
| 1,526
|
py
|
Python
|
setup.py
|
twilio-labs/twilio-anchore
|
4f0413b1c9c093a971d3c966b83cd920ff9c00be
|
[
"MIT"
] | 12
|
2022-01-21T19:25:07.000Z
|
2022-03-14T20:47:46.000Z
|
setup.py
|
twilio-labs/twilio-anchore
|
4f0413b1c9c093a971d3c966b83cd920ff9c00be
|
[
"MIT"
] | 1
|
2022-01-21T21:05:45.000Z
|
2022-01-24T20:32:37.000Z
|
setup.py
|
twilio-labs/twilio-anchore
|
4f0413b1c9c093a971d3c966b83cd920ff9c00be
|
[
"MIT"
] | null | null | null |
import setuptools

# The repository README becomes the PyPI long description verbatim.
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

# Trove classifiers advertised on PyPI.
_CLASSIFIERS = [
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3 :: Only",
    "Operating System :: OS Independent",
]

setuptools.setup(
    name="twilio-anchore",
    version="1.0.0",
    description="Twilio python library that facilitates the use of some features of the Anchore API.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://code.hq.twilio.com/security/twilio-anchore-python-library",
    author="Juan Jose Lopez",
    author_email="jualopez@twilio.com",
    license="MIT",
    classifiers=_CLASSIFIERS,
    keywords="anchore, containers, security",
    project_urls={
        "Source": "https://code.hq.twilio.com/security/twilio-anchore-python-library",
    },
    # Source lives under src/ (the "src layout").
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    install_requires=[
        "requests==2.26.0",
        "pydantic==1.8.2",
    ],
    extras_require={  # Optional
        "dev": ["python-dotenv==0.19.2"],
    },
    python_requires=">=3.5",
)
| 34.681818
| 102
| 0.621232
|
4a0bc894f8c9e6aae7aeb0032ff235c583419c4b
| 6,048
|
py
|
Python
|
zaifapi/exchange_api/trade.py
|
techbureau/zaifapi
|
5b7db7d6abdc76b4e911a74457140b3faf0b7317
|
[
"MIT"
] | 62
|
2017-05-10T12:24:48.000Z
|
2021-03-17T07:03:29.000Z
|
zaifapi/exchange_api/trade.py
|
techbureau/zaifapi
|
5b7db7d6abdc76b4e911a74457140b3faf0b7317
|
[
"MIT"
] | 15
|
2017-06-12T07:12:14.000Z
|
2020-01-30T13:28:53.000Z
|
zaifapi/exchange_api/trade.py
|
techbureau/zaifapi
|
5b7db7d6abdc76b4e911a74457140b3faf0b7317
|
[
"MIT"
] | 19
|
2017-08-23T20:47:14.000Z
|
2018-11-21T10:01:06.000Z
|
import time
import hmac
import hashlib
from decimal import Decimal
from datetime import datetime
from abc import ABCMeta, abstractmethod
from typing import Optional
from urllib.parse import urlencode
from zaifapi.api_common import ApiUrl, get_response, get_api_url, method_name
from zaifapi.api_error import ZaifApiError, ZaifApiNonceError
from . import ZaifExchangeApi
class _ZaifTradeApiBase(ZaifExchangeApi, metaclass=ABCMeta):
    """Shared plumbing for Zaif's authenticated (private) API wrappers."""

    @abstractmethod
    def _get_header(self, params):
        """Return auth headers for the encoded *params*; subclass hook."""
        raise NotImplementedError()

    @staticmethod
    def _get_nonce():
        # Zaif requires a strictly increasing nonce: epoch seconds plus
        # zero-padded microseconds, kept as Decimal so precision survives.
        now = datetime.now()
        seconds = int(time.mktime(now.timetuple()))
        return Decimal("{0}.{1:06d}".format(seconds, now.microsecond))

    def _execute_api(self, func_name, schema_keys=None, params=None):
        """Validate, sign and POST one API call; return its `return` payload.

        Raises ZaifApiNonceError for nonce rejections, ZaifApiError for any
        other API-level failure.
        """
        encoded = self._params_pre_processing(schema_keys or [], params or {}, func_name)
        header = self._get_header(encoded)
        res = get_response(self._url.get_absolute_url(), encoded, header)
        if res["success"] == 0:
            error = res["error"]
            if error.startswith("nonce"):
                raise ZaifApiNonceError(error)
            raise ZaifApiError(error)
        return res["return"]

    def _params_pre_processing(self, keys, params, func_name):
        # Validate against the allowed keys, then attach the API method name
        # and nonce before urlencoding the POST body.
        validated = self._validator.params_pre_processing(keys, params)
        validated["method"] = func_name
        validated["nonce"] = self._get_nonce()
        return urlencode(validated)
def _make_signature(key, secret, params):
signature = hmac.new(bytearray(secret.encode("utf-8")), digestmod=hashlib.sha512)
signature.update(params.encode("utf-8"))
return {"key": key, "sign": signature.hexdigest()}
class ZaifTradeApi(_ZaifTradeApiBase):
    """Key/secret-authenticated wrapper for Zaif's private trade API (tapi).

    NOTE(review): each wrapper passes ``method_name()`` as the remote API
    method — presumably it resolves the calling function's name via frame
    introspection (defined in zaifapi.api_common; confirm there).  These
    method names therefore must match the remote API exactly.
    """

    def __init__(self, key, secret, api_url=None):
        super().__init__(get_api_url(api_url, "tapi"))
        self._key = key
        self._secret = secret

    def _get_header(self, params):
        # Sign the urlencoded POST body with the account's API secret.
        return _make_signature(self._key, self._secret, params)

    def get_info(self):
        """Account balances, open-order counts, API permissions."""
        return self._execute_api(method_name())

    def get_info2(self):
        """Lighter-weight variant of get_info."""
        return self._execute_api(method_name())

    def get_personal_info(self):
        """Nickname / icon information for the account."""
        return self._execute_api(method_name())

    def get_id_info(self):
        """User ID information for the account."""
        return self._execute_api(method_name())

    def trade_history(self, **kwargs):
        """Past trades; kwargs are filters validated against schema_keys."""
        schema_keys = [
            "from_num",
            "count",
            "from_id",
            "end_id",
            "order",
            "since",
            "end",
            "currency_pair",
            "is_token",
        ]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def active_orders(self, **kwargs):
        """Currently open orders."""
        schema_keys = ["currency_pair", "is_token", "is_token_both"]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def _inner_history_api(self, func_name, kwargs):
        # Shared filter schema for withdraw_history / deposit_history.
        schema_keys = [
            "currency",
            "from_num",
            "count",
            "from_id",
            "end_id",
            "order",
            "since",
            "end",
            "is_token",
        ]
        return self._execute_api(func_name, schema_keys, kwargs)

    def withdraw_history(self, **kwargs):
        """Past withdrawals for a currency."""
        return self._inner_history_api(method_name(), kwargs)

    def deposit_history(self, **kwargs):
        """Past deposits for a currency."""
        return self._inner_history_api(method_name(), kwargs)

    def withdraw(self, **kwargs):
        """Request a withdrawal (currency, address, amount, optional fee)."""
        schema_keys = ["currency", "address", "message", "amount", "opt_fee"]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def cancel_order(self, **kwargs):
        """Cancel an open order by order_id."""
        schema_keys = ["order_id", "is_token", "currency_pair"]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def trade(self, **kwargs):
        """Place an order (currency_pair, action, price, amount, ...)."""
        schema_keys = ["currency_pair", "action", "price", "amount", "limit", "comment"]
        return self._execute_api(method_name(), schema_keys, kwargs)
class ZaifLeverageTradeApi(_ZaifTradeApiBase):
    """Key/secret-authenticated wrapper for Zaif's leverage trade API (tlapi).

    NOTE(review): as in ZaifTradeApi, ``method_name()`` presumably derives
    the remote method from the calling function's name — do not rename
    these methods.
    """

    def __init__(self, key, secret, api_url=None):
        api_url = get_api_url(api_url, "tlapi")
        super().__init__(api_url)
        self._key = key
        self._secret = secret

    def _get_header(self, params):
        # HMAC-SHA512 signature over the urlencoded request body.
        return _make_signature(self._key, self._secret, params)

    def get_positions(self, **kwargs):
        """List leverage positions matching the given filters."""
        schema_keys = [
            "type",
            "group_id",
            "from_num",
            "count",
            "from_id",
            "end_id",
            "order",
            "since",
            "end",
            "currency_pair",
        ]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def position_history(self, **kwargs):
        """History of a single leverage position."""
        schema_keys = ["type", "group_id", "leverage_id"]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def active_positions(self, **kwargs):
        """Currently open leverage positions."""
        schema_keys = ["type", "group_id", "currency_pair"]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def create_position(self, **kwargs):
        """Open a new leverage position."""
        schema_keys = [
            "type",
            "group_id",
            "currency_pair",
            "action",
            "price",
            "amount",
            "leverage",
            "limit",
            "stop",
        ]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def change_position(self, **kwargs):
        """Amend price/limit/stop of an open position."""
        schema_keys = ["type", "group_id", "leverage_id", "price", "limit", "stop"]
        return self._execute_api(method_name(), schema_keys, kwargs)

    def cancel_position(self, **kwargs):
        """Close/cancel a leverage position."""
        schema_keys = ["type", "group_id", "leverage_id"]
        return self._execute_api(method_name(), schema_keys, kwargs)
class ZaifTokenTradeApi(ZaifTradeApi):
    """Trade API authenticated with an OAuth token instead of key/secret.

    Bug fix: the request path in the base class calls ``self._get_header``,
    but this class previously overrode ``get_header`` (no leading
    underscore), so the token header was never used and the inherited
    signature path crashed on the ``None`` credentials.  The override now
    targets ``_get_header``; ``get_header`` is kept as an alias for any
    external caller that used the old name.
    """

    def __init__(self, token: str, api_url: Optional[ApiUrl] = None):
        self._token = token
        # Key/secret are unused under token auth.
        super().__init__(None, None, api_url)

    def _get_header(self, params):
        # Token auth: Zaif expects the bare token header, no HMAC signature.
        return {"token": self._token}

    def get_header(self, params):
        """Backward-compatible alias for the old (misnamed) override."""
        return self._get_header(params)
| 31.831579
| 88
| 0.616237
|
4a0bc8d6873e4464d5c8f0bada57b6731693a8a4
| 1,628
|
py
|
Python
|
lib/JumpScale/baselib/influxdb/Influxdb.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | null | null | null |
lib/JumpScale/baselib/influxdb/Influxdb.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | null | null | null |
lib/JumpScale/baselib/influxdb/Influxdb.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | null | null | null |
from JumpScale import j
import redis
from influxdb import client as influxdb
import requests
from requests.auth import HTTPBasicAuth
class InfluxdbFactory:
    """Factory for InfluxDB client handles and raw line-protocol writes."""

    def __init__(self):
        pass

    def get(self, host='localhost', port=8086,username='root', password='root', database=None, ssl=False, verify_ssl=False, timeout=None, use_udp=False, udp_port=4444):
        """Return an influxdb.InfluxDBClient for the given connection parameters."""
        db = influxdb.InfluxDBClient(host=host, port=port, username=username, password=password, database=database, ssl=ssl,
                                     verify_ssl=verify_ssl, timeout=timeout, use_udp=use_udp, udp_port=udp_port)
        return db

    def getByInstance(self, instancename):
        """Build a client for database "main" from the JumpScale
        `influxdb_client` config instance named *instancename*."""
        config = j.core.config.get("influxdb_client", instancename)
        ipaddr = config.get("address")
        port = config.get("port")
        login = config.get("login")
        passwd = config.get("passwd")
        return j.clients.influxdb.get(host=ipaddr, port=port, username=login, password=passwd, database="main")

    def postraw(self, data, host='localhost', port=8086, username='root', password='root', database="main"):
        """POST raw InfluxDB line-protocol *data* to /write.

        format in is
        '''
        hdiops,machine=unit42,datacenter=gent,type=new avg=25,max=37 1434059627
        temperature,machine=unit42,type=assembly external=25,internal=37 1434059627
        '''

        Raises RuntimeError when the server returns a non-empty body
        (InfluxDB reports errors in the response body).
        """
        url = 'http://%s:%s/write?db=%s&precision=s' % (host, port, database)
        r = requests.post(url, data=data, auth=HTTPBasicAuth(username, password))
        # Bug fix: a successful write returns an EMPTY body.  The old check
        # `r.content != ""` always raised on Python 3 because `content` is
        # bytes there (b"" != "" is True); truthiness works on py2 and py3.
        if r.content:
            raise RuntimeError("Could not send data to influxdb.\n%s\n############\n%s" % (data, r.content))
| 37.860465
| 168
| 0.654177
|
4a0bca79186cb5cf7a4a292bae6693efb3efe8df
| 8,330
|
py
|
Python
|
plugins/m/dox.py
|
marinjurjevic/m.css
|
e6eff549fb5edeabacf01369d6b845a2a59c2ebe
|
[
"MIT"
] | 8
|
2021-12-06T16:17:17.000Z
|
2022-03-05T09:23:45.000Z
|
plugins/m/dox.py
|
marinjurjevic/m.css
|
e6eff549fb5edeabacf01369d6b845a2a59c2ebe
|
[
"MIT"
] | 11
|
2021-01-21T08:32:35.000Z
|
2021-06-29T16:13:13.000Z
|
plugins/m/dox.py
|
marinjurjevic/m.css
|
e6eff549fb5edeabacf01369d6b845a2a59c2ebe
|
[
"MIT"
] | 1
|
2021-11-02T02:06:41.000Z
|
2021-11-02T02:06:41.000Z
|
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from docutils.parsers.rst.states import Inliner
from docutils import nodes, utils
from docutils.parsers import rst
from docutils.parsers.rst.roles import set_classes
import xml.etree.ElementTree as ET
import os
import re
import logging
logger = logging.getLogger(__name__)
# Modified from abbr / gh / gl / ... to add support for queries and hashes
link_regexp = re.compile(r'(?P<title>.*) <(?P<link>[^?#]+)(?P<hash>[?#].+)?>')

def parse_link(text):
    """Split ``title <link#hash>`` into (title, link, hash).

    Plain text without the ``<...>`` form yields ('', text, ''); a missing
    query/fragment yields an empty hash.
    """
    unescaped = utils.unescape(text)
    match = link_regexp.match(unescaped)
    if not match:
        return '', unescaped, ''
    title, link, hash = match.group('title', 'link', 'hash')
    return title, link, hash or ''
def init(tagfiles, input):
    """Parse Doxygen tag files and (re)build the global symbol -> URL map.

    ``tagfiles`` is a list of ``(tagfile, url_path[, prefixes[, css_classes]])``
    entries; ``input`` is the directory containing the tag files.
    NOTE(review): the parameter shadows the ``input`` builtin — kept for
    API compatibility with existing callers.
    """
    global symbol_mapping, symbol_prefixes, tagfile_basenames

    # Pre-round to populate subclasses. Clear everything in case we init'd
    # before already.
    tagfile_basenames = []
    symbol_mapping = {}
    symbol_prefixes = ['']

    for f in tagfiles:
        tagfile, path = f[:2]
        prefixes = f[2] if len(f) > 2 else []
        css_classes = f[3] if len(f) > 3 else []
        tagfile_basenames += [(os.path.splitext(os.path.basename(tagfile))[0], path, css_classes)]
        symbol_prefixes += prefixes

        tree = ET.parse(os.path.join(input, tagfile))
        root = tree.getroot()
        for child in root:
            if child.tag == 'compound' and 'kind' in child.attrib:
                # Linking to pages
                if child.attrib['kind'] == 'page':
                    link = path + child.find('filename').text + '.html'
                    symbol_mapping[child.find('name').text] = (child.find('title').text, link, css_classes)

                # Linking to files
                if child.attrib['kind'] == 'file':
                    file_path = child.find('path')
                    link = path + child.find('filename').text + ".html"
                    # Keyed by path + name so identically-named files in
                    # different directories stay distinct.
                    symbol_mapping[(file_path.text if file_path is not None else '') + child.find('name').text] = (None, link, css_classes)

                    for member in child.findall('member'):
                        if not 'kind' in member.attrib: continue

                        # Preprocessor defines and macros; "()" is appended
                        # for function-like macros.
                        if member.attrib['kind'] == 'define':
                            symbol_mapping[member.find('name').text + ('()' if member.find('arglist').text else '')] = (None, link + '#' + member.find('anchor').text, css_classes)

                # Linking to namespaces, structs and classes
                if child.attrib['kind'] in ['class', 'struct', 'namespace']:
                    name = child.find('name').text
                    link = path + child.findtext('filename') # <filename> can be empty (cppreference tag file)
                    symbol_mapping[name] = (None, link, css_classes)
                    for member in child.findall('member'):
                        if not 'kind' in member.attrib: continue

                        # Typedefs, constants
                        if member.attrib['kind'] == 'typedef' or member.attrib['kind'] == 'enumvalue':
                            symbol_mapping[name + '::' + member.find('name').text] = (None, link + '#' + member.find('anchor').text, css_classes)

                        # Functions
                        if member.attrib['kind'] == 'function':
                            # <filename> can be empty (cppreference tag file)
                            symbol_mapping[name + '::' + member.find('name').text + "()"] = (None, link + '#' + member.findtext('anchor'), css_classes)

                        # Enums with values
                        if member.attrib['kind'] == 'enumeration':
                            enumeration = name + '::' + member.find('name').text
                            symbol_mapping[enumeration] = (None, link + '#' + member.find('anchor').text, css_classes)

                            for value in member.findall('enumvalue'):
                                symbol_mapping[enumeration + '::' + value.text] = (None, link + '#' + value.attrib['anchor'], css_classes)

                # Sections (docanchor): title may be absent, falls back to ''.
                for section in child.findall('docanchor'):
                    symbol_mapping[section.text] = (section.attrib.get('title', ''), link + '#' + section.text, css_classes)
def dox(name, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
    """reST role handler: resolve ``:dox:`target``` against the tag-file
    symbol mapping built by init() and emit a reference node.

    Falls back to plain inline text (with a title) or a literal (without)
    when the symbol is unknown, logging a warning either way.
    NOTE(review): the mutable ``options``/``content`` defaults follow the
    docutils role convention — docutils never mutates them.
    """
    title, target, hash = parse_link(text)

    # Otherwise adding classes to the options behaves globally (uh?)
    _options = dict(options)
    set_classes(_options)

    # Avoid assert on adding to undefined member later
    if 'classes' not in _options: _options['classes'] = []

    # Try linking to the whole docs first
    for basename, url, css_classes in tagfile_basenames:
        if basename == target:
            if not title:
                # TODO: extract title from index page in the tagfile
                logger.warning("Link to main page `{}` requires a title".format(target))
                title = target

            _options['classes'] += css_classes
            node = nodes.reference(rawtext, title, refuri=url + hash, **_options)
            return [node], []

    # Each registered prefix is tried in turn, '' first, so bare names and
    # prefixed names both resolve.
    for prefix in symbol_prefixes:
        if prefix + target in symbol_mapping:
            link_title, url, css_classes = symbol_mapping[prefix + target]
            if title:
                use_title = title
            elif link_title:
                use_title = link_title
            else:
                # link_title is '' for title-less anchors; None means the
                # symbol kind never carries a title (no warning then).
                if link_title is not None:
                    logger.warning("Doxygen anchor `{}` has no title, using its ID as link title".format(target))

                use_title = target
            _options['classes'] += css_classes
            node = nodes.reference(rawtext, use_title, refuri=url + hash, **_options)
            return [node], []

    # TODO: print file and line
    #msg = inliner.reporter.warning(
        #'Doxygen symbol %s not found' % target, line=lineno)
    #prb = inliner.problematic(rawtext, rawtext, msg)
    if title:
        logger.warning("Doxygen symbol `{}` not found, rendering just link title".format(target))
        node = nodes.inline(rawtext, title, **_options)
    else:
        logger.warning("Doxygen symbol `{}` not found, rendering as monospace".format(target))
        node = nodes.literal(rawtext, target, **_options)
    return [node], []
def register_mcss(mcss_settings, **kwargs):
    """m.css hook: register the :dox: role and load the configured tag files."""
    rst.roles.register_local_role('dox', dox)
    tagfiles = mcss_settings.get('M_DOX_TAGFILES', [])
    init(tagfiles=tagfiles, input=mcss_settings['INPUT'])
# Below is only Pelican-specific functionality. If Pelican is not found, these
# do nothing.
def _pelican_configure(pelicanobj):
    """Bridge Pelican settings into register_mcss()."""
    settings = {
        # For backwards compatibility, the input directory is pelican's CWD
        'INPUT': os.getcwd(),
    }
    key = 'M_DOX_TAGFILES'
    if key in pelicanobj.settings:
        settings[key] = pelicanobj.settings[key]
    register_mcss(mcss_settings=settings)
def register(): # for Pelican
    """Pelican plugin entry point: hook configuration on `initialized`."""
    # Imported lazily so the module stays importable without Pelican.
    import pelican.signals
    pelican.signals.initialized.connect(_pelican_configure)
| 43.160622
| 179
| 0.601801
|
4a0bcad4d804f55aa5180c6c62c4a3fa389f3cdf
| 643
|
py
|
Python
|
ProyectoFinal/repartidor/models.py
|
PredadorAkrid/IS-2020-2-La-Orden-De-Turing
|
cc292723a7bc4e4c1f848d00484f62ac75e7ad20
|
[
"Apache-2.0"
] | null | null | null |
ProyectoFinal/repartidor/models.py
|
PredadorAkrid/IS-2020-2-La-Orden-De-Turing
|
cc292723a7bc4e4c1f848d00484f62ac75e7ad20
|
[
"Apache-2.0"
] | null | null | null |
ProyectoFinal/repartidor/models.py
|
PredadorAkrid/IS-2020-2-La-Orden-De-Turing
|
cc292723a7bc4e4c1f848d00484f62ac75e7ad20
|
[
"Apache-2.0"
] | null | null | null |
"""Modelos Repartidor"""
#Django
from django.db import models
from django.contrib.auth.models import User
class Repartidor(models.Model):
    """Django model for a delivery person ("repartidor"), linked 1:1 to an auth User."""
    # Each delivery person is backed by exactly one Django auth User account;
    # deleting the user also deletes the repartidor row (CASCADE).
    user = models.OneToOneField(User, on_delete=models.CASCADE, unique=True)
    # NOTE: "repatidor" [sic] — the misspelling is persisted in the DB column
    # name, so fixing it would require a schema migration.
    id_repatidor = models.AutoField(primary_key=True, db_column='id_repatidor')
    nombre_repartidor = models.CharField(max_length=64)
    apellido_paterno_repartidor = models.CharField(max_length=100)
    apellido_materno_repartidor = models.CharField(max_length=100)

    def __str__(self):
        """Return the display name: first name plus paternal surname."""
        return self.nombre_repartidor + " " + self.apellido_paterno_repartidor

    class Meta:
        # Explicit table name and human-readable plural for the admin UI.
        db_table = 'repartidor'
        verbose_name_plural = "Repartidores"
| 35.722222
| 76
| 0.802488
|
4a0bcbc2e8a14114ef6c769931ff8ff4e5c92e71
| 5,692
|
py
|
Python
|
import_data.py
|
SaeedSharifiMa/FairDP
|
8c984e2f11ab465d3155412d3427c5be5b99ab70
|
[
"Xnet",
"X11"
] | 2
|
2020-07-18T06:55:40.000Z
|
2020-11-02T09:09:54.000Z
|
import_data.py
|
SaeedSharifiMa/FairDP
|
8c984e2f11ab465d3155412d3427c5be5b99ab70
|
[
"Xnet",
"X11"
] | null | null | null |
import_data.py
|
SaeedSharifiMa/FairDP
|
8c984e2f11ab465d3155412d3427c5be5b99ab70
|
[
"Xnet",
"X11"
] | 4
|
2020-05-11T03:04:15.000Z
|
2021-04-17T18:11:20.000Z
|
import numpy as np
import pandas as pd
def center(X):
    """Subtract each column's mean in place and return the same DataFrame."""
    for column in X.columns:
        values = X.loc[:, column]
        X.loc[:, column] = values - np.mean(values)
    return X
def add_intercept(X):
    """Append an all-ones 'intercept' column to the predictor matrix in place."""
    n_rows = X.shape[0]
    X['intercept'] = [1 for _ in range(n_rows)]
def one_hot_code(df1, sens_dict):
    """One-hot encode every string-valued column of *df1*.

    Columns with more than two distinct values are expanded into one indicator
    column per value (named ``'<col>.<i>'``) and *sens_dict* is updated so each
    derived column inherits the sensitivity flag of the original column.
    Columns with at most two distinct values are replaced in place by a single
    0/1 indicator under the original name.

    NOTE(review): indicator assignment depends on ``set`` iteration order,
    which is not stable across interpreter runs for strings — callers should
    not rely on which category maps to 1.

    :param df1: input DataFrame (possibly containing string columns)
    :param sens_dict: column name -> 0/1 sensitivity flag, mutated in place
    :return: tuple of (encoded DataFrame, updated sens_dict)
    """
    cols = df1.columns
    for c in cols:
        # Use positional access: after a dropna() upstream the index label 0
        # may no longer exist, which made df1[c][0] raise KeyError.
        if isinstance(df1[c].iloc[0], str):
            column = df1[c]
            # Keyword form: the positional `axis` argument to drop() was
            # deprecated and removed in pandas 2.0.
            df1 = df1.drop(columns=c)
            unique_values = list(set(column))
            n = len(unique_values)
            if n > 2:
                for i in range(n):
                    col_name = '{}.{}'.format(c, i)
                    col_i = [1 if el == unique_values[i] else 0 for el in column]
                    df1[col_name] = col_i
                    # Derived columns inherit the original sensitivity flag.
                    sens_dict[col_name] = sens_dict[c]
                del sens_dict[c]
            else:
                # Binary (or constant) column: encode in place under its own name.
                col_name = c
                col = [1 if el == unique_values[0] else 0 for el in column]
                df1[col_name] = col
    return df1, sens_dict
# num_sens in 1:18
def clean_communities(num_sens):
    """Load and clean the communities & crime data set.

    :param num_sens: 1-based index selecting which sensitive feature to
        binarize (around its median) and return as the protected attribute.
    :return: tuple of (X features, x_prime protected attribute, y binary labels)
    """
    # Data cleaning and import.
    raw = pd.read_csv('dataset/communities.csv')
    raw = raw.fillna(0)
    # Binarize the target: 1 when violent crime is above the 70th percentile.
    crime = raw['ViolentCrimesPerPop']
    threshold = np.percentile(crime, 70)
    y = [np.round((1 + np.sign(value - threshold)) / 2) for value in crime]
    X = raw.iloc[:, 0:122]
    # Columns flagged 1 in the companion file are the sensitive features.
    protected = pd.read_csv('dataset/communities_protected.csv')
    sens_cols = [str(col) for col in protected.columns if protected[col][0] == 1]
    sens_dict = {col: 1 if col in sens_cols else 0 for col in raw.columns}
    raw, sens_dict = one_hot_code(raw, sens_dict)
    sens_names = [name for name in sens_dict.keys() if sens_dict[name] == 1]
    chosen = sens_names[num_sens - 1]
    # Binarize the chosen sensitive feature around its median.
    x_prime = raw[chosen].copy()
    x_prime = 1 * (x_prime > np.median(x_prime))
    X = X.drop(chosen, axis=1)
    return X, x_prime, pd.Series(y)
# num_sens in 1:11
def clean_lawschool(num_sens):
    """Load and clean the law school data set.

    :param num_sens: 1-based index selecting which sensitive feature to
        return as the protected attribute.
    :return: tuple of (df features, x_prime protected attribute, y binary labels)
    """
    # Data cleaning and import.
    df = pd.read_csv('dataset/lawschool.csv')
    df = df.dropna()
    # Convert the categorical gender column to 0/1.
    df['gender'] = df['gender'].map({'female': 1, 'male': 0})
    # Separate the target: bar passage ('P') becomes 1, anything else 0.
    df_y = df['bar1']
    # Keyword form: positional `axis` to drop() was removed in pandas 2.0.
    df = df.drop(columns='bar1')
    y = [int(a == 'P') for a in df_y]
    y = pd.Series(y)
    sens_df = pd.read_csv('dataset/lawschool_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    # One-hot code the race variable into race1..race8, propagating the
    # sensitivity flag of the original 'race' column.
    for i in range(1, 9):
        col_name = 'race{}'.format(i)
        if 'race' in sens_cols:
            sens_dict[col_name] = 1
        else:
            sens_dict[col_name] = 0
        # np.int was removed in NumPy 1.24; it was an alias of the builtin int.
        race_code = [int(r == i) for r in df['race']]
        df[col_name] = race_code
    sens_dict['race'] = 0
    df = df.drop(columns='race')
    sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
    x_prime = df[sens_names].copy()
    # Binarize the continuous sensitive features around their medians.
    # NOTE(review): the original author flagged these medians for confirmation.
    x_prime.age = 1 * (x_prime.age > np.median(x_prime.age))
    x_prime.fam_inc = 1 * (x_prime.fam_inc > np.median(x_prime.fam_inc))
    x_prime = x_prime[sens_names[num_sens - 1]]
    df = df.drop(sens_names[num_sens - 1], axis=1)
    # Reset both indexes to a clean 0..n-1 range.
    df.index = range(len(df))
    x_prime.index = range(len(x_prime))
    return df, x_prime, pd.Series(y)
# num_sens 1:7
def clean_adult(num_sens):
    """Load and clean the adult income data set.

    :param num_sens: 1-based index selecting which sensitive feature to
        return as the protected attribute.
    :return: tuple of (df features, x_prime protected attribute, y binary labels)
    """
    df = pd.read_csv('dataset/adult.csv')
    df = df.dropna()
    # Binarize the target (note the leading spaces in the raw labels) and
    # remove it from the features.
    df['income'] = df['income'].map({' <=50K': 0, ' >50K': 1})
    y = df['income']
    # Keyword form: positional `axis` to drop() was removed in pandas 2.0.
    df = df.drop(columns='income')
    # Hot code categorical variables, tracking sensitivity of derived columns.
    sens_df = pd.read_csv('dataset/adult_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    df, sens_dict = one_hot_code(df, sens_dict)
    sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
    x_prime = df[sens_names].copy()
    # Binarize age around its median before selecting the requested feature.
    x_prime.age = 1 * (x_prime.age > np.median(x_prime.age))
    x_prime = x_prime[sens_names[num_sens - 1]]
    df = df.drop(sens_names[num_sens - 1], axis=1)
    return df, x_prime, y
# num_sens 1:5
# binarize the sensitive features
def clean_student(num_sens):
    """Load and clean the student performance data set.

    All sensitive features are returned together; *num_sens* is accepted for
    interface consistency with the other clean_* functions but is unused here.

    :return: tuple of (df features, x_prime protected attributes, y binary labels)
    """
    df = pd.read_csv('dataset/student-mat.csv', sep=';')
    df = df.dropna()
    # Binarize the final grade G3: pass (>= 11) vs fail. G1/G2 are dropped as
    # well since they are intermediate grades of the same outcome.
    y = df['G3']
    y = [0 if y < 11 else 1 for y in y]
    # Keyword form: positional `axis` to drop() was removed in pandas 2.0.
    df = df.drop(columns=['G3', 'G2', 'G1'])
    sens_df = pd.read_csv('dataset/student_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    print('sensitive features: {}'.format(sens_cols))
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    df, sens_dict = one_hot_code(df, sens_dict)
    sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
    print('there are {} sensitive features including derivative features'.format(len(sens_names)))
    x_prime = df[sens_names].copy()
    df = df.drop(sens_names, axis=1)
    return df, x_prime, pd.Series(y)
| 40.94964
| 109
| 0.599438
|
4a0bcbc8699a39847f8a1e78609c925a23b8aca5
| 1,741
|
py
|
Python
|
retailapp/user/app/auth/auth.py
|
kmsarabu/auroraglobaldb_eks
|
ac4d7064c889e9e5706a828d4e6d9d25d12debb5
|
[
"MIT"
] | null | null | null |
retailapp/user/app/auth/auth.py
|
kmsarabu/auroraglobaldb_eks
|
ac4d7064c889e9e5706a828d4e6d9d25d12debb5
|
[
"MIT"
] | null | null | null |
retailapp/user/app/auth/auth.py
|
kmsarabu/auroraglobaldb_eks
|
ac4d7064c889e9e5706a828d4e6d9d25d12debb5
|
[
"MIT"
] | null | null | null |
from functools import wraps
from flask import request, make_response, jsonify
import os
class AuthError(Exception):
    """Authentication failure carrying a JSON-able payload and an HTTP status."""

    def __init__(self, error, status_code):
        # `error` is a dict with "code"/"description"; `status_code` is the
        # HTTP status the caller should return. Exception.__init__ is
        # deliberately not invoked, matching the original behavior.
        self.error = error
        self.status_code = status_code
def get_token_auth_header():
    """Return the bearer token from the request's Authorization header.

    :raises AuthError: with status 401 when the header is missing or is not
        of the exact form ``Bearer <token>``.
    """
    auth = request.headers.get("Authorization", None)
    if not auth:
        raise AuthError({"code": "authorization_header_missing",
                         "description": "Authorization header is expected"},
                        401)
    parts = auth.split()
    scheme = parts[0].lower()
    if scheme != "bearer":
        raise AuthError({"code": "invalid_header",
                         "description": "Authorization header must start with"
                                        " Bearer"}, 401)
    if len(parts) == 1:
        raise AuthError({"code": "invalid_header",
                         "description": "Token not found"}, 401)
    if len(parts) > 2:
        raise AuthError({"code": "invalid_header",
                         "description": "Authorization header must be"
                                        " Bearer token"}, 401)
    return parts[1]
def requires_auth(f):
    """Decorator enforcing a valid bearer token on the wrapped view.

    The presented token is compared against the AUTHTOKEN environment
    variable (default ``'krishna'``); a mismatch raises AuthError(401).
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = get_token_auth_header()
        valid_token = os.environ.get('AUTHTOKEN', 'krishna')
        # SECURITY: the previous debug print() calls leaked both the shared
        # secret and the presented token to stdout/logs — removed.
        if token != valid_token:
            raise AuthError({"code": "invalid_token",
                             "description": "invalid token"}, 401)
        return f(*args, **kwargs)
    return decorated
| 32.849057
| 69
| 0.549684
|
4a0bcbf8ecdb946bb6f72e93922006fd6b48dba2
| 77
|
py
|
Python
|
recognition/arcface_torch/backbones/__init__.py
|
PatrickHwang/insightface
|
e97344135a88bc0815f60e17f12f13eba1dd875c
|
[
"MIT"
] | 18
|
2018-04-13T02:55:03.000Z
|
2020-10-12T01:37:52.000Z
|
recognition/arcface_torch/backbones/__init__.py
|
DrSnowbird/insightface-docker
|
788cc1410dbff45b1d2aadd73c1d3be5e585e16e
|
[
"MIT"
] | 1
|
2022-01-27T16:36:50.000Z
|
2022-02-04T04:28:37.000Z
|
recognition/arcface_torch/backbones/__init__.py
|
DrSnowbird/insightface-docker
|
788cc1410dbff45b1d2aadd73c1d3be5e585e16e
|
[
"MIT"
] | 7
|
2018-08-29T06:41:19.000Z
|
2019-09-11T06:10:39.000Z
|
from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200
| 38.5
| 76
| 0.831169
|
4a0bccd4c4500390faa7f88d34b806e0f35e2566
| 4,161
|
py
|
Python
|
tests/api/v1_3_3/test_clients.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
tests/api/v1_3_3/test_clients.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
tests/api/v1_3_3/test_clients.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DNACenterAPI clients API fixtures and tests.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
from tests.models.schema_validator import json_schema_validate
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.3', reason='version does not match')
def is_valid_get_overall_client_health(obj):
    """Validate *obj* against the endpoint's JSON schema; True if it conforms."""
    schema = json_schema_validate('jsd_149aa93b4ddb80dd_v1_3_3')
    schema.validate(obj)
    return True
def get_overall_client_health(api):
    """Call the overall-client-health endpoint with an explicit timestamp."""
    return api.clients.get_overall_client_health(timestamp=0)
@pytest.mark.clients
def test_get_overall_client_health(api):
    """The response with explicit parameters must match the JSON schema."""
    response = get_overall_client_health(api)
    assert is_valid_get_overall_client_health(response)
def get_overall_client_health_default(api):
    """Call the overall-client-health endpoint with all-default (None) params."""
    return api.clients.get_overall_client_health(timestamp=None)
@pytest.mark.clients
def test_get_overall_client_health_default(api):
    """Default (None) params must either validate or raise the SDK TypeError."""
    try:
        response = get_overall_client_health_default(api)
        assert is_valid_get_overall_client_health(response)
    except Exception as original_e:
        # Re-raise inside pytest.raises so only the expected TypeError passes.
        with pytest.raises(TypeError, match="but instead we received None"):
            raise original_e
def is_valid_get_client_enrichment_details(obj):
    """Validate *obj* against the endpoint's JSON schema; True if it conforms."""
    schema = json_schema_validate('jsd_b199685d4d089a67_v1_3_3')
    schema.validate(obj)
    return True
def get_client_enrichment_details(api):
    """Call the client-enrichment-details endpoint (it takes no parameters)."""
    return api.clients.get_client_enrichment_details()
@pytest.mark.clients
def test_get_client_enrichment_details(api):
    """The endpoint response must match the JSON schema."""
    response = get_client_enrichment_details(api)
    assert is_valid_get_client_enrichment_details(response)
def get_client_enrichment_details_default(api):
    """Default-parameter variant; identical call since the endpoint has no params."""
    return api.clients.get_client_enrichment_details()
@pytest.mark.clients
def test_get_client_enrichment_details_default(api):
    """Default call must either validate or raise the SDK TypeError."""
    try:
        response = get_client_enrichment_details_default(api)
        assert is_valid_get_client_enrichment_details(response)
    except Exception as original_e:
        # Re-raise inside pytest.raises so only the expected TypeError passes.
        with pytest.raises(TypeError, match="but instead we received None"):
            raise original_e
def is_valid_get_client_detail(obj):
    """Validate *obj* against the endpoint's JSON schema; True if it conforms."""
    schema = json_schema_validate('jsd_e2adba7943bab3e9_v1_3_3')
    schema.validate(obj)
    return True
def get_client_detail(api):
    """Call the client-detail endpoint with placeholder mac_address/timestamp."""
    return api.clients.get_client_detail(mac_address='string', timestamp=0)
@pytest.mark.clients
def test_get_client_detail(api):
    """The response with explicit parameters must match the JSON schema."""
    response = get_client_detail(api)
    assert is_valid_get_client_detail(response)
def get_client_detail_default(api):
    """Call the client-detail endpoint with all-default (None) parameters."""
    return api.clients.get_client_detail(mac_address=None, timestamp=None)
@pytest.mark.clients
def test_get_client_detail_default(api):
    """Default (None) params must either validate or raise the SDK TypeError."""
    try:
        response = get_client_detail_default(api)
        assert is_valid_get_client_detail(response)
    except Exception as original_e:
        # Re-raise inside pytest.raises so only the expected TypeError passes.
        with pytest.raises(TypeError, match="but instead we received None"):
            raise original_e
| 29.097902
| 95
| 0.759673
|
4a0bcd956bf2acabafd67287f793f0613d6d5606
| 28,705
|
py
|
Python
|
usienarl/models/deep_sarsa.py
|
InsaneMonster/USienaRL
|
95eb35913b448c970b83ab941a24657156cfcd9b
|
[
"BSD-3-Clause"
] | 6
|
2019-08-24T08:40:09.000Z
|
2020-05-20T10:54:26.000Z
|
usienarl/models/deep_sarsa.py
|
InsaneMonster/USienaRL
|
95eb35913b448c970b83ab941a24657156cfcd9b
|
[
"BSD-3-Clause"
] | null | null | null |
usienarl/models/deep_sarsa.py
|
InsaneMonster/USienaRL
|
95eb35913b448c970b83ab941a24657156cfcd9b
|
[
"BSD-3-Clause"
] | null | null | null |
# Import packages
import tensorflow
import numpy
import math
# Import required src
from usienarl import SpaceType, Model, Config
from usienarl.utils import SumTree
class Buffer:
    """
    Prioritized experience replay buffer. It uses a sum tree to store not only the samples but also the priority of
    the samples, where the priority of the samples is a representation of the probability of a sample.
    When storing a new sample, the default priority value of the new node associated with such sample is set to
    the maximum defined priority. This value is iteratively changed by the algorithm when update is called.
    It automatically serializes all the parallel data stored in it.
    When getting the samples, a priority segment inversely proportional to the amount of required samples is generated.
    Samples are then uniformly taken for that segment, and stored in a minibatch which is returned.
    Also, a weight to compensate for the over-presence of higher priority samples is returned by the get method,
    along with the minibatch as second returned value.
    """
    _MINIMUM_ALLOWED_PRIORITY: float = 1.0
    _IMPORTANCE_SAMPLING_VALUE_UPPER_BOUND: float = 1.0
    _ABSOLUTE_ERROR_UPPER_BOUND: float = 1.0

    def __init__(self,
                 capacity: int,
                 parallel_amount: int,
                 minimum_sample_probability: float, random_sample_trade_off: float,
                 importance_sampling_value: float, importance_sampling_value_increment: float):
        # Define internal prioritized experience replay buffer attributes
        self._capacity: int = capacity
        self._parallel_amount: int = parallel_amount
        self._minimum_sample_probability: float = minimum_sample_probability
        self._random_sample_trade_off: float = random_sample_trade_off
        self._importance_sampling_value_increment: float = importance_sampling_value_increment
        self._importance_sampling_starting_value: float = importance_sampling_value
        self._sum_tree = SumTree(self._capacity)
        self._importance_sampling_value = self._importance_sampling_starting_value
        # Define internal prioritized experience replay buffer empty attributes
        self._sum_tree_last_sampled_indexes = None
        # Per-parallel-episode flags of the previous step, used to stop storing
        # once an episode has terminated.
        self._episode_done_previous_step: numpy.ndarray = numpy.zeros(self._parallel_amount, dtype=bool)

    def store(self,
              observation_current: numpy.ndarray,
              action_current: numpy.ndarray,
              reward: numpy.ndarray,
              observation_next: numpy.ndarray,
              action_next: numpy.ndarray,
              episode_done: numpy.ndarray):
        """
        Store the time-step in the buffer, serializing immediately all the parallel episodes.

        :param observation_current: the current observation to store in the buffer wrapped in a numpy array
        :param action_current: the last current action to store in the buffer wrapped in a numpy array
        :param reward: the reward obtained from the action at the current state to store in the buffer wrapped in a numpy array
        :param observation_next: the next observation to store in the buffer wrapped in a numpy array
        :param action_next: the last next action to store in the buffer wrapped in a numpy array
        :param episode_done: whether or not this time-step was the last of the episode wrapped in a numpy array
        """
        # Serialize all the experiences to store in the buffer
        for i in range(self._parallel_amount):
            # Skip parallel episodes that already terminated on a previous step.
            if self._episode_done_previous_step[i]:
                continue
            # Find the current max priority on the tree leafs
            max_priority: float = numpy.max(self._sum_tree.leafs)
            # If the max priority is zero set it to the minimum defined
            if max_priority <= 0:
                max_priority = self._MINIMUM_ALLOWED_PRIORITY
            # Set the max priority as the default one for this new sample
            # Note: we set the max priority for each new sample and then improve on it iteratively during training
            self._sum_tree.add((observation_current[i], action_current[i], reward[i], observation_next[i], action_next[i], episode_done[i]), max_priority)
        # Update the stored episode done flags
        self._episode_done_previous_step = episode_done.copy()

    def get(self,
            amount: int = 0):
        """
        Get a batch of data from the buffer of the given size. If size is not given all the buffer is used.

        :param amount: the batch size of data to get
        :return a list containing the arrays of: current observations, actions, rewards, next observations and episode done flags
        """
        # Adjust amount with respect to the size of the sum-tree
        if amount <= 0 or amount > self._sum_tree.size:
            amount = self._sum_tree.size
        # Define arrays of each sample components
        observations_current: [] = []
        actions_current: [] = []
        rewards: [] = []
        observations_next: [] = []
        actions_next: [] = []
        episode_done_flags: [] = []
        # Define the returned arrays of indexes and weights
        self._sum_tree_last_sampled_indexes = numpy.empty((amount,), dtype=numpy.int32)
        importance_sampling_weights = numpy.empty((amount, 1), dtype=numpy.float32)
        # Get the segment of total priority according to the given amount
        # Note: it divides the sum tree priority by the amount and get the priority assigned to each segment
        priority_segment: float = self._sum_tree.total_priority / amount
        # Increase the importance sampling value of the defined increment value until the upper bound is reached
        self._importance_sampling_value = numpy.min((self._IMPORTANCE_SAMPLING_VALUE_UPPER_BOUND, self._importance_sampling_value + self._importance_sampling_value_increment))
        # Compute max importance sampling weight
        # Note: the weight of a given transition is inversely proportional to the probability of the transition stored
        # in the related leaf. The transition probability is computed by normalizing the priority of a leaf with the
        # total priority of the sum tree
        # NOTE(review): while the tree is not full, unused leafs hold priority 0,
        # making min_probability 0 and max_weight infinite — confirm SumTree.leafs
        # only exposes used leafs, otherwise weights degenerate to 0.
        min_probability = numpy.min(self._sum_tree.leafs / self._sum_tree.total_priority)
        max_weight = (min_probability * amount) ** (-self._importance_sampling_value)
        # Return the sample
        for sample in range(amount):
            # Sample a random uniform value between the first and the last priority values of each priority segment
            lower_bound: float = priority_segment * sample
            upper_bound: float = priority_segment * (sample + 1)
            priority_value: float = numpy.random.uniform(lower_bound, upper_bound)
            # Get leaf index and related priority and data as stored in the sum tree
            leaf_index: int = self._sum_tree.get(priority_value)
            leaf_priority: float = self._sum_tree.get_priority(leaf_index)
            leaf_data = self._sum_tree.get_data(leaf_index)
            # Get the probability of the current sample
            sample_probability: float = leaf_priority / self._sum_tree.total_priority
            # Compute the importance sampling weights of each delta
            # The operation is: wj = (1/N * 1/P(j))**b / max wi == (N * P(j))**-b / max wi
            exponent: float = -self._importance_sampling_value
            importance_sampling_weights[sample, 0] = ((sample_probability * amount) ** exponent) / max_weight
            # Add the leaf index to the last sampled indexes list
            self._sum_tree_last_sampled_indexes[sample] = leaf_index
            # Generate the minibatch for this example
            observations_current.append(leaf_data[0])
            actions_current.append(leaf_data[1])
            rewards.append(leaf_data[2])
            observations_next.append(leaf_data[3])
            actions_next.append(leaf_data[4])
            episode_done_flags.append(leaf_data[5])
        # Return the sample (minibatch) with related weights
        return [numpy.array(observations_current), numpy.array(actions_current), numpy.array(rewards), numpy.array(observations_next), numpy.array(actions_next), numpy.array(episode_done_flags), importance_sampling_weights]

    def update(self,
               absolute_errors: []):
        """
        Update the buffer using the given absolute errors.

        :param absolute_errors: the absolute errors on the values predictions
        """
        # If no last sampled indexes are found, stop here
        if self._sum_tree_last_sampled_indexes is None:
            return
        # Avoid absolute error (delta) equal to zero (which would result in zero priority), by adding an epsilon
        absolute_errors += self._minimum_sample_probability
        # Force an upper bound of 1 on each absolute error (delta + epsilon)
        absolute_errors = numpy.minimum(absolute_errors, self._ABSOLUTE_ERROR_UPPER_BOUND)
        # Compute the priority to store as (delta + epsilon)^alpha
        priority_values = absolute_errors ** self._random_sample_trade_off
        # Get only the max priority values along each row (second axis)
        # Note: this is necessary since the absolute error is always zero between the current outputs and target outputs
        # when the action index is not the same of the chosen action of the sample
        priority_values = numpy.amax(priority_values, axis=1)
        # Before zipping reshape the absolute error array to be compatible with the stored tree indexes
        # BUGFIX: ndarray.reshape returns a new array and is not in-place; the
        # original call discarded its result. Assign it back instead.
        priority_values = priority_values.reshape(self._sum_tree_last_sampled_indexes.shape)
        # For each last sampled sum tree leaf index and each correlated priority value update the sum tree
        for sum_tree_index, priority_value in zip(self._sum_tree_last_sampled_indexes, priority_values):
            self._sum_tree.update(sum_tree_index, priority_value)
        # Reset the last sampled sum tree indexes for the next update
        self._sum_tree_last_sampled_indexes = None

    def finish_trajectory(self):
        """
        Finish the trajectory, resetting episode done flags.
        """
        # Reset stored episode done flags
        self._episode_done_previous_step: numpy.ndarray = numpy.zeros(self._parallel_amount, dtype=bool)

    @property
    def capacity(self) -> int:
        """
        The capacity of the buffer.

        :return: the integer capacity of the buffer
        """
        return self._capacity

    @property
    def size(self) -> int:
        """
        The size of the buffer at the current time.

        :return: the integer size of the buffer
        """
        return self._sum_tree.size
class Estimator:
    """
    Estimator defining the real Deep SARSA model. It is used to define two identical models:
    target network and main-network.
    It is generated given the size of the observation and action spaces and the hidden layer config defining the
    hidden layers of the network.

    :param scope: tensorflow variable scope under which all graph nodes are created
    :param observation_space_shape: shape of the observation space
    :param agent_action_space_shape: shape of the (discrete) action space
    :param hidden_layers_config: config object that builds the hidden layers
    :param error_clipping: when True, use a Huber-style clipped loss
    :param huber_delta: threshold at which the Huber loss switches from quadratic to linear
    """
    def __init__(self,
                 scope: str,
                 observation_space_shape, agent_action_space_shape,
                 hidden_layers_config: Config,
                 error_clipping: bool = False,
                 huber_delta: float = 1.0):
        self.scope: str = scope
        with tensorflow.variable_scope(self.scope):
            # Define observations placeholder as a float adaptable array with shape Nx(O) where N is the number of examples and (O) the shape of the observation space
            self.observations = tensorflow.placeholder(shape=[None, *observation_space_shape], dtype=tensorflow.float32, name="observations")
            # Define the q-values targets placeholder with adaptable size NxA where N is the number of examples and A the size of the action space (always discrete)
            self.q_values_targets = tensorflow.placeholder(shape=[None, *agent_action_space_shape], dtype=tensorflow.float32, name="q_values_targets")
            # Define the estimator network hidden layers from the config
            hidden_layers_output = hidden_layers_config.apply_hidden_layers(self.observations)
            # Define the mask
            # NOTE(review): the mask is added to the q-value logits; callers feed
            # 0 for allowed actions and presumably -inf for disallowed ones.
            self.mask = tensorflow.placeholder(shape=[None, *agent_action_space_shape], dtype=tensorflow.float32, name="mask")
            # Define the estimator network predicted q-values given the input observations and the mask
            self.q_values_predictions = tensorflow.add(tensorflow.layers.dense(hidden_layers_output, *agent_action_space_shape, name="q_values_predictions"), self.mask)
            # Define the weights of the targets during the update process (e.g. the importance sampling weights)
            self.loss_weights = tensorflow.placeholder(shape=[None, 1], dtype=tensorflow.float32, name="loss_weights")
            # Define the absolute error
            self.absolute_error = tensorflow.abs(self.q_values_targets - self.q_values_predictions, name="absolute_error")
            # Define the loss with error clipping (huber loss) if required
            if error_clipping:
                self.loss = tensorflow.reduce_mean(self.loss_weights * tensorflow.where(self.absolute_error < huber_delta,
                                                                                       0.5 * tensorflow.square(self.absolute_error),
                                                                                       self.absolute_error - 0.5), name="loss")
            else:
                self.loss = tensorflow.reduce_mean(self.loss_weights * tensorflow.square(self.absolute_error), name="loss")
            # Define the estimator weight parameters
            # Collect this scope's trainable variables sorted by name so two
            # identically-built estimators pair up parameter-by-parameter.
            self.weight_parameters = [variable for variable in tensorflow.trainable_variables() if variable.name.startswith(self.scope)]
            self.weight_parameters = sorted(self.weight_parameters, key=lambda parameter: parameter.name)
class DeepSARSA(Model):
"""
Deep SARSA model with SARSA update rule.
The model is a deep neural network whose hidden layers can be defined by a config parameter.
It uses a target network and a main network to correctly evaluate the expected future reward in order to stabilize
learning. To further stabilize learning error can be clipped through the Huber Loss (note that this may cause
instability in some environments).
In order to synchronize the target network and the main network, every some interval steps model weights have to be
copied from the main network to the target network.
The update rule is the following (Bellman equation):
Q(s, a) = R + gamma * Q(s', a')
It uses also the action predicted at the next observation according to the same policy to update the current
estimate of q-values.
Supported observation spaces:
- discrete
- continuous
Supported action spaces:
- discrete
"""
def __init__(self,
             name: str,
             hidden_layers_config: Config,
             buffer_capacity: int = 1000000,
             learning_rate: float = 1e-3, discount_factor: float = 0.99,
             minimum_sample_probability: float = 1e-2, random_sample_trade_off: float = 0.6,
             importance_sampling_value: float = 1e-3, importance_sampling_value_increment: float = 0.4,
             error_clipping: bool = False, huber_delta: float = 1.0):
    """
    Build a Deep SARSA model.

    :param name: name of the model (passed to the Model base class)
    :param hidden_layers_config: config building the networks' hidden layers
    :param buffer_capacity: capacity of the prioritized replay buffer
    :param learning_rate: Adam optimizer learning rate
    :param discount_factor: gamma in the Bellman update
    :param minimum_sample_probability: epsilon added to priorities in the buffer
    :param random_sample_trade_off: alpha exponent trading off random vs prioritized sampling
    :param importance_sampling_value: starting beta for importance sampling weights
    :param importance_sampling_value_increment: per-sample beta increment
    :param error_clipping: when True, use a Huber-style clipped loss
    :param huber_delta: threshold of the Huber loss
    """
    # Define model attributes
    self.learning_rate: float = learning_rate
    self.discount_factor: float = discount_factor
    # Define internal model attributes
    self._buffer_capacity: int = buffer_capacity
    self._minimum_sample_probability: float = minimum_sample_probability
    self._random_sample_trade_off: float = random_sample_trade_off
    self._importance_sampling_value: float = importance_sampling_value
    self._importance_sampling_value_increment: float = importance_sampling_value_increment
    self._hidden_layers_config: Config = hidden_layers_config
    self._error_clipping: bool = error_clipping
    self._huber_delta: float = huber_delta
    # Define model empty attributes (populated later by _define_graph)
    self.buffer: Buffer or None = None
    # Define internal model empty attributes
    self._target_network: Estimator or None = None
    self._main_network: Estimator or None = None
    self._target_network_observations = None
    self._target_network_q_values_predictions = None
    self._main_network_observations = None
    self._main_network_q_values_predictions = None
    self._main_network_mask = None
    self._target_network_mask = None
    self._q_values_targets = None
    self._loss_weights = None
    self._absolute_error = None
    self._loss = None
    self._optimizer = None
    self._weight_copier = None
    # Generate the base model
    super(DeepSARSA, self).__init__(name)
    # Define the types of allowed observation and action spaces
    self._supported_observation_space_types.append(SpaceType.discrete)
    self._supported_observation_space_types.append(SpaceType.continuous)
    self._supported_action_space_types.append(SpaceType.discrete)
def _define_graph(self):
    """
    Build the tensorflow graph: the replay buffer, the main and target
    estimator networks, the optimizer, the initializer and the operations
    copying main-network weights into the target network.
    """
    # Set the buffer
    self.buffer = Buffer(self._buffer_capacity, self._parallel,
                         self._minimum_sample_probability, self._random_sample_trade_off,
                         self._importance_sampling_value, self._importance_sampling_value_increment)
    # Define two estimator, one for target network and one for main network, with identical structure
    full_scope: str = self._scope + "/" + self._name
    self._main_network = Estimator(full_scope + "/MainNetwork",
                                   self._observation_space_shape, self._agent_action_space_shape,
                                   self._hidden_layers_config,
                                   self._error_clipping, self._huber_delta)
    self._target_network = Estimator(full_scope + "/TargetNetwork",
                                     self._observation_space_shape, self._agent_action_space_shape,
                                     self._hidden_layers_config,
                                     self._error_clipping, self._huber_delta)
    # Assign main and target networks to the model attributes
    # (aliases so the rest of the model does not reach into the estimators)
    self._main_network_observations = self._main_network.observations
    self._main_network_q_values_predictions = self._main_network.q_values_predictions
    self._target_network_q_values_predictions = self._target_network.q_values_predictions
    self._target_network_observations = self._target_network.observations
    self._main_network_mask = self._main_network.mask
    self._target_network_mask = self._target_network.mask
    self._q_values_targets = self._main_network.q_values_targets
    self._absolute_error = self._main_network.absolute_error
    self._loss = self._main_network.loss
    self._loss_weights = self._main_network.loss_weights
    # Define global operations
    with tensorflow.variable_scope(full_scope):
        # Define the optimizer (training only updates the main network's loss)
        self._optimizer = tensorflow.train.AdamOptimizer(self.learning_rate).minimize(self._loss)
        # Define the initializer
        self._initializer = tensorflow.variables_initializer(tensorflow.get_collection(tensorflow.GraphKeys.GLOBAL_VARIABLES, full_scope), name="initializer")
        # Define the weight copier (to copy weights from main network to target network)
        # One assign op per parameter pair; both lists are name-sorted by Estimator.
        self._weight_copier = []
        for main_network_parameter, target_network_parameter in zip(self._main_network.weight_parameters, self._target_network.weight_parameters):
            copy_operation = target_network_parameter.assign(main_network_parameter)
            self._weight_copier.append(copy_operation)
def get_q_values(self,
                 session,
                 observation_current: numpy.ndarray,
                 possible_actions: [] = None):
    """
    Get all the q-values according to the model at the given current observation.

    :param session: the session of tensorflow currently running
    :param observation_current: the current observation of the agent in the environment to base prediction upon, wrapped in a numpy array
    :param possible_actions: the optional list used to remove certain actions from the prediction
    :return: all q-values predicted by the model
    """
    # If there is no possible actions list generate a full pass-through mask otherwise generate a mask upon it
    # (the mask is added to the network's logits: 0 keeps an action, -inf removes it)
    if possible_actions is None:
        mask: numpy.ndarray = numpy.zeros((self._parallel, *self._agent_action_space_shape), dtype=float)
    else:
        mask: numpy.ndarray = -math.inf * numpy.ones((self._parallel, *self._agent_action_space_shape), dtype=float)
        for i in range(self._parallel):
            mask[i, possible_actions[i]] = 0.0
    # Generate a one-hot encoded version of the observation if observation space is discrete
    if self._observation_space_type == SpaceType.discrete:
        observation_current: numpy.ndarray = numpy.eye(*self._observation_space_shape)[numpy.array(observation_current).reshape(-1)]
    # Compute the q-values predicted by main and target networks
    main_network_q_values = session.run(self._main_network_q_values_predictions,
                                        feed_dict={
                                            self._main_network_observations: observation_current,
                                            self._main_network_mask: mask
                                        })
    target_network_q_values = session.run(self._target_network_q_values_predictions,
                                          feed_dict={
                                              self._target_network_observations: observation_current,
                                              self._target_network_mask: mask
                                          })
    # Return the average of the q-values of the two networks
    return (main_network_q_values + target_network_q_values) / 2
def get_action_with_highest_q_value(self,
                                    session,
                                    observation_current: numpy.ndarray,
                                    possible_actions: [] = None):
    """
    Get the action with highest q-value from the q-values predicted by the model at the given current observation.

    :param session: the session of tensorflow currently running
    :param observation_current: the current observation of the agent in the environment to base prediction upon, wrapped in a numpy array
    :param possible_actions: the optional list used to remove certain actions from the prediction
    :return: the action chosen by the model
    """
    # Predict the q-values first, then greedily pick the best action per environment
    predicted_q_values = self.get_q_values(session, observation_current, possible_actions)
    return numpy.argmax(predicted_q_values, axis=1)
def get_action_with_highest_q_value_and_q_values(self,
                                                 session,
                                                 observation_current: numpy.ndarray,
                                                 possible_actions: [] = None):
    """
    Get the action with highest q-value from the q-values predicted by the model at the given current observation
    and all the q-values according to the model at the given current observation.

    :param session: the session of tensorflow currently running
    :param observation_current: the current observation of the agent in the environment to base prediction upon, wrapped in a numpy array
    :param possible_actions: the optional list used to remove certain actions from the prediction
    :return: the action chosen by the model and all q-values predicted by the model
    """
    # Predict once and derive both the greedy action and the raw q-values from it
    predicted_q_values = self.get_q_values(session, observation_current, possible_actions)
    greedy_actions = numpy.argmax(predicted_q_values, axis=1)
    return greedy_actions, predicted_q_values
def copy_weights(self,
                 session):
    """
    Copy the weights from the main network to the target network.

    :param session: the session of tensorflow currently running
    """
    # Run all the weight copy operations (the assign ops built in _weight_copier)
    session.run(self._weight_copier)
def update(self,
           session,
           batch: []):
    """
    Update the model weights (thus training the model) using a batch of samples. Update is performed using the
    SARSA Bellman equation to compute the model target q-values.

    :param session: the session of tensorflow currently running
    :param batch: a batch of samples each one consisting in a tuple of observation current, action current, reward, observation next, action next, episode done flag and sample weight
    :return: the loss and its relative absolute error
    """
    # Generate a full pass-through mask for each example in the batch
    masks: numpy.ndarray = numpy.zeros((len(batch[0]), *self._agent_action_space_shape), dtype=float)
    # Unpack the batch into numpy arrays
    observations_current, actions_current, rewards, observations_next, actions_next, episode_done_flags, weights = batch[0], batch[1], batch[2], batch[3], batch[4], batch[5], batch[6]
    # Generate a one-hot encoded version of the observations if space type is discrete
    if self._observation_space_type == SpaceType.discrete:
        observations_current: numpy.ndarray = numpy.eye(*self._observation_space_shape)[observations_current.reshape(-1)]
        observations_next: numpy.ndarray = numpy.eye(*self._observation_space_shape)[observations_next.reshape(-1)]
    # Get the q-values from the model at both current observations and next observations
    # Next observation is estimated by the target network
    q_values_current: numpy.ndarray = session.run(self._main_network_q_values_predictions,
                                                  feed_dict={
                                                      self._main_network_observations: observations_current,
                                                      self._main_network_mask: masks
                                                  })
    q_values_next: numpy.ndarray = session.run(self._target_network_q_values_predictions,
                                               feed_dict={
                                                   self._target_network_observations: observations_next,
                                                   self._target_network_mask: masks
                                               })
    # Apply Bellman equation with the SARSA update rule: only the q-value of the
    # action actually taken is overwritten with the backup target; all other
    # entries keep the network's own prediction so their gradient is zero.
    for sample_index in range(len(actions_current)):
        # Extract current sample values
        action_current = actions_current[sample_index]
        action_next = actions_next[sample_index]
        reward: float = rewards[sample_index]
        episode_done: bool = episode_done_flags[sample_index]
        # Note: only the immediate reward can be assigned at end of the episode, i.e. when next observation is None
        if episode_done:
            q_values_current[sample_index, action_current] = reward
        else:
            # SARSA: bootstrap on the q-value of the *taken* next action, not the max
            q_values_current[sample_index, action_current] = reward + self.discount_factor * q_values_next[sample_index, action_next]
    # Train the model and save the value of the loss and of the absolute error as well as the summary
    _, loss, absolute_error = session.run([self._optimizer, self._loss, self._absolute_error],
                                          feed_dict={
                                              self._main_network_observations: observations_current,
                                              self._q_values_targets: q_values_current,
                                              self._loss_weights: weights,
                                              self._main_network_mask: masks
                                          })
    # Return the loss and the absolute error
    return loss, absolute_error
@property
def warmup_steps(self) -> int:
    # Number of environment steps to collect before training starts:
    # here, enough to fill the experience replay buffer to capacity.
    return self._buffer_capacity
| 59.677755
| 223
| 0.663787
|
4a0bcdd22a7a4f6a5e9ba96c1a4a9926cbb87f57
| 923
|
py
|
Python
|
src/program/migrations/0042_auto_20170715_1547.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | 7
|
2017-04-14T15:28:29.000Z
|
2021-09-10T09:45:38.000Z
|
src/program/migrations/0042_auto_20170715_1547.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | 799
|
2016-04-28T09:31:50.000Z
|
2022-03-29T09:05:02.000Z
|
src/program/migrations/0042_auto_20170715_1547.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | 35
|
2016-04-28T09:23:53.000Z
|
2021-05-02T12:36:01.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-15 13:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a private ``submission_notes`` text field to both EventProposal
    and SpeakerProposal.

    Historical migration: do not edit the operations retroactively.
    """

    dependencies = [("program", "0041_auto_20170711_2248")]

    operations = [
        migrations.AddField(
            model_name="eventproposal",
            name="submission_notes",
            field=models.TextField(
                blank=True,
                help_text="Private notes for the event. Only visible to the submitting user and the BornHack organisers.",
            ),
        ),
        migrations.AddField(
            model_name="speakerproposal",
            name="submission_notes",
            # NOTE(review): help_text says "for the event" here too — looks
            # copy-pasted from the field above; confirm against the current
            # model definition (migrations themselves must stay as-is).
            field=models.TextField(
                blank=True,
                help_text="Private notes for the event. Only visible to the submitting user and the BornHack organisers.",
            ),
        ),
    ]
| 30.766667
| 122
| 0.600217
|
4a0bcdee5d6e2072efabe2c451cf120ed86c37f5
| 29,599
|
py
|
Python
|
python/ray/autoscaler/_private/resource_demand_scheduler.py
|
rogertrullo/ray
|
543f7809a6e81a346472662a1dcdf5d3263f6e27
|
[
"Apache-2.0"
] | null | null | null |
python/ray/autoscaler/_private/resource_demand_scheduler.py
|
rogertrullo/ray
|
543f7809a6e81a346472662a1dcdf5d3263f6e27
|
[
"Apache-2.0"
] | null | null | null |
python/ray/autoscaler/_private/resource_demand_scheduler.py
|
rogertrullo/ray
|
543f7809a6e81a346472662a1dcdf5d3263f6e27
|
[
"Apache-2.0"
] | null | null | null |
"""Implements multi-node-type autoscaling.
This file implements an autoscaling algorithm that is aware of multiple node
types (e.g., example-multi-node-type.yaml). The Ray autoscaler will pass in
a vector of resource shape demands, and the resource demand scheduler will
return a list of node types that can satisfy the demands given constraints
(i.e., reverse bin packing).
"""
import copy
import numpy as np
import logging
import collections
from numbers import Number
from typing import List, Dict
from ray.autoscaler.node_provider import NodeProvider
from ray.gcs_utils import PlacementGroupTableData
from ray.core.generated.common_pb2 import PlacementStrategy
from ray.autoscaler.tags import (TAG_RAY_USER_NODE_TYPE, NODE_KIND_UNMANAGED,
NODE_TYPE_LEGACY_WORKER, NODE_KIND_WORKER,
NODE_TYPE_LEGACY_HEAD, TAG_RAY_NODE_KIND)
logger = logging.getLogger(__name__)
# e.g., cpu_4_ondemand.
NodeType = str
# e.g., {"resources": ..., "max_workers": ...}.
NodeTypeConfigDict = str
# e.g., {"GPU": 1}.
ResourceDict = Dict[str, Number]
# e.g., "node-1".
NodeID = str
# e.g., "127.0.0.1".
NodeIP = str
class ResourceDemandScheduler:
    """Decides which node types to launch to satisfy resource demands.

    Given the cluster's current/pending nodes, resource demand vectors,
    placement groups and per-type min/max worker constraints, computes a
    Dict[NodeType, int] of nodes to add (reverse bin packing).
    """

    def __init__(self, provider: NodeProvider,
                 node_types: Dict[NodeType, NodeTypeConfigDict],
                 max_workers: int):
        self.provider = provider
        # Deep-copied so legacy resource inference below can mutate it safely.
        self.node_types = copy.deepcopy(node_types)
        self.max_workers = max_workers
        # is_legacy_yaml tracks if the cluster configs was originally without
        # available_node_types and was autofilled with available_node_types.
        self.is_legacy_yaml = (NODE_TYPE_LEGACY_HEAD in node_types
                               and NODE_TYPE_LEGACY_WORKER in node_types)

    def get_nodes_to_launch(
            self,
            nodes: List[NodeID],
            pending_nodes: Dict[NodeType, int],
            resource_demands: List[ResourceDict],
            unused_resources_by_ip: Dict[NodeIP, ResourceDict],
            pending_placement_groups: List[PlacementGroupTableData],
            max_resources_by_ip: Dict[NodeIP, ResourceDict],
            ensure_min_cluster_size: List[ResourceDict] = None,
    ) -> Dict[NodeType, int]:
        """Given resource demands, return node types to add to the cluster.

        This method:
            (1) calculates the resources present in the cluster.
            (2) calculates the remaining nodes to add to respect min_workers
                constraint per node type.
            (3) for each strict spread placement group, reserve space on
                available nodes and launch new nodes if necessary.
            (4) calculates the unfulfilled resource bundles.
            (5) calculates which nodes need to be launched to fulfill all
                the bundle requests, subject to max_worker constraints.

        Args:
            nodes: List of existing nodes in the cluster.
            pending_nodes: Summary of node types currently being launched.
            resource_demands: Vector of resource demands from the scheduler.
            unused_resources_by_ip: Mapping from ip to available resources.
            pending_placement_groups: Placement group demands.
            max_resources_by_ip: Mapping from ip to static node resources.
            ensure_min_cluster_size: Try to ensure the cluster can fit at least
                this set of resources. This differs from resources_demands in
                that we don't take into account existing usage.
        """
        # If the user is using request_resources() API, calculate the remaining
        # delta resources required to meet their requested cluster size.
        if ensure_min_cluster_size is not None:
            used_resources = []
            for ip, max_res in max_resources_by_ip.items():
                res = copy.deepcopy(max_res)
                _inplace_subtract(res, unused_resources_by_ip.get(ip, {}))
                used_resources.append(res)
            # Example: user requests 1000 CPUs, but the cluster is currently
            # 500 CPUs in size with 250 used. Then, the delta is 750 CPUs that
            # we need to fit to get the cluster to scale to 1000.
            resource_requests, _ = get_bin_pack_residual(
                used_resources, ensure_min_cluster_size)
            resource_demands += resource_requests
        else:
            resource_requests = []

        if self.is_legacy_yaml:
            # When using legacy yaml files we need to infer the head & worker
            # node resources from the static node resources from LoadMetrics.
            self._infer_legacy_node_resources_if_needed(max_resources_by_ip)

        # Step 1: compute resources of running + pending nodes.
        node_resources: List[ResourceDict]
        node_type_counts: Dict[NodeType, int]
        node_resources, node_type_counts = self.calculate_node_resources(
            nodes, pending_nodes, unused_resources_by_ip)

        logger.info("Cluster resources: {}".format(node_resources))
        logger.info("Node counts: {}".format(node_type_counts))
        # Step 2: add nodes to add to satisfy min_workers for each type
        node_resources, node_type_counts, min_workers_nodes_to_add = \
            _add_min_workers_nodes(
                node_resources, node_type_counts, self.node_types)

        # Step 3: add nodes for strict spread groups
        logger.info(f"Placement group demands: {pending_placement_groups}")
        placement_group_demand_vector, strict_spreads = \
            placement_groups_to_resource_demands(pending_placement_groups)
        resource_demands.extend(placement_group_demand_vector)

        if self.is_legacy_yaml and \
                not self.node_types[NODE_TYPE_LEGACY_WORKER]["resources"]:
            # Need to launch worker nodes to later infer their
            # resources.
            return self._legacy_worker_node_to_launch(
                nodes, pending_nodes, node_resources, resource_demands)
        placement_group_nodes_to_add, node_resources, node_type_counts = \
            self.reserve_and_allocate_spread(
                strict_spreads, node_resources, node_type_counts)

        # Step 4/5: add nodes for pending tasks, actors, and non-strict spread
        # groups
        unfulfilled, _ = get_bin_pack_residual(node_resources,
                                               resource_demands)
        logger.info("Resource demands: {}".format(resource_demands))
        logger.info("Unfulfilled demands: {}".format(unfulfilled))
        max_to_add = self.max_workers - sum(node_type_counts.values())
        if resource_requests:
            nodes_to_add_based_on_requests = get_nodes_for(
                self.node_types, node_type_counts, max_to_add,
                resource_requests)
        else:
            nodes_to_add_based_on_requests = {}
        nodes_to_add_based_on_demand = get_nodes_for(
            self.node_types, node_type_counts, max_to_add, unfulfilled)
        # Merge nodes to add based on demand and nodes to add based on
        # min_workers constraint. We add them because nodes to add based on
        # demand was calculated after the min_workers constraint was respected.
        total_nodes_to_add = {}
        for node_type in self.node_types:
            nodes_to_add = (min_workers_nodes_to_add.get(
                node_type, 0) + placement_group_nodes_to_add.get(node_type, 0)
                            + nodes_to_add_based_on_demand.get(node_type, 0))
            if nodes_to_add > 0:
                total_nodes_to_add[node_type] = nodes_to_add
        # Limit the number of concurrent launches
        total_nodes_to_add = self._get_concurrent_resource_demand_to_launch(
            total_nodes_to_add, unused_resources_by_ip.keys(), nodes,
            pending_nodes, nodes_to_add_based_on_requests)

        logger.info("Node requests: {}".format(total_nodes_to_add))
        return total_nodes_to_add

    def _legacy_worker_node_to_launch(
            self, nodes: List[NodeID], pending_nodes: Dict[NodeType, int],
            node_resources: List[ResourceDict],
            resource_demands: List[ResourceDict]) -> Dict[NodeType, int]:
        """Get worker nodes to launch when resources missing in legacy yamls.

        If there is unfulfilled demand and we don't know the resources of the
        workers, it returns max(1, min_workers) worker nodes from which we
        later calculate the node resources.
        """
        if self.max_workers == 0:
            return {}
        elif pending_nodes or len(nodes) > 1:
            # If we are already launching a worker node.
            # If first worker node fails this will never launch more nodes.
            return {}
        else:
            unfulfilled, _ = get_bin_pack_residual(node_resources,
                                                   resource_demands)
            if self.node_types[NODE_TYPE_LEGACY_WORKER]["min_workers"] > 0 or \
                    unfulfilled:
                return {
                    NODE_TYPE_LEGACY_WORKER: max(
                        1, self.node_types[NODE_TYPE_LEGACY_WORKER][
                            "min_workers"])
                }
            else:
                return {}

    def _infer_legacy_node_resources_if_needed(
            self, max_resources_by_ip: Dict[NodeIP, ResourceDict]
    ) -> (bool, Dict[NodeType, int]):
        """Infers node resources for legacy config files.

        Updates the resources of the head and worker node types in
        self.node_types.

        Args:
            max_resources_by_ip: Mapping from ip to static node resources.
        """
        # We fill the head node resources only once.
        if not self.node_types[NODE_TYPE_LEGACY_HEAD]["resources"]:
            assert len(max_resources_by_ip) == 1  # Only the head node.
            self.node_types[NODE_TYPE_LEGACY_HEAD]["resources"] = next(
                iter(max_resources_by_ip.values()))
        # We fill the worker node resources only once.
        if not self.node_types[NODE_TYPE_LEGACY_WORKER]["resources"]:
            if len(max_resources_by_ip) > 1:
                # Set the node_types here as we already launched a worker node
                # from which we directly get the node_resources.
                worker_nodes = self.provider.non_terminated_nodes(
                    tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
                worker_node_ips = [
                    self.provider.internal_ip(node_id)
                    for node_id in worker_nodes
                ]
                for ip in worker_node_ips:
                    if ip in max_resources_by_ip:
                        self.node_types[NODE_TYPE_LEGACY_WORKER][
                            "resources"] = max_resources_by_ip[ip]
                assert self.node_types[NODE_TYPE_LEGACY_WORKER]["resources"]

    def _get_concurrent_resource_demand_to_launch(
            self,
            to_launch: Dict[NodeType, int],
            connected_nodes: List[NodeIP],
            non_terminated_nodes: List[NodeID],
            pending_launches_nodes: Dict[NodeType, int],
            nodes_to_add_based_on_requests: Dict[NodeType, int],
    ) -> Dict[NodeType, int]:
        """Updates the max concurrent resources to launch for each node type.

        Given the current nodes that should be launched, the non terminated
        nodes (running and pending) and the pending to be launched nodes. This
        method calculates the maximum number of nodes to launch concurrently
        for each node type as follows:
            1) Calculates the running nodes.
            2) Calculates the pending nodes and gets the launching nodes.
            3) Limits the total number of pending + currently-launching +
               to-be-launched nodes to max(5, frac * running_nodes[node_type]).

        Args:
            to_launch: List of number of nodes to launch based on resource
                demand for every node type.
            connected_nodes: Running nodes (from LoadMetrics).
            non_terminated_nodes: Non terminated nodes (pending/running).
            pending_launches_nodes: Nodes that are in the launch queue.
            nodes_to_add_based_on_requests: Nodes to launch to satisfy
                request_resources(). This overrides the launch limits since the
                user is hinting to immediately scale up to this size.

        Returns:
            Dict[NodeType, int]: Maximum number of nodes to launch for each
                node type.
        """
        # TODO(ameer): Consider making frac configurable.
        frac = 1
        updated_nodes_to_launch = {}
        running_nodes, pending_nodes = \
            self._separate_running_and_pending_nodes(
                non_terminated_nodes, connected_nodes,
            )
        for node_type in to_launch:
            # Enforce here max allowed pending nodes to be frac of total
            # running nodes.
            max_allowed_pending_nodes = max(
                5, int(frac * running_nodes[node_type]))
            total_pending_nodes = pending_launches_nodes.get(
                node_type, 0) + pending_nodes[node_type]

            # upper_bound is the largest of three allowances; min_workers and
            # request_resources() may exceed the pending-node throttle.
            upper_bound = max(
                max_allowed_pending_nodes - total_pending_nodes,
                # Allow more nodes if this is to respect min_workers.
                self.node_types[node_type].get("min_workers", 0) -
                total_pending_nodes - running_nodes[node_type],
                # Allow more nodes from request_resources API.
                nodes_to_add_based_on_requests.get(node_type,
                                                   0) - total_pending_nodes)

            if upper_bound > 0:
                updated_nodes_to_launch[node_type] = min(
                    upper_bound, to_launch[node_type])
        return updated_nodes_to_launch

    def _separate_running_and_pending_nodes(
            self,
            non_terminated_nodes: List[NodeID],
            connected_nodes: List[NodeIP],
    ) -> (Dict[NodeType, int], Dict[NodeType, int]):
        """Splits connected and non terminated nodes to pending & running."""
        running_nodes = collections.defaultdict(int)
        pending_nodes = collections.defaultdict(int)
        for node_id in non_terminated_nodes:
            tags = self.provider.node_tags(node_id)
            if TAG_RAY_USER_NODE_TYPE in tags:
                node_type = tags[TAG_RAY_USER_NODE_TYPE]
                node_ip = self.provider.internal_ip(node_id)
                # A node is "running" once its raylet has connected (its ip
                # appears in LoadMetrics); otherwise it is still pending.
                if node_ip in connected_nodes:
                    running_nodes[node_type] += 1
                else:
                    pending_nodes[node_type] += 1
        return running_nodes, pending_nodes

    def calculate_node_resources(
            self, nodes: List[NodeID], pending_nodes: Dict[NodeID, int],
            unused_resources_by_ip: Dict[str, ResourceDict]
    ) -> (List[ResourceDict], Dict[NodeType, int]):
        """Returns node resource list and node type counts.

        Counts the running nodes, pending nodes.

        Args:
            nodes: Existing nodes.
            pending_nodes: Pending nodes.

        Returns:
            node_resources: a list of running + pending resources.
                E.g., [{"CPU": 4}, {"GPU": 2}].
            node_type_counts: running + pending workers per node type.
        """
        node_resources = []
        node_type_counts = collections.defaultdict(int)

        def add_node(node_type, available_resources=None):
            # Record one node of node_type, preferring its live (unused)
            # resources over the static config when available.
            if node_type not in self.node_types:
                logger.warn(
                    f"Missing entry for node_type {node_type} in "
                    f"cluster config: {self.node_types} under entry "
                    f"available_node_types. This node's resources will be "
                    f"ignored. If you are using an unmanaged node, manually "
                    f"set the user_node_type tag to \"{NODE_KIND_UNMANAGED}\""
                    f"in your cloud provider's management console.")
                return None
            # Careful not to include the same dict object multiple times.
            available = copy.deepcopy(self.node_types[node_type]["resources"])
            # If available_resources is None this might be because the node is
            # no longer pending, but the raylet hasn't sent a heartbeat to gcs
            # yet.
            if available_resources is not None:
                available = copy.deepcopy(available_resources)

            node_resources.append(available)
            node_type_counts[node_type] += 1

        for node_id in nodes:
            tags = self.provider.node_tags(node_id)
            if TAG_RAY_USER_NODE_TYPE in tags:
                node_type = tags[TAG_RAY_USER_NODE_TYPE]
                ip = self.provider.internal_ip(node_id)
                available_resources = unused_resources_by_ip.get(ip)
                add_node(node_type, available_resources)

        for node_type, count in pending_nodes.items():
            for _ in range(count):
                add_node(node_type)

        return node_resources, node_type_counts

    def reserve_and_allocate_spread(self,
                                    strict_spreads: List[List[ResourceDict]],
                                    node_resources: List[ResourceDict],
                                    node_type_counts: Dict[NodeType, int]):
        """For each strict spread, attempt to reserve as much space as possible
        on the node, then allocate new nodes for the unfulfilled portion.

        Args:
            strict_spreads (List[List[ResourceDict]]): A list of placement
                groups which must be spread out.
            node_resources (List[ResourceDict]): Available node resources in
                the cluster.
            node_type_counts (Dict[NodeType, int]): The amount of each type of
                node pending or in the cluster.

        Returns:
            Dict[NodeType, int]: Nodes to add.
            List[ResourceDict]: The updated node_resources after the method.
            Dict[NodeType, int]: The updated node_type_counts.
        """
        to_add = collections.defaultdict(int)
        for bundles in strict_spreads:
            # Try to pack as many bundles of this group as possible on existing
            # nodes. The remaining will be allocated on new nodes.
            unfulfilled, node_resources = get_bin_pack_residual(
                node_resources, bundles, strict_spread=True)
            max_to_add = self.max_workers - sum(node_type_counts.values())
            # Allocate new nodes for the remaining bundles that don't fit.
            to_launch = get_nodes_for(
                self.node_types,
                node_type_counts,
                max_to_add,
                unfulfilled,
                strict_spread=True)
            _inplace_add(node_type_counts, to_launch)
            _inplace_add(to_add, to_launch)
            new_node_resources = _node_type_counts_to_node_resources(
                self.node_types, to_launch)
            # Update node resources to include newly launched nodes and their
            # bundles.
            unfulfilled, including_reserved = get_bin_pack_residual(
                new_node_resources, unfulfilled, strict_spread=True)
            assert not unfulfilled
            node_resources += including_reserved
        return to_add, node_resources, node_type_counts

    def debug_string(self, nodes: List[NodeID],
                     pending_nodes: Dict[NodeID, int],
                     unused_resources_by_ip: Dict[str, ResourceDict]) -> str:
        """Return a human-readable summary of worker node type counts."""
        node_resources, node_type_counts = self.calculate_node_resources(
            nodes, pending_nodes, unused_resources_by_ip)

        out = "Worker node types:"
        for node_type, count in node_type_counts.items():
            out += "\n - {}: {}".format(node_type, count)
            if pending_nodes.get(node_type):
                out += " ({} pending)".format(pending_nodes[node_type])

        return out
def _node_type_counts_to_node_resources(
        node_types: Dict[NodeType, NodeTypeConfigDict],
        node_type_counts: Dict[NodeType, int]) -> List[ResourceDict]:
    """Converts a node_type_counts dict into a list of node_resources."""
    # Each list entry must be an independent dict, hence the per-node .copy().
    return [
        node_types[node_type]["resources"].copy()
        for node_type, count in node_type_counts.items()
        for _ in range(count)
    ]
def _add_min_workers_nodes(
        node_resources: List[ResourceDict],
        node_type_counts: Dict[NodeType, int],
        node_types: Dict[NodeType, NodeTypeConfigDict],
) -> (List[ResourceDict], Dict[NodeType, int], Dict[NodeType, int]):
    """Updates resource demands to respect the min_workers constraint.

    Args:
        node_resources: Resources of existing nodes already launched/pending.
        node_type_counts: Counts of existing nodes already launched/pending.
        node_types: Node types config.

    Returns:
        node_resources: The updated node resources after adding min_workers
            constraint per node type.
        node_type_counts: The updated node counts after adding min_workers
            constraint per node type.
        total_nodes_to_add: The nodes to add to respect min_workers constraint.
    """
    total_nodes_to_add_dict = {}
    for node_type, config in node_types.items():
        existing = node_type_counts.get(node_type, 0)
        target = config.get("min_workers", 0)
        if existing < target:
            total_nodes_to_add_dict[node_type] = target - existing
            node_type_counts[node_type] = target
            # Bug fix: previously a single deep-copied dict was appended
            # `target - existing` times ([available] * n aliases one object),
            # so a later in-place subtraction on one entry would corrupt all
            # of them. Append an independent copy per added node instead.
            node_resources.extend(
                copy.deepcopy(node_types[node_type]["resources"])
                for _ in range(total_nodes_to_add_dict[node_type]))

    return node_resources, node_type_counts, total_nodes_to_add_dict
def get_nodes_for(node_types: Dict[NodeType, NodeTypeConfigDict],
                  existing_nodes: Dict[NodeType, int],
                  max_to_add: int,
                  resources: List[ResourceDict],
                  strict_spread: bool = False) -> Dict[NodeType, int]:
    """Determine nodes to add given resource demands and constraints.

    Greedy loop: each iteration scores every feasible node type by how well
    it would be utilized by the remaining demands, launches one node of the
    best-scoring type, and removes the demands that node absorbs.

    Args:
        node_types: node types config.
        existing_nodes: counts of existing nodes already launched.
            This sets constraints on the number of new nodes to add.
        max_to_add: global constraint on nodes to add.
        resources: resource demands to fulfill.
        strict_spread: If true, each element in `resources` must be placed on a
            different node.

    Returns:
        Dict of count to add for each node type.
    """
    nodes_to_add = collections.defaultdict(int)

    while resources and sum(nodes_to_add.values()) < max_to_add:
        utilization_scores = []
        for node_type in node_types:
            # Skip node types already at their max_workers cap.
            if (existing_nodes.get(node_type, 0) + nodes_to_add.get(
                    node_type, 0) >= node_types[node_type]["max_workers"]):
                continue
            node_resources = node_types[node_type]["resources"]
            if strict_spread:
                # If handling strict spread, only one bundle can be placed on
                # the node.
                score = _utilization_score(node_resources, [resources[0]])
            else:
                score = _utilization_score(node_resources, resources)
            if score is not None:
                utilization_scores.append((score, node_type))

        # Give up, no feasible node.
        if not utilization_scores:
            # TODO (Alex): We will hit this case every time a placement group
            # starts up because placement groups are scheduled via custom
            # resources. This will behave properly with the current utilization
            # score heuristic, but it's a little dangerous and misleading.
            logger.info(
                "No feasible node type to add for {}".format(resources))
            break

        utilization_scores = sorted(utilization_scores, reverse=True)
        best_node_type = utilization_scores[0][1]
        nodes_to_add[best_node_type] += 1
        if strict_spread:
            # One bundle consumed per launched node in strict-spread mode.
            resources = resources[1:]
        else:
            allocated_resource = node_types[best_node_type]["resources"]
            residual, _ = get_bin_pack_residual([allocated_resource],
                                                resources)
            # Each launched node must absorb at least one demand, otherwise
            # the loop could never terminate.
            assert len(residual) < len(resources), (resources, residual)
            resources = residual

    return nodes_to_add
def _utilization_score(node_resources: ResourceDict,
                       resources: List[ResourceDict]) -> float:
    # Score how well a node of `node_resources` would be utilized by the
    # demand bundles in `resources` (greedily packed in order).
    # NOTE(review): despite the `-> float` annotation, this returns None when
    # no bundle fits, and a (min_util, mean_util) tuple otherwise (the tuple
    # sorts lexicographically in get_nodes_for).
    remaining = copy.deepcopy(node_resources)
    fittable = []
    for r in resources:
        if _fits(remaining, r):
            fittable.append(r)
            _inplace_subtract(remaining, r)
    if not fittable:
        return None
    util_by_resources = []
    for k, v in node_resources.items():
        # NOTE(review): raises ZeroDivisionError if a node advertises a
        # zero-quantity resource — presumably configs never do; confirm.
        util = (v - remaining[k]) / v
        # Cubing emphasizes resources that are close to fully used.
        util_by_resources.append(v * (util**3))

    # Prioritize using all resources first, then prioritize overall balance
    # of multiple resources.
    return (min(util_by_resources), np.mean(util_by_resources))
def get_bin_pack_residual(node_resources: List[ResourceDict],
                          resource_demands: List[ResourceDict],
                          strict_spread: bool = False
                          ) -> (List[ResourceDict], List[ResourceDict]):
    """Return a subset of resource_demands that cannot fit in the cluster.

    TODO(ekl): this currently does not guarantee the resources will be packed
    correctly by the Ray scheduler. This is only possible once the Ray backend
    supports a placement groups API.

    Args:
        node_resources (List[ResourceDict]): List of resources per node.
        resource_demands (List[ResourceDict]): List of resource bundles that
            need to be bin packed onto the nodes.
        strict_spread (bool): If true, each element in resource_demands must be
            placed on a different entry in `node_resources`.

    Returns:
        List[ResourceDict] the residual list resources that do not fit.
        List[ResourceDict]: The updated node_resources after the method.
    """
    unfulfilled = []

    # A most naive bin packing algorithm: first-fit, in demand order.
    nodes = copy.deepcopy(node_resources)
    # List of nodes that cannot be used again due to strict spread.
    used = []
    for demand in resource_demands:
        found = False
        node = None
        for i in range(len(nodes)):
            node = nodes[i]
            if _fits(node, demand):
                found = True
                # In the strict_spread case, we can't reuse nodes.
                if strict_spread:
                    used.append(node)
                    del nodes[i]
                break
        if found and node:
            _inplace_subtract(node, demand)
        else:
            unfulfilled.append(demand)

    return unfulfilled, nodes + used
def _fits(node: ResourceDict, resources: ResourceDict) -> bool:
    """Return True iff every demanded quantity fits in the node's resources."""
    # Missing resources on the node count as 0.0 availability.
    return all(amount <= node.get(resource, 0.0)
               for resource, amount in resources.items())
def _inplace_subtract(node: ResourceDict, resources: ResourceDict) -> None:
    """Subtract each demanded quantity from `node` in place.

    Every demanded resource must exist on the node and the result must stay
    non-negative; both are enforced with assertions.
    """
    for resource, amount in resources.items():
        assert resource in node, (resource, node)
        node[resource] -= amount
        assert node[resource] >= 0.0, (node, resource, amount)
def _inplace_add(a: collections.defaultdict, b: Dict) -> None:
"""Generically adds values in `b` to `a`.
a[k] should be defined for all k in b.keys()"""
for k, v in b.items():
a[k] += v
def placement_groups_to_resource_demands(
        pending_placement_groups: List[PlacementGroupTableData]):
    """Preprocess placement group requests into regular resource demand vectors
    when possible. The policy is:
        * STRICT_PACK - Convert to a single bundle.
        * PACK - Flatten into a resource demand vector.
        * STRICT_SPREAD - Cannot be converted.
        * SPREAD - Flatten into a resource demand vector.

    Args:
        pending_placement_groups (List[PlacementGroupData]): List of
            PlacementGroupLoad's.

    Returns:
        List[ResourceDict]: The placement groups which were converted to a
            resource demand vector.
        List[List[ResourceDict]]: The placement groups which should be strictly
            spread.
    """
    resource_demand_vector = []
    unconverted = []
    for placement_group in pending_placement_groups:
        # One ResourceDict per bundle in the group.
        shapes = [
            dict(bundle.unit_resources) for bundle in placement_group.bundles
        ]
        if (placement_group.strategy == PlacementStrategy.PACK
                or placement_group.strategy == PlacementStrategy.SPREAD):
            resource_demand_vector.extend(shapes)
        elif placement_group.strategy == PlacementStrategy.STRICT_PACK:
            # Sum all bundles into one combined demand, since they must land
            # on a single node.
            combined = collections.defaultdict(float)
            for shape in shapes:
                for label, quantity in shape.items():
                    combined[label] += quantity
            resource_demand_vector.append(combined)
        elif (placement_group.strategy == PlacementStrategy.STRICT_SPREAD):
            unconverted.append(shapes)
        else:
            logger.error(
                f"Unknown placement group request type: {placement_group}. "
                f"Please file a bug report "
                f"https://github.com/ray-project/ray/issues/new.")
    return resource_demand_vector, unconverted
| 43.91543
| 79
| 0.630427
|
4a0bce6f1dbd27265cb1dd3335c00a5dc1212306
| 20,224
|
py
|
Python
|
app/routes.py
|
ccodwg/Covid19CanadaAPI
|
0c31435b3c27313af999dfb7eecba8b8b811887f
|
[
"MIT"
] | 1
|
2021-12-17T14:47:58.000Z
|
2021-12-17T14:47:58.000Z
|
app/routes.py
|
ccodwg/Covid19CanadaAPI
|
0c31435b3c27313af999dfb7eecba8b8b811887f
|
[
"MIT"
] | 22
|
2021-01-08T16:28:18.000Z
|
2021-12-20T19:48:38.000Z
|
app/routes.py
|
ccodwg/Covid19CanadaAPI
|
0c31435b3c27313af999dfb7eecba8b8b811887f
|
[
"MIT"
] | 3
|
2021-01-10T00:59:51.000Z
|
2021-10-01T20:46:23.000Z
|
# import app components
from app import app, data
from flask_cors import CORS
CORS(app) # enable CORS for all routes
# import libraries
from flask import request, jsonify
import pandas as pd
import re
from datetime import datetime
from functools import reduce
# define functions
## process date args
def date_arg(arg):
    """Parse a date query argument.

    Accepts DD-MM-YYYY first (the API's primary format), then ISO YYYY-MM-DD.

    :param arg: the raw date string from the request
    :return: a datetime on success, None if neither format matches
    """
    # Try the formats in order; previously this was a nested try pyramid with
    # bare `except:` clauses that swallowed every exception. Catch only the
    # errors strptime actually raises for bad input (ValueError for a
    # non-matching string, TypeError for a non-string such as None).
    for fmt in ('%d-%m-%Y', '%Y-%m-%d'):
        try:
            return datetime.strptime(arg, fmt)
        except (ValueError, TypeError):
            continue
    return None
## process missing arg
def missing_arg(missing):
    """Map the 'missing' query argument onto the placeholder string used
    in the output ('NULL' for any unrecognized value)."""
    placeholders = {'na': 'NA', 'empty': '', 'nan': 'NaN'}
    return placeholders.get(missing, 'NULL')
## get date column
def get_date_col(df):
    """Return the name of the first column starting with 'date_'.

    Raises IndexError if the dataframe has no such column.
    """
    return [col for col in df.columns.values if re.match('date_', col)][0]
# list of dataset by location
# Canada-wide dataset keys into data.ccodwg, index-aligned with data_names.
data_canada = ['cases_timeseries_canada',
               'mortality_timeseries_canada',
               'recovered_timeseries_canada',
               'testing_timeseries_canada',
               'active_timeseries_canada',
               'vaccine_administration_timeseries_canada',
               'vaccine_distribution_timeseries_canada',
               'vaccine_completion_timeseries_canada']
# Provincial dataset keys, same ordering as data_canada / data_names.
data_prov = ['cases_timeseries_prov',
             'mortality_timeseries_prov',
             'recovered_timeseries_prov',
             'testing_timeseries_prov',
             'active_timeseries_prov',
             'vaccine_administration_timeseries_prov',
             'vaccine_distribution_timeseries_prov',
             'vaccine_completion_timeseries_prov']
# Health-region datasets exist only for cases and mortality.
data_hr = ['cases_timeseries_hr',
           'mortality_timeseries_hr']
# Short statistic names, index-aligned with the three lists above.
data_names = ['cases',
              'mortality',
              'recovered',
              'testing',
              'active',
              'avaccine',
              'dvaccine',
              'cvaccine']
# Saskatchewan new-boundary combined datasets, served by /sknew.
data_sknew = ['sk_new_cases_timeseries_hr_combined',
              'sk_new_mortality_timeseries_hr_combined']
# Maps each dataset's date column name to its statistic (response key).
data_names_dates = {
    'date_report': 'cases',
    'date_death_report': 'mortality',
    'date_recovered': 'recovered',
    'date_testing': 'testing',
    'date_active': 'active',
    'date_vaccine_administered': 'avaccine',
    'date_vaccine_distributed': 'dvaccine',
    'date_vaccine_completed': 'cvaccine'
}
# Non-timeseries lookup tables served by /other.
data_other = {
    'prov': 'prov_map',
    'hr': 'hr_map',
    'age_cases': 'age_map_cases',
    'age_mortality': 'age_map_mortality'
}
@app.route('/')
@app.route('/index')
def index():
    """Landing route: Canada-wide summary for the most recent data date.

    NOTE(review): assumes data.version['date'] matches a date present in
    every national time series -- confirm against the data module.
    """
    # initialize response
    response = {}
    # subset dataframes
    dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
    # rename date columns (each file has its own date_* column name)
    for df in dfs.values():
        df.columns = df.columns.str.replace('^date_.*', 'date', regex = True)
    # subset active dataframe to avoid duplicate columns in the merge below
    dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
                                                                                    'cumulative_recovered',
                                                                                    'cumulative_deaths'])
    # merge dataframes on (date, province) into one wide summary frame
    df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
    # convert date column and filter to most recent date
    df['date'] = pd.to_datetime(df['date'], dayfirst=True)
    df = df.loc[df['date'] == data.version['date']]
    # format output (dates back to DD-MM-YYYY strings, NaN -> 'NULL')
    df['date'] = df['date'].dt.strftime('%d-%m-%Y')
    df = df.fillna('NULL')
    response['summary'] = df.to_dict(orient='records')
    # add version to response
    response['version'] = data.version['version']
    # return response
    return response
@app.route('/timeseries')
def timeseries():
    """Time series endpoint.

    Query args:
        stat: a single statistic name (e.g. 'cases'); omitted/unknown = all
        loc: 'canada', 'prov', 'hr', or a specific province/HR code
        date / after / before: date filters (DD-MM-YYYY or YYYY-MM-DD)
        ymd: 'true' to format output dates as YYYY-MM-DD
        missing: placeholder for missing values (na/empty/nan; default NULL)
        version: 'true' to include the data version string
    """
    # initialize response
    response = {}
    # read arguments
    stat = request.args.get('stat')
    loc = request.args.get('loc')
    date = request.args.get('date')
    after = request.args.get('after')
    before = request.args.get('before')
    ymd = request.args.get('ymd')
    missing = request.args.get('missing')
    version = request.args.get('version')
    # process date arguments
    if date:
        date = date_arg(date)
    if after:
        after = date_arg(after)
    if before:
        before = date_arg(before)
    # process other arguments
    missing_val = missing_arg(missing)
    if not loc:
        loc = 'prov'
    # choose the dataset group for the requested aggregation level
    if loc == 'canada':
        datasets = data_canada
    elif loc == 'prov' or loc in data.keys_prov.keys():
        datasets = data_prov
    elif loc == 'hr' or loc in data.keys_hr.keys():
        datasets = data_hr
    else:
        return "Record not found", 404
    # data_names lists statistic names in the same order as each dataset
    # list, so the former copy-pasted if/elif chains collapse to an index
    # lookup. A stat the group does not provide falls through to "all
    # datasets", matching the original behaviour for unknown stats.
    if stat in data_names[:len(datasets)]:
        dfs = [pd.read_csv(data.ccodwg[datasets[data_names.index(stat)]])]
    else:
        # BUG FIX: the 'hr' fallback previously read data_canada here,
        # returning national datasets for a health-region query.
        dfs = [pd.read_csv(data.ccodwg[k]) for k in datasets]
    # filter by location
    if loc in data.keys_prov.keys():
        for i in range(len(dfs)):
            dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_prov[loc]['province']]
    elif loc in data.keys_hr.keys():
        for i in range(len(dfs)):
            dfs[i] = dfs[i].loc[dfs[i]['health_region'] == data.keys_hr[loc]['health_region']]
            # '9999' is treated as a cross-province code and skips the
            # province filter
            if loc != '9999':
                dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_hr[loc]['province']]
    # convert date column
    for i in range(len(dfs)):
        col_date = get_date_col(dfs[i])
        dfs[i][col_date] = pd.to_datetime(dfs[i][col_date], dayfirst=True)
    # filter by date
    for i in range(len(dfs)):
        col_date = get_date_col(dfs[i])
        if date:
            dfs[i] = dfs[i].loc[dfs[i][col_date] == date]
        if after:
            dfs[i] = dfs[i].loc[dfs[i][col_date] >= after]
        if before:
            dfs[i] = dfs[i].loc[dfs[i][col_date] <= before]
    # format output
    for i in range(len(dfs)):
        col_date = get_date_col(dfs[i])
        if ymd == 'true':
            dfs[i][col_date] = dfs[i][col_date].dt.strftime('%Y-%m-%d')
        else:
            dfs[i][col_date] = dfs[i][col_date].dt.strftime('%d-%m-%Y')
        dfs[i] = dfs[i].fillna(missing_val)
        # the response key is the statistic associated with the date column
        resp_name = data_names_dates[col_date]
        response[resp_name] = dfs[i].to_dict(orient='records')
    # add version to response
    if version == 'true':
        response['version'] = data.version['version']
    # return response
    return response
@app.route('/sknew')
def sknew():
    """Saskatchewan new-boundary time series (combined HR datasets).

    Same query interface as /timeseries, restricted to the sk_new_*
    cases and mortality datasets.
    """
    # initialize response
    response = {}
    # read arguments
    stat = request.args.get('stat')
    loc = request.args.get('loc')
    date = request.args.get('date')
    after = request.args.get('after')
    before = request.args.get('before')
    ymd = request.args.get('ymd')
    missing = request.args.get('missing')
    version = request.args.get('version')
    # process date arguments
    if date:
        date = date_arg(date)
    if after:
        after = date_arg(after)
    if before:
        before = date_arg(before)
    # process other arguments
    missing_val = missing_arg(missing)
    # get dataframes (no/unknown stat returns both datasets)
    if stat == 'cases':
        data_name = data_sknew[0]
        dfs = [pd.read_csv(data.ccodwg[data_name])]
    elif stat == 'mortality':
        data_name = data_sknew[1]
        dfs = [pd.read_csv(data.ccodwg[data_name])]
    else:
        dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_sknew}
        dfs = list(dfs.values()) # convert to list
    # filter by location
    if loc in data.keys_prov.keys():
        for i in range(len(dfs)):
            dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_prov[loc]['province']]
    elif loc in data.keys_hr.keys():
        for i in range(len(dfs)):
            dfs[i] = dfs[i].loc[dfs[i]['health_region'] == data.keys_hr[loc]['health_region']]
            # '9999' skips the province filter
            if loc != '9999':
                dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_hr[loc]['province']]
    # convert date column
    for i in range(len(dfs)):
        col_date = get_date_col(dfs[i])
        dfs[i][col_date] = pd.to_datetime(dfs[i][col_date], dayfirst=True)
    # filter by date
    for i in range(len(dfs)):
        col_date = get_date_col(dfs[i])
        if date:
            dfs[i] = dfs[i].loc[dfs[i][col_date] == date]
        if after:
            dfs[i] = dfs[i].loc[dfs[i][col_date] >= after]
        if before:
            dfs[i] = dfs[i].loc[dfs[i][col_date] <= before]
    # format output
    for i in range(len(dfs)):
        col_date = get_date_col(dfs[i])
        if ymd == 'true':
            dfs[i][col_date] = dfs[i][col_date].dt.strftime('%Y-%m-%d')
        else:
            dfs[i][col_date] = dfs[i][col_date].dt.strftime('%d-%m-%Y')
        dfs[i] = dfs[i].fillna(missing_val)
        # determine response name and add dataframe to response
        resp_name = data_names_dates[col_date]
        response[resp_name] = dfs[i].to_dict(orient='records')
    # add version to response
    if version == 'true':
        response['version'] = data.version['version']
    # return response
    return response
@app.route('/summary')
def summary():
    """Summary endpoint: merged snapshot of all statistics for one level.

    Defaults to the most recent data date when no date filters are given.
    Query args mirror /timeseries (minus 'stat').
    """
    # initialize response
    response = {}
    # read arguments
    loc = request.args.get('loc')
    date = request.args.get('date')
    after = request.args.get('after')
    before = request.args.get('before')
    ymd = request.args.get('ymd')
    missing = request.args.get('missing')
    version = request.args.get('version')
    # process date arguments
    if date:
        date = date_arg(date)
    if after:
        after = date_arg(after)
    if before:
        before = date_arg(before)
    # no filters at all: default to the latest data date
    if not date and not after and not before:
        date = data.version['date']
    # process other arguments
    missing_val = missing_arg(missing)
    if not loc:
        loc = 'prov'
    # get dataframes and subset by location
    if loc == 'canada':
        dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
    elif loc == 'prov' or loc in data.keys_prov.keys():
        dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_prov}
    elif loc == 'hr' or loc in data.keys_hr.keys():
        dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_hr}
    else:
        return "Record not found", 404
    # rename date columns
    # BUG FIX: pass regex=True explicitly (as index() already does); newer
    # pandas treats the pattern literally by default, which would leave the
    # 'date_*' columns unrenamed and break the merge below.
    for df in dfs.values():
        df.columns = df.columns.str.replace('^date_.*', 'date', regex=True)
    # subset active dataframe to avoid duplicate columns
    if loc == 'canada':
        dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
                                                                                        'cumulative_recovered',
                                                                                        'cumulative_deaths'])
    elif loc == 'prov' or loc in data.keys_prov.keys():
        dfs['active_timeseries_prov'] = dfs['active_timeseries_prov'].drop(columns=['cumulative_cases',
                                                                                    'cumulative_recovered',
                                                                                    'cumulative_deaths'])
    # merge dataframes (HR data also keys on health_region)
    if loc == 'hr' or loc in data.keys_hr.keys():
        df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province', 'health_region'], how='outer'), dfs.values())
    else:
        df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
    # convert dates column
    df['date'] = pd.to_datetime(df['date'], dayfirst=True)
    # filter by location (leftover debug print removed here)
    if loc in data.keys_prov.keys():
        df = df.loc[df['province'] == data.keys_prov[loc]['province']]
    elif loc in data.keys_hr.keys():
        df = df.loc[df['health_region'] == data.keys_hr[loc]['health_region']]
        # '9999' is treated as a cross-province code and skips this filter
        if loc != '9999':
            df = df.loc[df['province'] == data.keys_hr[loc]['province']]
    # filter by date
    if date:
        df = df.loc[df['date'] == date]
    if after:
        df = df.loc[df['date'] >= after]
    if before:
        df = df.loc[df['date'] <= before]
    # format output
    if ymd == 'true':
        df['date'] = df['date'].dt.strftime('%Y-%m-%d')
    else:
        df['date'] = df['date'].dt.strftime('%d-%m-%Y')
    df = df.fillna(missing_val)
    response['summary'] = df.to_dict(orient='records')
    # add version to response
    if version == 'true':
        response['version'] = data.version['version']
    # return response
    return response
@app.route('/individual')
def individual():
    # Retired endpoint: always responds 404 with a pointer to the archived
    # line-level data on GitHub.
    return "Individual level data are retired. Archived data may be downloaded from GitHub: https://github.com/ccodwg/Covid19Canada", 404
@app.route('/other')
def other():
    """Serve the non-timeseries lookup tables (prov/hr/age maps).

    Query args:
        stat: one of data_other's keys; omitted = all tables
        missing: placeholder for missing values (na/empty/nan; default NULL)
        version: 'true' to include the data version string
    """
    # initialize response
    response = {}
    # read arguments
    stat = request.args.get('stat')
    missing = request.args.get('missing')
    version = request.args.get('version')
    # process other arguments
    missing_val = missing_arg(missing)
    # get dataframes
    if stat:
        # membership test replaces the four identical elif branches
        if stat not in data_other:
            return "Record not found", 404
        df = pd.read_csv(data.ccodwg[data_other[stat]])
        # format output and add dataframe to response under the stat name
        response[stat] = df.fillna(missing_val).to_dict(orient='records')
    else:
        # no stat requested: return every lookup table, keyed by its name
        for name, dataset_key in data_other.items():
            df = pd.read_csv(data.ccodwg[dataset_key])
            response[name] = df.fillna(missing_val).to_dict(orient='records')
    # add version to response
    if version == 'true':
        response['version'] = data.version['version']
    # return response
    return response
@app.route('/version')
def version():
    """Return the data update version string.

    With dateonly=true, only the leading date portion (text before the
    first whitespace) of the version string is returned.
    """
    dateonly = request.args.get('dateonly')
    full_version = data.version['version']
    if dateonly == 'true':
        full_version = full_version.split()[0]
    return {'version': full_version}
@app.route('/datasets')
def datasets():
    """Metadata for all datasets, or for a '|'-separated list of UUIDs."""
    # read UUIDs
    uuid = request.args.get('uuid')
    if uuid is None:
        return data.datasets['datasets']
    requested = uuid.split('|')
    # filter dictionary
    catalogue = data.datasets['datasets']
    try:
        # only a missing UUID can fail in this comprehension, so catch
        # KeyError specifically instead of a blanket Exception that would
        # also hide programming errors
        response = {k: catalogue[k] for k in requested}
    except KeyError as e:
        print(e)
        return "UUID not found", 404
    # return response
    return(response)
@app.route('/archive')
def archive():
    """Archive index endpoint: file metadata for one or more dataset UUIDs.

    Supports date filtering ('all'/'latest'/'first'/explicit date, or
    after/before ranges) and optional de-duplication by file etag.
    """
    # read UUIDs (required, '|'-separated)
    uuid = request.args.get('uuid')
    if uuid is None:
        return "Please specify one or more values for 'uuid', seperated by '|'.", 404
    else:
        uuid = uuid.split('|')
    # read parameters
    date = request.args.get('date')
    after = request.args.get('after')
    before = request.args.get('before')
    remove_duplicates = request.args.get('remove_duplicates')
    if (remove_duplicates):
        remove_duplicates = str(remove_duplicates).lower()
    # process date filters
    if date is None and after is None and before is None:
        # if no filters, return all
        date = 'all'
    else:
        # keyword values pass through; anything else is parsed as a date
        if date and date!= 'all' and date != 'latest' and date != 'first':
            date = date_arg(date)
        if after:
            after = date_arg(after)
        if before:
            before = date_arg(before)
    # get dataframe
    # NOTE(review): data.archive['index'] looks like a shared DataFrame;
    # the to_datetime assignment below mutates a filtered view and may
    # raise SettingWithCopyWarning -- confirm this is intended.
    df = data.archive['index']
    df = df[df['uuid'].isin(uuid)]
    # return 404 if no valid UUIDs
    if len(df) == 0:
        return 'UUID not found', 404
    # date filtering
    df['file_date_true'] = pd.to_datetime(df['file_date_true'])
    if date:
        # if date is defined, after and before are ignored
        if (date == 'all'):
            pass
        elif (date == 'latest'):
            df = df.groupby('uuid').last()
        elif (date == 'first'):
            df = df.groupby('uuid').first()
        else:
            if date:
                df = df[df['file_date_true'] == date]
    else:
        if after:
            df = df[df['file_date_true'] >= after]
        if before:
            df = df[df['file_date_true'] <= before]
    # return 404 if no results found
    if len(df) == 0:
        return 'No results, please check your date filters', 404
    # filter duplicates in the filtered sample
    # not the same thing as remove file_date_duplicate == 1,
    # since the first instance of a duplicate dataset may not
    # be in the filtered sample
    if (remove_duplicates == 'true'):
        df = df.drop_duplicates(subset=['file_etag'])
    # format output
    df['file_date_true'] = df['file_date_true'].dt.strftime('%Y-%m-%d')
    response = jsonify(df.to_dict(orient='records'))
    # return response
    return response
| 32.831169
| 137
| 0.562401
|
4a0bd03c55d02a593385f66664f2f6956da56b8e
| 587
|
py
|
Python
|
Examples/Image/Classification/ResNet/Python/run_Distributed.py
|
Wootai/CNTK
|
5eca042341c8152594e67652a44c3b733a2acaa0
|
[
"RSA-MD"
] | 5
|
2017-08-28T08:27:18.000Z
|
2021-04-20T21:12:52.000Z
|
Examples/Image/Classification/ResNet/Python/run_Distributed.py
|
zhuyawen/CNTK
|
0ee09cf771bda9d4912790e0fed7322e89d86d87
|
[
"RSA-MD"
] | null | null | null |
Examples/Image/Classification/ResNet/Python/run_Distributed.py
|
zhuyawen/CNTK
|
0ee09cf771bda9d4912790e0fed7322e89d86d87
|
[
"RSA-MD"
] | 3
|
2019-08-23T11:42:14.000Z
|
2022-01-06T08:41:32.000Z
|
if __name__ == '__main__':
    # Benchmark driver: time one non-distributed ResNet-110 CIFAR-10
    # training run, then the same script under mpiexec with 2 workers,
    # and print both wall-clock durations.
    import time
    import subprocess
    start_time = time.time()
    subprocess.call(["python", "TrainResNet_CIFAR10_Distributed.py", "-n", "resnet110", "-q", "32", "-a", "0"], stderr=subprocess.STDOUT)
    print("\n--- Non-distributed: %s seconds ---\n" % (time.time() - start_time))
    start_time = time.time()
    subprocess.call(["mpiexec", "-n", "2", "python", "TrainResNet_CIFAR10_Distributed.py", "-n", "resnet110", "-q", "32", "-a", "0"], stderr=subprocess.STDOUT)
    print("\n--- 2 workers : %s seconds ---\n" % (time.time() - start_time))
| 48.916667
| 159
| 0.608177
|
4a0bd0df4d30c56aab32b2013e010da4e1887d9b
| 5,621
|
py
|
Python
|
mpmath/rational.py
|
diofant/mpmath1
|
1deafadddfc9d62f619f6ee80cd469fc001d7a70
|
[
"BSD-3-Clause"
] | null | null | null |
mpmath/rational.py
|
diofant/mpmath1
|
1deafadddfc9d62f619f6ee80cd469fc001d7a70
|
[
"BSD-3-Clause"
] | null | null | null |
mpmath/rational.py
|
diofant/mpmath1
|
1deafadddfc9d62f619f6ee80cd469fc001d7a70
|
[
"BSD-3-Clause"
] | null | null | null |
import numbers
import operator
import sys
from .libmp import int_types, mpf_hash, bitcount, from_man_exp, HASH_MODULUS
new = object.__new__
def create_reduced(p, q, _cache={}):
    """Return p/q as an mpq in lowest terms.

    The mutable default ``_cache`` is intentional: it persists across
    calls and memoizes small, frequently used fractions.
    """
    key = (p, q)
    cached = _cache.get(key)
    if cached is not None:
        return cached
    # Euclidean algorithm for gcd(p, q)
    a, b = p, q
    while b:
        a, b = b, a % b
    if a != 1:
        p //= a
        q //= a
    result = new(mpq)
    result._mpq_ = p, q
    # Speedup integers, half-integers and other small fractions
    if q <= 4 and abs(key[0]) < 100:
        _cache[key] = result
    return result
class mpq(object):
    """
    Exact rational type, currently only intended for internal use.

    Values are stored as a normalized (numerator, denominator) pair in the
    ``_mpq_`` slot; construction is routed through create_reduced() so
    fractions built via the public constructor are always in lowest terms
    with a positive denominator.
    """

    __slots__ = ["_mpq_"]

    def __new__(cls, p, q=1):
        # Accept a (p, q) tuple, another mpq-like object, or plain ints.
        if type(p) is tuple:
            p, q = p
        elif hasattr(p, '_mpq_'):
            p, q = p._mpq_
        return create_reduced(p, q)

    def __repr__(s):
        return "mpq(%s,%s)" % s._mpq_

    def __str__(s):
        return "(%s/%s)" % s._mpq_

    def __int__(s):
        # // floors (rounds toward negative infinity), it does not
        # truncate toward zero.
        a, b = s._mpq_
        return a // b

    def __nonzero__(s):
        return bool(s._mpq_[0])

    __bool__ = __nonzero__

    def __hash__(s):
        # Same scheme CPython documents for hashing rationals: |numerator|
        # times the modular inverse of the denominator mod HASH_MODULUS,
        # so equal numeric values hash equally across types.
        a, b = s._mpq_
        inverse = pow(b, HASH_MODULUS-2, HASH_MODULUS)
        if not inverse:
            # denominator divisible by the modulus: hash as infinity
            h = sys.hash_info.inf
        else:
            h = (abs(a) * inverse) % HASH_MODULUS
        if a < 0: h = -h
        if h == -1: h = -2   # -1 is reserved as CPython's error sentinel
        return h

    def __eq__(s, t):
        ttype = type(t)
        if ttype is mpq:
            return s._mpq_ == t._mpq_
        if ttype in int_types:
            a, b = s._mpq_
            if b != 1:
                return False
            return a == t
        return NotImplemented

    def __ne__(s, t):
        ttype = type(t)
        if ttype is mpq:
            return s._mpq_ != t._mpq_
        if ttype in int_types:
            a, b = s._mpq_
            if b != 1:
                return True
            return a != t
        return NotImplemented

    def _cmp(s, t, op):
        # Shared implementation for <, <=, >, >=; compares by
        # cross-multiplication, which assumes positive denominators (true
        # for values built through the public constructor).
        ttype = type(t)
        if ttype in int_types:
            a, b = s._mpq_
            return op(a, t*b)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return op(a*d, b*c)
        # BUG FIX: previously returned the NotImplementedError *class*;
        # rich comparisons must return the NotImplemented singleton so the
        # interpreter can try the reflected operation or raise TypeError.
        return NotImplemented

    def __lt__(s, t): return s._cmp(t, operator.lt)
    def __le__(s, t): return s._cmp(t, operator.le)
    def __gt__(s, t): return s._cmp(t, operator.gt)
    def __ge__(s, t): return s._cmp(t, operator.ge)

    def __abs__(s):
        a, b = s._mpq_
        if a >= 0:
            return s
        v = new(mpq)
        v._mpq_ = -a, b
        return v

    def __neg__(s):
        a, b = s._mpq_
        v = new(mpq)
        v._mpq_ = -a, b
        return v

    def __pos__(s):
        return s

    def __add__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*d+b*c, b*d)
        if ttype in int_types:
            # adding an integer cannot change the denominator, so the gcd
            # reduction in create_reduced is skipped
            a, b = s._mpq_
            v = new(mpq)
            v._mpq_ = a+b*t, b
            return v
        return NotImplemented

    __radd__ = __add__

    def __sub__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*d-b*c, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            v = new(mpq)
            v._mpq_ = a-b*t, b
            return v
        return NotImplemented

    def __rsub__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(b*c-a*d, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            v = new(mpq)
            v._mpq_ = b*t-a, b
            return v
        return NotImplemented

    def __mul__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*c, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            return create_reduced(a*t, b)
        return NotImplemented

    __rmul__ = __mul__

    def __div__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*d, b*c)
        if ttype in int_types:
            a, b = s._mpq_
            return create_reduced(a, b*t)
        return NotImplemented

    def __rdiv__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(b*c, a*d)
        if ttype in int_types:
            a, b = s._mpq_
            return create_reduced(b*t, a)
        return NotImplemented

    # BUG FIX: __div__/__rdiv__ are the Python 2 hook names and are never
    # invoked by the / operator on Python 3 (this module already requires
    # Python 3 via sys.hash_info above); alias them so division works.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def __pow__(s, t):
        ttype = type(t)
        if ttype in int_types:
            a, b = s._mpq_
            if t:
                if t < 0:
                    # invert the fraction and use the positive exponent
                    a, b, t = b, a, -t
                v = new(mpq)
                v._mpq_ = a**t, b**t
                return v
            # NOTE(review): exponent 0 raises instead of returning mpq(1);
            # looks intentional for internal use -- confirm before relying
            # on ** with a zero exponent.
            raise ZeroDivisionError
        return NotImplemented
# Frequently needed small rational constants, created once at import time.
mpq_1 = mpq((1,1))
mpq_0 = mpq((0,1))
mpq_1_2 = mpq((1,2))
mpq_3_2 = mpq((3,2))
mpq_1_4 = mpq((1,4))
mpq_1_16 = mpq((1,16))
mpq_3_16 = mpq((3,16))
mpq_5_2 = mpq((5,2))
mpq_3_4 = mpq((3,4))
mpq_7_4 = mpq((7,4))
mpq_5_4 = mpq((5,4))

# Register with "numbers" ABC
# We do not subclass, hence we do not use the @abstractmethod checks. While
# this is less invasive it may turn out that we do not actually support
# parts of the expected interfaces. See
# https://docs.python.org/3/library/numbers.html for list of abstract
# methods.
numbers.Rational.register(mpq)
| 24.43913
| 79
| 0.486568
|
4a0bd1c9424a52f1d1f1f68d567fa77958728d99
| 13,162
|
py
|
Python
|
iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/sqlparse/keywords.py
|
mpol/iis
|
fbf7129bbd131fbf824a0d3fc8a0afde367794e2
|
[
"Apache-2.0"
] | 20
|
2015-09-19T21:17:23.000Z
|
2022-03-01T10:37:59.000Z
|
iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/sqlparse/keywords.py
|
mpol/iis
|
fbf7129bbd131fbf824a0d3fc8a0afde367794e2
|
[
"Apache-2.0"
] | 1,054
|
2015-09-11T06:51:27.000Z
|
2022-03-30T09:46:54.000Z
|
iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/sqlparse/keywords.py
|
mpol/iis
|
fbf7129bbd131fbf824a0d3fc8a0afde367794e2
|
[
"Apache-2.0"
] | 80
|
2015-12-09T12:41:52.000Z
|
2022-02-16T11:46:42.000Z
|
from sqlparse.tokens import *
# General SQL keywords all map to the plain Keyword token, so the table is
# built from a whitespace-separated name list (one dict entry per name)
# instead of ~460 hand-written 'NAME': Keyword lines.
# Notes:
#   * 'C' is deliberately absent ("most likely this is an alias" upstream).
#   * BUG FIX: the former "' TEMP'" entry had a leading space, so the TEMP
#     keyword could never match a token; it is now listed correctly.
KEYWORDS = {kw: Keyword for kw in """
    ABORT ABS ABSOLUTE ACCESS ADA ADD ADMIN AFTER
    AGGREGATE ALIAS ALL ALLOCATE ANALYSE ANALYZE ANY ARE
    ASC ASENSITIVE ASSERTION ASSIGNMENT ASYMMETRIC AT ATOMIC AUTHORIZATION
    AVG BACKWARD BEFORE BEGIN BETWEEN BITVAR BIT_LENGTH BOTH
    BREADTH CACHE CALL CALLED CARDINALITY CASCADE CASCADED CAST
    CATALOG CATALOG_NAME CHAIN CHARACTERISTICS CHARACTER_LENGTH
    CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA
    CHAR_LENGTH CHECK CHECKED CHECKPOINT CLASS CLASS_ORIGIN CLOB CLOSE
    CLUSTER COALSECE COBOL COLLATE COLLATION COLLATION_CATALOG
    COLLATION_NAME COLLATION_SCHEMA
    COLUMN COLUMN_NAME COMMAND_FUNCTION COMMAND_FUNCTION_CODE COMMENT
    COMMIT COMMITTED COMPLETION
    CONDITION_NUMBER CONNECT CONNECTION CONNECTION_NAME CONSTRAINT
    CONSTRAINTS CONSTRAINT_CATALOG CONSTRAINT_NAME
    CONSTRAINT_SCHEMA CONSTRUCTOR CONTINUE CONVERSION CONVERT COPY
    CORRESPONTING COUNT
    CREATEDB CREATEUSER CROSS CUBE CURRENT CURRENT_DATE CURRENT_PATH
    CURRENT_ROLE
    CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR CURSOR_NAME CYCLE
    DATA DATABASE
    DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DAY DEALLOCATE
    DECLARE DEFAULT DEFAULTS DEFERRABLE
    DEFERRED DEFINED DEFINER DELIMITER DELIMITERS DEREF DESC DESCRIBE
    DESCRIPTOR DESTROY DESTRUCTOR DETERMINISTIC DIAGNOSTICS DICTIONARY
    DISCONNECT DISPATCH
    DO DOMAIN DYNAMIC DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE EACH
    ENCODING ENCRYPTED
    END-EXEC EQUALS ESCAPE EVERY EXCEPT ESCEPTION EXCLUDING EXCLUSIVE
    EXEC EXECUTE EXISTING EXISTS EXTERNAL EXTRACT FALSE FETCH
    FINAL FIRST FORCE FOREIGN FORTRAN FORWARD FOUND FREE
    FREEZE FULL FUNCTION G GENERAL GENERATED GET GLOBAL
    GO GOTO GRANT GRANTED GROUPING HANDLER HAVING HIERARCHY
    HOLD HOST IDENTITY IGNORE ILIKE IMMEDIATE IMMUTABLE IMPLEMENTATION
    IMPLICIT INCLUDING INCREMENT INDEX INDITCATOR INFIX INHERITS INITIALIZE
    INITIALLY INOUT INPUT INSENSITIVE INSTANTIABLE INSTEAD INTERSECT INTO
    INVOKER IS ISNULL ISOLATION ITERATE K KEY KEY_MEMBER
    KEY_TYPE LANCOMPILER LANGUAGE LARGE LAST LATERAL LEADING LENGTH
    LESS LEVEL LIMIT LISTEN LOAD LOCAL LOCALTIME LOCALTIMESTAMP
    LOCATION LOCATOR LOCK LOWER M MAP MATCH MAXVALUE
    MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT METHOD MINUTE
    MINVALUE MOD MODE
    MODIFIES MODIFY MONTH MORE MOVE MUMPS NAMES NATIONAL
    NATURAL NCHAR NCLOB NEW NEXT NO NOCREATEDB NOCREATEUSER
    NONE NOT NOTHING NOTIFY NOTNULL NULL NULLABLE NULLIF
    OBJECT OCTET_LENGTH OF OFF OFFSET OIDS OLD ONLY
    OPEN OPERATION OPERATOR OPTION OPTIONS ORDINALITY OUT OUTPUT
    OVERLAPS OVERLAY OVERRIDING OWNER PAD PARAMETER PARAMETERS
    PARAMETER_MODE
    PARAMATER_NAME PARAMATER_ORDINAL_POSITION PARAMETER_SPECIFIC_CATALOG
    PARAMETER_SPECIFIC_NAME PARAMATER_SPECIFIC_SCHEMA PARTIAL PASCAL PENDANT
    PLACING PLI POSITION POSTFIX PRECISION PREFIX PREORDER PREPARE
    PRESERVE PRIMARY PRIOR PRIVILEGES PROCEDURAL PROCEDURE PUBLIC RAISE
    READ READS RECHECK RECURSIVE REF REFERENCES REFERENCING REINDEX
    RELATIVE RENAME REPEATABLE REPLACE RESET RESTART RESTRICT RESULT
    RETURN RETURNED_LENGTH RETURNED_OCTET_LENGTH RETURNED_SQLSTATE RETURNS
    REVOKE RIGHT ROLE
    ROLLBACK ROLLUP ROUTINE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA
    ROW ROWS
    ROW_COUNT RULE SAVE_POINT SCALE SCHEMA SCHEMA_NAME SCOPE SCROLL
    SEARCH SECOND SECURITY SELF SENSITIVE SERIALIZABLE SERVER_NAME SESSION
    SESSION_USER SETOF SETS SHARE SHOW SIMILAR SIMPLE SIZE
    SOME SOURCE SPACE SPECIFIC SPECIFICTYPE SPECIFIC_NAME SQL SQLCODE
    SQLERROR SQLEXCEPTION SQLSTATE SQLWARNINIG STABLE START STATE STATEMENT
    STATIC STATISTICS STDIN STDOUT STORAGE STRICT STRUCTURE STYPE
    SUBCLASS_ORIGIN SUBLIST SUBSTRING SUM SYMMETRIC SYSID SYSTEM
    SYSTEM_USER
    TABLE TABLE_NAME TEMP TEMPLATE TEMPORARY TERMINATE THAN TIMESTAMP
    TIMEZONE_HOUR TIMEZONE_MINUTE TO TOAST TRAILING TRANSATION
    TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK
    TRANSATION_ACTIVE TRANSFORM TRANSFORMS TRANSLATE TRANSLATION TREAT
    TRIGGER TRIGGER_CATALOG
    TRIGGER_NAME TRIGGER_SCHEMA TRIM TRUE TRUNCATE TRUSTED TYPE UNCOMMITTED
    UNDER UNENCRYPTED UNION UNIQUE UNKNOWN UNLISTEN UNNAMED UNNEST
    UNTIL UPPER USAGE USER USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_NAME
    USER_DEFINED_TYPE_SCHEMA USING
    VACUUM VALID VALIDATOR VALUES VARIABLE VERBOSE VERSION VIEW
    VOLATILE WHENEVER WITH WITHOUT WORK WRITE YEAR ZONE
""".split()}
# SQL type names are tokenized as builtins.
KEYWORDS.update((name, Name.Builtin) for name in """
    ARRAY BIGINT BINARY BIT BLOB BOOLEAN CHAR CHARACTER
    DATE DEC DECIMAL FLOAT INT INTEGER INTERVAL NUMBER
    NUMERIC REAL SERIAL SMALLINT VARCHAR VARYING INT8 SERIAL8
    TEXT
""".split())
# Statement-opening keywords get DML/DDL token subtypes; the remaining
# common SQL words are plain Keyword tokens.
KEYWORDS_COMMON = dict.fromkeys(('SELECT', 'INSERT', 'DELETE', 'UPDATE'),
                                Keyword.DML)
KEYWORDS_COMMON.update(dict.fromkeys(('DROP', 'CREATE', 'ALTER'),
                                     Keyword.DDL))
KEYWORDS_COMMON.update(dict.fromkeys((
    'WHERE', 'FROM', 'INNER', 'JOIN', 'AND', 'OR', 'LIKE', 'ON', 'IN',
    'SET', 'BY', 'GROUP', 'ORDER', 'LEFT', 'OUTER', 'IF', 'END', 'THEN',
    'LOOP', 'AS', 'ELSE', 'FOR', 'CASE', 'WHEN', 'MIN', 'MAX', 'DISTINCT',
), Keyword))
| 23.336879
| 50
| 0.593831
|
4a0bd1d195f96f782e72b39e40dac512c8514d68
| 950
|
py
|
Python
|
app/models.py
|
benosment/cookbook
|
4f0d704f1953627145fcc89b924e5476b0bdb2fc
|
[
"MIT"
] | null | null | null |
app/models.py
|
benosment/cookbook
|
4f0d704f1953627145fcc89b924e5476b0bdb2fc
|
[
"MIT"
] | null | null | null |
app/models.py
|
benosment/cookbook
|
4f0d704f1953627145fcc89b924e5476b0bdb2fc
|
[
"MIT"
] | null | null | null |
from . import db
from datetime import datetime
# With an ORM, the model (a Python class) is mapped to
# columns in a corresponding database table
class Recipe(db.Model):
    """ORM model for a single recipe.

    Each class attribute maps to a column of the ``recipes`` table.
    """
    __tablename__ = 'recipes'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True, nullable=False)
    description = db.Column(db.Text(100))
    directions = db.Column(db.Text(1000))
    ingredients = db.Column(db.Text(1000))
    preparation_time = db.Column(db.String(20))
    num_portions = db.Column(db.String(20))
    source = db.Column(db.String(120))
    # BUG FIX: pass the callable, not its result.  ``default=datetime.utcnow()``
    # was evaluated once at import time, stamping every row with the
    # module-load time instead of the row's creation time.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    # onupdate keeps date_updated current whenever the row is modified.
    date_updated = db.Column(db.DateTime, default=datetime.utcnow,
                             onupdate=datetime.utcnow)
    img_location = db.Column(db.String(120))
    # TODO -- should be another table, but keeping as string for
    # simplicity for now
    tags = db.Column(db.String(120))

    def __repr__(self):
        """Debug representation showing the recipe name."""
        return 'Recipe %r' % self.name
| 33.928571
| 68
| 0.691579
|
4a0bd203b4efd163153865d067cda41fab7da66c
| 2,467
|
py
|
Python
|
tests/flytekit/unit/models/test_common.py
|
flytehub/flytekit
|
f8f53567594069b29fcd3f99abd1da71a5ef0e22
|
[
"Apache-2.0"
] | 1
|
2019-10-22T05:22:16.000Z
|
2019-10-22T05:22:16.000Z
|
tests/flytekit/unit/models/test_common.py
|
chixcode/flytekit
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
[
"Apache-2.0"
] | null | null | null |
tests/flytekit/unit/models/test_common.py
|
chixcode/flytekit
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
[
"Apache-2.0"
] | 1
|
2019-08-28T22:27:07.000Z
|
2019-08-28T22:27:07.000Z
|
from __future__ import absolute_import
from flytekit.models import common as _common
from flytekit.models.core import execution as _execution
def test_notification_email():
    """EmailNotification round-trips through its flyte IDL representation."""
    recipients = ['a', 'b', 'c']
    original = _common.EmailNotification(recipients)
    assert original.recipients_email == recipients
    restored = _common.EmailNotification.from_flyte_idl(original.to_flyte_idl())
    assert restored == original
def test_notification_pagerduty():
    """PagerDutyNotification round-trips through its flyte IDL representation."""
    recipients = ['a', 'b', 'c']
    original = _common.PagerDutyNotification(recipients)
    assert original.recipients_email == recipients
    restored = _common.PagerDutyNotification.from_flyte_idl(original.to_flyte_idl())
    assert restored == original
def test_notification_slack():
    """SlackNotification round-trips through its flyte IDL representation."""
    recipients = ['a', 'b', 'c']
    original = _common.SlackNotification(recipients)
    assert original.recipients_email == recipients
    restored = _common.SlackNotification.from_flyte_idl(original.to_flyte_idl())
    assert restored == original
def test_notification():
    """Notification round-trips for each of the three channel kinds."""
    phases = [_execution.WorkflowExecutionPhase.FAILED,
              _execution.WorkflowExecutionPhase.SUCCEEDED]
    recipients = ['a', 'b', 'c']
    channels = (
        ('email', _common.EmailNotification),
        ('pager_duty', _common.PagerDutyNotification),
        ('slack', _common.SlackNotification),
    )
    for attr, notification_cls in channels:
        obj = _common.Notification(phases, **{attr: notification_cls(recipients)})
        assert obj.phases == phases
        assert getattr(obj, attr).recipients_email == recipients
        obj2 = _common.Notification.from_flyte_idl(obj.to_flyte_idl())
        assert obj == obj2
        assert obj2.phases == phases
        assert getattr(obj2, attr).recipients_email == recipients
def test_labels():
    """Labels survive a flyte IDL round-trip."""
    labels = _common.Labels({"my": "label"})
    assert labels.values == {"my": "label"}
    assert _common.Labels.from_flyte_idl(labels.to_flyte_idl()) == labels
def test_annotations():
    """Annotations survive a flyte IDL round-trip."""
    annotations = _common.Annotations({"my": "annotation"})
    assert annotations.values == {"my": "annotation"}
    assert _common.Annotations.from_flyte_idl(annotations.to_flyte_idl()) == annotations
| 35.753623
| 100
| 0.714633
|
4a0bd266332ff1282d160b46d871f7b8e3dcb7d0
| 46
|
py
|
Python
|
olive/core/managers/__init__.py
|
liuyenting/olive-core
|
b532b29e29fe9f167369f66b8d922f5f644f9309
|
[
"Apache-2.0"
] | null | null | null |
olive/core/managers/__init__.py
|
liuyenting/olive-core
|
b532b29e29fe9f167369f66b8d922f5f644f9309
|
[
"Apache-2.0"
] | null | null | null |
olive/core/managers/__init__.py
|
liuyenting/olive-core
|
b532b29e29fe9f167369f66b8d922f5f644f9309
|
[
"Apache-2.0"
] | null | null | null |
from .devices import *
from .drivers import *
| 23
| 23
| 0.73913
|
4a0bd36eb588aed9ebdc29dd83fc786aca11770c
| 732
|
py
|
Python
|
config/queue.py
|
FAITHM1/masonite_blog_backend
|
d20b707f7a93dc51816821fa47b341c9a38f7df6
|
[
"MIT"
] | 4
|
2020-11-01T22:51:01.000Z
|
2021-09-23T23:12:53.000Z
|
config/queue.py
|
FAITHM1/masonite_blog_backend
|
d20b707f7a93dc51816821fa47b341c9a38f7df6
|
[
"MIT"
] | 17
|
2021-02-07T17:32:15.000Z
|
2022-03-21T22:08:31.000Z
|
config/queue.py
|
FAITHM1/masonite_blog_backend
|
d20b707f7a93dc51816821fa47b341c9a38f7df6
|
[
"MIT"
] | 3
|
2020-11-14T16:55:09.000Z
|
2021-03-26T18:59:06.000Z
|
"""Queue Settings."""
from masonite import env
"""Queue Driver
Queues are an excellent way to send intensive and time consuming tasks
into the background to improve performance of your application.
Supported: 'async', 'amqp'
"""
DRIVER = env("QUEUE_DRIVER", "async")
"""Queue Drivers
Put any configuration settings for your drivers in this configuration setting.
"""
DRIVERS = {
"async": {"mode": "threading"},
"amqp": {
"username": env("QUEUE_USERNAME", "guest"),
"vhost": env("QUEUE_VHOST", ""),
"password": env("QUEUE_PASSWORD", "guest"),
"host": env("QUEUE_HOST", "localhost"),
"port": env("QUEUE_PORT", "5672"),
"channel": env("QUEUE_CHANNEL", "default"),
},
}
| 25.241379
| 78
| 0.636612
|
4a0bd4df630f0a6268109b22c359e2e874f2a4ab
| 3,854
|
py
|
Python
|
src/bandersnatch_filter_plugins/filename_name.py
|
techalchemy/bandersnatch
|
1c4030f714d7abdc993f1265c42acb7657c9b84f
|
[
"AFL-3.0"
] | null | null | null |
src/bandersnatch_filter_plugins/filename_name.py
|
techalchemy/bandersnatch
|
1c4030f714d7abdc993f1265c42acb7657c9b84f
|
[
"AFL-3.0"
] | null | null | null |
src/bandersnatch_filter_plugins/filename_name.py
|
techalchemy/bandersnatch
|
1c4030f714d7abdc993f1265c42acb7657c9b84f
|
[
"AFL-3.0"
] | null | null | null |
import logging
from typing import List
from bandersnatch.filter import FilterReleasePlugin
logger = logging.getLogger("bandersnatch")
class ExcludePlatformFilter(FilterReleasePlugin):
    """
    Filters release files based on the platforms listed in the user's
    ``blacklist.platforms`` configuration setting.
    """

    name = "exclude_platform"

    # Class-level on purpose: initialize_plugin() treats non-empty lists as
    # "configuration already loaded", shared across instances.
    _patterns: List[str] = []
    _packagetypes: List[str] = []

    def initialize_plugin(self):
        """
        Initialize the plugin reading patterns from the config.
        """
        if self._patterns or self._packagetypes:
            logger.debug(
                "Skipping initalization of Exclude Platform plugin. "
                + "Already initialized"
            )
            return
        try:
            tags = self.configuration["blacklist"]["platforms"].split()
        except KeyError:
            logger.error(f"Plugin {self.name}: missing platforms= setting")
            return

        for platform in tags:
            lplatform = platform.lower()
            if lplatform in ("windows", "win"):
                # PEP 425
                # see also setuptools/package_index.py
                self._patterns.extend([".win32", "-win32", "win_amd64", "win-amd64"])
                # PEP 527
                self._packagetypes.extend(["bdist_msi", "bdist_wininst"])
            elif lplatform in ("macos", "macosx"):
                self._patterns.extend(["macosx_", "macosx-"])
                self._packagetypes.extend(["bdist_dmg"])
            # BUG FIX: ``lplatform in ("freebsd")`` was a *substring* test on
            # the string "freebsd" — the parentheses do not make a tuple — so
            # values like "free" or "bsd" matched too.  Same for ("linux").
            elif lplatform == "freebsd":
                # concerns only very few files
                self._patterns.extend([".freebsd", "-freebsd"])
            elif lplatform == "linux":
                self._patterns.extend(
                    [
                        "linux-i686",  # PEP 425
                        "linux-x86_64",  # PEP 425
                        "linux_armv7l",  # https://github.com/pypa/warehouse/pull/2010
                        "linux_armv6l",  # https://github.com/pypa/warehouse/pull/2012
                        "manylinux1_",  # PEP 513
                        "manylinux2010_",  # PEP 571
                    ]
                )
                self._packagetypes.extend(["bdist_rpm"])

        logger.info(f"Initialized {self.name} plugin with {self._patterns!r}")

    def filter(self, metadata):
        """
        Remove files that match any pattern from every release; versions left
        with no files are dropped entirely.  Returns False when no releases
        remain, True otherwise.
        """
        releases = metadata["releases"]
        removed = 0
        # Iterate over a copy of the version keys, as entries may be deleted
        # from ``releases`` during iteration.
        versions = list(releases.keys())
        for version in versions:
            new_files = []
            for file_desc in releases[version]:
                if self._check_match(file_desc):
                    removed += 1
                else:
                    new_files.append(file_desc)
            if len(new_files) == 0:
                del releases[version]
            else:
                releases[version] = new_files
        logger.debug(f"{self.name}: filenames removed: {removed}")
        if not releases:
            return False
        else:
            return True

    def _check_match(self, file_desc) -> bool:
        """
        Check if a release file matches any of the specified patterns.

        Parameters
        ==========
        file_desc:
            file description entry

        Returns
        =======
        bool:
            True if it matches, False otherwise.
        """
        # source dist: never filter out
        pt = file_desc.get("packagetype")
        if pt == "sdist":
            return False
        # Windows installer
        if pt in self._packagetypes:
            return True
        fn = file_desc["filename"]
        for i in self._patterns:
            if i in fn:
                return True
        return False
| 30.587302
| 86
| 0.522314
|
4a0bd54adb69f33f3d824f8cf044c462d488cd87
| 1,488
|
py
|
Python
|
build.py
|
odant/conan-date
|
bbd75bd1fe2083c192848de96f086ca49be2d931
|
[
"MIT"
] | null | null | null |
build.py
|
odant/conan-date
|
bbd75bd1fe2083c192848de96f086ca49be2d931
|
[
"MIT"
] | null | null | null |
build.py
|
odant/conan-date
|
bbd75bd1fe2083c192848de96f086ca49be2d931
|
[
"MIT"
] | null | null | null |
import platform, os
from conan.packager import ConanMultiPackager
# Common settings
# Conan package username; when CONAN_USERNAME is set in the environment the
# packager reads it from there instead, so the local default is disabled.
username = "odant" if "CONAN_USERNAME" not in os.environ else None
# Windows settings
# MSVC toolset versions and runtimes to build; the corresponding env vars
# take precedence in the same way.
visual_versions = ["14", "15"] if "CONAN_VISUAL_VERSIONS" not in os.environ else None
visual_runtimes = ["MD", "MDd"] if "CONAN_VISUAL_RUNTIMES" not in os.environ else None
def filter_libcxx(builds):
    """Keep only build configurations using the C++11 libstdc++ ABI."""
    return [
        [settings, options, env_vars, build_requires, reference]
        for settings, options, env_vars, build_requires, reference in builds
        if settings["compiler.libcxx"] == "libstdc++11"
    ]
if __name__ == "__main__":
command = "sudo apt-get -qq update && sudo apt-get -qq install -y tzdata"
builder = ConanMultiPackager(
username=username,
visual_versions=visual_versions,
visual_runtimes=visual_runtimes,
exclude_vcvars_precommand=True,
docker_entry_script=command
)
builder.add_common_builds(pure_c=False)
# Adjusting build configurations
builds = builder.items
if platform.system() == "Linux":
builds = filter_libcxx(builds)
# Replace build configurations
builder.items = []
for settings, options, env_vars, build_requires, _ in builds:
builder.add(
settings=settings,
options=options,
env_vars=env_vars,
build_requires=build_requires
)
builder.run()
| 33.818182
| 87
| 0.663306
|
4a0bd602e4d9abe3c75bc3eb84f8dd144446ed64
| 1,549
|
py
|
Python
|
sdk/python/pulumi_rke/__init__.py
|
jaxxstorm/pulumi-rke
|
9031a8509ce0f210348ece7419c1e7392174d603
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-03-13T20:27:32.000Z
|
2020-12-10T15:47:31.000Z
|
sdk/python/pulumi_rke/__init__.py
|
jaxxstorm/pulumi-rke
|
9031a8509ce0f210348ece7419c1e7392174d603
|
[
"ECL-2.0",
"Apache-2.0"
] | 97
|
2020-03-24T01:00:33.000Z
|
2021-04-25T20:27:28.000Z
|
sdk/python/pulumi_rke/__init__.py
|
jaxxstorm/pulumi-rke
|
9031a8509ce0f210348ece7419c1e7392174d603
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-03-23T21:44:29.000Z
|
2021-05-13T14:28:22.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .cluster import *
from .provider import *
from ._inputs import *
from . import outputs
# Make subpackages available:
from . import (
config,
)
def _register_module():
    # Generated by the Pulumi tfgen tool (see the file header): registers this
    # package's resource and provider types with the Pulumi runtime so that
    # engine-side URNs can be rehydrated into the Python classes above.
    import pulumi
    from . import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map a resource type token back to its Python class.
            if typ == "rke:index/cluster:Cluster":
                return Cluster(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("rke", "index/cluster", _module_instance)

    class Package(pulumi.runtime.ResourcePackage):
        _version = _utilities.get_semver_version()

        def version(self):
            return Package._version

        def construct_provider(self, name: str, typ: str, urn: str) -> pulumi.ProviderResource:
            if typ != "pulumi:providers:rke":
                raise Exception(f"unknown provider type {typ}")
            return Provider(name, pulumi.ResourceOptions(urn=urn))

    pulumi.runtime.register_resource_package("rke", Package())


_register_module()
| 29.226415
| 95
| 0.662363
|
4a0bd6bd29c404b436f005f9e42f057f706caf22
| 4,683
|
py
|
Python
|
heart_disease_correlation/coronary.py
|
Sapphirine/202112-38-GOPS
|
0a52d8ed56c432845201be93bc5db80bc06d9b0e
|
[
"MIT"
] | null | null | null |
heart_disease_correlation/coronary.py
|
Sapphirine/202112-38-GOPS
|
0a52d8ed56c432845201be93bc5db80bc06d9b0e
|
[
"MIT"
] | 2
|
2021-12-23T04:32:41.000Z
|
2021-12-23T05:01:33.000Z
|
heart_disease_correlation/coronary.py
|
Sapphirine/202112-38-GOPS
|
0a52d8ed56c432845201be93bc5db80bc06d9b0e
|
[
"MIT"
] | 1
|
2022-03-05T22:50:36.000Z
|
2022-03-05T22:50:36.000Z
|
# Correlates coronary-heart-disease mortality rates (CDC heart_disease.csv)
# with year-over-year changes in NFLIS drug reports, at both state and
# county (FIPS) granularity, for five Appalachian states.
import pandas as pd
from statistics import mean

state_codes = ['KY', 'OH', 'PA', 'VA', 'WV']
df = pd.read_csv('heart_disease.csv', low_memory=False)
# Keep only coronary-heart-disease rows reported per 100,000 population
# in the five states of interest.
df = df.loc[df['Topic'] == 'Coronary Heart Disease']
df = df.loc[df['Data_Value_Unit']=="per 100,000"]
df = df.loc[df['LocationAbbr'].isin(state_codes)]
# print(df)

# Per-state yearly disease-rate arrays.
# NOTE(review): the slice [12:19] is assumed to select the 2011-2017 group
# positions, and the factor 2 presumably rescales the mean — confirm against
# the source data's year coverage.
state_to_rates = dict()
for state_code in state_codes:
    state_df = df.loc[df['LocationAbbr'] == state_code].sort_values(by='Year')
    sum_in_state_same_year = 2 * state_df.groupby('Year')['Data_Value'].mean()[12:19] # from 2011- 2017
    state_year_array = sum_in_state_same_year.to_numpy()
    state_to_rates[state_code] = state_year_array
# print(state_to_rates)

# Per-county (FIPS LocationID) yearly disease-rate arrays, same year window.
fips_list = df['LocationID'].unique()
fips_to_rates = dict()
for location_id in fips_list:
    county_df = df.loc[df['LocationID'] == location_id].sort_values(by='Year')
    sum_in_same_year = county_df.groupby('Year')['Data_Value'].sum()[12:19] # from 2011- 2017
    year_array = sum_in_same_year.to_numpy()
    fips_to_rates[location_id] = year_array

NFLIS = pd.read_csv('MCM_NFLIS_Data.csv')
NFLIS = NFLIS.loc[NFLIS['State'].isin(state_codes)]
# drug_names = ['Heroin', 'Hydrocodone', 'Fentanyl', 'Oxycodone', 'Buprenorphine', 'Morphine', 'Hydromorphone', 'Oxymorphone', 'Tramadol', 'Methadone']
drug_names = ['Heroin', 'Hydrocodone', 'Oxycodone', 'Buprenorphine', 'Morphine']

# State-level Pearson correlation between disease rates and the annual
# increase in drug reports, per drug.
state_res_dict = dict()
for drug_name in drug_names:
    drug_df = NFLIS[NFLIS['SubstanceName'] == drug_name]
    report_sum = drug_df.groupby(['State','YYYY'])[['DrugReports']].sum()
    report_diff = report_sum.groupby(level=0)['DrugReports'].diff()
    drug_increase_dict = dict()
    for code in state_codes:
        # Drop the first diff entry (NaN).
        drug_increase_dict[code] = report_diff[code].to_numpy()[1:]
    corr_code_dict = dict()
    for code in state_codes:
        # Standardize both series before correlating.
        state_to_rates_series = pd.Series(state_to_rates[code])
        normalized_state_to_rates_series = (state_to_rates_series-state_to_rates_series.mean())/state_to_rates_series.std()
        drug_increase_series = pd.Series(drug_increase_dict[code])
        normalized_drug_increase_series = (drug_increase_series-drug_increase_series.mean())/drug_increase_series.std()
        corr_code_dict[code] = normalized_state_to_rates_series.corr(normalized_drug_increase_series,method='pearson')
    state_res_dict[drug_name] = corr_code_dict
state_res_df = pd.DataFrame(state_res_dict)
# print(state_res_df)
state_res_df.to_csv('coronary_state_combined.csv')

# County-level correlations, per drug and FIPS code.
res_dict = dict()
for drug_name in drug_names:
    drug_df = NFLIS[NFLIS['SubstanceName'] == drug_name]
    fips_dict = dict()
    for fips in fips_list:
        drug_fips_df = drug_df[drug_df['FIPS_Combined'] == fips]
        increase = drug_fips_df['DrugReports'].diff().to_numpy()
        if(len(increase) < 8): #drop unvailable data
            continue
        increase = increase[1:] # get rid of NaN
        disease_rate = fips_to_rates[fips]
        drug_increase_series = pd.Series(increase)
        normalized_drug_increase_series = (drug_increase_series-drug_increase_series.mean())/drug_increase_series.std()
        disease_rate_series = pd.Series(disease_rate)
        normalized_disease_rate_series = (disease_rate_series-disease_rate_series.mean())/disease_rate_series.std()
        corr = normalized_drug_increase_series.corr(normalized_disease_rate_series, method='pearson')
        # print(corr)
        fips_dict[fips] = corr
    res_dict[drug_name] = fips_dict
result_df = pd.DataFrame(res_dict)

# Average the county correlations per state; FIPS codes are bucketed by
# their state prefix range (21xxx=KY, 39xxx=OH, 42xxx=PA, 51xxx=VA, 54xxx=WV).
state_mean_dict = dict()
fips_state_dict= {21:'KY', 39:'OH', 42:'PA',51:'VA', 54:'WV'}
for drug_name in drug_names:
    state_dict = dict()
    fips_dict = res_dict[drug_name]
    fips = fips_dict.keys()
    ky_fips = [key for key in fips_dict.keys() if 20999 <key and key < 22000 ]
    oh_fips = [key for key in fips_dict.keys() if 38999 <key and key < 40000 ]
    pa_fips = [key for key in fips_dict.keys() if 41999 <key and key < 43000 ]
    va_fips = [key for key in fips_dict.keys() if 50999 <key and key < 52000 ]
    wv_fips = [key for key in fips_dict.keys() if 53999 <key and key < 55000 ]
    if ky_fips:
        state_dict['KY'] = mean([fips_dict[key] for key in ky_fips])
    if oh_fips:
        state_dict['OH'] = mean([fips_dict[key] for key in oh_fips])
    if pa_fips:
        state_dict['PA'] = mean([fips_dict[key] for key in pa_fips])
    if va_fips:
        state_dict['VA'] = mean([fips_dict[key] for key in va_fips])
    if wv_fips:
        state_dict['WV'] = mean([fips_dict[key] for key in wv_fips])
    state_mean_dict[drug_name] = state_dict
state_mean_df = pd.DataFrame(state_mean_dict)
# print(state_mean_df)
state_mean_df.to_csv('coronary_state_mean.csv')
| 42.963303
| 151
| 0.708093
|
4a0bd84645a4f0b192db65d1940578b30ee51e58
| 5,682
|
py
|
Python
|
pronotepy/test_pronotepy.py
|
HevelMc/pronotepy
|
efacde74c70e003394adf07d8ddf289358966344
|
[
"MIT"
] | null | null | null |
pronotepy/test_pronotepy.py
|
HevelMc/pronotepy
|
efacde74c70e003394adf07d8ddf289358966344
|
[
"MIT"
] | null | null | null |
pronotepy/test_pronotepy.py
|
HevelMc/pronotepy
|
efacde74c70e003394adf07d8ddf289358966344
|
[
"MIT"
] | null | null | null |
import datetime
import typing
import unittest
import pronotepy
client = pronotepy.Client('https://demo.index-education.net/pronote/eleve.html', 'demonstration', 'pronotevs')
class TestClient(unittest.TestCase):
    """Client-level tests run against the public pronote demo server.

    NOTE(review): requires network access to demo.index-education.net and
    shares the module-level ``client`` instance with the other test classes.
    """
    global client

    def test__get_week(self):
        # Eight days past the first school day falls in week 2.
        self.assertEqual(client.get_week(client.start_day + datetime.timedelta(days=8)), 2)

    def test_lessons(self):
        start = client.start_day
        end = client.start_day + datetime.timedelta(days=8)
        lessons = client.lessons(start, end)
        # We assume demo website will always have some lessons
        self.assertGreater(len(lessons), 0)
        for lesson in lessons:
            # Every returned lesson must fall inside the requested window.
            self.assertLessEqual(start, lesson.start.date())
            self.assertLessEqual(lesson.start.date(), end)

    def test_periods(self):
        self.assertIsNotNone(client.periods)

    def test_current_period(self):
        self.assertIsNotNone(client.current_period)

    def test_homework(self):
        start = client.start_day
        end = client.start_day + datetime.timedelta(days=31)
        homework = client.homework(start, end)
        # We assume demo website will always have homework
        self.assertGreater(len(homework), 0)
        for hw in homework:
            self.assertLessEqual(start, hw.date)
            self.assertLessEqual(hw.date, end)

    def test_refresh(self):
        # Re-establishes the session, then verifies it is still valid.
        client.refresh()
        self.assertEqual(client.session_check(), True)
class TestPeriod(unittest.TestCase):
    """Tests for the current Period of the shared demo client (network-backed)."""
    period: pronotepy.Period

    @classmethod
    def setUpClass(cls) -> None:
        global client
        cls.period = client.current_period

    def test_grades(self):
        # We assume demo website will have grades
        grades = self.period.grades
        self.assertGreater(len(grades), 0)

    def test_averages(self):
        self.assertGreater(len(self.period.averages), 0)

    def test_overall_average(self):
        self.assertIsNotNone(self.period.overall_average)

    def test_evaluations(self):
        evaluations = self.period.evaluations
        self.assertGreater(len(evaluations), 0)
        for evaluation in evaluations:
            for acquisition in evaluation.acquisitions:
                self.assertIsNotNone(acquisition)

    def test_absences(self):
        # Smoke test: only checks that the call does not raise.
        self.period.absences()
class TestInformation(unittest.TestCase):
    """Information/survey retrieval tests against the demo server."""

    def test_attribute_type(self):
        """Every Information attribute matches its declared type hint."""
        information = client.information_and_surveys()
        for info in information:
            for attr_name, attr_type in typing.get_type_hints(pronotepy.Information).items():
                with self.subTest(attr_name=attr_name, attr_type=attr_type):
                    # FIX: the original used the private typing._BaseGenericAlias,
                    # which moves between Python versions.  The public
                    # get_origin()/get_args() pair performs the same narrowing
                    # of parameterized hints to isinstance-able types.
                    if typing.get_origin(attr_type) is not None:
                        attr_type = typing.get_args(attr_type)
                    self.assertIsInstance(info.__getattribute__(attr_name), attr_type)

    def test_unread(self):
        information = client.information_and_surveys(only_unread=True)
        for info in information:
            self.assertFalse(info.read)

    def test_time_delta(self):
        start = datetime.datetime(year=client.start_day.year, month=client.start_day.month, day=client.start_day.day)
        end = start + datetime.timedelta(days=100)
        information = client.information_and_surveys(date_from=start, date_to=end)
        for info in information:
            self.assertTrue(start <= info.start_date <= end, msg="date outside the research limits")
class TestLesson(unittest.TestCase):
    """Tests for a single Lesson pulled from the demo timetable."""
    lesson: pronotepy.Lesson

    @classmethod
    def setUpClass(cls) -> None:
        global client
        # Day 4 after the school year start is assumed to contain lessons
        # on the demo server — TODO confirm if the fixture data changes.
        cls.lesson = client.lessons(client.start_day + datetime.timedelta(days=4))[0]

    def test_normal(self):
        self.assertIsNotNone(self.lesson.normal)

    def test_content(self):
        self.assertIsInstance(self.lesson.content, pronotepy.LessonContent)
class TestLessonContent(unittest.TestCase):
    """Tests for the LessonContent of the first lesson on a known school day."""
    lessonContent: pronotepy.LessonContent

    @classmethod
    def setUpClass(cls) -> None:
        global client
        cls.lessonContent = client.lessons(client.start_day + datetime.timedelta(days=4))[0].content

    def test_files(self):
        self.assertIsNotNone(self.lessonContent.files)
class TestParentClient(unittest.TestCase):
    """Tests for the parent account type against the demo server."""
    client: pronotepy.ParentClient

    @classmethod
    def setUpClass(cls) -> None:
        cls.client = pronotepy.ParentClient('https://demo.index-education.net/pronote/parent.html',
                                            'demonstration', 'pronotevs')

    def test_set_child(self):
        # Children can be selected both by object and by display name.
        self.client.set_child(self.client.children[1])
        self.client.set_child('PARENT Fanny')

    def test_homework(self):
        self.assertIsNotNone(
            self.client.homework(self.client.start_day, self.client.start_day + datetime.timedelta(days=31)))
class TestVieScolaireClient(unittest.TestCase):
    """Tests for the vie scolaire account type against the demo server."""
    client: pronotepy.VieScolaireClient

    @classmethod
    def setUpClass(cls) -> None:
        cls.client = pronotepy.VieScolaireClient('https://demo.index-education.net/pronote/viescolaire.html',
                                                 'demonstration2', 'pronotevs')

    def test_classes(self):
        self.assertGreater(len(self.client.classes), 0)
        # FIX: the loop variable was named ``cls``, shadowing the conventional
        # classmethod name used elsewhere in this class — renamed for clarity.
        for school_class in self.client.classes:
            self.assertIsNotNone(school_class.name)

        for student in self.client.classes[0].students():
            self.assertIsInstance(student.identity, pronotepy.Identity)
            self.assertGreater(len(student.guardians), 0)
            for guardian in student.guardians:
                self.assertIsInstance(guardian.identity, pronotepy.Identity)
if __name__ == '__main__':
unittest.main()
| 33.621302
| 117
| 0.67441
|
4a0bd88599e6fad73a0e88090941dd1d2a49afb3
| 1,167
|
py
|
Python
|
pymoo/usage/usage_performance_indicator.py
|
gabicavalcante/pymoo
|
1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846
|
[
"Apache-2.0"
] | 11
|
2018-05-22T17:38:02.000Z
|
2022-02-28T03:34:33.000Z
|
pymoo/usage/usage_performance_indicator.py
|
gabicavalcante/pymoo
|
1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846
|
[
"Apache-2.0"
] | 15
|
2022-01-03T19:36:36.000Z
|
2022-03-30T03:57:58.000Z
|
pymoo/usage/usage_performance_indicator.py
|
gabicavalcante/pymoo
|
1711ce3a96e5ef622d0116d6c7ea4d26cbe2c846
|
[
"Apache-2.0"
] | 3
|
2021-11-22T08:01:47.000Z
|
2022-03-11T08:53:58.000Z
|
# NOTE(review): the "# START"/"# END" marker comments below are parsed by the
# documentation build to extract snippets — keep them intact when editing.
# START load_data
import numpy as np

from pymoo.factory import get_problem
from pymoo.visualization.scatter import Scatter

# The pareto front of a scaled zdt1 problem
pf = get_problem("zdt1").pareto_front()

# The result found by an algorithm
A = pf[::10] * 1.1

# plot the result
Scatter(legend=True).add(pf, label="Pareto-front").add(A, label="Result").show()
# END load_data

# START gd
from pymoo.factory import get_performance_indicator

gd = get_performance_indicator("gd", pf)
print("GD", gd.calc(A))
# END gd

# START gd_plus
from pymoo.factory import get_performance_indicator

gd_plus = get_performance_indicator("gd+", pf)
print("GD+", gd_plus.calc(A))
# END gd_plus

# START igd
from pymoo.factory import get_performance_indicator

igd = get_performance_indicator("igd", pf)
print("IGD", igd.calc(A))
# END igd

# START igd_plus
from pymoo.factory import get_performance_indicator

igd_plus = get_performance_indicator("igd+", pf)
print("IGD+", igd_plus.calc(A))
# END igd_plus

# START hv
from pymoo.factory import get_performance_indicator

hv = get_performance_indicator("hv", ref_point=np.array([1.2, 1.2]))
print("hv", hv.calc(A))
# END hv
| 19.779661
| 80
| 0.748072
|
4a0bda9ff63a11d50ecbedfc6e621e6123a5fbad
| 1,175
|
py
|
Python
|
load_dataset.py
|
DiegoLigtenberg/Workspace-Thesis-MSS
|
061fd31c09a7d6d0e9dd1c49d0dd11b84057e547
|
[
"MIT"
] | null | null | null |
load_dataset.py
|
DiegoLigtenberg/Workspace-Thesis-MSS
|
061fd31c09a7d6d0e9dd1c49d0dd11b84057e547
|
[
"MIT"
] | null | null | null |
load_dataset.py
|
DiegoLigtenberg/Workspace-Thesis-MSS
|
061fd31c09a7d6d0e9dd1c49d0dd11b84057e547
|
[
"MIT"
] | null | null | null |
import re
import glob
import os
DB_DIRECTORY = r"database\**\*.mp4"
def atoi(text):
    """Return int(text) for purely numeric strings, otherwise text unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Sort key giving natural order: embedded numbers compare numerically
    (so "x2" sorts before "x10")."""
    chunks = re.split(r'(\d+)', text)
    return [atoi(chunk) for chunk in chunks]
def dir_to_sorted_file_list():
    """Return every .mp4 under the database directory in natural sort order."""
    found = sorted(glob.glob(DB_DIRECTORY, recursive=True), key=natural_keys)
    return found
files = dir_to_sorted_file_list()
def get_filepaths(directory):
    """Recursively collect the full path of every file under *directory*.

    Walks the tree top-down with ``os.walk`` and returns a flat list of file
    paths (directories themselves are not included).  The previous docstring
    incorrectly described ``os.walk``'s 3-tuple yield as this function's
    return value.
    """
    file_paths = []  # List which will store all of the full filepaths.
    # Walk the tree; the directory-names element of each triple is unused.
    for root, _dirnames, filenames in os.walk(directory):
        for filename in filenames:
            # Join the two strings in order to form the full filepath.
            file_paths.append(os.path.join(root, filename))
    return file_paths
files = get_filepaths("database")
# print(files[3])
| 26.111111
| 74
| 0.674043
|
4a0bdae9ccda92274c0484fb6ff92b87a582bbf3
| 4,545
|
py
|
Python
|
covertutils/covertutils/shells/subshells/controlsubshell.py
|
aidden-laoch/sabre
|
0940aa51dfc5074291df9d29db827ddb4010566d
|
[
"MIT"
] | 2
|
2020-11-23T23:54:32.000Z
|
2021-05-25T12:28:05.000Z
|
covertutils/covertutils/shells/subshells/controlsubshell.py
|
aidden-laoch/sabre
|
0940aa51dfc5074291df9d29db827ddb4010566d
|
[
"MIT"
] | 1
|
2021-03-20T05:43:02.000Z
|
2021-03-20T05:43:02.000Z
|
covertutils/covertutils/shells/subshells/controlsubshell.py
|
aidden-laoch/sabre
|
0940aa51dfc5074291df9d29db827ddb4010566d
|
[
"MIT"
] | null | null | null |
import json
# from covertutils.payloads.generic.control import Commands as control_commands
from covertutils.shells.subshells import SimpleSubShell
# Short wire codes for the control sub-shell.  The values are the literal
# command strings sent to the remote side via handler.preferred_send(), so
# they must stay in sync with the agent's command table.
Commands = {
    'reset' : 'R',
    'identity' : 'ID',
    'sysinfo' : 'SI',
    'kill' : 'KI',
    'mute' : 'MU',
    'unmute' : 'UM',
    'nuke' : 'NK',
    'check_sync' : 'CS',
    'sync' : 'Y',
    'chpasswd' : 'PWD',
}
def message_handle(message, instance):
    """Process a response arriving on the control stream.

    Interprets *message* according to the pending control command recorded on
    *instance* (sync, chpasswd, sysinfo, check_sync) and falls back to plain
    logging of the message otherwise.
    """
    if instance.sync_stream:
        if message == 'OK':
            instance.handler.getOrchestrator().reset([instance.sync_stream])
    if instance.chpasswd:
        if message == 'OK':
            # The agent acknowledged the password change; switch local crypto.
            instance.handler.getOrchestrator().initCrypto(instance.chpasswd)
            instance.chpasswd = None
    if instance.sysinfo:
        # The agent packs the sysinfo fields '+'-separated.
        sysinfo = message.split('+')
        instance.message_logger.warn( """
General:
        Host: {}
        Machine: {}
        Version: {}
        Locale: {}
        Platform: {}
        Release: {}
        System: {}
        Processor: {}
        User: {}
Specifics:
        Windows: {}
        Linux: {}
        """.format( *sysinfo ) )
        instance.base_shell.sysinfo = sysinfo
        instance.sysinfo = False
    elif instance.check_sync:
        # Compare local key-cycle counters against the agent's report.
        local_dict = {}
        orch = instance.handler.getOrchestrator()
        for stream in orch.getStreams():
            local_dict[stream] = orch.getKeyCycles(stream)
        remote_dict = json.loads(message)
        output = ''
        for stream in remote_dict:
            if stream not in local_dict:
                # BUG FIX: this line referenced ``self.debug_logger`` (a
                # NameError in a module-level function), never interpolated
                # the stream name, and then fell through to a guaranteed
                # KeyError on local_dict[stream].
                instance.debug_logger.warn("Stream '%s' exists only in the Agent" % stream)
                continue
            # local decryption key must match remote encryption and vice-versa
            encryption_sync = local_dict[stream][0] == remote_dict[stream][1]
            decryption_sync = local_dict[stream][1] == remote_dict[stream][0]
            synced = encryption_sync and decryption_sync
            if synced:
                output += "[+] Stream '%s' is synchronized at %d - %d cycles\n" % ( stream, local_dict[stream][0], local_dict[stream][1] )
            else:
                output += "[-] Stream '%s' is out-of-sync at '%s' channel\n" % ( stream, "Encryption" if encryption_sync else "Decryption" )
        instance.message_logger.warn(output)
        instance.check_sync = False
    else:
        instance.message_logger.warn( message )
class ControlSubShell ( SimpleSubShell ) :
    """Interactive sub-shell for the agent control channel.

    Translates typed command names into the short wire codes of ``Commands``,
    sends them through the handler, and records pending state flags that
    ``message_handle`` consults when the agent's response arrives.
    """

    def __init__( self, stream, handler, queue_dict, base_shell, ignore_messages = set(['X']), prompt_templ = " (>{stream}<) |-> ") :
        SimpleSubShell.__init__( self, stream, handler, queue_dict, base_shell, ignore_messages, prompt_templ )
        self.updatePrompt( )
        # Responses on this stream are routed to the module-level handler.
        self.message_function = message_handle
        # Pending-command flags read by message_handle when a reply arrives.
        self.sysinfo = False
        self.check_sync = False
        self.killed = False
        self.sync_stream = False
        self.chpasswd = False

    def default( self, line ) :
        """Translate the typed line into a wire command and send it.

        Order matters: ``command`` may be extended with arguments (sync,
        chpasswd) before the flag checks and the final send below.
        """
        comm, args, line = self.parseline(line)
        try :
            command = Commands[comm]
        except :
            self.debug_logger.warn( "No such control command [%s]!" % comm)
            return
        if command == Commands['sync'] :
            if len(args) == 0 :
                self.debug_logger.warn( "No Stream selected!")
                return
            stream = args
            if stream not in self.handler.getOrchestrator().getStreams() :
                self.debug_logger.warn( "Stream '%s' does not exist!" % stream)
                return
            self.sync_stream = stream
            command = "%s %s" % (command, stream)
        if command == Commands['chpasswd'] :
            if len(args) == 0 :
                self.debug_logger.warn( "No Password selected!")
                return
            new_passwd = args
            self.chpasswd = new_passwd
            command = "%s %s" % (command, new_passwd)
        if command == Commands['reset'] :
            self.debug_logger.warn( "Reseting handler" )
            self.resetHandler()
        if command == Commands['sysinfo'] :
            self.sysinfo = True
        if command == Commands['kill'] :
            self.killed = True
        self.debug_logger.warn( "Sending '%s' control command!" % command )
        self.handler.preferred_send( command, self.stream )
        # After sending a password change, switch local crypto immediately.
        if self.chpasswd :
            self.handler.getOrchestrator().initCrypto(self.chpasswd)
            self.chpasswd = None
        if command == Commands['check_sync'] :
            self.check_sync = True

    def resetHandler( self ) :
        """Reset the underlying covert handler."""
        self.handler.reset()

    def completenames( self, text, line, begidx, endidx ) :
        """Tab-complete partial control command names from ``Commands``."""
        comm, args, line = self.parseline(line)
        complete_list = []
        probable_comm = comm
        # An exact command name needs no further completion.
        if probable_comm in list(Commands.keys()) :
            return []
        for known_command in list(Commands.keys()) :
            if known_command.startswith(probable_comm) :
                complete_list.append( known_command )
        return complete_list

    def do_help( self, line ) :
        """List the available control command names."""
        commands = list(Commands.keys())
        print(commands)
| 26.424419
| 130
| 0.676128
|
4a0bdaeec3b710e6bd81a7787f700b8c8d71b01f
| 45
|
py
|
Python
|
src/deps/ext/torchvision/__init__.py
|
ericotjo001/neuron-descriptions
|
744fbf65c6538edd2fa423108eca7e2cd72f8b59
|
[
"MIT"
] | 5
|
2022-02-22T21:58:10.000Z
|
2022-03-22T16:19:14.000Z
|
src/deps/ext/torchvision/__init__.py
|
ericotjo001/neuron-descriptions
|
744fbf65c6538edd2fa423108eca7e2cd72f8b59
|
[
"MIT"
] | 3
|
2022-02-27T06:43:34.000Z
|
2022-03-18T08:30:30.000Z
|
src/deps/ext/torchvision/__init__.py
|
ericotjo001/neuron-descriptions
|
744fbf65c6538edd2fa423108eca7e2cd72f8b59
|
[
"MIT"
] | 1
|
2022-02-27T05:18:30.000Z
|
2022-02-27T05:18:30.000Z
|
"""Extensions to the torchvision library."""
| 22.5
| 44
| 0.733333
|
4a0bdb9836d1fe403db1926de42525dbfeab1681
| 4,401
|
py
|
Python
|
small_spider/spider_downloader.py
|
saberbin/small_spider
|
d3c14681a5bb2b068fe5afb23d637d21b8fa76aa
|
[
"Apache-2.0"
] | null | null | null |
small_spider/spider_downloader.py
|
saberbin/small_spider
|
d3c14681a5bb2b068fe5afb23d637d21b8fa76aa
|
[
"Apache-2.0"
] | null | null | null |
small_spider/spider_downloader.py
|
saberbin/small_spider
|
d3c14681a5bb2b068fe5afb23d637d21b8fa76aa
|
[
"Apache-2.0"
] | null | null | null |
import requests
import random
import warnings
class SpiderDownloader(object):
    """Small HTTP download helper that rotates a pool of User-Agent headers."""

    # Candidate User-Agent strings; one is chosen at random per request so
    # repeated requests look less uniform to the server.
    HEADERS = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',
        'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20120101 Firefox/33.0',
        'Mozilla/5.0 (MSIE 10.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A'
    )

    def _validate_url(self, url):
        """Raise TypeError unless *url* is a str (shared by every public method)."""
        if url is None:
            raise TypeError("The url type error, url must be string type, not None type.")
        if not isinstance(url, str):
            raise TypeError("The url type error, url must be string type.")

    def _get(self, url):
        """Issue a GET request with a randomly chosen User-Agent header."""
        return requests.get(url=url, headers={'User-Agent': random.choice(self.HEADERS)})

    def html_content(self, url, file_type='html'):
        """Fetch *url* and return the response body as decoded text.

        Args:
            url: the URL to request; must be a str.
            file_type: must be 'html'; any other value triggers a warning
                and is coerced back to 'html'.

        Returns:
            str: the response body, decoded with the apparent encoding.

        Raises:
            TypeError: if *url* is None or not a str.
        """
        self._validate_url(url)
        if file_type != 'html':
            warnings.warn("The 'file_type' must be html.")
            file_type = 'html'
        response = self._get(url)
        # Let requests guess the charset from the body, not the headers.
        response.encoding = response.apparent_encoding
        return response.text

    def get_content(self, url):
        """Fetch *url* and return the raw response body as bytes.

        Raises:
            TypeError: if *url* is None or not a str.
        """
        self._validate_url(url)
        response = self._get(url)
        response.encoding = response.apparent_encoding
        return response.content

    def img_content(self, img_url, file_type='jpg'):
        """Fetch an image URL and return its binary content.

        *file_type* is advisory only; unknown values just emit a warning.

        Raises:
            TypeError: if *img_url* is None or not a str.
        """
        self._validate_url(img_url)
        img_types = ('jpg', 'png', 'jpeg')
        if file_type not in img_types:
            warnings.warn("The 'file_type' must be 'jpg','png', or other image file types.")
        return self._get(img_url).content

    def img_downloader(self, img_url, file_type='jpg', file_name=None):
        """Download an image to disk and return 200 on success.

        Fixes the original behavior where the *file_name* argument was either
        ignored or left as None: it is now honored when given, and derived
        from the last URL path component otherwise.

        Raises:
            TypeError: if *img_url* is None or not a str.
        """
        self._validate_url(img_url)
        img_types = ('jpg', 'png', 'jpeg')
        if file_type not in img_types:
            warnings.warn("The 'file_type' must be 'jpg','png', or other image file types.")
        if file_name is None:
            file_name = img_url.split('/')[-1]
        response = self._get(img_url)
        with open(file_name, 'wb') as f:
            f.write(response.content)
        return 200
if __name__ == "__main__":
    # Module is import-only for now; no command-line behavior is defined.
    pass
| 39.648649
| 129
| 0.56987
|
4a0bdcf0191b84a8e04552927cac5f2ec0604e62
| 2,714
|
py
|
Python
|
getNews.py
|
iprashant2402/newsquoo-backend-python
|
fef21eefdb1160c3728067706798cc8020c08eb8
|
[
"Apache-2.0"
] | null | null | null |
getNews.py
|
iprashant2402/newsquoo-backend-python
|
fef21eefdb1160c3728067706798cc8020c08eb8
|
[
"Apache-2.0"
] | null | null | null |
getNews.py
|
iprashant2402/newsquoo-backend-python
|
fef21eefdb1160c3728067706798cc8020c08eb8
|
[
"Apache-2.0"
] | null | null | null |
"""Fetch news articles for mid-April through mid-May 2020 via NewsAPI and
dump them, with sequential ids, to dataset/news_data.json."""
from newsapi import NewsApiClient
import json
# Init
# SECURITY NOTE(review): the API key is hard-coded here; it should be moved
# to an environment variable or config file before this key is rotated.
newsapi = NewsApiClient(api_key='e1529d21c8c34fc49978ebc4e673ccf7')
uid = 0
all_articles = []
# Get articles for month of April----------------------------------#
# Articles are fetched in two-day windows (day .. day+1). The day==9 branch
# exists because the end date crosses from zero-padded '09' to '10'.
day = 17
base_date1 = '2020-04-'
base_date2 = '2020-04-0'
while(day<=30):
    if(day<10):
        if(day==9):
            from_date = base_date2 + str(day)
            to_date = base_date1 + str(day+1)
        else:
            from_date = base_date2 + str(day)
            to_date = base_date2 + str(day+1)
    else:
        from_date = base_date1 + str(day)
        to_date = base_date1 + str(day+1)
    # range(1, 2) fetches only page 1 (up to 100 articles) per window.
    for i in range(1, 2):
        all_articles.append(newsapi.get_everything(q=None,
                                                   sources='bbc-news,financial-post,entertainment-weekly,cnn,espn,google-news-in',
                                                   from_param=from_date,
                                                   to=to_date,
                                                   language='en',
                                                   sort_by='publishedAt',
                                                   page=i,
                                                   page_size=100))
    day = day + 2
# Get articles for month of May------------------------#
# Same two-day windowing as April, written as a for-loop over odd days 1..15.
base_date1 = '2020-05-'
base_date2 = '2020-05-0'
for day in range(1, 17, 2):
    if day < 10:
        if day == 9:
            from_date = base_date2 + str(day)
            to_date = base_date1 + str(day + 1)
        else:
            from_date = base_date2 + str(day)
            to_date = base_date2 + str(day + 1)
    else:
        from_date = base_date1 + str(day)
        to_date = base_date1 + str(day + 1)
    for i in range(1, 2):
        all_articles.append(newsapi.get_everything(q=None,
                                                   sources='bbc-news,financial-post,entertainment-weekly,cnn,espn,google-news-in',
                                                   from_param=from_date,
                                                   to=to_date,
                                                   language='en',
                                                   sort_by='publishedAt',
                                                   page=i,
                                                   page_size=100))
# push articles into final array-----------------------#
# First pass assigns a globally unique, sequential id to every article...
for z in all_articles:
    for article in z['articles']:
        article['id'] = uid
        uid = uid+1
# ...second pass flattens the per-response article lists into one list.
final_arr = []
for x in all_articles:
    final_arr.extend(x['articles'])
print("TOTAL NEWS: ", len(final_arr))
# NOTE(review): assumes the dataset/ directory already exists.
with open('dataset/news_data.json', 'w') as outfile:
    json.dump(final_arr, outfile)
| 35.246753
| 130
| 0.450626
|
4a0bdd29c23ee12e389a2f30f3e8321b46a2d291
| 7,016
|
py
|
Python
|
storageclient/google.py
|
uc-cdis/storage-client
|
43f116effcc8f9469cee912dfb6cb64506c4ece1
|
[
"Apache-2.0"
] | null | null | null |
storageclient/google.py
|
uc-cdis/storage-client
|
43f116effcc8f9469cee912dfb6cb64506c4ece1
|
[
"Apache-2.0"
] | 13
|
2017-12-07T21:21:28.000Z
|
2021-11-02T19:37:57.000Z
|
storageclient/google.py
|
uc-cdis/storage-client
|
43f116effcc8f9469cee912dfb6cb64506c4ece1
|
[
"Apache-2.0"
] | null | null | null |
from storageclient.base import StorageClient
from storageclient.errors import RequestError
from cirrus import GoogleCloudManager
class UserProxy(object):
    """Lightweight holder for a user's Google proxy-group email address."""

    def __init__(self, username):
        # username: email address of the Google Group acting as the
        # user's proxy group.
        self.username = username

    def __repr__(self):
        # Added for debuggability; does not change the public contract.
        return "UserProxy(username={!r})".format(self.username)
class GoogleCloudStorageClient(StorageClient):
    """StorageClient backend for Google Cloud Storage.

    Access control is mediated through Google Groups ("Google Bucket Access
    Groups" per bucket and per-user proxy groups), manipulated via cirrus's
    ``GoogleCloudManager``. Most user/bucket management operations are not
    implemented for this backend and raise ``NotImplementedError``.
    """

    def __init__(self, config):
        # config: mapping with at least "google_project_id", the GCP project
        # used for every GoogleCloudManager session below.
        super(GoogleCloudStorageClient, self).__init__(__name__)
        self._config = config
        self.google_project_id = config.get("google_project_id")

    @property
    def provider(self):
        """
        Returns the type of storage
        """
        return "GoogleCloudStorage"

    def get_user(self, username):
        """
        Get a user
        Args:
            username (str): An email address representing a User's Google
                Proxy Group (e.g. a single Google Group to hold a single
                user's diff identities).
        Returns:
            UserProxy: a UserProxy object if the user exists, else None
        """
        user_proxy = None
        with GoogleCloudManager(project_id=self.google_project_id) as g_mgr:
            user_proxy_response = g_mgr.get_group(username)
            # A response without an "email" field means the group (and thus
            # the user's proxy) does not exist; fall through returning None.
            if user_proxy_response.get("email"):
                user_proxy = UserProxy(username=user_proxy_response.get("email"))
        return user_proxy

    def delete_user(self, username):
        """
        Delete a user
        :returns: None
        :raise:
            :NotFound: the user is not found
        """
        msg = "delete_user not implemented"
        raise NotImplementedError(msg)

    def create_user(self, username):
        """
        Create a user
        :returns: User object
        """
        msg = "create_user not implemented"
        raise NotImplementedError(msg)

    def list_users(self):
        """
        List users
        :returns: a list of User objects
        """
        msg = "list_users not implemented"
        raise NotImplementedError(msg)

    def get_or_create_user(self, username):
        """
        Tries to retrieve a user.
        WARNING: If the user is not found, this DOES NOT CREATE ONE.
        Google architecture requires that a separate process populate
        a proxy Google group per user. If it doesn't exist, we can't create it
        here.
        """
        user_proxy = self.get_user(username)
        if not user_proxy:
            raise Exception(
                "Unable to determine User's Google Proxy group. Cannot create "
                "here. Another process should create proxy groups for "
                "new users. Username provided: {}".format(username)
            )
        return user_proxy

    def create_keypair(self, username):
        """
        Creates a keypair for the user, and
        returns it
        """
        msg = "create_keypair not implemented"
        raise NotImplementedError(msg)

    def delete_keypair(self, username, access_key):
        """
        Deletes a keypair from the user and
        doesn't return anything
        """
        msg = "delete_keypair not implemented"
        raise NotImplementedError(msg)

    def add_bucket_acl(self, bucket, username, access=None):
        """
        Tries to grant a user access to a bucket
        Args:
            bucket (str): Google Bucket Access Group email address. This should
                be the address of a Google Group that has read access on a
                single bucket. Access is controlled by adding members to this
                group.
            username (str): An email address of a member to add to the Google
                Bucket Access Group.
            access (str): IGNORED. For Google buckets, the Google Bucket Access
                Group is given access to the bucket through Google's
                IAM, so you cannot selectively choose permissions. Once you're
                added, you have the access that was set up for that group
                in Google IAM.
        """
        response = None
        with GoogleCloudManager(project_id=self.google_project_id) as g_mgr:
            try:
                response = g_mgr.add_member_to_group(
                    member_email=username, group_id=bucket
                )
            except Exception as exc:
                # Normalize any cirrus/Google failure into the storage-client
                # error type callers expect.
                raise RequestError("Google API Error: {}".format(exc), code=400)
        return response

    def has_bucket_access(self, bucket, user_id):
        """
        Check if the user appears in the acl
        : returns Bool
        """
        msg = "has_bucket_access not implemented"
        raise NotImplementedError(msg)

    def list_buckets(self):
        """
        Return a list of Bucket objects
        : [bucket1, bucket2,...]
        """
        msg = "list_buckets not implemented"
        raise NotImplementedError(msg)

    def delete_all_keypairs(self, user):
        """
        Remove all the keys from a user
        : returns None
        """
        msg = "delete_all_keypairs not implemented"
        raise NotImplementedError(msg)

    def get_bucket(self, bucket):
        """
        Return a bucket from the storage
        """
        msg = "get_bucket not implemented"
        raise NotImplementedError(msg)

    def get_or_create_bucket(self, bucket, access_key=None, secret_key=None):
        """
        Tries to retrieve a bucket and if fit fails,
        creates and returns one
        """
        msg = "get_or_create_bucket not implemented"
        raise NotImplementedError(msg)

    def edit_bucket_template(self, template_id, **kwargs):
        """
        Change the parameters for the template used to create
        the buckets
        """
        msg = "edit_bucket_template not implemented"
        raise NotImplementedError(msg)

    def update_bucket_acl(self, bucket, user_list):
        """
        Add acl's for the list of users
        """
        msg = "update_bucket_acl not implemented"
        raise NotImplementedError(msg)

    def set_bucket_quota(self, bucket, quota_unit, quota):
        """
        Set quota for the entire bucket
        """
        msg = "set_bucket_quota not implemented"
        raise NotImplementedError(msg)

    def delete_bucket_acl(self, bucket, user):
        """
        Remove a user from a bucket's Google Bucket Access Group.
        (Docstring summary previously said "Set quota for the entire bucket",
        a copy-paste error; the body removes a group member.)
        Args:
            bucket (str): Google Bucket Access Group email address. This should
                be the address of a Google Group that has read access on a
                single bucket. Access is controlled by adding members to this
                group.
            user (str): An email address of a member to add to the Google
                Bucket Access Group.
        """
        response = None
        with GoogleCloudManager(project_id=self.google_project_id) as g_mgr:
            try:
                response = g_mgr.remove_member_from_group(
                    member_email=user, group_id=bucket
                )
            except Exception as exc:
                raise RequestError("Google API Error: {}".format(exc), code=400)
        return response
| 32.183486
| 81
| 0.597349
|
4a0bdd636cedff721af320c0b637a31b7f175f00
| 118
|
py
|
Python
|
scripts/hashing_file.py
|
Pyt45/algorithms-dataStructure-python
|
d88a5225e49dd1cbce5363b1f88c0f207a301d5c
|
[
"MIT"
] | null | null | null |
scripts/hashing_file.py
|
Pyt45/algorithms-dataStructure-python
|
d88a5225e49dd1cbce5363b1f88c0f207a301d5c
|
[
"MIT"
] | null | null | null |
scripts/hashing_file.py
|
Pyt45/algorithms-dataStructure-python
|
d88a5225e49dd1cbce5363b1f88c0f207a301d5c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import annotations

import hashlib
def hash_file(filepath: str) -> str:
    """Return the hex-encoded SHA-256 digest of the file at *filepath*.

    The file is read in fixed-size chunks so arbitrarily large files can be
    hashed without loading them fully into memory.

    Args:
        filepath: path of the file to hash.

    Returns:
        The hexadecimal SHA-256 digest string.

    Raises:
        FileNotFoundError: if *filepath* does not exist.
    """
    digest = hashlib.sha256()
    with open(filepath, "rb") as fh:
        # iter() with a sentinel yields chunks until read() returns b"".
        for chunk in iter(lambda: fh.read(65536), b""):
            digest.update(chunk)
    return digest.hexdigest()
| 19.666667
| 36
| 0.754237
|
4a0bde1908a76ba550238c14359b18ae63f00d96
| 2,821
|
py
|
Python
|
airbyte-integrations/connectors/source-twitter-ads/unit_tests/test_streams.py
|
blinkist/airbyte
|
46ab8abc4fcc4243b6eeb7d3864fa43fa5a55d64
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-twitter-ads/unit_tests/test_streams.py
|
blinkist/airbyte
|
46ab8abc4fcc4243b6eeb7d3864fa43fa5a55d64
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-twitter-ads/unit_tests/test_streams.py
|
blinkist/airbyte
|
46ab8abc4fcc4243b6eeb7d3864fa43fa5a55d64
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from http import HTTPStatus
from unittest.mock import MagicMock
import pytest
from source_twitter_ads.source import TwitterAdsStream
@pytest.fixture
def patch_base_class(mocker):
    """Patch TwitterAdsStream so the abstract base class can be instantiated
    directly in the tests below."""
    # Mock abstract methods to enable instantiating abstract class
    mocker.patch.object(TwitterAdsStream, "path", "v0/example_endpoint")
    mocker.patch.object(TwitterAdsStream, "primary_key", "test_primary_key")
    mocker.patch.object(TwitterAdsStream, "__abstractmethods__", set())
def test_request_params(patch_base_class):
    """The scaffold stream sends no extra query parameters."""
    stream = TwitterAdsStream()
    # TODO: replace this with your input parameters
    kwargs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
    # TODO: replace this with your expected request parameters
    assert stream.request_params(**kwargs) == {}
def test_next_page_token(patch_base_class):
    """Pagination token is None for the scaffold response."""
    stream = TwitterAdsStream()
    # TODO: replace this with your input parameters
    mock_response = MagicMock()
    # TODO: replace this with your expected next page token
    expected = None
    assert stream.next_page_token(response=mock_response) == expected
def test_parse_response(patch_base_class):
    """The first parsed record of the scaffold response is an empty dict."""
    stream = TwitterAdsStream()
    # TODO: replace this with your input parameters
    mock_response = MagicMock()
    # TODO: replace this with your expected parsed object
    expected = {}
    assert next(stream.parse_response(response=mock_response)) == expected
def test_request_headers(patch_base_class):
    """The scaffold stream sends no extra request headers."""
    stream = TwitterAdsStream()
    # TODO: replace this with your input parameters
    kwargs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
    # TODO: replace this with your expected request headers
    assert stream.request_headers(**kwargs) == {}
def test_http_method(patch_base_class):
    """The stream issues GET requests."""
    # TODO: replace this with your expected http request method
    assert TwitterAdsStream().http_method == "GET"
@pytest.mark.parametrize(
    ("http_status", "should_retry"),
    [
        (HTTPStatus.OK, False),
        (HTTPStatus.BAD_REQUEST, False),
        (HTTPStatus.TOO_MANY_REQUESTS, True),
        (HTTPStatus.INTERNAL_SERVER_ERROR, True),
    ],
)
def test_should_retry(patch_base_class, http_status, should_retry):
    """Retry decisions follow the HTTP status code (retry on 429/5xx only)."""
    mock_response = MagicMock()
    mock_response.status_code = http_status
    assert TwitterAdsStream().should_retry(mock_response) == should_retry
def test_backoff_time(patch_base_class):
    """Default backoff_time is None (use the framework's default policy)."""
    mock_response = MagicMock()
    expected = None
    assert TwitterAdsStream().backoff_time(mock_response) == expected
| 33.583333
| 82
| 0.740518
|
4a0bde24e10dba91b983bad2939fb64d3f979cf4
| 41
|
py
|
Python
|
venv/lib/python3.6/encodings/hp_roman8.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2019-04-17T13:35:50.000Z
|
2021-12-21T00:11:36.000Z
|
venv/lib/python3.6/encodings/hp_roman8.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2021-03-31T19:51:24.000Z
|
2021-06-10T23:05:09.000Z
|
venv/lib/python3.6/encodings/hp_roman8.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2019-10-01T08:47:35.000Z
|
2020-07-11T06:32:16.000Z
|
/usr/lib/python3.6/encodings/hp_roman8.py
| 41
| 41
| 0.829268
|
4a0bde8f3edbbafaf3d987a0d6e3e19e1cd29b3d
| 6,060
|
py
|
Python
|
orion/evaluation/point.py
|
PSFC-HEDP/Orion
|
37535c788112df346bb9d3a13255f58f2479d4bc
|
[
"MIT"
] | 543
|
2020-06-16T21:48:43.000Z
|
2021-10-04T01:56:27.000Z
|
orion/evaluation/point.py
|
PSFC-HEDP/Orion
|
37535c788112df346bb9d3a13255f58f2479d4bc
|
[
"MIT"
] | 147
|
2020-05-20T02:22:26.000Z
|
2021-10-12T05:28:56.000Z
|
orion/evaluation/point.py
|
PSFC-HEDP/Orion
|
37535c788112df346bb9d3a13255f58f2479d4bc
|
[
"MIT"
] | 98
|
2020-08-13T11:29:51.000Z
|
2021-10-04T18:59:09.000Z
|
from orion.evaluation.common import _accuracy, _f1_score, _precision, _recall, _weighted_segment
def _point_partition(expected, observed, start=None, end=None):
expected = set(expected)
observed = set(observed)
edge_start = min(expected.union(observed))
if start is not None:
edge_start = start
edge_end = max(expected.union(observed))
if end is not None:
edge_end = end
length = int(edge_end) - int(edge_start) + 1
expected_parts = [0] * length
observed_parts = [0] * length
for edge in expected:
expected_parts[edge - edge_start] = 1
for edge in observed:
observed_parts[edge - edge_start] = 1
return expected_parts, observed_parts, None
def point_confusion_matrix(expected, observed, data=None, start=None, end=None):
    """Compute the confusion matrix between the ground truth and the detected anomalies.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        data (DataFrame):
            Original data, passed as a ``pandas.DataFrame`` containing timestamp.
            Used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        tuple:
            number of true negative, false positive, false negative, true positive.
    """
    # When the raw data is supplied, its timestamp range overrides start/end.
    if data is not None:
        start = data['timestamp'].min()
        end = data['timestamp'].max()
    # Normalize DataFrame inputs to plain timestamp lists.
    if not isinstance(expected, list):
        expected = list(expected['timestamp'])
    if not isinstance(observed, list):
        observed = list(observed['timestamp'])
    return _weighted_segment(expected, observed, _point_partition, start, end)
def point_accuracy(expected, observed, data=None, start=None, end=None):
    """Compute an accuracy score between the ground truth and the detected anomalies.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        data (DataFrame):
            Original data, passed as a ``pandas.DataFrame`` containing timestamp.
            Used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            Accuracy score between the ground truth and detected anomalies.
    """
    # Point-wise variant: delegate to the shared metric with the
    # point-based confusion matrix.
    return _accuracy(expected, observed, data, start, end, cm=point_confusion_matrix)
def point_precision(expected, observed, data=None, start=None, end=None):
    """Compute a precision score between the ground truth and the detected anomalies.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        data (DataFrame):
            Original data, passed as a ``pandas.DataFrame`` containing timestamp.
            Used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            Precision score between the ground truth and detected anomalies.
    """
    return _precision(expected, observed, data, start, end, cm=point_confusion_matrix)
def point_recall(expected, observed, data=None, start=None, end=None):
    """Compute a recall score between the ground truth and the detected anomalies.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        data (DataFrame):
            Original data, passed as a ``pandas.DataFrame`` containing timestamp.
            Used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            Recall score between the ground truth and detected anomalies.
    """
    return _recall(expected, observed, data, start, end, cm=point_confusion_matrix)
def point_f1_score(expected, observed, data=None, start=None, end=None):
    """Compute an F1 score between the ground truth and the detected anomalies.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies passed as a ``pandas.DataFrame`` or list containing
            one column: timestamp.
        data (DataFrame):
            Original data, passed as a ``pandas.DataFrame`` containing timestamp.
            Used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            F1 score between the ground truth and detected anomalies.
    """
    return _f1_score(expected, observed, data, start, end, cm=point_confusion_matrix)
| 36.506024
| 96
| 0.645545
|
4a0bdfae05defeee64d8f9ef7e4991812eb815c9
| 677
|
py
|
Python
|
PDSUtilities/pandas/utilities.py
|
DrJohnWagner/PDSUtilities
|
ffad1a02f78f46acdf4bd65d7c2eb063af7dbc13
|
[
"Apache-2.0"
] | null | null | null |
PDSUtilities/pandas/utilities.py
|
DrJohnWagner/PDSUtilities
|
ffad1a02f78f46acdf4bd65d7c2eb063af7dbc13
|
[
"Apache-2.0"
] | 12
|
2022-01-18T06:21:03.000Z
|
2022-01-20T07:29:56.000Z
|
PDSUtilities/pandas/utilities.py
|
DrJohnWagner/PDSUtilities
|
ffad1a02f78f46acdf4bd65d7c2eb063af7dbc13
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
def read_or_create_csv(filename, columns, index_col=[0]):
    """Load a CSV into a DataFrame, or create an empty one with *columns*.

    Args:
        filename: path of the CSV file to read.
        columns: column names used when the file is missing or empty.
        index_col: forwarded to ``pandas.read_csv`` (default: first column).
            NOTE: the mutable default is never mutated, so sharing it across
            calls is safe.

    Returns:
        pandas.DataFrame: the file's contents, or an empty frame with
        *columns* when the file does not exist or contains no data.
    """
    try:
        df = pd.read_csv(filename, index_col=index_col)
    # The two failure modes had duplicated handlers; merge them.
    except (FileNotFoundError, pd.errors.EmptyDataError):
        # The original f-string never interpolated the path; include the
        # filename so the message is actually useful.
        print(f"Pandas file {filename} not found...creating!")
        df = pd.DataFrame(columns=columns)
    return df
# def append(df, data, columns = None):
# if isinstance(data, dict):
# return pd.concat([df, pd.DataFrame(data)])
# if isinstance(data, list) and columns is not None:
# return pd.concat([df, pd.DataFrame(data)])
| 37.611111
| 62
| 0.654357
|
4a0bdfb19dea97ece2b859d4235ad136cba3d6ef
| 313
|
py
|
Python
|
tests/tensor/test_multinomial.py
|
nlp-greyfoss/metagrad
|
0f32f177ced1478f0c75ad37bace9a9fc4044ba3
|
[
"MIT"
] | 7
|
2022-01-27T05:38:02.000Z
|
2022-03-30T01:48:00.000Z
|
tests/tensor/test_multinomial.py
|
nlp-greyfoss/metagrad
|
0f32f177ced1478f0c75ad37bace9a9fc4044ba3
|
[
"MIT"
] | null | null | null |
tests/tensor/test_multinomial.py
|
nlp-greyfoss/metagrad
|
0f32f177ced1478f0c75ad37bace9a9fc4044ba3
|
[
"MIT"
] | 2
|
2022-02-22T07:47:02.000Z
|
2022-03-22T08:31:59.000Z
|
from metagrad.tensor import Tensor
def test_simple_multinomial():
    """Draw 5 samples (with replacement) from a 1-D weight vector."""
    probs = Tensor([0, 10, 3, 0])
    samples = Tensor.multinomial(probs, 5, replace=True)
    print(samples)
def test_multinomial():
    """Draw 5 samples per row (with replacement) from a 2-D weight matrix."""
    probs = Tensor([[0, 10, 3, 0], [5, 0, 0, 5]])
    samples = Tensor.multinomial(probs, 5, replace=True)
    print(samples)
| 24.076923
| 52
| 0.642173
|
4a0be0d2556a6120203b89cb1cdf05b71d2ed3fb
| 3,429
|
py
|
Python
|
tests/test_class_file_data.py
|
jthDEV/tern
|
cea8d4341a04a7d0785d6e5e5cc4827667502cd1
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_file_data.py
|
jthDEV/tern
|
cea8d4341a04a7d0785d6e5e5cc4827667502cd1
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_file_data.py
|
jthDEV/tern
|
cea8d4341a04a7d0785d6e5e5cc4827667502cd1
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import unittest
from tern.classes.file_data import FileData
from test_fixtures import TestTemplate1
from test_fixtures import TestTemplate2
class TestClassFileData(unittest.TestCase):
    """Unit tests for tern's FileData class: construction, validation,
    setters, and dict serialization (plain and template-driven)."""

    def setUp(self):
        # Fresh fixture per test: a file with two licenses attached.
        self.afile = FileData('afile',
                              'path/to/afile')
        self.afile.licenses = ['MIT', 'GPL']

    def tearDown(self):
        del self.afile

    def testInstance(self):
        # A minimal FileData starts with only name/path set; everything
        # else is empty/falsy.
        file1 = FileData('file1', 'path/to/file1')
        self.assertEqual(file1.name, 'file1')
        self.assertEqual(file1.path, 'path/to/file1')
        self.assertFalse(file1.checksum_type)
        self.assertFalse(file1.checksum)
        self.assertFalse(file1.date)
        self.assertFalse(file1.version_control)
        self.assertFalse(file1.version)
        self.assertFalse(file1.file_type)
        self.assertFalse(file1.licenses)
        self.assertFalse(file1.license_expressions)
        self.assertFalse(file1.copyrights)
        self.assertFalse(file1.authors)
        self.assertFalse(file1.packages)
        self.assertFalse(file1.urls)
        # A malformed date string ('12355') is rejected at construction.
        with self.assertRaises(ValueError):
            file2 = FileData('file2',
                             'path/to/file2',
                             '12355')
        # A valid ISO date and file type are accepted and stored.
        file2 = FileData('file2',
                         'path/to/file2',
                         '2020-01-01',
                         'binary')
        self.assertEqual(file2.date, '2020-01-01')
        self.assertEqual(file2.file_type, 'binary')
        # List-valued attributes round-trip through plain assignment.
        file2.licenses = ['MIT', 'GPL']
        file2.license_expressions = ['GPLv2 or MIT', 'MIT and GPLv2']
        file2.copyrights = ['copyrights']
        file2.authors = ['author1', 'author2']
        file2.packages = ['package1', 'package2']
        self.assertEqual(file2.licenses, ['MIT', 'GPL'])
        self.assertEqual(file2.license_expressions, ['GPLv2 or MIT',
                                                     'MIT and GPLv2'])
        self.assertEqual(file2.copyrights, ['copyrights'])
        self.assertEqual(file2.authors, ['author1', 'author2'])
        self.assertEqual(file2.packages, ['package1', 'package2'])

    def testSetChecksum(self):
        # set_checksum stores both the algorithm name and the digest.
        self.afile.set_checksum('sha256', '12345abcde')
        self.assertEqual(self.afile.checksum_type, 'sha256')
        self.assertEqual(self.afile.checksum, '12345abcde')

    def testSetVersion(self):
        # set_version stores the VCS name and the revision identifier.
        self.afile.set_version('git', '12345abcde')
        self.assertEqual(self.afile.version_control, 'git')
        self.assertEqual(self.afile.version, '12345abcde')

    def testToDict(self):
        # Default serialization exposes attributes under their own names.
        file_dict = self.afile.to_dict()
        self.assertEqual(file_dict['name'], 'afile')
        self.assertEqual(file_dict['path'], 'path/to/afile')
        self.assertEqual(file_dict['licenses'], ['MIT', 'GPL'])

    def testToDictTemplate(self):
        # Template-driven serialization remaps keys (e.g. 'file.name') and
        # may add template-specific fields (e.g. an empty 'notes').
        template1 = TestTemplate1()
        template2 = TestTemplate2()
        dict1 = self.afile.to_dict(template1)
        dict2 = self.afile.to_dict(template2)
        self.assertEqual(len(dict1.keys()), 3)
        self.assertEqual(dict1['file.name'], 'afile')
        self.assertEqual(dict1['file.path'], 'path/to/afile')
        self.assertEqual(dict1['file.licenses'], ['MIT', 'GPL'])
        self.assertFalse(dict2['notes'])
if __name__ == '__main__':
    # Allow running this test module directly with `python`.
    unittest.main()
| 36.870968
| 70
| 0.610674
|
4a0be13889ad2640fa67b2e6d152e91629557369
| 12,540
|
py
|
Python
|
scipy/spatial/_spherical_voronoi.py
|
xu-hong-/scipy
|
f737001cf0a75654efe09a1de5cdf5d1895bda59
|
[
"BSD-3-Clause"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
scipy/spatial/_spherical_voronoi.py
|
xu-hong-/scipy
|
f737001cf0a75654efe09a1de5cdf5d1895bda59
|
[
"BSD-3-Clause"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
scipy/spatial/_spherical_voronoi.py
|
xu-hong-/scipy
|
f737001cf0a75654efe09a1de5cdf5d1895bda59
|
[
"BSD-3-Clause"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
"""
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as Scipy.
#
import numpy as np
import numpy.matlib
import scipy
import itertools
from scipy._lib._version import NumpyVersion
# Whether Numpy has stacked matrix linear algebra
HAS_NUMPY_VEC_DET = (NumpyVersion(np.__version__) >= '1.8.0')
__all__ = ['SphericalVoronoi']
def calc_circumcenters(tetrahedrons):
    """ Calculates the cirumcenters of the circumspheres of tetrahedrons.

    An implementation based on
    http://mathworld.wolfram.com/Circumsphere.html

    Parameters
    ----------
    tetrahedrons : an array of shape (N, 4, 3)
        consisting of N tetrahedrons defined by 4 points in 3D

    Returns
    ----------
    circumcenters : an array of shape (N, 3)
        consisting of the N circumcenters of the tetrahedrons in 3D
    """
    num = tetrahedrons.shape[0]
    # Homogeneous coordinates: append a column of ones to each tetrahedron.
    a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2)

    # First determinant column: squared norms of the tetrahedron vertices.
    sums = np.sum(tetrahedrons ** 2, axis=2)
    d = np.concatenate((sums[:, :, np.newaxis], a), axis=2)

    # Cofactor determinants Dx, Dy, Dz (MathWorld notation). Stacked-matrix
    # np.linalg.det has been available since numpy 1.8 (2013), so the old
    # per-matrix Python-loop fallback was dead code and has been removed.
    dx = np.linalg.det(np.delete(d, 1, axis=2))
    dy = -np.linalg.det(np.delete(d, 2, axis=2))
    dz = np.linalg.det(np.delete(d, 3, axis=2))
    a = np.linalg.det(a)

    nominator = np.vstack((dx, dy, dz))
    denominator = 2 * a
    return (nominator / denominator).T
def project_to_sphere(points, center, radius):
    """Project each point radially onto the sphere given by *center* and *radius*.

    Parameters
    ----------
    points : array of floats of shape (npoints, ndim)
        consisting of the points in a space of dimension ndim
    center : array of floats of shape (ndim,)
        the center of the sphere to project on
    radius : float
        the radius of the sphere to project on

    returns: array of floats of shape (npoints, ndim)
        the points projected onto the sphere
    """
    # Distance of every point from the sphere center, as a column vector.
    distances = scipy.spatial.distance.cdist(points, np.array([center]))
    # Normalize the offsets to unit length, then scale to the sphere radius.
    unit_offsets = (points - center) / distances
    return unit_offsets * radius + center
class SphericalVoronoi:
    """ Voronoi diagrams on the surface of a sphere.
    .. versionadded:: 0.18.0
    Parameters
    ----------
    points : ndarray of floats, shape (npoints, 3)
        Coordinates of points to construct a spherical
        Voronoi diagram from
    radius : float, optional
        Radius of the sphere (Default: 1)
    center : ndarray of floats, shape (3,)
        Center of sphere (Default: origin)
    Attributes
    ----------
    points : double array of shape (npoints, 3)
            the points in 3D to generate the Voronoi diagram from
    radius : double
            radius of the sphere
            Default: None (forces estimation, which is less precise)
    center : double array of shape (3,)
            center of the sphere
            Default: None (assumes sphere is centered at origin)
    vertices : double array of shape (nvertices, 3)
            Voronoi vertices corresponding to points
    regions : list of list of integers of shape (npoints, _ )
            the n-th entry is a list consisting of the indices
            of the vertices belonging to the n-th point in points
    Notes
    ----------
    The spherical Voronoi diagram algorithm proceeds as follows. The Convex
    Hull of the input points (generators) is calculated, and is equivalent to
    their Delaunay triangulation on the surface of the sphere [Caroli]_.
    A 3D Delaunay tetrahedralization is obtained by including the origin of
    the coordinate system as the fourth vertex of each simplex of the Convex
    Hull. The circumcenters of all tetrahedra in the system are calculated and
    projected to the surface of the sphere, producing the Voronoi vertices.
    The Delaunay tetrahedralization neighbour information is then used to
    order the Voronoi region vertices around each generator. The latter
    approach is substantially less sensitive to floating point issues than
    angle-based methods of Voronoi region vertex sorting.
    The surface area of spherical polygons is calculated by decomposing them
    into triangles and using L'Huilier's Theorem to calculate the spherical
    excess of each triangle [Weisstein]_. The sum of the spherical excesses is
    multiplied by the square of the sphere radius to obtain the surface area
    of the spherical polygon. For nearly-degenerate spherical polygons an area
    of approximately 0 is returned by default, rather than attempting the
    unstable calculation.
    Empirical assessment of spherical Voronoi algorithm performance suggests
    quadratic time complexity (loglinear is optimal, but algorithms are more
    challenging to implement). The reconstitution of the surface area of the
    sphere, measured as the sum of the surface areas of all Voronoi regions,
    is closest to 100 % for larger (>> 10) numbers of generators.
    References
    ----------
    .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
                points on or close to a sphere. Research Report RR-7004, 2009.
    .. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web
                Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
    See Also
    --------
    Voronoi : Conventional Voronoi diagrams in N dimensions.
    Examples
    --------
    >>> from matplotlib import colors
    >>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import SphericalVoronoi
    >>> from mpl_toolkits.mplot3d import proj3d
    >>> # set input data
    >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
    ...                    [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
    >>> center = np.array([0, 0, 0])
    >>> radius = 1
    >>> # calculate spherical Voronoi diagram
    >>> sv = SphericalVoronoi(points, radius, center)
    >>> # sort vertices (optional, helpful for plotting)
    >>> sv.sort_vertices_of_regions()
    >>> # generate plot
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> # plot the unit sphere for reference (optional)
    >>> u = np.linspace(0, 2 * np.pi, 100)
    >>> v = np.linspace(0, np.pi, 100)
    >>> x = np.outer(np.cos(u), np.sin(v))
    >>> y = np.outer(np.sin(u), np.sin(v))
    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
    >>> # plot generator points
    >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
    >>> # plot Voronoi vertices
    >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
    ...            c='g')
    >>> # indicate Voronoi regions (as Euclidean polygons)
    >>> for region in sv.regions:
    ...    random_color = colors.rgb2hex(np.random.rand(3))
    ...    polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
    ...    polygon.set_color(random_color)
    ...    ax.add_collection3d(polygon)
    >>> plt.show()
    """
    def __init__(self, points, radius=None, center=None):
        """
        Initializes the object and starts the computation of the Voronoi
        diagram.
        points : The generator points of the Voronoi diagram assumed to be
        all on the sphere with radius supplied by the radius parameter and
        center supplied by the center parameter.
        radius : The radius of the sphere. Will default to 1 if not supplied.
        center : The center of the sphere. Will default to the origin if not
        supplied.
        """
        self.points = points
        # NOTE(review): np.any(center) is falsy for an explicitly passed
        # all-zero center, which then falls through to the same zero default,
        # so behavior is unchanged in that case.
        if np.any(center):
            self.center = center
        else:
            self.center = np.zeros(3)
        # NOTE(review): a radius of 0 would also fall through to the default
        # of 1 here; radius is presumably expected to be a positive float.
        if radius:
            self.radius = radius
        else:
            self.radius = 1
        # Filled in by _calc_vertices_regions below.
        self.vertices = None
        self.regions = None
        self._tri = None
        self._calc_vertices_regions()
    def _calc_vertices_regions(self):
        """
        Calculates the Voronoi vertices and regions of the generators stored
        in self.points. The vertices will be stored in self.vertices and the
        regions in self.regions.
        This algorithm was discussed at PyData London 2015 by
        Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
        """
        # perform 3D Delaunay triangulation on data set
        # (here ConvexHull can also be used, and is faster)
        self._tri = scipy.spatial.ConvexHull(self.points)
        # add the center to each of the simplices in tri to get the same
        # tetrahedrons we'd have gotten from Delaunay tetrahedralization
        # (each hull triangle plus the sphere center forms a tetrahedron)
        tetrahedrons = self._tri.points[self._tri.simplices]
        tetrahedrons = np.insert(
            tetrahedrons,
            3,
            np.array([self.center]),
            axis=1
        )
        # produce circumcenters of tetrahedrons from 3D Delaunay
        circumcenters = calc_circumcenters(tetrahedrons)
        # project tetrahedron circumcenters to the surface of the sphere
        self.vertices = project_to_sphere(
            circumcenters,
            self.center,
            self.radius
        )
        # calculate regions from triangulation
        # filter_tuple[0] holds simplex indices, filter_tuple[1] the
        # generator indices appearing in those simplices (one pair per
        # generator occurrence in a simplex).
        generator_indices = np.arange(self.points.shape[0])
        filter_tuple = np.where((np.expand_dims(self._tri.simplices,
                                                -1) == generator_indices).any(axis=1))
        # pair each generator index with the simplices (= Voronoi vertices)
        # it participates in, then sort by generator index so groupby works
        list_tuples_associations = zip(filter_tuple[1],
                                       filter_tuple[0])
        list_tuples_associations = sorted(list_tuples_associations,
                                          key=lambda t: t[0])
        # group by generator indices to produce
        # unsorted regions in nested list
        groups = []
        for k, g in itertools.groupby(list_tuples_associations,
                                      lambda t: t[0]):
            groups.append([element[1] for element in list(g)])
        self.regions = groups
    def sort_vertices_of_regions(self):
        """
        For each region in regions, it sorts the indices of the Voronoi
        vertices such that the resulting points are in a clockwise or
        counterclockwise order around the generator point.
        This is done as follows: Recall that the n-th region in regions
        surrounds the n-th generator in points and that the k-th
        Voronoi vertex in vertices is the projected circumcenter of the
        tetrahedron obtained by the k-th triangle in _tri.simplices (and the
        origin). For each region n, we choose the first triangle (=Voronoi
        vertex) in _tri.simplices and a vertex of that triangle not equal to
        the center n. These determine a unique neighbor of that triangle,
        which is then chosen as the second triangle. The second triangle
        will have a unique vertex not equal to the current vertex or the
        center. This determines a unique neighbor of the second triangle,
        which is then chosen as the third triangle and so forth. We proceed
        through all the triangles (=Voronoi vertices) belonging to the
        generator in points and obtain a sorted version of the vertices
        of its surrounding region.
        """
        for n in range(0, len(self.regions)):
            # work on a copy so self.regions[n] stays intact until the
            # sorted replacement is assigned at the end
            remaining = self.regions[n][:]
            sorted_vertices = []
            current_simplex = remaining[0]
            # pick any vertex of the starting triangle other than the
            # generator n itself as the shared edge to walk across
            current_vertex = [k for k in self._tri.simplices[current_simplex]
                              if k != n][0]
            remaining.remove(current_simplex)
            sorted_vertices.append(current_simplex)
            while remaining:
                # the next triangle is the unique remaining one sharing the
                # edge (n, current_vertex) with the current triangle
                current_simplex = [
                    s for s in remaining
                    if current_vertex in self._tri.simplices[s]
                ][0]
                # advance the shared vertex: the one vertex of the new
                # triangle that is neither n nor the previous shared vertex
                current_vertex = [
                    s for s in self._tri.simplices[current_simplex]
                    if s != n and s != current_vertex
                ][0]
                remaining.remove(current_simplex)
                sorted_vertices.append(current_simplex)
            self.regions[n] = sorted_vertices
| 38.348624
| 78
| 0.629027
|
4a0be1d4e0fe1553ab9b7e04cfe6c645533c92d9
| 2,000
|
py
|
Python
|
lograt/watchdog/observers/lograt_observer_api.py
|
teddbug-S/LogRat
|
b608e29cbbe93d2f35a32e47c2b98e6be40bdf59
|
[
"MIT"
] | null | null | null |
lograt/watchdog/observers/lograt_observer_api.py
|
teddbug-S/LogRat
|
b608e29cbbe93d2f35a32e47c2b98e6be40bdf59
|
[
"MIT"
] | null | null | null |
lograt/watchdog/observers/lograt_observer_api.py
|
teddbug-S/LogRat
|
b608e29cbbe93d2f35a32e47c2b98e6be40bdf59
|
[
"MIT"
] | null | null | null |
#
# MIT License
#
# Copyright (c) 2021 Divine Darkey
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from watchdog.observers import Observer
class LogRatObserverAPI(Observer):
    """
    LogRatObserverAPI directly subclasses watchdog.observers.Observer
    to override dispatch_events method to dispatch each event with the
    name of the thread triggering that change.
    """
    def __init__(self):
        super().__init__()
    def dispatch_events(self, event_queue, timeout):
        # Block until an event arrives; if the timeout elapses,
        # event_queue.get raises queue.Empty for the caller to handle.
        event, watch = event_queue.get(block=True, timeout=timeout)
        # NOTE(review): _lock and _handlers are internals inherited from
        # watchdog's BaseObserver -- confirm they are stable across the
        # watchdog versions this project supports.
        with self._lock:
            # To allow unschedule/stop and safe removal of event handlers
            # within event handlers itself, check if the handler is still
            # registered after every dispatch.
            for handler in list(self._handlers.get(watch, [])):
                if handler in self._handlers.get(watch, []):
                    # Unlike the base implementation, the observer thread's
                    # name is passed along with the event.
                    handler.dispatch(event, self.name)
            event_queue.task_done()
| 40.816327
| 80
| 0.7255
|
4a0be22c6326419ea8f1271618a99cf113efe452
| 371
|
py
|
Python
|
tests/bento_service_examples/iris_classifier.py
|
vedashree29296/BentoML
|
79f94d543a0684e04551207d102a2d254b770ad3
|
[
"Apache-2.0"
] | null | null | null |
tests/bento_service_examples/iris_classifier.py
|
vedashree29296/BentoML
|
79f94d543a0684e04551207d102a2d254b770ad3
|
[
"Apache-2.0"
] | null | null | null |
tests/bento_service_examples/iris_classifier.py
|
vedashree29296/BentoML
|
79f94d543a0684e04551207d102a2d254b770ad3
|
[
"Apache-2.0"
] | null | null | null |
import bentoml
from bentoml.adapters import DataframeInput
from bentoml.frameworks.sklearn import SklearnModelArtifact
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([SklearnModelArtifact('model')])
class IrisClassifier(bentoml.BentoService):
    """BentoService exposing a scikit-learn model packed under the 'model' artifact."""
    @bentoml.api(input=DataframeInput())
    def predict(self, df):
        """Run the packed model's predict() on the rows of the input DataFrame."""
        return self.artifacts.model.predict(df)
| 30.916667
| 59
| 0.795148
|
4a0be234e994de5d2a8b2523837d9a61a2cd5271
| 4,304
|
py
|
Python
|
composer/models/base.py
|
jbloxham/composer
|
6dd0a0f297cafb404333d6280a5344bcb7f3bee6
|
[
"Apache-2.0"
] | null | null | null |
composer/models/base.py
|
jbloxham/composer
|
6dd0a0f297cafb404333d6280a5344bcb7f3bee6
|
[
"Apache-2.0"
] | null | null | null |
composer/models/base.py
|
jbloxham/composer
|
6dd0a0f297cafb404333d6280a5344bcb7f3bee6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
import abc
from typing import Any, Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.classification.accuracy import Accuracy
from torchmetrics.collections import MetricCollection
from composer.core.types import Batch, BatchPair, Metrics, Tensors
from composer.models.loss import CrossEntropyLoss, soft_cross_entropy
class BaseMosaicModel(torch.nn.Module, abc.ABC):
    """The minimal interface needed to use a model with :class:`composer.trainer.Trainer`.
    """
    # Subclasses must implement all four abstract methods below:
    # loss, forward, metrics, and validate.
    @abc.abstractmethod
    def loss(self, outputs: Any, batch: Batch, *args, **kwargs) -> Tensors:
        """Compute the loss of the model.
        Args:
            outputs (Any): The output of the foward pass.
            batch (~composer.core.types.Batch): The input batch from dataloader.
        Returns:
            Tensors:
                The loss as a ``Tensors`` object.
        """
        pass
    @abc.abstractmethod
    def forward(self, batch: Batch) -> Tensors:
        """Compute model output given an input.
        Args:
            batch (Batch): The input batch for the forward pass.
        Returns:
            Tensors:
                The result that is passed to :meth:`loss` as a ``Tensors``
                object.
        """
        pass
    @abc.abstractmethod
    def metrics(self, train: bool = False) -> Metrics:
        """Get metrics for evaluating the model.
        .. warning:: Each metric keeps states which are updated with data seen so far.
                     As a result, different metric instances should be used for training
                     and validation. See:
                     https://torchmetrics.readthedocs.io/en/latest/pages/overview.html
                     for more details.
        Args:
            train (bool, optional): True to return metrics that should be computed
                during training and False otherwise. (default: ``False``)
        Returns:
            Metrics: A ``Metrics`` object.
        """
        pass
    @abc.abstractmethod
    def validate(self, batch: Batch) -> Tuple[Any, Any]:
        """Compute model outputs on provided data.
        The output of this function will be directly used as input
        to all metrics returned by :meth:`metrics`.
        Args:
            batch (Batch): The data to perform validation with.
                Specified as a tuple of tensors (input, target).
        Returns:
            Tuple[Any, Any]: Tuple that is passed directly to the
                `update()` methods of the metrics returned by :meth:`metrics`.
                Most often, this will be a tuple of the form (predictions, targets).
        """
        pass
class MosaicClassifier(BaseMosaicModel):
    """Implements the base logic that all classifiers can build on top of.
    Inherits from :class:`~composer.models.BaseMosaicModel`.
    Args:
        module (torch.nn.Module): The neural network module to wrap with
            :class:`~composer.models.MosaicClassifier`.
    """
    # Number of output classes, surfaced from the wrapped module if available.
    num_classes: Optional[int] = None
    def __init__(self, module: torch.nn.Module) -> None:
        super().__init__()
        # Separate metric instances for train and validation: torchmetrics
        # objects are stateful, so the two phases must not share them.
        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.val_loss = CrossEntropyLoss()
        self.module = module
        if hasattr(self.module, "num_classes"):
            self.num_classes = getattr(self.module, "num_classes")
    def loss(self, outputs: Any, batch: BatchPair, *args, **kwargs) -> Tensors:
        """Compute the soft cross-entropy loss of ``outputs`` against the batch targets."""
        _, y = batch
        assert isinstance(outputs, Tensor), "Loss expects outputs as Tensor"
        assert isinstance(y, Tensor), "Loss does not support multiple target Tensors"
        return soft_cross_entropy(outputs, y, *args, **kwargs)
    def metrics(self, train: bool = False) -> Metrics:
        """Return train accuracy, or the validation accuracy+loss collection."""
        return self.train_acc if train else MetricCollection([self.val_acc, self.val_loss])
    def forward(self, batch: BatchPair) -> Tensor:
        """Run the wrapped module on the batch inputs and return the logits."""
        # Targets are not used in the forward pass (previously bound to an
        # unused local ``y``).
        x, _ = batch
        logits = self.module(x)
        return logits
    def validate(self, batch: BatchPair) -> Tuple[Any, Any]:
        """Return ``(logits, targets)`` for metric updates.
        The model must be in eval mode before calling this.
        """
        assert self.training is False, "For validation, model must be in eval mode"
        # Inputs are consumed by self.forward; only the targets are needed
        # here (previously bound to an unused local ``inputs``).
        _, targets = batch
        logits = self.forward(batch)
        return logits, targets
| 33.364341
| 91
| 0.62802
|
4a0be43146c06dad6851e21b74dd068eba85ffb6
| 872
|
py
|
Python
|
bnw/core/ensure_indexes.py
|
l29ah/bnw
|
d03db025e12c96de42c9a7a4ede329d7c9d216c5
|
[
"BSD-2-Clause"
] | 23
|
2015-01-14T13:22:37.000Z
|
2022-01-11T11:38:43.000Z
|
bnw/core/ensure_indexes.py
|
l29ah/bnw
|
d03db025e12c96de42c9a7a4ede329d7c9d216c5
|
[
"BSD-2-Clause"
] | 31
|
2015-01-27T19:57:45.000Z
|
2018-10-04T22:35:22.000Z
|
bnw/core/ensure_indexes.py
|
l29ah/bnw
|
d03db025e12c96de42c9a7a4ede329d7c9d216c5
|
[
"BSD-2-Clause"
] | 11
|
2015-01-02T10:29:14.000Z
|
2018-06-28T13:09:53.000Z
|
#!/usr/bin/env python
from twisted.internet import defer
try:
from bnw.core import bnw_objects as objs
except ImportError:
pass
@defer.inlineCallbacks
def index():
    # Walk every attribute of the bnw_objects module and ensure the MongoDB
    # indexes of each concrete MongoObject subclass, skipping the abstract
    # MongoObject base itself. (Python 2 / Twisted inlineCallbacks style.)
    for name in dir(objs):
        cls = getattr(objs, name)
        if (isinstance(cls, type) and issubclass(cls, objs.MongoObject) and
                cls is not objs.MongoObject):
            print '---', name
            # ensure_indexes returns a Deferred; yield waits for completion.
            yield cls.ensure_indexes()
    print 'Indexes updated.'
if __name__ == '__main__':
    import sys
    import os.path
    # Make the package root importable when this file is run as a script
    # (it lives one directory below the project root).
    root = os.path.join(os.path.dirname(__file__), '..')
    sys.path.insert(0, os.path.abspath(root))
    from twisted.internet import reactor
    import bnw.core.base
    from bnw.core import bnw_objects as objs
    import config
    bnw.core.base.config.register(config)
    # Stop the reactor once all indexes have been ensured.
    index().addCallback(lambda ign: reactor.stop())
    reactor.run()
| 25.647059
| 75
| 0.651376
|
4a0be5bf7e804e4281f17548366e910ea8af14d0
| 25,435
|
py
|
Python
|
dss/start.py
|
sethbam9/tutorials
|
c259636682304cb516e9048ca8df5a3ab92c62cc
|
[
"MIT"
] | 2
|
2019-07-17T18:51:26.000Z
|
2019-07-24T19:45:23.000Z
|
dss/start.py
|
sethbam9/tutorials
|
c259636682304cb516e9048ca8df5a3ab92c62cc
|
[
"MIT"
] | 3
|
2019-01-16T10:56:50.000Z
|
2020-11-16T16:30:48.000Z
|
dss/start.py
|
sethbam9/tutorials
|
c259636682304cb516e9048ca8df5a3ab92c62cc
|
[
"MIT"
] | 2
|
2020-12-17T15:41:33.000Z
|
2021-11-03T18:23:07.000Z
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img align="right" src="images/tf.png" width="128"/>
# <img align="right" src="images/logo.png" width="128"/>
# <img align="right" src="images/etcbc.png" width="128"/>
# <img align="right" src="images/dans.png" width="128"/>
#
# # Tutorial
#
# This notebook gets you started with using
# [Text-Fabric](https://annotation.github.io/text-fabric/) for coding in the Dead-Sea Scrolls.
#
# Familiarity with the underlying
# [data model](https://annotation.github.io/text-fabric/tf/about/datamodel.html)
# is recommended.
# ## Installing Text-Fabric
#
# ### Python
#
# You need to have Python on your system. Most systems have it out of the box,
# but alas, that is python2 and we need at least python **3.6**.
#
# Install it from [python.org](https://www.python.org) or from
# [Anaconda](https://www.anaconda.com/download).
#
# ### TF itself
#
# ```
# pip3 install text-fabric
# ```
#
# ### Jupyter notebook
#
# You need [Jupyter](http://jupyter.org).
#
# If it is not already installed:
#
# ```
# pip3 install jupyter
# ```
# ## Tip
# If you cloned the repository containing this tutorial,
# first copy its parent directory to somewhere outside your clone of the repo,
# before computing with this it.
#
# If you pull changes from the repository later, it will not conflict with
# your computations.
#
# Where you put your tutorial directory is up to you.
# It will work from any directory.
# ## Cookbook
#
# This tutorial and its sister tutorials are meant to showcase most of things TF can do.
#
# But we also have a [cookbook](cookbook) with a set of focused recipes on tricky things.
# ## Data
#
# Text-Fabric will fetch the data set for you from github, and check for updates.
#
# The data will be stored in the `text-fabric-data` in your home directory.
# # Features
# The data of the corpus is organized in features.
# They are *columns* of data.
# Think of the corpus as a gigantic spreadsheet, where row 1 corresponds to the
# first sign, row 2 to the second sign, and so on, for all ~ 1.5 M signs,
# followed by ~ 500 K word nodes and yet another 200 K nodes of other types.
#
# The information which reading each sign has, constitutes a column in that spreadsheet.
# The DSS corpus contains > 50 columns.
#
# Instead of putting that information in one big table, the data is organized in separate columns.
# We call those columns **features**.
# %load_ext autoreload
# %autoreload 2
import os
import collections
# # Incantation
#
# The simplest way to get going is by this *incantation*:
from tf.app import use
# For the very last version, use `hot`.
#
# For the latest release, use `latest`.
#
# If you have cloned the repos (TF app and data), use `clone`.
#
# If you do not want/need to upgrade, leave out the checkout specifiers.
A = use("dss:clone", checkout="clone", hoist=globals())
# A = use('dss:hot', checkout='hot', hoist=globals())
# A = use('dss:latest', checkout='latest', hoist=globals())
# A = use('dss', hoist=globals())
# You can see which features have been loaded, and if you click on a feature name, you find its documentation.
# If you hover over a name, you see where the feature is located on your system.
# ## API
#
# The result of the incantation is that we have a bunch of special variables at our disposal
# that give us access to the text and data of the corpus.
#
# At this point it is helpful to throw a quick glance at the text-fabric API documentation
# (see the links under **API Members** above).
#
# The most essential thing for now is that we can use `F` to access the data in the features
# we've loaded.
# But there is more, such as `N`, which helps us to walk over the text, as we see in a minute.
#
# The **API members** above show you exactly which new names have been inserted in your namespace.
# If you click on these names, you go to the API documentation for them.
# ## Search
# Text-Fabric contains a flexible search engine, that does not only work for the data,
# of this corpus, but also for other corpora and data that you add to corpora.
#
# **Search is the quickest way to come up-to-speed with your data, without too much programming.**
#
# Jump to the dedicated [search](search.ipynb) search tutorial first, to whet your appetite.
#
# The real power of search lies in the fact that it is integrated in a programming environment.
# You can use programming to:
#
# * compose dynamic queries
# * process query results
#
# Therefore, the rest of this tutorial is still important when you want to tap that power.
# If you continue here, you learn all the basics of data-navigation with Text-Fabric.
# # Counting
#
# In order to get acquainted with the data, we start with the simple task of counting.
#
# ## Count all nodes
# We use the
# [`N.walk()` generator](https://annotation.github.io/text-fabric/tf/core/nodes.html#tf.core.nodes.Nodes.walk)
# to walk through the nodes.
#
# We compared the TF data to a gigantic spreadsheet, where the rows correspond to the signs.
# In Text-Fabric, we call the rows `slots`, because they are the textual positions that can be filled with signs.
#
# We also mentioned that there are also other textual objects.
# They are the clusters, lines, faces and documents.
# They also correspond to rows in the big spreadsheet.
#
# In Text-Fabric we call all these rows *nodes*, and the `N()` generator
# carries us through those nodes in the textual order.
#
# Just one extra thing: the `info` statements generate timed messages.
# If you use them instead of `print` you'll get a sense of the amount of time that
# the various processing steps typically need.
# +
A.indent(reset=True)
A.info("Counting nodes ...")
i = 0
for n in N.walk():
i += 1
A.info("{} nodes".format(i))
# -
# Here you see it: over 2M nodes.
# ## What are those nodes?
# Every node has a type, like sign, or line, face.
# But what exactly are they?
#
# Text-Fabric has two special features, `otype` and `oslots`, that must occur in every Text-Fabric data set.
# `otype` tells you for each node its type, and you can ask for the number of `slot`s in the text.
#
# Here we go!
F.otype.slotType
F.otype.maxSlot
F.otype.maxNode
F.otype.all
C.levels.data
# This is interesting: above you see all the textual objects, with the average size of their objects,
# the node where they start, and the node where they end.
# ## Count individual object types
# This is an intuitive way to count the number of nodes in each type.
# Note in passing, how we use the `indent` in conjunction with `info` to produce neat timed
# and indented progress messages.
# +
A.indent(reset=True)
A.info("counting objects ...")
for otype in F.otype.all:
i = 0
A.indent(level=1, reset=True)
for n in F.otype.s(otype):
i += 1
A.info("{:>7} {}s".format(i, otype))
A.indent(level=0)
A.info("Done")
# -
# # Viewing textual objects
#
# You can use the A API (the extra power) to display cuneiform text.
#
# See the [display](display.ipynb) tutorial.
# # Feature statistics
#
# `F`
# gives access to all features.
# Every feature has a method
# `freqList()`
# to generate a frequency list of its values, higher frequencies first.
# Here are the parts of speech:
F.sp.freqList()
# Signs, words and clusters have types. We can count them separately:
F.type.freqList("cluster")
F.type.freqList("word")
F.type.freqList("sign")
# # Word matters
#
# ## Top 20 frequent words
#
# We represent words by their essential symbols, collected in the feature *glyph* (which also exists for signs).
for (w, amount) in F.glyph.freqList("word")[0:20]:
print(f"{amount:>5} {w}")
# ## Word distribution
#
# Let's do a bit more fancy word stuff.
#
# ### Hapaxes
#
# A hapax can be found by picking the words with frequency 1.
# We do have lexeme information in this corpus, let's use it for determining hapaxes.
#
# We print 20 hapaxes.
hapaxes1 = sorted(lx for (lx, amount) in F.lex.freqList("word") if amount == 1)
len(hapaxes1)
for lx in hapaxes1[0:20]:
print(lx)
# Another way to find lexemes with only one occurrence is to use the `occ` edge feature from lexeme nodes to the word nodes of
# its occurrences.
hapaxes2 = sorted(F.lex.v(lx) for lx in F.otype.s("lex") if len(E.occ.f(lx)) == 1)
len(hapaxes2)
for lx in hapaxes2[0:20]:
print(lx)
# The feature `lex` contains lexemes that may have uncertain characters in it.
#
# The function `glex` has all those characters stripped.
# Let's use `glex` instead.
hapaxes1g = sorted(lx for (lx, amount) in F.glex.freqList("word") if amount == 1)
len(hapaxes1)
for lx in hapaxes1g[0:20]:
print(lx)
# If we are not interested in the numerals:
for lx in [x for x in hapaxes1g if not x.isdigit()][0:20]:
print(lx)
# ### Small occurrence base
#
# The occurrence base of a word is the set of scrolls in which it occurs.
#
# We compute the occurrence base of each word, based on lexemes according to the `glex` feature.
# +
occurrenceBase1 = collections.defaultdict(set)
A.indent(reset=True)
A.info("compiling occurrence base ...")
for w in F.otype.s("word"):
scroll = T.sectionFromNode(w)[0]
occurrenceBase1[F.glex.v(w)].add(scroll)
A.info(f"{len(occurrenceBase1)} entries")
# -
# Wow, that took long!
#
# We looked up the scroll for each word.
#
# But there is another way:
#
# Start with scrolls, and iterate through their words.
# +
occurrenceBase2 = collections.defaultdict(set)
A.indent(reset=True)
A.info("compiling occurrence base ...")
for s in F.otype.s("scroll"):
scroll = F.scroll.v(s)
for w in L.d(s, otype="word"):
occurrenceBase2[F.glex.v(w)].add(scroll)
A.info("done")
A.info(f"{len(occurrenceBase2)} entries")
# -
# Much better. Are the results equal?
occurrenceBase1 == occurrenceBase2
# Yes.
occurrenceBase = occurrenceBase2
# An overview of how many words have how big occurrence bases:
# +
occurrenceSize = collections.Counter()
for (w, scrolls) in occurrenceBase.items():
occurrenceSize[len(scrolls)] += 1
occurrenceSize = sorted(
occurrenceSize.items(),
key=lambda x: (-x[1], x[0]),
)
for (size, amount) in occurrenceSize[0:10]:
print(f"base size {size:>4} : {amount:>5} words")
print("...")
for (size, amount) in occurrenceSize[-10:]:
print(f"base size {size:>4} : {amount:>5} words")
# -
# Let's give the predicate *private* to those words whose occurrence base is a single scroll.
privates = {w for (w, base) in occurrenceBase.items() if len(base) == 1}
len(privates)
# ### Peculiarity of scrolls
#
# As a final exercise with scrolls, let's make a list of all scrolls, and show their
#
# * total number of words
# * number of private words
# * the percentage of private words: a measure of the peculiarity of the scroll
# +
scrollList = []
empty = set()
ordinary = set()
for d in F.otype.s("scroll"):
scroll = T.scrollName(d)
words = {F.glex.v(w) for w in L.d(d, otype="word")}
a = len(words)
if not a:
empty.add(scroll)
continue
o = len({w for w in words if w in privates})
if not o:
ordinary.add(scroll)
continue
p = 100 * o / a
scrollList.append((scroll, a, o, p))
scrollList = sorted(scrollList, key=lambda e: (-e[3], -e[1], e[0]))
print(f"Found {len(empty):>4} empty scrolls")
print(f"Found {len(ordinary):>4} ordinary scrolls (i.e. without private words)")
# +
print(
"{:<20}{:>5}{:>5}{:>5}\n{}".format(
"scroll",
"#all",
"#own",
"%own",
"-" * 35,
)
)
for x in scrollList[0:20]:
print("{:<20} {:>4} {:>4} {:>4.1f}%".format(*x))
print("...")
for x in scrollList[-20:]:
print("{:<20} {:>4} {:>4} {:>4.1f}%".format(*x))
# -
# ## Tip
#
# See the [lexeme recipe](cookbook/lexeme.ipynb) in the cookbook for how you get from a lexeme node to
# its word occurrence nodes.
# # Locality API
# We travel upwards and downwards, forwards and backwards through the nodes.
# The Locality-API (`L`) provides functions: `u()` for going up, and `d()` for going down,
# `n()` for going to next nodes and `p()` for going to previous nodes.
#
# These directions are indirect notions: nodes are just numbers, but by means of the
# `oslots` feature they are linked to slots. One node *contains* an other node, if the one is linked to a set of slots that contains the set of slots that the other is linked to.
# And one if next or previous to an other, if its slots follow or precede the slots of the other one.
#
# `L.u(node)` **Up** is going to nodes that embed `node`.
#
# `L.d(node)` **Down** is the opposite direction, to those that are contained in `node`.
#
# `L.n(node)` **Next** are the next *adjacent* nodes, i.e. nodes whose first slot comes immediately after the last slot of `node`.
#
# `L.p(node)` **Previous** are the previous *adjacent* nodes, i.e. nodes whose last slot comes immediately before the first slot of `node`.
#
# All these functions yield nodes of all possible otypes.
# By passing an optional parameter, you can restrict the results to nodes of that type.
#
# The result are ordered according to the order of things in the text.
#
# The functions return always a tuple, even if there is just one node in the result.
#
# ## Going up
# We go from the first word to the scroll it contains.
# Note the `[0]` at the end. You expect one scroll, yet `L` returns a tuple.
# To get the only element of that tuple, you need to do that `[0]`.
#
# If you are like me, you keep forgetting it, and that will lead to weird error messages later on.
firstScroll = L.u(1, otype="scroll")[0]
print(firstScroll)
# And let's see all the containing objects of sign 3:
s = 3
for otype in F.otype.all:
if otype == F.otype.slotType:
continue
up = L.u(s, otype=otype)
upNode = "x" if len(up) == 0 else up[0]
print("sign {} is contained in {} {}".format(s, otype, upNode))
# ## Going next
# Let's go to the next nodes of the first scroll.
afterFirstScroll = L.n(firstScroll)
for n in afterFirstScroll:
print(
"{:>7}: {:<13} first slot={:<6}, last slot={:<6}".format(
n,
F.otype.v(n),
E.oslots.s(n)[0],
E.oslots.s(n)[-1],
)
)
secondScroll = L.n(firstScroll, otype="scroll")[0]
# ## Going previous
#
# And let's see what is right before the second scroll.
for n in L.p(secondScroll):
print(
"{:>7}: {:<13} first slot={:<6}, last slot={:<6}".format(
n,
F.otype.v(n),
E.oslots.s(n)[0],
E.oslots.s(n)[-1],
)
)
# ## Going down
# We go to the fragments of the first scroll, and just count them.
fragments = L.d(firstScroll, otype="fragment")
print(len(fragments))
# ## The first line
# We pick two nodes and explore what is above and below them:
# the first line and the first word.
for n in [
F.otype.s("word")[0],
F.otype.s("line")[0],
]:
A.indent(level=0)
A.info("Node {}".format(n), tm=False)
A.indent(level=1)
A.info("UP", tm=False)
A.indent(level=2)
A.info("\n".join(["{:<15} {}".format(u, F.otype.v(u)) for u in L.u(n)]), tm=False)
A.indent(level=1)
A.info("DOWN", tm=False)
A.indent(level=2)
A.info("\n".join(["{:<15} {}".format(u, F.otype.v(u)) for u in L.d(n)]), tm=False)
A.indent(level=0)
A.info("Done", tm=False)
# # Text API
#
# So far, we have mainly seen nodes and their numbers, and the names of node types.
# You would almost forget that we are dealing with text.
# So let's try to see some text.
#
# In the same way as `F` gives access to feature data,
# `T` gives access to the text.
# That is also feature data, but you can tell Text-Fabric which features are specifically
# carrying the text, and in return Text-Fabric offers you
# a Text API: `T`.
#
# ## Formats
# DSS text can be represented in a number of ways:
#
# * `orig`: unicode
# * `trans`: ETCBC transcription
# * `source`: as in Abegg's data files
#
# All three can be represented in two flavours:
#
# * `full`: all glyphs, but no bracketings and flags
# * `extra`: everything
#
# If you wonder where the information about text formats is stored:
# not in the program text-fabric, but in the data set.
# It has a feature `otext`, which specifies the formats and which features
# must be used to produce them. `otext` is the third special feature in a TF data set,
# next to `otype` and `oslots`.
# It is an optional feature.
# If it is absent, there will be no `T` API.
#
# Here is a list of all available formats in this data set.
T.formats
# ## Using the formats
#
# The ` T.text()` function is central to get text representations of nodes. Its most basic usage is
#
# ```python
# T.text(nodes, fmt=fmt)
# ```
# where `nodes` is a list or iterable of nodes, usually word nodes, and `fmt` is the name of a format.
# If you leave out `fmt`, the default `text-orig-full` is chosen.
#
# The result is the text in that format for all nodes specified:
# You see for each format in the list above its intended level of operation: `sign` or `word`.
#
# If TF formats a node according to a defined text-format, it will descend to constituent nodes and represent those
# constituent nodes.
#
# In this case, the formats ending in `-extra` specify the `word` level as the descend type.
# Because, in this dataset, the features that contain the text-critical brackets are only defined at the word level.
# At the sign level, those brackets are no longer visible, but they have left their traces in other features.
# If we do not specify a format, the **default** format is used (`text-orig-full`).
# We examine a portion of biblical material at the start 1Q1.
fragmentNode = T.nodeFromSection(("1Q1", "f1"))
fragmentNode
signs = L.d(fragmentNode, otype="sign")
words = L.d(fragmentNode, otype="word")
lines = L.d(fragmentNode, otype="line")
print(
f"""
Fragment {T.sectionFromNode(fragmentNode)} with
{len(signs):>3} signs
{len(words):>3} words
{len(lines):>3} lines
"""
)
T.text(signs[0:100])
T.text(words[0:20])
T.text(lines[0:2])
# ### The `-extra` formats
#
# In order to use non-default formats, we have to specify them in the *fmt* parameter.
T.text(signs[0:100], fmt="text-orig-extra")
# We do not get much, let's ask why.
T.text(signs[0:2], fmt="text-orig-extra", explain=True)
# The reason can be found in `TARGET LEVEL: word` and `EXPANSION 0 words`.
# We are applying the word targeted format `text-orig-extra` to a sign, which does not contain words.
T.text(words[0:20], fmt="text-orig-extra")
T.text(lines[0:2], fmt="text-orig-extra")
# Note that the direction of the brackets looks wrong, because they have not been adapted to the right-to-left writing direction.
#
# We can view them in ETCBC transcription as well:
T.text(words[0:20], fmt="text-trans-extra")
T.text(lines[0:2], fmt="text-trans-extra")
# Or in Abegg's source encoding:
T.text(words[0:20], fmt="text-source-extra")
T.text(lines[0:2], fmt="text-source-extra")
# The function `T.text()` works with nodes of many types.
#
# We compose a set of example nodes and run `T.text` on them:
exampleNodes = [
F.otype.s("sign")[1],
F.otype.s("word")[1],
F.otype.s("cluster")[0],
F.otype.s("line")[0],
F.otype.s("fragment")[0],
F.otype.s("scroll")[0],
F.otype.s("lex")[1],
]
exampleNodes
for n in exampleNodes:
print(f"This is {F.otype.v(n)} {n}:")
text = T.text(n)
if len(text) > 200:
text = text[0:200] + f"\nand {len(text) - 200} characters more"
print(text)
print("")
# Look at the last case, the lexeme node: obviously, the text-format that has been invoked provides
# the *language* (`h`) of the lexeme, plus its representations in unicode, etcbc, and Abegg transcription.
#
# But what format exactly has been invoked?
# Let's ask.
T.text(exampleNodes[-1], explain=True)
# The clue is in `FORMATTING: implicit lex-default`.
#
# Remember that we saw the format `lex-default` in `T.formats`.
#
# The Text-API has matched the type of the lexeme node we provided with this default format and applies it,
# thereby skipping the expansion of the lexeme node to its occurrences.
#
# But we can force the expansion:
T.text(exampleNodes[-1], fmt="lex-default", descend=True)
# ## Using the formats
# Now let's use those formats to print out the first biblical line in this corpus.
#
# Note that the formats starting with `layout-` are not usable for this.
# Also the format `lex-default` is not useful, so we leave that out as well.
#
# For the `layout-` formats, see [display](display.ipynb).
usefulFormats = [
fmt
for fmt in sorted(T.formats)
if not fmt.startswith("layout-") and not fmt == "lex-default"
]
len(usefulFormats)
firstLine = T.nodeFromSection(("1Q1", "f1", "1"))
for fmt in usefulFormats:
if not fmt.startswith("layout-"):
print(
"{}:\n\t{}\n".format(
fmt,
T.text(firstLine, fmt=fmt),
)
)
# ## Whole text in all formats in a few seconds
# Part of the pleasure of working with computers is that they can crunch massive amounts of data.
# The text of the Dead Sea Scrolls is a piece of cake.
#
# It takes just a dozen seconds or so to have that cake and eat it.
# In all useful formats.
# +
A.indent(reset=True)
A.info("writing plain text of all scrolls in all text formats")
text = collections.defaultdict(list)
for ln in F.otype.s("line"):
for fmt in usefulFormats:
if fmt.startswith("text-"):
text[fmt].append(T.text(ln, fmt=fmt, descend=True))
A.info("done {} formats".format(len(text)))
for fmt in sorted(text):
print("{}\n{}\n".format(fmt, "\n".join(text[fmt][0:5])))
# -
# ### The full plain text
# We write all formats to file, in your `Downloads` folder.
for fmt in T.formats:
if fmt.startswith("text-"):
with open(
os.path.expanduser(f"~/Downloads/{fmt}.txt"),
"w",
# encoding='utf8',
) as f:
f.write("\n".join(text[fmt]))
# (if this errors, uncomment the line with `encoding`)
# ## Sections
#
# A section in the DSS is a scroll, a fragment or a line.
# Knowledge of sections is not baked into Text-Fabric.
# The config feature `otext.tf` may specify three section levels, and tell
# what the corresponding node types and features are.
#
# From that knowledge it can construct mappings from nodes to sections, e.g. from line
# nodes to tuples of the form:
#
# (scroll acronym, fragment label, line number)
#
# You can get the section of a node as a tuple of relevant scroll, fragment, and line nodes.
# Or you can get it as a passage label, a string.
#
# You can ask for the passage corresponding to the first slot of a node, or the one corresponding to the last slot.
#
# If you are dealing with scroll and fragment nodes, you can ask to fill out the line and fragment parts as well.
#
# Here are examples of getting the section that corresponds to a node and vice versa.
#
# **NB:** `sectionFromNode` always delivers a line specification, either from the
# first slot belonging to that node, or, if `lastSlot`, from the last slot
# belonging to that node.
someNodes = (
F.otype.s("sign")[100000],
F.otype.s("word")[10000],
F.otype.s("cluster")[5000],
F.otype.s("line")[15000],
F.otype.s("fragment")[1000],
F.otype.s("scroll")[500],
)
for n in someNodes:
nType = F.otype.v(n)
d = f"{n:>7} {nType}"
first = A.sectionStrFromNode(n)
last = A.sectionStrFromNode(n, lastSlot=True, fillup=True)
tup = (
T.sectionTuple(n),
T.sectionTuple(n, lastSlot=True, fillup=True),
)
print(f"{d:<16} - {first:<18} {last:<18} {tup}")
# # Clean caches
#
# Text-Fabric pre-computes data for you, so that it can be loaded faster.
# If the original data is updated, Text-Fabric detects it, and will recompute that data.
#
# But there are cases, when the algorithms of Text-Fabric have changed, without any changes in the data, that you might
# want to clear the cache of precomputed results.
#
# There are two ways to do that:
#
# * Locate the `.tf` directory of your dataset, and remove all `.tfx` files in it.
# This might be a bit awkward to do, because the `.tf` directory is hidden on Unix-like systems.
# * Call `TF.clearCache()`, which does exactly the same.
#
# It is not handy to execute the following cell all the time, that's why I have commented it out.
# So if you really want to clear the cache, remove the comment sign below.
# +
# TF.clearCache()
# -
# # Next steps
#
# By now you have an impression how to compute around in the corpus.
# While this is still the beginning, I hope you already sense the power of unlimited programmatic access
# to all the bits and bytes in the data set.
#
# Here are a few directions for unleashing that power.
#
# * **[display](display.ipynb)** become an expert in creating pretty displays of your text structures
# * **[search](search.ipynb)** turbo charge your hand-coding with search templates
# * **[exportExcel](exportExcel.ipynb)** make tailor-made spreadsheets out of your results
# * **[share](share.ipynb)** draw in other people's data and let them use yours
# * **[similarLines](similarLines.ipynb)** spot the similarities between lines
#
# ---
#
# See the [cookbook](cookbook) for recipes for small, concrete tasks.
#
# CC-BY Dirk Roorda
| 30.279762
| 178
| 0.681148
|
4a0be62f614c813c00d241ee06ea2b5b1bb63372
| 23,818
|
py
|
Python
|
nltk/app/concordance_app.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | null | null | null |
nltk/app/concordance_app.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | null | null | null |
nltk/app/concordance_app.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | 1
|
2019-10-18T08:58:45.000Z
|
2019-10-18T08:58:45.000Z
|
# Natural Language Toolkit: Concordance Application
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Sumukh Ghodke <sghodke@csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import re
import threading
from six.moves import queue as q
from six.moves.tkinter_font import Font
from six.moves.tkinter import (Tk, Button, END, Entry, Frame, IntVar, LEFT,
Label, Menu, OptionMenu, SUNKEN, Scrollbar,
StringVar, Text)
import nltk.compat
from nltk.corpus import (cess_cat, brown, nps_chat, treebank, sinica_treebank,
alpino, indian, floresta, mac_morpho, cess_esp)
from nltk.util import in_idle
from nltk.draw.util import ShowText
# Regex fragment matching either the word or the tag half of a "word/TAG" token.
WORD_OR_TAG = '[^/ ]+'
# Word-boundary anchor used when assembling search patterns.
BOUNDARY = r'\b'
# Virtual Tk event names posted by worker threads and drained by the view's poller.
CORPUS_LOADED_EVENT = '<<CL_EVENT>>'
SEARCH_TERMINATED_EVENT = '<<ST_EVENT>>'
SEARCH_ERROR_EVENT = '<<SE_EVENT>>'
ERROR_LOADING_CORPUS_EVENT = '<<ELC_EVENT>>'
# How often (in milliseconds) the GUI polls the thread-communication queue.
POLL_INTERVAL = 50
# NB All corpora must be specified in a lambda expression so as not to be
# loaded when the module is imported.
_DEFAULT = 'English: Brown Corpus (Humor, simplified)'
# Display name -> zero-argument loader returning tagged sentences.
_CORPORA = {
    'Catalan: CESS-CAT Corpus (simplified)':
        lambda: cess_cat.tagged_sents(tagset='universal'),
    'English: Brown Corpus':
        lambda: brown.tagged_sents(),
    'English: Brown Corpus (simplified)':
        lambda: brown.tagged_sents(tagset='universal'),
    'English: Brown Corpus (Press, simplified)':
        lambda: brown.tagged_sents(categories=['news', 'editorial', 'reviews'], tagset='universal'),
    'English: Brown Corpus (Religion, simplified)':
        lambda: brown.tagged_sents(categories='religion', tagset='universal'),
    'English: Brown Corpus (Learned, simplified)':
        lambda: brown.tagged_sents(categories='learned', tagset='universal'),
    'English: Brown Corpus (Science Fiction, simplified)':
        lambda: brown.tagged_sents(categories='science_fiction', tagset='universal'),
    'English: Brown Corpus (Romance, simplified)':
        lambda: brown.tagged_sents(categories='romance', tagset='universal'),
    'English: Brown Corpus (Humor, simplified)':
        lambda: brown.tagged_sents(categories='humor', tagset='universal'),
    'English: NPS Chat Corpus':
        lambda: nps_chat.tagged_posts(),
    'English: NPS Chat Corpus (simplified)':
        lambda: nps_chat.tagged_posts(tagset='universal'),
    'English: Wall Street Journal Corpus':
        lambda: treebank.tagged_sents(),
    'English: Wall Street Journal Corpus (simplified)':
        lambda: treebank.tagged_sents(tagset='universal'),
    'Chinese: Sinica Corpus':
        lambda: sinica_treebank.tagged_sents(),
    'Chinese: Sinica Corpus (simplified)':
        lambda: sinica_treebank.tagged_sents(tagset='universal'),
    'Dutch: Alpino Corpus':
        lambda: alpino.tagged_sents(),
    'Dutch: Alpino Corpus (simplified)':
        lambda: alpino.tagged_sents(tagset='universal'),
    'Hindi: Indian Languages Corpus':
        lambda: indian.tagged_sents(files='hindi.pos'),
    'Hindi: Indian Languages Corpus (simplified)':
        lambda: indian.tagged_sents(files='hindi.pos', tagset='universal'),
    'Portuguese: Floresta Corpus (Portugal)':
        lambda: floresta.tagged_sents(),
    'Portuguese: Floresta Corpus (Portugal, simplified)':
        lambda: floresta.tagged_sents(tagset='universal'),
    'Portuguese: MAC-MORPHO Corpus (Brazil)':
        lambda: mac_morpho.tagged_sents(),
    'Portuguese: MAC-MORPHO Corpus (Brazil, simplified)':
        lambda: mac_morpho.tagged_sents(tagset='universal'),
    'Spanish: CESS-ESP Corpus (simplified)':
        lambda: cess_esp.tagged_sents(tagset='universal'),
}
class ConcordanceSearchView(object):
    """Tkinter front-end for concordance search over POS-tagged corpora.

    The view owns a :class:`ConcordanceSearchModel` whose worker threads
    post event names onto a shared queue. :meth:`_poll` drains that queue
    on the Tk main thread every ``POLL_INTERVAL`` milliseconds, because Tk
    widgets must never be touched from a background thread.
    """
    _BACKGROUND_COLOUR = '#FFF'  # white
    # Colour of highlighted results
    _HIGHLIGHT_WORD_COLOUR = '#F00'  # red
    _HIGHLIGHT_WORD_TAG = 'HL_WRD_TAG'
    _HIGHLIGHT_LABEL_COLOUR = '#C0C0C0'  # dark grey
    _HIGHLIGHT_LABEL_TAG = 'HL_LBL_TAG'
    # Fraction of text kept left of the scrollbar position after a search.
    _FRACTION_LEFT_TEXT = 0.30

    def __init__(self):
        self.queue = q.Queue()
        self.model = ConcordanceSearchModel(self.queue)
        self.top = Tk()
        self._init_top(self.top)
        self._init_menubar()
        self._init_widgets(self.top)
        self.load_corpus(self.model.DEFAULT_CORPUS)
        # Keep the after-handle so destroy() can cancel the poller.
        self.after = self.top.after(POLL_INTERVAL, self._poll)

    def _init_top(self, top):
        """Configure the toplevel window: geometry, title, close handlers."""
        top.geometry('950x680+50+50')
        top.title('NLTK Concordance Search')
        top.bind('<Control-q>', self.destroy)
        top.protocol('WM_DELETE_WINDOW', self.destroy)
        top.minsize(950, 680)

    def _init_widgets(self, parent):
        """Assemble the main frame from its sub-widgets, top to bottom."""
        self.main_frame = Frame(parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1))
        self._init_corpus_select(self.main_frame)
        self._init_query_box(self.main_frame)
        self._init_results_box(self.main_frame)
        self._init_paging(self.main_frame)
        self._init_status(self.main_frame)
        self.main_frame.pack(fill='both', expand=True)

    def _init_menubar(self):
        """Build the File/Edit menus (result count, before/after context)."""
        self._result_size = IntVar(self.top)
        self._cntx_bf_len = IntVar(self.top)
        self._cntx_af_len = IntVar(self.top)
        menubar = Menu(self.top)
        filemenu = Menu(menubar, tearoff=0, borderwidth=0)
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-q')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)
        editmenu = Menu(menubar, tearoff=0)
        rescntmenu = Menu(editmenu, tearoff=0)
        rescntmenu.add_radiobutton(label='20', variable=self._result_size,
                                   underline=0, value=20,
                                   command=self.set_result_size)
        rescntmenu.add_radiobutton(label='50', variable=self._result_size,
                                   underline=0, value=50,
                                   command=self.set_result_size)
        rescntmenu.add_radiobutton(label='100', variable=self._result_size,
                                   underline=0, value=100,
                                   command=self.set_result_size)
        # invoke() selects the entry and triggers its command (sets defaults).
        rescntmenu.invoke(1)
        editmenu.add_cascade(label='Result Count', underline=0, menu=rescntmenu)
        cntxmenu = Menu(editmenu, tearoff=0)
        cntxbfmenu = Menu(cntxmenu, tearoff=0)
        cntxbfmenu.add_radiobutton(label='60 characters',
                                   variable=self._cntx_bf_len,
                                   underline=0, value=60,
                                   command=self.set_cntx_bf_len)
        cntxbfmenu.add_radiobutton(label='80 characters',
                                   variable=self._cntx_bf_len,
                                   underline=0, value=80,
                                   command=self.set_cntx_bf_len)
        cntxbfmenu.add_radiobutton(label='100 characters',
                                   variable=self._cntx_bf_len,
                                   underline=0, value=100,
                                   command=self.set_cntx_bf_len)
        cntxbfmenu.invoke(1)
        cntxmenu.add_cascade(label='Before', underline=0, menu=cntxbfmenu)
        cntxafmenu = Menu(cntxmenu, tearoff=0)
        cntxafmenu.add_radiobutton(label='70 characters',
                                   variable=self._cntx_af_len,
                                   underline=0, value=70,
                                   command=self.set_cntx_af_len)
        cntxafmenu.add_radiobutton(label='90 characters',
                                   variable=self._cntx_af_len,
                                   underline=0, value=90,
                                   command=self.set_cntx_af_len)
        cntxafmenu.add_radiobutton(label='110 characters',
                                   variable=self._cntx_af_len,
                                   underline=0, value=110,
                                   command=self.set_cntx_af_len)
        cntxafmenu.invoke(1)
        cntxmenu.add_cascade(label='After', underline=0, menu=cntxafmenu)
        editmenu.add_cascade(label='Context', underline=0, menu=cntxmenu)
        menubar.add_cascade(label='Edit', underline=0, menu=editmenu)
        self.top.config(menu=menubar)

    def set_result_size(self, **kwargs):
        """Menu callback: copy the selected page size into the model."""
        self.model.result_count = self._result_size.get()

    def set_cntx_af_len(self, **kwargs):
        """Menu callback: number of context characters shown after a hit."""
        self._char_after = self._cntx_af_len.get()

    def set_cntx_bf_len(self, **kwargs):
        """Menu callback: number of context characters shown before a hit."""
        self._char_before = self._cntx_bf_len.get()

    def _init_corpus_select(self, parent):
        """Build the corpus drop-down, pre-selecting the default corpus."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        self.var = StringVar(innerframe)
        self.var.set(self.model.DEFAULT_CORPUS)
        Label(innerframe, justify=LEFT, text=' Corpus: ',
              background=self._BACKGROUND_COLOUR, padx=2, pady=1, border=0).pack(side='left')
        # FIX: the original assigned `list(...).remove(...)` here, which is
        # always None (list.remove mutates in place); the value was unused.
        # non_default_corpora() below provides the real menu entries.
        om = OptionMenu(innerframe, self.var, self.model.DEFAULT_CORPUS, command=self.corpus_selected, *self.model.non_default_corpora())
        om['borderwidth'] = 0
        om['highlightthickness'] = 1
        om.pack(side='left')
        innerframe.pack(side='top', fill='x', anchor='n')

    def _init_status(self, parent):
        """Build the one-line status bar at the bottom of the window."""
        self.status = Label(parent, justify=LEFT, relief=SUNKEN, background=self._BACKGROUND_COLOUR, border=0, padx=1, pady=0)
        self.status.pack(side='top', anchor='sw')

    def _init_query_box(self, parent):
        """Build the query entry plus Search button; Return also searches."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        another = Frame(innerframe, background=self._BACKGROUND_COLOUR)
        self.query_box = Entry(another, width=60)
        self.query_box.pack(side='left', fill='x', pady=25, anchor='center')
        self.search_button = Button(another, text='Search', command=self.search, borderwidth=1, highlightthickness=1)
        self.search_button.pack(side='left', fill='x', pady=25, anchor='center')
        self.query_box.bind('<KeyPress-Return>', self.search_enter_keypress_handler)
        another.pack()
        innerframe.pack(side='top', fill='x', anchor='n')

    def search_enter_keypress_handler(self, *event):
        self.search()

    def _init_results_box(self, parent):
        """Build the scrollable, read-only results text widget."""
        innerframe = Frame(parent)
        i1 = Frame(innerframe)
        i2 = Frame(innerframe)
        vscrollbar = Scrollbar(i1, borderwidth=1)
        hscrollbar = Scrollbar(i2, borderwidth=1, orient='horiz')
        self.results_box = Text(i1,
                                font=Font(family='courier', size='16'),
                                state='disabled', borderwidth=1,
                                yscrollcommand=vscrollbar.set,
                                xscrollcommand=hscrollbar.set, wrap='none', width='40', height='20', exportselection=1)
        self.results_box.pack(side='left', fill='both', expand=True)
        self.results_box.tag_config(self._HIGHLIGHT_WORD_TAG, foreground=self._HIGHLIGHT_WORD_COLOUR)
        self.results_box.tag_config(self._HIGHLIGHT_LABEL_TAG, foreground=self._HIGHLIGHT_LABEL_COLOUR)
        vscrollbar.pack(side='left', fill='y', anchor='e')
        vscrollbar.config(command=self.results_box.yview)
        hscrollbar.pack(side='left', fill='x', expand=True, anchor='w')
        hscrollbar.config(command=self.results_box.xview)
        # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!!
        Label(i2, text='   ', background=self._BACKGROUND_COLOUR).pack(side='left', anchor='e')
        i1.pack(side='top', fill='both', expand=True, anchor='n')
        i2.pack(side='bottom', fill='x', anchor='s')
        innerframe.pack(side='top', fill='both', expand=True)

    def _init_paging(self, parent):
        """Build the Previous/Next paging buttons (disabled until needed)."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        self.prev = prev = Button(innerframe, text='Previous', command=self.previous, width='10', borderwidth=1, highlightthickness=1, state='disabled')
        prev.pack(side='left', anchor='center')
        self.next = next = Button(innerframe, text='Next', command=self.__next__, width='10', borderwidth=1, highlightthickness=1, state='disabled')
        next.pack(side='right', anchor='center')
        innerframe.pack(side='top', fill='y')
        self.current_page = 0

    def previous(self):
        self.clear_results_box()
        self.freeze_editable()
        self.model.prev(self.current_page - 1)

    def __next__(self):
        self.clear_results_box()
        self.freeze_editable()
        self.model.next(self.current_page + 1)

    def about(self, *e):
        """Show an About dialog, falling back to a plain text window."""
        ABOUT = ("NLTK Concordance Search Demo\n")
        TITLE = 'About: NLTK Concordance Search Demo'
        try:
            from six.moves.tkinter_messagebox import Message
            Message(message=ABOUT, title=TITLE, parent=self.main_frame).show()
        except Exception:  # FIX: was a bare except (would swallow KeyboardInterrupt too)
            ShowText(self.top, TITLE, ABOUT)

    def _bind_event_handlers(self):
        self.top.bind(CORPUS_LOADED_EVENT, self.handle_corpus_loaded)
        self.top.bind(SEARCH_TERMINATED_EVENT, self.handle_search_terminated)
        self.top.bind(SEARCH_ERROR_EVENT, self.handle_search_error)
        self.top.bind(ERROR_LOADING_CORPUS_EVENT, self.handle_error_loading_corpus)

    def _poll(self):
        """Drain one event from the worker queue and dispatch it, then re-arm.

        Runs on the Tk main thread so the handlers may touch widgets.
        """
        try:
            event = self.queue.get(block=False)
        except q.Empty:
            pass
        else:
            if event == CORPUS_LOADED_EVENT:
                self.handle_corpus_loaded(event)
            elif event == SEARCH_TERMINATED_EVENT:
                self.handle_search_terminated(event)
            elif event == SEARCH_ERROR_EVENT:
                self.handle_search_error(event)
            elif event == ERROR_LOADING_CORPUS_EVENT:
                self.handle_error_loading_corpus(event)
        self.after = self.top.after(POLL_INTERVAL, self._poll)

    def handle_error_loading_corpus(self, event):
        self.status['text'] = 'Error in loading ' + self.var.get()
        self.unfreeze_editable()
        self.clear_all()
        self.freeze_editable()

    def handle_corpus_loaded(self, event):
        self.status['text'] = self.var.get() + ' is loaded'
        self.unfreeze_editable()
        self.clear_all()
        self.query_box.focus_set()

    def handle_search_terminated(self, event):
        """Render the finished search's results and update paging state."""
        # todo: refactor the model such that it is less state sensitive
        results = self.model.get_results()
        self.write_results(results)
        self.status['text'] = ''
        if len(results) == 0:
            self.status['text'] = 'No results found for ' + self.model.query
        else:
            self.current_page = self.model.last_requested_page
        self.unfreeze_editable()
        self.results_box.xview_moveto(self._FRACTION_LEFT_TEXT)

    def handle_search_error(self, event):
        self.status['text'] = 'Error in query ' + self.model.query
        self.unfreeze_editable()

    def corpus_selected(self, *args):
        new_selection = self.var.get()
        self.load_corpus(new_selection)

    def load_corpus(self, selection):
        """Kick off an asynchronous corpus load unless it is already loaded."""
        if self.model.selected_corpus != selection:
            self.status['text'] = 'Loading ' + selection + '...'
            self.freeze_editable()
            self.model.load_corpus(selection)

    def search(self):
        self.current_page = 0
        self.clear_results_box()
        self.model.reset_results()
        query = self.query_box.get()
        if (len(query.strip()) == 0):
            return
        self.status['text'] = 'Searching for ' + query
        self.freeze_editable()
        self.model.search(query, self.current_page + 1, )

    def write_results(self, results):
        """Insert one page of (sentence, start, end) hits, highlighting them.

        Each sentence is padded/trimmed so the match sits at a fixed column.
        """
        self.results_box['state'] = 'normal'
        row = 1
        for each in results:
            sent, pos1, pos2 = each[0].strip(), each[1], each[2]
            if len(sent) != 0:
                if (pos1 < self._char_before):
                    sent, pos1, pos2 = self.pad(sent, pos1, pos2)
                sentence = sent[pos1 - self._char_before:pos1 + self._char_after]
                if not row == len(results):
                    sentence += '\n'
                self.results_box.insert(str(row) + '.0', sentence)
                word_markers, label_markers = self.words_and_labels(sent, pos1, pos2)
                for marker in word_markers:
                    self.results_box.tag_add(self._HIGHLIGHT_WORD_TAG, str(row) + '.' + str(marker[0]), str(row) + '.' + str(marker[1]))
                for marker in label_markers:
                    self.results_box.tag_add(self._HIGHLIGHT_LABEL_TAG, str(row) + '.' + str(marker[0]), str(row) + '.' + str(marker[1]))
                row += 1
        self.results_box['state'] = 'disabled'

    def words_and_labels(self, sentence, pos1, pos2):
        """Return (word_spans, label_spans) column offsets inside the match.

        The matched region consists of space-separated ``word/label`` pairs;
        offsets are relative to the displayed line (shifted by _char_before).
        """
        search_exp = sentence[pos1:pos2]
        words, labels = [], []
        labeled_words = search_exp.split(' ')
        index = 0
        for each in labeled_words:
            if each == '':
                index += 1
            else:
                word, label = each.split('/')
                words.append((self._char_before + index, self._char_before + index + len(word)))
                index += len(word) + 1  # +1 for the '/' separator
                labels.append((self._char_before + index, self._char_before + index + len(label)))
                index += len(label)
                index += 1  # the space between pairs
        return words, labels

    def pad(self, sent, hstart, hend):
        """Left-pad *sent* so the hit starts at column _char_before."""
        if hstart >= self._char_before:
            return sent, hstart, hend
        d = self._char_before - hstart
        sent = ''.join([' '] * d) + sent
        return sent, hstart + d, hend + d

    def destroy(self, *e):
        """Cancel the poller and tear down the window (idempotent)."""
        if self.top is None:
            return
        self.top.after_cancel(self.after)
        self.top.destroy()
        self.top = None

    def clear_all(self):
        self.query_box.delete(0, END)
        self.model.reset_query()
        self.clear_results_box()

    def clear_results_box(self):
        # The Text widget only accepts edits while 'normal'.
        self.results_box['state'] = 'normal'
        self.results_box.delete("1.0", END)
        self.results_box['state'] = 'disabled'

    def freeze_editable(self):
        """Disable all inputs while a background task is running."""
        self.query_box['state'] = 'disabled'
        self.search_button['state'] = 'disabled'
        self.prev['state'] = 'disabled'
        self.next['state'] = 'disabled'

    def unfreeze_editable(self):
        self.query_box['state'] = 'normal'
        self.search_button['state'] = 'normal'
        self.set_paging_button_states()

    def set_paging_button_states(self):
        if self.current_page == 0 or self.current_page == 1:
            self.prev['state'] = 'disabled'
        else:
            self.prev['state'] = 'normal'
        if self.model.has_more_pages(self.current_page):
            self.next['state'] = 'normal'
        else:
            self.next['state'] = 'disabled'

    def fire_event(self, event):
        # Firing an event so that rendering of widgets happen in the mainloop thread
        self.top.event_generate(event, when='tail')

    def mainloop(self, *args, **kwargs):
        if in_idle():
            return
        self.top.mainloop(*args, **kwargs)
class ConcordanceSearchModel(object):
    """State and worker threads behind the concordance search GUI.

    Corpus loading and searching each run on their own thread; completion
    (or failure) is reported by putting an event name on ``queue`` — the
    model never touches the GUI directly.
    """

    def __init__(self, queue):
        self.queue = queue
        self.CORPORA = _CORPORA
        self.DEFAULT_CORPUS = _DEFAULT
        self.selected_corpus = None
        self.reset_query()
        self.reset_results()
        self.result_count = None
        # Index into tagged_sents of the first sentence not yet searched.
        self.last_sent_searched = 0

    def non_default_corpora(self):
        """Return every corpus name except the default, sorted alphabetically."""
        # FIX: replaced copy/remove/sort boilerplate with the idiomatic form.
        return sorted(name for name in self.CORPORA if name != self.DEFAULT_CORPUS)

    def load_corpus(self, name):
        """Start a background thread that loads and flattens corpus *name*."""
        self.selected_corpus = name
        self.tagged_sents = []
        runner_thread = self.LoadCorpus(name, self)
        runner_thread.start()

    def search(self, query, page):
        """Start a background search for *query*, targeting result *page*."""
        self.query = query
        self.last_requested_page = page
        self.SearchCorpus(self, page, self.result_count).start()

    def next(self, page):
        """Advance to *page*, searching further only if it is not cached."""
        self.last_requested_page = page
        if len(self.results) < page:
            self.search(self.query, page)
        else:
            self.queue.put(SEARCH_TERMINATED_EVENT)

    def prev(self, page):
        """Go back to the already-cached *page*."""
        self.last_requested_page = page
        self.queue.put(SEARCH_TERMINATED_EVENT)

    def reset_results(self):
        self.last_sent_searched = 0
        self.results = []
        self.last_page = None

    def reset_query(self):
        self.query = None

    def set_results(self, page, resultset):
        # Pages are 1-based; results is the 0-based per-page cache.
        self.results.insert(page - 1, resultset)

    def get_results(self):
        return self.results[self.last_requested_page - 1]

    def has_more_pages(self, page):
        """Return True if a page after *page* exists (or might exist)."""
        if self.results == [] or self.results[0] == []:
            return False
        if self.last_page is None:
            return True
        return page < self.last_page

    class LoadCorpus(threading.Thread):
        """Loads a corpus and renders each sentence as 'word/TAG word/TAG ...'."""

        def __init__(self, name, model):
            threading.Thread.__init__(self)
            self.model, self.name = model, name

        def run(self):
            try:
                ts = self.model.CORPORA[self.name]()
                self.model.tagged_sents = [' '.join(w + '/' + t for (w, t) in sent) for sent in ts]
                self.model.queue.put(CORPUS_LOADED_EVENT)
            except Exception as e:
                # Best-effort: report the failure to the GUI via the queue.
                print(e)
                self.model.queue.put(ERROR_LOADING_CORPUS_EVENT)

    class SearchCorpus(threading.Thread):
        """Scans unsearched sentences for the query, collecting one page of hits."""

        def __init__(self, model, page, count):
            self.model, self.count, self.page = model, count, page
            threading.Thread.__init__(self)

        def run(self):
            # FIX: renamed the local from `q` to `pattern` — it shadowed the
            # module-level queue alias `q`.
            pattern = self.processed_query()
            sent_pos, i, sent_count = [], 0, 0
            for sent in self.model.tagged_sents[self.model.last_sent_searched:]:
                try:
                    m = re.search(pattern, sent)
                except re.error:
                    self.model.reset_results()
                    self.model.queue.put(SEARCH_ERROR_EVENT)
                    return
                if m:
                    sent_pos.append((sent, m.start(), m.end()))
                    i += 1
                    if i > self.count:
                        # One extra hit found: a further page exists.
                        self.model.last_sent_searched += sent_count - 1
                        break
                sent_count += 1
            if (self.count >= len(sent_pos)):
                # Corpus exhausted: this is the final page.
                self.model.last_sent_searched += sent_count - 1
                self.model.last_page = self.page
                self.model.set_results(self.page, sent_pos)
            else:
                # Drop the sentinel extra hit before storing the page.
                self.model.set_results(self.page, sent_pos[:-1])
            self.model.queue.put(SEARCH_TERMINATED_EVENT)

        def processed_query(self):
            """Translate the user query into a regex over 'word/TAG' text.

            A bare uppercase term is treated as a tag, a term containing '/'
            as an explicit word/tag pair, and anything else as a bare word.
            """
            new = []
            for term in self.model.query.split():
                term = re.sub(r'\.', r'[^/ ]', term)
                if re.match('[A-Z]+$', term):
                    new.append(BOUNDARY + WORD_OR_TAG + '/' + term + BOUNDARY)
                elif '/' in term:
                    new.append(BOUNDARY + term + BOUNDARY)
                else:
                    new.append(BOUNDARY + term + '/' + WORD_OR_TAG + BOUNDARY)
            return ' '.join(new)
def app():
    """Launch the concordance search GUI and block until it is closed."""
    view = ConcordanceSearchView()
    view.mainloop()


if __name__ == '__main__':
    app()

__all__ = ['app']
| 41.933099
| 162
| 0.59665
|
4a0be728959dabc4e4bf55fda0130ee9d850cf89
| 574
|
py
|
Python
|
chapter 04/timerclass.py
|
bpbpublications/Python-Data-Persistence
|
444cd44b2853c05a84e59e85d4f705ef74395204
|
[
"MIT"
] | 6
|
2020-08-16T12:55:01.000Z
|
2022-03-08T13:06:23.000Z
|
chapter 04/timerclass.py
|
bpbpublications/Python-Data-Persistence
|
444cd44b2853c05a84e59e85d4f705ef74395204
|
[
"MIT"
] | null | null | null |
chapter 04/timerclass.py
|
bpbpublications/Python-Data-Persistence
|
444cd44b2853c05a84e59e85d4f705ef74395204
|
[
"MIT"
] | 1
|
2020-08-16T12:57:39.000Z
|
2020-08-16T12:57:39.000Z
|
#timerclass.py
#4.42
class timer:
    """A simple hours/minutes duration supporting `+` and `str()`.

    Note: the parameter name ``min`` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """

    def __init__(self, hr=None, min=None):
        self.hrs = hr
        self.mins = min

    def __add__(self, arg):
        """Return a new timer that is the sum of self and *arg*.

        FIX: the original subtracted 60 at most once, so minute totals of
        120 or more were not normalized; divmod handles any carry.
        """
        carry, mins = divmod(self.mins + arg.mins, 60)
        return timer(self.hrs + arg.hrs + carry, mins)

    def __str__(self):
        return '{} Hrs. {} mins.'.format(self.hrs, self.mins)
| 31.888889
| 72
| 0.461672
|
4a0be85adfbd0c0a17a00408ed2cf36f205493aa
| 7,610
|
py
|
Python
|
src/practice task1/practicedemo.py
|
cjshearer/project-athena
|
3394da6cd6dbe1c1c2b84f0a2f58c5168c4c6775
|
[
"MIT"
] | null | null | null |
src/practice task1/practicedemo.py
|
cjshearer/project-athena
|
3394da6cd6dbe1c1c2b84f0a2f58c5168c4c6775
|
[
"MIT"
] | 2
|
2020-10-20T18:24:11.000Z
|
2020-10-20T18:25:53.000Z
|
src/practice task1/practicedemo.py
|
cjshearer/project-athena
|
3394da6cd6dbe1c1c2b84f0a2f58c5168c4c6775
|
[
"MIT"
] | null | null | null |
import os
import time

import numpy as np

from models.athena import Ensemble, ENSEMBLE_STRATEGY
from utils.file import load_from_json
from utils.metrics import error_rate, get_corrections
from utils.model import load_lenet, load_pool
def generate_ae(model, data, labels, attack_configs, save=False, output_dir=None):
    """
    Generate adversarial examples
    :param model: WeakDefense. The targeted model.
    :param data: array. The benign samples to generate adversarial for.
    :param labels: array or list. The true labels.
    :param attack_configs: dictionary. Attacks and corresponding settings.
    :param save: boolean. True, if save the adversarial examples.
    :param output_dir: str or path. Location to save the adversarial examples.
        It cannot be None when save is True.
    :return: None
    """
    import time  # fixed: used below for a unique file name but was never imported

    # assumes data is (N, rows, cols[, channels]) -- TODO confirm with caller
    img_rows, img_cols = data.shape[1], data.shape[2]
    num_attacks = attack_configs.get("num_attacks")
    data_loader = (data, labels)
    if len(labels.shape) > 1:
        # convert one-hot labels to class indices
        labels = np.asarray([np.argmax(p) for p in labels])
    # generate attacks one by one
    for atk_id in range(num_attacks):  # renamed from `id` (shadowed the builtin)
        key = "configs{}".format(atk_id)
        # NOTE(review): `generate` is not imported anywhere in this file
        # (the original tutorial imports it from attacks.attack); this call
        # raises NameError as the file stands -- confirm the intended import.
        data_adv = generate(model=model,
                            data_loader=data_loader,
                            attack_args=attack_configs.get(key)
                            )
        # predict the adversarial examples
        predictions = model.predict(data_adv)
        predictions = np.asarray([np.argmax(p) for p in predictions])
        err = error_rate(y_pred=predictions, y_true=labels)
        print(">>> error rate:", err)
        # plot a couple of examples
        # NOTE(review): `plt` (matplotlib.pyplot) is also never imported here.
        num_plotting = min(data.shape[0], 2)
        for i in range(num_plotting):
            img = data_adv[i].reshape((img_rows, img_cols))
            plt.imshow(img, cmap='gray')
            title = '{}: {}->{}'.format(attack_configs.get(key).get("description"),
                                        labels[i],
                                        predictions[i]
                                        )
            plt.title(title)
            plt.show()
            plt.close()
        # save the adversarial example
        if save:
            if output_dir is None:
                raise ValueError("Cannot save images to a none path.")
            # save with a (monotonic-clock based) unique name
            file = os.path.join(output_dir, "{}.npy".format(time.monotonic()))
            print("Save the adversarial examples to file [{}].".format(file))
            np.save(file, data_adv)
# --- Demo driver: load configs, model and data, then attack a small subset.
# NOTE(review): runs at import time; paths are relative to the CWD -- confirm.
model_configs = load_from_json("./md.json")
data_configs = load_from_json("./dt.json")
attack_configs = load_from_json("./at.json")
# load the targeted model
model_file = os.path.join(model_configs.get("dir"), model_configs.get("um_file"))
target = load_lenet(file=model_file, wrap=True)
# load the benign samples
data_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
data_bs = np.load(data_file)
# load the corresponding true labels
label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
labels = np.load(label_file)
# generate adversarial examples for a small subset (first 10 samples only)
data_bs = data_bs[:10]
labels = labels[:10]
generate_ae(model=target, data=data_bs, labels=labels, attack_configs=attack_configs)
# copied from tutorials/eval_model.py
def evaluate(trans_configs, model_configs,
             data_configs, save=False, output_dir=None):
    """
    Apply transformation(s) on images.
    :param trans_configs: dictionary. The collection of the parameterized transformations to test.
        in the form of
        { configsx: {
            param: value,
            }
        }
        The key of a configuration is 'configs'x, where 'x' is the id of corresponding weak defense.
    :param model_configs: dictionary. Defines model related information.
        Such as, location, the undefended model, the file format, etc.
    :param data_configs: dictionary. Defines data related information.
        Such as, location, the file for the true labels, the file for the benign samples,
        the files for the adversarial examples, etc.
    :param save: boolean. Save the transformed sample or not.
    :param output_dir: path or str. The location to store the transformed samples.
        It cannot be None when save is True.
    :return: None (results are only printed; see TODO at the bottom)
    """
    # Load the baseline defense (PGD-ADT model)
    # NOTE(review): the key is 'jsma_trained' although the comment says
    # PGD-ADT -- confirm which baseline this actually loads.
    baseline = load_lenet(file=model_configs.get('jsma_trained'), trans_configs=None,
                          use_logits=False, wrap=False)
    # get the undefended model (UM)
    file = os.path.join(model_configs.get('dir'), model_configs.get('um_file'))
    undefended = load_lenet(file=file,
                            trans_configs=trans_configs.get('configs0'),
                            wrap=True)
    print(">>> um:", type(undefended))
    # load weak defenses into a pool
    pool, _ = load_pool(trans_configs=trans_configs,
                        model_configs=model_configs,
                        active_list=True,
                        wrap=True)
    # create an AVEP ensemble from the WD pool
    wds = list(pool.values())
    print(">>> wds:", type(wds), type(wds[0]))
    ensemble = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)
    # load the benign samples
    bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
    x_bs = np.load(bs_file)
    # img_rows/img_cols are currently unused below
    img_rows, img_cols = x_bs.shape[1], x_bs.shape[2]
    # load the corresponding true labels
    label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
    labels = np.load(label_file)
    # get indices of benign samples that are correctly classified by the targeted model
    print(">>> Evaluating UM on [{}], it may take a while...".format(bs_file))
    pred_bs = undefended.predict(x_bs)
    corrections = get_corrections(y_pred=pred_bs, y_true=labels)
    # Evaluate AEs.
    results = {}
    ae_list = data_configs.get('ae_files')
    # hard-coded: only the 5th AE file in the list is evaluated
    ae_file = os.path.join(data_configs.get('dir'), ae_list[4])
    x_adv = np.load(ae_file)
    # evaluate the undefended model on the AE
    print(">>> Evaluating UM on [{}], it may take a while...".format(ae_file))
    pred_adv_um = undefended.predict(x_adv)
    err_um = error_rate(y_pred=pred_adv_um, y_true=labels, correct_on_bs=corrections)
    # track the result
    results['UM'] = err_um
    # evaluate the ensemble on the AE
    print(">>> Evaluating ensemble on [{}], it may take a while...".format(ae_file))
    pred_adv_ens = ensemble.predict(x_adv)
    err_ens = error_rate(y_pred=pred_adv_ens, y_true=labels, correct_on_bs=corrections)
    # track the result
    results['Ensemble'] = err_ens
    # evaluate the baseline on the AE
    print(">>> Evaluating baseline model on [{}], it may take a while...".format(ae_file))
    pred_adv_bl = baseline.predict(x_adv)
    err_bl = error_rate(y_pred=pred_adv_bl, y_true=labels, correct_on_bs=corrections)
    # track the result
    results['JSMA-ADT'] = err_bl
    # TODO: collect and dump the evaluation results to file(s) such that you can analyze them later.
    print(">>> Evaluations on [{}]:\n{}".format(ae_file, results))
# --- Demo driver for the evaluation: load configs and run `evaluate`.
attack_configs = load_from_json("../src/configs/demo/at.json")
model_configs = load_from_json("../src/configs/demo/md.json")
data_configs = load_from_json("../src/configs/demo/dt.json")
output_dir = "../results"
# evaluate
# Fixed: the original call passed `attack_configs=trans_configs` (evaluate has
# no `attack_configs` parameter and `trans_configs` was never defined) and
# referenced the undefined `output_root` -- both raised NameError at runtime.
# NOTE(review): at.json is being used as the transformation configs here --
# confirm this is the intended config file for `evaluate`.
evaluate(trans_configs=attack_configs,
         model_configs=model_configs,
         data_configs=data_configs,
         save=False,
         output_dir=output_dir)
| 41.135135
| 100
| 0.651774
|
4a0beb6171e24f2d7cc390d895a2d608519b5f20
| 9,650
|
py
|
Python
|
ros2_ws/src/algorithms/state_estimation/state_estimation_3d/state_estimation_3d/ekf_node.py
|
FastSense/rosbot-ros2
|
c2d274ce179534fec5b2786a6f96b6d638019ac4
|
[
"MIT"
] | null | null | null |
ros2_ws/src/algorithms/state_estimation/state_estimation_3d/state_estimation_3d/ekf_node.py
|
FastSense/rosbot-ros2
|
c2d274ce179534fec5b2786a6f96b6d638019ac4
|
[
"MIT"
] | 2
|
2021-07-05T14:50:09.000Z
|
2021-09-14T15:21:11.000Z
|
ros2_ws/src/algorithms/state_estimation/state_estimation_3d/state_estimation_3d/ekf_node.py
|
FastSense/metalbot
|
063c897a16129d9aa88c2c7c52bdf6547af894e4
|
[
"MIT"
] | null | null | null |
import cv2
import nnio
import numpy as np
from scipy.spatial.transform import Rotation
from argparse import Namespace
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image, Imu, CameraInfo
from nav_msgs.msg import Odometry
from cv_bridge import CvBridge
import tf2_ros
from .spacekf import SpaceKF12
from perception_msgs.msg import OdoFlow
from optical_flow.stereo_camera import StereoCamera
class EKFNode(Node):
    """ROS2 node running a 12-state extended Kalman filter (SpaceKF12).

    Buffers IMU and optical-flow odometry messages between timer ticks,
    runs predict/update on each tick, and publishes the fused pose as an
    Odometry message plus a map->base_link TF transform.
    """

    def __init__(self):
        super().__init__('ekf_3d')
        self.bridge = CvBridge()
        self.tf2_broadcaster = tf2_ros.TransformBroadcaster(self)
        # Declare parameters
        self.declare_parameter('period', 0.1)
        self.declare_parameter('vel_std', 1.0)
        self.declare_parameter('rot_vel_std', 1.0)
        # Kalman filter parameters
        vel_std = self.get_parameter('vel_std').get_parameter_value().double_value
        rot_vel_std = self.get_parameter('rot_vel_std').get_parameter_value().double_value
        # Get camera parameters (filled in by calibration_callback)
        self.stereo = None
        # Subscribe to sensor topics
        self.create_subscription(
            OdoFlow,
            'odom_flow',
            self.odometry_callback,
            10,
        )
        self.create_subscription(
            Imu,
            'imu',
            self.imu_callback,
            10,
        )
        self.create_subscription(
            CameraInfo,
            'rectified_camera_info',
            self.calibration_callback,
            10,
        )
        # Publisher
        self.pose_publisher = self.create_publisher(
            Odometry,
            'pose_ekf',
            10,
        )
        # Create timer driving the periodic predict/update step
        self.period = self.get_parameter('period').get_parameter_value().double_value
        self.create_timer(self.period, self.step)
        # Create Kalman filter
        self.tracker = SpaceKF12(dt=self.period, velocity_std=vel_std, rot_vel_std=rot_vel_std)
        self.tracker.P = self.tracker.P * 0.01
        # Buffers for measurements (consumed and cleared each step)
        self.imu_buffer = None
        self.imu_count = 0
        self.odom_buffer = None
        # TF listener
        self.tf_buffer = tf2_ros.Buffer(rclpy.duration.Duration(seconds=1))
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, self)

    def odometry_callback(self, msg):
        # Keep only the latest flow-odometry message per filter step.
        self.odom_buffer = msg

    def imu_callback(self, msg):
        # Accumulate IMU readings between steps; update_imu divides the
        # sums by imu_count, i.e. the filter sees the per-step average.
        if not 'oakd' in msg.header.frame_id:
            # print('Wrong imu frame:', msg.header.frame_id)
            return
        if self.imu_buffer is None:
            self.imu_buffer = msg
            self.imu_count = 1
        else:
            self.imu_buffer.angular_velocity.x += msg.angular_velocity.x
            self.imu_buffer.angular_velocity.y += msg.angular_velocity.y
            self.imu_buffer.angular_velocity.z += msg.angular_velocity.z
            self.imu_buffer.linear_acceleration.x += msg.linear_acceleration.x
            self.imu_buffer.linear_acceleration.y += msg.linear_acceleration.y
            self.imu_buffer.linear_acceleration.z += msg.linear_acceleration.z
            self.imu_count += 1

    def step(self):
        '''
        EKF predict and update step
        '''
        # Predict
        self.tracker.predict()
        # Update (each buffer is consumed at most once per step)
        if self.imu_buffer is not None:
            self.update_imu(self.imu_buffer)
            self.imu_buffer = None
        if self.odom_buffer is not None:
            self.update_odom_flow(self.odom_buffer)
            self.odom_buffer = None
        # Publish
        self.publish_pose()

    def update_imu(self, msg):
        '''
        Update filter state using IMU message
        '''
        # Make KF-compatible measurements (averages of the accumulated sums)
        rot_vel = np.empty(3)
        rot_vel[0] = msg.angular_velocity.x / self.imu_count
        rot_vel[1] = msg.angular_velocity.y / self.imu_count
        rot_vel[2] = msg.angular_velocity.z / self.imu_count
        rot_vel_R = np.array(msg.angular_velocity_covariance).reshape([3, 3])
        acc = np.empty(3)
        acc[0] = msg.linear_acceleration.x / self.imu_count
        acc[1] = msg.linear_acceleration.y / self.imu_count
        acc[2] = msg.linear_acceleration.z / self.imu_count
        acc_R = np.array(msg.linear_acceleration_covariance).reshape([3, 3])
        # Get extrinsics from tf
        extrinsic = self.get_extrinsic(msg.header.frame_id, 'base_link')
        # Update (debug print left in; only the accelerometer update is active)
        print(acc)
        self.tracker.update_acc(acc, acc_R, extrinsic=extrinsic)
        # self.tracker.update_rot_vel(rot_vel, rot_vel_R, extrinsic=extrinsic)

    def update_odom_flow(self, msg):
        '''
        Update filter state using flow odometry message
        '''
        if self.stereo is None:
            print('waiting for camera parameters...')
            return
        z = np.vstack([msg.flow_x, msg.flow_y, msg.delta_depth]).transpose()  # [N, 3]
        pixels = np.vstack([msg.x, msg.y]).transpose()  # [N, 2]
        R = np.diag(msg.covariance_diag)
        # Get extrinsics from tf
        extrinsic = self.get_extrinsic(msg.header.frame_id, 'base_link')
        # Compute time delay between message stamp and now
        msg_time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9
        ros_stamp = self.get_clock().now().seconds_nanoseconds()
        ros_time = ros_stamp[0] + ros_stamp[1] * 1e-9
        delay = ros_time - msg_time
        print('odom delay:', delay)
        # NOTE(review): delay is multiplied by 0 below, i.e. the computed
        # delay is deliberately ignored -- confirm this is intentional.
        self.tracker.update_flow(
            z,
            msg.delta_t,
            msg.depth,
            pixels,
            R,
            self.stereo.M1,
            self.stereo.M1_inv,
            extrinsic=extrinsic,
            delay=delay*0,
        )

    def get_extrinsic(self, frame1, frame2):
        '''
        Parameters:
        frame1 (str): tf frame
        frame2 (str): tf frame
        Returns:
        np.array of shape [3, 4]: rotation-translation matrix between two tf frames.
        '''
        # Retry until the transform is available; only LookupException is
        # caught, other tf2 errors (e.g. extrapolation) will propagate.
        while True:
            try:
                # t = self.get_clock().now()
                # rclpy.spin_once(self)
                t = Namespace(seconds=0, nanoseconds=0)
                trans = self.tf_buffer.lookup_transform(frame1, frame2, t, rclpy.duration.Duration(seconds=10))
                # print(f"Got transform! {frame1} -> {frame2}")
                break
            except tf2_ros.LookupException:
                # rclpy.spin_once(self)
                print(f"Retrying to get transform {frame1} -> {frame2}", self.get_clock().now())
        tr = np.array([
            [trans.transform.translation.x],
            [trans.transform.translation.y],
            [trans.transform.translation.z],
        ])
        rot_q = np.array([
            trans.transform.rotation.x,
            trans.transform.rotation.y,
            trans.transform.rotation.z,
            trans.transform.rotation.w,
        ])
        rot = Rotation.from_quat(rot_q).as_matrix()
        extrinsic = np.concatenate([rot, tr], 1)
        return extrinsic

    def publish_pose(self):
        """Publish the current filter state as Odometry and broadcast the
        matching map->base_link transform over tf2."""
        # Make odometry message
        msg = Odometry()
        msg.header.stamp = self.get_clock().now().to_msg()
        msg.header.frame_id = 'map'
        msg.child_frame_id = 'base_link'
        # Position
        msg.pose.pose.position.x = self.tracker.pos[0]
        msg.pose.pose.position.y = self.tracker.pos[1]
        msg.pose.pose.position.z = self.tracker.pos[2]
        # Angle (quaternion, x/y/z/w order)
        msg.pose.pose.orientation.x = self.tracker.q[0]
        msg.pose.pose.orientation.y = self.tracker.q[1]
        msg.pose.pose.orientation.z = self.tracker.q[2]
        msg.pose.pose.orientation.w = self.tracker.q[3]
        # Pose & angle covariance
        msg.pose.covariance = self.tracker.get_pose_covariance()
        # Velocity
        msg.twist.twist.linear.x = self.tracker.vel[0]
        msg.twist.twist.linear.y = self.tracker.vel[1]
        msg.twist.twist.linear.z = self.tracker.vel[2]
        # Angular velocity
        msg.twist.twist.angular.x = self.tracker.rot_vel[0]
        msg.twist.twist.angular.y = self.tracker.rot_vel[1]
        msg.twist.twist.angular.z = self.tracker.rot_vel[2]
        # Vel & angvel covariance
        msg.twist.covariance = self.tracker.get_twist_covariance()
        # Publish
        self.pose_publisher.publish(msg)
        # Broadcast tf2 (same pose as the Odometry message)
        t = tf2_ros.TransformStamped()
        t.header = msg.header
        t.child_frame_id = 'base_link'
        t.transform.translation.x = self.tracker.pos[0]
        t.transform.translation.y = self.tracker.pos[1]
        t.transform.translation.z = self.tracker.pos[2]
        t.transform.rotation.x = self.tracker.q[0]
        t.transform.rotation.y = self.tracker.q[1]
        t.transform.rotation.z = self.tracker.q[2]
        t.transform.rotation.w = self.tracker.q[3]
        self.tf2_broadcaster.sendTransform(t)

    def calibration_callback(self, msg):
        """Build the StereoCamera model from the first CameraInfo message;
        later messages are ignored once self.stereo is set."""
        if self.stereo is None:
            M1 = np.array(msg.k).reshape([3, 3])
            M2 = M1
            # Baseline from the projection matrix, normalised by focal length
            T = [msg.p[3] / msg.k[0], msg.p[7] / msg.k[0], msg.p[11] / msg.k[0]]
            R = np.array(msg.r).reshape([3, 3])
            print('T', T)
            self.stereo = StereoCamera(
                M1=M1, M2=M2, R=R, T=T, image_h=msg.height, image_w=msg.width
            )
            # 128x128 matches the optical-flow network input -- TODO confirm
            self.stereo.change_dimensions_(128, 128)
def main(args=None):
    """Entry point: initialise rclpy, spin an EKFNode, then shut down."""
    print('Hi from ekf_3d.')
    rclpy.init(args=args)
    ekf_node = EKFNode()
    rclpy.spin(ekf_node)
    # Destroy the node explicitly (optional -- the garbage collector would
    # otherwise do it when the node object is reclaimed).
    ekf_node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| 33.506944
| 111
| 0.600725
|
4a0bed6d6c40d1b3a7815043b57b58b86a6a4956
| 4,683
|
py
|
Python
|
script.module.tknorris.shared/lib/cache.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:37:15.000Z
|
2019-03-05T09:37:15.000Z
|
script.module.tknorris.shared/lib/cache.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
script.module.tknorris.shared/lib/cache.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
"""
tknorris shared module
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import functools
import log_utils
import time
import cPickle as pickle
import hashlib
import os
import shutil
import kodi
# Module-level setup: logger, on-disk cache directory, and the enable flag.
logger = log_utils.Logger.get_logger(__name__)
logger.disable()
try:
    cache_path = kodi.translate_path(os.path.join(kodi.get_profile(), 'cache'))
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
except Exception as e:
    # NOTE(review): if translate_path itself raises, cache_path is unbound
    # here and this log line raises NameError -- confirm/handle upstream.
    logger.log('Failed to create cache: %s: %s' % (cache_path, e), log_utils.LOGWARNING)
cache_enabled = kodi.get_setting('use_cache') == 'true'
def reset_cache():
    """Delete the entire on-disk cache directory.

    Returns True on success, False when the removal fails (failure is
    logged, never raised).
    """
    try:
        shutil.rmtree(cache_path)
    except Exception as e:
        logger.log('Failed to Reset Cache: %s' % (e), log_utils.LOGWARNING)
        return False
    return True
def _get_func(name, args=None, kwargs=None, cache_limit=1):
    """Look up a cached result for the call (name, args, kwargs).

    Returns a (hit, result) pair: hit is True and result the unpickled
    value when a fresh-enough cache file exists; otherwise (False, None).
    cache_limit is the maximum acceptable age in hours.
    """
    if not cache_enabled:
        return False, None
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    max_age = time.time() - cache_limit * 60 * 60
    full_path = os.path.join(cache_path, _get_filename(name, args, kwargs))
    if not os.path.exists(full_path):
        return False, None
    if os.path.getmtime(full_path) < max_age:
        # Entry exists but is stale.
        return False, None
    with open(full_path, 'r') as f:
        pickled_result = f.read()
    return True, pickle.loads(pickled_result)
def _save_func(name, args=None, kwargs=None, result=None):
    """Pickle *result* into the cache file keyed by (name, args, kwargs).

    Caching is best-effort: any failure is logged and swallowed.
    """
    try:
        args = [] if args is None else args
        kwargs = {} if kwargs is None else kwargs
        target = os.path.join(cache_path, _get_filename(name, args, kwargs))
        with open(target, 'w') as f:
            f.write(pickle.dumps(result))
    except Exception as e:
        logger.log('Failure during cache write: %s' % (e), log_utils.LOGWARNING)
def _get_filename(name, args, kwargs):
arg_hash = hashlib.md5(name).hexdigest() + hashlib.md5(str(args)).hexdigest() + hashlib.md5(str(kwargs)).hexdigest()
return arg_hash
def cache_method(cache_limit):
    """Decorator factory: cache an instance method's results on disk.

    cache_limit is the cache lifetime in hours.  The owning instance is
    used only to build the cache key (module.Class.method); it is excluded
    from the hashed arguments so lookups match across instances.
    """
    def wrap(func):
        @functools.wraps(func)
        def memoizer(*args, **kwargs):
            if args:
                klass, real_args = args[0], args[1:]
                full_name = '%s.%s.%s' % (klass.__module__, klass.__class__.__name__, func.__name__)
            else:
                full_name, real_args = func.__name__, args
            hit, result = _get_func(full_name, real_args, kwargs, cache_limit=cache_limit)
            if hit:
                logger.log('Using method cache for: |%s|%s|%s| -> |%d|' % (full_name, args, kwargs, len(pickle.dumps(result))), log_utils.LOGDEBUG)
                return result
            logger.log('Calling cached method: |%s|%s|%s|' % (full_name, args, kwargs), log_utils.LOGDEBUG)
            result = func(*args, **kwargs)
            _save_func(full_name, real_args, kwargs, result)
            return result
        return memoizer
    return wrap
def cache_function(cache_limit):
    """Decorator factory: cache a plain function's results on disk.

    cache_limit is the cache lifetime in hours.  Do not use this on
    instance methods: the `self` argument would be hashed into the key,
    so lookups would never match -- use cache_method instead.
    """
    def wrap(func):
        @functools.wraps(func)
        def memoizer(*args, **kwargs):
            fname = func.__name__
            hit, result = _get_func(fname, args, kwargs, cache_limit=cache_limit)
            if hit:
                logger.log('Using function cache for: |%s|%s|%s| -> |%d|' % (fname, args, kwargs, len(pickle.dumps(result))), log_utils.LOGDEBUG)
                return result
            logger.log('Calling cached function: |%s|%s|%s|' % (fname, args, kwargs), log_utils.LOGDEBUG)
            result = func(*args, **kwargs)
            _save_func(fname, args, kwargs, result)
            return result
        return memoizer
    return wrap
| 39.686441
| 170
| 0.630579
|
4a0bee6fe44838b7852e55fef1bfe2763b86baf9
| 19,941
|
py
|
Python
|
networking_bigswitch/tests/unit/bigswitch/test_restproxy_plugin.py
|
WeifanFu-bsn/networking-bigswitch
|
6d66edbc4f9836e34f3588449775af983516092f
|
[
"Apache-2.0"
] | null | null | null |
networking_bigswitch/tests/unit/bigswitch/test_restproxy_plugin.py
|
WeifanFu-bsn/networking-bigswitch
|
6d66edbc4f9836e34f3588449775af983516092f
|
[
"Apache-2.0"
] | null | null | null |
networking_bigswitch/tests/unit/bigswitch/test_restproxy_plugin.py
|
WeifanFu-bsn/networking-bigswitch
|
6d66edbc4f9836e34f3588449775af983516092f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from mock import patch
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_allowedaddresspairs_db as test_addr_pair
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
import webob.exc
from networking_bigswitch.plugins.bigswitch import config as pl_config
from networking_bigswitch.plugins.bigswitch import constants as bsn_constants
from networking_bigswitch.plugins.bigswitch.servermanager import \
TenantIDNotFound
from networking_bigswitch.tests.unit.bigswitch import fake_server
from networking_bigswitch.tests.unit.bigswitch \
import test_base as bsn_test_base
from networking_bigswitch.tests.unit.bigswitch.mock_paths import HTTPCON
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
IS_UNICODE_ENABLED
class BigSwitchProxyPluginV2TestCase(bsn_test_base.BigSwitchTestBase,
                                     test_plugin.NeutronDbPluginV2TestCase):
    # Common fixture for all Big Switch proxy-plugin tests: sets up config
    # files and patches, registers the L3 + BSN service plugins, and starts
    # the fake HTTP backend patch.

    def setUp(self, plugin_name=None, service_plugins=None, ext_mgr=None):
        # HAS_PORT_FILTER is defined by subclasses that mix in the
        # port-bindings tests; mirror it into the security-group config.
        if hasattr(self, 'HAS_PORT_FILTER'):
            cfg.CONF.set_override(
                'enable_security_group', self.HAS_PORT_FILTER, 'SECURITYGROUP')
        self.setup_config_files()
        self.setup_patches()
        if plugin_name:
            self._plugin_name = plugin_name
        # Always register the BSN service plugins alongside any the caller
        # supplied.
        bsn_service_plugins = {'L3_ROUTER_NAT': self._l3_plugin_name,
                               bsn_constants.BSN_SERVICE_PLUGIN:
                                   self._bsn_service_plugin_name}
        if service_plugins:
            service_plugins.update(bsn_service_plugins)
        else:
            service_plugins = bsn_service_plugins
        test_plugin.NeutronDbPluginV2TestCase.setUp(
            self, self._plugin_name,
            service_plugins=service_plugins,
            ext_mgr=ext_mgr)
        # Ports are created asynchronously, so they start in BUILD.
        self.port_create_status = 'BUILD'
        self.startHttpPatch()

    def setUp_coreplugin_doc_marker_removed(self):
        pass
class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet,
                                 BigSwitchProxyPluginV2TestCase):
    # Runs the upstream basic-GET test suite against the BSN proxy plugin;
    # no plugin-specific overrides are needed.
    pass
class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse,
                                       BigSwitchProxyPluginV2TestCase):
    def test_failover_memory(self):
        # first request causes failover so next shouldn't hit bad server
        # NOTE: the outer network is created deliberately -- it is the
        # "first request" that triggers the server failover; the inner
        # network (tenant ExceptOnBadServer) must then succeed.
        with self.network() as net:
            kwargs = {'tenant_id': 'ExceptOnBadServer'}
            with self.network(**kwargs) as net:
                req = self.new_show_request('networks', net['network']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 200)
class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2,
                                BigSwitchProxyPluginV2TestCase,
                                test_bindings.PortBindingsTestCase):
    # Port CRUD, rollback-on-backend-failure, and VIF-binding tests for the
    # BSN proxy plugin.  Backend failures are simulated by swapping the fake
    # HTTP connection for Mock500/Mock404 variants via self.httpPatch.
    VIF_TYPE = portbindings.VIF_TYPE_OVS
    HAS_PORT_FILTER = False
    def setUp(self, plugin_name=None):
        super(TestBigSwitchProxyPortsV2,
              self).setUp(self._plugin_name)
    def test_get_ports_no_id(self):
        with self.port(name='test'):
            ports = directory.get_plugin().get_ports(
                context.get_admin_context(), fields=['name'])
            self.assertEqual(['name'], list(ports[0].keys()))
    def test_router_port_status_active(self):
        # router ports screw up port auto-deletion so it has to be
        # disabled for this test
        with self.network() as net:
            with self.subnet(network=net) as sub:
                with self.port(
                    subnet=sub,
                    do_delete=False,
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF
                ) as port:
                    # router ports should be immediately active
                    self.assertEqual(port['port']['status'], 'ACTIVE')
    def test_update_port_status_build(self):
        # normal ports go into the pending build state for async creation
        with self.port() as port:
            self.assertEqual(port['port']['status'], 'BUILD')
            self.assertEqual(self.port_create_status, 'BUILD')
    def _get_ports(self, netid):
        # Helper: list the ports on a network via the API and deserialize.
        return self.deserialize('json',
                                self._list_ports('json', netid=netid))['ports']
    def test_rollback_for_port_create(self):
        plugin = directory.get_plugin()
        with self.subnet() as s:
            # stop normal patch
            self.httpPatch.stop()
            # allow thread spawns for this test
            self.spawn_p.stop()
            kwargs = {'device_id': 'somedevid'}
            # put in a broken 'server'
            httpPatch = patch(HTTPCON, new=fake_server.HTTPConnectionMock500)
            httpPatch.start()
            with self.port(subnet=s, **kwargs):
                # wait for async port create request to finish
                plugin.evpool.waitall()
            # put good 'server' back in
            httpPatch.stop()
            self.httpPatch.start()
            ports = self._get_ports(s['subnet']['network_id'])
            # failure to create should result in port in error state
            self.assertEqual(ports[0]['status'], 'ERROR')
    def test_rollback_for_port_update(self):
        with self.network() as n:
            with self.port(network_id=n['network']['id'],
                           device_id='66') as port:
                port = self._get_ports(n['network']['id'])[0]
                data = {'port': {'name': 'aNewName', 'device_id': '99'}}
                # stop normal patch
                self.httpPatch.stop()
                with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                    self.new_update_request(
                        'ports', data, port['id']).get_response(self.api)
                self.httpPatch.start()
                uport = self._get_ports(n['network']['id'])[0]
                # name should have stayed the same
                self.assertEqual(port['name'], uport['name'])
    def test_rollback_for_port_delete(self):
        with self.network() as n:
            with self.port(network_id=n['network']['id'],
                           device_id='somedevid') as port:
                # stop normal patch
                self.httpPatch.stop()
                with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                    self._delete(
                        'ports',
                        port['port']['id'],
                        expected_code=webob.exc.HTTPInternalServerError.code)
                self.httpPatch.start()
                port = self._get_ports(n['network']['id'])[0]
                self.assertEqual('BUILD', port['status'])
    def test_correct_shared_net_tenant_id(self):
        # tenant_id in port requests should match network tenant_id instead
        # of port tenant_id
        def rest_port_op(self, ten_id, netid, port):
            if ten_id != 'SHARED':
                raise Exception('expecting tenant_id SHARED. got %s' % ten_id)
        with self.network(tenant_id='SHARED', shared=True) as net:
            with self.subnet(network=net) as sub:
                pref = ('networking_bigswitch.plugins.bigswitch'
                        '.servermanager.ServerPool.%s')  # noqa
                tomock = [pref % 'rest_create_port',
                          pref % 'rest_update_port',
                          pref % 'rest_delete_port']
                patches = [patch(f, create=True, new=rest_port_op)
                           for f in tomock]
                for restp in patches:
                    restp.start()
                with self.port(subnet=sub, tenant_id='port-owner') as port:
                    data = {'port': {'binding:host_id': 'someotherhost',
                                     'device_id': 'override_dev'}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(res.status_int, 200)
    def test_create404_triggers_sync(self):
        # allow async port thread for this patch
        self.spawn_p.stop()
        with\
            self.subnet() as s,\
            patch(HTTPCON, create=True,
                  new=fake_server.HTTPConnectionMock404),\
            patch(bsn_test_base.PLUGIN_PATH +
                  '.NeutronRestProxyV2._send_all_data') as mock_send_all:
            with self.port(subnet=s, device_id='somedevid') as p:
                # wait for the async port thread to finish
                plugin = directory.get_plugin()
                plugin.evpool.waitall()
            # a backend 404 must trigger a full topology resync
            call = mock.call(
                send_routers=True, send_floating_ips=True, timeout=None,
                triggered_by_tenant=p['port']['tenant_id']
            )
            mock_send_all.assert_has_calls([call])
        self.spawn_p.start()
    def test_port_vif_details_default(self):
        kwargs = {'name': 'name', 'device_id': 'override_dev'}
        with self.port(**kwargs) as port:
            self.assertEqual(port['port']['binding:vif_type'],
                             portbindings.VIF_TYPE_OVS)
    def test_port_vif_details_override(self):
        # ivshost is in the test config to override to IVS
        kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
                  'device_id': 'override_dev',
                  'arg_list': ('binding:host_id',)}
        with self.port(**kwargs) as port:
            self.assertEqual(port['port']['binding:vif_type'],
                             pl_config.VIF_TYPE_IVS)
        self._delete('ports', port['port']['id'])
        self._delete('networks', port['port']['network_id'])
        kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost',
                  'device_id': 'other_dev'}
        with self.port(**kwargs) as port:
            self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE)
    def test_port_move(self):
        # ivshost is in the test config to override to IVS
        kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
                  'device_id': 'override_dev'}
        with self.port(**kwargs) as port:
            data = {'port': {'binding:host_id': 'someotherhost',
                             'device_id': 'override_dev'}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE)
class TestVifDifferentDefault(BigSwitchProxyPluginV2TestCase):
    # Verifies that overriding the NOVA vif_type config changes the default
    # VIF binding reported on new ports.
    def setup_config_files(self):
        super(TestVifDifferentDefault, self).setup_config_files()
        cfg.CONF.set_override('vif_type', 'ivs', 'NOVA')
    def test_default_viftype(self):
        with self.port() as port:
            self.assertEqual(port['port']['binding:vif_type'], 'ivs')
class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2,
                                   BigSwitchProxyPluginV2TestCase):
    # Network CRUD rollback tests: a failing backend (HTTP 500) must leave
    # the Neutron DB unchanged.
    def _get_networks(self, tenant_id):
        # Helper: fetch a tenant's networks straight from the core plugin.
        ctx = context.Context('', tenant_id)
        return directory.get_plugin().get_networks(ctx)
    def test_rollback_on_network_create(self):
        tid = test_base._uuid()
        kwargs = {'tenant_id': tid}
        self.httpPatch.stop()
        with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
            self._create_network('json', 'netname', True, **kwargs)
        self.httpPatch.start()
        # backend failure -> network must not exist in the DB
        self.assertFalse(self._get_networks(tid))
    def test_rollback_on_network_update(self):
        with self.network() as n:
            data = {'network': {'name': 'aNewName'}}
            self.httpPatch.stop()
            with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                self.new_update_request(
                    'networks', data, n['network']['id']
                ).get_response(self.api)
            self.httpPatch.start()
            updatedn = self._get_networks(n['network']['tenant_id'])[0]
            # name should have stayed the same due to failure
            self.assertEqual(n['network']['name'], updatedn['name'])
    def test_rollback_on_network_delete(self):
        with self.network() as n:
            self.httpPatch.stop()
            with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                self._delete(
                    'networks', n['network']['id'],
                    expected_code=webob.exc.HTTPInternalServerError.code)
            self.httpPatch.start()
            # network should still exist in db
            self.assertEqual(n['network']['id'],
                             self._get_networks(n['network']['tenant_id']
                                                )[0]['id'])
    def test_notify_on_security_group_change(self):
        plugin = directory.get_plugin()
        with self.port() as p:
            with\
                mock.patch.object(plugin, 'notifier') as n_mock,\
                mock.patch.object(plugin, 'is_security_group_member_updated',
                                  return_value=True):
                # any port update should trigger a notification due to s_mock
                data = {'port': {'name': 'aNewName'}}
                self.new_update_request(
                    'ports', data, p['port']['id']).get_response(self.api)
                self.assertTrue(n_mock.port_update.called)
class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2,
                                  BigSwitchProxyPluginV2TestCase):
    # Runs the upstream subnet test suite against the BSN proxy plugin;
    # no plugin-specific overrides are needed.
    pass
class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase):
    # Smoke test: a full topology sync to the (mocked) backend returns 200.
    def test_send_data(self):
        plugin_obj = directory.get_plugin()
        result = plugin_obj._send_all_data()
        self.assertEqual(result[0], 200)
class TestDisplayName(BigSwitchProxyPluginV2TestCase):
    # Tests for _map_display_name_or_tenant with the unicode setting both
    # off (underscores escaped, tenant_name injected) and on (name replaced
    # by the id and the original name moved to 'display-name').
    def get_true(self):
        """Used for side_effect replacement
        :return: True (stands in for ServerPool.is_unicode_enabled)
        """
        return True
    def test_map_display_name_or_tenant_unicode_disabled(self):
        """Test _map_display_name_or_tenant behaviors when unicode is disabled
        :return:
        """
        self.map_display_name_or_tenant_p.stop()
        plugin_obj = directory.get_plugin()
        self.assertFalse(plugin_obj.servers.is_unicode_enabled())
        # object with non-existing tenant_id
        no_tenant_obj = {'id': 'test_id',
                         'name': 'test_name',
                         'tenant_id': 'non_exist_tenant_id'}
        self.assertRaises(TenantIDNotFound,
                          plugin_obj._map_display_name_or_tenant,
                          no_tenant_obj)
        # add a tenant to cache
        plugin_obj.servers.keystone_tenants = {'tenant_id': 'tenant_name'}
        # object with name, '_' in name will be replaced with '__'
        test_obj = {'id': 'test_id',
                    'name': 'test_name',
                    'tenant_id': 'tenant_id'}
        expected_obj = {'id': 'test_id',
                        'name': 'test__name',
                        'tenant_id': 'tenant_id',
                        'tenant_name': 'tenant_name'}
        self.assertEqual(expected_obj,
                         plugin_obj._map_display_name_or_tenant(test_obj))
        # object without name
        test_obj = {'id': 'test_id',
                    'tenant_id': 'tenant_id'}
        expected_obj = {'id': 'test_id',
                        'tenant_id': 'tenant_id',
                        'tenant_name': 'tenant_name'}
        self.assertEqual(expected_obj,
                         plugin_obj._map_display_name_or_tenant(test_obj))
    def test_map_display_name_or_tenant_unicode_enabled(self):
        """Test _map_display_name_or_tenant behaviors when unicode is enabled
        :return:
        """
        self.map_display_name_or_tenant_p.stop()
        self.is_unicode_enabled_p.stop()
        # force the unicode-enabled code path for this test
        mock.patch(IS_UNICODE_ENABLED, side_effect=self.get_true).start()
        plugin_obj = directory.get_plugin()
        self.assertTrue(plugin_obj.servers.is_unicode_enabled())
        # object with non-existing tenant_id, unicode enabled
        no_tenant_obj = {'id': 'test_id',
                         'name': 'test_name',
                         'tenant_id': 'non_exist_tenant_id'}
        self.assertRaises(TenantIDNotFound,
                          plugin_obj._map_display_name_or_tenant,
                          no_tenant_obj)
        # add a tenant to cache
        plugin_obj.servers.keystone_tenants = {'tenant_id': 'tenant_name'}
        # object with name, unicode enabled
        test_obj = {'id': 'test_id',
                    'name': 'test_name',
                    'tenant_id': 'tenant_id'}
        expected_obj = {'id': 'test_id',
                        'name': 'test_id',
                        'display-name': 'test_name',
                        'tenant_id': 'tenant_id'}
        self.assertEqual(expected_obj,
                         plugin_obj._map_display_name_or_tenant(test_obj))
        # object without name, unicode enabled
        test_obj = {'id': 'test_id',
                    'tenant_id': 'tenant_id'}
        expected_obj = {'id': 'test_id',
                        'name': 'test_id',
                        'tenant_id': 'tenant_id'}
        self.assertEqual(expected_obj,
                         plugin_obj._map_display_name_or_tenant(test_obj))
class TestBigSwitchAddressPairs(test_addr_pair.TestAllowedAddressPairs,
                                BigSwitchProxyPluginV2TestCase):
    """Allowed-address-pairs tests run against the Big Switch proxy plugin.

    Several inherited tests are overridden as no-ops because they fail
    against this plugin; see the per-method TODOs.
    """

    def test_create_missing_mac_field(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_create_address_gets_port_mac(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_create_overlap_with_fixed_ip(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_create_port_remove_allowed_address_pairs_with_none(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_create_port_allowed_address_pairs(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_equal_to_max_allowed_address_pair(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_update_port_allowed_address_pairs_bad_format(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_create_port_remove_allowed_address_pairs_with_list(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_update_add_address_pairs(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_update_with_none_and_own_mac_for_duplicate_ip(self):
        # TODO(wolverineav): debug why this fails
        pass

    def test_update_add_address_pairs_with_unexpected_format(self):
        # TODO(wolverineav): debug why this fails
        pass
| 40.284848
| 79
| 0.600572
|
4a0bef5058b2e33bed0617ede5343ba341899a00
| 5,036
|
py
|
Python
|
wifipumpkin3/core/controllers/proxycontroller.py
|
oza6ut0ne/wifipumpkin3
|
7aef4d2a67e0b3b4618f27af4617c5fb8e3b9cc6
|
[
"Apache-2.0"
] | 1
|
2022-01-17T11:38:59.000Z
|
2022-01-17T11:38:59.000Z
|
wifipumpkin3/core/controllers/proxycontroller.py
|
oza6ut0ne/wifipumpkin3
|
7aef4d2a67e0b3b4618f27af4617c5fb8e3b9cc6
|
[
"Apache-2.0"
] | null | null | null |
wifipumpkin3/core/controllers/proxycontroller.py
|
oza6ut0ne/wifipumpkin3
|
7aef4d2a67e0b3b4618f27af4617c5fb8e3b9cc6
|
[
"Apache-2.0"
] | null | null | null |
from wifipumpkin3.core.config.globalimport import *
from wifipumpkin3.core.common.uimodel import *
from wifipumpkin3.core.servers.proxy import *
from wifipumpkin3.core.utility.component import ControllerBlueprint
import copy
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ProxyModeController(PluginsUI, ControllerBlueprint):
    """Controller that discovers, registers and drives all proxy-server
    plugins (subclasses of ``proxymode.ProxyMode``).

    It keeps a registry of every available proxy plugin, exposes the
    currently activated one, and starts/stops the checked proxies.
    """

    Name = "Proxy"
    Caption = "Enable Proxy Server"
    ID = "proxy_controller"
    # NOTE(review): class-level mutable dicts are shared by every instance;
    # presumably this controller is only instantiated once - confirm before
    # creating a second instance.
    proxies = {}
    proxies_infor = {}
    SetNoProxy = QtCore.pyqtSignal(object)
    dockMount = QtCore.pyqtSignal(bool)

    @staticmethod
    def getID():
        # stable identifier used by the settings/plugin machinery
        return ProxyModeController.ID

    def __init__(self, parent=None, **kwargs):
        super(ProxyModeController, self).__init__(parent)
        self.parent = parent
        # append controller in DefaultWidget
        self.parent.getDefault.addController(self)
        self.conf = SuperSettings.getInstance()
        # load all plugin proxy: instantiate every ProxyMode subclass
        __proxlist = [
            prox(parent=self.parent) for prox in proxymode.ProxyMode.__subclasses__()
        ]
        # Keep Proxy in a dictionary keyed by plugin Name, plus a plain
        # metadata dict (proxies_infor) keyed by plugin ID
        for k in __proxlist:
            self.proxies[k.Name] = k
            self.proxies_infor[k.ID] = {
                "ID": k.ID,
                "Name": k.Name,
                "Port": k.getRunningPort(),
                "Activate": k.isChecked(),
                "Author": k.Author,
                "Logger": k.LogFile,
                "ConfigPath": k.CONFIGINI_PATH,
                "Description": k.Description,
                "Config": k.getConfig,
                "TypeButton": k.TypeButton,
            }
        # set all proxy plugin as child attribute (self.<plugin_id>)
        for n, p in self.proxies.items():
            if hasattr(p, "ID"):
                setattr(self, p.ID, p)

    def isChecked(self):
        # whether this controller itself is enabled in the settings file
        return self.conf.get("plugins", self.ID, format=bool)

    @property
    def ActiveDocks(self):
        # dock widget of the currently active proxy plugin
        return self.Active.dockwidget

    @property
    def ActiveReactor(self):
        """Collect the reactor objects of every checked proxy plugin."""
        reactor = []
        for act in self.proxies.values():
            if act.isChecked():
                if act.Name == "noproxy":
                    # "noproxy" always contributes both reactors
                    reactor.append(act.reactor)
                    reactor.append(act.subreactor)
                else:
                    reactor.append(act.reactor)
                    if act.subreactor:
                        reactor.append(act.subreactor)
        return reactor

    @property
    def Active(self):
        """Return the first checked proxy plugin, skipping type-2 plugins
        (tcp proxy log); implicitly returns None when nothing is checked."""
        for act in self.proxies.values():
            # exclude tcp proxy log
            if act.getTypePlugin() != 2:
                # print(act.isChecked(),act.Name)
                if act.isChecked():
                    return act

    @property
    def ActiveLoad(self):
        """ load all proxies type checkbox UI in tab plugins """
        proxies = []
        for act in self.proxies.values():
            if act.isChecked():
                proxies.append(act)
        return proxies

    @property
    def get(self):
        # full Name -> plugin registry
        return self.proxies

    def getInfo(self, excluded=()):
        """Return the proxies' metadata, optionally dropping the keys listed
        in ``excluded`` from every entry."""
        if not excluded:
            return self.proxies_infor
        result = {}
        for item in self.proxies_infor:
            result[item] = {}
            for subItem in self.proxies_infor[item]:
                if not subItem in excluded:
                    result[item][subItem] = self.proxies_infor[item][subItem]
        return result

    @classmethod
    def disable(cls, val=True):
        # stub - not implemented
        pass

    @property
    def disableproxy(self, name):
        # NOTE(review): a property getter cannot take an extra ``name``
        # argument - accessing this attribute would raise TypeError.
        # Currently an unused stub; confirm before removing.
        pass

    def Start(self):
        """Boot the active proxy first, then every other checked proxy."""
        self.Active.Initialize()
        self.Active.Serve()
        self.Active.boot()
        # load proxy checkbox all type all proxies
        for proxy in self.ActiveLoad:
            if proxy.Name != self.Active.Name:
                proxy.Initialize()
                proxy.Serve()
                proxy.boot()

    @property
    def getReactor(self):
        # background reactor of the active proxy plugin
        return self.Active.reactor

    def getReactorInfo(self):
        """Return ``{reactor_id: {"ID": ..., "PID": ...}}`` for the active
        reactor, or an empty dict when no reactor is available."""
        info_reactor = {}
        try:
            info_reactor[self.getReactor.getID()] = {
                "ID": self.getReactor.getID(),
                "PID": self.getReactor.getpid(),
            }
        except AttributeError:
            pass
        return info_reactor

    def Stop(self):
        # stop serving and shut the active proxy down
        self.Active.Serve(False)
        self.Active.shutdown()

    def SaveLog(self):
        # delegate log persistence to the active proxy plugin
        self.Active.SaveLog()
| 30.337349
| 85
| 0.583002
|
4a0bef5fd459939aaf5645a766cb22c75502c7fc
| 5,069
|
py
|
Python
|
async_covid/worldometers/covid.py
|
ahnaf-zamil/async-covid
|
0789ce8712b429fc2a226444e33c4fe03be91a8a
|
[
"MIT"
] | 2
|
2020-11-03T06:46:57.000Z
|
2020-11-18T12:36:01.000Z
|
async_covid/worldometers/covid.py
|
ahnaf-zamil/async-covid
|
0789ce8712b429fc2a226444e33c4fe03be91a8a
|
[
"MIT"
] | null | null | null |
async_covid/worldometers/covid.py
|
ahnaf-zamil/async-covid
|
0789ce8712b429fc2a226444e33c4fe03be91a8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Covid coronavirus statistics based on worldometers.info statistics
"""
import urllib.request
from bs4 import BeautifulSoup
from async_covid.worldometers.models import CovidModel
from async_covid import config
from pydantic import ValidationError
URL = "https://www.worldometers.info/coronavirus/"
SOURCE = config.WORLDOMETERS
class Covid:
    """Worldometers data for COVID-19.

    Note: Instantiate this class at the beginning of your code since it gets
    "all" the data (one HTTP request + HTML parse) when it's instantiated.
    """

    def __init__(self):
        self.__url = URL
        self.__data = {}
        # eagerly fetch and parse everything once at construction time
        self.__fetch()
        self.__set_data()
        self.source = SOURCE

    def __fetch(self):
        """Method gets all data when the class is instantiated.

        1. parses html
        2. gets all country data
        """
        # a browser User-Agent is sent because the site blocks the default
        # urllib one
        request = urllib.request.Request(self.__url, headers={'User-Agent':'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17'})
        response = urllib.request.urlopen(request).read()
        soup = BeautifulSoup(response, "html.parser")
        table = soup.find("table", attrs={"class": "main_table_countries"})
        headers = table.find_all("th")
        # strip non-breaking spaces from the column headers
        self.__headers = [
            header.text.replace("\xa0", "") for header in headers
        ]
        self.__rows = table.tbody.find_all("tr")
        # the big "maincounter" totals (confirmed, deaths, recovered)
        self.__total_cases = soup.find_all(
            "div", attrs={"class": "maincounter-number"}
        )

    def __set_data(self):
        """Method formats data to make it easily callable by country name.

        Column 1 of each row is the country name, used (lower-cased) as key.
        """
        countries = (
            [attr.text.strip() for attr in row if attr != "\n"]
            for row in self.__rows
        )
        self.__data = {country[1].lower(): country for country in countries}

    async def __format(self, _list: list) -> list:
        """Method formats a list and returns a fomatted one

        1. removes ','
        2. if there is no value it adds 0

        Args:
            _list (list): input list to be formatted

        Returns:
            list: output formatted list
        """
        _list = [val.strip().replace(",", "") for val in _list]
        return [val if val and val != "N/A" else 0 for val in _list]

    async def get_data(self) -> list:
        """Method returns a list of all of the data from worldometers after
        being formatted.

        Returns:
            list: list of CovidModel instances, one per country row
        """
        return [
            CovidModel(**dict(zip(self.__headers, await self.__format(val))))
            for val in self.__data.values()
        ]

    async def get_status_by_country_name(self, country_name: str) -> CovidModel:
        """Method gets country status.

        Args:
            country_name (str): country name e.g "Sweden" (case-insensitive)

        Raises:
            ValueError: when country name is not correct

        Returns:
            CovidModel: country information
        """
        try:
            country_data = dict(
                zip(
                    self.__headers,
                    await self.__format(self.__data[country_name.lower()]),
                )
            )
        except KeyError:
            raise ValueError(
                f"There is no country called '{country_name}', to check available country names use `list_countries()`"
            )
        return CovidModel(**country_data)

    async def list_countries(self) -> list:
        # all known country names (lower-cased), usable as lookup keys
        return list(self.__data.keys())

    @staticmethod
    async def __to_num(string: str) -> int:
        """Formats string numbers and converts them to an integer,
        e.g '123,456' -> 123456.

        Args:
            string (str): input string number

        Returns:
            int: output integer number
        """
        return int(string.strip().replace(",", ""))

    async def get_total_confirmed_cases(self) -> int:
        """Method gets the total number of confirmed cases.

        Returns:
            int: number of confirmed cases
        """
        return await self.__to_num(self.__total_cases[0].span.text)

    async def get_total_deaths(self) -> int:
        """Method gets the total number of deaths.

        Returns:
            int: total number of deaths
        """
        return await self.__to_num(self.__total_cases[1].span.text)

    async def get_total_recovered(self) -> int:
        """Method gets the total number of recovered cases.

        Returns:
            int: total number of recovered cases
        """
        return await self.__to_num(self.__total_cases[2].span.text)

    async def get_total_active_cases(self) -> int:
        """Method gets the total number of active cases
        (confirmed - recovered - deaths).

        Returns:
            int: total number of active cases
        """
        confirmed = await self.get_total_confirmed_cases()
        deaths = await self.get_total_deaths()
        recovered = await self.get_total_recovered()
        return confirmed - (recovered + deaths)
| 32.703226
| 181
| 0.584928
|
4a0befeda38f253de81da5920df40d7dd14e5b30
| 12,008
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/core/util/decorator.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/core/util/decorator.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/core/util/decorator.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
# -*- coding: utf-8 -*-
"""
Decorator used in ObsPy.
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import functools
import inspect
import os
import re
import socket
import tarfile
import threading
import unittest
import warnings
import zipfile
import numpy as np
from decorator import decorator
from future.utils import PY2, native_str
from obspy.core.util import get_example_file
from obspy.core.util.base import NamedTemporaryFile
from obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning
def deprecated(warning_msg=None):
    """
    This is a decorator which can be used to mark functions as deprecated.

    .. note::
        Actually, this is not a decorator itself but a decorator factory,
        returning the correct decorator for the specified options. It can be
        used just like a decorator.

    It will result in a warning being emitted when the function is used.

    :param warning_msg: custom warning text; when omitted, the function's own
        docstring (if it mentions "deprecated") or a generic message is used.
    """
    @decorator
    def _deprecated(func, *args, **kwargs):
        # prefer the function's own docstring if it already documents the
        # deprecation
        if 'deprecated' in str(func.__doc__).lower():
            msg = func.__doc__
        elif warning_msg:
            msg = warning_msg
            # also surface the message in the docstring shown by help()
            if PY2 and inspect.ismethod(func):
                # Python 2 bound methods have a read-only __doc__; write to
                # the underlying function object instead
                func.im_func.__doc__ = warning_msg
            else:
                func.__doc__ = warning_msg
        else:
            msg = "Call to deprecated function %s." % func.__name__
        warnings.warn(msg, category=ObsPyDeprecationWarning, stacklevel=3)
        return func(*args, **kwargs)
    return _deprecated
def deprecated_keywords(keywords):
    """
    Decorator for marking keywords as deprecated.

    .. note::
        Actually, this is not a decorator itself but a decorator factory,
        returning the correct decorator for the specified options. It can be
        used just like a decorator.

    :type keywords: dict
    :param keywords: old/new keyword names as key/value pairs. A value of
        ``None`` means the old keyword is ignored (with a warning) instead
        of being mapped to a new name.
    """
    def fdec(func):
        fname = func.__name__
        msg = "Deprecated keyword %s in %s() call - please use %s instead."
        msg2 = "Deprecated keyword %s in %s() call - ignoring."
        msg3 = ("Conflicting deprecated keywords (%s) in %s() call"
                " - please use new '%s' keyword instead.")

        @functools.wraps(func)
        def echo_func(*args, **kwargs):
            # check if multiple deprecated keywords get mapped to the same
            # new keyword
            new_keyword_appearance_counts = dict.fromkeys(keywords.values(), 0)
            for key, new_key in keywords.items():
                if key in kwargs:
                    new_keyword_appearance_counts[new_key] += 1
            for key_ in keywords.values():
                # ignore `None` as new value, it means that no mapping is
                # happening..
                if key_ is None:
                    continue
                if new_keyword_appearance_counts[key_] > 1:
                    conflicting_keys = ", ".join(
                        [old_key for old_key, new_key in keywords.items()
                         if new_key == key_])
                    # BUG FIX: report the conflicting *new* keyword (key_);
                    # the original used ``new_key``, a stale variable leaked
                    # from the counting loop above
                    raise Exception(msg3 % (conflicting_keys, fname, key_))
            # map deprecated keywords to new keywords; iterate over a
            # snapshot because kwargs is mutated in the loop - deleting
            # while iterating the live keys view raises RuntimeError on
            # Python 3
            for kw in list(kwargs.keys()):
                if kw in keywords:
                    nkw = keywords[kw]
                    if nkw is None:
                        warnings.warn(msg2 % (kw, fname),
                                      category=ObsPyDeprecationWarning,
                                      stacklevel=3)
                    else:
                        warnings.warn(msg % (kw, fname, nkw),
                                      category=ObsPyDeprecationWarning,
                                      stacklevel=3)
                        kwargs[nkw] = kwargs[kw]
                    # drop the deprecated keyword in both cases
                    del kwargs[kw]
            return func(*args, **kwargs)
        return echo_func
    return fdec
@decorator
def skip_on_network_error(func, *args, **kwargs):
    """
    Decorator for unittest to mark test routines that fail with certain
    network errors (e.g. timeouts) as "skipped" rather than "Error".
    """
    try:
        return func(*args, **kwargs)
    ###################################################
    # add more except clauses like this to add other
    # network errors that should be skipped
    except socket.timeout as e:
        if str(e) == "timed out":
            raise unittest.SkipTest(str(e))
        # NOTE(review): when the message does not match, the exception is
        # swallowed and None is returned - presumably intentional for tests,
        # but confirm.
    ###################################################
    except socket.error as e:
        if str(e) == "[Errno 110] Connection timed out":
            raise unittest.SkipTest(str(e))
    # general except to be able to generally reraise
    except Exception:
        raise
@decorator
def uncompress_file(func, filename, *args, **kwargs):
    """
    Decorator used for temporarily uncompressing a file if it is a .gz/.bz2
    file or a tar/zip archive.

    Each decompressed member is written to a temporary file and ``func`` is
    called once per member; the resulting streams are concatenated with
    ``+=``. Non-compressed (or non-string) input is passed straight through.

    :param func: wrapped read function, called as ``func(path, ...)``
    :param filename: path to the (possibly compressed) file
    :raises IOError: if ``filename`` does not exist
    """
    if not kwargs.get('check_compression', True):
        return func(filename, *args, **kwargs)
    if not isinstance(filename, (str, native_str)):
        # not a filename (e.g. file-like object) - pass through unchanged
        return func(filename, *args, **kwargs)
    elif not os.path.exists(filename):
        msg = "File not found '%s'" % (filename)
        raise IOError(msg)
    # check if we got a compressed file or archive
    obj_list = []
    if tarfile.is_tarfile(filename):
        try:
            # reading with transparent compression
            with tarfile.open(filename, 'r|*') as tar:
                for tarinfo in tar:
                    # only handle regular files
                    if not tarinfo.isfile():
                        continue
                    data = tar.extractfile(tarinfo).read()
                    # Skip empty files - we don't need them no matter what
                    # and it guards against rare cases where waveforms files
                    # are also slightly valid tar-files.
                    if not data:
                        continue
                    obj_list.append(data)
        except Exception:
            pass
    elif zipfile.is_zipfile(filename):
        try:
            # BUG FIX: use a context manager so the archive handle is always
            # closed (the original leaked the ZipFile and shadowed the
            # builtin ``zip``)
            with zipfile.ZipFile(filename) as zip_fh:
                obj_list = [zip_fh.read(name) for name in zip_fh.namelist()]
        except Exception:
            pass
    elif filename.endswith('.bz2'):
        # bz2 module
        try:
            import bz2
            with open(filename, 'rb') as fp:
                obj_list.append(bz2.decompress(fp.read()))
        except Exception:
            pass
    elif filename.endswith('.gz'):
        # gzip module
        try:
            import gzip
            with gzip.open(filename, 'rb') as fp:
                obj_list.append(fp.read())
        except Exception:
            pass
    # handle results
    if obj_list:
        # write results to temporary files and read each one with func
        result = None
        for obj in obj_list:
            with NamedTemporaryFile() as tempfile:
                tempfile._fileobj.write(obj)
                stream = func(tempfile.name, *args, **kwargs)
                # just add other stream objects to first stream
                if result is None:
                    result = stream
                else:
                    result += stream
    else:
        # no compressions
        result = func(filename, *args, **kwargs)
    return result
@decorator
def raise_if_masked(func, *args, **kwargs):
    """
    Raises if the first argument (self in case of methods) is a Trace with
    masked values or a Stream containing a Trace with masked values.

    :raises NotImplementedError: when masked values are found.
    """
    arrays = []
    # first arg seems to be a Stream (has a ``traces`` attribute)
    if hasattr(args[0], "traces"):
        arrays = [tr.data for tr in args[0]]
    # first arg seems to be a Trace (has ndarray ``data``)
    if hasattr(args[0], "data") and isinstance(args[0].data, np.ndarray):
        arrays = [args[0].data]
    for arr in arrays:
        if np.ma.is_masked(arr):
            msg = "Trace with masked values found. This is not " + \
                  "supported for this operation. Try the split() " + \
                  "method on Trace/Stream to produce a Stream with " + \
                  "unmasked Traces."
            raise NotImplementedError(msg)
    return func(*args, **kwargs)
@decorator
def skip_if_no_data(func, *args, **kwargs):
    """
    Does nothing if the first argument (self in case of methods) is a Trace
    with no data in it.

    Returns ``None`` in that case instead of calling the wrapped function.
    """
    if not args[0]:
        return
    return func(*args, **kwargs)
def map_example_filename(arg_kwarg_name):
    """
    Decorator that replaces "/path/to/filename" patterns in the arg or kwarg
    of the specified name with the correct file path. If the pattern is not
    encountered nothing is done.

    .. note::
        Actually, this is not a decorator itself but a decorator factory,
        returning the correct decorator for the specified options. It can be
        used just like a decorator.

    :type arg_kwarg_name: str
    :param arg_kwarg_name: name of the arg/kwarg that should be (tried) to map
    """
    @decorator
    def _map_example_filename(func, *args, **kwargs):
        prefix = '/path/to/'
        # check kwargs
        if arg_kwarg_name in kwargs:
            if isinstance(kwargs[arg_kwarg_name], (str, native_str)):
                if re.match(prefix, kwargs[arg_kwarg_name]):
                    try:
                        # [9:] strips the 9-character '/path/to/' prefix;
                        # the remainder is looked up in the example data
                        kwargs[arg_kwarg_name] = \
                            get_example_file(kwargs[arg_kwarg_name][9:])
                    # file not found by get_example_file:
                    except IOError:
                        pass
        # check args: find the positional index of the named parameter
        else:
            try:
                # Python 3: read parameter names via inspect.signature
                inspected_args = [
                    p.name
                    for p in inspect.signature(func).parameters.values()
                ]
            except AttributeError:
                # Python 2 fallback
                inspected_args = inspect.getargspec(func).args
            try:
                ind = inspected_args.index(arg_kwarg_name)
            except ValueError:
                pass
            else:
                if ind < len(args) and isinstance(args[ind], (str,
                                                              native_str)):
                    # need to check length of args from inspect
                    if re.match(prefix, args[ind]):
                        try:
                            # tuples are immutable: copy, patch, re-pack
                            args = list(args)
                            args[ind] = get_example_file(args[ind][9:])
                            args = tuple(args)
                        # file not found by get_example_file:
                        except IOError:
                            pass
        return func(*args, **kwargs)
    return _map_example_filename
def _decorate_polyfill(func, caller):
    """
    decorate(func, caller) decorates a function using a caller.

    Falls back to a manual ``FunctionMaker`` implementation for old versions
    of the third-party ``decorator`` package that lack ``decorate``.
    """
    try:
        from decorator import decorate
        return decorate(func, caller)
    except ImportError:
        from decorator import FunctionMaker
        evaldict = dict(_call_=caller, _func_=func)
        # build a wrapper with the same signature as ``func`` that forwards
        # to ``caller(func, ...)``
        fun = FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, __wrapped__=func)
        if hasattr(func, '__qualname__'):
            fun.__qualname__ = func.__qualname__
        return fun
def rlock(func):
    """
    Place a threading recursive lock (Rlock) on the wrapped function.
    """
    # This lock will be instantiated at function creation time, i.e. at the
    # time the Python interpreter sees the decorated function the very
    # first time - this lock thus exists once for each decorated function.
    _rlock = threading.RLock()

    def _locked_f(f, *args, **kwargs):
        # ``f`` is supplied by the decorator machinery; the closure variable
        # ``func`` is what actually gets called under the lock
        with _rlock:
            return func(*args, **kwargs)

    return _decorate_polyfill(func, _locked_f)
# run the module's doctests when executed directly
if __name__ == '__main__':
    import doctest
    doctest.testmod(exclude_empty=True)
| 35.111111
| 79
| 0.564041
|
4a0bf04dc887e96bba8f19bb8b50d76d895b628d
| 3,872
|
py
|
Python
|
pytext/metrics/tests/basic_metrics_test.py
|
debowin/pytext
|
91126bb34bd689f3513f25ca0d356ad374e004ab
|
[
"BSD-3-Clause"
] | 3
|
2019-10-19T11:16:12.000Z
|
2021-11-17T11:09:00.000Z
|
pytext/metrics/tests/basic_metrics_test.py
|
debowin/pytext
|
91126bb34bd689f3513f25ca0d356ad374e004ab
|
[
"BSD-3-Clause"
] | null | null | null |
pytext/metrics/tests/basic_metrics_test.py
|
debowin/pytext
|
91126bb34bd689f3513f25ca0d356ad374e004ab
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pytext.metrics import (
ClassificationMetrics,
LabelPrediction,
MacroPRF1Metrics,
MacroPRF1Scores,
PRF1Scores,
SoftClassificationMetrics,
compute_classification_metrics,
compute_soft_metrics,
)
from pytext.metrics.tests.metrics_test_base import MetricsTestBase
# Fixture 1: three-class problem. Each LabelPrediction carries the per-label
# scores (log-probabilities), the predicted label index and the expected
# (gold) label index.
LABEL_NAMES1 = ["label1", "label2", "label3"]
PREDICTIONS1 = [
    LabelPrediction(scores, predicted, expected)
    for scores, predicted, expected in [
        ([-0.5, -0.7, -0.8], 0, 0),
        ([-0.9, -0.2, -0.9], 1, 0),
        ([-0.7, -0.4, -0.9], 1, 1),
        ([-0.8, -0.9, -0.3], 2, 1),
    ]
]

# Fixture 2: binary problem used for the soft-metric / MCC / ROC-AUC tests.
LABEL_NAMES2 = ["label1", "label2"]
PREDICTIONS2 = [
    LabelPrediction(scores, predicted, expected)
    for scores, predicted, expected in [
        ([-0.6, -0.4], 1, 0),
        ([-0.7, -0.8], 0, 0),
        ([-0.6, -0.2], 1, 1),
        ([-0.5, -0.8], 0, 1),
        ([-0.6, -0.8], 0, 0),
    ]
]
class BasicMetricsTest(MetricsTestBase):
    """Unit tests for the basic classification metrics (PRF1, soft metrics,
    MCC, ROC-AUC) computed by ``pytext.metrics``."""

    def test_prf1_metrics(self) -> None:
        # hand-computed TP/FP/FN per label for PREDICTIONS1 (see inline
        # comments); soft scores are disabled via average_precisions=False
        self.assertMetricsAlmostEqual(
            compute_classification_metrics(
                PREDICTIONS1, LABEL_NAMES1, loss=2.0, average_precisions=False
            ),
            ClassificationMetrics(
                accuracy=0.5,
                macro_prf1_metrics=MacroPRF1Metrics(
                    per_label_scores={
                        # label1: TP = 1, FP = 0, FN = 1
                        "label1": PRF1Scores(1, 0, 1, 1.0, 0.5, 2.0 / 3),
                        # label2: TP = 1, FP = 1, FN = 1
                        "label2": PRF1Scores(1, 1, 1, 0.5, 0.5, 0.5),
                        # label3: TP = 0, FP = 1, FN = 0
                        "label3": PRF1Scores(0, 1, 0, 0.0, 0.0, 0.0),
                    },
                    macro_scores=MacroPRF1Scores(3, 0.5, 1.0 / 3, 7.0 / 18),
                ),
                per_label_soft_scores=None,
                mcc=None,
                roc_auc=None,
                loss=2.0,
            ),
        )

    def test_soft_metrics_computation(self) -> None:
        """Check per-label average precision, recall@precision and the
        decision thresholds at each precision level for PREDICTIONS2."""
        recall_at_precision_dict_l1 = {0.9: 0.0, 0.8: 0.0, 0.6: 1.0, 0.4: 1.0, 0.2: 1.0}
        recall_at_precision_dict_l2 = {0.9: 0.5, 0.8: 0.5, 0.6: 0.5, 0.4: 1.0, 0.2: 1.0}
        decision_thresh_at_precision_dict_l1 = {
            0.9: 0.0,
            0.8: 0.0,
            0.6: -0.7,
            0.4: -0.7,
            0.2: -0.7,
        }
        decision_thresh_at_precision_dict_l2 = {
            0.9: -0.2,
            0.8: -0.2,
            0.6: -0.2,
            0.4: -0.8,
            0.2: -0.8,
        }
        self.assertMetricsAlmostEqual(
            compute_soft_metrics(PREDICTIONS2, LABEL_NAMES2),
            {
                "label1": SoftClassificationMetrics(
                    average_precision=8.0 / 15,
                    recall_at_precision=recall_at_precision_dict_l1,
                    decision_thresh_at_precision=decision_thresh_at_precision_dict_l1,
                    roc_auc=1.0 / 6,
                ),
                "label2": SoftClassificationMetrics(
                    average_precision=0.7,
                    recall_at_precision=recall_at_precision_dict_l2,
                    decision_thresh_at_precision=decision_thresh_at_precision_dict_l2,
                    roc_auc=4.0 / 6,
                ),
            },
        )

    def test_compute_mcc(self) -> None:
        # Matthews correlation coefficient for the binary fixture
        metrics = compute_classification_metrics(PREDICTIONS2, LABEL_NAMES2, loss=5.0)
        self.assertAlmostEqual(metrics.mcc, 1.0 / 6)
        # Just to test the metrics print without errors
        metrics.print_metrics()

    def test_compute_roc_auc(self) -> None:
        # ROC-AUC of the first label for the binary fixture
        metrics = compute_classification_metrics(PREDICTIONS2, LABEL_NAMES2, loss=5.0)
        self.assertAlmostEqual(metrics.roc_auc, 1.0 / 6)
| 34.882883
| 88
| 0.522211
|
4a0bf0701a37851c1e9bbdf93829798596618309
| 15,134
|
py
|
Python
|
rama/keysyms.py
|
tadfisher/rama
|
6dcb0c41fa1c52fa43f08f1f1777e741db3b372e
|
[
"MIT"
] | 2
|
2015-08-29T01:07:20.000Z
|
2015-11-15T19:12:42.000Z
|
rama/keysyms.py
|
tadfisher/rama
|
6dcb0c41fa1c52fa43f08f1f1777e741db3b372e
|
[
"MIT"
] | null | null | null |
rama/keysyms.py
|
tadfisher/rama
|
6dcb0c41fa1c52fa43f08f1f1777e741db3b372e
|
[
"MIT"
] | 2
|
2018-08-07T11:27:19.000Z
|
2021-01-12T15:20:20.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2009, samurai-x.org
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the samurai-x.org nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SAMURAI-X.ORG ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL SAMURAI-X.ORG BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module is a direct C -> Python port of the keysyms module from
xcb-util which can be found here: http://cgit.freedesktop.org/xcb/util
xcb-keysyms license::
Copyright © 2008 Ian Osgood <iano@quirkster.com>
Copyright © 2008 Jamey Sharp <jamey@minilop.net>
Copyright © 2008 Josh Triplett <josh@freedesktop.org>
Copyright © 2008 Ulrich Eckhardt <doomster@knuut.de>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the names of the authors or
their institutions shall not be used in advertising or otherwise to
promote the sale, use or other dealings in this Software without
prior written authorization from the authors.
.. data:: X_KEYS
list of X key names
"""
from . import keysymdef
class cached_property(object):
    """
    A simple cached property descriptor.

    On first attribute access the wrapped function is evaluated once and its
    result is stored on the instance under the same name; because this is a
    non-data descriptor, later accesses hit the instance ``__dict__``
    directly and never call the function again.

    from http://ronny.uberhost.de/simple-cached-for-properties-done-right
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        # compute once, then shadow the descriptor on the instance
        value = self.func(obj)
        obj.__dict__[self.name] = value
        return value
NO_SYMBOL = 0
def convert_case(sym):
    """
    return (lower, upper) for internal use.

    Maps *sym* to its (lowercase, uppercase) keysym pair; symbols
    without case map to themselves in both positions.

    :note: direct port of
    http://cgit.freedesktop.org/xcb/util/tree/keysyms/keysyms.c#n361
    """
    lower = sym
    upper = sym
    # The high byte of a legacy keysym selects its character-set "row".
    enc = sym >> 8
    if enc == 0: # latin1
        if ((sym >= keysymdef.keysyms["A"]) and (sym <= keysymdef.keysyms["Z"])):
            lower += (keysymdef.keysyms["a"] - keysymdef.keysyms["A"])
        elif ((sym >= keysymdef.keysyms["a"]) and (sym <= keysymdef.keysyms["z"])):
            upper -= (keysymdef.keysyms["a"] - keysymdef.keysyms["A"])
        elif ((sym >= keysymdef.keysyms["Agrave"])
                and (sym <= keysymdef.keysyms["Odiaeresis"])):
            lower += (keysymdef.keysyms["agrave"] - keysymdef.keysyms["Agrave"])
        elif ((sym >= keysymdef.keysyms["agrave"])
                and (sym <= keysymdef.keysyms["odiaeresis"])):
            upper -= (keysymdef.keysyms["agrave"] - keysymdef.keysyms["Agrave"])
        elif ((sym >= keysymdef.keysyms["Ooblique"]) and (sym <= keysymdef.keysyms["Thorn"])):
            lower += (keysymdef.keysyms["oslash"] - keysymdef.keysyms["Ooblique"])
        elif ((sym >= keysymdef.keysyms["oslash"]) and (sym <= keysymdef.keysyms["thorn"])):
            upper -= (keysymdef.keysyms["oslash"] - keysymdef.keysyms["Ooblique"])
    elif enc == 1: # latin2
        # Assume the KeySym is a legal value (ignore discontinuities)
        if (sym == keysymdef.keysyms["Aogonek"]):
            lower = keysymdef.keysyms["aogonek"]
        elif (sym >= keysymdef.keysyms["Lstroke"] and sym <= keysymdef.keysyms["Sacute"]):
            lower += (keysymdef.keysyms["lstroke"] - keysymdef.keysyms["Lstroke"])
        elif (sym >= keysymdef.keysyms["Scaron"] and sym <= keysymdef.keysyms["Zacute"]):
            lower += (keysymdef.keysyms["scaron"] - keysymdef.keysyms["Scaron"])
        elif (sym >= keysymdef.keysyms["Zcaron"] and sym <= keysymdef.keysyms["Zabovedot"]):
            lower += (keysymdef.keysyms["zcaron"] - keysymdef.keysyms["Zcaron"])
        elif (sym == keysymdef.keysyms["aogonek"]):
            upper = keysymdef.keysyms["Aogonek"]
        elif (sym >= keysymdef.keysyms["lstroke"] and sym <= keysymdef.keysyms["sacute"]):
            upper -= (keysymdef.keysyms["lstroke"] - keysymdef.keysyms["Lstroke"])
        elif (sym >= keysymdef.keysyms["scaron"] and sym <= keysymdef.keysyms["zacute"]):
            upper -= (keysymdef.keysyms["scaron"] - keysymdef.keysyms["Scaron"])
        elif (sym >= keysymdef.keysyms["zcaron"] and sym <= keysymdef.keysyms["zabovedot"]):
            upper -= (keysymdef.keysyms["zcaron"] - keysymdef.keysyms["Zcaron"])
        elif (sym >= keysymdef.keysyms["Racute"] and sym <= keysymdef.keysyms["Tcedilla"]):
            lower += (keysymdef.keysyms["racute"] - keysymdef.keysyms["Racute"])
        elif (sym >= keysymdef.keysyms["racute"] and sym <= keysymdef.keysyms["tcedilla"]):
            upper -= (keysymdef.keysyms["racute"] - keysymdef.keysyms["Racute"])
    elif enc == 2: # latin3
        # Assume the KeySym is a legal value (ignore discontinuities)
        if (sym >= keysymdef.keysyms["Hstroke"] and sym <= keysymdef.keysyms["Hcircumflex"]):
            lower += (keysymdef.keysyms["hstroke"] - keysymdef.keysyms["Hstroke"])
        elif (sym >= keysymdef.keysyms["Gbreve"] and sym <= keysymdef.keysyms["Jcircumflex"]):
            lower += (keysymdef.keysyms["gbreve"] - keysymdef.keysyms["Gbreve"])
        elif (sym >= keysymdef.keysyms["hstroke"] and sym <= keysymdef.keysyms["hcircumflex"]):
            upper -= (keysymdef.keysyms["hstroke"] - keysymdef.keysyms["Hstroke"])
        elif (sym >= keysymdef.keysyms["gbreve"] and sym <= keysymdef.keysyms["jcircumflex"]):
            upper -= (keysymdef.keysyms["gbreve"] - keysymdef.keysyms["Gbreve"])
        elif (sym >= keysymdef.keysyms["Cabovedot"]
                and sym <= keysymdef.keysyms["Scircumflex"]):
            lower += (keysymdef.keysyms["cabovedot"] - keysymdef.keysyms["Cabovedot"])
        elif (sym >= keysymdef.keysyms["cabovedot"]
                and sym <= keysymdef.keysyms["scircumflex"]):
            upper -= (keysymdef.keysyms["cabovedot"] - keysymdef.keysyms["Cabovedot"])
    elif enc == 3: # latin4
        # Assume the KeySym is a legal value (ignore discontinuities)
        if (sym >= keysymdef.keysyms["Rcedilla"] and sym <= keysymdef.keysyms["Tslash"]):
            lower += (keysymdef.keysyms["rcedilla"] - keysymdef.keysyms["Rcedilla"])
        elif (sym >= keysymdef.keysyms["rcedilla"] and sym <= keysymdef.keysyms["tslash"]):
            upper -= (keysymdef.keysyms["rcedilla"] - keysymdef.keysyms["Rcedilla"])
        elif (sym == keysymdef.keysyms["ENG"]):
            lower = keysymdef.keysyms["eng"]
        elif (sym == keysymdef.keysyms["eng"]):
            upper = keysymdef.keysyms["ENG"]
        elif (sym >= keysymdef.keysyms["Amacron"] and sym <= keysymdef.keysyms["Umacron"]):
            lower += (keysymdef.keysyms["amacron"] - keysymdef.keysyms["Amacron"])
        elif (sym >= keysymdef.keysyms["amacron"] and sym <= keysymdef.keysyms["umacron"]):
            upper -= (keysymdef.keysyms["amacron"] - keysymdef.keysyms["Amacron"])
    elif enc == 6: # cyrillic
        # Assume the KeySym is a legal value (ignore discontinuities)
        if (sym >= keysymdef.keysyms["Serbian_DJE"]
                and sym <= keysymdef.keysyms["Serbian_DZE"]):
            lower -= (keysymdef.keysyms["Serbian_DJE"] - keysymdef.keysyms["Serbian_dje"])
        elif (sym >= keysymdef.keysyms["Serbian_dje"]
                and sym <= keysymdef.keysyms["Serbian_dze"]):
            upper += (keysymdef.keysyms["Serbian_DJE"] - keysymdef.keysyms["Serbian_dje"])
        elif (sym >= keysymdef.keysyms["Cyrillic_YU"]
                and sym <= keysymdef.keysyms["Cyrillic_HARDSIGN"]):
            lower -= (keysymdef.keysyms["Cyrillic_YU"] - keysymdef.keysyms["Cyrillic_yu"])
        elif (sym >= keysymdef.keysyms["Cyrillic_yu"]
                and sym <= keysymdef.keysyms["Cyrillic_hardsign"]):
            upper += (keysymdef.keysyms["Cyrillic_YU"] - keysymdef.keysyms["Cyrillic_yu"])
    elif enc == 7: # greek
        # Accented vowels are handled first; the dieresis-accent combos
        # have no uppercase form and are excluded below.
        if (sym >= keysymdef.keysyms["Greek_ALPHAaccent"]
                and sym <= keysymdef.keysyms["Greek_OMEGAaccent"]):
            lower += (keysymdef.keysyms["Greek_alphaaccent"] -
                    keysymdef.keysyms["Greek_ALPHAaccent"])
        elif (sym >= keysymdef.keysyms["Greek_alphaaccent"]
                and sym <= keysymdef.keysyms["Greek_omegaaccent"] and
                sym != keysymdef.keysyms["Greek_iotaaccentdieresis"] and
                sym != keysymdef.keysyms["Greek_upsilonaccentdieresis"]):
            upper -= (keysymdef.keysyms["Greek_alphaaccent"] -
                    keysymdef.keysyms["Greek_ALPHAaccent"])
        elif (sym >= keysymdef.keysyms["Greek_ALPHA"]
                and sym <= keysymdef.keysyms["Greek_OMEGA"]):
            lower += (keysymdef.keysyms["Greek_alpha"] - keysymdef.keysyms["Greek_ALPHA"])
        elif (sym >= keysymdef.keysyms["Greek_alpha"]
                and sym <= keysymdef.keysyms["Greek_omega"] and
                sym != keysymdef.keysyms["Greek_finalsmallsigma"]):
            upper -= (keysymdef.keysyms["Greek_alpha"] - keysymdef.keysyms["Greek_ALPHA"])
    elif enc == 0x14: # armenian
        # Armenian letters alternate case on the low bit of the keysym.
        if (sym >= keysymdef.keysyms["Armenian_AYB"]
                and sym <= keysymdef.keysyms["Armenian_fe"]):
            lower = sym | 1
            upper = sym & ~1
    return lower, upper
class Keysyms(object):
    """
    a simple helper for keycodes and keysyms.

    Fetches the X server's keyboard mapping lazily (via
    :class:`cached_property`) and resolves keycodes <-> keysyms from it.
    """
    def __init__(self, conn):
        """
        :type conn: :class:`ooxcb.conn.Connection`
        """
        self.conn = conn

    @cached_property
    def _cookie(self):
        # Issue a single GetKeyboardMapping request covering every
        # keycode the server supports; resolved only on first use.
        min_keycode = self.conn.get_setup().min_keycode
        max_keycode = self.conn.get_setup().max_keycode
        return self.conn.core.GetKeyboardMapping(
                min_keycode,
                max_keycode - min_keycode + 1)

    @cached_property
    def _reply(self):
        # Waits for the server's reply; cached for the object's lifetime.
        return self._cookie.reply()

    def get_keycode(self, keysym):
        """
        return the corresponding keycode for *keysym* or None.

        Scans every (keycode, column) pair of the keyboard mapping and
        returns the first keycode whose entry matches.
        NOTE(review): uses ``xrange``, so this module targets Python 2.
        """
        for j in xrange(self._reply.keysyms_per_keycode):
            for keycode in xrange(self.conn.get_setup().min_keycode,
                    self.conn.get_setup().max_keycode + 1):
                if self.get_keysym(keycode, j) == keysym:
                    return keycode
        return None

    def get_keysym(self, keycode, col):
        """
        return the corresponding keysym for *keycode* in column
        *col*.

        :todo: no error checking for now :)
        """
        keysyms = self._reply.keysyms
        min_keycode = self.conn.get_setup().min_keycode
        max_keycode = self.conn.get_setup().max_keycode
        per = self._reply.keysyms_per_keycode
        #ptr = (keycode - min_keycode) * per
        # Slice off everything before this keycode's row of `per` syms.
        keysyms = keysyms[(keycode - min_keycode) * per:]
        # TODO: error checking
        if col < 4:
            if col > 1:
                # Trim trailing NoSymbol entries from this row; if only
                # one group remains, fall back to the first group.
                while (per > 2 and keysyms[per - 1] == NO_SYMBOL):
                    per -= 1
                if per < 3:
                    col -= 2
            if (per <= (col|1) or keysyms[col | 1] == NO_SYMBOL):
                # Single-symbol group: derive lower/upper via case rules.
                lsym, usym = convert_case(keysyms[col & ~1])
                if not col & 1:
                    return lsym
                elif lsym == usym:
                    return 0
                else:
                    return usym
        return keysyms[col]
# Names of non-character ("special") X keys, plus function keys F1..F35.
X_KEYS = ['Home', 'Left', 'Up', 'Right', 'Down', 'Page_Up',
        'Page_Down', 'End', 'Begin', 'BackSpace',
        'Return', 'Escape', 'KP_Enter'] + \
        ['F%d' % i for i in range(1, 36)]
def keysym_to_str(keysym):
    """
    convert a keysym to its equivalent character or
    key description and return it.
    Returns an empty string for an unknown keysym.

    That's just a shortcut for :mod:`ooxcb.keysymdef`.
    """
    try:
        return keysymdef.names[keysym]
    except KeyError:
        return ''
class ConversionError(Exception):
    """Raised when a keysym cannot be converted to a character."""
    pass
def keysym_to_char(keysym):
    """
    try to convert *keysym* (an `int`) to a character and return it as
    an unicode string.

    If it couldn't be converted or *keysym* is NoSymbol / VoidSymbol,
    a :class:`ConversionError` is raised.

    The approach is described in `http://www.cl.cam.ac.uk/~mgk25/ucs/X11.keysyms.pdf`.
    It is able to convert latin-1, unicode and legacy keysyms. Special,
    function and vendor keysyms will raise a :class:`ConversionError`.

    NOTE(review): uses ``unichr``, so this function is Python-2 only.
    """
    # special keysyms (NoSymbol = 0, VoidSymbol = 0x00ffffff)
    if keysym in (0, 0x00ffffff):
        raise ConversionError("%d is a special keysym" % keysym)
    # latin-1 keysyms: value equals the code point directly
    elif (0x0020 <= keysym <= 0x007e or 0x00a0 <= keysym <= 0x00ff):
        return unichr(keysym)
    # unicode keysyms: code point plus a 0x01000000 offset
    elif (0x01000100 <= keysym <= 0x0110ffff):
        return unichr(keysym - 0x01000000)
    # legacy keysyms: resolved through a lookup table
    elif keysym in keysymdef.legacy_keysyms:
        return unichr(keysymdef.legacy_keysyms[keysym])
    # dunno!
    else:
        raise ConversionError("Unsupported keysym category or legacy keysym: %d" % keysym)
| 48.197452
| 95
| 0.624488
|
4a0bf0e1dda2b1bac0c73e90c318f4e0125f0378
| 985
|
py
|
Python
|
examples/m_reader.py
|
JoaoGFarias/locust-plugins
|
c3cac416d75466e899b69416915509c0e1f8d08d
|
[
"Apache-2.0"
] | null | null | null |
examples/m_reader.py
|
JoaoGFarias/locust-plugins
|
c3cac416d75466e899b69416915509c0e1f8d08d
|
[
"Apache-2.0"
] | null | null | null |
examples/m_reader.py
|
JoaoGFarias/locust-plugins
|
c3cac416d75466e899b69416915509c0e1f8d08d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import locust_plugins.utils
locust_plugins.utils.gevent_debugger_patch()
import os
from locust_plugins.mongoreader import MongoReader
from locust_plugins.listeners import PrintListener
from locust import HttpLocust, task, TaskSet
from locust.wait_time import constant_pacing
# Shared reader that hands out test users from MongoDB.
# NOTE(review): the filters presumably select users with tb == 0 or
# lb == 1 — confirm against locust_plugins.mongoreader's filter semantics.
reader = MongoReader(
    filters=[{"tb": 0}, {"lb": 1}],
    id_column="ssn",
    uri=os.environ["LOCUST_MONGO"],
    database=os.environ["LOCUST_MONGO_DATABASE"],
    collection=os.environ["LOCUST_MONGO_COLLECTION"],
)
class UserBehavior(TaskSet):
    """Single task: request the host with a test user's ssn as query param."""
    @task
    def my_task(self):
        # Check out a user from the shared reader for the duration of the
        # request; the with-block returns it to the pool on exit.
        with reader.user() as user:
            self.client.get(f"/?ssn={user['ssn']}")
class MyHttpLocust(HttpLocust):
    """Locust user running UserBehavior with a fixed one-second pacing."""
    task_set = UserBehavior
    wait_time = constant_pacing(1)  # aim for one iteration per second per user
    host = "http://example.com"
# allow running as executable, to support attaching the debugger
if __name__ == "__main__":
    PrintListener()
    # Let exceptions propagate to the debugger instead of being swallowed.
    MyHttpLocust._catch_exceptions = False
    MyHttpLocust().run()
| 25.25641
| 64
| 0.723858
|
4a0bf12945f0289299ea0762fbe278d006bc617d
| 3,664
|
py
|
Python
|
ClientA.py
|
DashanGao/Secret-sharing-based_2PC_vertical_fed_learning
|
a7c3eeb697d839dbc6d11ddccdd57197450a47bc
|
[
"MIT"
] | 10
|
2020-06-19T03:29:33.000Z
|
2021-12-05T12:01:53.000Z
|
ClientA.py
|
GaoDashan1/Secret-sharing-based_2PC_vertical_fed_learning
|
a7c3eeb697d839dbc6d11ddccdd57197450a47bc
|
[
"MIT"
] | null | null | null |
ClientA.py
|
GaoDashan1/Secret-sharing-based_2PC_vertical_fed_learning
|
a7c3eeb697d839dbc6d11ddccdd57197450a47bc
|
[
"MIT"
] | 1
|
2021-12-31T14:57:54.000Z
|
2021-12-31T14:57:54.000Z
|
from Client import Client
import numpy as np
class ClientA(Client):
    """
    Party A of the secret-sharing based two-party vertical federated
    learning setup: holds its own feature matrix ``X`` and weight vector,
    and exchanges additive secret shares with parties B and C.

    NOTE(review): ``self.data`` and ``self.config`` are presumably set up
    by the ``Client`` base class — confirm there.
    """
    def __init__(self, name, X, config):
        super().__init__(config, config['ADDR_A'])
        self.name = name
        self.X = X  # local feature matrix (samples x features)
        self.weights = np.zeros(X.shape[1]) # np.random.rand(X.shape[1]) * 2 - 1
        self.estim_e = None  # last estimation error seen by update_weights
        self.B_addr = config['ADDR_B']
        self.C_addr = config['ADDR_C']
    def eval(self):
        '''
        A do estimation
        :return: [W_A] * X_A'
        '''
        return self.X.dot(self.weights)
    def predict(self, X):
        # Plain linear prediction on an arbitrary feature matrix.
        return X.dot(self.weights)
    def update_weights(self, estim_error, lr=0.03, lam=0.01):
        # MSE gradient (2/n * e·X) plus L2 regularization term lam * w.
        self.estim_e = estim_error
        gradient = self.estim_e.dot(self.X) * (2 / self.X.shape[0]) + lam * self.weights
        self.weights -= lr * gradient # Update encrypted weight using gradient descent
    def step_1(self):
        # Split u_a = w_A · X_A', its square l_a, and X_A into additive
        # shares; the *_1 shares go to B and the *_2 shares go to C.
        try:
            u_a = np.dot(self.weights, self.X.T)
            l_a_1 = np.random.rand(u_a.shape[0])
            u_a_1 = np.random.rand(u_a.shape[0])
            x_a_1 = np.random.rand(self.X.shape[0], self.X.shape[1])
            x_a_2 = self.X - x_a_1
            u_a_2 = u_a - u_a_1
            l_a = u_a ** 2
            l_a_2 = l_a - l_a_1
        except Exception as e:
            # NOTE(review): if this fails, the names below are unbound and
            # the method raises NameError — confirm intended behavior.
            print("Wrong 1 in A: %s" % e)
        data_to_C = {"u_a_2": u_a_2, "x_a_2": x_a_2, "l_a_2": l_a_2}
        data_to_B = {"u_a_1": u_a_1, "x_a_1": x_a_1}
        self.data.data.update(data_to_B)
        self.data.data.update(data_to_C)
        self.data.data.update({"u_a": u_a, "l_a": l_a, "l_a_1": l_a_1})
        # Message format: [destination, payload, [iter, step, tag]].
        to_b = [self.B_addr, data_to_B, [self.data.iter_num, 1, str(self.data.iter_num)+"A1"]]
        to_c = [self.C_addr, data_to_C, [self.data.iter_num, 1, str(self.data.iter_num)+"A1"]]
        return to_b, to_c
    def step_2(self):
        # Combine local shares with shares received from B/C into partial
        # gradient shares f_a_1 / f_b_1 and the loss share l_1.
        try:
            dt = self.data.data
            assert "R" in dt.keys(), "Error: 'R' from C in step 1 not successfully received."
            l_1 = dt["l_a_1"] + 2 * np.dot(dt["u_a_2"], dt["u_b_1"])
        except Exception as e:
            print("A step 2 exception: %s" % e)
            print(dt.keys())
            print(dt['u_a_2'], dt['u_b_1'])
        try:
            # Weighted column sums: sum_i u[i] * X[i, :].
            u_a_x_a = sum([self.X[i, :] * dt["u_a"][i] for i in range(self.X.shape[0])])
            u_b_1_x_a = sum([self.X[i, :] * dt["u_b_1"][i] for i in range(self.X.shape[0])])
            f_a_1 = 2 * u_a_x_a + 2 * u_b_1_x_a
            u_a_2_x_b_1 = sum([dt["x_b_1"][i, :] * dt["u_a_2"][i] for i in range(self.X.shape[0])])
            f_b_1 = dt["R"] + 2 * u_a_2_x_b_1
        except Exception as e:
            # NOTE(review): on failure f_b_1 / l_1 are unbound and the
            # return below raises NameError — confirm intended behavior.
            print("A step 2 exception 2 %s " % e)
            print(dt["R"].shape)
            print(dt['u_a_2'].shape, dt['x_b_1'].shape, u_a_2_x_b_1.shape)
            print()
            print(u_a_2_x_b_1)
        self.data.data.update({"f_a_1": f_a_1, "f_b_1": f_b_1})
        return [self.B_addr, {"f_b_1": f_b_1, "l_1": l_1}, [self.data.iter_num, 2, str(self.data.iter_num)+"A2"]]
    def step_3(self):
        dt = self.data.data
        # Compute gradient
        try:
            # Reassemble the full gradient from the three received shares.
            f_a = (dt["f_a_1"] + dt["f_a_2"] + dt["f_a_3"]) / self.X.shape[0]
            # Update weight
            self.weights = self.weights - self.config["lr"] * f_a - self.config['lambda'] * self.weights
        except Exception as e:
            print("A step 3 exception: %s" % e)
            print(dt.keys())
        print("A weight %d : " % self.data.iter_num, self.weights)
        return
| 38.166667
| 114
| 0.525109
|
4a0bf30a96ec1d6bdde51ef47f0c1289303e0763
| 11,595
|
py
|
Python
|
src/esdl_helper.py
|
ESDLMapEditorESSIM/esdl-mapeditor
|
c17090e19de8ff3a0cc552f347639bac67840d22
|
[
"Apache-2.0"
] | null | null | null |
src/esdl_helper.py
|
ESDLMapEditorESSIM/esdl-mapeditor
|
c17090e19de8ff3a0cc552f347639bac67840d22
|
[
"Apache-2.0"
] | 14
|
2020-09-30T21:16:46.000Z
|
2021-11-08T18:54:34.000Z
|
src/esdl_helper.py
|
ESDLMapEditorESSIM/esdl-mapeditor
|
c17090e19de8ff3a0cc552f347639bac67840d22
|
[
"Apache-2.0"
] | 1
|
2020-09-17T12:48:57.000Z
|
2020-09-17T12:48:57.000Z
|
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
import re
from flask_socketio import emit
from pyecore.ecore import EEnum
from esdl import esdl
from esdl.processing import ESDLGeometry, ESDLAsset, ESDLQuantityAndUnits
from extensions.session_manager import get_handler, get_session, get_session_for_esid
from extensions.profiles import Profiles
def generate_profile_info(profile_list):
    """
    Build a list of UI-friendly dicts describing each ESDL profile.

    :param profile_list: iterable of esdl profile objects (SingleValue,
        InfluxDBProfile or DateTimeProfile).
    :return: list of dicts with 'id', 'class', 'type', 'uiname' and
        class-specific keys ('value' or 'multiplier').
    """
    profile_info_list = []
    for profile in profile_list:
        profile_class = type(profile).__name__
        qau = profile.profileQuantityAndUnit
        # A QuantityAndUnitReference is an indirection; follow it.
        if isinstance(qau, esdl.QuantityAndUnitReference):
            qau = qau.reference
        if qau:
            profile_type = ESDLQuantityAndUnits.qau_to_string(qau)
        else:
            profile_type = profile.profileType.name
        profile_name = profile.name
        profile_id = profile.id
        # The classes are mutually exclusive, so use an elif chain.
        if profile_class == 'SingleValue':
            value = profile.value
            profile_info_list.append({'id': profile_id, 'class': 'SingleValue', 'value': value, 'type': profile_type, 'uiname': profile_name})
        elif profile_class == 'InfluxDBProfile':
            database = profile.database
            multiplier = profile.multiplier
            measurement = profile.measurement
            field = profile.field
            # Look up a friendly name in the configured profile store;
            # when several entries match, the last match wins (as before).
            profiles = Profiles.get_instance().get_profiles()['profiles']
            for pkey in profiles:
                p = profiles[pkey]
                if p['database'] == database and p['measurement'] == measurement and p['field'] == field:
                    profile_name = p['profile_uiname']
            if profile_name is None:  # fix: was `== None`
                profile_name = field
            profile_info_list.append({'id': profile_id, 'class': 'InfluxDBProfile', 'multiplier': multiplier, 'type': profile_type, 'uiname': profile_name})
        elif profile_class == 'DateTimeProfile':
            profile_info_list.append({'id': profile_id, 'class': 'DateTimeProfile', 'type': profile_type, 'uiname': profile_name})
    return profile_info_list
def get_port_profile_info(asset):
    """
    Collect per-port profile descriptions for *asset*.

    :return: list of dicts with 'port_id', 'port_name' and 'profiles'
        (the latter produced by :func:`generate_profile_info`).
    """
    result = []
    for port in asset.port:
        profiles = port.profile
        info = generate_profile_info(profiles) if profiles else []
        result.append({'port_id': port.id, 'port_name': port.name, 'profiles': info})
    return result
# ---------------------------------------------------------------------------------------------------------------------
# Get connections information for an asset
# ---------------------------------------------------------------------------------------------------------------------
def get_connected_to_info(asset):
    """
    Describe every port of *asset* and the ports/assets it connects to.

    :return: list of dicts, one per port, with the port id, type name,
        name, carrier name (or None) and a 'ct_list' of connected
        counterpart ports with their owning assets.
    """
    result = []
    for port in asset.port:
        carrier_name = port.carrier.name if port.carrier else None
        connections = []
        remote_ports = port.connectedTo
        if remote_ports:
            for remote in remote_ports:
                remote_asset = remote.energyasset  # small a instead of Asset
                connections.append({
                    'opid': port.id,
                    'pid': remote.id,
                    'aid': remote_asset.id,
                    'atype': type(remote_asset).__name__,
                    'aname': remote_asset.name,
                })
        result.append({
            'pid': port.id,
            'ptype': type(port).__name__,
            'pname': port.name,
            'pcarr': carrier_name,
            'ct_list': connections,
        })
    #logger.debug(result)
    return result
def get_asset_geom_info(asset):
    """
    Return a representative coordinate for *asset*'s geometry.

    Point -> (lat, lon); Line -> [first_endpoint, last_endpoint];
    Polygon -> its computed center; anything else -> empty tuple.
    """
    geometry = asset.geometry
    if not geometry:
        return ()
    if isinstance(geometry, esdl.Point):
        return (geometry.lat, geometry.lon)
    if isinstance(geometry, esdl.Line):
        pts = geometry.point
        head = (pts[0].lat, pts[0].lon)
        tail = (pts[len(pts) - 1].lat, pts[len(pts) - 1].lon)
        return [head, tail]
    if isinstance(geometry, esdl.Polygon):
        return ESDLGeometry.calculate_polygon_center(geometry)
    return ()
def get_asset_from_port_id(esh, es_id, pid):
    """Resolve port *pid* in energy system *es_id* and return its owning asset."""
    return esh.get_by_id(es_id, pid).eContainer()
def get_asset_and_coord_from_port_id(esh, es_id, pid):
    """
    Resolve port *pid* and return its owning asset plus a coordinate.

    For Line geometries the first coordinate belongs to the first port
    and the last coordinate to the second port.
    NOTE(review): if the asset is a Line but *pid* is neither of the
    first two ports, execution falls through to the final return with an
    empty coord — confirm this is intended.
    """
    port = esh.get_by_id(es_id, pid)
    asset = port.eContainer()
    if asset.geometry:
        coord = get_asset_geom_info(asset)
        if isinstance(asset.geometry, esdl.Line):
            # Take care of returning first coordinate for first port and last coordinate for second port
            if asset.port[0].id == pid:
                return {'asset': asset, 'coord': coord[0]}
            if asset.port[1].id == pid:
                return {'asset': asset, 'coord': coord[1]}
        else:
            return {'asset': asset, 'coord': coord}
    return {'asset': asset, 'coord': ()}
def asset_state_to_ui(asset):
    """Map an asset's AssetStateEnum to its one-letter UI code."""
    state = asset.state
    if state == esdl.AssetStateEnum.ENABLED:
        return 'e'
    if state == esdl.AssetStateEnum.OPTIONAL:
        return 'o'
    # Everything else (e.g. DISABLED) renders as 'd'.
    return 'd'
def get_tooltip_asset_attrs(asset, shape):
    """
    Collect the asset attributes referenced by the user's tooltip format.

    :param asset: ESDL asset; attributes are read reflectively via its eClass.
    :param shape: 'marker', 'polygon' or 'line' — selects which tooltip
        format string from the user settings applies.
    :return: dict mapping attribute name -> value for every set attribute
        named in the format string (enum values become their names);
        'name' and 'id' placeholders are skipped.
    """
    user_settings = get_session('user_settings')
    attrs_dict = dict()
    if user_settings:
        if 'ui_settings' in user_settings:
            if 'tooltips' in user_settings['ui_settings']:
                tooltip_settings = user_settings['ui_settings']['tooltips']
                # Bug fix: tooltip_format was previously left unbound
                # (NameError) when neither shape-specific format key was
                # present in the settings.
                tooltip_format = None
                if (shape == 'marker' or shape == 'polygon') and 'marker_tooltip_format' in tooltip_settings:
                    tooltip_format = tooltip_settings['marker_tooltip_format']
                elif shape == 'line' and 'line_tooltip_format' in tooltip_settings:
                    tooltip_format = tooltip_settings['line_tooltip_format']
                if tooltip_format is None:
                    return attrs_dict
                # Placeholders look like "{attr}" or "{attr1/attr2}".
                attr_names_list = re.findall(r'\{(.*?)\}', tooltip_format)
                for attr_names in attr_names_list:
                    for attr_name in attr_names.split('/'):
                        if attr_name not in ['name', 'id']:
                            attr = asset.eClass.findEStructuralFeature(attr_name)
                            if attr and asset.eIsSet(attr_name):
                                value = asset.eGet(attr_name)
                                if isinstance(attr.eType, EEnum):
                                    attrs_dict[attr_name] = value.name
                                else:
                                    attrs_dict[attr_name] = value
    return attrs_dict
def add_spatial_attributes(asset, attrs):
    """
    Augment *attrs* in place with buffer distances and surface area for
    *asset*, when spatial buffers are configured to be visible on startup.
    """
    user_settings = get_session('user_settings')
    if not user_settings:
        return
    ui_settings = user_settings.get('ui_settings', {})
    buffer_settings = ui_settings.get('spatial_buffers')
    if not buffer_settings or not buffer_settings.get('visible_on_startup'):
        return
    if asset.bufferDistance:
        attrs['dist'] = {bd.type.name: bd.distance for bd in asset.bufferDistance}
    # To be able to show a circle for assets that have a Point geometry and surfaceArea attribute
    if asset.eIsSet('surfaceArea'):
        attrs['surfaceArea'] = asset.surfaceArea
def energy_asset_to_ui(esh, es_id, asset):  # , port_asset_mapping):
    """
    Convert an ESDL asset into the (asset_descr, conn_list) pair that the
    map UI consumes. The shape of asset_descr depends on the geometry:
    Point/Polygon entries include a capability type, Line entries do not.

    NOTE(review): a Polygon asset that is neither a WindPark nor a PVPark,
    and an asset without geometry, fall through and return None (not a
    pair) — confirm callers handle that.
    """
    port_list = []
    conn_list = []
    ports = asset.port
    for p in ports:
        # p_asset = port_asset_mapping[p.id]
        p_asset = get_asset_and_coord_from_port_id(esh, es_id, p.id)
        p_asset_coord = p_asset['coord']  # get proper coordinate if asset is line
        conn_to = [cp.id for cp in p.connectedTo]
        profile = p.profile
        profile_info_list = []
        carrier_id = p.carrier.id if p.carrier else None
        if profile:
            profile_info_list = generate_profile_info(profile)
        port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': conn_to,
                          'profile': profile_info_list, 'carrier': carrier_id})
        if conn_to:
            # conn_to_list = conn_to.split(' ')   # connectedTo attribute is list of port ID's separated by a space
            for id in conn_to:
                # pc_asset = port_asset_mapping[id]
                pc_asset = get_asset_and_coord_from_port_id(esh, es_id, id)
                pc_asset_coord = pc_asset['coord']
                conn_list.append(
                    {'from-port-id': p.id,
                     'from-asset-id': p_asset['asset'].id,
                     'from-port-carrier': p.carrier.id if p.carrier else None,
                     'from-asset-coord': p_asset_coord,
                     'to-port-id': id,
                     'to-port-carrier': pc_asset['asset'].carrier.id if pc_asset['asset'].carrier else None,
                     'to-asset-id': pc_asset['asset'].id,
                     'to-asset-coord': pc_asset_coord
                     })
    state = asset_state_to_ui(asset)
    geom = asset.geometry
    if geom:
        if isinstance(geom, esdl.Point):
            lat = geom.lat
            lon = geom.lon
            capability_type = ESDLAsset.get_asset_capability_type(asset)
            tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'marker')
            return ['point', 'asset', asset.name, asset.id, type(asset).__name__, [lat, lon], tooltip_asset_attrs,
                    state, port_list, capability_type], conn_list
        elif isinstance(geom, esdl.Line):
            coords = []
            for point in geom.point:
                coords.append([point.lat, point.lon])
            tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'line')
            return ['line', 'asset', asset.name, asset.id, type(asset).__name__, coords, tooltip_asset_attrs, state,
                    port_list], conn_list
        elif isinstance(geom, esdl.Polygon):
            if isinstance(asset, esdl.WindPark) or isinstance(asset, esdl.PVPark):
                coords = ESDLGeometry.parse_esdl_subpolygon(geom.exterior, False)  # [lon, lat]
                coords = ESDLGeometry.exchange_coordinates(coords)  # --> [lat, lon]
                capability_type = ESDLAsset.get_asset_capability_type(asset)
                # print(coords)
                tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'polygon')
                return ['polygon', 'asset', asset.name, asset.id, type(asset).__name__, coords, tooltip_asset_attrs,
                        state, port_list, capability_type], conn_list
        else:
            return [], []
def update_carrier_conn_list():
    """
    Refresh the carrier ids on every connection of the active energy
    system's connection list and push the updated list to the browser.
    """
    esh = get_handler()
    active_es_id = get_session('active_es_id')
    conn_list = get_session_for_esid(active_es_id, 'conn_list')
    for c in conn_list:
        from_port = esh.get_by_id(active_es_id, c['from-port-id'])
        if from_port.carrier:
            c['from-port-carrier'] = from_port.carrier.id
        # Bug fix: the to-port was previously looked up with the
        # from-port id, so 'to-port-carrier' reflected the wrong port.
        to_port = esh.get_by_id(active_es_id, c['to-port-id'])
        if to_port.carrier:
            c['to-port-carrier'] = to_port.carrier.id
    emit('clear_connections')   # clear current active layer connections
    emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
| 41.55914
| 156
| 0.594653
|
4a0bf3c57bfec1a89a82d7e6d7f9df5761be83d7
| 9,365
|
py
|
Python
|
openstack_dashboard/test/settings.py
|
NunoEdgarGFlowHub/horizon
|
73a0bbd43ea78ac5337f7d00977ec5f32452067e
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/test/settings.py
|
NunoEdgarGFlowHub/horizon
|
73a0bbd43ea78ac5337f7d00977ec5f32452067e
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/test/settings.py
|
NunoEdgarGFlowHub/horizon
|
73a0bbd43ea78ac5337f7d00977ec5f32452067e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import six
from django.utils.translation import pgettext_lazy
from horizon.test.settings import * # noqa: F403,H303
from horizon.utils import secret_key
from openstack_dashboard import exceptions
from horizon.utils.escape import monkeypatch_escape
# this is used to protect from client XSS attacks, but it's worth
# enabling in our test setup to find any issues it might cause
monkeypatch_escape()
from openstack_dashboard.utils import settings as settings_utils
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(TEST_DIR, ".."))
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
STATIC_URL = '/static/'
WEBROOT = '/'
SECRET_KEY = secret_key.generate_or_read_from_file(
os.path.join(tempfile.gettempdir(), '.secret_key_store'))
ROOT_URLCONF = 'openstack_dashboard.test.urls'
TEMPLATES[0]['DIRS'] = [
os.path.join(TEST_DIR, 'templates')
]
TEMPLATES[0]['OPTIONS']['context_processors'].append(
'openstack_dashboard.context_processors.openstack'
)
CUSTOM_THEME_PATH = 'themes/default'
# 'key', 'label', 'path'
AVAILABLE_THEMES = [
(
'default',
pgettext_lazy('Default style theme', 'Default'),
'themes/default'
), (
'material',
pgettext_lazy("Google's Material Design style theme", "Material"),
'themes/material'
),
]
SELECTABLE_THEMES = [
(
'default',
pgettext_lazy('Default style theme', 'Default'),
'themes/default'
),
]
# Theme Static Directory
THEME_COLLECTION_DIR = 'themes'
COMPRESS_OFFLINE = False
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.humanize',
'django_nose',
'openstack_auth',
'compressor',
'horizon',
'openstack_dashboard',
)
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
SITE_BRANDING = 'OpenStack'
# Horizon framework configuration used by the dashboard test suite.
HORIZON_CONFIG = {
    "password_validator": {
        "regex": '^.{8,18}$',
        "help_text": "Password must be between 8 and 18 characters."
    },
    'user_home': None,
    'help_url': "http://docs.openstack.org",
    # Exception classes Horizon should treat as recoverable / 404 / 401.
    'exceptions': {'recoverable': exceptions.RECOVERABLE,
                   'not_found': exceptions.NOT_FOUND,
                   'unauthorized': exceptions.UNAUTHORIZED},
    'angular_modules': [],
    'js_files': [],
}
# Feature flags for the AngularJS panels; disabled here so the legacy
# Django panels (and their unit tests) are exercised.
ANGULAR_FEATURES = {
    'images_panel': False,  # Use the legacy panel so unit tests are still run
    'flavors_panel': False,
    'roles_panel': False,
}
STATICFILES_DIRS = settings_utils.get_xstatic_dirs(
settings_utils.BASE_XSTATIC_MODULES, HORIZON_CONFIG
)
# Load the pluggable dashboard settings
import openstack_dashboard.enabled
INSTALLED_APPS = list(INSTALLED_APPS) # Make sure it's mutable
settings_utils.update_dashboards(
[
openstack_dashboard.enabled,
],
HORIZON_CONFIG,
INSTALLED_APPS,
)
OPENSTACK_PROFILER = {'enabled': False}
settings_utils.find_static_files(HORIZON_CONFIG, AVAILABLE_THEMES,
THEME_COLLECTION_DIR, ROOT_PATH)
# Set to 'legacy' or 'direct' to allow users to upload images to glance via
# Horizon server. When enabled, a file form field will appear on the create
# image form. If set to 'off', there will be no file form field on the create
# image form. See documentation for deployment considerations.
HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
AVAILABLE_REGIONS = [
('http://localhost:5000/v3', 'local'),
('http://remote:5000/v3', 'remote'),
]
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2
}
OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v3"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'test_domain'
OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = True
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': False, # Enabled in specific tests only
'enable_distributed_router': False,
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': True,
}
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', 'Select format'),
('aki', 'AKI - Amazon Kernel Image'),
('ami', 'AMI - Amazon Machine Image'),
('ari', 'ARI - Amazon Ramdisk Image'),
('iso', 'ISO - Optical Disk Image'),
('ploop', 'PLOOP - Virtuozzo/Parallels Loopback Disk'),
('qcow2', 'QCOW2 - QEMU Emulator'),
('raw', 'Raw'),
('vdi', 'VDI'),
('vhd', 'VHD'),
('vmdk', 'VMDK')
]
}
LOGGING['loggers'].update(
{
'openstack_dashboard': {
'handlers': ['test'],
'propagate': False,
},
'openstack_auth': {
'handlers': ['test'],
'propagate': False,
},
'novaclient': {
'handlers': ['test'],
'propagate': False,
},
'keystoneclient': {
'handlers': ['test'],
'propagate': False,
},
'glanceclient': {
'handlers': ['test'],
'propagate': False,
},
'neutronclient': {
'handlers': ['test'],
'propagate': False,
},
'oslo_policy': {
'handlers': ['test'],
'propagate': False,
},
'stevedore': {
'handlers': ['test'],
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
}
)
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': 'ALL TCP',
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
}
NOSE_ARGS = ['--nocapture',
'--nologcapture',
'--cover-package=openstack_dashboard',
'--cover-inclusive',
'--all-modules']
# TODO(amotoki): Need to investigate why --with-html-output
# is unavailable in python3.
# NOTE(amotoki): Most horizon plugins import this module in their test
# settings and they do not necessarily have nosehtmloutput in test-reqs.
# Assuming nosehtmloutput potentially breaks plugins tests,
# we check the availability of htmloutput module (from nosehtmloutput).
try:
import htmloutput # noqa: F401
has_html_output = True
except ImportError:
has_html_output = False
if six.PY2 and has_html_output:
NOSE_ARGS += ['--with-html-output',
'--html-out-file=ut_openstack_dashboard_nose_results.html']
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
POLICY_FILES = {
'identity': 'keystone_policy.json',
'compute': 'nova_policy.json'
}
# The openstack_auth.user.Token object isn't JSON-serializable ATM
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_API_SETTING_1 = 'foo'
REST_API_SETTING_2 = 'bar'
REST_API_SECURITY = 'SECURITY'
REST_API_REQUIRED_SETTINGS = ['REST_API_SETTING_1']
REST_API_ADDITIONAL_SETTINGS = ['REST_API_SETTING_2']
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
# --------------------
# Test-only settings
# --------------------
# TEST_GLOBAL_MOCKS_ON_PANELS: defines what and how methods should be
# mocked globally for unit tests and Selenium tests.
# 'method' is required. 'return_value' and 'side_effect'
# are optional and passed to mock.patch().
TEST_GLOBAL_MOCKS_ON_PANELS = {
'aggregates': {
'method': ('openstack_dashboard.dashboards.admin'
'.aggregates.panel.Aggregates.can_access'),
'return_value': True,
},
'domains': {
'method': ('openstack_dashboard.dashboards.identity'
'.domains.panel.Domains.can_access'),
'return_value': True,
},
'trunk-project': {
'method': ('openstack_dashboard.dashboards.project'
'.trunks.panel.Trunks.can_access'),
'return_value': True,
},
'trunk-admin': {
'method': ('openstack_dashboard.dashboards.admin'
'.trunks.panel.Trunks.can_access'),
'return_value': True,
},
'qos': {
'method': ('openstack_dashboard.dashboards.project'
'.network_qos.panel.NetworkQoS.can_access'),
'return_value': True,
},
}
| 28.465046
| 78
| 0.639829
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.