hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9341a63382a080379eb1fbad26490deed5a76c6
| 2,404
|
py
|
Python
|
pysteps/tests/helpers.py
|
Fangyh09/pysteps
|
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
|
[
"BSD-3-Clause"
] | 6
|
2019-01-06T07:42:55.000Z
|
2021-02-03T13:59:50.000Z
|
pysteps/tests/helpers.py
|
Fangyh09/pysteps
|
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
|
[
"BSD-3-Clause"
] | 5
|
2018-12-23T15:10:27.000Z
|
2021-01-06T15:03:03.000Z
|
pysteps/tests/helpers.py
|
Fangyh09/pysteps
|
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T14:16:43.000Z
|
2019-08-13T00:36:31.000Z
|
"""
Testing helper functions
=======================
Collection of helper functions for the testing suite.
"""
from datetime import datetime
import numpy as np
import pytest
import pysteps as stp
from pysteps import io, rcparams
def get_precipitation_fields(num_prev_files=0):
    """Get a precipitation field from the archive to be used as reference.

    Reads the MeteoSwiss ("mch") radar composite for the fixed reference
    case 2015-05-15 16:30, converts it to rain rate [mm/h], masks invalid
    values and log-transforms it to dBR.

    Parameters
    ----------
    num_prev_files : int
        Number of files preceding the reference time to read as well.
        When 0, the time dimension is squeezed out of the returned array.

    Returns
    -------
    numpy.ma.MaskedArray
        The dB-transformed precipitation field(s); invalid pixels masked.
    """
    # Selected case
    date = datetime.strptime("201505151630", "%Y%m%d%H%M")
    # All archive layout/importer details come from the "mch" entry of the
    # pysteps configuration (rcparams).
    data_source = rcparams.data_sources["mch"]

    root_path = data_source["root_path"]
    path_fmt = data_source["path_fmt"]
    fn_pattern = data_source["fn_pattern"]
    fn_ext = data_source["fn_ext"]
    importer_name = data_source["importer"]
    importer_kwargs = data_source["importer_kwargs"]

    # Find the input files from the archive (5-minute timestep assumed for
    # the mch archive).
    fns = io.archive.find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext,
                                  timestep=5, num_prev_files=num_prev_files)

    # Read the radar composites
    importer = io.get_method(importer_name, "importer")
    reference_field, quality, metadata = io.read_timeseries(fns, importer,
                                                            **importer_kwargs)

    del quality  # Not used

    if num_prev_files == 0:
        reference_field = np.squeeze(reference_field)  # Remove time dimension

    # Convert to mm/h
    reference_field, metadata = stp.utils.to_rainrate(reference_field, metadata)

    # Mask invalid values
    reference_field = np.ma.masked_invalid(reference_field)

    # Log-transform the data [dBR]; values below 0.1 mm/h map to -15 dBR.
    reference_field, metadata = stp.utils.dB_transform(reference_field,
                                                       metadata,
                                                       threshold=0.1,
                                                       zerovalue=-15.0)
    return reference_field
def smart_assert(actual_value, expected, tolerance=None):
    """
    Assert by equality for non-numeric values, or by approximation otherwise.

    If the tolerance keyword is None, assert by equality.
    When the tolerance is not None, assert that two numeric values
    (or two sets of numbers) are equal to each other within the given
    relative tolerance.
    """
    if tolerance is None:
        assert actual_value == expected
    else:
        # Compare numbers up to the requested precision.
        # BUG FIX: the original hard-coded 1e-6 here, silently ignoring
        # the ``tolerance`` argument (which the docstring promised to use).
        assert actual_value == pytest.approx(expected, tolerance)
| 33.388889
| 80
| 0.640599
| 297
| 2,404
| 4.996633
| 0.427609
| 0.09434
| 0.032345
| 0.01752
| 0.040431
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012644
| 0.276206
| 2,404
| 71
| 81
| 33.859155
| 0.84023
| 0.270799
| 0
| 0
| 0
| 0
| 0.052199
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 1
| 0.058824
| false
| 0
| 0.294118
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b934cd0c4d4115b02def19c6bd570d1877b158cd
| 3,598
|
py
|
Python
|
modules/courses/courses.py
|
ehiller/mobilecsp-v18
|
a59801c44c616d30f5e916d6771e479c8a9e88f7
|
[
"Apache-2.0"
] | null | null | null |
modules/courses/courses.py
|
ehiller/mobilecsp-v18
|
a59801c44c616d30f5e916d6771e479c8a9e88f7
|
[
"Apache-2.0"
] | null | null | null |
modules/courses/courses.py
|
ehiller/mobilecsp-v18
|
a59801c44c616d30f5e916d6771e479c8a9e88f7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from common import resource
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import resources_display
from models import custom_modules
from models import roles
from tools import verify
# Permission names and their human-readable descriptions, registered with
# the roles system via ``permissions_callback`` in ``register_module``.
# NOTE(review): ``All_LOCALES_*`` breaks the UPPER_SNAKE_CASE constant
# convention, but renaming it could break external users of the permission.
All_LOCALES_PERMISSION = 'can_pick_all_locales'
All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.'
SEE_DRAFTS_PERMISSION = 'can_see_draft_content'
SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.'
# Module handle; populated by ``register_module`` at registration time.
custom_module = None
def can_pick_all_locales(app_context):
    """Return whether the current user may select any locale, even unavailable ones."""
    allowed = roles.Roles.is_user_allowed(
        app_context, custom_module, All_LOCALES_PERMISSION)
    return allowed
def can_see_drafts(app_context):
    """Return whether the current user may see draft lessons and assessments."""
    allowed = roles.Roles.is_user_allowed(
        app_context, custom_module, SEE_DRAFTS_PERMISSION)
    return allowed
def register_module():
    """Registers this module in the registry.

    Builds the course routing table, wires the content parser, and creates
    the module object (stored in the module-level ``custom_module``).

    Returns:
        The newly created custom_modules.Module instance.
    """

    def on_module_enabled():
        # Runs once the module is enabled: register this module's
        # permissions and the resource types it exposes.
        roles.Roles.register_permissions(custom_module, permissions_callback)
        resource.Registry.register(resources_display.ResourceCourseSettings)
        resource.Registry.register(resources_display.ResourceUnit)
        resource.Registry.register(resources_display.ResourceAssessment)
        resource.Registry.register(resources_display.ResourceLink)
        resource.Registry.register(resources_display.ResourceLesson)
        resource.Registry.register(utils.ResourceHtmlHook)

    def permissions_callback(unused_app_context):
        # Permissions this module contributes to the roles system.
        return [
            roles.Permission(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION),
            roles.Permission(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION)
        ]

    # provide parser to verify
    verify.parse_content = content.parse_string_in_scope

    # setup routes: URL suffix -> handler class, mounted per course.
    courses_routes = [
        ('/', lessons.CourseHandler),
        ('/activity', lessons.UnitHandler),
        ('/answer', assessments.AnswerHandler),
        ('/assessment', lessons.AssessmentHandler),
        ('/course', lessons.CourseHandler),
        ('/forum', utils.ForumHandler),
        ('/preview', utils.PreviewHandler),
        ('/register', utils.RegisterHandler),
        ('/resources', utils.ResourcesHandler),
        ('/rest/locale', utils.StudentLocaleRESTHandler),
        ('/review', lessons.ReviewHandler),
        ('/reviewdashboard', lessons.ReviewDashboardHandler),
        ('/student/editstudent', utils.StudentEditStudentHandler),
        ('/student/settracks', utils.StudentSetTracksHandler),
        ('/student/home', utils.StudentProfileHandler),
        ('/student/unenroll', utils.StudentUnenrollHandler),
        ('/unit', lessons.UnitHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Course',
        'A set of pages for delivering an online course.',
        [], courses_routes,
        notify_module_enabled=on_module_enabled)
    return custom_module
| 36.714286
| 78
| 0.735686
| 399
| 3,598
| 6.443609
| 0.418546
| 0.031116
| 0.056009
| 0.064177
| 0.125243
| 0.047452
| 0.047452
| 0.047452
| 0.047452
| 0.047452
| 0
| 0.002698
| 0.175931
| 3,598
| 97
| 79
| 37.092784
| 0.864418
| 0.193719
| 0
| 0.032787
| 0
| 0
| 0.140522
| 0.014609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.147541
| 0.04918
| 0.295082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9355080468a287acd9198671ea28f44a47c9a46
| 2,389
|
py
|
Python
|
test/IECoreMaya/ImageConverterTest.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 386
|
2015-01-02T11:10:43.000Z
|
2022-03-10T15:12:20.000Z
|
test/IECoreMaya/ImageConverterTest.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 484
|
2015-01-09T18:28:06.000Z
|
2022-03-31T16:02:04.000Z
|
test/IECoreMaya/ImageConverterTest.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 99
|
2015-01-28T23:18:04.000Z
|
2022-03-27T00:59:39.000Z
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreImage
import IECoreMaya
class ImageConverterTest( IECoreMaya.TestCase ) :

	def test( self ) :
		"""Round-trip an EXR image through Maya's MImage and verify it survives."""

		# Load the reference image via the cortex Reader registry.
		imageA = IECore.Reader.create( "test/IECoreImage/data/exr/colorBarsWithAlpha.exr" ).read()

		# Convert cortex -> Maya.
		# NOTE(review): only ``maya.cmds`` is imported at the top of this
		# file; this line relies on ``maya.OpenMaya`` already being loaded
		# as an attribute of the ``maya`` package -- confirm it is imported
		# elsewhere in the test environment.
		toMaya = IECoreMaya.ToMayaImageConverter( imageA )
		mImage = maya.OpenMaya.MImage()
		toMaya.convert( mImage )

		# Convert Maya -> cortex and require the per-pixel difference to
		# stay below one 8-bit colour step (1/256).
		fromMaya = IECoreMaya.FromMayaImageConverter( mImage )
		imageB = fromMaya.convert()
		self.assertFalse(
			IECoreImage.ImageDiffOp()( imageA=imageA, imageB=imageB, maxError=1.0/256 ).value
		)
if __name__ == "__main__":
	# Run the test suite when this file is executed directly.
	IECoreMaya.TestProgram()
| 37.920635
| 92
| 0.706153
| 291
| 2,389
| 5.769759
| 0.546392
| 0.028588
| 0.02025
| 0.027397
| 0.109589
| 0.081001
| 0.081001
| 0.081001
| 0.081001
| 0.081001
| 0
| 0.004555
| 0.172876
| 2,389
| 62
| 93
| 38.532258
| 0.845142
| 0.65676
| 0
| 0
| 0
| 0
| 0.087637
| 0.075117
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b93889b31eb8ffef50e08b669fe2f20c16f4d959
| 1,628
|
py
|
Python
|
tests/test_common.py
|
ColinKennedy/ways
|
1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3
|
[
"MIT"
] | 2
|
2019-11-10T18:35:38.000Z
|
2020-05-12T10:37:42.000Z
|
tests/test_common.py
|
ColinKennedy/ways
|
1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3
|
[
"MIT"
] | 5
|
2017-11-27T18:05:25.000Z
|
2021-06-01T21:57:48.000Z
|
tests/test_common.py
|
ColinKennedy/ways
|
1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3
|
[
"MIT"
] | 1
|
2017-11-27T17:54:53.000Z
|
2017-11-27T17:54:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Make sure that generic functions work exactly as we expect.'''
# IMPORT STANDARD LIBRARIES
import unittest
# IMPORT WAYS LIBRARIES
from ways import common
class ParseTestCase(unittest.TestCase):

    '''Test generic parsing-related functions.'''

    def test_working_0001(self):
        '''Test that correct input for expand_string works as expected.'''
        template = '/jobs/{JOB}/some_kind/{THING}/real_folders'
        path = '/jobs/some_job_here/some_kind/of/real_folders'
        self.assertEqual(
            {'JOB': 'some_job_here', 'THING': 'of'},
            common.expand_string(template, path))

    def test_working_0002(self):
        '''Test that correct input for expand_string works as expected.'''
        name = 'NAME_010'
        template = '{SHOT}_{ID}'
        self.assertEqual(
            {'SHOT': 'NAME', 'ID': '010'},
            common.expand_string(template, name))

    def test_expand_string_failure_0001(self):
        '''Force expand_string fails to prevent a bad match from occurring.'''
        path = '/jobs/some_job/some_kind/of/real_folders'
        template = '/jobs/{JOB}/some_kind/of/real_folders/inner'
        self.assertFalse(common.expand_string(template, path))

    def test_expand_string_failure_0002(self):
        '''Force expand_string fails to prevent a bad match from occurring.'''
        path = '/jobs/some_job/some_kind/of/real_folders'
        template = '/jobs/{JOB}/some_kind/{SHOTNAME}/real_folders/inner'
        self.assertFalse(common.expand_string(template, path))
| 35.391304
| 84
| 0.686732
| 212
| 1,628
| 5.056604
| 0.339623
| 0.11194
| 0.051306
| 0.052239
| 0.622201
| 0.540112
| 0.538246
| 0.427239
| 0.427239
| 0.427239
| 0
| 0.017411
| 0.188575
| 1,628
| 45
| 85
| 36.177778
| 0.794095
| 0.271499
| 0
| 0.190476
| 0
| 0
| 0.274306
| 0.226563
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.190476
| false
| 0
| 0.095238
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b93a4101b4ff85c90fbde08405fbe7515b2816bd
| 17,093
|
py
|
Python
|
bot/jobs/thorchain_node_jobs.py
|
block42-blockchain-company/thornode-telegram-bot
|
6478b1eb41e36c5fdd327b963b55343de1ce5337
|
[
"MIT"
] | 15
|
2020-04-21T07:51:26.000Z
|
2021-11-02T05:45:48.000Z
|
bot/jobs/thorchain_node_jobs.py
|
block42-blockchain-company/thornode-telegram-bot
|
6478b1eb41e36c5fdd327b963b55343de1ce5337
|
[
"MIT"
] | 78
|
2020-04-13T23:01:16.000Z
|
2021-05-09T11:46:25.000Z
|
bot/jobs/thorchain_node_jobs.py
|
block42-blockchain-company/thornode-telegram-bot
|
6478b1eb41e36c5fdd327b963b55343de1ce5337
|
[
"MIT"
] | 5
|
2020-09-03T21:19:16.000Z
|
2021-11-20T00:17:56.000Z
|
from constants.messages import get_node_health_warning_message, get_node_healthy_again_message
from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users
from packaging import version
from service.utils import *
def check_thornodes(context):
    """Periodic job: poll every THORNode a chat monitors and send alerts.

    For each node stored in the chat's data: fetch the remote state, notify
    (and forget the node) when it no longer exists, otherwise send a change
    notification subject to an exponential-backoff timeout, and run the
    health sub-checks for nodes in a monitored status.
    """
    chat_id = context.job.context['chat_id']
    chat_data = context.job.context['chat_data']

    # Addresses that disappeared remotely; deleted after the loop so the
    # dict is not mutated while iterating.
    inactive_nodes = []

    for node_address, local_node in chat_data.get('nodes', {}).items():
        try:
            remote_node = get_thornode_object_or_none(address=node_address)
        except HTTPError as e:
            # Transient API failure: skip this node, retry on the next run.
            logger.exception(e)
            continue

        if remote_node is None:
            # Node vanished from the network: tell the user and schedule
            # the local entry for removal.
            text = 'THORNode ' + local_node['alias'] + ' is not active anymore! 💀' + '\n' + \
                   'Address: ' + node_address + '\n\n' + \
                   'Please enter another THORNode address.'
            inactive_nodes.append(node_address)
            try_message_with_home_menu(context=context,
                                       chat_id=chat_id,
                                       text=text)
            continue

        # Notification throttling: only notify when the per-node timeout
        # since the last notification has elapsed.
        is_not_blocked = float(local_node['last_notification_timestamp']) < \
            datetime.timestamp(
                datetime.now() - timedelta(seconds=local_node['notification_timeout_in_seconds']))
        if is_not_blocked:
            message = build_notification_message_for_active_node(local_node, remote_node, context)
            if message:
                # Update data
                local_node['status'] = remote_node['status']
                local_node['bond'] = remote_node['bond']
                local_node['slash_points'] = remote_node['slash_points']
                local_node['ip_address'] = remote_node['ip_address']
                local_node['last_notification_timestamp'] = datetime.timestamp(datetime.now())
                # Back off: each successive notification lengthens the
                # timeout multiplicatively.
                local_node['notification_timeout_in_seconds'] *= NOTIFICATION_TIMEOUT_MULTIPLIER
                try_message_with_home_menu(context=context,
                                           chat_id=chat_id,
                                           text=message)
            else:
                # Nothing noteworthy changed: reset the backoff.
                local_node['notification_timeout_in_seconds'] = INITIAL_NOTIFICATION_TIMEOUT

        # Deeper health checks only for nodes in a monitored status and
        # only when the node answers at all.
        if local_node['status'].upper() in MONITORED_STATUSES and is_thornode_healthy(context, node_address):
            check_thorchain_block_height(context, node_address=node_address)
            check_thorchain_catch_up_status(context, node_address=node_address)
            check_thorchain_midgard_api(context, node_address=node_address)

    for node_address in inactive_nodes:
        del chat_data['nodes'][node_address]
def build_notification_message_for_active_node(local_node, remote_node, context) -> [str, None]:
    """Build the change-notification text for a monitored node.

    Compares status, bond and slash points between the locally cached and
    the freshly fetched node record.  Returns the formatted message, or
    None when nothing changed (or only slash points moved by no more than
    the configured threshold).
    """
    changed_fields = [
        field for field in ['status', 'bond', 'slash_points']
        if local_node[field] != remote_node[field]
    ]

    threshold = get_slash_points_threshold(context)
    slash_point_change = abs(int(local_node['slash_points']) - int(remote_node['slash_points']))

    # Suppress noise: a slash-point-only change within the threshold is not
    # worth a notification.
    if (len(changed_fields) <= 1) and ('slash_points' in changed_fields) and (slash_point_change <= threshold):
        return None

    if len(changed_fields) > 0:
        # Old value always shown; "➡️ new value" appended per changed field.
        text = f"THORNode: {local_node['alias']}\n" \
               f"Address: {local_node['node_address']}\n" \
               f"Status: {local_node['status'].capitalize()}"
        if 'status' in changed_fields:
            text += f' ➡️ {remote_node["status"].capitalize()}'
        text += f"\nBond: {tor_to_rune(int(local_node['bond']))}"
        if 'bond' in changed_fields:
            text += f" ➡️ {tor_to_rune(int(remote_node['bond']))}"
        text += '\nSlash Points: ' + '{:,}'.format(int(local_node['slash_points']))
        if 'slash_points' in changed_fields:
            text += ' ➡️ ' + '{:,}'.format(int(remote_node['slash_points']))
        return text
    else:
        return None
def check_versions_status(context):
    """Periodic job: warn the user when monitored nodes run outdated software.

    Determines the highest software version among all network node accounts
    and, when it is newer than the last one seen for this chat, messages
    the user for each of their nodes running an older version.
    """
    chat_data = context.job.context['chat_data']

    try:
        node_accounts = get_node_accounts()
    except Exception as e:
        logger.exception(e)
        logger.error("I couldn't get the node accounts while checking version status.")
        return

    # Highest semantic version currently running anywhere on the network.
    highest_version = max(map(lambda n: n['version'], node_accounts),
                          key=lambda v: version.parse(v))
    last_newest_version = chat_data.get('newest_software_version', None)

    # Only notify once per new highest version (state kept per chat).
    if last_newest_version is None or version.parse(
            highest_version) > version.parse(last_newest_version):
        chat_data['newest_software_version'] = highest_version
        for node in chat_data.get('nodes', {}).values():
            if version.parse(node['version']) < version.parse(highest_version):
                message = f"Consider updating the software on your node: *{node['alias']}* ‼️\n" \
                          f"Your software version is *{node['version']}* " \
                          f"but one of the nodes already runs on *{highest_version}*"
                try_message_with_home_menu(
                    context,
                    chat_id=context.job.context['chat_id'],
                    text=message)
def check_churning(context):
    """Periodic job: detect a THORChain churn and broadcast a summary.

    Compares each validator's current status with the snapshot stored in
    ``context.bot_data['node_statuses']``.  On a churn, sends every user a
    summary of nodes added/removed, network security metrics, and any vault
    address changes, then refreshes the snapshot.
    """
    try:
        validators = get_node_accounts()
    except Exception as e:
        logger.exception(e)
        logger.error("I couldn't get the node accounts while checking if churning occurred.")
        return

    # First run: just record the current statuses, nothing to compare yet.
    if 'node_statuses' not in context.bot_data:
        context.bot_data['node_statuses'] = {}
        for validator in validators:
            context.bot_data['node_statuses'][
                validator['node_address']] = validator['status']
        return

    local_node_statuses = context.bot_data['node_statuses']

    churned_in = []
    churned_out = []
    # Newest 'status_since' among validators that show a churn transition;
    # used as the cutoff for what counts as part of this churn.
    highest_churn_status_since = 0
    for validator in validators:
        if did_churn_happen(validator, local_node_statuses, highest_churn_status_since):
            highest_churn_status_since = int(validator['status_since'])

    # Classify every status change against the stored snapshot.
    for validator in validators:
        remote_status = validator['status']
        local_status = local_node_statuses[
            validator['node_address']] if validator[
                'node_address'] in local_node_statuses else "unknown"
        if remote_status != local_status:
            if 'active' == remote_status:
                churned_in.append({
                    "address": validator['node_address'],
                    "bond": validator['bond']
                })
            elif 'active' == local_status:
                churned_out.append({
                    "address": validator['node_address'],
                    "bond": validator['bond']
                })

    if len(churned_in) or len(churned_out):
        text = "🔄 CHURN SUMMARY\n" \
               "THORChain has successfully churned:\n\n"
        text += "Nodes Added:\n" if len(churned_in) else ""
        for node in churned_in:
            text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n"
        text += "\nNodes Removed:\n" if len(churned_out) else ""
        for node in churned_out:
            text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n"
        text += "\nSystem:\n"

        try:
            network = get_network_data()
            # NOTE(review): the '{:.2f}'.format(int(...)) below formats an
            # already-truncated int with two decimals -- presumably
            # intentional presentation, but worth confirming.
            text += f"📡 Network Security: *{network_security_ratio_to_string(get_network_security_ratio(network))}*\n\n" \
                    f"💚 Total Active Bond: *{tor_to_rune(network['bondMetrics']['totalActiveBond'])}* (total)\n\n" \
                    "⚖️ Bonded/Staked Ratio: *" + '{:.2f}'.format(
                        int(get_network_security_ratio(network) * 100)) + " %*\n\n" \
                    "↩️ Bonding ROI: *" + '{:.2f}'.format(
                        float(network['bondingAPY']) * 100) + " %* APY\n\n" \
                    "↩️ Liquidity ROI: *" + '{:.2f}'.format(
                        float(network['liquidityAPY']) * 100) + " %* APY"

            context.bot_data.setdefault("vault_addresses", {})
            current_chains = get_pool_addresses_from_any_node()
            for chain in current_chains:
                # Known chain: report the old/new address if it changed;
                # warn loudly if a churn left the vault address unchanged.
                if chain['chain'] in context.bot_data['vault_addresses']:
                    if chain['address'] != context.bot_data['vault_addresses'][chain['chain']]:
                        text += f"\n\n🔐 Vault Addresses:" if "Vault Addresses" not in text else ""
                        text += f"\n*{chain['chain']}*: \n" \
                                f"Old Vault address: {context.bot_data['vault_addresses'][chain['chain']]}\n"\
                                f"⬇️\n" \
                                f"New Vault address: {chain['address']}\n"
                    else:
                        text += "\n\n⚠️ 🚨 CHURNING BUT THE VAULT ADDRESSES DID NOT CHANGE 🚨\n"
                # Always record the latest address (also for new chains).
                context.bot_data['vault_addresses'][chain['chain']] = chain['address']
        except Exception as e:
            # Best effort: the churn summary still goes out without the
            # system/vault section on failure.
            logger.exception(e)

        try_message_to_all_users(context, text=text)

    # Refresh the snapshot for the next comparison.
    for validator in validators:
        context.bot_data['node_statuses'][
            validator['node_address']] = validator['status']
def did_churn_happen(validator, local_node_statuses, highest_churn_status_since) -> bool:
    """Return True when this validator went through a churn transition.

    A churn is detected when the validator's status change is newer than
    ``highest_churn_status_since`` and the status either moved from
    'ready' to 'active' (churned in) or from 'active' to 'standby'
    (churned out).
    """
    current_status = validator['status']
    previous_status = local_node_statuses.get(validator['node_address'], "unknown")

    if int(validator['status_since']) <= highest_churn_status_since:
        return False

    became_active = previous_status == 'ready' and current_status == 'active'
    left_active = previous_status == 'active' and current_status == 'standby'
    return became_active or left_active
def is_thornode_healthy(context, node_address) -> bool:
    """Check whether a monitored THORNode answers, notifying on transitions.

    Returns True when the node responded to a block-height query, False
    otherwise.  A message is only sent when the health state flips
    (healthy -> unhealthy or back), so users are not spammed.
    """
    chat_id = context.job.context['chat_id']
    node_data = context.job.context['chat_data']['nodes'][node_address]

    # If not initialized, assume the node was healthy.
    if "healthy" not in node_data:
        node_data["healthy"] = True
    was_healthy = node_data["healthy"]

    try:
        # Check whether the node answers. If it doesn't we get an Exception.
        get_latest_block_height(node_data['ip_address'])
        if not was_healthy:
            try_message_with_home_menu(context=context, chat_id=chat_id,
                                       text=get_node_healthy_again_message(node_data))
        node_data["healthy"] = True
        return True
    except Exception:
        # FIX: the original caught (Timeout, ConnectionError,
        # BadStatusException, Exception) -- a redundant tuple, since
        # Exception already subsumes the others. Behavior is unchanged:
        # any failure marks the node unhealthy.
        if was_healthy:
            try_message_with_home_menu(context=context, chat_id=chat_id,
                                       text=get_node_health_warning_message(node_data))
        node_data["healthy"] = False
        return False
def check_thorchain_block_height(context, node_address):
    """Notify the user when a node's block height stops increasing,
    and again when it recovers.

    Tracks the last seen height and a stuck counter in the node's chat
    data; the alert is sent exactly once per stuck episode (on the first
    non-increasing observation), and the recovery message once the height
    moves again.
    """
    chat_id = context.job.context['chat_id']
    node_data = context.job.context['chat_data']['nodes'][node_address]

    try:
        block_height = get_latest_block_height(node_data['ip_address'])
    except (Timeout, ConnectionError):
        logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
        return

    is_stuck = block_height <= node_data.setdefault('block_height', 0)
    block_height_stuck_count = node_data.setdefault("block_height_stuck_count", 0)

    if is_stuck:
        block_height_stuck_count += 1
        if block_height_stuck_count == 1:
            # BUG FIX: block_height is numeric here (it is compared with an
            # int above); the original concatenated it directly onto a str,
            # which raised TypeError on exactly the alert path.
            text = 'Block height is not increasing anymore! 💀' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address + '\n' + \
                   'Block height stuck at: ' + str(block_height) + '\n\n' + \
                   'Please check your Thornode immediately!'
            try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
    else:
        if block_height_stuck_count >= 1:
            text = f"Block height is increasing again! 👌\n" + \
                   f"IP: {node_data['ip_address']}\n" + \
                   f"THORNode: {node_data['alias']}\n" + \
                   f"Node address: {node_address}\n" + \
                   f"Block height now at: {block_height}\n"
            try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
        block_height_stuck_count = 0

    node_data['block_height'] = block_height
    node_data["block_height_stuck_count"] = block_height_stuck_count
def check_solvency_job(context):
    """Periodic job: broadcast a solvency alert or recovery message if one is due."""
    notification = check_solvency(context)
    if notification:
        try_message_to_all_users(context, text=notification)
def check_solvency(context) -> [str, None]:
    """Check Asgard and Yggdrasil vault solvency and build a message if needed.

    Returns an alert message once insolvency has persisted for exactly
    MISSING_FUNDS_THRESHOLD consecutive checks, a recovery message when
    solvency returns after such an episode, and None otherwise.  The
    consecutive-insolvency counter is persisted in ``context.bot_data``.
    """
    try:
        asgard_solvency = asgard_solvency_check()
        yggdrasil_solvency = yggdrasil_solvency_check()
    except (Timeout, ConnectionError):
        logger.warning(f"Timeout or Connection error while querying Asgard and Yggdrasil.")
        return None
    except Exception as e:
        logger.exception(e)
        return None

    is_solvent = asgard_solvency['is_solvent'] and yggdrasil_solvency['is_solvent']
    insolvency_count = context.bot_data.setdefault("insolvency_count", 0)

    message = None
    if not is_solvent:
        insolvency_count += 1
        # Alert only once, when the threshold is first reached (== not >=),
        # to avoid repeating the alert on every subsequent check.
        if insolvency_count == MISSING_FUNDS_THRESHOLD:
            message = 'THORChain is *missing funds*! 💀\n\n'
            message += get_insolvent_balances_message(asgard_solvency, yggdrasil_solvency)
    else:
        # Recovery message only if the previous episode actually alerted.
        if insolvency_count >= MISSING_FUNDS_THRESHOLD:
            message = 'THORChain is *100% solvent* again! 👌\n'
        insolvency_count = 0

    context.bot_data["insolvency_count"] = insolvency_count
    return message
def check_thorchain_catch_up_status(context, node_address):
    """
    Check if node is some blocks behind with catch up status.

    Notifies the user only when the catching-up state flips, including the
    current block height in the message when it can be fetched.
    """
    chat_id = context.job.context['chat_id']
    node_data = context.job.context['chat_data']['nodes'][node_address]

    if 'is_catching_up' not in node_data:
        node_data['is_catching_up'] = False

    try:
        is_currently_catching_up = is_thorchain_catching_up(
            node_data['ip_address'])
    except (Timeout, ConnectionError):
        logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
        return

    if node_data['is_catching_up'] != is_currently_catching_up:
        try:
            block_height = get_latest_block_height(node_data['ip_address'])
        except (Timeout, ConnectionError):
            logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
            block_height = "currently unavailable"

        # BUG FIX: block_height is numeric when the fetch succeeds, so the
        # original str concatenation raised TypeError in exactly the normal
        # case (it only worked with the "currently unavailable" fallback).
        # str() is a no-op for the fallback string and fixes the numeric case.
        if is_currently_catching_up:
            node_data['is_catching_up'] = True
            text = 'The Node is behind the latest block height and catching up! 💀 ' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address + '\n' + \
                   'Current block height: ' + str(block_height) + '\n\n' + \
                   'Please check your Thornode immediately!'
        else:
            node_data['is_catching_up'] = False
            text = 'The node caught up to the latest block height again! 👌' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address + '\n' + \
                   'Current block height: ' + str(block_height)
        try_message_with_home_menu(context=context, chat_id=chat_id, text=text)
def check_thorchain_midgard_api(context, node_address):
    """
    Check that Midgard API is ok
    """
    chat_id = context.job.context['chat_id']
    node_data = context.job.context['chat_data']['nodes'][node_address]

    was_healthy = node_data.setdefault('is_midgard_healthy', True)
    is_midgard_healthy = is_midgard_api_healthy(node_data['ip_address'])

    # Notify only when the health state flips.
    if was_healthy != is_midgard_healthy:
        node_details = 'IP: ' + node_data['ip_address'] + '\n' + \
                       'THORNode: ' + node_data['alias'] + '\n' + \
                       'Node address: ' + node_address
        if is_midgard_healthy:
            text = 'Midgard API is healthy again! 👌' + '\n' + node_details
        else:
            text = 'Midgard API is not healthy anymore! 💀' + '\n' + \
                   node_details + '\n\n' + \
                   'Please check your Thornode immediately!'
        try_message_with_home_menu(context, chat_id=chat_id, text=text)

    node_data['is_midgard_healthy'] = is_midgard_healthy
| 43.164141
| 126
| 0.600889
| 1,995
| 17,093
| 4.870677
| 0.123308
| 0.054338
| 0.022744
| 0.034579
| 0.560049
| 0.47103
| 0.427601
| 0.377997
| 0.329526
| 0.300401
| 0
| 0.002206
| 0.2838
| 17,093
| 395
| 127
| 43.273418
| 0.78876
| 0.01211
| 0
| 0.369281
| 0
| 0
| 0.22935
| 0.052872
| 0.026144
| 0
| 0
| 0
| 0
| 1
| 0.035948
| false
| 0
| 0.013072
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b93b8add4495a7de42fb7a036f7ba8c5ddea0d87
| 1,508
|
py
|
Python
|
pantam_cli/utils/messages.py
|
flmnt/pantam
|
da47d977e69ec410d0642b5ade1f2323c1b6b350
|
[
"MIT"
] | 2
|
2020-10-04T10:29:43.000Z
|
2021-03-30T13:45:09.000Z
|
pantam_cli/utils/messages.py
|
flmnt/pantam
|
da47d977e69ec410d0642b5ade1f2323c1b6b350
|
[
"MIT"
] | null | null | null |
pantam_cli/utils/messages.py
|
flmnt/pantam
|
da47d977e69ec410d0642b5ade1f2323c1b6b350
|
[
"MIT"
] | null | null | null |
from sys import stderr, stdout
from enum import Enum
from colored import fg, attr
# Styled "PANTAM" banner fragment (bold yellow).
PANTAM: str = fg("yellow") + attr("bold") + "PANTAM" + attr("reset")


# Fix: PEP 8 (E731) — do not assign lambda expressions; use named functions.
def colour_msg(msg: str, colour: str) -> str:
    """Return msg wrapped in the given bold foreground colour."""
    return fg(colour) + attr("bold") + msg + attr("reset")


def info_msg(msg: str) -> str:
    """Return msg styled as an informational (blue) message."""
    return colour_msg(msg, "blue")


def success_msg(msg: str) -> str:
    """Return msg styled as a success (green) message."""
    return colour_msg(msg, "green")


def error_msg(msg: str) -> str:
    """Return msg styled as an error (red) message."""
    return colour_msg(msg, "red")
class NewLine(Enum):
    """Where to pad a written message with a blank line (see write_msg)."""
    before = 1
    after = 2
    both = 3
def write_msg(msg: str, spacing: NewLine = None) -> None:
    """Write message to stdout, optionally padded with leading/trailing newlines."""
    lead: str = "\n" if spacing in (NewLine.before, NewLine.both) else ""
    tail: str = "\n" if spacing in (NewLine.after, NewLine.both) else ""
    stdout.write(f"{lead}{msg}{tail}")
def write_error(msg: str) -> None:
    """Write message to stderr, surrounded by newlines."""
    stderr.write(f"\n{msg}\n")
# Fix: PEP 8 (E731) — replace assigned lambda with a named function.
def welcome_msg() -> str:
    """Return the PANTAM banner followed by the welcome tagline."""
    return PANTAM + (
        "\n"
        "The microframework for microservices.\n"
        "Let's build your app...\n"
    )
# Fix: PEP 8 (E731) — replace assigned lambdas with named functions.
def name_index_file_msg() -> str:
    """Prompt for the name of the main script."""
    return "What is the name of your main script?"


def name_actions_folder_msg() -> str:
    """Prompt for the name of the actions folder."""
    return "What is the name of your actions folder?"
def create_actions_file_msg(second_run: bool):
    """Prompt for creating an (or another) action file."""
    article = "another" if second_run else "an"
    return f"Do you want to create {article} action file?"
# Fix: PEP 8 (E731) — replace assigned lambda with a named function.
def name_actions_file_msg() -> str:
    """Prompt for the name of the actions file."""
    return "What is the name of your actions file?"
# Fix: PEP 8 (E731) — replace assigned lambda with a named function.
def confirm_structure_msg(structure: str) -> str:
    """Render the proposed application structure and ask for confirmation."""
    return (
        "Your application will look like this:\n%s\nHappy to proceed?" % structure
    )
| 24.322581
| 80
| 0.671088
| 223
| 1,508
| 4.421525
| 0.376682
| 0.08215
| 0.048682
| 0.073022
| 0.225152
| 0.225152
| 0.107505
| 0.107505
| 0.107505
| 0
| 0
| 0.002471
| 0.19496
| 1,508
| 61
| 81
| 24.721311
| 0.80972
| 0.045093
| 0
| 0
| 0
| 0
| 0.241573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.081081
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b93da1b1bbce8a3e5fafae55f093b2f5323fb641
| 2,510
|
py
|
Python
|
tests/manage/test_remove_mon_from_cluster.py
|
zmc/ocs-ci
|
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
|
[
"MIT"
] | null | null | null |
tests/manage/test_remove_mon_from_cluster.py
|
zmc/ocs-ci
|
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
|
[
"MIT"
] | null | null | null |
tests/manage/test_remove_mon_from_cluster.py
|
zmc/ocs-ci
|
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
|
[
"MIT"
] | null | null | null |
"""
A Testcase to remove mon from
when I/O's are happening.
Polarion-ID- OCS-355
"""
import logging
import pytest
from ocs_ci.ocs import ocp, constants
from ocs_ci.framework.testlib import tier4, ManageTest
from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from tests.helpers import run_io_with_rados_bench, delete_cephblockpool
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.exceptions import CephHealthException
log = logging.getLogger(__name__)
@retry(CephHealthException, 8, 3, 1)
def verify_mon_pod_up(ceph_cluster, pods):
    """
    Verify mon pods are in Running state.

    Retries (via @retry) while the cluster health check raises
    CephHealthException.

    Args:
        ceph_cluster: CephCluster handle used for the health check.
        pods: OCP pod resource wrapper used to wait for mon pods.

    Returns:
        bool: True for wait for the resource, False otherwise
    """
    # Fix: f-string without any placeholders (F541) — plain string suffices.
    log.info("Verifying all mons pods are up and Running")
    ceph_cluster.cluster_health_check(timeout=3)
    ret = pods.wait_for_resource(
        condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mon',
        resource_count=3, timeout=700)
    log.info(f"waited for all mon pod to come up and running {ret}")
    return ret
def run_io_on_pool():
    """
    Runs the I/O on the pool and delete the pool

    Returns: A thread of I/O
    """
    tools_pod = pod.get_ceph_tools_pod()
    tools_pod.add_role(role='client')
    # rados bench settings: 45s run, no cleanup, against 'test-pool'.
    bench_config = {'time': 45, 'cleanup': False, 'pool': 'test-pool'}
    return run_io_with_rados_bench(ceph_pods=[tools_pod], config=bench_config)
@tier4
@pytest.mark.polarion_id("OCS-355")
class TestRemoveMonFromCluster(ManageTest):
    def test_remove_mon_pod_from_cluster(self):
        """
        To remove mon pod from the cluster
        after the I/O is performed on the pool
        and waiting for the operator to create a
        new mon pod on its own
        """
        ceph_cluster = CephCluster()
        pods = ocp.OCP(
            kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
        )
        list_mons = ceph_cluster.get_mons_from_cluster()
        # Fix: the original `assert len(list_mons) > 1, pytest.skip(...)` only
        # calls pytest.skip() lazily as the assertion *message*, i.e. on
        # failure — an abuse of assert. Skip explicitly instead.
        if len(list_mons) <= 1:
            pytest.skip("INVALID: Mon count should be more than one to delete.")
        assert run_io_on_pool(), 'Failed to run I/O on the pool'
        assert delete_cephblockpool('test-pool'), 'Failed to delete pool'
        ceph_cluster.cluster_health_check(timeout=0)
        ceph_cluster.remove_mon_from_cluster()
        # Fix: f-string without placeholders (F541).
        assert verify_mon_pod_up(ceph_cluster, pods), "Mon pods are not up and running state"
        ceph_cluster.cluster_health_check(timeout=60)
| 29.529412
| 94
| 0.688446
| 367
| 2,510
| 4.504087
| 0.359673
| 0.053237
| 0.038113
| 0.029038
| 0.136721
| 0.100423
| 0.035088
| 0
| 0
| 0
| 0
| 0.011329
| 0.226295
| 2,510
| 84
| 95
| 29.880952
| 0.839856
| 0.156175
| 0
| 0
| 0
| 0
| 0.155104
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 1
| 0.06383
| false
| 0
| 0.212766
| 0
| 0.340426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b93f9ebd7406695d9627c10b5f85877c35692320
| 2,690
|
py
|
Python
|
smartystreets_python_sdk/us_autocomplete_pro/client.py
|
Caaz/smartystreets-python-sdk
|
f56cd00d29861bde297143c128f79a4b1d89541c
|
[
"Apache-2.0"
] | null | null | null |
smartystreets_python_sdk/us_autocomplete_pro/client.py
|
Caaz/smartystreets-python-sdk
|
f56cd00d29861bde297143c128f79a4b1d89541c
|
[
"Apache-2.0"
] | null | null | null |
smartystreets_python_sdk/us_autocomplete_pro/client.py
|
Caaz/smartystreets-python-sdk
|
f56cd00d29861bde297143c128f79a4b1d89541c
|
[
"Apache-2.0"
] | null | null | null |
from smartystreets_python_sdk import Request
from smartystreets_python_sdk.exceptions import SmartyException
from smartystreets_python_sdk.us_autocomplete_pro import Suggestion, geolocation_type
class Client:
    def __init__(self, sender, serializer):
        """
        It is recommended to instantiate this class using ClientBuilder.build_us_autocomplete_pro_api_client()
        """
        self.sender = sender
        self.serializer = serializer

    def send(self, lookup):
        """
        Sends a Lookup object to the US Autocomplete Pro API and stores the result in the Lookup's result field.
        """
        if not lookup or not lookup.search:
            raise SmartyException('Send() must be passed a Lookup with the search field set.')

        response = self.sender.send(self.build_request(lookup))
        if response.error:
            raise response.error

        payload = self.serializer.deserialize(response.payload)
        suggestions = self.convert_suggestions(payload.get('suggestions') or [])
        lookup.result = suggestions
        return suggestions

    def build_request(self, lookup):
        # Assemble all query parameters; add_parameter drops empty values.
        request = Request()
        parameters = (
            ('search', lookup.search),
            ('max_results', lookup.max_results),
            ('include_only_cities', self.build_filter_string(lookup.city_filter)),
            ('include_only_states', self.build_filter_string(lookup.state_filter)),
            ('include_only_zip_codes', self.build_filter_string(lookup.zip_filter)),
            ('exclude_states', self.build_filter_string(lookup.exclude)),
            ('prefer_cities', self.build_filter_string(lookup.prefer_cities)),
            ('prefer_states', self.build_filter_string(lookup.prefer_states)),
            ('prefer_zip_codes', self.build_filter_string(lookup.prefer_zips)),
            ('prefer_ratio', lookup.prefer_ratio),
            ('prefer_geolocation', lookup.prefer_geo),
            ('selected', lookup.selected),
        )
        for key, value in parameters:
            self.add_parameter(request, key, value)
        return request

    @staticmethod
    def build_filter_string(filter_list):
        # Comma-join the filter values; empty or None collapses to None.
        joined = ','.join(filter_list or [])
        return joined or None

    @staticmethod
    def convert_suggestions(suggestion_dictionaries):
        return [Suggestion(entry) for entry in suggestion_dictionaries]

    @staticmethod
    def add_parameter(request, key, value):
        # Skip falsy values and the literal string 'none'.
        if value and value != 'none':
            request.parameters[key] = value
| 42.03125
| 112
| 0.717472
| 320
| 2,690
| 5.784375
| 0.28125
| 0.084279
| 0.133441
| 0.149109
| 0.292274
| 0.198271
| 0.081037
| 0
| 0
| 0
| 0
| 0
| 0.195911
| 2,690
| 63
| 113
| 42.698413
| 0.855756
| 0.076952
| 0
| 0.069767
| 0
| 0
| 0.100164
| 0.009031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139535
| false
| 0.023256
| 0.069767
| 0.046512
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b94044f865f05e0aee9b401bba3907e01e40ff6c
| 11,578
|
py
|
Python
|
mssqlvc.py
|
Saritasa/mssqlvc
|
836caeea59cc0ed23234687b94062e007707c603
|
[
"BSD-2-Clause"
] | 2
|
2016-09-22T04:36:46.000Z
|
2018-07-31T21:36:42.000Z
|
mssqlvc.py
|
Saritasa/mssqlvc
|
836caeea59cc0ed23234687b94062e007707c603
|
[
"BSD-2-Clause"
] | 1
|
2016-02-02T07:58:29.000Z
|
2016-02-02T14:19:18.000Z
|
mssqlvc.py
|
krasninja/mssqlvc
|
836caeea59cc0ed23234687b94062e007707c603
|
[
"BSD-2-Clause"
] | 2
|
2016-09-21T09:48:44.000Z
|
2020-03-24T15:59:54.000Z
|
# -*- coding: utf-8 -*-
"""
mssqlvc
~~~~~~~
Database version control utility for Microsoft SQL Server. See README.md for more information.
Licensed under the BSD license. See LICENSE file in the project root for full license information.
"""
import argparse
import datetime
import io
import logging
import os
import re
import sys
import urlparse
try:
import clr
except ImportError:
print('Cannot import crl module, make sure you run this script using IronPython')
exit(2)
import System
clr.AddReference('Microsoft.SqlServer.Smo')
clr.AddReference('Microsoft.SqlServer.SqlEnum')
clr.AddReference('Microsoft.SqlServer.ConnectionInfo')
import Microsoft.SqlServer.Management.Smo as Smo
import Microsoft.SqlServer.Management.Common as Common
__author__ = 'Ivan Kozhin'
__copyright__ = 'Copyright (c) 2015-2016, Saritasa'
__license__ = 'BSD'
__version__ = '1.4.5'
__all__ = ['MsSqlVersion']
class ScriptExecutionError(Exception):
    """Raised by MsSqlVersion.update() when a patch fails and stop_on_error is set."""
    pass
class MsSqlVersion(object):
    """
    SQL Server patch migration class.

    Connects to SQL Server through SMO (requires IronPython with the
    Microsoft.SqlServer assemblies loaded at module level) and applies pending
    .sql patch files, recording each applied file in a `_patch_history` table.
    """

    class bcolors:
        # ANSI escape sequences used to colour console output.
        OKBLUE = '\033[94m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'

    def __init__(self, connection_string, patch_dir='.', exclude_pattern=None, logger=None,
                 stop_on_error=False, noexecute=False, case_insensitive=False, record_files_only=False):
        """
        Initialize instance with connection and database objects.

        :param connection_string: Connection string in rfc1738 url format
        :param patch_dir: Patch directory with .sql files
        :param exclude_pattern: String with regular expression the patch files should match
        :param logger: Logger that is used for logging
        :param stop_on_error: Stop execution on error, default behavior is to continue
        :param case_insensitive: Use case insensitive to compare patch files
        :param record_files_only: Only file names will be stored to patch table without folder paths

        NOTE(review): `noexecute` is accepted but never referenced inside this
        class — confirm whether it should gate execution or be removed.
        """
        url = urlparse.urlparse(connection_string)
        # No username in the URL means integrated (Windows) security.
        is_local_login = not url.username
        self.connection = Common.ServerConnection(LoginSecure=is_local_login, ServerInstance=url.hostname,
                                                  DatabaseName=url.path.replace('/', ''))
        if not is_local_login:
            self.connection.Login = url.username
            self.connection.Password = url.password
        self.server = Smo.Server(self.connection)
        self.database = self.server.Databases[self.connection.DatabaseName]
        self.server.ConnectionContext.ConnectTimeout = 90
        self.exclude_pattern = exclude_pattern
        self.patch_dir = patch_dir
        self.stop_on_error = stop_on_error
        self.case_insensitive = case_insensitive
        self.record_files_only = record_files_only
        self.executed_count = 0
        # NOTE(review): logging.NullHandler() is a handler, not a logger;
        # self.logger.info(...) would fail on it — confirm a real logger is
        # always supplied in practice.
        self.logger = logging.NullHandler() if not logger else logger
        if not os.path.exists(patch_dir):
            raise Exception('Patch folder does not exist')
        if 'mssql' not in connection_string:
            raise Exception('Wrong connection string, it should contain mssql word')
        exists = self._create_patch_table_if_not_exists(self.database)
        if not exists:
            self.logger.info('[%s] created _patch_history table' % (self.database.Name,))

    def __del__(self):
        # Best-effort disconnect when the object is garbage collected.
        if self.server:
            self.server.ConnectionContext.Disconnect()

    def update(self):
        """Executes database update process"""
        patches = self.get_pending_patches()
        self.logger.debug('Files to execute %s' % (patches,))
        for patch in patches:
            success = self.execute_file(patch)
            if success:
                self.executed_count += 1
                self.put_patch(patch)
            # Abort the whole run on the first failure when stop_on_error is set.
            if not success and self.stop_on_error:
                self.logger.critical(MsSqlVersion.bcolors.WARNING + 'Execution stopped. Please fix errors and try again.'
                                     + MsSqlVersion.bcolors.ENDC)
                raise ScriptExecutionError()
        self.logger.info('[%s] Executed %d patch(-es)' % (self.database.Name, self.executed_count))

    def fill(self):
        """Skip scripts execution but add them to patches table"""
        patches = self.get_pending_patches()
        for patch in patches:
            self.logger.info('Add file %s' % (patch,))
            self.put_patch(patch)

    def get_pending_patches(self):
        # Pending = all .sql files on disk minus those already recorded, sorted.
        applied_patches = self.get_applied_patches()
        if self.record_files_only:
            applied_patches = [os.path.basename(f) for f in applied_patches]
        patches = self._get_sql_files_from_dir(applied_patches)
        patches.sort()
        return patches

    def execute_file(self, file):
        """Executes file against database in transaction, returns True if success"""
        ret = True
        try:
            full_name = os.path.join(os.path.normpath(self.patch_dir), file)
            with io.open(full_name, 'r', encoding='utf8') as sql_file:
                sql = sql_file.read()
            self.logger.info('[%s] Executing %s...' % (self.database.Name, file))
            self.connection.BeginTransaction()
            self.database.ExecuteNonQuery(sql)
            self.connection.CommitTransaction()
        except Exception as e:
            self.connection.RollBackTransaction()
            self.logger.error('Exception on %s' % (file,))
            # .NET exceptions surface via e.clsException; unwrap two levels of
            # InnerException to reach the actual SQL error text.
            message = e.message or e
            if e.clsException.InnerException is not None and e.clsException.InnerException.InnerException is not None:
                message += ' ' + e.clsException.InnerException.InnerException.Message
            self.logger.error('[%s] %s (%s)' % (self.database.Name, full_name, message))
            ret = False
        return ret

    def put_patch(self, file):
        """Write record that file has been executed"""
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        if self.record_files_only:
            file = os.path.basename(file)
        # NOTE(review): the file name is interpolated into SQL unescaped; a
        # name containing a quote would break the statement.
        sql = 'insert [_patch_history] (name, applied_at) values(\'%s\', \'%s\');' % (file, now)
        self.database.ExecuteNonQuery(sql)

    def get_applied_patches(self):
        # Set of patch names already recorded in _patch_history.
        rows = self.database.ExecuteWithResults('select name from [_patch_history];').Tables[0].Rows
        return set([row['name'] for row in rows])

    def _get_sql_files_from_dir(self, exclude_list=[]):
        """Get all script files from directory"""
        # The mutable default is benign here: exclude_list is only read.
        _exclude_list = set(exclude_list) if not self.case_insensitive else [f.lower() for f in exclude_list]
        prevdir = os.getcwd()
        os.chdir(self.patch_dir)
        sql_files = []
        for root, dirs, files in os.walk('.'):
            for file in files:
                file = os.path.normpath(os.path.join(root, file))
                _file = file
                if self.case_insensitive:
                    _file = _file.lower()
                if self.record_files_only:
                    _file = os.path.basename(_file)
                # Skip already-applied files, non-.sql files and excluded patterns.
                if (_file in _exclude_list or not _file.lower().endswith('.sql') or
                        (self.exclude_pattern and re.search(self.exclude_pattern, file))):
                    continue
                sql_files.append(file)
        os.chdir(prevdir)
        return sql_files

    @staticmethod
    def _create_patch_table_if_not_exists(database):
        """Create patch table in database if not exists"""
        sql = 'select * from sys.objects where object_id = object_id(\'_patch_history\') AND type in (\'U\');'
        exists = database.ExecuteWithResults(sql).Tables[0].Rows.Count > 0
        if not exists:
            sql = """
            create table [_patch_history] (id int not null identity(1, 1), name varchar(100) not null,
            applied_at datetime not null);
            alter table [_patch_history] add constraint _patch_history_PK primary key clustered (id);
            """
            database.ExecuteNonQuery(sql)
        return exists
def get_cmd_line_parser():
    """Get initialized argparse.ArgumentParser object"""
    parser = argparse.ArgumentParser(
        description='MSSQL database patch history tool',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''Example: %(prog)s -c "mssql://sa:123@host\instance/database" -d "D:/1/project/patch"''')
    # Connection string is the only required argument.
    parser.add_argument('--connection', '-c',
                        required=True,
                        dest='connection',
                        action='store',
                        help='connection string in rfc1738 url format, required')
    parser.add_argument('--directory', '-d',
                        dest='directory',
                        action='store',
                        default='.',
                        help='directory with patch files')
    parser.add_argument('--log', '-l',
                        dest='log',
                        action='store',
                        help='log file')
    # Dry-run style options.
    parser.add_argument('--noexecute', '-n',
                        action='store_true',
                        dest='noexecute',
                        default=False,
                        help='displays pending script files with no execution')
    parser.add_argument('--noexecute-fill', '-nf',
                        action='store_true',
                        dest='noexecute_fill',
                        default=False,
                        help='displays pending script files with no execution and fills patch table')
    parser.add_argument('--stop-on-error', '-soe',
                        action='store_true',
                        dest='stop_on_error',
                        default=False,
                        help='stops execution if any script fails')
    parser.add_argument('--exclude-pattern', '-ep',
                        dest='exclude_pattern',
                        help='skips files match to regular expression')
    parser.add_argument('--record-files-only', '-rfo',
                        action='store_true',
                        dest='record_files_only',
                        default=False,
                        help='only file names will be stored to patch table without folder paths')
    parser.add_argument('--case-insensitive', '-ci',
                        action='store_true',
                        dest='case_insensitive',
                        default=False,
                        help='use case insensitive to compare patch files so "PatchName.sql" and "patchname.sql" is the same')
    parser.add_argument('--debug',
                        action='store_true',
                        dest='debug',
                        default=False,
                        help='enables debug output')
    parser.add_argument('--version', '-v',
                        action='version',
                        version='%(prog)s ' + __version__)
    return parser
if __name__ == '__main__':
    # parser
    parser = get_cmd_line_parser()
    parser_args = parser.parse_args()
    if parser_args.connection is None or parser_args.directory is None:
        parser.print_help()
        exit(1)

    # logging: optional file handler plus a console handler, same format.
    logger = logging.getLogger('mssql')
    if parser_args.log:
        fh = logging.FileHandler(parser_args.log)
        fh.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
        logger.addHandler(fh)
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
    logger.setLevel(logging.DEBUG if parser_args.debug else logging.INFO)
    logger.addHandler(ch)

    # database handle
    sqlvc = MsSqlVersion(parser_args.connection, parser_args.directory, exclude_pattern=parser_args.exclude_pattern,
                         stop_on_error=parser_args.stop_on_error, case_insensitive=parser_args.case_insensitive,
                         record_files_only=parser_args.record_files_only, logger=logger)
    # Dispatch: list pending, fill the patch table only, or run the update.
    if parser_args.noexecute:
        for patch in sqlvc.get_pending_patches():
            logger.info(' ' + patch)
    elif parser_args.noexecute_fill:
        sqlvc.fill()
    else:
        sqlvc.update()
| 39.515358
| 121
| 0.640266
| 1,379
| 11,578
| 5.201595
| 0.247281
| 0.019518
| 0.023003
| 0.015893
| 0.116548
| 0.087272
| 0.070263
| 0.059947
| 0.059947
| 0.059947
| 0
| 0.007732
| 0.251598
| 11,578
| 292
| 122
| 39.650685
| 0.820081
| 0.101226
| 0
| 0.131579
| 0
| 0.013158
| 0.2
| 0.012
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048246
| false
| 0.008772
| 0.061404
| 0
| 0.149123
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9408aacd4d750c790ebb27107e026e183ea1d35
| 4,296
|
py
|
Python
|
lib/python3.6/site-packages/statsmodels/iolib/tests/test_table_econpy.py
|
KshitizSharmaV/Quant_Platform_Python
|
d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39
|
[
"BSD-3-Clause"
] | 1
|
2020-05-09T08:42:52.000Z
|
2020-05-09T08:42:52.000Z
|
statsmodels/iolib/tests/test_table_econpy.py
|
yanzhenxiong/statsmodels
|
e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/iolib/tests/test_table_econpy.py
|
yanzhenxiong/statsmodels
|
e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0
|
[
"BSD-3-Clause"
] | 1
|
2020-05-09T08:42:58.000Z
|
2020-05-09T08:42:58.000Z
|
'''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_equal
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
# Shared table formats: start from the library defaults for latex/html and a
# fully customised txt format that the tests below exercise.
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
    data_fmts = ['%0.2f', '%d'],
    empty_cell = ' ',
    colwidths = 1,
    colsep=' * ',
    row_pre = '* ',
    row_post = ' *',
    table_dec_above='*',
    table_dec_below='*',
    header_dec_below='*',
    header_fmt = '%s',
    stub_fmt = '%s',
    title_align='r',
    header_align = 'r',
    data_aligns = "r",
    stubs_align = "l",
    fmt = 'txt'
)
# Fixture data: a 2x2 table with two stubs and two headers; column 0 is
# formatted as float ('%0.2f'), column 1 as int ('%d') per data_fmts above.
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
# Module-level table shared by the format tests (custom labeling builds its own).
tbl = SimpleTable(table1data, test1header, test1stubs,
                  txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
    """Label a cell 'missing' when its datum is the np.nan singleton, else None."""
    return 'missing' if cell.data is np.nan else None
class TestCell(object):
    def test_celldata(self):
        # Cells constructed from raw data keep their datum unchanged.
        celldata = (cell0data, cell1data, row1data[0], row1data[1])
        cells = [Cell(datum, datatype=idx % 2)
                 for idx, datum in enumerate(celldata)]
        for cell, datum in zip(cells, celldata):
            assert_equal(cell.data, datum)
class TestSimpleTable(object):
    # NOTE(review): the expected literals below depend on exact column padding
    # and may have been altered by whitespace normalization in this copy —
    # verify against SimpleTable output if a comparison fails.
    def test_txt_fmt1(self):
        # Limited test of custom txt_fmt
        desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
        actual = '\n%s\n' % tbl.as_text()
        #print('actual')
        #print(actual)
        #print('desired')
        #print(desired)
        assert_equal(actual, desired)

    def test_ltx_fmt1(self):
        # Limited test of custom ltx_fmt
        desired = r"""
\begin{center}
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
\end{center}
"""
        actual = '\n%s\n' % tbl.as_latex_tabular()
        #print(actual)
        #print(desired)
        assert_equal(actual, desired)

    def test_html_fmt1(self):
        # Limited test of custom html_fmt
        desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
        #the previous has significant trailing whitespace that got removed
        #desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n'''
        actual = '\n%s\n' % tbl.as_html()
        # Strip trailing whitespace per line before comparing (see note above).
        actual = '\n'.join((line.rstrip() for line in actual.split('\n')))
        #print(actual)
        #print(desired)
        #print len(actual), len(desired)
        assert_equal(actual, desired)

    def test_customlabel(self):
        # Limited test of custom custom labeling
        tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
        tbl[1][1].data = np.nan
        tbl.label_cells(custom_labeller)
        #print([[c.datatype for c in row] for row in tbl])
        desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
        actual = '\n%s\n' % tbl.as_text(missing='--')
        assert_equal(actual, desired)
| 30.041958
| 261
| 0.573091
| 542
| 4,296
| 4.415129
| 0.289668
| 0.02758
| 0.010029
| 0.028416
| 0.307564
| 0.263686
| 0.180527
| 0.164647
| 0.106979
| 0.058504
| 0
| 0.039162
| 0.23324
| 4,296
| 142
| 262
| 30.253521
| 0.68731
| 0.222533
| 0
| 0.257143
| 0
| 0.019048
| 0.288299
| 0.052473
| 0
| 0
| 0
| 0
| 0.057143
| 1
| 0.057143
| false
| 0
| 0.066667
| 0
| 0.152381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b941e493bd72a0cc29b7f5487a4bd483b40a8fe3
| 4,414
|
py
|
Python
|
test/unit/data/model/mapping/common.py
|
quacksawbones/galaxy-1
|
65f7259b29d3886e526d9be670c60d9da9fbe038
|
[
"CC-BY-3.0"
] | 1,085
|
2015-02-18T16:14:38.000Z
|
2022-03-30T23:52:07.000Z
|
test/unit/data/model/mapping/common.py
|
quacksawbones/galaxy-1
|
65f7259b29d3886e526d9be670c60d9da9fbe038
|
[
"CC-BY-3.0"
] | 11,253
|
2015-02-18T17:47:32.000Z
|
2022-03-31T21:47:03.000Z
|
test/unit/data/model/mapping/common.py
|
quacksawbones/galaxy-1
|
65f7259b29d3886e526d9be670c60d9da9fbe038
|
[
"CC-BY-3.0"
] | 1,000
|
2015-02-18T16:18:10.000Z
|
2022-03-29T08:22:56.000Z
|
from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4
import pytest
from sqlalchemy import (
delete,
select,
UniqueConstraint,
)
class AbstractBaseTest(ABC):
    @pytest.fixture
    def cls_(self):
        """
        Return class under test.
        Assumptions: if the class under test is Foo, then the class grouping
        the tests should be a subclass of BaseTest, named TestFoo.
        """
        # Strip the leading "Test" from the test-class name to find the model.
        class_name = type(self).__name__[len("Test"):]
        return getattr(self.get_model(), class_name)

    @abstractmethod
    def get_model(self):
        pass
def dbcleanup_wrapper(session, obj, where_clause=None):
    """Persist obj and yield it; the stored row is deleted when the generator closes."""
    with dbcleanup(session, obj, where_clause):
        yield obj
@contextmanager
def dbcleanup(session, obj, where_clause=None):
    """
    Use the session to store obj in database; delete from database on exit, bypassing the session.

    If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
    a custom select statement.
    """
    want_id = where_clause is None
    try:
        stored_id = persist(session, obj, want_id)
        yield stored_id
    finally:
        target_table = obj.__table__
        # Fall back to a primary-key clause when the caller supplied none.
        clause = where_clause if where_clause is not None else _get_default_where_clause(type(obj), stored_id)
        session.execute(delete(target_table).where(clause))
def persist(session, obj, return_id=True):
    """
    Use the session to store obj in database, then remove obj from session,
    so that on a subsequent load from the database we get a clean instance.
    """
    session.add(obj)
    session.flush()
    # Capture the id before expunging; reading it later could trigger a reload.
    saved_id = obj.id if return_id else None
    session.expunge(obj)
    return saved_id
def delete_from_database(session, objects):
    """
    Delete each object in objects from database.
    May be called at the end of a test if use of a context manager is impractical.
    (Assume all objects have the id field as their primary key.)
    """
    # Normalize to a list; models can themselves be iterable, so check the
    # type explicitly rather than duck-typing.
    to_delete = objects if isinstance(objects, list) else [objects]
    for obj in to_delete:
        tbl = obj.__table__
        session.execute(delete(tbl).where(tbl.c.id == obj.id))
def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):
    """Load one stored instance of cls, located by obj_id or a custom WHERE clause."""
    # Either obj_id or where_clause must be provided, but not both
    assert bool(obj_id) ^ (where_clause is not None)
    clause = where_clause if where_clause is not None else _get_default_where_clause(cls, obj_id)
    result = session.execute(select(cls).where(clause))
    # unique() is required if result contains joint eager loads against collections
    # https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
    if unique:
        result = result.unique()
    return result.scalar_one()
def has_unique_constraint(table, fields):
    """
    Return True if `table` has a UniqueConstraint covering exactly `fields`.

    Fix: return an explicit False on a miss instead of the previous implicit
    None (truthiness-compatible for existing callers).
    """
    wanted = set(fields)
    for constraint in table.constraints:
        if isinstance(constraint, UniqueConstraint):
            col_names = {c.name for c in constraint.columns}
            if wanted == col_names:
                return True
    return False
def has_index(table, fields):
    """
    Return True if `table` has an index covering exactly `fields`.

    Fix: return an explicit False on a miss instead of the previous implicit
    None (truthiness-compatible for existing callers).
    """
    wanted = set(fields)
    for index in table.indexes:
        col_names = {c.name for c in index.columns}
        if wanted == col_names:
            return True
    return False
def collection_consists_of_objects(collection, *objects):
    """
    Returns True iff list(collection) == list(objects), where object equality is determined
    by primary key equality: object1.id == object2.id.

    Fix: the original sorted `collection` in place, silently reordering the
    caller's list; sort copies instead so the arguments are left untouched.
    """
    if len(collection) != len(objects):  # False if lengths are different
        return False
    if not collection:  # True if both are empty
        return True
    # Sort copies, then compare pairwise by the 'id' primary-key attribute.
    sorted_collection = sorted(collection, key=lambda item: item.id)
    sorted_objects = sorted(objects, key=lambda item: item.id)
    for item1, item2 in zip(sorted_collection, sorted_objects):
        if item1.id is None or item2.id is None or item1.id != item2.id:
            return False
    return True
def get_unique_value():
    """Generate unique values to accommodate unique constraints."""
    fresh = uuid4()
    return fresh.hex
def _get_default_where_clause(cls, obj_id):
where_clause = cls.__table__.c.id == obj_id
return where_clause
| 31.084507
| 98
| 0.677843
| 613
| 4,414
| 4.737357
| 0.303426
| 0.07197
| 0.017906
| 0.021694
| 0.183196
| 0.126377
| 0.110537
| 0.084711
| 0.061983
| 0.035124
| 0
| 0.004187
| 0.242411
| 4,414
| 141
| 99
| 31.304965
| 0.864234
| 0.306298
| 0
| 0.168675
| 0
| 0
| 0.001367
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 1
| 0.144578
| false
| 0.012048
| 0.060241
| 0
| 0.349398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9443b673da6e4fd8c252e11eba4606e69192845
| 1,036
|
py
|
Python
|
promt_tr/__main__.py
|
ffreemt/promt-tr-free
|
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
|
[
"MIT"
] | null | null | null |
promt_tr/__main__.py
|
ffreemt/promt-tr-free
|
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
|
[
"MIT"
] | null | null | null |
promt_tr/__main__.py
|
ffreemt/promt-tr-free
|
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
|
[
"MIT"
] | null | null | null |
''' __main__, to run:
python -m promt_tr
'''
import sys
from random import randint
from promt_tr import promt_tr, LANG_CODES
# pragma: no cover
def main():
    """CLI entry point: translate the given text (or a random sample).

    Usage: ``python -m promt_tr [words ...] [to_lang]`` — the last argument
    is treated as the target language when it is a known LANG_CODE.
    """
    from_lang = 'auto'
    to_lang = 'zh'
    text = 'test ' + str(randint(0, 10000))
    if not sys.argv[1:]:
        print('Provide some English text, with an optional to_lang')
        print('E.g., python -m promt_tr test this and that de')
        print('Testing with some random text\n')
    else:
        argv = sys.argv[1:]
        len_ = len(argv)
        if len_ == 1:
            # Single argument: either a target language or the text itself.
            if argv[0] in LANG_CODES:
                to_lang = argv[0]
            else:
                text = argv[0]
        elif argv[-1] in LANG_CODES:
            to_lang = argv[-1]
            text = ' '.join(argv[:-1])
        else:
            text = ' '.join(argv)
    # Translate to the requested language first, then a fixed sample set.
    # (Previously the parsed ``to_lang`` was silently discarded because the
    # loop variable shadowed it; dict.fromkeys keeps order and de-duplicates.)
    for lang in dict.fromkeys([to_lang, 'zh', 'de', 'fr', 'it', 'es']):
        resu = promt_tr(text, from_lang, lang)
        print(f'[{text}] translated to [{lang}]: [{resu}]')
if __name__ == '__main__':  # Script entry point: `python -m promt_tr`.
    main()
| 23.545455
| 68
| 0.527027
| 145
| 1,036
| 3.551724
| 0.393103
| 0.081553
| 0.046602
| 0.054369
| 0.081553
| 0.081553
| 0
| 0
| 0
| 0
| 0
| 0.02149
| 0.326255
| 1,036
| 43
| 69
| 24.093023
| 0.716332
| 0.05695
| 0
| 0.103448
| 0
| 0
| 0.210581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.137931
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b94613d2fb24bf9487b3045eae02b837543d3647
| 2,547
|
py
|
Python
|
pages/lstm.py
|
tekeburak/dam-occupancy-model
|
f39d436bf27088068177245f0180cafaa56ad123
|
[
"MIT"
] | 8
|
2021-01-24T14:56:23.000Z
|
2021-03-26T18:10:33.000Z
|
pages/lstm.py
|
tekeburak/dam-occupancy-model
|
f39d436bf27088068177245f0180cafaa56ad123
|
[
"MIT"
] | null | null | null |
pages/lstm.py
|
tekeburak/dam-occupancy-model
|
f39d436bf27088068177245f0180cafaa56ad123
|
[
"MIT"
] | 6
|
2021-01-24T14:44:49.000Z
|
2021-03-21T17:50:30.000Z
|
import streamlit as st
import tensorflow as tf
import numpy
from utils.get_owm_data import get_open_weather_map_data
from utils.get_date import get_date_list_for_gmt
import plotly.graph_objects as go
from plotly import tools
import plotly.offline as py
import plotly.express as px
def app():
    """Render the LSTM page: explanatory text, then a 7-day forecast plot.

    Side effects only: pulls 7 days of weather features from OpenWeatherMap,
    runs the saved Keras LSTM model on them, and draws the prediction with
    Plotly inside the Streamlit page.
    """
    st.title("LSTM Model")

    st.subheader('What does LSTM model do?')
    st.markdown("""<p style='text-align: justify;'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>""", unsafe_allow_html=True)

    st.subheader('Why we chose LSTM?')
    st.markdown("""<p style='text-align: justify;'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>""", unsafe_allow_html=True)

    st.subheader('LSTM model input and output')
    st.markdown("Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features.", unsafe_allow_html=True)

    # Load the pre-trained model from disk and run it on live weather data.
    LSTM_model_name = 'models/LSTM_model.h5'
    model_lstm = tf.keras.models.load_model(LSTM_model_name)
    features = get_open_weather_map_data()

    # Model outputs a fraction; scale to percent and flatten to 1-D.
    prediction_lstm = model_lstm.predict(features) * 100
    prediction_lstm = prediction_lstm.ravel()

    date_list = get_date_list_for_gmt()
    data = []
    layout = go.Layout(
        title= "<b>LSTM Dam Occupancy Forecasting Plot</b>",paper_bgcolor = 'rgb(248, 248, 255)',plot_bgcolor = 'rgb(248, 248, 255)',barmode = "stack",
        xaxis = dict(title="Time", linecolor="#BCCCDC",showspikes=True,spikethickness=2,spikedash="dot",spikecolor= "#ffffff",spikemode="across",),
        yaxis= dict(title="Dam Occupancy Rate (%)",linecolor="#021C1E"))
    line_chart= go.Scatter(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)' )
    data.append(line_chart)
    fig= go.Figure(data=data, layout=layout)
    st.plotly_chart(fig)
| 50.94
| 476
| 0.773852
| 387
| 2,547
| 4.976744
| 0.540052
| 0.03271
| 0.023364
| 0.029595
| 0.128764
| 0.069574
| 0.069574
| 0.037383
| 0
| 0
| 0
| 0.016144
| 0.12446
| 2,547
| 49
| 477
| 51.979592
| 0.847085
| 0
| 0
| 0
| 0
| 0.09375
| 0.53828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.28125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b947d963b017c12ec37d222b3722de432bf97da6
| 8,891
|
py
|
Python
|
BookingScraper-joao_v2/BookingScraper/airbnb.py
|
joaocamargo/estudos-python
|
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
|
[
"MIT"
] | 1
|
2019-10-09T12:56:13.000Z
|
2019-10-09T12:56:13.000Z
|
BookingScraper-joao_v2/BookingScraper/airbnb.py
|
joaocamargo/estudos-python
|
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
|
[
"MIT"
] | null | null | null |
BookingScraper-joao_v2/BookingScraper/airbnb.py
|
joaocamargo/estudos-python
|
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3.6
import argparse
import argcomplete
from argcomplete.completers import ChoicesCompleter
from argcomplete.completers import EnvironCompleter
import requests
from bthread import BookingThread
from bs4 import BeautifulSoup
from file_writer import FileWriter
hotels = []
def get_countries():
    """Read the list of country names, one per line, from europa2020.txt."""
    with open("europa2020.txt", "r") as source:
        return source.read().splitlines()
def get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):
    # Fetch one airbnb search-results page and return it parsed with
    # BeautifulSoup. Dates are 'dd/mm/yyyy' strings.
    # NOTE(review): despite the module's booking.com naming, the URL below is
    # hard-coded to an airbnb.com.br search around London — confirm intent.
    # NOTE(review): '¤t_tab_id' and '§ion_offset' look like HTML-entity
    # mangling of '&current_tab_id' / '&section_offset' — verify the URL works.
    print('get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):')
    print(session, offset, rooms, country, dest_id, DayIni, DayFim)
    # Split the date strings into day/month/year; the int() round-trip
    # strips any leading zeros.
    diaInicial = str(int(DayIni[0:2]))
    mesInicial = str(int(DayIni[3:5]))
    anoInicial = str(int(DayIni[6:10]))
    diaFinal = str(int(DayFim[0:2]))
    mesFinal = str(int(DayFim[3:5]))
    anoFinal = str(int(DayFim[6:10]))
    '''
    Make request to airbnb page and parse html
    :param offset:
    :return: html page
    '''
    url = 'https://www.airbnb.com.br/s/Londres/'\
        'homes?refinement_paths%5B%5D=%2Fhomes¤t_tab_id=home_tab&selected_tab_id=home_tab&source=mc_search_bar&search_type=unknown'\
        '&click_referer=t%3ASEE_ALL%7Csid%3A874f16ee-6196-4289-9717-17dec73e1e5c%7Cst%3AMAGAZINE_HOMES&screen_size=large&hide_dates_and_guests_filters=false'\
        '&ne_lat=51.80546533345978&ne_lng=0.4969575708007312&sw_lat=51.17528882051496&sw_lng=-0.8200285131836154&zoom=10&search_by_map=false&checkin={anoInicial}-{mesInicial}-{diaInicial}'\
        '&checkout={anoFinal}-{mesFinal}-{diaFinal}&adults={rooms}&property_type_id%5B%5D=1&property_type_id%5B%5D=43&property_type_id%5B%5D=47'\
        '&place_id=ChIJdd4hrwug2EcRmSrV3Vo6llI&room_types%5B%5D=Entire%20home%2Fapt'\
        '§ion_offset=6&items_offset=18'.format(rooms=rooms, country=country.replace(' ', '+'),anoFinal=anoFinal,mesFinal=mesFinal,diaInicial=diaInicial,mesInicial=mesInicial,anoInicial=anoInicial,diaFinal=diaFinal,dest_id=dest_id) + str(offset)
    # Spoof a desktop browser User-Agent so the request is not rejected.
    r = requests.get(url, headers=
        {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0)'
                       ' Gecko/20100101 Firefox/48.0'})
    html = r.content
    print(url)
    parsed_html = BeautifulSoup(html, 'lxml')
    return parsed_html
def process_hotels(session, offset, rooms, country, dest_id, DayIni, DayFim):
    # Scrape one results page and append ';'-joined records of the form
    # 'DayIni;DayFim;name;price;nota;distance' to the module-global ``hotels``
    # list. Fields that cannot be found default to '-1'.
    parsed_html = get_booking_page(session, offset, rooms, country, dest_id,DayIni, DayFim)
    hotel = parsed_html.find_all('div', {'class': 'sr_item'})
    for ho in hotel:
        #print("ho.find('a', {'class': 'jq_tooltip'})")
        #print(ho.find('a', {'class': 'jq_tooltip'}))
        #name = ho.find('a', {'class': 'jq_tooltip'})['data-title']
        print("ho.find('span', {'class': 'sr-hotel__name'})")
        #print(ho.find('span', {'class': 'sr-hotel__name'}))
        # Hotel name; the replace() chain strips the b'...' bytes-repr
        # artifacts left by str(...encode('utf-8')).
        if ho.find('span', {'class': 'sr-hotel__name'}) is not None:
            name = str(ho.find('span', {'class': 'sr-hotel__name'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
        else:
            name = '-1'
        # NOTE(review): .replace("b","") also deletes every letter 'b' from
        # the scraped text, not just the bytes prefix — confirm acceptable.
        if ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}) is not None:
            price = ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}).text.replace('\n','').replace("b","").replace("'","")
        else:
            price = '-1'
        # Review score ('nota').
        if ho.find('span', {'class': '_ky9opu0'}) is not None:
            nota = str(ho.find('span', {'class': '_ky9opu0'}).text.replace('\n','').replace("b","").replace("'",""))
        else :
            nota = '-1'
        # Straight-line distance, identified by the tooltip title text.
        if ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}) is not None:
            distance = str(ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
        else :
            distance = '-1'
        # if ho.find('a', {'class': 'bui-link'}) is not None :
        #     result = [str(item) for item in ho.find_all('span', attrs={'data-bui-component' : 'Tooltip'})]
        #     print('TAMANHO TOOLTIP', str(len(result)))
        #     for i in result:
        #         print(i)
        #     for i in result:
        #         if i in 'km':
        #             distance = str(i)
        #         else:
        #             distance = '----'
        # else:
        #     distance = '----'
        #     if len(result) ==1:
        #         if result[0] in 'km':
        #             distance = result
        #         else:
        #             distance = 'aaaaa' + str(len(result))
        #     else:
        #         distance = '---'
        hotels.append(DayIni+';'+DayFim+';'+name + ';' + price + ';' + nota + ';' + distance)
        #hotels.append(str(len(hotels) + 1) + ' : ' + name + ' : ' + price)
def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None):
    '''
    Prepare data for saving.

    Fetches the first results page synchronously to learn how many pages
    exist, then scrapes the remaining pages concurrently with one
    BookingThread per page; each thread appends into the module-global
    ``hotels`` list via process_hotels.

    :return: list of ';'-joined hotel record strings
    '''
    offset = 1
    session = requests.Session()
    parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim)
    # Last pagination item's trailing text is the total page count.
    all_offset = parsed_html.find_all('li', {'class':
                                             'sr_pagination_item'})[-1].get_text().splitlines()[-1]
    threads = []
    for i in range(int(all_offset)):
        offset += 1
        t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels)
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # NOTE(review): this is an alias, not a copy — results accumulate in the
    # module-global ``hotels`` across calls; confirm that is intended.
    hotels2 = hotels
    return hotels2
def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None):
    '''
    Scrape every accommodation found for *country* and persist the result.

    The output lands in a hotels-in-<country>.{txt/csv/xlsx} style file,
    produced by save_data().
    '''
    print('Procurando por',country)
    scraped = prep_data(rooms, country, dest_id, DayIni, DayFim, out_format)
    save_data(scraped, out_format=out_format, country=country)
def save_data(data, out_format, country):
    '''
    Write the hotels list to disk via FileWriter.

    :param data: hotels list
    :param out_format: json, csv or excel
    :param country: country name used in the output filename
    :return:
    '''
    output_path = FileWriter(data, out_format, country).output_file()
    print('All accommodations are saved.')
    print('You can find them in', output_path, 'file')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
countries = get_countries()
parser.add_argument("--rooms",
help='Add the number of rooms to the booking request.',
default=1,
type=int,
nargs='?')
parser.add_argument("--country",
help='Add the country to the booking request.',
default='Macedonia',
nargs='?').completer = ChoicesCompleter(countries)
parser.add_argument("--dest_id",
help='Add the country to the booking request.',
default='0',
nargs='?')
parser.add_argument("--DayIni",
help='Data inicial',
default='01/01/2019',
nargs='?')
parser.add_argument("--DayFim",
help='Data inicial',
default='02/01/2019',
nargs='?')
parser.add_argument("--out_format",
help='Add the format for the output file. Add excel, json or csv.',
default='json',
choices=['json', 'excel', 'csv'],
nargs='?').completer = EnvironCompleter
argcomplete.autocomplete(parser)
args = parser.parse_args()
localidades = [{
'Pais': 'London',
'dest_id': '-2601889'
}, {
'Pais': 'Utrecht',
'dest_id': '-2154382'
}, {
'Pais': 'Buzios',
'dest_id': '-626254'
}, {
'Pais': '',
'dest_id': ''
}]
countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']]
if len(countryAux)>0:
country = countryAux[0]
print('Parametros')
print(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
get_data(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
else:
country = 'Nao Identificado'
locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != '']
print('----------')
print('Utilize uma das seguintes localizações')
for i in locais:
print(i)
print('----------')
| 37.995726
| 250
| 0.576313
| 1,068
| 8,891
| 4.670412
| 0.265918
| 0.026464
| 0.025662
| 0.028869
| 0.331195
| 0.290297
| 0.268845
| 0.235565
| 0.197273
| 0.180032
| 0
| 0.039568
| 0.260938
| 8,891
| 233
| 251
| 38.158798
| 0.719525
| 0.123383
| 0
| 0.15493
| 0
| 0.035211
| 0.272404
| 0.108699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0
| 0.056338
| 0
| 0.119718
| 0.091549
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b94c3a86b197fdae8da6f36cf6af0eeecde07155
| 13,008
|
py
|
Python
|
scripts/master/cros_try_job_git.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/master/cros_try_job_git.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/master/cros_try_job_git.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import os
import re
import shutil
import zlib
from StringIO import StringIO
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint: disable=E0611,F0401
from email.Message import Message
from email.Utils import formatdate
except ImportError:
raise
from buildbot.process.properties import Properties
from buildbot.schedulers.trysched import TryBase
from twisted.internet import defer, reactor, utils
from twisted.mail.smtp import SMTPSenderFactory
from twisted.python import log
from common.twisted_util.response import StringResponse
from master import gitiles_poller
from master.try_job_base import BadJobfile
class CbuildbotConfigs(object):
  """Holds the master's knowledge of valid cbuildbot build configurations."""

  # Acceptable 'etc' builder targets. The pattern guarantees that the build
  # name neither begins with a flag ('--') nor contains spaces that would
  # spill into extra args.
  _ETC_TARGET_RE = re.compile(r'^[a-zA-Z][\w-]+\w$')

  def __init__(self, configs, etc_builder=None):
    """Holds base state of the master's try job related configuration.

    configs (dict): A dictionary of all known CrOS configs. This will be as
        up-to-date as the Chromite pin.
    etc_builder (str): If not None, the name of the etc builder.
    """
    self.configs = configs
    self.etc_builder = etc_builder

  def AddBuildBucketHooks(self, c):
    """Install the BuildBucket parameter-mutation hook into config dict `c`.

    The hook resolves the `cbb_config` property to an actual waterfall
    builder name (falling back to the 'etc' builder for unknown configs when
    one is configured), validates the optional `extra_args` and
    `slaves_request` list properties, and writes the properties back. Any
    supplied `builder_name` parameter is overwritten.
    """
    def _map_params(params, _build):
      props = params.get('properties', {})
      cbb_config = props.get('cbb_config')
      if not cbb_config:
        raise ValueError('Missing required `cbb_config` property.')
      params['builder_name'] = self.GetBuilderForConfig(cbb_config)
      # Optional list-valued fields must actually be lists.
      if not isinstance(props.get('extra_args', []), list):
        raise ValueError('`extra_args` property is not a list.')
      if not isinstance(props.get('slaves_request', []), list):
        raise ValueError('`slaves_request` is not a list.')
      # Write the (possibly defaulted) properties back onto the build.
      params['properties'] = props
    c['buildbucket_params_hook'] = _map_params

  def GetBuilderForConfig(self, config_name):
    """Map a cbuildbot config name to its waterfall builder.

    Known configs use their template (or their own name); unknown configs
    are validated as 'etc' builds and dispatched to the etc builder.
    """
    known = self.configs.get(config_name)
    if known:
      return known['_template'] or config_name
    self.ValidateEtcBuild(config_name)
    return self.etc_builder

  def ValidateEtcBuild(self, config_name):
    """Raise ValueError unless `config_name` may run on the etc builder."""
    if not self.etc_builder:
      raise ValueError('etc builder is not configured.')
    if not config_name:
      raise ValueError('Empty config name')
    if not self._ETC_TARGET_RE.match(config_name):
      raise ValueError('invalid etc config name (%s).' % (config_name,))
def translate_v1_to_v2(parsed_job):
  """Upgrade a tryjob description from V1 to V2 in place."""
  extra = parsed_job.setdefault('extra_args', [])
  extra.append('--remote-trybot')
  parsed_job['version'] = 2
def translate_v2_to_v3(parsed_job):
  """Upgrade a tryjob description from V2 to V3 in place.

  Raises BadJobfile for jobs carrying --remote-patches, whose V3 format is
  not backwards compatible.
  """
  for arg in parsed_job.get('extra_args', ()):
    if arg.startswith('--remote-patches'):
      raise BadJobfile('Cannot translate --remote-patches from tryjob v.2 to '
                       'v.3. Please run repo sync.')
  parsed_job['version'] = 3
class CrOSTryJobGit(TryBase):
  """Poll a Git server to grab patches to try."""

  # Name of property source for generated properties.
  _PROPERTY_SOURCE = 'Try Job'
  # The version of tryjob that the master is expecting.
  _TRYJOB_FORMAT_VERSION = 3

  # Functions that translate from one tryjob version to another.
  _TRANSLATION_FUNCS = {
      1 : translate_v1_to_v2,
      2 : translate_v2_to_v3,
  }

  # Template path URL component to retrieve the Base64 contents of a file from
  # Gitiles.
  _GITILES_PATH_TMPL = '%(repo)s/+/%(revision)s/%(path)s?format=text'

  @classmethod
  def updateJobDesc(cls, parsed_job):
    """Ensure job description is in the format we expect."""
    # Chain translation functions until the job reaches the current version;
    # the guard below prevents an infinite loop if a translator forgets to
    # bump 'version'.
    while parsed_job['version'] < cls._TRYJOB_FORMAT_VERSION:
      prev_ver = parsed_job['version']
      translation_func = cls._TRANSLATION_FUNCS[parsed_job['version']]
      translation_func(parsed_job)
      if parsed_job['version'] <= prev_ver:
        raise AssertionError('translation function %s not incrementing version!'
                             % str(translation_func))

  def __init__(self, name, pollers, smtp_host, from_addr, reply_to,
               email_footer, cbuildbot_configs, properties=None):
    """Initialize the class.

    Arguments:
      name: See TryBase.__init__().
      pollers: A list of job repo git pit pollers.
      smtp_host: The smtp host for sending out error emails.
      from_addr: The email address to display as being sent from.
      reply_to: The email address to put in the 'Reply-To' email header field.
      email_footer: The footer to append to any emails sent out.
      cbuildbot_configs: (CbuildbotConfigs) A configuration set instance. Any
          'bot' request outside of this list will go to an 'etc' builder, if
          available.
      properties: See TryBase.__init__()
    """
    TryBase.__init__(self, name, [], properties or {})
    self.pollers = pollers
    self.smtp_host = smtp_host
    self.from_addr = from_addr
    self.reply_to = reply_to
    self.email_footer = email_footer
    self.cbb = cbuildbot_configs

  def startService(self):
    # Begin consuming repository changes as soon as the service starts.
    TryBase.startService(self)
    self.startConsumingChanges()

  @staticmethod
  def load_job(data):
    """Parse a tryjob JSON blob, wrapping parse errors in BadJobfile."""
    try:
      return json.loads(data)
    except ValueError as e:
      raise BadJobfile("Failed to parse job JSON: %s" % (e.message,))

  def validate_job(self, parsed_job):
    """Raise BadJobfile listing every missing/mis-typed field and bad bot."""
    # A list of field description tuples of the format:
    # (name, type, required).
    fields = [('name', basestring, True),
              ('user', basestring, True),
              ('email', list, True),
              ('bot', list, True),
              ('extra_args', list, False),
              ('version', int, True),
              ('slaves_request', list, False),
             ]
    error_msgs = []
    for name, f_type, required in fields:
      val = parsed_job.get(name)
      if val is None:
        if required:
          error_msgs.append('Option %s missing!' % name)
      elif not isinstance(val, f_type):
        error_msgs.append('Option %s of wrong type!' % name)
    # If we're an 'etc' job, we must have bots defined to execute.
    for bot in parsed_job['bot']:
      if bot in self.cbb.configs:
        continue
      # Assert that this is a valid 'etc' build.
      try:
        self.cbb.ValidateEtcBuild(bot)
      except ValueError as e:
        error_msgs.append("Invalid 'etc' build (%s): %s" % (bot, e.message))
    if error_msgs:
      raise BadJobfile('\n'.join(error_msgs))

  def get_props(self, config, options):
    """Overriding base class method."""
    props = Properties()
    props.setProperty('slaves_request', options.get('slaves_request', []),
                      self._PROPERTY_SOURCE)
    props.setProperty('cbb_config', config, self._PROPERTY_SOURCE)
    extra_args = options.get('extra_args')
    if extra_args:
      # This field can be quite large, and exceed BuildBot property limits.
      # Compress it, Base64 encode it, and prefix it with "z:" so the consumer
      # knows its size.
      extra_args = 'z:' + base64.b64encode(zlib.compress(json.dumps(
          extra_args)))
      props.setProperty('cbb_extra_args', extra_args,
                        self._PROPERTY_SOURCE)
    return props

  def create_buildset(self, ssid, parsed_job):
    """Overriding base class method."""
    dlist = []
    # Buildset name identifies the requester and the job.
    buildset_name = '%s:%s' % (parsed_job['user'], parsed_job['name'])
    for bot in parsed_job['bot']:
      builder_name = self.cbb.GetBuilderForConfig(bot)
      log.msg("Creating '%s' try job(s) %s for %s" % (builder_name, ssid, bot))
      dlist.append(self.addBuildsetForSourceStamp(ssid=ssid,
          reason=buildset_name,
          external_idstring=buildset_name,
          builderNames=[builder_name],
          properties=self.get_props(bot, parsed_job)))
    return defer.DeferredList(dlist)

  def send_validation_fail_email(self, name, emails, error):
    """Notify the user via email about the tryjob error."""
    html_content = []
    html_content.append('<html><body>')
    body = """
Your tryjob with name '%(name)s' failed the validation step. This is most
likely because <br>you are running an older version of cbuildbot. Please run
<br><code>repo sync chromiumos/chromite</code> and try again. If you still
see<br>this message please contact chromeos-build@google.com.<br>
"""
    html_content.append(body % {'name': name})
    html_content.append("Extra error information:")
    html_content.append(error.replace('\n', '<br>\n'))
    html_content.append(self.email_footer)
    m = Message()
    m.set_payload('<br><br>'.join(html_content), 'utf8')
    m.set_type("text/html")
    m['Date'] = formatdate(localtime=True)
    m['Subject'] = 'Tryjob failed validation'
    m['From'] = self.from_addr
    m['Reply-To'] = self.reply_to
    result = defer.Deferred()
    sender_factory = SMTPSenderFactory(self.from_addr, emails,
                                       StringIO(m.as_string()), result)
    # NOTE(review): ``result`` is never returned or chained — delivery
    # failures are fire-and-forget; confirm that is intended.
    reactor.connectTCP(self.smtp_host, 25, sender_factory)

  @defer.inlineCallbacks
  def gotChange(self, change, important):
    # Top-level guard: log and swallow any error so the scheduler keeps
    # consuming subsequent changes.
    try:
      yield self._gotChangeImpl(change, important)
    except Exception as e:
      log.msg('Exception in try job scheduler: %s' % (e,))
      import traceback
      traceback.print_exc()

  @defer.inlineCallbacks
  def _gotChangeImpl(self, change, _important):
    """Process the received data and send the queue buildset."""
    # Find poller that this change came from.
    for poller in self.pollers:
      if not isinstance(poller, gitiles_poller.GitilesPoller):
        continue
      if poller.repo_url == change.repository:
        break
    else:
      # for/else: loop completed without finding a matching poller.
      raise BadJobfile(
          'Received tryjob from unsupported repository %s' % change.repository)
    # pylint: disable=W0631
    file_contents = yield self.loadGitilesChangeFile(poller, change)
    parsed = {}
    try:
      parsed = self.load_job(file_contents)
      self.validate_job(parsed)
      self.updateJobDesc(parsed)
    except BadJobfile as e:
      # Tell the requester why the job was rejected, then propagate.
      self.send_validation_fail_email(parsed.setdefault('name', ''),
                                      parsed['email'], str(e))
      raise
    # The sourcestamp/buildsets created will be merge-able.
    ssid = yield self.master.db.sourcestamps.addSourceStamp(
        branch=change.branch,
        revision=change.revision,
        project=change.project,
        repository=change.repository,
        changeids=[change.number])
    yield self.create_buildset(ssid, parsed)

  @defer.inlineCallbacks
  def loadGitilesChangeFile(self, poller, change):
    """Fetch and Base64-decode the single file modified by *change*."""
    if len(change.files) != 1:
      # We only accept changes with 1 diff file.
      raise BadJobfile(
          'Try job with too many files %s' % (','.join(change.files)))
    # Load the contents of the modified file.
    path = self._GITILES_PATH_TMPL % {
        'repo': poller.repo_path,
        'revision': change.revision,
        'path': change.files[0],
    }
    contents_b64 = yield poller.agent.request('GET', path, retry=5,
                                              protocol=StringResponse.Get)
    defer.returnValue(base64.b64decode(contents_b64))
| 37.165714
| 80
| 0.676661
| 1,704
| 13,008
| 5.034624
| 0.269366
| 0.020981
| 0.01119
| 0.008742
| 0.052104
| 0.026577
| 0.00746
| 0.00746
| 0
| 0
| 0
| 0.006039
| 0.223478
| 13,008
| 349
| 81
| 37.272206
| 0.843283
| 0.293973
| 0
| 0.101382
| 0
| 0
| 0.154999
| 0.013755
| 0
| 0
| 0
| 0
| 0.004608
| 1
| 0.082949
| false
| 0
| 0.101382
| 0
| 0.239631
| 0.004608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b94d43136b5079271270c2099bbeca811ff9b1ce
| 1,412
|
py
|
Python
|
Medium/515.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6
|
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Medium/515.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1
|
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Medium/515.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# 515. Find Largest Value in Each Tree Row
#
# Description:
# You need to find the largest value in each row of a binary tree.
# Example:
# Input:
# 1
# / \
# 3 2
# / \ \
# 5 3 9
# Output: [1, 3, 9]
#
# Version: 1.0
# 12/22/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def largestValues(self, root):
        """
        Return the largest node value in each row (level) of a binary tree.

        Standard BFS: walk the tree level by level, appending each level's
        maximum value to the result.

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        res = []
        level = [root]
        while level:
            # Max over the current level's values. This also removes the
            # original dependence on sys.maxsize — ``sys`` was never imported
            # in this file, so any non-empty tree raised NameError.
            res.append(max(node.val for node in level))
            next_level = []  # Nodes of the next row
            for node in level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return res
# Used for testing
if __name__ == "__main__":
    test = Solution()  # smoke instantiation only; no cases are actually run
# ------------------------------
# Summary:
# BFS solution.
| 23.147541
| 66
| 0.434136
| 144
| 1,412
| 4.173611
| 0.520833
| 0.039933
| 0.046589
| 0.0599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023952
| 0.40864
| 1,412
| 61
| 67
| 23.147541
| 0.695808
| 0.444051
| 0
| 0
| 0
| 0
| 0.011019
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b94d5a11e77235531376a017f673e8c5a0fdf637
| 9,578
|
py
|
Python
|
opsmop/meta/docs/exparser.py
|
lachmanfrantisek/opsmop
|
562ae2d753ff84b3d794a6815d0436753e82d2a0
|
[
"Apache-2.0"
] | null | null | null |
opsmop/meta/docs/exparser.py
|
lachmanfrantisek/opsmop
|
562ae2d753ff84b3d794a6815d0436753e82d2a0
|
[
"Apache-2.0"
] | null | null | null |
opsmop/meta/docs/exparser.py
|
lachmanfrantisek/opsmop
|
562ae2d753ff84b3d794a6815d0436753e82d2a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Michael DeHaan LLC, <michael@michaeldehaan.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class Example(object):
    """One documented example scraped from a module file."""

    def __init__(self):
        # All fields start empty and are filled in while an example block
        # is being scanned.
        self.name = ""
        self.see_files, self.description, self.code = [], [], []
class Record(object):
    """Parsed documentation record for one module file.

    Built by a small line-by-line state machine; ``phase`` tracks the
    current parser state ('module', 'description', 'example', ...).
    """
    def __init__(self):
        # things which we'll figure out as we scan the example
        self.name = ""
        self.purpose = ""
        self.provider_names = []
        self.related_modules = []
        self.category = ""
        self.description = []
        self.examples = []
        self.current_example = Example()  # example currently being filled in
        self.phase = 'module'  # parser state machine starts in 'module'
        self.count = 0  # running line counter, used in trace output
    def set_phase(self, phase):
        # Transition the parser state machine and print a trace banner.
        self.phase = phase
        print("---------------------------------------------------------")
        print("%s phase | %s" % (self.count, self.phase))
        print("---------------------------------------------------------")
    @classmethod
    def from_file(cls, filename):
        """Parse *filename* line by line and return the populated Record."""
        r = cls()
        # Module name is the file's basename without the .py suffix.
        r.name = os.path.basename(filename).replace(".py","")
        print("=========================================================")
        print("%s M | %s" % ('0', r.name))
        # NOTE(review): file handle is never closed explicitly — relies on GC.
        data = open(filename).read().splitlines()
        for line in data:
            # handle_line returns False to request an early stop.
            if not r.handle_line(line):
                break
        return r
    def load_command(self, line):
        """Try to parse *line* as a parser command.

        Returns a (is_command, command, rest) tuple. Commands are ALL-CAPS
        ``# NAME: value`` comments; the divider comments ``#---`` / ``#===``
        become the synthetic 'start_block' / 'end_block' commands, and a
        bare DESCRIPTION comment becomes 'description'.
        """
        if "DESCRIPTION" in line or '----' in line or '====' in line:
            pass
        elif not ":" in line:
            # commands must contain a colon unless they are blocks or DESCRIPTION starters
            return (False, None, None)
        if not line.startswith("#"):
            # commands must be in comments
            return (False, None, None)
        if ":" in line:
            tokens = line.split(":")
            if tokens[0].upper() != tokens[0]:
                # commands must be in all caps. This is done
                # so we don't get confused by colons in URLs and so on.
                print("REJECT: %s" % tokens[0])
                return (False, None, None)
        # at this point we are sure it is a command
        if '#------------' in line.replace(" ",""):
            return (True, 'start_block', None)
        if '#============' in line.replace(" ",""):
            return (True, 'end_block', None)
        # throw away the leading comment
        line = line.replace("#","",1).strip()
        if line.startswith("DESCRIPTION"):
            return (True, 'description', None)
        # Split 'NAME: rest' once; command names are normalized to lowercase.
        tokens = line.split(':', 1)
        command = tokens[0].replace("#","").strip().lower()
        rest = tokens[1].strip()
        return (True, command, rest)
    def handle_line(self, line):
        """Feed one source line to the state machine; False stops the scan."""
        self.count = self.count + 1
        (is_command, command, rest) = self.load_command(line)
        print("%s line | %s" % (self.count, line))
        #if command == 'policy':
        #    return False
        if is_command:
            # Commands are dispatched regardless of the current phase.
            #if command not in [ 'start_block', 'end_block' ]:
            #    print("keyword: %s => %s" % (command, rest))
            self.handle_command(command, rest)
            return True
        #print("PHASE=%s" % self.phase)
        #print("LINE=%s" % line)
        # Non-command lines: behavior depends on the current phase.
        if self.phase == 'module':
            # Only empty comment lines are tolerated between commands here.
            if not line.startswith("#") or line.replace("#","").strip():
                raise Exception("the module phase should be all commands")
        elif self.phase == 'description':
            # module description lines must be comments
            self.handle_module_description(line)
        elif self.phase == 'example':
            if not line.startswith("#") or line.replace("#","").strip():
                raise Exception("the example phase should be all commands")
        elif self.phase == 'example_description':
            self.handle_example_description(self.current_example, line)
        elif self.phase == 'example_code':
            self.handle_example_code(self.current_example, line)
        elif self.phase == 'limbo':
            # Between an end_block and the next start_block: ignore input.
            #print("ignoring line while in limbo: %s" % line)
            pass
        elif self.phase == 'done':
            #print("ignoring line while done: %s" % line)
            pass
        else:
            raise Exception("unknown phase: %s" % self.phase)
        return True # continue
    def handle_command(self, command, rest):
        """Apply one parsed command to the parser's phase state machine.

        Phase transitions: module -> description -> limbo -> example ->
        example_description -> example_code -> (back to) example; the
        'setup' command ends parsing ('done'). Unknown commands for the
        current phase raise. Returns False when already done.
        """
        #print("<PHASE: %s, COMMAND: %s, REST: %s>" % (self.phase, command, rest))
        if self.phase == 'done':
            return False
        if self.phase == 'module':
            # from module mode the only state transition is into module_description mode
            # when we find the description command
            if command not in ['start_block', 'end_block']:
                print("%s set | %-20s | %s" % (self.count, command, rest))
            if command == 'module':
                pass
            elif command == 'start_block':
                pass
            elif command == 'category':
                self.category = rest
            elif command == 'purpose':
                self.purpose = rest
            elif command == 'related':
                self.related_modules = [ x.strip() for x in rest.split(",") ]
            elif command == 'providers':
                self.providers = [ x.strip() for x in rest.split(",") ]
            elif command == 'fyi':
                pass
            elif command == 'description':
                print("---------------------------------------------------------")
                self.set_phase('description')
            elif command == 'end_block':
                raise Exception("unexpected end block without description")
            else:
                raise Exception("unknown command: %s" % command)
        elif self.phase == 'description':
            # in description phase end block moves us into limbo until we find
            # another example start block
            if command == 'end_block':
                self.set_phase('limbo')
            else:
                raise Exception("invalid command: %s" % command)
        elif self.phase == 'limbo':
            # in limbo, seeing a start block moves us into example phase
            if command == 'start_block':
                self.set_phase('example')
            else:
                raise Exception("invalid command: %s" % command)
        elif self.phase == 'example':
            # in example phase we can only move into example description phase
            # by hitting the description command
            if command == 'example':
                print("---------------------------------------------------------")
                print("%s exmp | %s" % (self.count, rest))
                print("---------------------------------------------------------")
                self.current_example.name = rest
            elif command == 'setup':
                # 'setup' terminates example parsing entirely.
                self.set_phase('done')
            elif command == 'description':
                print("MOV!")
                self.set_phase('example_description')
            elif command == 'see_files' or command == 'see_file':
                self.current_example.see_files = [ x.strip() for x in rest.split(",")]
            else:
                raise Exception("unknown command: %s" % command)
        elif self.phase == 'example_description':
            # in example description phase we can only move into example code phase
            # by hitting an end block
            if command == 'end_block':
                print("-------")
                self.set_phase('example_code')
            else:
                raise Exception("unknown command: %s" % command)
        elif self.phase == 'example_code':
            # in example code phase we can only move back into example phase by
            # hitting a start block
            if command == 'start_block':
                self.examples.append(self.current_example)
                self.current_example = Example()
                self.set_phase('example')
            else:
                raise Exception("unknown command: %s" % command)
        elif self.phase == 'done':
            return False
        else:
            raise Exception("unknown phase: %s" % self.phase)
def handle_example_description(self, example, line):
# could be a comment or the code example, we want to keep both
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
print("%s desc | %s" % (self.count, line))
example.description.append(line)
def handle_example_code(self, example, line):
line = line.rstrip()
example.code.append(line)
print("%s code | %s" % (self.count, line))
def handle_module_description(self, line):
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
if line:
print("%s mdesc | %s" % (self.count, line))
self.description.append(line)
| 37.708661
| 90
| 0.516914
| 1,035
| 9,578
| 4.715942
| 0.202899
| 0.042409
| 0.031961
| 0.024585
| 0.315304
| 0.229461
| 0.219217
| 0.170867
| 0.139316
| 0.108175
| 0
| 0.003125
| 0.331802
| 9,578
| 253
| 91
| 37.857708
| 0.759531
| 0.205993
| 0
| 0.423529
| 0
| 0
| 0.162302
| 0.045238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.035294
| 0.005882
| 0
| 0.147059
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b94dd4c5db15c696e937d22b21b3d1a6fd038ef8
| 737
|
py
|
Python
|
pylox/TokenType.py
|
sheunl/Compiler_Tests
|
18c5e0568bc39a60094f3e44943ac252c279ffb9
|
[
"CC0-1.0"
] | null | null | null |
pylox/TokenType.py
|
sheunl/Compiler_Tests
|
18c5e0568bc39a60094f3e44943ac252c279ffb9
|
[
"CC0-1.0"
] | null | null | null |
pylox/TokenType.py
|
sheunl/Compiler_Tests
|
18c5e0568bc39a60094f3e44943ac252c279ffb9
|
[
"CC0-1.0"
] | null | null | null |
from enum import Enum
class T(Enum):
    """Lox token types, numbered in the scanner's canonical order."""

    # Single-character tokens.
    LEFT_PAREN = 1
    RIGHT_PAREN = 2
    LEFT_BRACE = 3
    RIGHT_BRACE = 4
    COMMA = 5
    DOT = 6
    MINUS = 7
    PLUS = 8
    SEMICOLON = 9
    SLASH = 10
    STAR = 11
    # One- or two-character tokens.
    BANG = 12
    BANG_EQUAL = 13
    EQUAL = 14
    EQUAL_EQUAL = 15
    GREATER = 16
    GREATER_EQUAL = 17
    LESS = 18
    LESS_EQUAL = 19
    # Literals.
    IDENTIFIER = 20
    STRING = 21
    NUMBER = 22
    # Keywords.
    AND = 23
    CLASS = 24
    ELSE = 25
    FALSE = 26
    FUN = 27
    FOR = 28
    IF = 29
    NIL = 30
    OR = 31
    PRINT = 32
    RETURN = 33
    SUPER = 34
    THIS = 35
    TRUE = 36
    VAR = 37
    WHILE = 38
    EOF = 39
| 14.45098
| 32
| 0.522388
| 103
| 737
| 3.660194
| 0.825243
| 0.079576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160093
| 0.415197
| 737
| 51
| 33
| 14.45098
| 0.714617
| 0.089552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9516c7b124e87fce1712aca1aa49ef2cd923f11
| 3,056
|
py
|
Python
|
lib/two/mongomgr.py
|
erkyrath/tworld
|
9f5237771196b03753d027277ffc296e25fd7425
|
[
"MIT"
] | 38
|
2015-01-03T16:59:20.000Z
|
2021-10-13T09:15:53.000Z
|
lib/two/mongomgr.py
|
Oreolek/tworld
|
9f5237771196b03753d027277ffc296e25fd7425
|
[
"MIT"
] | 32
|
2015-01-04T01:59:34.000Z
|
2016-05-20T16:29:26.000Z
|
lib/two/mongomgr.py
|
Oreolek/tworld
|
9f5237771196b03753d027277ffc296e25fd7425
|
[
"MIT"
] | 7
|
2015-10-08T21:01:20.000Z
|
2020-05-21T17:42:54.000Z
|
"""
Manage the connection to the MongoDB server.
"""
import tornado.gen
import tornado.ioloop
import motor
class MongoMgr(object):
    """Manage the application's Motor (MongoDB) connection.

    The connection is opened lazily by the periodic monitor_mongo_status
    callback, which also pings it every few seconds and reopens it when it
    is found dead.
    """
    def __init__(self, app):
        # Keep a link to the owning application.
        self.app = app
        self.log = self.app.log
        # This will be the Motor (MongoDB) connection. We'll open it in the
        # first monitor_mongo_status call.
        self.mongo = None
        self.mongoavailable = False  # true if self.mongo exists and is open
        self.mongotimerbusy = False  # true while monitor_mongo_status runs
        # We also manage self.app.mongodb, a MotorDatabase. This must be
        # non-None exactly when mongoavailable is true.
    def init_timers(self):
        """Install the mongo status monitor on the tornado IOLoop."""
        ioloop = tornado.ioloop.IOLoop.instance()
        # The mongo status monitor. We set up one call immediately, and then
        # try again every three seconds.
        ioloop.add_callback(self.monitor_mongo_status)
        res = tornado.ioloop.PeriodicCallback(self.monitor_mongo_status, 3000)
        res.start()
    def close(self):
        """Close the connection to mongodb. (The monitor will start it
        right back up again, or try to.)
        """
        if self.mongo:
            try:
                self.mongo.disconnect()
            except Exception as ex:
                self.log.error('Problem disconnecting mongo: %s', ex)
            self.mongo = None
        self.app.mongodb = None
    @tornado.gen.coroutine
    def monitor_mongo_status(self):
        """Periodic health check for the mongo connection.

        Pings an open connection; when the connection is (or has become)
        unavailable, closes it and attempts to open a fresh one. The
        mongotimerbusy flag prevents overlapping runs, and nothing happens
        while the server is shutting down.
        """
        if (self.mongotimerbusy):
            self.log.warning('monitor_mongo_status: already in flight; did a previous call jam?')
            return
        if (self.app.shuttingdown):
            self.log.warning('monitor_mongo_status: server is shutting down, never mind')
            return
        self.mongotimerbusy = True
        if (self.mongoavailable):
            try:
                res = yield motor.Op(self.mongo.admin.command, 'ping')
                if (not res):
                    self.log.error('Mongo client not alive')
                    self.mongoavailable = False
            except Exception as ex:
                self.log.error('Mongo client not alive: %s', ex)
                self.mongoavailable = False
            if (not self.mongoavailable):
                # Ping failed: drop the dead connection before reopening.
                self.close()
        if (not self.mongoavailable):
            try:
                self.mongo = motor.MotorClient(tz_aware=True)
                res = yield motor.Op(self.mongo.open)
                ### maybe authenticate to a database?
                self.mongoavailable = True
                self.app.mongodb = self.mongo[self.app.opts.mongo_database]
                self.log.info('Mongo client open')
                self.app.queue_command({'cmd':'dbconnected'})
            except Exception as ex:
                self.mongoavailable = False
                self.app.mongodb = None
                self.log.error('Mongo client not open: %s', ex)
        self.mongotimerbusy = False
| 35.534884
| 97
| 0.576571
| 352
| 3,056
| 4.940341
| 0.332386
| 0.040253
| 0.072455
| 0.032777
| 0.156987
| 0.143761
| 0.064405
| 0
| 0
| 0
| 0
| 0.001981
| 0.339332
| 3,056
| 85
| 98
| 35.952941
| 0.859336
| 0.193717
| 0
| 0.357143
| 0
| 0
| 0.107807
| 0.017348
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.053571
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95332c99e63e536863282307e578d423edf7664
| 644
|
py
|
Python
|
tests/models/test_documents.py
|
airslate-oss/python-airslate
|
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
|
[
"Apache-2.0"
] | 3
|
2021-02-07T20:04:26.000Z
|
2021-09-22T08:32:26.000Z
|
tests/models/test_documents.py
|
airslate-oss/python-airslate
|
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
|
[
"Apache-2.0"
] | 15
|
2021-01-21T15:38:37.000Z
|
2021-02-16T07:52:20.000Z
|
tests/models/test_documents.py
|
airslate-oss/python-airslate
|
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of the airslate.
#
# Copyright (c) 2021 airSlate, Inc.
#
# For the full copyright and license information, please view
# the LICENSE file that was distributed with this source code.
from airslate.models.documents import UpdateFields
from airslate.entities.fields import Field
def test_empty_update_fields__to_dict():
    """An UpdateFields model with no fields serializes to an empty data list."""
    expected = {'data': []}
    assert UpdateFields().to_dict() == expected
def test_update_fields__to_dict():
    """Fields serialize as dictionary-typed id references, in order."""
    fields = [Field('123'), Field('abc')]
    expected = {'data': [
        {'id': '123', 'type': 'dictionary'},
        {'id': 'abc', 'type': 'dictionary'},
    ]}
    assert UpdateFields(data=fields).to_dict() == expected
| 28
| 62
| 0.677019
| 83
| 644
| 5.096386
| 0.554217
| 0.056738
| 0.066194
| 0.085106
| 0.264775
| 0.165485
| 0
| 0
| 0
| 0
| 0
| 0.018939
| 0.180124
| 644
| 22
| 63
| 29.272727
| 0.782197
| 0.293478
| 0
| 0
| 0
| 0
| 0.116071
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.181818
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9556579b31dd7d2370d8083a431ada02beb471d
| 2,205
|
py
|
Python
|
cdnu/ccds.py
|
Indy2222/mbg-codon-usage
|
d415076a8150cd712010c0389c71ef22ba9ad850
|
[
"MIT"
] | null | null | null |
cdnu/ccds.py
|
Indy2222/mbg-codon-usage
|
d415076a8150cd712010c0389c71ef22ba9ad850
|
[
"MIT"
] | null | null | null |
cdnu/ccds.py
|
Indy2222/mbg-codon-usage
|
d415076a8150cd712010c0389c71ef22ba9ad850
|
[
"MIT"
] | null | null | null |
from typing import List, NamedTuple

# Name of the CCDS release file, expected in the working directory.
CCDS_FILE = 'CCDS.current.txt'

# Molecules (chromosomes) we accept; anything else trips the assert below.
CHROMOSOMES = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
               '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
               'X', 'Y')


class CdsPos(NamedTuple):
    """Location of a single coding sequence (CDS) on the GRCh38 genome."""
    # CCDS database identifier, e.g. "CCDS30547.1".
    ccds_id: str
    indexes: list
    """2-tuples with start (inclusive) and stop indexes (exclusive) in
    reference genome. Whole CDS can be constructed as concatenation of the
    sub-sequences."""
    molecule: str
    """Molecule name, see :const:`CHROMOSOMES`"""


def load_ccds() -> List[CdsPos]:
    """Load file with CDS locations within GRCh38 genome as a list of
    :class:`CdsPos`.

    Skips comments, blank lines, non-public entries, CDS on the negative
    strand, entries with unknown location, and CDS whose total length is
    not a multiple of three.
    """
    cds = []
    with open(CCDS_FILE, encoding='utf-8', newline='\n') as fp:
        for line in fp:
            if not line.strip():
                # Skip empty lines. BUG FIX: lines read from a file keep
                # their trailing newline, so the original `if not line:`
                # never fired and a blank line crashed the field indexing
                # below with an IndexError.
                continue
            if line.startswith('#'):
                # Skip comments
                continue
            parts = line.split('\t')
            ccds_id = parts[4]
            status = parts[5]
            if 'Public' not in status:
                # CDS is not yet public
                continue
            if parts[6] == '-':
                # CDS strand negative order = reverse-complement
                continue
            locations_str = parts[9]
            if locations_str == '-':
                # CDS location unknown
                continue
            chromosome = parts[0]
            assert chromosome in CHROMOSOMES, chromosome
            locations = []
            assert locations_str.startswith('[')
            assert locations_str.endswith(']')
            for location_str in locations_str[1:-1].split(','):
                start_str, stop_str = location_str.split('-')
                # Convert to half-open [start, stop) intervals.
                start, stop = int(start_str), int(stop_str) + 1
                locations.append((start, stop))
            if sum(b - a for a, b in locations) % 3 != 0:
                # Skip CDS which are not multiple of three in length.
                continue
            cds.append(CdsPos(
                ccds_id=ccds_id,
                molecule='chr' + chromosome,
                indexes=locations
            ))
    return cds
| 30.205479
| 77
| 0.502494
| 246
| 2,205
| 4.430894
| 0.46748
| 0.055046
| 0.033028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035585
| 0.37551
| 2,205
| 72
| 78
| 30.625
| 0.755991
| 0.114739
| 0
| 0.136364
| 0
| 0
| 0.045113
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 1
| 0.022727
| false
| 0
| 0.022727
| 0
| 0.159091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9576be4fad430a84f92a2e3dc9d1b34f113118c
| 2,732
|
py
|
Python
|
test/test_resolve_errors.py
|
ITMO-NSS-team/GEFEST
|
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
|
[
"BSD-3-Clause"
] | 12
|
2022-01-19T11:06:32.000Z
|
2022-02-21T14:59:23.000Z
|
test/test_resolve_errors.py
|
ITMO-NSS-team/GEFEST
|
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
|
[
"BSD-3-Clause"
] | 9
|
2022-01-19T11:09:11.000Z
|
2022-03-29T13:36:41.000Z
|
test/test_resolve_errors.py
|
ITMO-NSS-team/GEFEST
|
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
|
[
"BSD-3-Clause"
] | 2
|
2022-01-19T11:37:24.000Z
|
2022-03-24T19:35:33.000Z
|
import pytest
from copy import deepcopy
from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *
# Length and width used by the fixture polygons below.
poly_width = 10
poly_length = 20
# Fixture polygons built from corner points:
# - a rectangle placed entirely outside the domain bounds,
rectangle_points = [(-1, 40), (-1, poly_length+40), (-poly_width-10, poly_length+40), (-poly_width-10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])
# - a triangle whose outline is not closed,
triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])
# - a polygon with a self-intersecting outline.
incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])
# Default domain shared by the tests below.
domain = Domain()
def test_unclosed_poly():
    """postprocess() must repair a polygon whose outline is not closed."""
    structure = Structure([unclosed_triangle_poly])
    fixed = postprocess(structure, domain)
    assert unclosed_poly(structure, domain)
    assert not unclosed_poly(fixed, domain)
def test_self_intersection():
    """postprocess() must remove self-intersections from a polygon outline."""
    structure = Structure([incorrect_poly])
    fixed = postprocess(structure, domain)
    assert self_intersection(structure)
    assert not self_intersection(fixed)
def test_out_of_bound():
    """postprocess() must pull an out-of-bounds polygon back into the domain."""
    structure = Structure([out_bounds_rectangle_poly])
    fixed = postprocess(structure, domain)
    assert out_of_bound(structure, domain)
    assert not out_of_bound(fixed, domain)
def test_fixed_polys():
    """Domain fixed points replace a polygon that duplicates them."""
    domain = Domain(fixed_points=[[[15, 30],
                                   [40, 30],
                                   [15, 40]]])
    look_alike = Polygon('like_fixed', points=[Point(15, 30), Point(40, 30), Point(15, 40)])
    structure = Structure([look_alike, unclosed_triangle_poly])
    fixed = postprocess(structure, domain)
    ids = [poly.id for poly in fixed.polygons]
    assert all([np.isclose(len(fixed.polygons), 2),
                'like_fixed' not in ids,
                'fixed' in ids])
def test_too_close():
    """Two near-identical polygons must collapse into a single one."""
    duplicate = deepcopy(unclosed_triangle_poly)
    duplicate.id = 'same_triangle'
    structure = Structure([unclosed_triangle_poly, duplicate])
    fixed = postprocess(structure, domain)
    print(fixed.polygons)
    assert np.isclose(len(fixed.polygons), 1)
| 37.424658
| 107
| 0.739019
| 352
| 2,732
| 5.471591
| 0.198864
| 0.094496
| 0.07269
| 0.080997
| 0.454309
| 0.347871
| 0.207684
| 0.180685
| 0.120457
| 0.076843
| 0
| 0.024327
| 0.157394
| 2,732
| 72
| 108
| 37.944444
| 0.812337
| 0.032943
| 0
| 0.104167
| 0
| 0
| 0.026146
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.104167
| false
| 0
| 0.145833
| 0
| 0.25
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95a54ae27c88b1a727a1742ed1880093d3693e0
| 971
|
py
|
Python
|
hvac/api/secrets_engines/gcp.py
|
nested-tech/hvac
|
2a58ac9850b882e43c1617ae6b0ea93104c99794
|
[
"Apache-2.0"
] | null | null | null |
hvac/api/secrets_engines/gcp.py
|
nested-tech/hvac
|
2a58ac9850b882e43c1617ae6b0ea93104c99794
|
[
"Apache-2.0"
] | null | null | null |
hvac/api/secrets_engines/gcp.py
|
nested-tech/hvac
|
2a58ac9850b882e43c1617ae6b0ea93104c99794
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gcp methods module."""
from hvac import exceptions
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import DEFAULT_MOUNT_POINT, ALLOWED_CREDS_ENDPOINTS
class Gcp(VaultApiBase):
    """GCP secrets engine API methods."""

    def generate_credentials(self, roleset, endpoint='key', mount_point=DEFAULT_MOUNT_POINT):
        """Generate credentials under the given roleset via the chosen endpoint.

        :param roleset: name of the roleset to generate credentials against
        :param endpoint: credential type endpoint; must be one of
            ALLOWED_CREDS_ENDPOINTS
        :param mount_point: mount point of the GCP secrets engine
        :return: parsed JSON response from Vault
        :raises exceptions.ParamValidationError: for an unsupported endpoint
        """
        if endpoint not in ALLOWED_CREDS_ENDPOINTS:
            error_msg = 'invalid endpoint argument provided "{arg}", supported types: "{allowed_endpoints}"'
            raise exceptions.ParamValidationError(error_msg.format(
                arg=endpoint,
                allowed_endpoints=', '.join(ALLOWED_CREDS_ENDPOINTS),
            ))
        api_path = '/v1/{mount_point}/{endpoint}/{roleset}'.format(
            mount_point=mount_point,
            endpoint=endpoint,
            roleset=roleset,
        )
        response = self._adapter.get(url=api_path)
        return response.json()
| 34.678571
| 108
| 0.652935
| 106
| 971
| 5.754717
| 0.528302
| 0.098361
| 0.103279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002725
| 0.244078
| 971
| 27
| 109
| 35.962963
| 0.828338
| 0.063852
| 0
| 0
| 0
| 0
| 0.138581
| 0.06541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95b84a26deaf7cd8b371b13b34ee9e7005ee7c0
| 9,155
|
py
|
Python
|
ypricemagic/uniswap.py
|
poolpitako/ypricemagic
|
882aa2071a918937e77e0b85e5f52191a4714d28
|
[
"MIT"
] | null | null | null |
ypricemagic/uniswap.py
|
poolpitako/ypricemagic
|
882aa2071a918937e77e0b85e5f52191a4714d28
|
[
"MIT"
] | null | null | null |
ypricemagic/uniswap.py
|
poolpitako/ypricemagic
|
882aa2071a918937e77e0b85e5f52191a4714d28
|
[
"MIT"
] | null | null | null |
import token
from tokenize import tokenize
from brownie import Contract, chain
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from .utils.cache import memory
from .utils.multicall2 import fetch_multicall
from .interfaces.ERC20 import ERC20ABI
import ypricemagic.magic
import ypricemagic.utils.utils
from .constants import STABLECOINS, dai, usdc, usdt, wbtc, weth, sushi
# NOTE: If this is failing to pull a price for a token you need, it's likely because that token requires a special swap path.
# Please add a viable swap path below to fetch price data successfully.
#project.load()
if chain.id == 1:
    # Ethereum mainnet: Uniswap V2 and Sushiswap.
    FACTORIES = {
        "uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
        "sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac",
    }
    ROUTERS = {
        "uniswap": Contract("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
        "sushiswap": Contract("0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"),
    }
    # Tokens that cannot be priced through the default [token, WETH, USDC]
    # route; each entry maps a token address to an explicit swap path.
    SPECIAL_PATHS = {
        "sushiswap": {
            "0xEF69B5697f2Fb0345cC680210fD39b593a2f9684": ["0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
            ,"0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e": ["0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e","0xC28E27870558cF22ADD83540d2126da2e4b464c2",weth,usdc]
            ,"0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2": ["0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2","0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
            ,"0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6": ["0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6","0x87F5F9eBE40786D49D35E1B5997b07cCAA8ADbFF",weth,usdc]
            ,"0x4954Db6391F4feB5468b6B943D4935353596aEC9": ["0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
            ,"0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0": ["0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0","0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
            ,"0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d": ["0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d","0xba100000625a3754423978a60c9317c58a424e3D",weth,usdc]
            ,"0xBA50933C268F567BDC86E1aC131BE072C6B0b71a": ["0xBA50933C268F567BDC86E1aC131BE072C6B0b71a",weth,usdc]
            ,"0x6102407f07029892eB5Ff02164ADFaFb85f4d222": ["0x6102407f07029892eB5Ff02164ADFaFb85f4d222",usdt]
            ,"0x85034b3b2e292493D029443455Cc62ab669573B3": ["0x85034b3b2e292493D029443455Cc62ab669573B3","0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984",weth,usdc]
            ,"0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8": ["0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8", usdc]
            ,"0x383518188C0C6d7730D91b2c03a03C837814a899": ["0x383518188C0C6d7730D91b2c03a03C837814a899",dai]
            ,"0xafcE9B78D409bF74980CACF610AFB851BF02F257": ["0xafcE9B78D409bF74980CACF610AFB851BF02F257",wbtc,weth,usdc]
        },
        "uniswap": {
        }
    }
elif chain.id == 56:
    # Binance Smart Chain: PancakeSwap v1 and v2.
    ROUTERS = {
        "pancakeswapv2": Contract("0x10ED43C718714eb63d5aA57B78B54704E256024E"),
        "pancakeswapv1": Contract("0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F")
    }
    FACTORIES = {
        "pancakeswapv2": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73",
        "pancakeswapv1": "0xBCfCcbde45cE874adCB698cC183deBcF17952812"
    }
    SPECIAL_PATHS = {
        "pancakeswapv2": {
        },
        "pancakeswapv1": {
        }
    }
elif chain.id == 137:
    # Polygon: QuickSwap. (Comment fixed: this branch was mislabeled "bsc".)
    ROUTERS = {
        "quickswap": Contract("0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff")
    }
    FACTORIES = {
        "quickswap": "0x5757371414417b8C6CAad45bAeF941aBc7d3Ab32",
    }
    SPECIAL_PATHS = {
        "quickswap": {
        }
    }
# Reverse lookups: factory address -> router contract / protocol name.
FACTORY_TO_ROUTER = {FACTORIES[name]: ROUTERS[name] for name in FACTORIES}
FACTORY_TO_PROTOCOL = {FACTORIES[name]: name for name in FACTORIES}
@ttl_cache(ttl=36000)
def get_price(token_in, token_out=usdc, router="uniswap", block=None, paired_against=weth):
    """
    Calculate a price based on Uniswap Router quote for selling one `token_in`.
    Always uses intermediate WETH pair if `[token_in,weth,token_out]` swap path available.

    Returns the fee-adjusted output amount for one `token_in` (1 for known
    stablecoins), or None when the router cannot quote the chosen path.
    """
    if chain.id == 56 and token_out == usdc:
        # On BSC, quote against BUSD instead of USDC.
        busd = Contract("0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56")
        token_out = busd
    tokens = [str(token) for token in [token_in, token_out]]
    amount_in = 10 ** ypricemagic.utils.utils.get_decimals_with_override(tokens[0])
    # Pick a swap path; the elif order below is significant.
    if str(token_in) in STABLECOINS:
        return 1
    elif str(paired_against) in STABLECOINS and str(token_out) in STABLECOINS:
        path = [token_in, paired_against]
    elif weth in (token_in, token_out):
        path = [token_in, token_out]
    elif paired_against == sushi and token_out != sushi:
        path = [token_in,sushi,weth,token_out]
    elif str(token_in) in SPECIAL_PATHS[router].keys() and str(token_out) in STABLECOINS:
        path = SPECIAL_PATHS[router][str(token_in)]
    elif chain.id == 56: #bsc
        from .constants import cake, wbnb
        if wbnb in (token_in, token_out):
            path = [token_in, token_out]
        elif cake in (token_in, token_out):
            path = [token_in, token_out]
        else:
            path = [token_in,wbnb,token_out]
    elif chain.id == 137: # polygon (comment fixed: was mislabeled "bsc")
        from .constants import wmatic
        if wmatic in (token_in, token_out):
            path = [token_in, token_out]
        else:
            path = [token_in,wmatic,token_out]
    else:
        path = [token_in, weth, token_out]
    # One 0.30% LP fee is charged per hop in the path.
    fees = 0.997 ** (len(path) - 1)
    if router in ROUTERS:
        router = ROUTERS[router]
    try:
        quote = router.getAmountsOut(amount_in, path, block_identifier=block)
        amount_out = quote[-1] / 10 ** ypricemagic.utils.utils.get_decimals_with_override(str(path[-1]))
        return amount_out / fees
    except ValueError as e:
        # Router could not quote this path (e.g. no pair); no price.
        return
@ttl_cache(ttl=600)
def get_price_v1(asset, block=None):
    """Price `asset` in USDC using the Uniswap V1 factory/exchange contracts.

    Quotes asset -> ETH -> USDC and adjusts for the 0.30% fee per hop.
    Returns None when the asset has no V1 exchange or the quote reverts
    (best-effort by design).
    """
    factory = Contract("0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95")
    try:
        exchange = Contract(factory.getExchange(asset))
        eth_bought = exchange.getTokenToEthInputPrice(
            10 ** ypricemagic.utils.utils.get_decimals_with_override(asset),
            block_identifier=block,
        )
        exchange = Contract(factory.getExchange(usdc))
        usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6
    except (ContractNotFound, ValueError):
        # Unused exception binding removed; the implicit fall-through return
        # is now explicit so the best-effort contract is visible.
        return None
    fees = 0.997 ** 2  # one 0.30% fee per hop: asset->ETH, ETH->USDC
    return usdc_bought / fees
@memory.cache()
def is_uniswap_pool(address):
    """Whether `address` looks like a Uniswap/Sushiswap pair, i.e. exposes a
    factory() that we recognize in FACTORY_TO_ROUTER."""
    try:
        return Contract(address).factory() in FACTORY_TO_ROUTER
    except (ValueError, OverflowError, AttributeError):
        return False
@ttl_cache(ttl=600)
def lp_price(address, block=None):
    """ Get Uniswap/Sushiswap LP token price.

    Values the pool as the sum of both reserves priced in USD divided by the
    LP token supply. Returns None when neither side can be priced.
    """
    def extrapolate_balance_if_needed():
        # When only one side of the pair could be priced, assume the pool is
        # balanced and mirror the priced side's USD value onto the other.
        nonlocal balances
        if balances[0] and not balances[1]:
            balances[1] = balances[0]
        if balances[1] and not balances[0]:
            balances[0] = balances[1]
        return balances
    pair = Contract(address)
    if chain.id not in [56, 137]: # No multicall2 on bsc or poly
        factory, token0, token1, supply, reserves = fetch_multicall(
            [pair, "factory"],
            [pair, "token0"],
            [pair, "token1"],
            [pair, "totalSupply"],
            [pair, "getReserves"],
            block=block
        )
    else:
        factory = pair.factory(block_identifier = block)
        token0 = pair.token0(block_identifier = block)
        token1 = pair.token1(block_identifier = block)
        supply = pair.totalSupply(block_identifier = block)
        reserves = pair.getReserves(block_identifier = block)
    router = FACTORY_TO_PROTOCOL[factory]
    tokens = [ypricemagic.utils.utils.Contract_with_erc20_fallback(token) for token in [token0, token1]]
    price0 = get_price(tokens[0], paired_against=tokens[1], router=router, block=block)
    price1 = get_price(tokens[1], paired_against=tokens[0], router=router, block=block)
    prices = [price0,price1]
    scales = [10 ** ypricemagic.utils.utils.get_decimals_with_override(str(token)) for token in tokens]
    supply = supply / 1e18
    try:
        balances = [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
    except TypeError: # If can't get price via router, try to get from elsewhere
        if not price0:
            try:
                price0 = ypricemagic.magic.get_price(tokens[0], block)
            except ypricemagic.magic.PriceError:
                price0 = None  # BUG FIX: was `price0 is None`, a no-op comparison
        if not price1:
            try:
                price1 = ypricemagic.magic.get_price(tokens[1], block)
            except ypricemagic.magic.PriceError:
                price1 = None  # BUG FIX: was `price1 is None`, a no-op comparison
        prices = [price0,price1]
        balances = [None,None] # [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
        if price0:
            balances[0] = reserves[0] / scales[0] * price0
        if price1:
            balances[1] = reserves[1] / scales[1] * price1
        balances = extrapolate_balance_if_needed()
    try:
        return sum(balances) / supply
    except TypeError:
        # Neither side could be priced; no LP price available.
        return
| 43.388626
| 205
| 0.68935
| 888
| 9,155
| 6.980856
| 0.228604
| 0.025972
| 0.021294
| 0.024197
| 0.138087
| 0.091144
| 0.087433
| 0.077432
| 0.062591
| 0.046782
| 0
| 0.18197
| 0.216057
| 9,155
| 211
| 206
| 43.388626
| 0.681761
| 0.0639
| 0
| 0.206522
| 0
| 0
| 0.249824
| 0.226283
| 0
| 0
| 0.226283
| 0
| 0
| 1
| 0.027174
| false
| 0.01087
| 0.070652
| 0
| 0.146739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95bf173c71497f893fb19ff1c8e2576967d5c36
| 611
|
py
|
Python
|
configs/configuration_textrnn.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | 2
|
2020-08-19T12:32:21.000Z
|
2021-11-08T15:50:08.000Z
|
configs/configuration_textrnn.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | null | null | null |
configs/configuration_textrnn.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | 1
|
2020-08-19T12:32:48.000Z
|
2020-08-19T12:32:48.000Z
|
""" TextRNN model configuration """
class TextRNNConfig(object):
    """Configuration container for a TextRNN model.

    Stores embedding and LSTM hyper-parameters. Unknown keyword arguments
    are accepted (and ignored) for forward compatibility.
    """

    def __init__(
        self,
        vocab_size=30000,
        pretrained_embedding=None,
        embedding_matrix=None,
        embedding_dim=300,
        embedding_dropout=0.3,
        lstm_hidden_size=128,
        output_dim=1,
        **kwargs
    ):
        # BUG FIX: vocab_size was accepted but never stored on the instance.
        self.vocab_size = vocab_size
        self.pretrained_embedding = pretrained_embedding
        self.embedding_matrix = embedding_matrix
        self.embedding_dim = embedding_dim
        self.embedding_dropout = embedding_dropout
        self.lstm_hidden_size = lstm_hidden_size
        self.output_dim = output_dim
| 27.772727
| 56
| 0.657938
| 65
| 611
| 5.784615
| 0.415385
| 0.151596
| 0.111702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031532
| 0.273322
| 611
| 21
| 57
| 29.095238
| 0.815315
| 0.04419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95d5c160689db0e0a64a0a455645d72081698d5
| 2,992
|
py
|
Python
|
core/src/zeit/cms/content/caching.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 5
|
2019-05-16T09:51:29.000Z
|
2021-05-31T09:30:03.000Z
|
core/src/zeit/cms/content/caching.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 107
|
2019-05-24T12:19:02.000Z
|
2022-03-23T15:05:56.000Z
|
core/src/zeit/cms/content/caching.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 3
|
2020-08-14T11:01:17.000Z
|
2022-01-08T17:32:19.000Z
|
from collections import defaultdict
from logging import getLogger
from operator import itemgetter
from os import environ
from time import time
from zope.cachedescriptors.property import Lazy as cachedproperty
from zeit.cms.content.sources import FEATURE_TOGGLES
from zope.component import getUtility
from zeit.connector.interfaces import IConnector
from zeit.connector.filesystem import Connector
log = getLogger(__name__)
class ContentCache(object):
    """In-process cache for content, keyed by unique_id and invalidated
    by connector mtime.

    Caching is only activated when the CONTENT_CACHE_SIZE environment
    variable is set AND the registered IConnector utility is the
    filesystem Connector; otherwise every lookup falls through to the
    caller-supplied factory.
    """

    @cachedproperty
    def cache(self):
        # Lazily initialise the cache store on first access.
        size = environ.get('CONTENT_CACHE_SIZE')
        check = environ.get('CONTENT_CACHE_CHECK')
        connector = getUtility(IConnector)
        if size is not None and type(connector) is Connector:
            self.size = int(size)
            # NOTE(review): `self.size / 5` is true division, so `check`
            # may end up a float here.  `misses % check` below still
            # works, but `size // 5` was probably intended — confirm.
            self.check = int(check) if check is not None else self.size / 5
            self.connector = connector
            # Assigning the instance attribute shadows this Lazy property
            # on subsequent reads.  Each per-id entry tracks a usage
            # counter, per-suffix mtimes, and the cached data.
            # NOTE(review): `defaultdict` is not imported in this chunk —
            # presumably imported earlier in the file; confirm.
            self.cache = defaultdict(lambda: dict(used=0, mtimes={}, data={}))
            self.hits = self.misses = 0
            log.info('initialized content cache (size %s)', size)
            return self.cache
        else:
            return None

    def get(self, unique_id, key, factory, suffix=''):
        """Return the cached value for (unique_id, key), calling
        ``factory`` on a miss.

        Falls back to ``factory()`` without caching when the cache is
        disabled, the 'content_caching' feature toggle is off, or the
        connector cannot supply a usable mtime for ``unique_id``.
        """
        cache = self.cache
        if cache is None or not FEATURE_TOGGLES.find('content_caching'):
            return factory()
        try:
            mtime = int(self.connector.mtime(unique_id, suffix))
        except (ValueError, TypeError):
            mtime = None
        if mtime is None:
            return factory()
        obj = cache[unique_id]
        obj['used'] += 1
        obj['last'] = time()
        # Any mtime change for this suffix invalidates *all* cached data
        # for this unique_id.
        if mtime != obj['mtimes'].get(suffix):
            obj['data'].clear()
            obj['mtimes'][suffix] = mtime
        cache = obj['data']
        if key not in cache:
            cache[key] = factory()
            self.misses += 1
            log.debug('added %s (%s)', key, mtime)
            # Run the eviction pass once every `check` misses.
            if self.misses % self.check == 0:
                self.cleanup()
        else:
            self.hits += 1
        return cache[key]

    def cleanup(self):
        """Evict least-recently-used entries until the cache is back at
        its configured size."""
        cache = self.cache
        over = len(cache) - self.size
        log.info('size: %d/%d, hits: %d, misses: %d',
                 over + self.size, self.size, self.hits, self.misses)
        if over > 0:
            log.debug('removing %d items', over)
            # Sort ids by last-access time; drop the `over` oldest.
            last = sorted((cache[uid]['last'], uid) for uid in cache)
            for _, (_, uid) in zip(range(over), last):
                del cache[uid]

    @property
    def usage(self):
        # Per-id usage statistics, sorted ascending by hit count.
        cache = self.cache
        stats = (dict(uid=uid, used=cache[uid]['used']) for uid in cache)
        return sorted(stats, key=itemgetter('used'))

    def info(self):
        """Return a summary dict: size, entry count, hits, misses, and a
        most-used-first usage mapping."""
        cache = self.cache
        usage = {info['uid']: info['used'] for info in reversed(self.usage)}
        return dict(
            size=self.size,
            count=len(cache),
            hits=self.hits,
            misses=self.misses,
            usage=usage)
# Module-level singleton; `get` and `info` are the public module API,
# bound to the shared instance's methods.
__cache = ContentCache()
get = __cache.get
info = __cache.info
| 32.521739
| 78
| 0.57988
| 363
| 2,992
| 4.719008
| 0.267218
| 0.047285
| 0.032691
| 0.031524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003863
| 0.307821
| 2,992
| 91
| 79
| 32.879121
| 0.823274
| 0
| 0
| 0.1
| 0
| 0
| 0.065842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.2875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95f2f6c2258ef8998ac2a053019013dbf870640
| 2,351
|
py
|
Python
|
account/views.py
|
KimSoungRyoul/drf_unitteset_study_project
|
9a0d824bdc6343eeba6209299c077a6e9d280516
|
[
"MIT"
] | null | null | null |
account/views.py
|
KimSoungRyoul/drf_unitteset_study_project
|
9a0d824bdc6343eeba6209299c077a6e9d280516
|
[
"MIT"
] | null | null | null |
account/views.py
|
KimSoungRyoul/drf_unitteset_study_project
|
9a0d824bdc6343eeba6209299c077a6e9d280516
|
[
"MIT"
] | null | null | null |
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins
from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
    operation_description="회원 개인정보 조회 API",
    filter_inspectors=[DjangoFilterDescriptionInspector],
))
@method_decorator(name='create', decorator=swagger_auto_schema(
    operation_description="회원 가입 API",
))
@method_decorator(name='update', decorator=swagger_auto_schema(
    operation_description="회원 정보 수정 API",
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
    operation_description="회원 탈퇴 API",
))
class CustomerAPIViewSet(mixins.CreateModelMixin,
                         mixins.DestroyModelMixin,
                         mixins.RetrieveModelMixin,
                         mixins.UpdateModelMixin,
                         viewsets.GenericViewSet):
    """Customer account endpoints: sign-up, info, update, withdrawal."""

    queryset: QuerySet = Customer.objects
    permission_classes = (IsAuthenticated,)
    http_method_names = ['get', 'post', 'put', 'delete']

    def get_serializer_class(self):
        """GET uses the read-only info serializer; every other allowed
        verb (POST/PUT/DELETE) uses the sign-up form serializer, exactly
        as the original per-method branches did."""
        if self.request.method == 'GET':
            return CustomerInfoSerializer
        return SignUpFormSerializer

    def get_permissions(self):
        """Allow anonymous sign-up (POST); all other verbs require an
        authenticated user.

        Bug fix: the original assigned ``permission_classes`` only in the
        POST branch, so any other method raised UnboundLocalError at the
        list comprehension.  Non-POST requests now fall back to the
        class-level ``permission_classes`` (IsAuthenticated).
        """
        if self.request.method == 'POST':
            permission_classes = [AllowAny]
        else:
            permission_classes = self.permission_classes
        return [permission() for permission in permission_classes]

    def create(self, request, *args, **kwargs):
        """Register a customer and return only the new primary key
        (201 Created)."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
| 40.534483
| 103
| 0.722671
| 244
| 2,351
| 6.795082
| 0.368852
| 0.039807
| 0.051267
| 0.062726
| 0.205066
| 0.205066
| 0.115802
| 0
| 0
| 0
| 0
| 0.001579
| 0.191833
| 2,351
| 57
| 104
| 41.245614
| 0.871053
| 0.009783
| 0
| 0.18
| 0
| 0
| 0.047721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.2
| 0
| 0.46
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b95fe9aa9fab4f285d9028f8b01c9820d83254e4
| 3,831
|
py
|
Python
|
src/front-door/azext_front_door/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/front-door/azext_front_door/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/front-door/azext_front_door/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
def get_name_or_id_validator(dest, child_type=None, resource_type='Frontdoors', resource_namespace='Microsoft.Network',
resource_name_dest='front_door_name'):
def _validate_name_or_id(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
subscription_id = get_subscription_id(cmd.cli_ctx)
resource_group = namespace.resource_group_name
names_or_ids = getattr(namespace, dest)
is_list = True
# treat single values as a list, but convert back in the end
if not isinstance(names_or_ids, list):
is_list = False
names_or_ids = [names_or_ids]
if names_or_ids == [None] or not names_or_ids:
return
ids = []
for val in names_or_ids:
id_params = {
'subscription': subscription_id,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'name': getattr(namespace, resource_name_dest) if child_type else val,
'child_type_1': child_type,
'child_name_1': val if child_type else None
}
if not is_valid_resource_id(val):
val = resource_id(**id_params)
ids.append(val)
setattr(namespace, dest, ids if is_list else ids[0])
return _validate_name_or_id
def validate_waf_policy(cmd, namespace):
get_name_or_id_validator(
dest='waf_policy',
resource_type='WebApplicationFirewallPolicy'
)(cmd, namespace)
def validate_keyvault(cmd, namespace):
get_name_or_id_validator(
dest='vault',
resource_type='vaults',
resource_namespace='Microsoft.Keyvault'
)(cmd, namespace)
def validate_load_balancing_settings(cmd, namespace):
get_name_or_id_validator('load_balancing_settings', 'loadBalancingSettings')(cmd, namespace)
def validate_probe_settings(cmd, namespace):
get_name_or_id_validator('probe_settings', 'healthProbeSettings')(cmd, namespace)
def validate_frontend_endpoints(cmd, namespace):
get_name_or_id_validator('frontend_endpoints', 'frontendEndpoints')(cmd, namespace)
def validate_backend_pool(cmd, namespace):
get_name_or_id_validator('backend_pool', 'backendPools')(cmd, namespace)
def validate_rules_engine(cmd, namespace):
get_name_or_id_validator('rules_engine', 'rulesEngines')(cmd, namespace)
# pylint: disable=protected-access
class MatchConditionAction(argparse._AppendAction):
# pylint: disable=no-self-use
def parse_match_condition(self, values):
from azext_front_door.vendored_sdks.models import MatchCondition
if not isinstance(values, list):
values = values.split(' ')
try:
return MatchCondition(
match_variable=values[0],
operator=values[1],
match_value=values[2:]
)
except IndexError:
from knack.util import CLIError
raise CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')
def __call__(self, parser, namespace, values, option_string=None):
match_condition = self.parse_match_condition(values)
super(MatchConditionAction, self).__call__(parser, namespace, match_condition, option_string)
| 35.472222
| 119
| 0.645262
| 429
| 3,831
| 5.426573
| 0.319347
| 0.07732
| 0.034364
| 0.037801
| 0.116838
| 0.116838
| 0.106529
| 0.065292
| 0
| 0
| 0
| 0.002024
| 0.226312
| 3,831
| 107
| 120
| 35.803738
| 0.783401
| 0.119029
| 0
| 0.057971
| 0
| 0
| 0.120285
| 0.021384
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15942
| false
| 0
| 0.072464
| 0
| 0.289855
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b960f3f5be88ef82754359823e7c6a9b7ed78089
| 7,763
|
py
|
Python
|
mimesis/data/int/development.py
|
DevAerial/mimesis
|
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
|
[
"MIT"
] | null | null | null |
mimesis/data/int/development.py
|
DevAerial/mimesis
|
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
|
[
"MIT"
] | 1
|
2022-03-26T07:46:59.000Z
|
2022-03-26T07:47:20.000Z
|
mimesis/data/int/development.py
|
DevAerial/mimesis
|
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
|
[
"MIT"
] | null | null | null |
"""Provides all the data related to the development."""
LICENSES = [
"Apache License, 2.0 (Apache-2.0)",
"The BSD 3-Clause License",
"The BSD 2-Clause License",
"GNU General Public License (GPL)",
"General Public License (LGPL)",
"MIT License (MIT)",
"Mozilla Public License 2.0 (MPL-2.0)",
"Common Development and Distribution License (CDDL-1.0)",
"Eclipse Public License (EPL-1.0)",
]
# Programming language names, roughly alphabetical.
PROGRAMMING_LANGS = [
    "ASP",
    "Assembly",
    "AutoIt",
    "Awk",
    "Bash",
    "C",
    "C Shell",
    "C#",
    "C++",
    "Caml",
    "Ceylon",
    "Clojure",
    "CoffeeScript",
    "Common Lisp",
    "D",
    "Dart",
    "Delphi",
    "Dylan",
    "ECMAScript",
    "Elixir",
    "Emacs Lisp",
    "Erlang",
    "F#",
    "Falcon",
    "Fortran",
    "GNU Octave",
    "Go",
    "Groovy",
    "Haskell",
    "haXe",
    "Io",
    "J#",
    "Java",
    "JavaScript",
    "Julia",
    "Kotlin",
    "Lisp",
    "Lua",
    "Mathematica",
    "Objective-C",
    "OCaml",
    "Perl",
    "PHP",
    "PL-I",
    "PL-SQL",
    "PowerShell",
    "Prolog",
    "Python",
    "R",
    "Racket",
    "Ruby",
    "Rust",
    "Scala",
    "Scheme",
    "Smalltalk",
    "Tcl",
    "Tex",
    "Transact-SQL",
    "TypeScript",
    "Z shell",
]
# Operating system / distribution names.
# Bug fix: the original list contained "macOS" twice, which doubled its
# weight for any random choice drawn from this list; the duplicate entry
# has been removed.
OS = [
    "Arch",
    "CentOS",
    "Debian",
    "Fedora",
    "FreeBSD",
    "Gentoo",
    "Kali",
    "Lubuntu",
    "Manjaro",
    "Mint",
    "OS X",
    "macOS",
    "OpenBSD",
    "PCLinuxOS",
    "Slackware",
    "Ubuntu",
    "Windows 10",
    "Windows 7",
    "Windows 8",
    "Windows 8.1",
    "Zorin",
    "elementaryOS",
    "openSUSE",
]
# Common home-directory folder names.
FOLDERS = [
    "Development",
    "Downloads",
    "Documents",
    "Music",
    "Video",
    "Work",
    "Pictures",
    "Desktop",
    "Study",
]
# Words usable as fake project names: dinosaur genera followed by
# assorted English loanwords/vocabulary.
PROJECT_NAMES = [
    "aardonyx",
    "abelisaurus",
    "achelousaurus",
    "achillobator",
    "acrocanthosaurus",
    "aegyptosaurus",
    "afrovenator",
    "agilisaurus",
    "alamosaurus",
    "albertaceratops",
    "albertosaurus",
    "alectrosaurus",
    "alioramus",
    "allosaurus",
    "alvarezsaurus",
    "amargasaurus",
    "ammosaurus",
    "ampelosaurus",
    "amygdalodon",
    "anatotitan",
    "anchiceratops",
    "anchisaurus",
    "ankylosaurus",
    "anserimimus",
    "antarctopelta",
    "antarctosaurus",
    "apatosaurus",
    "aragosaurus",
    "aralosaurus",
    "archaeoceratops",
    "archaeopteryx",
    "archaeornithomimus",
    "argentinosaurus",
    "arrhinoceratops",
    "atlascopcosaurus",
    "aucasaurus",
    "austrosaurus",
    "avaceratops",
    "avalonia",
    "avimimus",
    "azendohsaurus",
    "bactrosaurus",
    "bagaceratops",
    "bambiraptor",
    "barapasaurus",
    "barosaurus",
    "baryonyx",
    "becklespinax",
    "beipiaosaurus",
    "bellusaurus",
    "borogovia",
    "brachiosaurus",
    "brachyceratops",
    "bugenasaura",
    "buitreraptor",
    "camarasaurus",
    "camptosaurus",
    "carnotaurus",
    "caudipteryx",
    "cedarpelta",
    "centrosaurus",
    "ceratosaurus",
    "cetiosauriscus",
    "cetiosaurus",
    "chaoyangsaurus",
    "chasmosaurus",
    "chialingosaurus",
    "chindesaurus",
    "chinshakiangosaurus",
    "chirostenotes",
    "chubutisaurus",
    "chungkingosaurus",
    "citipati",
    "coelophysis",
    "coelurus",
    "coloradisaurus",
    "compsognathus",
    "conchoraptor",
    "confuciusornis",
    "corythosaurus",
    "cryolophosaurus",
    "dacentrurus",
    "daspletosaurus",
    "datousaurus",
    "deinocheirus",
    "deinonychus",
    "deltadromeus",
    "diceratops",
    "dicraeosaurus",
    "dilophosaurus",
    "diplodocus",
    "dracorex",
    "dravidosaurus",
    "dromaeosaurus",
    "dromiceiomimus",
    "dryosaurus",
    "dryptosaurus",
    "dubreuillosaurus",
    "edmontonia",
    "edmontosaurus",
    "einiosaurus",
    "elaphrosaurus",
    "emausaurus",
    "eolambia",
    "eoraptor",
    "eotyrannus",
    "equijubus",
    "erketu",
    "erlikosaurus",
    "euhelopus",
    "euoplocephalus",
    "europasaurus",
    "euskelosaurus",
    "eustreptospondylus",
    "fukuiraptor",
    "fukuisaurus",
    "gallimimus",
    "gargoyleosaurus",
    "garudimimus",
    "gasosaurus",
    "gasparinisaura",
    "gastonia",
    "giganotosaurus",
    "gilmoreosaurus",
    "giraffatitan",
    "gobisaurus",
    "gorgosaurus",
    "goyocephale",
    "graciliceratops",
    "gryposaurus",
    "guaibasaurus",
    "guanlong",
    "hadrosaurus",
    "hagryphus",
    "haplocanthosaurus",
    "harpymimus",
    "herrerasaurus",
    "hesperosaurus",
    "heterodontosaurus",
    "homalocephale",
    "huayangosaurus",
    "hylaeosaurus",
    "hypacrosaurus",
    "hypselosaurus",
    "hypsilophodon",
    "iguanodon",
    "indosuchus",
    "ingenia",
    "irritator",
    "isisaurus",
    "janenschia",
    "jaxartosaurus",
    "jingshanosaurus",
    "jinzhousaurus",
    "jobaria",
    "juravenator",
    "kentrosaurus",
    "khaan",
    "kotasaurus",
    "kritosaurus",
    "lamaceratops",
    "lambeosaurus",
    "lapparentosaurus",
    "leaellynasaura",
    "leptoceratops",
    "lesothosaurus",
    "lexovisaurus",
    "liaoceratops",
    "liaoxiornis",
    "ligabuesaurus",
    "liliensternus",
    "lophorhothon",
    "lophostropheus",
    "lufengosaurus",
    "lurdusaurus",
    "lycorhinus",
    "magyarosaurus",
    "maiasaura",
    "majungatholus",
    "malawisaurus",
    "mamenchisaurus",
    "mapusaurus",
    "marshosaurus",
    "masiakasaurus",
    "massospondylus",
    "maxakalisaurus",
    "megalosaurus",
    "melanorosaurus",
    "metriacanthosaurus",
    "microceratops",
    "micropachycephalosaurus",
    "microraptor",
    "minmi",
    "monolophosaurus",
    "mononykus",
    "mussaurus",
    "muttaburrasaurus",
    "nanotyrannus",
    "nanshiungosaurus",
    "nemegtosaurus",
    "neovenator",
    "neuquenosaurus",
    "nigersaurus",
    "nipponosaurus",
    "noasaurus",
    "nodosaurus",
    "nomingia",
    "nothronychus",
    "nqwebasaurus",
    "omeisaurus",
    "ornitholestes",
    "ornithomimus",
    "orodromeus",
    "oryctodromeus",
    "othnielia",
    "ouranosaurus",
    "oviraptor",
    "rebbachisaurus",
    "rhabdodon",
    "rhoetosaurus",
    "rinchenia",
    "riojasaurus",
    "rugops",
    "saichania",
    "saltasaurus",
    "saltopus",
    "sarcosaurus",
    "saurolophus",
    "sauropelta",
    "saurophaganax",
    "saurornithoides",
    "scelidosaurus",
    "scutellosaurus",
    "secernosaurus",
    "segisaurus",
    "segnosaurus",
    "seismosaurus",
    "shamosaurus",
    "shanag",
    "shantungosaurus",
    "shunosaurus",
    "shuvuuia",
    "silvisaurus",
    "sinocalliopteryx",
    "sinornithosaurus",
    "sinosauropteryx",
    "sinraptor",
    "sinvenator",
    "zalmoxes",
    "zephyrosaurus",
    "zuniceratops",
    "byzantine",
    "svengali",
    "accolade",
    "acrimony",
    "angst",
    "anomaly",
    "antidote",
    "baroque",
    "bona_fide",
    "bourgeois",
    "bravado",
    "brogue",
    "brusque",
    "cacophony",
    "caustic",
    "charisma",
    "cloying",
    "deja-vu",
    "dichotomy",
    "elan",
    "ennui",
    "epitome",
    "esoteric",
    "euphemism",
    "faux pas",
    "fiasco",
    "finagle",
    "glib",
    "harbinger",
    "hedonist",
    "heresy",
    "idyllic",
    "insidious",
    "junket",
    "kitsch",
    "litany",
    "lurid",
    "malaise",
    "malinger",
    "mantra",
    "maudlin",
    "mercenary",
    "misnomer",
    "nirvana",
    "oblivion",
    "ogle",
    "ostracize",
    "panacea",
    "paradox",
    "peevish",
    "propriety",
    "revel",
    "rhetoric",
    "spartan",
    "stigma",
    "stoic",
    "suave",
    "sycophant",
    "tirade",
    "tryst",
    "untenable",
    "vicarious",
    "vile",
    "waft",
    "zealous",
]
| 17.845977
| 61
| 0.551977
| 493
| 7,763
| 8.685598
| 0.900609
| 0.001868
| 0.004204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003572
| 0.278758
| 7,763
| 434
| 62
| 17.887097
| 0.761207
| 0.006312
| 0
| 0.004673
| 0
| 0
| 0.554748
| 0.002984
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b962302fa813576c8cf57a4deea0db5f25dfb918
| 620
|
py
|
Python
|
docs/mathparse.py
|
pcmoritz/flow
|
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
|
[
"MIT"
] | 16
|
2018-05-25T06:30:28.000Z
|
2020-08-08T00:03:47.000Z
|
docs/mathparse.py
|
pcmoritz/flow
|
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
|
[
"MIT"
] | 46
|
2018-05-22T21:32:55.000Z
|
2019-06-12T13:10:02.000Z
|
docs/mathparse.py
|
pcmoritz/flow
|
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
|
[
"MIT"
] | 6
|
2018-06-22T14:59:14.000Z
|
2019-08-29T06:00:34.000Z
|
"""
A preliminary attempt at parsing an RST file's math syntax
in order to make math render as inline rather than display
mode. This doesn't work as of yet but might be useful.
It could, however, be not useful if there's a pandoc option
for converting .md to .rst that makes math inline and not
display. Keeping it around, though.
"""
import re
s = """Define
.. math:: v_{des}
as the desired velocity,
.. math:: 1^k
a vector of ones of length"""
with open('/Users/nishant/Downloads/tutorialtest.rst', 'r') as myfile:
s = myfile.read()
print([elem[11:-2] for elem in re.findall('\n.. math:: *\S*\n\n', s)])
| 22.962963
| 70
| 0.693548
| 110
| 620
| 3.9
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.180645
| 620
| 26
| 71
| 23.846154
| 0.836614
| 0.527419
| 0
| 0
| 0
| 0
| 0.550877
| 0.14386
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96253f9f9bc87e42d80842aebed3aa7dacb859b
| 1,994
|
py
|
Python
|
lib/layout/primitives.py
|
tailhook/pyzza
|
610be6ee4bea9b64f8226faf7338523fdafdf2cf
|
[
"MIT"
] | 2
|
2015-08-07T15:39:25.000Z
|
2019-03-31T12:45:37.000Z
|
lib/layout/primitives.py
|
tailhook/pyzza
|
610be6ee4bea9b64f8226faf7338523fdafdf2cf
|
[
"MIT"
] | null | null | null |
lib/layout/primitives.py
|
tailhook/pyzza
|
610be6ee4bea9b64f8226faf7338523fdafdf2cf
|
[
"MIT"
] | null | null | null |
from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement
# NOTE(review): this file appears to be pyzza (Python-syntax compiled to
# Flash): `@package`, `values()` and `line.length` are pyzza/AS3
# constructs, not CPython — confirm before running under CPython.
@package('layout')
class Poly(Shape):
    # Filled polygon shape drawn from coordinates in the 0..1 range,
    # scaled to the widget size at draw time.
    __slots__ = ('fillcolor', 'sequence')

    def __init__(self, name, fillcolor, seq, states):
        super().__init__(name, states)
        self.fillcolor = fillcolor  # fill color applied to every polygon
        self.sequence = seq  # list of polygons, each a list of [x, y] pairs

    def draw(self, w, h):
        # Clear and redraw every polygon scaled to (w, h).
        g = self.graphics
        g.clear()
        for line in values(self.sequence):
            g.beginFill(self.fillcolor)
            g.moveTo(int(line[0][0]*w), int(line[0][1]*h))
            for idx in range(1, line.length):
                g.lineTo(int(line[idx][0]*w), int(line[idx][1]*h))
            g.endFill()
@package('layout')
class RoundRect(Shape):
    # Solid rounded rectangle filling the whole widget area.
    __slots__ = ('fillcolor', 'radius')

    def __init__(self, name, fillcolor, radius, states):
        super().__init__(name, states)
        self.fillcolor = fillcolor  # fill color
        self.radius = radius  # corner radius, used for both x and y

    def draw(self, width, height):
        g = self.graphics
        g.clear()
        g.beginFill(self.fillcolor)
        g.drawRoundRect(0, 0, width, height, self.radius, self.radius)
        g.endFill()
@package('layout')
class TextLine(Widget):
    # Single line of text rendered with the Flash Text Engine.
    __slots__ = ('format', 'text', 'textline')

    def __init__(self, format, text, name, states):
        self.format = format  # flash.text.engine text format
        self.text = text  # string content to render
        super().__init__(name, states)

    def draw(self, width, height):
        # Replace any previously created text line with a fresh one laid
        # out to the current width.
        # NOTE(review): `self.textline` is read here before any visible
        # assignment — presumably pyzza slots default to a falsy value;
        # under CPython this would raise AttributeError on first draw.
        if self.textline:
            self.removeChild(self.textline)
        tb = TextBlock()
        tb.content = TextElement(self.text, self.format)
        self.textline = tb.createTextLine(None, width)
        self.addChild(self.textline)
@package('layout')
class CenteredLine(TextLine):
    # TextLine variant that centers the rendered line in the widget.
    def __init__(self, format, text, name, states):
        super().__init__(format, text, name, states)

    def draw(self, width, height):
        # Let TextLine render, then center the result in both axes.
        super().draw(width, height)
        self.textline.x = int((width - self.textline.width)/2)
        self.textline.y = int((height - self.textline.height)/2)
| 32.688525
| 70
| 0.609829
| 243
| 1,994
| 4.823045
| 0.251029
| 0.081911
| 0.061433
| 0.048635
| 0.385666
| 0.208191
| 0.208191
| 0.153584
| 0.087031
| 0
| 0
| 0.007343
| 0.248746
| 1,994
| 60
| 71
| 33.233333
| 0.775033
| 0
| 0
| 0.415094
| 0
| 0
| 0.037111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.150943
| false
| 0
| 0.037736
| 0
| 0.320755
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b963a238595dc05d6bc40e6f5888099b52a8fc14
| 20,515
|
py
|
Python
|
tests/testing_server.py
|
ImportTaste/WebRequest
|
0cc385622624de16ec980e0c12d9080d593cab74
|
[
"WTFPL"
] | null | null | null |
tests/testing_server.py
|
ImportTaste/WebRequest
|
0cc385622624de16ec980e0c12d9080d593cab74
|
[
"WTFPL"
] | null | null | null |
tests/testing_server.py
|
ImportTaste/WebRequest
|
0cc385622624de16ec980e0c12d9080d593cab74
|
[
"WTFPL"
] | null | null | null |
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False):
# print("Capturing expected headers:")
# print(expected_headers)
assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" & type(expected_headers)
for key, val in expected_headers.items():
assert isinstance(key, str)
assert isinstance(val, str)
cookie_key = uuid.uuid4().hex
log = logging.getLogger("Main.TestServer")
sucuri_reqs_1 = 0
sucuri_reqs_2 = 0
sucuri_reqs_3 = 0
class MockServerRequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
return
def validate_headers(self):
for key, value in expected_headers.items():
if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding':
# So PhantomJS monkeys with accept-encoding headers
# Just ignore that particular header, I guess.
pass
# Selenium is fucking retarded, and I can't override the user-agent
# and other assorted parameters via their API at all.
elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language':
pass
elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept':
pass
elif not skip_header_checks:
v1 = value.replace(" ", "")
v2 = self.headers[key]
if v2 is None:
v2 = ""
v2 = v2.replace(" ", "")
test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format(
key,
value,
self.headers[key],
{
'is_annoying_pjs' : is_annoying_pjs,
'is_chromium' : is_chromium,
'is_selenium_garbage_chromium' : is_selenium_garbage_chromium,
'skip_header_checks' : skip_header_checks,
},
)
)
def _get_handler(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
# print("Path: ", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
self.validate_headers()
except Exception:
self.send_response(500)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Headers failed validation!")
raise
if self.path == "/":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/favicon.ico":
self.send_response(404)
self.end_headers()
elif self.path == "/raw-txt":
self.send_response(200)
self.send_header('Content-type', "text/plain")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html-decode":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html/real":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Root OK?</body></html>")
elif self.path == "/compressed/deflate":
self.send_response(200)
self.send_header('Content-Encoding', 'deflate')
self.send_header('Content-type', "text/html")
self.end_headers()
inb = b"Root OK?"
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)
t1 = cobj.compress(inb) + cobj.flush()
self.wfile.write(t1)
elif self.path == "/compressed/gzip":
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(gzip.compress(b"Root OK?"))
elif self.path == "/json/invalid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT")
elif self.path == "/json/valid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/json/no-coding":
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/filename/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/path-only-trailing-slash/":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-html-suffix":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='lolercoaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='loler coaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"loler coaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/explicit-html-mime":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/redirect/bad-1":
self.send_response(302)
self.end_headers()
elif self.path == "/redirect/bad-2":
self.send_response(302)
self.send_header('location', "bad-2")
self.end_headers()
elif self.path == "/redirect/bad-3":
self.send_response(302)
self.send_header('location', "gopher://www.google.com")
self.end_headers()
elif self.path == "/redirect/from-1":
self.send_response(302)
self.send_header('location', "to-1")
self.end_headers()
elif self.path == "/redirect/to-1":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-1")
elif self.path == "/redirect/from-2":
self.send_response(302)
self.send_header('uri', "to-2")
self.end_headers()
elif self.path == "/redirect/to-2":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-2")
elif self.path == "/redirect/from-3":
self.send_response(302)
newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1])
self.send_header('uri', newurl)
self.end_headers()
elif self.path == "/password/expect":
# print("Password")
# print(self.headers)
self.send_response(200)
self.end_headers()
if not 'Authorization' in self.headers:
self.wfile.write(b"Password not sent!!")
return
val = self.headers['Authorization']
passval = val.split(" ")[-1]
passstr = base64.b64decode(passval)
if passstr == b'lol:wat':
self.wfile.write(b"Password Ok?")
else:
self.wfile.write(b"Password Bad!")
elif self.path == "/content/have-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>")
elif self.path == "/content/no-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
##################################################################################################################################
# Cookie stuff
##################################################################################################################################
elif self.path == '/cookie_test':
cook = cookies.SimpleCookie()
cook['cookie_test_key'] = cookie_key
cook['cookie_test_key']['path'] = "/"
cook['cookie_test_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())
self.end_headers()
self.wfile.write(b"<html><body>CF Cookie Test</body></html>")
elif self.path == '/cookie_require':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cookie_test_key' and cook_value == cookie_key:
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>")
return
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie is missing</body></html>")
##################################################################################################################################
# Sucuri validation
##################################################################################################################################
elif self.path == '/sucuri_shit_3':
# I'd like to get this down to just 2 requests (cookie bounce, and fetch).
# Doing that requires pulling html content out of chromium, though.
# Annoying.
nonlocal sucuri_reqs_3
sucuri_reqs_3 += 1
if sucuri_reqs_3 > 3:
raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" % sucuri_reqs_3)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit_2':
# This particular path is the one we should already have a cookie for.
# As such, we expect one request only
nonlocal sucuri_reqs_2
sucuri_reqs_2 += 1
if sucuri_reqs_2 > 1:
raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit':
nonlocal sucuri_reqs_1
sucuri_reqs_1 += 1
if sucuri_reqs_1 > 4:
raise RuntimeError("Too many requests to sucuri_shit (%s)!" % sucuri_reqs_1)
# print("Fetch for ", self.path)
# print("Cookies:", self.headers.get_all('Cookie', failobj=[]))
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
##################################################################################################################################
# Cloudflare validation
##################################################################################################################################
elif self.path == '/cloudflare_under_attack_shit_2':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cloudflare_under_attack_shit':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594':
cook = cookies.SimpleCookie()
cook['cloudflare_validate_key'] = cookie_key
cook['cloudflare_validate_key']['path'] = "/"
cook['cloudflare_validate_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())
self.end_headers()
body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>"
self.wfile.write(body.encode("utf-8"))
##################################################################################################################################
# Handle requests for an unknown path
##################################################################################################################################
else:
test_context.assertEqual(self.path, "This shouldn't happen!")
def do_GET(self):
    """Handle an HTTP GET: log the path and dispatch to the path router.

    Delegates to ``self._get_handler()``; on any exception, logs the full
    traceback line-by-line before re-raising so the test harness still
    sees the original failure.
    """
    log.info("Request for URL path: '%s'", self.path)
    # print("Headers: ", self.headers)
    # print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
    try:
        return self._get_handler()
    except Exception:
        log.error("Exception in handler!")
        for line in traceback.format_exc().split("\n"):
            log.error(line)
        # Bare `raise` re-raises the active exception with its original
        # traceback and without adding an extra frame (unlike `raise e`).
        raise
return MockServerRequestHandler
def get_free_port():
    """Ask the OS for a currently-free TCP port on localhost and return it."""
    # Binding to port 0 makes the kernel pick an ephemeral port; the
    # context manager closes the socket so the port is free for reuse.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        probe.bind(('localhost', 0))
        return probe.getsockname()[1]
def start_server(assertion_class,
                 from_wg,
                 port_override = None,
                 is_chromium = None,
                 is_selenium_garbage_chromium = False,
                 is_annoying_pjs = False,
                 skip_header_checks = False
                 ):
    """Configure and launch the mock HTTP server on a daemon thread.

    Args:
        assertion_class: test-case instance forwarded to the handler for asserts.
        from_wg: object whose ``browserHeaders`` become the expected headers.
        port_override: fixed port to bind; when falsy a free port is chosen.
        is_chromium, is_selenium_garbage_chromium, is_annoying_pjs,
        skip_header_checks: flags forwarded to ``capture_expected_headers``.

    Returns:
        Tuple of (port, HTTPServer instance, running daemon Thread).
    """
    # Configure mock server.
    if port_override:
        mock_server_port = port_override
    else:
        mock_server_port = get_free_port()
    expected_headers = dict(from_wg.browserHeaders)
    print(from_wg)
    print(expected_headers)
    assert isinstance(expected_headers, dict)
    captured_server = capture_expected_headers(
        expected_headers             = expected_headers,
        test_context                 = assertion_class,
        is_chromium                  = is_chromium,
        is_selenium_garbage_chromium = is_selenium_garbage_chromium,
        is_annoying_pjs              = is_annoying_pjs,
        skip_header_checks           = skip_header_checks
    )
    retries = 4
    for x in range(retries + 1):
        try:
            mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)
            break
        except OSError:
            # Port may still be in TIME_WAIT from a previous run; back off
            # briefly and retry, re-raising once retries are exhausted.
            time.sleep(0.2)
            if x >= retries:
                raise
    # Start running mock server in a separate thread.
    # Daemon threads automatically shut down when the main process exits.
    mock_server_thread = Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
    mock_server_thread.start()
    return mock_server_port, mock_server, mock_server_thread
if __name__ == '__main__':
    # Manual smoke-test entry point: stand up the mock server with header
    # checks disabled and keep the process alive until interrupted.
    wg = WebRequest.WebGetRobust()
    srv = start_server(
        assertion_class = None,
        from_wg         = wg,
        skip_header_checks = True)
    print("running server on port: ", srv)
    # The server runs on a daemon thread; idle here so the process persists.
    while 1:
        time.sleep(1)
| 32.929374
| 165
| 0.640653
| 2,640
| 20,515
| 4.789015
| 0.141667
| 0.05948
| 0.062011
| 0.06644
| 0.689156
| 0.656411
| 0.627541
| 0.617338
| 0.573994
| 0.563474
| 0
| 0.023794
| 0.170314
| 20,515
| 622
| 166
| 32.982315
| 0.718994
| 0.064294
| 0
| 0.509091
| 0
| 0.020455
| 0.257064
| 0.096468
| 0
| 0
| 0
| 0
| 0.020455
| 1
| 0.015909
| false
| 0.027273
| 0.034091
| 0.002273
| 0.079545
| 0.006818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b963e6196b8baa521ce89adb40142bf81a9183a6
| 3,770
|
py
|
Python
|
calcgrades.py
|
qrowsxi/calcgrades
|
93c71c1afef8dde5174726ae1702b71ccba633de
|
[
"MIT"
] | null | null | null |
calcgrades.py
|
qrowsxi/calcgrades
|
93c71c1afef8dde5174726ae1702b71ccba633de
|
[
"MIT"
] | null | null | null |
calcgrades.py
|
qrowsxi/calcgrades
|
93c71c1afef8dde5174726ae1702b71ccba633de
|
[
"MIT"
] | null | null | null |
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse
def ineq_constraint_1(v):
    """SLSQP inequality constraint: every grade must be non-negative (v >= 0)."""
    # np.array(v) copies the input directly; the original element-by-element
    # comprehension was a slower way to build the exact same array.
    return np.array(v)
def ineq_constraint_2(v):
    """SLSQP inequality constraint: every grade is at most 30 (30 - v >= 0)."""
    # Vectorized form of [-vi + 30 for vi in v]: same values, done in C.
    return 30 - np.asarray(v)
class WeightAverage:
    """Solve for the minimum future grades that reach a target weighted mean.

    Reads a course table (columns: name, credits, grade; grade == 0 marks a
    course not yet taken) and minimizes the credit-weighted average subject
    to hitting `mean` exactly, with each unknown grade in [0, 30].
    """

    def __init__(self, mean, csv):
        # NOTE: `csv` is a file path; the name shadows the stdlib module but
        # is kept for backward compatibility with existing callers.
        self.df = pandas.read_csv(csv)
        self.course = self.df['name']
        self.expected_mean = mean
        # Credits of courses not yet passed (grade == 0): the unknowns.
        self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
        # Start the optimizer from the target mean for every unknown grade.
        self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])
        # Credits and grades of courses already passed (grade > 0).
        self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
        self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]
        self.tot_credits = sum(self.owned_credits) + sum(self.credits)

    def weight_average(self, v):
        """Return the credit-weighted mean of owned grades plus candidate grades `v`."""
        # Dot products replace the original manual accumulation loops:
        # sum(g_i * c_i) for passed courses plus sum(v_i * c_i) for unknowns.
        earned = np.dot(self.owned_grades, self.owned_credits)
        pending = np.dot(v, self.credits)
        return (earned + pending) / self.tot_credits

    def eq_constraint(self, v):
        """Equality constraint: the weighted average must equal the target mean."""
        return self.weight_average(v) - self.expected_mean

    def solve(self):
        """Run SLSQP; return the grade vector, or None when infeasible."""
        cons = (
            {'type': 'eq', 'fun': self.eq_constraint},
            {'type': 'ineq', 'fun': ineq_constraint_1},
            {'type': 'ineq', 'fun': ineq_constraint_2})
        res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol,
                                      method='SLSQP', constraints=cons)
        if not res.success:
            return None
        return res.x
def error_no_solution():
    """Report that the requested mean is unreachable and terminate the program."""
    print("Mean not possible with current vote :(")
    # Equivalent to exit(0): raises SystemExit with code 0.
    raise SystemExit(0)
def output_result(solver, sol):
    """Print the achieved mean and per-course grade plan; return 0.

    Args:
        solver: a WeightAverage instance (provides weight_average and df).
        sol: sequence of grades for the pending courses, or None when no
             solution exists.
    """
    # Bug fix: check for "no solution" BEFORE touching `sol`.  The original
    # called solver.weight_average(sol) first, which raises on None, so the
    # "Not Possible" message was unreachable.
    if sol is None:
        print("Not Possible with current grades :(")
        exit()
    avg = solver.weight_average(sol)
    df = solver.df
    print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
    # Courses already passed, with their recorded grades.
    for index, row in df.query('grade > 0').iterrows():
        print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
    # Pending courses, paired positionally with the solved grade vector.
    for i, (index, row) in enumerate(df.query('grade == 0').iterrows()):
        print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
    return 0
def main():
    """CLI entry point: parse arguments, solve for minimum grades, print plan.

    Returns 0 on success; terminates early via error_no_solution() when the
    requested mean cannot be reached.
    """
    name = "calcGrades"
    description = """CalcGrades is an utility which purpose is to compute the minimum
grades required to get a certain weight average of the grades over the credits,
given the desired output and the grades already owned."""
    parser = argparse.ArgumentParser(name, description=description)
    parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')
    parser.add_argument('--file', dest='file', default='courses.csv', type=str,
                        help='path to the csv file containing the courses (default: courses.csv)')
    parser.add_argument('--floor', default=False, action='store_true',
                        help='apply floor operation instead of round to solution')
    parser.add_argument('--ceil', default=False, action='store_true',
                        help='apply ceil operation instead of round to solution')
    args = parser.parse_args()
    # Bug fix: with nargs='+' argparse yields a *list* of floats, but the
    # solver needs a scalar target.  The original passed the whole list,
    # producing a 2-D initial guess and a vector equality constraint.
    mean = args.mean[0]
    courses = args.file
    solver = WeightAverage(mean, courses)
    sol = solver.solve()
    if sol is None:
        error_no_solution()
    # Grades must be integers: apply the requested rounding mode.
    if args.ceil:
        sol = [math.ceil(x) for x in sol]
    elif args.floor:
        sol = [math.floor(x) for x in sol]
    else:
        sol = [round(x) for x in sol]
    output_result(solver, sol)
    return 0
if __name__ == '__main__':
    # Script entry point.
    main()
| 35.566038
| 116
| 0.609284
| 514
| 3,770
| 4.356031
| 0.266537
| 0.024118
| 0.024565
| 0.021438
| 0.261724
| 0.193837
| 0.15096
| 0.118803
| 0.118803
| 0.118803
| 0
| 0.014457
| 0.247745
| 3,770
| 105
| 117
| 35.904762
| 0.775035
| 0
| 0
| 0.045977
| 0
| 0.011494
| 0.237135
| 0
| 0.011494
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.08046
| 0.034483
| 0.287356
| 0.057471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9669e29ffa745ca4256305d7461bcbe497cc930
| 1,428
|
py
|
Python
|
tests/bugs/core_3355_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_3355_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_3355_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_3355
# title: Wrong comparsion of DATE and TIMESTAMP if index is used
# decription:
# tracker_id: CORE-3355
# min_versions: ['2.1.5']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
# isql output substitutions applied before comparison (none needed here).
substitutions_1 = []

# Schema + fixture data: an indexed DATE column spanning 1997-12-31 .. 1998-01-05.
init_script_1 = """create table tdate (id integer not null primary key, val date);
create index tdateix1 on tdate (val);
commit;
insert into tdate values (0, '1997-12-31');
insert into tdate values (1, '1998-01-01');
insert into tdate values (2, '1998-01-02');
insert into tdate values (3, '1998-01-03');
insert into tdate values (4, '1998-01-04');
insert into tdate values (5, '1998-01-05');
commit;
"""

# Database fixture built from the init script above (CORE-3355 regression).
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)

# Both queries compare the indexed DATE column against a TIMESTAMP literal;
# the bug made the index path compare them incorrectly.
test_script_1 = """select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';
select count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

# Expected isql output: 1 row at-or-after the timestamp, 5 rows before it.
expected_stdout_1 = """
COUNT
=====================
1
COUNT
=====================
5
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    """Verify DATE vs TIMESTAMP comparison is correct when the index is used (CORE-3355)."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 25.052632
| 95
| 0.641457
| 216
| 1,428
| 4.074074
| 0.402778
| 0.047727
| 0.102273
| 0.143182
| 0.125
| 0.125
| 0.125
| 0.125
| 0.125
| 0.125
| 0
| 0.116872
| 0.203081
| 1,428
| 56
| 96
| 25.5
| 0.656415
| 0.168067
| 0
| 0.28125
| 0
| 0.0625
| 0.578231
| 0.035714
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96834dcae4311b040352e86ae4bdc019619193a
| 7,518
|
py
|
Python
|
keystone-moon/keystone/endpoint_policy/controllers.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
keystone-moon/keystone/endpoint_policy/controllers.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
keystone-moon/keystone/endpoint_policy/controllers.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:38:30.000Z
|
2021-03-21T11:38:30.000Z
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import controller
from keystone.common import dependency
from keystone import notifications
@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
class EndpointPolicyV3Controller(controller.V3Controller):
    """V3 API controller for policy <-> endpoint/service/region associations.

    Collection/member are set to 'endpoints' because list/get operations on
    this controller return endpoint (or policy) entities, not associations.
    """

    collection_name = 'endpoints'
    member_name = 'endpoint'

    def __init__(self):
        """Register callbacks that purge associations when referents are deleted."""
        super(EndpointPolicyV3Controller, self).__init__()
        notifications.register_event_callback(
            'deleted', 'endpoint', self._on_endpoint_delete)
        notifications.register_event_callback(
            'deleted', 'service', self._on_service_delete)
        notifications.register_event_callback(
            'deleted', 'region', self._on_region_delete)
        notifications.register_event_callback(
            'deleted', 'policy', self._on_policy_delete)

    def _on_endpoint_delete(self, service, resource_type, operation, payload):
        """Remove associations referencing a deleted endpoint."""
        self.endpoint_policy_api.delete_association_by_endpoint(
            payload['resource_info'])

    def _on_service_delete(self, service, resource_type, operation, payload):
        """Remove associations referencing a deleted service."""
        self.endpoint_policy_api.delete_association_by_service(
            payload['resource_info'])

    def _on_region_delete(self, service, resource_type, operation, payload):
        """Remove associations referencing a deleted region."""
        self.endpoint_policy_api.delete_association_by_region(
            payload['resource_info'])

    def _on_policy_delete(self, service, resource_type, operation, payload):
        """Remove associations referencing a deleted policy."""
        self.endpoint_policy_api.delete_association_by_policy(
            payload['resource_info'])

    @controller.protected()
    def create_policy_association_for_endpoint(self, context,
                                               policy_id, endpoint_id):
        """Create an association between a policy and an endpoint."""
        # get_* calls validate that both referents exist before associating.
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def check_policy_association_for_endpoint(self, context,
                                              policy_id, endpoint_id):
        """Check an association between a policy and an endpoint."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def delete_policy_association_for_endpoint(self, context,
                                               policy_id, endpoint_id):
        """Delete an association between a policy and an endpoint."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def create_policy_association_for_service(self, context,
                                              policy_id, service_id):
        """Create an association between a policy and a service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def check_policy_association_for_service(self, context,
                                             policy_id, service_id):
        """Check an association between a policy and a service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def delete_policy_association_for_service(self, context,
                                              policy_id, service_id):
        """Delete an association between a policy and a service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def create_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Create an association between a policy and region+service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def check_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Check an association between a policy and region+service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def delete_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Delete an association between a policy and region+service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def get_policy_for_endpoint(self, context, endpoint_id):
        """Get the effective policy for an endpoint."""
        self.catalog_api.get_endpoint(endpoint_id)
        ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)
        # NOTE(henry-nash): since the collection and member for this class is
        # set to endpoints, we have to handle wrapping this policy entity
        # ourselves.
        self._add_self_referential_link(context, ref)
        return {'policy': ref}

    # NOTE(henry-nash): As in the catalog controller, we must ensure that the
    # legacy_endpoint_id does not escape.

    @classmethod
    def filter_endpoint(cls, ref):
        """Strip the internal legacy_endpoint_id field from an endpoint ref."""
        if 'legacy_endpoint_id' in ref:
            ref.pop('legacy_endpoint_id')
        return ref

    @classmethod
    def wrap_member(cls, context, ref):
        """Filter internal fields, then apply the standard member wrapping."""
        ref = cls.filter_endpoint(ref)
        return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref)

    @controller.protected()
    def list_endpoints_for_policy(self, context, policy_id):
        """List endpoints with the effective association to a policy."""
        self.policy_api.get_policy(policy_id)
        refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)
        return EndpointPolicyV3Controller.wrap_collection(context, refs)
| 45.017964
| 79
| 0.699654
| 921
| 7,518
| 5.374593
| 0.148751
| 0.048485
| 0.04
| 0.063636
| 0.655152
| 0.632323
| 0.603838
| 0.585051
| 0.565657
| 0.565657
| 0
| 0.002225
| 0.222666
| 7,518
| 166
| 80
| 45.289157
| 0.844798
| 0.186752
| 0
| 0.617391
| 0
| 0
| 0.0341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156522
| false
| 0
| 0.026087
| 0
| 0.243478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96893ff0c22487256e91c812d37a56c2c479eb3
| 11,886
|
py
|
Python
|
src/nibetaseries/cli/run.py
|
ipacheco-uy/NiBetaSeries
|
3d8716552f22f925524d80af9aace09469c22d4d
|
[
"MIT"
] | 1
|
2019-10-03T21:20:48.000Z
|
2019-10-03T21:20:48.000Z
|
src/nibetaseries/cli/run.py
|
ipacheco-uy/NiBetaSeries
|
3d8716552f22f925524d80af9aace09469c22d4d
|
[
"MIT"
] | null | null | null |
src/nibetaseries/cli/run.py
|
ipacheco-uy/NiBetaSeries
|
3d8716552f22f925524d80af9aace09469c22d4d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m nibetaseries` python will execute
``__main__.py`` as a script. That means there won't be any
``nibetaseries.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nibetaseries.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
from __future__ import absolute_import
import os
import argparse
from argparse import RawTextHelpFormatter
from glob import glob
from multiprocessing import cpu_count
from nipype import config as ncfg
def get_parser():
    """Build and return the NiBetaSeries argparse parser.

    Arguments are organized into groups: positional BIDS arguments, required
    atlas arguments (mutually required pair -a/-l), processing options,
    image-selection options, performance options, and misc options.
    """
    from ..__init__ import __version__
    import sys
    verstr = 'nibs v{}'.format(__version__)
    parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',
                                     formatter_class=RawTextHelpFormatter)
    parser.add_argument('bids_dir', help='The directory with the input dataset '
                        'formatted according to the BIDS standard.')
    parser.add_argument('derivatives_pipeline', help='The pipeline that contains '
                        'minimally preprocessed img, brainmask, and confounds.tsv')
    parser.add_argument('output_dir', help='The directory where the output directory '
                        'and files should be stored. If you are running group level analysis '
                        'this folder should be prepopulated with the results of the'
                        'participant level analysis.')
    parser.add_argument('analysis_level', choices=['participant', 'group'],
                        help='Level of the analysis that will be performed '
                        'Multiple participant level analyses can be run independently '
                        '(in parallel) using the same output_dir')
    parser.add_argument('-v', '--version', action='version',
                        version=verstr)
    # Atlas Arguments (Required Options)
    # -a and -l are each required whenever the other is supplied.
    atlas_args = parser.add_argument_group('Required Atlas Arguments')
    atlas_args.add_argument('-a', '--atlas-img', action='store',
                            required=('-l' in sys.argv or '--atlas-lut' in sys.argv),
                            help='input atlas nifti where each voxel within a "region" '
                                 'is labeled with the same integer and there is a unique '
                                 'integer associated with each region of interest.')
    atlas_args.add_argument('-l', '--atlas-lut', action='store',
                            required=('-a' in sys.argv or '--atlas-img' in sys.argv),
                            help='atlas look up table (tsv) formatted with the columns: '
                                 'index, regions which correspond to the regions in the '
                                 'nifti file specified by --atlas-img.')
    # preprocessing options
    proc_opts = parser.add_argument_group('Options for processing')
    proc_opts.add_argument('--estimator', default='lss',
                           choices=['lss', 'lsa'],
                           help='beta series modeling method')
    proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,
                           help='select a smoothing kernel (mm)')
    proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,
                           default=0.0078125, help='high pass filter (Hz)')
    proc_opts.add_argument('-c', '--confounds', help='The confound column names '
                           'that are to be included in nuisance regression. '
                           'write the confounds you wish to include separated by a space',
                           nargs="+")
    proc_opts.add_argument('--hrf-model', default='glover',
                           choices=['glover', 'spm', 'fir',
                                    'glover + derivative',
                                    'glover + derivative + dispersion',
                                    'spm + derivative',
                                    'spm + derivative + dispersion'],
                           help='convolve your regressors '
                           'with one of the following hemodynamic response functions')
    proc_opts.add_argument('--fir-delays', default=None,
                           nargs='+', type=int, help='FIR delays in volumes',
                           metavar='VOL')
    proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '
                           'are stored (i.e. non-essential files). '
                           'This directory can be deleted once you are reasonably '
                           'certain nibs finished as expected.')
    # Image Selection options
    image_opts = parser.add_argument_group('Options for selecting images')
    # Consistency fix: --participant-label was attached to the bare parser,
    # so it did not appear under this group in --help.  Parsing behavior is
    # unchanged; only the help grouping is corrected.
    image_opts.add_argument('--participant-label', nargs="+",
                            help='The label(s) of the participant(s) '
                                 'that should be analyzed. The label '
                                 'corresponds to sub-<participant_label> from the BIDS spec '
                                 '(so it does not include "sub-"). If this parameter is not '
                                 'provided all subjects should be analyzed. Multiple '
                                 'participants can be specified with a space separated list.')
    image_opts.add_argument('--session-label', action='store',
                            default=None, help='select a session to analyze')
    image_opts.add_argument('-t', '--task-label', action='store',
                            default=None, help='select a specific task to be processed')
    image_opts.add_argument('--run-label', action='store',
                            default=None, help='select a run to analyze')
    image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',
                            choices=['MNI152NLin2009cAsym'],
                            help='select a bold derivative in a specific space to be used')
    image_opts.add_argument('--description-label', action='store',
                            default=None, help='select a bold file with particular '
                                               '`desc` label to process')
    image_opts.add_argument('--exclude-description-label', action='store_true',
                            default=False, help='exclude this `desc` label from nibetaseries')
    # performance options
    g_perfm = parser.add_argument_group('Options to handle performance')
    # NOTE(review): '-n-cpus' (single leading dash) looks like it was meant to
    # be '--n-cpus'; kept as-is because changing it would alter the CLI.
    g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,
                         help='maximum number of threads across all processes')
    g_perfm.add_argument('--use-plugin', action='store', default=None,
                         help='nipype plugin configuration file')
    # misc options
    misc = parser.add_argument_group('misc options')
    misc.add_argument('--graph', action='store_true', default=False,
                      help='generates a graph png of the workflow')
    return parser
def main():
    """CLI entry point: validate options, build directories, configure nipype,
    and run the participant-level workflow (group level is not implemented).
    """
    from ..workflows.base import init_nibetaseries_participant_wf
    # get commandline options
    opts = get_parser().parse_args()
    # check inputs: FIR modeling is meaningless without explicit delays.
    if (opts.hrf_model == 'fir') and (opts.fir_delays is None):
        raise ValueError('If the FIR HRF model is selected, '
                         'FIR delays must be provided.')
    # Set up directories
    # TODO: set up some sort of versioning system
    bids_dir = os.path.abspath(opts.bids_dir)
    derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline)
    output_dir = os.path.abspath(opts.output_dir)
    os.makedirs(output_dir, exist_ok=True)
    log_dir = os.path.join(output_dir, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    if opts.work_dir:
        work_dir = os.path.abspath(opts.work_dir)
    else:
        # Default scratch space under the current working directory.
        work_dir = os.path.join(os.getcwd(), 'nibetaseries_work')
    os.makedirs(work_dir, exist_ok=True)
    # only for a subset of subjects
    if opts.participant_label:
        subject_list = opts.participant_label
    # for all subjects: derive labels from sub-* directory names.
    else:
        subject_dirs = glob(os.path.join(bids_dir, "sub-*"))
        subject_list = [subject_dir.split("-")[-1] for subject_dir in subject_dirs]
    # Nipype plugin configuration
    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }
    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
        if nthreads is None or nthreads < 1:
            nthreads = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nthreads
    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {'log_directory': log_dir,
                    'log_to_file': True},
        'execution': {'crashdump_dir': log_dir,
                      'crashfile_format': 'txt',
                      'parameterize_dirs': False},
    })
    # running participant level
    if opts.analysis_level == "participant":
        nibetaseries_participant_wf = init_nibetaseries_participant_wf(
            estimator=opts.estimator,
            atlas_img=os.path.abspath(opts.atlas_img),
            atlas_lut=os.path.abspath(opts.atlas_lut),
            bids_dir=bids_dir,
            derivatives_pipeline_dir=derivatives_pipeline_dir,
            exclude_description_label=opts.exclude_description_label,
            fir_delays=opts.fir_delays,
            hrf_model=opts.hrf_model,
            high_pass=opts.high_pass,
            output_dir=output_dir,
            run_label=opts.run_label,
            selected_confounds=opts.confounds,
            session_label=opts.session_label,
            smoothing_kernel=opts.smoothing_kernel,
            space_label=opts.space_label,
            subject_list=subject_list,
            task_label=opts.task_label,
            description_label=opts.description_label,
            work_dir=work_dir,
        )
        if opts.graph:
            # Write a visual summary of the workflow before running it.
            nibetaseries_participant_wf.write_graph(graph2use='colored',
                                                    format='svg',
                                                    simple_form=True)
        try:
            nibetaseries_participant_wf.run(**plugin_settings)
        except RuntimeError as e:
            # "did not execute cleanly" is reported, not re-raised, so the
            # partial results and crash files remain inspectable.
            if "Workflow did not execute cleanly" in str(e):
                print("Workflow did not execute cleanly")
            else:
                raise e
    elif opts.analysis_level == "group":
        raise NotImplementedError('group analysis not currently implemented')
def init():
    """Refuse direct script execution; this module must run via the `nibs` entry point."""
    if __name__ != "__main__":
        return
    raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n"
                       "Please `pip install` NiBetaSeries and use the `nibs` command")


init()
| 46.611765
| 98
| 0.595406
| 1,345
| 11,886
| 5.090706
| 0.292193
| 0.04659
| 0.02848
| 0.019425
| 0.121075
| 0.050241
| 0.032715
| 0.0222
| 0
| 0
| 0
| 0.003775
| 0.309187
| 11,886
| 254
| 99
| 46.795276
| 0.830106
| 0.110887
| 0
| 0.021739
| 0
| 0
| 0.309284
| 0.00693
| 0
| 0
| 0
| 0.003937
| 0
| 1
| 0.016304
| false
| 0.016304
| 0.059783
| 0
| 0.081522
| 0.005435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9693ae1ef191dd2735a2abba99bb1bc689af26f
| 2,727
|
py
|
Python
|
custom_components/senz/config_flow.py
|
astrandb/senz_hass
|
6725d37fd9c6d250ac10a16e68c56908bf1c8404
|
[
"MIT"
] | 2
|
2022-01-15T09:55:58.000Z
|
2022-02-10T10:13:35.000Z
|
custom_components/senz/config_flow.py
|
astrandb/senz_hass
|
6725d37fd9c6d250ac10a16e68c56908bf1c8404
|
[
"MIT"
] | 4
|
2022-01-15T19:41:28.000Z
|
2022-02-14T16:01:47.000Z
|
custom_components/senz/config_flow.py
|
astrandb/senz_hass
|
6725d37fd9c6d250ac10a16e68c56908bf1c8404
|
[
"MIT"
] | null | null | null |
"""Config flow for SENZ WiFi."""
from __future__ import annotations
import logging
from typing import Any
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
from .pysenz import PreAPI
class OAuth2FlowHandler(
    config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
    """Config flow to handle SENZ WiFi OAuth2 authentication."""

    # Home Assistant domain this flow is registered under.
    DOMAIN = DOMAIN

    @property
    def logger(self) -> logging.Logger:
        """Return logger."""
        return logging.getLogger(__name__)

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        # Scopes requested from the SENZ OAuth server; offline_access is
        # needed to obtain a refresh token.
        return {
            "scope": "restapi offline_access",
        }

    async def async_step_reauth(
        self, entry: dict[str, Any] | None = None
    ) -> FlowResult:
        """Perform reauth upon an API authentication error."""
        # Stash the entry so async_step_reauth_confirm can show the account.
        # NOTE(review): entry['auth_implementation'] is accessed below, so a
        # None entry would raise here — presumably HA always passes the entry
        # data; verify against the caller.
        self.entry = entry
        persistent_notification.async_create(
            self.hass,
            f"Senz integration for account {entry['auth_implementation']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it.",
            "Senz re-authentication",
            "senz_reauth",
        )
        return await self.async_step_reauth_confirm()

    async def async_step_reauth_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Dialog that informs the user that reauth is required."""
        if user_input is None:
            # First call: present an empty confirmation form.
            return self.async_show_form(
                step_id="reauth_confirm",
                description_placeholders={"account": self.entry["auth_implementation"]},
                data_schema=vol.Schema({}),
                errors={},
            )
        # Confirmed: drop the reminder notification and restart the OAuth flow.
        persistent_notification.async_dismiss(self.hass, "senz_reauth")
        return await self.async_step_user()

    async def async_oauth_create_entry(self, data: dict) -> dict:
        """Create an oauth config entry or update existing entry for reauth."""
        pre_api = PreAPI(self.hass)
        # Look up the account name behind the freshly issued access token;
        # the userName serves as the unique id for the config entry.
        resp = await pre_api.getAccount(data["token"]["access_token"])
        account = resp["userName"]
        existing_entry = await self.async_set_unique_id(account)
        if existing_entry:
            # Reauth path: store the new credentials and reload the entry.
            self.hass.config_entries.async_update_entry(existing_entry, data=data)
            await self.hass.config_entries.async_reload(existing_entry.entry_id)
            return self.async_abort(reason="reauth_successful")
        return self.async_create_entry(title=account, data=data)
| 34.518987
| 184
| 0.671067
| 320
| 2,727
| 5.503125
| 0.353125
| 0.030664
| 0.022147
| 0.02385
| 0.126065
| 0.070415
| 0.038614
| 0
| 0
| 0
| 0
| 0.002411
| 0.239457
| 2,727
| 78
| 185
| 34.961538
| 0.846673
| 0.056839
| 0
| 0.074074
| 0
| 0.018519
| 0.135615
| 0.024081
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.166667
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9697b05a9b44247d80463465fa92118d707fb98
| 6,465
|
py
|
Python
|
astropy_helpers/git_helpers.py
|
bsipocz/astropy-helpers
|
4999df1cfb6a5022347b0cef9caf8a556517c625
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 9
|
2019-12-06T13:12:33.000Z
|
2021-10-05T12:47:15.000Z
|
astropy_helpers/git_helpers.py
|
bsipocz/astropy-helpers
|
4999df1cfb6a5022347b0cef9caf8a556517c625
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2
|
2019-11-28T17:20:27.000Z
|
2019-12-09T18:44:35.000Z
|
astropy_helpers/git_helpers.py
|
bsipocz/astropy-helpers
|
4999df1cfb6a5022347b0cef9caf8a556517c625
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3
|
2019-11-28T17:04:22.000Z
|
2021-10-19T13:12:34.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for retrieving revision information from a project's git repository.
"""
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import locale
import os
import subprocess
import warnings
def _decode_stdio(stream):
    """Decode raw subprocess output bytes to text.

    Uses the locale's preferred stdio encoding (falling back to utf-8 when
    the locale is undeterminable) and, as a last resort, latin1 — which can
    decode any byte sequence.
    """
    try:
        encoding = locale.getdefaultlocale()[1] or 'utf-8'
    except ValueError:
        encoding = 'utf-8'

    try:
        return stream.decode(encoding)
    except UnicodeDecodeError:
        # Final fallback
        return stream.decode('latin1')
def update_git_devstr(version, path=None):
    """
    Updates the git revision string if and only if the path is being imported
    directly from a git working copy.  This ensures that the revision number
    in the version string is accurate.
    """
    try:
        # get_git_devstr returns '' when we are not inside a git repo.
        sha = get_git_devstr(sha=True, show_warning=False, path=path)
    except OSError:
        return version

    if not sha:
        # Probably not in git so just pass silently
        return version

    if 'dev' not in version:
        # otherwise it's already the true/release version
        return version

    # Update the '.devN' suffix to the current git revision count.
    base = version.split('.dev', 1)[0]
    rev_count = get_git_devstr(sha=False, show_warning=False, path=path)
    return base + '.dev' + rev_count
def get_git_devstr(sha=False, show_warning=True, path=None):
    """
    Determines the number of revisions in this repository.

    Parameters
    ----------
    sha : bool
        If True, the full SHA1 hash will be returned. Otherwise, the total
        count of commits in the repository will be used as a "revision
        number".

    show_warning : bool
        If True, issue a warning if git returns an error code, otherwise errors
        pass silently.

    path : str or None
        If a string, specifies the directory to look in to find the git
        repository.  If `None`, the current working directory is used, and must
        be the root of the git repository.
        If given a filename it uses the directory containing that file.

    Returns
    -------
    devversion : str
        Either a string with the revision number (if `sha` is False), the
        SHA1 hash of the current commit (if `sha` is True), or an empty string
        if git version info could not be identified.
    """
    if path is None:
        path = os.getcwd()

    # Allow passing a file path: use its containing directory.
    if not os.path.isdir(path):
        path = os.path.abspath(os.path.dirname(path))

    if sha:
        # Faster for getting just the hash of HEAD
        cmd = ['rev-parse', 'HEAD']
    else:
        cmd = ['rev-list', '--count', 'HEAD']

    def run_git(cmd):
        # Run `git <cmd>` in `path`, returning (returncode, stdout, stderr).
        # returncode is None when git itself could not be executed.
        try:
            p = subprocess.Popen(['git'] + cmd, cwd=path,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except OSError as e:
            if show_warning:
                warnings.warn('Error running git: ' + str(e))
            return (None, b'', b'')

        # 128: not inside a git repository.
        if p.returncode == 128:
            if show_warning:
                warnings.warn('No git repository present at {0!r}! Using '
                              'default dev version.'.format(path))
            return (p.returncode, b'', b'')

        # 129: git did not understand a command-line option (old git).
        if p.returncode == 129:
            if show_warning:
                warnings.warn('Your git looks old (does it support {0}?); '
                              'consider upgrading to v1.7.2 or '
                              'later.'.format(cmd[0]))
            return (p.returncode, stdout, stderr)
        elif p.returncode != 0:
            if show_warning:
                warnings.warn('Git failed while determining revision '
                              'count: {0}'.format(_decode_stdio(stderr)))
            return (p.returncode, stdout, stderr)

        return p.returncode, stdout, stderr

    returncode, stdout, stderr = run_git(cmd)

    if not sha and returncode == 128:
        # git returns 128 if the command is not run from within a git
        # repository tree. In this case, a warning is produced above but we
        # return the default dev version of '0'.
        return '0'
    elif not sha and returncode == 129:
        # git returns 129 if a command option failed to parse; in
        # particular this could happen in git versions older than 1.7.2
        # where the --count option is not supported
        # Also use --abbrev-commit and --abbrev=0 to display the minimum
        # number of characters needed per-commit (rather than the full hash)
        cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
        returncode, stdout, stderr = run_git(cmd)
        # Fall back on the old method of getting all revisions and counting
        # the lines
        if returncode == 0:
            return str(stdout.count(b'\n'))
        else:
            return ''
    elif sha:
        # Full SHA1 is 40 hex characters; trim the trailing newline.
        return _decode_stdio(stdout)[:40]
    else:
        return _decode_stdio(stdout).strip()
# This function is tested but it is only ever executed within a subprocess when
# creating a fake package, so it doesn't get picked up by coverage metrics.
def _get_repo_path(pathname, levels=None):  # pragma: no cover
    """
    Given a file or directory name, determine the root of the git repository
    this path is under.  If given, this won't look any higher than ``levels``
    (that is, if ``levels=0`` then the given path must be the root of the git
    repository and is returned if so).

    Returns `None` if the given path could not be determined to belong to a git
    repo.
    """
    if os.path.isfile(pathname):
        current = os.path.abspath(os.path.dirname(pathname))
    elif os.path.isdir(pathname):
        current = os.path.abspath(pathname)
    else:
        # Neither a file nor a directory: nothing to search.
        return None

    steps_up = 0
    while levels is None or steps_up <= levels:
        if os.path.exists(os.path.join(current, '.git')):
            return current
        steps_up += 1
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root.
            break
        current = parent

    return None
| 33.324742
| 79
| 0.612065
| 870
| 6,465
| 4.493103
| 0.302299
| 0.018419
| 0.02814
| 0.021489
| 0.17012
| 0.113072
| 0.048606
| 0.015861
| 0
| 0
| 0
| 0.010664
| 0.30379
| 6,465
| 193
| 80
| 33.497409
| 0.857809
| 0.403094
| 0
| 0.234043
| 0
| 0
| 0.08601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0
| 0.042553
| 0
| 0.287234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96bb8e94e8bbfe556cc0ad3a314b6991573aa47
| 544
|
py
|
Python
|
tests/test_db.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 24
|
2017-08-18T20:36:27.000Z
|
2020-03-27T08:55:39.000Z
|
tests/test_db.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 6
|
2017-10-14T05:50:34.000Z
|
2019-06-03T08:39:49.000Z
|
tests/test_db.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 5
|
2018-01-09T11:07:06.000Z
|
2019-06-02T14:34:34.000Z
|
import os
from tendermint.db import VanillaDB
from tendermint.utils import home_dir
def test_database():
    """Round-trip set/get/overwrite/delete through VanillaDB on a temp file."""
    db_path = home_dir('temp', 'test.db')
    db = VanillaDB(db_path)

    # Write then read back.
    db.set(b'dave', b'one')
    assert db.get(b'dave') == b'one'

    # Overwriting the same key replaces the value.
    db.set(b'dave', b'two')
    assert db.get(b'dave') == b'two'

    # Missing keys return None; existence checks track writes/deletes.
    assert db.get(b'doesntexist') is None
    assert db.exists(b'dave')
    db.delete(b'dave')
    assert db.exists(b'dave') == False

    # Clean up the on-disk database file.
    if os.path.exists(db_path):
        os.remove(db_path)
| 20.923077
| 42
| 0.621324
| 84
| 544
| 3.988095
| 0.357143
| 0.104478
| 0.053731
| 0.059701
| 0.316418
| 0.137313
| 0.137313
| 0
| 0
| 0
| 0
| 0
| 0.204044
| 544
| 25
| 43
| 21.76
| 0.773672
| 0
| 0
| 0.111111
| 0
| 0
| 0.113971
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96f6c5854c1e905c9ad5d8f08d016972c710a1f
| 4,134
|
py
|
Python
|
projects/OneNet/onenet/head.py
|
iFighting/OneNet
|
6e33b46d2aa13131262833c75f0fd1c3d224ef03
|
[
"MIT"
] | 2
|
2021-06-16T01:31:17.000Z
|
2021-11-25T15:27:28.000Z
|
projects/OneNet/onenet/head.py
|
xieenze/OneNet
|
3b06ad6832727cef4c0262389de4cdbb2a666197
|
[
"MIT"
] | null | null | null |
projects/OneNet/onenet/head.py
|
xieenze/OneNet
|
3b06ad6832727cef4c0262389de4cdbb2a666197
|
[
"MIT"
] | 1
|
2021-02-04T06:38:42.000Z
|
2021-02-04T06:38:42.000Z
|
#
# Modified by Peize Sun
# Contact: sunpeize@foxmail.com
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes
from .deconv import CenternetDeconv
class Head(nn.Module):
    """OneNet prediction head.

    Upsamples backbone features with a CenterNet-style deconvolution module,
    then predicts per-location class logits and LTRB (left/top/right/bottom)
    box offsets which are decoded into absolute boxes.
    """

    def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):
        # NOTE(review): mutable default argument; safe only as long as
        # callers never mutate backbone_shape.
        super().__init__()

        # Build heads.
        num_classes = cfg.MODEL.OneNet.NUM_CLASSES
        d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]
        activation = cfg.MODEL.OneNet.ACTIVATION
        self.deconv = CenternetDeconv(cfg, backbone_shape)

        self.num_classes = num_classes
        self.d_model = d_model
        # NOTE(review): duplicate assignment of num_classes; harmless.
        self.num_classes = num_classes
        self.activation = _get_activation_fn(activation)

        # 3x3 convs: shared feature conv, classification and box regression.
        self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)
        self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)
        self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1)

        # Init parameters.
        prior_prob = cfg.MODEL.OneNet.PRIOR_PROB
        # Focal-loss style bias init so initial foreground prob == prior_prob.
        self.bias_value = -math.log((1 - prior_prob) / prior_prob)
        self._reset_parameters()

    def _reset_parameters(self):
        # init all parameters.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

            # initialize the bias for focal loss.
            if p.shape[-1] == self.num_classes:
                nn.init.constant_(p, self.bias_value)

    def forward(self, features_list):
        """Return (class_logits, pred_bboxes) for the input feature maps."""
        features = self.deconv(features_list)
        # locations: pixel-center coordinates, shape (1, 2, H, W).
        locations = self.locations(features)[None]

        feat = self.activation(self.feat1(features))

        class_logits = self.cls_score(feat)
        # ReLU keeps the predicted l/t/r/b offsets non-negative.
        pred_ltrb = F.relu(self.ltrb_pred(feat))
        pred_bboxes = self.apply_ltrb(locations, pred_ltrb)

        return class_logits, pred_bboxes

    def apply_ltrb(self, locations, pred_ltrb):
        """Decode LTRB offsets into (x1, y1, x2, y2) boxes.

        :param locations: (1, 2, H, W)
        :param pred_ltrb: (N, 4, H, W)
        """
        pred_boxes = torch.zeros_like(pred_ltrb)
        pred_boxes[:, 0, :, :] = locations[:, 0, :, :] - pred_ltrb[:, 0, :, :]  # x1
        pred_boxes[:, 1, :, :] = locations[:, 1, :, :] - pred_ltrb[:, 1, :, :]  # y1
        pred_boxes[:, 2, :, :] = locations[:, 0, :, :] + pred_ltrb[:, 2, :, :]  # x2
        pred_boxes[:, 3, :, :] = locations[:, 1, :, :] + pred_ltrb[:, 3, :, :]  # y2

        return pred_boxes

    @torch.no_grad()
    def locations(self, features, stride=4):
        """Compute pixel-center coordinates of every feature-map cell.

        Arguments:
            features:  (N, C, H, W)
        Return:
            locations:  (2, H, W)
        """
        h, w = features.size()[-2:]
        device = features.device

        # Coordinates of cell origins in input-image space, then shifted by
        # stride // 2 to the cell centers.
        shifts_x = torch.arange(
            0, w * stride, step=stride,
            dtype=torch.float32, device=device
        )
        shifts_y = torch.arange(
            0, h * stride, step=stride,
            dtype=torch.float32, device=device
        )
        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
        shift_x = shift_x.reshape(-1)
        shift_y = shift_y.reshape(-1)
        locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2

        locations = locations.reshape(h, w, 2).permute(2, 0, 1)

        return locations
def _get_activation_fn(activation):
    """Return an activation function given a string.

    :param activation: one of ``"relu"``, ``"gelu"`` or ``"glu"``
    :return: the corresponding function from ``torch.nn.functional``
    :raises RuntimeError: if ``activation`` is not a recognized name
    """
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    # Fix: the previous message omitted "glu" even though it is supported.
    raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
| 32.296875
| 94
| 0.600871
| 523
| 4,134
| 4.57935
| 0.323136
| 0.030063
| 0.023382
| 0.021294
| 0.096868
| 0.096868
| 0.073486
| 0.06263
| 0
| 0
| 0
| 0.023834
| 0.27939
| 4,134
| 127
| 95
| 32.551181
| 0.780128
| 0.159168
| 0
| 0.054795
| 0
| 0
| 0.017788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.123288
| 0
| 0.30137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96fae5c29fd446ea7199733a629bbe0f6190046
| 49,876
|
py
|
Python
|
mermaid/utils.py
|
HastingsGreer/mermaid
|
bd13c5fc427eb8cd9054973a8eaaeb302078182d
|
[
"Apache-2.0"
] | 120
|
2019-10-29T23:53:02.000Z
|
2022-03-30T02:59:58.000Z
|
mermaid/utils.py
|
AlexanderChristgau/mermaid
|
ba07883cc3cb5982e4655048a434b4495cb49c6d
|
[
"Apache-2.0"
] | 10
|
2019-11-05T09:28:35.000Z
|
2022-01-09T19:12:51.000Z
|
mermaid/utils.py
|
AlexanderChristgau/mermaid
|
ba07883cc3cb5982e4655048a434b4495cb49c6d
|
[
"Apache-2.0"
] | 19
|
2019-11-10T13:34:39.000Z
|
2022-03-13T20:30:10.000Z
|
"""Various utility functions.
.. todo::
Reorganize this package in a more meaningful way.
"""
from __future__ import print_function
from __future__ import absolute_import
# from builtins import str
# from builtins import range
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from .libraries.modules.stn_nd import STN_ND_BCXYZ
from .data_wrapper import AdaptVal
from .data_wrapper import MyTensor
from . import smoother_factory as sf
from .data_wrapper import USE_CUDA
import numpy as np
from . import finite_differences as fd
import torch.nn as nn
import torch.nn.init as init
from . import module_parameters as pars
from .spline_interpolation import SplineInterpolation_ND_BCXYZ
import os
try:
from .libraries.functions.nn_interpolation import get_nn_interpolation
except ImportError:
print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '
'Some functionality may not be available.')
def my_hasnan(x):
    """Check if any input elements are NaNs.

    :param x: numpy array
    :return: True if NaNs are present, False else
    """
    # NaN is the only value that compares unequal to itself, so x != x is a
    # dtype-agnostic NaN mask.
    nan_mask = x != x
    return nan_mask.any()
def create_symlink_with_correct_ext(sf, tf):
    """Symlink to ``sf``, naming the link after ``tf`` but with ``sf``'s extension.

    If a file with the target name already exists it is replaced, unless it
    already is the very same file, in which case nothing happens.

    :param sf: source file the link should point to
    :param tf: desired target name (its extension is replaced by sf's)
    """
    source = os.path.abspath(sf)
    source_ext = os.path.splitext(source)[1]

    target_root = os.path.splitext(os.path.abspath(tf))[0]
    target = target_root + source_ext

    if os.path.isfile(target):
        if os.path.samefile(source, target):
            # nothing to do here, these are already the same file
            return
        os.remove(target)

    # now we can do the symlink
    os.symlink(source, target)
def combine_dict(d1, d2):
    """Creates a dictionary which has entries from both of them.

    Entries of ``d2`` take precedence over entries of ``d1``.

    :param d1: dictionary 1
    :param d2: dictionary 2
    :return: resulting dictionary
    """
    merged = d1.copy()
    merged.update(d2)
    return merged
def get_parameter_list_from_parameter_dict(pd):
    """Takes a dictionary which contains key value pairs for model parameters
    and converts it into a list of parameters that can be used as an input to
    an optimizer.

    :param pd: parameter dictionary
    :return: list of parameters (in the dictionary's iteration order)
    """
    return [pd[name] for name in pd]
def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd):
    """Same as get_parameter_list_from_parameter_dict; but also returns a
    dictionary which keeps track of the keys based on memory id.

    :param pd: parameter dictionary
    :return: tuple of (parameter_list, name_dictionary)
    """
    params = []
    par_to_name = dict()
    for name in pd:
        value = pd[name]
        params.append(value)
        # Reverse mapping: parameter object -> its key in pd.
        par_to_name[value] = name
    return params, par_to_name
def remove_infs_from_variable(v):
    """Clamp a floating-point tensor so subsequent reductions stay finite.

    Values are limited to +-(finfo.max / nr_of_elements) for the tensor's
    float type, so that summing over all elements cannot overflow to inf.
    (Values of that magnitude should not occur in practice anyway.)

    :param v: torch tensor of dtype float16, float32 or float64 (CPU or CUDA)
    :return: clamped tensor
    :raises ValueError: if the tensor has an unsupported dtype
    """
    sz = v.size()
    reduction_factor = np.prod(np.array(sz))

    # Fix: the original compared v.data.dtype against tensor *classes*
    # (torch.DoubleTensor / torch.HalfTensor), which is always False, so CPU
    # float64/float16 tensors incorrectly raised ValueError. Compare dtypes
    # instead; this also covers both CPU and CUDA tensors uniformly.
    if v.dtype == torch.float32:
        finfo = np.finfo('float32')
    elif v.dtype == torch.float64:
        finfo = np.finfo('float64')
    elif v.dtype == torch.float16:
        finfo = np.finfo('float16')
    else:
        raise ValueError('Unknown data type: ' + str(type(v.data)))

    # float(...) replaces the removed np.asscalar for numpy >= 1.23.
    return torch.clamp(v,
                       min=float(finfo.min) / reduction_factor,
                       max=float(finfo.max) / reduction_factor)
def lift_to_dimension(A, dim):
    """Creates a view of A of dimension dim (by adding dummy dimensions if
    necessary).

    :param A: numpy array
    :param dim: desired dimension of view
    :return: returns view of A of appropriate dimension
    """
    current_dim = len(A.shape)
    if current_dim > dim:
        raise ValueError('Can only add dimensions, but not remove them')
    if current_dim == dim:
        # Already the requested dimensionality: return as-is.
        return A
    # Prepend singleton axes until the requested dimension is reached.
    leading_ones = [1] * (dim - current_dim)
    return A.reshape(leading_ones + list(A.shape))
def get_dim_of_affine_transform(Ab):
    """Returns the number of dimensions corresponding to an affine
    transformation of the form y=Ax+b stored in a column vector. For
    A=[a1,a2,a3], the parameter vector is simply [a1;a2;a3;b], i.e., all
    columns stacked on top of each other.

    :param Ab: parameter vector
    :return: dimensionality of transform (1,2,or 3)
    """
    # In d dimensions the stacked vector has d*(d+1) entries: 2, 6 or 12.
    length_to_dim = {2: 1, 6: 2, 12: 3}
    nr = len(Ab)
    if nr not in length_to_dim:
        raise ValueError('Only supports dimensions 1, 2, and 3.')
    return length_to_dim[nr]
def set_affine_transform_to_identity(Ab):
    """Sets the affine transformation as given by the column vector Ab to the
    identity transform.

    :param Ab: Affine parameter vector (will be overwritten with the identity
        transform)
    :return:
    """
    # get_dim_of_affine_transform raises for any invalid length, so dim is
    # guaranteed to be 1, 2 or 3 below.
    dim = get_dim_of_affine_transform(Ab)

    # Indices of the diagonal entries of A within the stacked [A; b] vector.
    diagonal_indices = {1: (0,), 2: (0, 3), 3: (0, 4, 8)}[dim]

    Ab.zero_()
    for idx in diagonal_indices:
        Ab[idx] = 1.
def set_affine_transform_to_identity_multiN(Ab):
    """Set the affine transforms to the identity (in the case of arbitrary
    batch size).

    :param Ab: Parameter vectors B x pars (batch size x param. vector); will
        be overwritten with identity trans.
    :return:
    """
    # Overwrite each batch entry's parameter vector in place.
    for image_idx in range(Ab.size()[0]):
        set_affine_transform_to_identity(Ab[image_idx, :])
def get_inverse_affine_param(Ab):
    """Computes inverse of affine transformation.

    Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb

    :param Ab: B x pars (batch size x param. vector)
    :return: Inverse of affine parameters
    """
    # Parameter-vector length per dimension: d*(d+1) = 2, 6 or 12.
    dim = {2: 1, 6: 2, 12: 3}.get(Ab.shape[1], 0)
    if dim not in [1, 2, 3]:
        raise ValueError('Only supports dimensions 1, 2, and 3.')

    batch = Ab.shape[0]
    # Reshape to (B, dim, dim+1): columns [A | b].
    mats = Ab.view(batch, dim + 1, dim).transpose(1, 2)

    inv = torch.zeros_like(mats)
    for b in range(batch):
        A_inv = torch.inverse(mats[b, :, :dim])
        inv[b, :, :dim] = A_inv
        inv[b, :, dim] = - torch.matmul(A_inv, mats[b, :, dim])

    return inv.transpose(1, 2).contiguous().view(batch, -1)
def update_affine_param(Ab, Cd):
    """Update affine parameters.

    Formally: C(Ax+b)+d = CAx+Cb+d, i.e. Cd is composed on top of Ab.

    :param Ab: B x pars (batch size x param. vector)
    :return: Updated affine parameters
    """
    dim = {2: 1, 6: 2, 12: 3}.get(Ab.shape[1], 0)
    if dim not in [1, 2, 3]:
        raise ValueError('Only supports dimensions 1, 2, and 3.')

    batch = Ab.shape[0]
    # Reshape both parameter vectors to (B, dim, dim+1): columns [A | b].
    first = Ab.view(batch, dim + 1, dim).transpose(1, 2)
    second = Cd.view(batch, dim + 1, dim).transpose(1, 2)

    composed = torch.zeros_like(first)
    for b in range(batch):
        # New linear part: C A; new translation: C b + d.
        composed[b, :, :dim] = torch.matmul(second[b, :, :dim], first[b, :, :dim])
        composed[b, :, dim] = torch.matmul(second[b, :, :dim], first[b, :, dim]) + second[b, :, dim]

    return composed.transpose(1, 2).contiguous().view(batch, -1)
def apply_affine_transform_to_map(Ab,phi):
    """Applies an affine transform to a map.

    :param Ab: affine transform parameter column vector
    :param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)
    :return: returns transformed map
    """
    sz = phi.size()
    # Spatial dimensionality: the map has one leading channel axis.
    dim = len(sz) - 1
    if dim not in [1,2,3]:
        raise ValueError('Only supports dimensions 1, 2, and 3.')

    phiR = MyTensor(sz).zero_().type_as(phi)

    if dim == 1:
        phiR = phi * Ab[0] + Ab[1]
    elif dim == 2:
        # Parameter layout: columns of A stacked, then b (see
        # get_dim_of_affine_transform).
        phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4]  # a_11x+a_21y+b1
        phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5]  # a_12x+a_22y+b2
    elif dim == 3:
        phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9]
        phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10]
        phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11]
    else:
        # Unreachable: dim was validated above; kept for symmetry.
        raise ValueError('Only supports dimensions 1, 2, and 3.')

    return phiR
def apply_affine_transform_to_map_multiNC(Ab,phi):
    """Applies an affine transform to maps (for arbitrary batch size).

    :param Ab: affine transform parameter column vectors (batch size x param. vector)
    :param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)
    :return: returns transformed maps
    """
    sz = phi.size()
    # All batch entries must share one dimensionality; probe the first.
    dim = get_dim_of_affine_transform(Ab[0,:])
    nr_of_images = Ab.size()[0]

    # One affine parameter vector is required per batched map.
    if nr_of_images != sz[0]:
        raise ValueError('Incompatible number of affine transforms')
    if dim != len(sz)-2:
        raise ValueError('Incompatible number of affine transforms')

    phiR = MyTensor(sz).zero_().type_as(phi)
    # Apply the per-image transform slice by slice.
    for nrI in range(nr_of_images):
        phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...])

    return phiR
def compute_normalized_gaussian(X, mu, sig):
    """Computes a normalized Gaussian.

    :param X: map with coordinates at which to evaluate
    :param mu: array indicating the mean
    :param sig: array indicating the standard deviations for the different
        dimensions
    :return: Normalized Gaussian evaluated at coordinates in X (sums to 1)
    """
    dim = len(mu)
    if dim not in (1, 2, 3):
        raise ValueError('Can only compute Gaussians in dimensions 1-3')

    # Accumulate the (negative) quadratic exponent one axis at a time;
    # exp(sum) equals the product of the per-axis Gaussians.
    exponent = -np.power(X[0, ...] - mu[0], 2.) / (2 * np.power(sig[0], 2.))
    for axis in range(1, dim):
        exponent = exponent - np.power(X[axis, ...] - mu[axis], 2.) / (2 * np.power(sig[axis], 2.))

    g = np.exp(exponent)
    # Normalize so the discrete values sum to one.
    return g / g.sum()
def _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
    """Warp a 1D BxCxX image I0 with map phi.

    spline_order 0 uses nearest-neighbor STN sampling, 1 uses linear STN
    sampling, and 2-9 use explicit spline interpolation.
    """
    if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
        raise ValueError('Currently only orders 0 to 9 are supported')

    if spline_order == 0:
        # Nearest-neighbor sampling.
        stn = STN_ND_BCXYZ(spacing,
                           zero_boundary,
                           use_bilinear=False,
                           use_01_input=use_01_input)
    elif spline_order == 1:
        # Linear interpolation.
        stn = STN_ND_BCXYZ(spacing,
                           zero_boundary,
                           use_bilinear=True,
                           use_01_input=use_01_input)
    else:
        # Higher-order spline interpolation.
        stn = SplineInterpolation_ND_BCXYZ(spacing,
                                           spline_order)

    I1_warped = stn(I0, phi)

    return I1_warped
def _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
    """Warp a 2D BxCxXxY image I0 with map phi.

    Identical dispatch to the 1d variant: spline_order 0 is nearest-neighbor,
    1 is bilinear, 2-9 use explicit spline interpolation.
    """
    if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
        raise ValueError('Currently only orders 0 to 9 are supported')

    if spline_order == 0:
        # Nearest-neighbor sampling.
        stn = STN_ND_BCXYZ(spacing,
                           zero_boundary,
                           use_bilinear=False,
                           use_01_input=use_01_input)
    elif spline_order == 1:
        # Bilinear interpolation.
        stn = STN_ND_BCXYZ(spacing,
                           zero_boundary,
                           use_bilinear=True,
                           use_01_input=use_01_input)
    else:
        # Higher-order spline interpolation.
        stn = SplineInterpolation_ND_BCXYZ(spacing,
                                           spline_order)

    I1_warped = stn(I0, phi)

    return I1_warped
def _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
    """Warp a 3D BxCxXxYxZ image I0 with map phi.

    Same dispatch as the 1d/2d variants: spline_order 0 is nearest-neighbor,
    1 is trilinear, 2-9 use explicit spline interpolation.
    """
    if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
        raise ValueError('Currently only orders 0 to 9 are supported')

    if spline_order == 0:
        # return get_warped_label_map(I0,phi,spacing)
        stn = STN_ND_BCXYZ(spacing,
                           zero_boundary,
                           use_bilinear=False,
                           use_01_input=use_01_input)
    elif spline_order == 1:
        # Trilinear interpolation.
        stn = STN_ND_BCXYZ(spacing,zero_boundary,
                           use_bilinear=True,
                           use_01_input=use_01_input)
    else:
        # Higher-order spline interpolation.
        stn = SplineInterpolation_ND_BCXYZ(spacing,
                                           spline_order)

    I1_warped = stn(I0, phi)

    return I1_warped
def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
    """Warps image.

    :param I0: image to warp, image size XxYxZ
    :param phi: map for the warping, size dimxXxYxZ
    :param spacing: image spacing [dx,dy,dz]
    :return: returns the warped image of size XxYxZ
    """
    # Add dummy batch and channel dimensions, warp, then strip them again.
    I0_batched = I0.view(torch.Size([1, 1] + list(I0.size())))
    phi_batched = phi.view(torch.Size([1] + list(phi.size())))
    warped = compute_warped_image_multiNC(I0_batched,
                                          phi_batched,
                                          spacing,
                                          spline_order,
                                          zero_boundary,
                                          use_01_input)
    return warped.view(I0.size())
def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
    """Warps image.

    :param I0: image to warp, image size BxCxXxYxZ
    :param phi: map for the warping, size BxdimxXxYxZ
    :param spacing: image spacing [dx,dy,dz]
    :return: returns the warped image of size BxCxXxYxZ
    """
    # Spatial dimensionality: strip batch and channel axes.
    dim = I0.dim() - 2
    dispatch = {1: _compute_warped_image_multiNC_1d,
                2: _compute_warped_image_multiNC_2d,
                3: _compute_warped_image_multiNC_3d}
    if dim not in dispatch:
        raise ValueError('Images can only be warped in dimensions 1 to 3')
    return dispatch[dim](I0, phi, spacing, spline_order, zero_boundary, use_01_input=use_01_input)
def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize):
    """Computes spacing for the low-res parametrization from image spacing.

    :param spacing: image spacing
    :param sz: size of image
    :param lowResSize: size of low re parameterization
    :return: returns spacing of low res parameterization
    """
    # todo: check that this is the correct way of doing it
    # Scale by the ratio of cell counts; the first two entries of sz are
    # batch and channel and are skipped.
    high_res_cells = np.array(sz[2::]) - 1
    low_res_cells = np.array(lowResSize[2::]) - 1
    return spacing * high_res_cells / low_res_cells
def _get_low_res_size_from_size(sz, factor):
    """Returns the corresponding low-res size from a (high-res) sz.

    :param sz: size (high-res)
    :param factor: low-res factor (needs to be <1)
    :return: low res size
    """
    if (factor is None) or (factor >= 1):
        # A missing or non-shrinking factor leaves the size unchanged.
        print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
        return np.array(sz)

    low_res_sz = np.array(sz)
    # Only the spatial entries (beyond batch and channel) are scaled.
    low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16')
    return low_res_sz
def _compute_low_res_image(I, spacing, low_res_size, spline_order):
    """Downsample image I to ``low_res_size`` using the mermaid resampler.

    :param I: image, format BxCxXxYxZ
    :param spacing: spacing of I
    :param low_res_size: target size; only the spatial entries (from index 2
        on) are used
    :param spline_order: interpolation order passed to the resampler
    :return: the downsampled image
    """
    # Imported lazily to avoid a circular import at module load time.
    import mermaid.image_sampling as IS
    sampler = IS.ResampleImage()
    low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order)
    return low_res_image
def individual_parameters_to_model_parameters(ind_pars):
    """Convert individual parameters into a model-parameter dictionary.

    :param ind_pars: either a dictionary (returned unchanged) or an
        optimizer-style list whose elements are dictionaries with keys
        'name' and 'model_params'
    :return: dictionary mapping parameter names to model parameters
    """
    if type(ind_pars) == dict:
        # should already be in the right format
        return ind_pars
    # if ind_pars is not a dictionary assume that they come from the optimizer
    # (i.e., list and each list element has a dictionary with keys 'name' and 'model_params'
    return {par['name']: par['model_params'] for par in ind_pars}
def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing):
    """Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.

    :param lam: scalar momentum, BxCxXxYxZ
    :param I: image, BxCxXxYxZ
    :param sz: size of image
    :param spacing: spacing of image
    :return: returns the vector momentum
    """
    nrOfI = sz[0]  # number of images
    m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI)  # attention that the second dimension here is image dim, not nrOfC

    nrOfC = sz[1]
    for c in range(nrOfC):  # loop over all the channels and add the results
        # Per-channel contributions are summed into a single vector field.
        m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...],
                                                                    I[:, c, ...],
                                                                    nrOfI,
                                                                    sz[2::],
                                                                    spacing)
    return m
def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing):
    """Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.

    :param lam: scalar momentum, batchxXxYxZ
    :param I: image, batchXxYxZ
    :param nrOfI: number of images in the batch
    :param sz: spatial size of image
    :param spacing: spacing of image
    :return: returns the vector momentum (batch x dim x spatial dims)
    """
    # Central finite differences provide the spatial gradient of I.
    fdt = fd.FD_torch(spacing)
    dim = len(sz)
    m = create_ND_vector_field_variable_multiN(sz, nrOfI)
    if dim == 1:
        m[:, 0, :] = fdt.dXc(I)*lam
    elif dim == 2:
        m[:, 0, :, :] = fdt.dXc(I)*lam
        m[:, 1, :, :] = fdt.dYc(I)*lam
    elif dim == 3:
        m[:, 0, :, :, :] = fdt.dXc(I)*lam
        m[:, 1, :, :, :] = fdt.dYc(I)*lam
        m[:, 2, :, :, :] = fdt.dZc(I)*lam
    else:
        raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3')
    return m
def create_ND_vector_field_variable_multiN(sz, nr_of_images=1):
    """
    Create vector field torch Variable of given size.

    The field is initialized with small Gaussian noise (std 1e-7) rather than
    exact zeros.

    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nr_of_images: number of images (batch size)
    :return: returns vector field of size nrOfIxdimxXxYxZ
    """
    dim = len(sz)
    csz = np.array(sz)  # just to make sure it is a numpy array
    # Prepend batch and dimension axes: nrOfI x dim x spatial size.
    csz = np.array([nr_of_images, dim]+list(csz))
    return MyTensor(*(csz.tolist())).normal_(0., 1e-7)
def create_ND_vector_field_variable(sz):
    """Create vector field torch Variable of given size.

    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :return: tensor of size dim x (spatial), filled with tiny normal noise
    """
    dim = len(sz)
    full_shape = np.array([dim] + list(np.array(sz))).tolist()
    return MyTensor(*full_shape).normal_(0., 1e-7)
def create_vector_parameter(nr_of_elements):
    """Creates a vector parameter with a specified number of elements.

    :param nr_of_elements: number of vector elements
    :return: torch Parameter initialized with tiny normal noise
    """
    vec = MyTensor(nr_of_elements)
    vec.normal_(0., 1e-7)  # in-place init; avoids an all-zero parameter
    return Parameter(vec)
def create_ND_vector_field_parameter_multiN(sz, nrOfI=1, get_field_from_external_network=False):
    """Create a vector-field torch Parameter of size nrOfI x dim x XxYxZ.

    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nrOfI: number of images
    :param get_field_from_external_network: if True the field comes from a network,
        so a plain tensor with requires_grad=True is returned instead of a Parameter
    :return: vector field of size nrOfI x dim x (spatial)
    """
    dim = len(sz)
    full_shape = np.array([nrOfI, dim] + list(np.array(sz))).tolist()
    field = MyTensor(*full_shape).normal_(0., 1e-7)
    if get_field_from_external_network:
        field.requires_grad = True
        return field
    return Parameter(field)
def create_local_filter_weights_parameter_multiN(sz, gaussian_std_weights, nrOfI=1, sched='w_K_w', get_preweight_from_network=False):
    """Create a multi-Gaussian weight field of size nrOfI x nr_of_weights x XxYxZ.

    Each weight channel is constant-filled with its Gaussian's default weight.

    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param gaussian_std_weights: per-Gaussian default weights (tensors)
    :param nrOfI: number of images
    :param sched: weighting scheme; 'w_K_w' applies the weights twice, so their
        square roots are stored
    :param get_preweight_from_network: if True return a requires_grad tensor
        instead of a Parameter (the preweights come from a network)
    :return: weight field of size nrOfI x nr_of_weights x (spatial)
    """
    nr_of_mg_weights = len(gaussian_std_weights)
    full_shape = np.array([nrOfI, nr_of_mg_weights] + list(np.array(sz)))
    if sched == 'w_K_w':
        # sqrt so that applying the weight twice yields the configured value
        gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights]
    weights = torch.empty(*full_shape)
    for g, w in enumerate(gaussian_std_weights):
        weights[:, g, ...] = w
    tmp = AdaptVal(weights)
    if get_preweight_from_network:
        tmp.requires_grad = True
        return tmp
    return Parameter(tmp)
def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1):
    """Create a scalar-field torch Parameter of size nrOfI x nrOfC x XxYxZ.

    :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nrOfI: number of images
    :param nrOfC: number of channels
    :return: Parameter of size nrOfI x nrOfC x (spatial), tiny-noise initialized
    """
    full_shape = np.array([nrOfI, nrOfC] + list(np.array(sz))).tolist()
    return Parameter(MyTensor(*full_shape).normal_(0., 1e-7))
def centered_identity_map_multiN(sz, spacing, dtype='float32'):
    """
    Create a centered identity map (shifted so it is centered around 0)

    :param sz: size of an image in BxCxXxYxZ format
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: returns the identity map, shape nrOfI x dim x (spatial)
    :raises ValueError: for spatial dimensions other than 1-3
    """
    dim = len(sz) - 2
    # cast for consistency with identity_map_multiN (sz entries may be numpy/torch ints)
    nrOfI = int(sz[0])
    if dim < 1 or dim > 3:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    # renamed from `id` to avoid shadowing the builtin
    id_map = np.zeros([nrOfI, dim] + list(sz[2:]), dtype=dtype)
    # every batch entry receives the same centered identity map
    for n in range(nrOfI):
        id_map[n, ...] = centered_identity_map(sz[2::], spacing, dtype=dtype)
    return id_map
def identity_map_multiN(sz, spacing, dtype='float32'):
    """
    Create an identity map

    :param sz: size of an image in BxCxXxYxZ format
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: returns the identity map
    """
    dim = len(sz) - 2
    nr_of_images = int(sz[0])
    if dim not in (1, 2, 3):
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    id_map = np.zeros([nr_of_images, dim] + list(sz[2:]), dtype=dtype)
    # each batch entry gets the same identity map
    for n in range(nr_of_images):
        id_map[n, ...] = identity_map(sz[2::], spacing, dtype=dtype)
    return id_map
def centered_identity_map(sz, spacing, dtype='float32'):
    """
    Returns a centered identity map (with 0 in the middle) if the sz is odd
    Otherwise shifts everything by 0.5*spacing

    :param sz: just the spatial dimensions, i.e., XxYxZ
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: returns the identity map of dimension dimxXxYxZ
    :raises ValueError: for dimensions other than 1-3
    """
    dim = len(sz)
    if dim == 1:
        id = np.mgrid[0:sz[0]]
    elif dim == 2:
        id = np.mgrid[0:sz[0], 0:sz[1]]
    elif dim == 3:
        id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    # scale to physical coordinates, then shift so the map is centered around 0
    id = np.array(id.astype(dtype))
    if dim == 1:
        id = id.reshape(1, sz[0])  # add a dummy first index
    for d in range(dim):
        id[d] *= spacing[d]
        # BUGFIX: the odd case previously shifted by (sz+1)//2, which placed the
        # zero one sample off-center. sz//2 puts 0 exactly in the middle for odd
        # sizes and shifts even sizes by 0.5*spacing, matching the docstring.
        id[d] -= spacing[d] * (sz[d] // 2)
    # and now store it in a dim+1 array
    idnp = np.zeros([dim] + list(sz), dtype=dtype)
    for d in range(dim):
        idnp[d, ...] = id[d]
    return idnp
#
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
def identity_map(sz, spacing, dtype='float32'):
    """
    Returns an identity map.

    :param sz: just the spatial dimensions, i.e., XxYxZ
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: returns the identity map of dimension dimxXxYxZ
    :raises ValueError: for dimensions other than 1-3
    """
    ndim = len(sz)
    if ndim == 1:
        grid = np.mgrid[0:sz[0]].reshape(1, sz[0])  # add a dummy first index
    elif ndim == 2:
        grid = np.mgrid[0:sz[0], 0:sz[1]]
    elif ndim == 3:
        grid = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    # convert index coordinates into range [0, (sz-1)*spacing]^d
    grid = grid.astype(dtype)
    for d in range(ndim):
        grid[d] *= spacing[d]
    return grid
def omt_boundary_weight_mask(img_sz, spacing, mask_range=5, mask_value=5, smoother_std=0.05):
    """Generate a smooth weight mask for the OMT: mask_value near the image
    boundary, 1 in the interior, then Gaussian-smoothed."""
    dim = len(img_sz)
    mask = AdaptVal(torch.ones(1, 1, *img_sz)) * mask_value
    interior = slice(mask_range, -mask_range)
    if dim == 2:
        mask[:, :, interior, interior] = 1
    elif dim == 3:
        mask[:, :, interior, interior, interior] = 1
    smoother = get_single_gaussian_smoother(smoother_std, img_sz, spacing)
    return smoother.smooth(mask).detach()
def momentum_boundary_weight_mask(img_sz, spacing, mask_range=5, smoother_std=0.05, pow=2):
    """Generate a smooth weight mask for the momentum: 0 near the boundary,
    1 in the interior, Gaussian-smoothed and optionally raised to a power.

    Note: the keyword `pow` shadows the builtin; kept for API compatibility.
    """
    dim = len(img_sz)
    mask = AdaptVal(torch.zeros(1, 1, *img_sz))
    interior = slice(mask_range, -mask_range)
    if dim == 2:
        mask[:, :, interior, interior] = 1
    elif dim == 3:
        mask[:, :, interior, interior, interior] = 1
    smoother = get_single_gaussian_smoother(smoother_std, img_sz, spacing)
    mask = smoother.smooth(mask)
    if pow == 2:
        mask = mask ** 2
    if pow == 3:
        mask = mask * mask * mask
    return mask
# def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
def get_single_gaussian_smoother(gaussian_std, sz, spacing):
    """Build a single-Gaussian smoother with the given std for images of size sz/spacing."""
    params = pars.ParameterDict()
    params['smoother']['type'] = 'gaussian'
    params['smoother']['gaussian_std'] = gaussian_std
    return sf.SmootherFactory(sz, spacing).create_smoother(params)
def get_warped_label_map(label_map, phi, spacing, sched='nn'):
    """Warp a label map with deformation phi using nearest-neighbor
    (spline order 0) interpolation; only sched='nn' is implemented."""
    if sched != 'nn':
        raise ValueError(" the label warping method is not implemented")
    warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing, spline_order=0, zero_boundary=True)
    # NN-warped labels should stay (numerically) integral; guard against interpolation drift
    assert abs(torch.sum(warped_label_map.data - warped_label_map.data.round())) < 0.1, "nn interpolation is not precise"
    return warped_label_map
def t2np(v):
    """
    Takes a torch array and returns it as a numpy array on the cpu

    :param v: torch array
    :return: numpy array
    """
    detached = v.detach()
    return detached.cpu().numpy()
def cxyz_to_xyzc(v):
    """Move the channel axis of a BxCx(spatial) tensor to the last position.

    2D (BxCxXxY) and 3D (BxCxXxYxZ) inputs are permuted to channel-last;
    anything else is returned unchanged.

    :param v: torch tensor in channel-first layout
    :return: torch tensor in channel-last layout
    """
    spatial_dim = len(v.shape) - 2
    if spatial_dim == 2:
        return v.permute(0, 2, 3, 1)
    if spatial_dim == 3:
        return v.permute(0, 2, 3, 4, 1)
    return v
def get_scalar(v):
    """Coerce v to a Python float when it is a float or a one-element numpy array.

    Returns None (implicitly) for any other input, mirroring the original behavior.
    """
    if isinstance(v, float):
        return v
    if isinstance(v, np.ndarray) and v.size == 1:
        return float(v)
def checkNan(x):
    """Count NaN entries in each tensor of a list.

    :param x: list of torch tensors
    :return: list with the number of NaN entries per tensor
    """
    counts = []
    for elem in x:
        arr = elem.detach().cpu().numpy()
        counts.append(int(np.isnan(arr).sum()))
    return counts
def noramlized_spacing_to_smallest(spacing):
    """Clamp every entry of `spacing` (in place) to the smallest entry.

    Note: the typo in the function name is preserved for API compatibility.

    :param spacing: numpy array of spacings; modified in place and returned
    """
    np.minimum(spacing, spacing.min(), out=spacing)
    return spacing
def time_warped_function(f):
    """Decorator that times a single-argument function using CUDA events and
    prints the elapsed time in milliseconds (requires a CUDA device)."""
    def __time_warped_function(input=None):
        start_evt = torch.cuda.Event(enable_timing=True)
        end_evt = torch.cuda.Event(enable_timing=True)
        start_evt.record()
        result = f(input)
        end_evt.record()
        # events are recorded asynchronously; synchronize before reading the timer
        torch.cuda.synchronize()
        print(start_evt.elapsed_time(end_evt))
        return result
    return __time_warped_function
def interoplate_boundary_right(tensor):
    """Linearly extrapolate the right boundary of each spatial axis, in place.

    For every spatial dimension the last slice is replaced by
    2*second-to-last - third-to-last (simple linear extrapolation).
    Assumes a BxCx(spatial) tensor with at least 3 samples per spatial axis.

    BUGFIX: the 1D case indexed tensor[:, :-2] (dropping a dimension) and the
    3D case used six indices on a five-dimensional tensor (IndexError); both
    are corrected to index the spatial axes of a BxCx... tensor.
    """
    dim = len(tensor.shape) - 2
    if dim == 1:
        tensor[:, :, -1] = 2 * tensor[:, :, -2] - tensor[:, :, -3]
    elif dim == 2:
        tensor[:, :, -1, :] = 2 * tensor[:, :, -2, :] - tensor[:, :, -3, :]
        tensor[:, :, :, -1] = 2 * tensor[:, :, :, -2] - tensor[:, :, :, -3]
    elif dim == 3:
        tensor[:, :, -1, :, :] = 2 * tensor[:, :, -2, :, :] - tensor[:, :, -3, :, :]
        tensor[:, :, :, -1, :] = 2 * tensor[:, :, :, -2, :] - tensor[:, :, :, -3, :]
        tensor[:, :, :, :, -1] = 2 * tensor[:, :, :, :, -2] - tensor[:, :, :, :, -3]
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
    """Resample image I to desiredSize; convenience wrapper around resample_image
    that discards the returned spacing.

    :param I: B C X Y Z
    :param spacing: spx spy spz; if None it is derived from the image size
    :param desiredSize: B C X Y Z
    :param spline_order: interpolation order
    :param zero_boundary: zero boundary condition flag
    :param identity_map: optional precomputed identity map
    :return: the resampled image
    """
    if spacing is None:
        img_sz = I.shape[2:]
        spacing = 1. / (np.array(img_sz) - 1)
    # todo will remove, currently fix for symmetric training
    if identity_map is not None and I.shape[0] != identity_map.shape[0]:
        n_batch = I.shape[0]
        desiredSize = desiredSize.copy()
        desiredSize[0] = n_batch
        identity_map = identity_map[:n_batch]
    resampled, _ = resample_image(I, spacing, desiredSize, spline_order=spline_order,
                                  zero_boundary=zero_boundary, identity_map=identity_map)
    return resampled
def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
    """
    Resample an image to a given desired size

    :param I: Input image (expected to be of BxCxXxYxZ format)
    :param spacing: array describing the spatial spacing
    :param desiredSize: array for the desired size (including B and C; only entries 2: are used)
    :return: returns a tuple: the downsampled image, the new spacing after downsampling
    """
    desiredSize = desiredSize[2:]
    was_numpy = not isinstance(I, torch.Tensor)
    if was_numpy:
        I = torch.Tensor(I)
    sz = np.array(list(I.size()))
    nrOfI, nrOfC = sz[0], sz[1]
    desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize))
    # rescale the spacing so the physical extent of the image is preserved
    newspacing = spacing * ((sz[2::].astype('float') - 1.) /
                            (desiredSizeNC[2::].astype('float') - 1.))
    if identity_map is None:
        idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing)))
    else:
        idDes = identity_map
    # resample by warping with the (identity) map at the new resolution
    resampled = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary)
    return (resampled.numpy(), newspacing) if was_numpy else (resampled, newspacing)
def get_res_size_from_size(sz, factor):
    """
    Returns the corresponding low-res size from a (high-res) sz

    :param sz: size (high-res), including batch and channel dimensions
    :param factor: low-res factor, scalar or per-dimension list (needs to be <1)
    :return: low res size
    """
    if factor is None:
        print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
        return sz
    lowResSize = np.array(sz)
    scale = np.array(factor) if isinstance(factor, list) else factor
    lowResSize[2::] = np.ceil(np.array(sz[2:]) * scale).astype('int16')
    # the Fourier-based smoother currently requires an even last dimension
    if lowResSize[-1] % 2 != 0:
        lowResSize[-1] -= 1
        print(
            '\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')
    return lowResSize
def get_res_spacing_from_spacing(spacing, sz, lowResSize):
    """
    Computes spacing for the low-res parameterization from image spacing

    :param spacing: image spacing
    :param sz: size of image (including batch/channel dims)
    :param lowResSize: size of low res parameterization
    :return: returns spacing of low res parameterization
    """
    # keep the physical extent identical: (n_hi - 1) * sp_hi == (n_lo - 1) * sp_lo
    extent_ratio = (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1)
    return spacing * extent_ratio
########################################## Adaptive Net ###################################################3
def space_normal(tensors, std=0.1):
    """
    space normalize for the net kernel: fill every (n, c) kernel slice with a
    normalized Gaussian bump evaluated on a centered identity map.

    NOTE(review): currently guarded by an unconditional raise because the
    correct spacing for the identity map has not been decided yet.

    :param tensors: weight tensor (or Variable wrapping one), modified in place
    :param std: std of the Gaussian used per kernel slice
    :return: tensors (only on the Variable path)
    """
    if isinstance(tensors, Variable):
        space_normal(tensors.data, std=std)
        return tensors
    for n in range(tensors.size()[0]):
        for c in range(tensors.size()[1]):
            kernel = tensors[n][c]
            dim = kernel.dim()
            mus = np.zeros(dim)
            stds = std * np.ones(dim)
            print('WARNING: What should the spacing be here? Needed for new identity map code')
            raise ValueError('Double check the spacing here before running this code')
            spacing = np.ones(dim)
            centered_id = centered_identity_map(kernel.size(), spacing)
            g = compute_normalized_gaussian(centered_id, mus, stds)
            tensors[n, c] = torch.from_numpy(g)
def weights_init_uniform(m):
    """Module-wise uniform init (use with net.apply).

    Conv weights ~ U(0.038, 0.042), Linear weights ~ U(0, 0.02),
    BatchNorm2d weights ~ N(1.0, 0.02) with zero bias.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # init.uniform/init.constant (non-underscore) were removed from
        # torch.nn.init; use the in-place variants
        init.uniform_(m.weight.data, 0.038, 0.042)
    elif classname.find('Linear') != -1:
        init.uniform_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        # (1.0, 0.02) are mean/std of the conventional BatchNorm init; the old
        # call passed them to uniform, which rejects from > to on modern torch
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_normal(m):
    """Module-wise init (use with net.apply): Conv/Linear weights via
    space_normal, BatchNorm2d weights ~ N(1.0, 0.02) with zero bias."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        space_normal(m.weight.data)
    elif classname.find('Linear') != -1:
        space_normal(m.weight.data)
    elif classname.find('BatchNorm2d') != -1:
        # init.uniform/init.constant (non-underscore) were removed from
        # torch.nn.init; (1.0, 0.02) are mean/std of the standard BatchNorm init
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_rd_normal(m):
    """Module-wise random-normal init (use with net.apply): Conv/Linear weights
    ~ N(0, 1), BatchNorm2d weights ~ N(1.0, 0.02) with zero bias."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # init.normal/init.constant (non-underscore) were removed from
        # torch.nn.init; use the in-place variants
        init.normal_(m.weight.data)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data)
    elif classname.find('BatchNorm2d') != -1:
        # (1.0, 0.02) are mean/std of the standard BatchNorm init; the old call
        # passed them to uniform, which rejects from > to on modern torch
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Module-wise Xavier init (use with net.apply): Conv/Linear weights via
    xavier_normal_, BatchNorm2d weights ~ N(1.0, 0.02) with zero bias."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # init.xavier_normal/init.constant (non-underscore) were removed from
        # torch.nn.init; use the in-place variants
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        # (1.0, 0.02) are mean/std of the standard BatchNorm init; the old call
        # passed them to uniform, which rejects from > to on modern torch
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """Module-wise Kaiming init (use with net.apply): Conv/Linear weights via
    kaiming_normal_ (fan_in), BatchNorm2d weights ~ N(1.0, 0.02), zero bias."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # init.kaiming_normal/init.constant (non-underscore) were removed from
        # torch.nn.init; use the in-place variants
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        # (1.0, 0.02) are mean/std of the standard BatchNorm init; the old call
        # passed them to uniform, which rejects from > to on modern torch
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Module-wise orthogonal init (use with net.apply): Conv/Linear weights via
    orthogonal_, BatchNorm2d weights ~ N(1.0, 0.02) with zero bias."""
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        # init.orthogonal/init.constant (non-underscore) were removed from
        # torch.nn.init; use the in-place variants
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        # (1.0, 0.02) are mean/std of the standard BatchNorm init; the old call
        # passed them to uniform, which rejects from > to on modern torch
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
    """Apply the chosen weight-initialization scheme to every submodule of net."""
    print('initialization method [%s]' % init_type)
    initializers = {
        'rd_normal': weights_init_rd_normal,
        'normal': weights_init_normal,
        'uniform': weights_init_uniform,
        'xavier': weights_init_xavier,
        'kaiming': weights_init_kaiming,
        'orthogonal': weights_init_orthogonal,
    }
    if init_type not in initializers:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(initializers[init_type])
def organize_data(moving, target, sched='depth_concat'):
    """Combine the moving and target tensors into a single network input.

    :param moving: moving image tensor (BxCx...)
    :param target: target image tensor (same shape as moving)
    :param sched: 'depth_concat' (channel dim), 'width_concat' (dim 3),
        'list_concat' (new leading dim), or 'difference' (moving - target)
    :return: the combined tensor
    :raises ValueError: for an unknown sched (previously this surfaced as an
        UnboundLocalError on the return statement)
    """
    if sched == 'depth_concat':
        return torch.cat([moving, target], dim=1)
    if sched == 'width_concat':
        return torch.cat((moving, target), dim=3)
    if sched == 'list_concat':
        return torch.cat((moving.unsqueeze(0), target.unsqueeze(0)), dim=0)
    if sched == 'difference':
        return moving - target
    raise ValueError("organize_data: unknown sched '%s'" % sched)
def bh(m, gi, go):
    """Backward hook for debugging: print grad-input/grad-output sums and
    pass the first three grad inputs through unchanged."""
    grad_in_sums = (torch.sum(gi[0].data), torch.sum(gi[1].data))
    print("Grad Input")
    print(grad_in_sums)
    print("Grad Output")
    print(torch.sum(go[0].data))
    return gi[0], gi[1], gi[2]
class ConvBnRel(nn.Module):
    """2D conv (or transposed conv) followed by optional batch norm and an
    optional activation ('relu' or 'elu'; anything else disables it)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,
                 bn=False, reverse=False, bias=False):
        super(ConvBnRel, self).__init__()
        pad = int((kernel_size - 1) // 2) if same_padding else 0
        conv_cls = nn.ConvTranspose2d if reverse else nn.Conv2d
        self.conv = conv_cls(in_channels, out_channels, kernel_size, stride, padding=pad, bias=bias)
        # BatchNorm: y = (x - E[x]) / sqrt(Var[x] + eps) * gamma + beta;
        # affine=False would be equivalent to fixing gamma=1, beta=0
        self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None
        if active_unit == 'relu':
            self.active_unit = nn.ReLU(inplace=True)
        elif active_unit == 'elu':
            self.active_unit = nn.ELU(inplace=True)
        else:
            self.active_unit = None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.active_unit is not None:
            out = self.active_unit(out)
        return out
class FcRel(nn.Module):
    """Fully-connected layer followed by an optional activation
    ('relu' or 'elu'; anything else disables it)."""

    def __init__(self, in_features, out_features, active_unit='relu'):
        super(FcRel, self).__init__()
        self.fc = nn.Linear(in_features, out_features)
        if active_unit == 'relu':
            self.active_unit = nn.ReLU(inplace=True)
        elif active_unit == 'elu':
            self.active_unit = nn.ELU(inplace=True)
        else:
            self.active_unit = None

    def forward(self, x):
        out = self.fc(x)
        return out if self.active_unit is None else self.active_unit(out)
class AdpSmoother(nn.Module):
    """
    a simple conv. implementation, generate displacement field

    The momentum is masked by a learnable per-pixel weight and pushed through a
    small conv net whose input channels depend on how much context (source /
    target images) is concatenated (see self.net_sched).
    """

    def __init__(self, inputs, dim, net_sched=None):
        # settings should include [using_bias, using bn, using elu]
        # inputs should be a dictionary could contain ['s'],['t']
        super(AdpSmoother, self).__init__()
        self.dim = dim
        # NOTE(review): the net_sched argument is ignored and 'm_only' is always
        # forced here — confirm whether that is intentional before relying on
        # any other schedule.
        self.net_sched = 'm_only'
        self.s = inputs['s'].detach()
        self.t = inputs['t'].detach()
        # learnable per-pixel mask applied to the momentum before the net
        self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())] * dim, 1), requires_grad=True)
        self.get_net_sched()
        # self.net.register_backward_hook(bh)

    def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False, kernel_size=5):
        """Build self.net according to self.net_sched.

        The input channel count is self.dim plus the number of extra context
        channels the schedule concatenates (fixed source, deformed source,
        and/or target image).
        """
        padding_size = (kernel_size - 1) // 2
        # extra input channels per schedule (the momentum provides self.dim channels;
        # duplicated branches of the original are folded into this table)
        extra_channels = {'m_only': 0, 'm_f_s': 1, 'm_d_s': 1, 'm_f_s_t': 2, 'm_d_s_f_t': 2}
        n_extra = extra_channels[self.net_sched]
        if debugging:
            if self.net_sched == 'm_only':
                # hard-coded 2-channel depthwise conv used while debugging
                self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False, groups=2)
            else:
                self.net = nn.Conv2d(self.dim + n_extra, self.dim, kernel_size, 1, padding=padding_size, bias=False)
        else:
            layers = [ConvBnRel(self.dim + n_extra, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
                      ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
            if using_sigmoid:
                layers.append(nn.Sigmoid())
            self.net = nn.Sequential(*layers)

    def prepare_data(self, m, new_s):
        """Assemble the network input for the current schedule.

        BUGFIX: the original contained a duplicated (dead) 'm_f_s_t' elif
        branch; it has been removed without changing behavior. Unknown
        schedules still yield None, as before.
        """
        if self.net_sched == 'm_only':
            return m
        if self.net_sched == 'm_f_s':
            return organize_data(m, self.s, sched='depth_concat')
        if self.net_sched == 'm_d_s':
            return organize_data(m, new_s, sched='depth_concat')
        if self.net_sched == 'm_f_s_t':
            combined = organize_data(m, self.s, sched='depth_concat')
            return organize_data(combined, self.t, sched='depth_concat')
        if self.net_sched == 'm_d_s_f_t':
            combined = organize_data(m, new_s, sched='depth_concat')
            return organize_data(combined, self.t, sched='depth_concat')
        return None

    def forward(self, m, new_s=None):
        masked = m * self.mask
        net_in = self.prepare_data(masked, new_s)
        return self.net(net_in)
| 34.805304
| 130
| 0.602675
| 7,191
| 49,876
| 4.027673
| 0.09053
| 0.014812
| 0.008286
| 0.009944
| 0.610192
| 0.579187
| 0.554121
| 0.517074
| 0.488451
| 0.471084
| 0
| 0.027274
| 0.263413
| 49,876
| 1,432
| 131
| 34.829609
| 0.761092
| 0.272035
| 0
| 0.502488
| 0
| 0.001244
| 0.064918
| 0
| 0
| 0
| 0
| 0.003492
| 0.001244
| 1
| 0.087065
| false
| 0
| 0.026119
| 0
| 0.201493
| 0.016169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b96fca03cef0164231c4fa09bc83db6c5b2aa7db
| 1,093
|
py
|
Python
|
examples/io/plot_read_evoked.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3
|
2021-01-04T08:45:56.000Z
|
2021-05-19T12:25:59.000Z
|
examples/io/plot_read_evoked.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 28
|
2020-05-07T00:58:34.000Z
|
2020-08-29T23:02:17.000Z
|
examples/io/plot_read_evoked.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3
|
2019-01-28T13:48:00.000Z
|
2019-07-10T16:02:11.000Z
|
"""
==================================
Reading and writing an evoked file
==================================
This script shows how to read and write evoked datasets.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from mne import read_evokeds
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
###############################################################################
# Show result as a butterfly plot:
# By using exclude=[] bad channels are not excluded and are shown in red
evoked.plot(exclude=[], time_unit='s')
# Show result as a 2D image (x: time, y: channels, color: amplitude)
evoked.plot_image(exclude=[], time_unit='s')
###############################################################################
# Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked
# responses to a file.
| 29.540541
| 79
| 0.569076
| 130
| 1,093
| 4.676923
| 0.592308
| 0.039474
| 0.039474
| 0.042763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003165
| 0.132662
| 1,093
| 36
| 80
| 30.361111
| 0.638186
| 0.47667
| 0
| 0
| 0
| 0
| 0.120603
| 0.082915
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b970f8ccb56e24dd8d65fd92869bbf7790f6e611
| 5,298
|
py
|
Python
|
yt_dlp/extractor/ninenow.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 11
|
2022-01-06T22:09:50.000Z
|
2022-03-12T22:26:22.000Z
|
yt_dlp/extractor/ninenow.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 4
|
2022-02-25T08:20:18.000Z
|
2022-03-17T16:16:20.000Z
|
yt_dlp/extractor/ninenow.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 3
|
2022-02-19T08:59:13.000Z
|
2022-03-06T16:11:21.000Z
|
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
smuggle_url,
str_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
class NineNowIE(InfoExtractor):
    """Extractor for 9now.com.au episode and clip pages.

    The site is geo-restricted to Australia (see _GEO_COUNTRIES); actual
    media is hosted on Brightcove, so extraction is delegated to the
    BrightcoveNew extractor via a url_transparent result.
    """
    IE_NAME = '9now.com.au'
    _VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)'
    _GEO_COUNTRIES = ['AU']
    _TESTS = [{
        # clip
        'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc',
        'md5': '17cf47d63ec9323e562c9957a968b565',
        'info_dict': {
            'id': '16801',
            'ext': 'mp4',
            'title': 'St. Kilda\'s Joey Montagna on the potential for a player\'s strike',
            'description': 'Is a boycott of the NAB Cup "on the table"?',
            'uploader_id': '4460760524001',
            'upload_date': '20160713',
            'timestamp': 1468421266,
        },
        'skip': 'Only available in Australia',
    }, {
        # episode
        'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19',
        'only_matching': True,
    }, {
        # DRM protected
        'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1',
        'only_matching': True,
    }, {
        # episode of series
        'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3',
        'info_dict': {
            'id': '6249614030001',
            'title': 'Episode 3',
            'ext': 'mp4',
            'season_number': 3,
            'episode_number': 3,
            'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.',
            'uploader_id': '4460760524001',
            'timestamp': 1619002200,
            'upload_date': '20210421',
        },
        'expected_warnings': ['Ignoring subtitle tracks'],
        'params':{
            'skip_download': True,
        }
    }]
    # Brightcove player URL; %s is filled with the brightcove video id
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds its state either as a plain JS object literal or as
        # a JSON.parse("...") call; try the literal first, then fall back to
        # double-decoding the JSON.parse argument.
        page_data = self._parse_json(self._search_regex(
            r'window\.__data\s*=\s*({.*?});', webpage,
            'page data', default='{}'), display_id, fatal=False)
        if not page_data:
            page_data = self._parse_json(self._parse_json(self._search_regex(
                r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;',
                webpage, 'page data'), display_id), display_id)
        # The page describes either an 'episode' or a 'clip'; probe both and
        # pull the entry from the corresponding cache, falling back to the
        # first cached item when the current key is missing from the cache.
        for kind in ('episode', 'clip'):
            current_key = page_data.get(kind, {}).get(
                'current%sKey' % kind.capitalize())
            if not current_key:
                continue
            cache = page_data.get(kind, {}).get('%sCache' % kind, {})
            if not cache:
                continue
            common_data = {
                'episode': (cache.get(current_key) or list(cache.values())[0])[kind],
                'season': (cache.get(current_key) or list(cache.values())[0]).get('season', None)
            }
            break
        else:
            raise ExtractorError('Unable to find video data')
        if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool):
            self.report_drm(display_id)
        # Prefer the direct Brightcove id; otherwise address by reference id.
        brightcove_id = try_get(
            common_data, lambda x: x['episode']['video']['brightcoveId'], compat_str) or 'ref:%s' % common_data['episode']['video']['referenceId']
        video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id
        title = try_get(common_data, lambda x: x['episode']['name'], compat_str)
        season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int)
        episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int)
        timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], compat_str))
        release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], compat_str))
        thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {}
        # Thumbnail keys look like 'w768' etc., hence width = int(key[1:]);
        # presumably always prefixed with 'w' — int_or_none tolerates others.
        thumbnails = [{
            'id': thumbnail_id,
            'url': thumbnail_url,
            'width': int_or_none(thumbnail_id[1:]),
        } for thumbnail_id, thumbnail_url in thumbnails_data.items()]
        return {
            '_type': 'url_transparent',
            'url': smuggle_url(
                self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
                {'geo_countries': self._GEO_COUNTRIES}),
            'id': video_id,
            'title': title,
            'description': try_get(common_data, lambda x: x['episode']['description'], compat_str),
            # duration is reported in milliseconds; scale to seconds
            'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000),
            'thumbnails': thumbnails,
            'ie_key': 'BrightcoveNew',
            'season_number': season_number,
            'episode_number': episode_number,
            'timestamp': timestamp,
            'release_date': release_date,
        }
| 43.073171
| 146
| 0.575123
| 608
| 5,298
| 4.784539
| 0.317434
| 0.044689
| 0.045376
| 0.060502
| 0.266758
| 0.231351
| 0.207631
| 0.207631
| 0.131317
| 0.081815
| 0
| 0.041958
| 0.271234
| 5,298
| 122
| 147
| 43.42623
| 0.711474
| 0.008305
| 0
| 0.117117
| 0
| 0.045045
| 0.29358
| 0.038484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009009
| false
| 0
| 0.027027
| 0
| 0.099099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97242dec299cf214174fe1ceb1c2d4c7e16b595
| 4,783
|
py
|
Python
|
apex/fp16_utils/fused_weight_norm.py
|
mcarilli/apex
|
766e36c9e10fe4efd847c3f77c3b38974c89eab1
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T01:37:42.000Z
|
2020-05-05T01:37:42.000Z
|
apex/fp16_utils/fused_weight_norm.py
|
mcarilli/apex
|
766e36c9e10fe4efd847c3f77c3b38974c89eab1
|
[
"BSD-3-Clause"
] | 1
|
2018-06-24T18:56:56.000Z
|
2018-06-24T18:56:56.000Z
|
apex/fp16_utils/fused_weight_norm.py
|
mcarilli/apex
|
766e36c9e10fe4efd847c3f77c3b38974c89eab1
|
[
"BSD-3-Clause"
] | 1
|
2020-07-03T00:37:20.000Z
|
2020-07-03T00:37:20.000Z
|
import torch
from torch.autograd import Variable
from torch.autograd.function import Function, once_differentiable
import apex_C
def check_contig_cuda(tensors, names):
    """Validate that every tensor is contiguous and lives on a CUDA device.

    Args:
        tensors: iterable of torch tensors to validate.
        names: iterable of human-readable names, parallel to ``tensors``,
            used in the error messages.

    Raises:
        RuntimeError: if any tensor is non-contiguous or not a CUDA tensor.
    """
    for tensor, name in zip(tensors, names):
        if not tensor.is_contiguous():
            raise RuntimeError(name + " with size {} is not contiguous"
                               .format(tensor.size()))
        if not tensor.is_cuda:
            # Fix: the original concatenated "False." directly to "Currently",
            # producing the garbled message "...is_cuda = False.Currently...".
            raise RuntimeError(name + ".is_cuda = False. "
                               "Currently, only cuda tensors are supported.")
class Fused_Weight_Norm(Function):
    """
    Custom autograd function that implements weight norm, as presented in
    `<https://arxiv.org/abs/1602.07868>`_,
    along a tensor's slowest or
    fastest dimension using fused kernel launches for the forward and backward passes.
    Accepts fp32 or fp16 input; the output type will match the input type.
    Within the kernels, all calculations are performed in fp32 for numerical stability, regardless
    of input/output precision.
    """

    @staticmethod
    def forward(ctx, input, g, dim=0):
        """
        Args:
            input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous.
            g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``.
            dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported.
        Returns:
            Output tensor corresponding to **w** in the paper. Output type and precision will match
            type and precision of ``input``.
        """
        check_contig_cuda((input, g), ("input", "g"))
        # new() treats a torch.Size object properly; no need to unpack it
        # with an asterisk via new(*input.size()).
        output = input.new(input.size()).contiguous()
        # For output with size (slow, faster, faster, ...fastest), we want
        # norms with size (slow, 1, 1, ...1), so that if you retrieve norms
        # and apply the same normalizing factors to another tensor "t" with
        # the same size as output, "t/norms" will broadcast each element of
        # norms across the corresponding slowest dim of t.
        if dim == 0:
            norm_size = (output.size(0),) + (1,) * (output.dim() - 1)
        elif dim == output.dim() - 1:
            norm_size = (1,) * (output.dim() - 1) + (output.size(-1),)
        else:
            raise RuntimeError("Currently, Fused_Weight_Norm only supports first or last dimension.")
        # Beware: FloatTensor(norm_size) without the asterisk would see the
        # tuple and create a 1D tensor with *values* [output_size(0),1,1,...]
        # instead of a tensor of that shape.
        norms = torch.cuda.FloatTensor(*norm_size).contiguous()
        apex_C.weight_norm_fwd(output, norms, input, g, dim)
        ctx.save_for_backward(input, g)
        # save_for_backward can only save input or output tensors,
        # use ctx state to save the norms and dimension:
        ctx.norms = norms
        ctx.dim = dim
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """
        Args:
            grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance.
        Returns:
            Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_input``.
        """
        # Bug fix: the original passed (grad_output) and ("grad_output"),
        # which are NOT tuples (no trailing comma), so zip() iterated over
        # first-dim slices of the tensor paired with single characters of
        # the string, checking the wrong objects entirely.
        check_contig_cuda((grad_output,), ("grad_output",))
        savedInput, savedg = ctx.saved_tensors
        savedNorms = ctx.norms
        # We expect that these .contiguous() calls will be no-ops. They're present for safety.
        grad_output_contig = grad_output.contiguous()
        grad_input = grad_output_contig.new(grad_output.size()).contiguous()
        grad_g = savedg.new(savedg.size()).contiguous()
        apex_C.weight_norm_bwd(grad_input,
                               grad_g,
                               grad_output_contig,
                               savedInput,
                               savedg,
                               savedNorms,
                               ctx.dim)
        return grad_input, grad_g, None
| 41.95614
| 175
| 0.604223
| 590
| 4,783
| 4.805085
| 0.305085
| 0.035273
| 0.035273
| 0.02328
| 0.120282
| 0.111111
| 0.092063
| 0.047266
| 0.047266
| 0.047266
| 0
| 0.010423
| 0.29793
| 4,783
| 113
| 176
| 42.327434
| 0.83383
| 0.343717
| 0
| 0.043478
| 0
| 0
| 0.081547
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.086957
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9724b70833f729e47c38eb018294247250b7282
| 23,312
|
py
|
Python
|
bzt/modules/grinder.py
|
gerardorf/taurus
|
610872b4cf70af31d79a346db1aebd3466310d77
|
[
"Apache-2.0"
] | 1
|
2019-01-15T17:23:58.000Z
|
2019-01-15T17:23:58.000Z
|
bzt/modules/grinder.py
|
gerardorf/taurus
|
610872b4cf70af31d79a346db1aebd3466310d77
|
[
"Apache-2.0"
] | null | null | null |
bzt/modules/grinder.py
|
gerardorf/taurus
|
610872b4cf70af31d79a346db1aebd3466310d77
|
[
"Apache-2.0"
] | null | null | null |
"""
Module holds all stuff regarding Grinder tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import time
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.java import TaurusJavaHelper
from bzt.requests_model import HTTPRequest
from bzt.six import iteritems
from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR
class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
    """
    Grinder executor module: prepares a Grinder properties file from the
    Taurus scenario, launches the tool, and feeds its KPI log into the
    aggregation pipeline.
    """

    def __init__(self):
        super(GrinderExecutor, self).__init__()
        self.script = None
        self.exec_id = "grinder-bzt-%s" % id(self)
        self.properties_file = None
        self.kpi_file = None
        self.cmd_line = None
        self.process = None
        self.end_time = None
        self.retcode = None
        self.java_helper = None

    def __write_base_props(self, fds):
        """
        write base properties and base properties file contents to fds
        :param fds: fds
        :return:
        """
        base_props_file = self.settings.get("properties-file")
        if base_props_file:
            fds.write("# Base Properies File Start: %s\n" % base_props_file)
            with open(base_props_file) as bpf:
                fds.write(bpf.read())
            fds.write("# Base Properies File End: %s\n\n" % base_props_file)
        # base props
        base_props = self.settings.get("properties")
        if base_props:
            fds.write("# Base Properies Start\n")
            for key, val in iteritems(base_props):
                fds.write("%s=%s\n" % (key, val))
            fds.write("# Base Properies End\n\n")

    def __write_scenario_props(self, fds, scenario):
        """
        Write scenario props and scenario file props to fds
        :param fds:
        :param scenario: dict
        :return:
        """
        script_props_file = scenario.get("properties-file")
        if script_props_file:
            fds.write("# Script Properies File Start: %s\n" % script_props_file)
            with open(script_props_file) as spf:
                fds.write(spf.read())
            fds.write("# Script Properies File End: %s\n\n" % script_props_file)
        # scenario props
        local_props = scenario.get("properties")
        if local_props:
            fds.write("# Scenario Properies Start\n")
            for key, val in iteritems(local_props):
                fds.write("%s=%s\n" % (key, val))
            fds.write("# Scenario Properies End\n\n")

    def __write_bzt_props(self, fds):
        """
        Write bzt properties to fds
        :param fds:
        :return:
        """
        fds.write("# BZT Properies Start\n")
        fds.write("grinder.hostID=%s\n" % self.exec_id)
        fds.write("grinder.script=%s\n" % self.script.replace(os.path.sep, "/"))
        fds.write("grinder.logDirectory=%s\n" % self.engine.artifacts_dir.replace(os.path.sep, "/"))
        load = self.get_load()
        if load.iterations or load.concurrency:
            # Bug fix: '%' binds tighter than 'or', so the original
            # '"...%s" % load.iterations or 0' formatted None and the
            # 'or 0' fallback never applied. Parenthesize the value.
            fds.write("grinder.runs=%s\n" % (load.iterations or 0))
        if load.concurrency:
            fds.write("grinder.threads=%s\n" % load.concurrency)
        if load.duration:
            fds.write("grinder.duration=%s\n" % int(load.duration * 1000))
        fds.write("# taurus load values in case you need them\n")
        fds.write("taurus.concurrency=%s\n" % load.concurrency)
        fds.write("taurus.throughput=%s\n" % load.throughput)
        fds.write("taurus.ramp_up=%s\n" % load.ramp_up)
        fds.write("taurus.steps=%s\n" % load.steps)
        fds.write("taurus.hold_for=%s\n" % load.hold)
        fds.write("taurus.iterations=%s\n" % load.iterations)
        fds.write("# BZT Properies End\n")

    def prepare(self):
        """Install tools, resolve/generate the script, write the properties
        file and wire the KPI log reader into the aggregator."""
        self.stdout = open(self.engine.create_artifact("grinder", ".out"), "w")
        self.stderr = open(self.engine.create_artifact("grinder", ".err"), "w")
        self.install_required_tools()
        scenario = self.get_scenario()
        self.exec_id = self.label
        self.script = self.get_script_path()
        if not self.script:
            if "requests" in scenario:
                self.script = self.__scenario_from_requests()
            else:
                msg = "There must be a script file or requests for its generation "
                msg += "to run Grinder tool (%s)" % self.execution.get('scenario')
                raise TaurusConfigError(msg)
        self.properties_file = self.engine.create_artifact("grinder", ".properties")
        with open(self.properties_file, 'w') as fds:
            self.__write_base_props(fds)
            self.__write_scenario_props(fds, scenario)
            self.__write_bzt_props(fds)
        self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + "-kpi.log")
        self.reader = DataLogReader(self.kpi_file, self.log)
        self.reader.report_by_url = self.settings.get("report-by-url", False)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
        # add logback configurations used by worker processes (logback-worker.xml)
        self.env.add_path({"CLASSPATH": RESOURCES_DIR}, finish=True)
        self.env.add_path({"CLASSPATH": self.java_helper.tool_path}, finish=True)
        self.env.add_path({"CLASSPATH": self.settings.get("path", None)}, finish=True)
        self.cmd_line = ["java", "net.grinder.Grinder", self.properties_file]

    def startup(self):
        """
        Should start the tool as fast as possible.
        """
        self.env.set({"T_GRINDER_PREFIX": self.exec_id})
        self.process = self.execute(self.cmd_line)

    def check(self):
        """
        Checks if tool is still running. Also checks if resulting logs contains
        any data and throws exception otherwise.
        :return: bool
        :raise TaurusToolError:
        """
        self.retcode = self.process.poll()
        if self.retcode is not None:
            if self.retcode != 0:
                # Bug fix: message previously said "Gatling" — a copy-paste
                # from the Gatling module; this is the Grinder executor.
                raise ToolError("Grinder tool exited with non-zero code: %s" % self.retcode,
                                self.get_error_diagnostics())
            return True
        return False

    def shutdown(self):
        """
        If tool is still running - let's stop it.
        """
        shutdown_process(self.process, self.log)
        if self.start_time:
            self.end_time = time.time()
            self.log.debug("Grinder worked for %s seconds", self.end_time - self.start_time)

    def post_process(self):
        """
        Collect data file artifact
        """
        if self.kpi_file:
            self.engine.existing_artifact(self.kpi_file)
        super(GrinderExecutor, self).post_process()

    def __scenario_from_requests(self):
        """
        Generate grinder scenario from requests
        :return: script
        """
        script = self.engine.create_artifact("grinder_requests", ".py")
        builder = GrinderScriptBuilder(self.get_scenario(), self.log)
        builder.label = self.label
        builder.build_source_code()
        builder.save(script)
        return script

    def install_required_tools(self):
        """Resolve the Grinder jar, Java helper, Tcl and JVM; install any
        that are missing."""
        grinder = self._get_tool(Grinder, config=self.settings)
        self.settings["path"] = grinder.tool_path
        self.java_helper = self._get_tool(TaurusJavaHelper)
        required_tools = [self._get_tool(TclLibrary),
                          self._get_tool(JavaVM),
                          self.java_helper,
                          grinder]
        for tool in required_tools:
            if not tool.check_if_installed():
                tool.install()

    def get_widget(self):
        if not self.widget:
            if self.script is not None:
                label = "Grinder: %s" % os.path.basename(self.script)
            else:
                label = None
            self.widget = ExecutorWidget(self, label)
            if self.get_load().ramp_up:
                self.widget.duration += self.get_load().ramp_up  # because we have ramp-down equal to rampup
        return self.widget

    def resource_files(self):
        """Files to be packed for remote/cloud runs: script + props file."""
        resource_files = []
        script_file_path = self.get_script_path()
        if script_file_path:
            resource_files.append(script_file_path)
        prop_file = self.get_scenario().get("properties-file")
        if prop_file:
            resource_files.append(prop_file)
        return resource_files

    def get_error_diagnostics(self):
        """Collect tool stdout/stderr tails for error reporting."""
        diagnostics = []
        if self.stdout is not None:
            with open(self.stdout.name) as fds:
                contents = fds.read().strip()
                if contents.strip():
                    diagnostics.append("Grinder STDOUT:\n" + contents)
        if self.stderr is not None:
            with open(self.stderr.name) as fds:
                contents = fds.read().strip()
                if contents.strip():
                    # Bug fix: stderr contents were mislabeled "STDOUT"
                    diagnostics.append("Grinder STDERR:\n" + contents)
        return diagnostics
class DataLogReader(ResultsReader):
    """ Class to read KPI from data log """
    DELIMITER = ","
    # Matches detail lines like: "worker.0 GET http://... -> 200 OK, 123 bytes"
    # groups: (1) worker id, (2) url, (3) response code, (4) message, (5) bytes
    DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")

    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.report_by_url = False  # when True, label samples by URL instead of test name
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.idx = {}  # data-log column header -> column index
        self.partial_buffer = ""  # tail of the last line read without trailing newline
        self.start_time = 0
        self.end_time = 0
        self.concurrency = 0  # currently-known number of active worker threads
        self.test_names = {}  # test id -> human-readable test name
        self.known_threads = set()  # "worker/thread" ids already counted in concurrency

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data
        :param last_pass:
        """
        self.log.debug("Reading grinder results...")
        self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))
        lnum = None
        start = time.time()
        for lnum, line in enumerate(self.lines):
            if not self.idx:
                # Header not parsed yet. Non-'data.' lines before the header
                # may still carry test-name records, so feed them to __split.
                if not line.startswith('data.'):
                    self.__split(line)  # to capture early test name records
                    continue
                # First 'data.' line is the CSV header: build column index map
                line = line[line.find(' '):]
                header_list = line.strip().split(self.DELIMITER)
                for _ix, field in enumerate(header_list):
                    self.idx[field.strip()] = _ix
            data_fields, worker_id = self.__split(line)
            if not data_fields:
                self.log.debug("Skipping line: %s", line.strip())
                continue
            yield self.parse_line(data_fields, worker_id, lnum)
        if lnum is not None:
            duration = time.time() - start
            if duration < 0.001:
                duration = 0.001
            self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration)

    def parse_line(self, data_fields, worker_id, lnum):
        """Convert one CSV data row into the ResultsReader sample tuple.

        Timings in the log are milliseconds; they are converted to seconds.
        """
        worker_id = worker_id.split('.')[1]
        t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0)
        r_time = int(data_fields[self.idx["Test time"]]) / 1000.0
        latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0
        r_code = data_fields[self.idx["HTTP response code"]].strip()
        con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0
        con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0
        bytes_count = int(data_fields[self.idx["HTTP response length"]].strip())
        test_id = data_fields[self.idx["Test"]].strip()
        thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip()
        if thread_id not in self.known_threads:
            self.known_threads.add(thread_id)
            self.concurrency += 1
        # Scan preceding log lines for the matching detail record (url + msg)
        url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)
        if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]):
            if not error_msg:
                if r_code != '0':
                    error_msg = "HTTP %s" % r_code
                else:
                    error_msg = "Java exception calling TestRunner"
        else:
            error_msg = None  # suppress errors
        if self.report_by_url:
            label = url
        elif test_id in self.test_names:
            label = self.test_names[test_id]
        else:
            label = "Test #%s" % test_id
        source_id = ''  # maybe use worker_id somehow?
        return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count

    def __split(self, line):
        """Split a raw log line into (data_fields, worker_id).

        Returns (None, None) for incomplete lines (buffered until the rest
        arrives) and for non-data lines, which are still scanned for
        thread start/finish events and "Test name for ID" records.
        """
        if not line.endswith("\n"):
            # incomplete line: stash it and wait for the remainder
            self.partial_buffer += line
            return None, None
        line = "%s%s" % (self.partial_buffer, line)
        self.partial_buffer = ""
        line = line.strip()
        if not line.startswith('data.'):
            line_parts = line.split(' ')
            if len(line_parts) > 1:
                if line_parts[1] == 'starting,':
                    # self.concurrency += 1
                    pass
                elif line_parts[1] == 'finished':
                    if self.concurrency > 0:
                        self.concurrency -= 1
                elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}:
                    # e.g. "... Test name for ID 1: <name>"
                    test_id = line_parts[5][:-1]
                    test_name = ' '.join(line_parts[6:])
                    self.test_names[test_id] = test_name
                    self.log.debug("Recognized test id %s => %s", test_id, test_name)
            return None, None
        worker_id = line[:line.find(' ')]
        line = line[line.find(' '):]
        data_fields = line.split(self.DELIMITER)
        if not data_fields[1].strip().isdigit():
            return None, None
        if len(data_fields) < max(self.idx.values()):
            return None, None
        return data_fields, worker_id

    def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count):
        """Find the detail record matching this sample in earlier lines;
        returns (url, error_message) or ('', None) when not found."""
        url = ''
        error_msg = None
        for lineNo in reversed(range(max(lnum - 100, 0), lnum)):  # looking max 100 lines back. TODO: parameterize?
            line = self.lines[lineNo].strip()
            matched = self.DETAILS_REGEX.match(line)
            if not matched:
                continue
            if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5):
                return matched.group(2), matched.group(4)
        return url, error_msg
class Grinder(RequiredTool):  # todo: take it from maven and convert to JarTool(?)
    """Represents the Grinder jar: locating, probing and installing it."""
    VERSION = "3.11"
    LOCAL_PATH = "~/.bzt/grinder-taurus/lib/grinder.jar"

    def __init__(self, config=None, **kwargs):
        settings = config or {}
        tool_location = get_full_path(settings.get("path", self.LOCAL_PATH))
        super(Grinder, self).__init__(
            tool_path=tool_location,
            download_link=settings.get("download-link", ""),
            **kwargs)
        self.version = self.VERSION
        self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version)

    def check_if_installed(self):
        """Probe the jar by launching net.grinder.Grinder; True when it runs."""
        self.log.debug("Trying %s: %s", self.tool_name, self.tool_path)
        try:
            out, err = self.call(["java", "-classpath", self.tool_path, "net.grinder.Grinder"])
        except CALL_PROBLEMS as exc:
            self.log.warning("%s check failed: %s", self.tool_name, exc)
            return False
        if err:
            out += err
        self.log.debug("%s stdout: %s", self.tool_name, out)
        return True

    def install(self):
        """Download the distribution, unpack it next to tool_path, verify."""
        target_dir = get_full_path(self.tool_path, step_up=2)
        self.log.info("Will install %s into %s", self.tool_name, target_dir)
        archive = self._download(use_link=bool(self.download_link))
        self.log.info("Unzipping %s", archive)
        unzip(archive, target_dir, 'grinder-' + self.version)
        os.remove(archive)
        self.log.info("Installed grinder successfully")
        if not self.check_if_installed():
            raise ToolError("Unable to run %s after installation!" % self.tool_name)
class GrinderMirrorsManager(MirrorsManager):
    """Builds the list of SourceForge download mirrors for a Grinder version."""
    MIRRORS_SOURCE = "https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder" \
                     "%203/{version}/grinder-{version}-binary.zip&dialog=true"
    DOWNLOAD_LINK = "https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \
                    "/grinder-{version}-binary.zip?r=&ts=" + str(int(time.time())) + "&use_mirror=autoselect"

    def __init__(self, http_client, parent_logger, grinder_version):
        self.grinder_version = grinder_version
        base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version)
        super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger)

    def _parse_mirrors(self):
        """Extract mirror names from the fetched mirror-choices page and
        build download links; the autoselect link is always appended last.

        :return: list of download URLs
        """
        links = []
        if self.page_source is not None:
            self.log.debug('Parsing mirrors...')
            base_link = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}" \
                        "-binary.zip/download?use_mirror={mirror}"
            # Bug fix: the original used link.strip('<li id="'), but str.strip
            # removes any of those *characters* from both ends, mangling
            # mirror ids that begin or end with them. Capture the id directly.
            mirror_ids = re.findall(r'<li id="(.*?)">', self.page_source)
            links = [base_link.format(version=self.grinder_version, mirror=mirror_id)
                     for mirror_id in mirror_ids]
        default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version)
        if default_link not in links:
            links.append(default_link)
        self.log.debug('Total mirrors: %d', len(links))
        return links
class GrinderScriptBuilder(PythonGenerator):
    """Generates a Jython TestRunner script for Grinder from a Taurus
    scenario's HTTP requests."""
    # Import header emitted verbatim at the top of the generated script
    IMPORTS = """
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities
from HTTPClient import NVPair
"""

    def __init__(self, scenario, parent_logger):
        super(GrinderScriptBuilder, self).__init__(scenario, parent_logger)
        self.label = "BZT Requests"  # name recorded into the generated Test(1, ...)

    def build_source_code(self):
        """Assemble the generated script: imports, request/test setup,
        connection defaults (headers, timeout, cookies) and the TestRunner
        class."""
        self.log.debug("Generating Python script for Grinder")
        self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
        self.root.append(self.add_imports())
        self.root.append(self.gen_new_line())
        default_address = self.scenario.get("default-address")
        url_arg = "url=%r" % default_address if default_address else ""
        self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
        self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0))
        self.root.append(self.gen_statement('test.record(request)', indent=0))
        self.root.append(self.gen_new_line())
        self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
        self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))
        headers = self.scenario.get_headers()
        if not self.scenario.get("keepalive", True):
            headers['Connection'] = 'close'
        if headers:
            self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
            for header, value in iteritems(headers):
                self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
            self.root.append(self.gen_statement("])", indent=0))
        global_timeout = dehumanize_time(self.scenario.get("timeout", None))
        if global_timeout:
            # Grinder expects milliseconds
            self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))
        cookie_flag = int(self.scenario.get("store-cookie", True))
        self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))
        self.root.append(self.gen_new_line())
        self.root.append(self.gen_runner_class())

    @staticmethod
    def __list_to_nvpair_list(items):
        # renders [("a", "b"), ...] as "[NVPair('a', 'b'),...]" source text
        return "[" + ",".join("NVPair(%r, %r)" % (header, value) for header, value in items) + "]"

    def gen_runner_class(self):
        """Build the generated TestRunner class: a ramp-up sleeper plus a
        __call__ that replays each scenario request with optional think-time."""
        runner_classdef = self.gen_class_definition("TestRunner", ["object"])
        sleep_method = self.gen_method_definition("rampUpSleeper", ["self"])
        # stagger thread start times across the ramp-up period (first run only)
        sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return"))
        sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')"))
        sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)"))
        sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)"))
        sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)"))
        sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)"))
        sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')"))
        sleep_method.append(self.gen_new_line())
        runner_classdef.append(sleep_method)
        main_method = self.gen_method_definition("__call__", ["self"])
        main_method.append(self.gen_statement("self.rampUpSleeper()"))
        for req in self.scenario.get_requests():
            # only plain HTTP requests are supported; other block types are skipped
            if not isinstance(req, HTTPRequest):
                msg = "Grinder script generator doesn't support '%s' blocks, skipping"
                self.log.warning(msg, req.NAME)
                continue
            method = req.method.upper()
            url = req.url
            local_headers = req.headers
            params = "[]"
            headers = self.__list_to_nvpair_list(iteritems(local_headers))
            main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers)))
            think_time = dehumanize_time(req.priority_option('think-time'))
            if think_time:
                main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000)))
        runner_classdef.append(main_method)
        return runner_classdef
| 40.82662
| 119
| 0.618823
| 2,872
| 23,312
| 4.843663
| 0.174791
| 0.016677
| 0.024297
| 0.03163
| 0.199123
| 0.137014
| 0.093379
| 0.057652
| 0.034936
| 0.024297
| 0
| 0.007742
| 0.263126
| 23,312
| 570
| 120
| 40.898246
| 0.802072
| 0.069878
| 0
| 0.08867
| 0
| 0.002463
| 0.155468
| 0.032096
| 0
| 0
| 0
| 0.001754
| 0
| 1
| 0.068966
| false
| 0.007389
| 0.044335
| 0.002463
| 0.189655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b972e358701b6b26d8d3c931dfecc57580620c15
| 467
|
py
|
Python
|
test/Fortran/fixture/myfortran_flags.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/Fortran/fixture/myfortran_flags.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/Fortran/fixture/myfortran_flags.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
import getopt
import sys

# Fake Fortran compiler used as an SCons test fixture: writes the collected
# option string to the output file, then copies the input file while dropping
# lines that start with the comment marker given as the first CLI argument.
comment = ('#' + sys.argv[1]).encode()
opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')
optstring = ''
out = None  # fix: was left unbound when no -o option is supplied
length = len(comment)
for opt, arg in opts:
    if opt == '-o':
        out = arg
    elif opt not in ('-f', '-K'):
        optstring = optstring + ' ' + opt
# fix: use context managers so the file handles are always closed
with open(args[0], 'rb') as infile, open(out, 'wb') as outfile:
    outfile.write((optstring + "\n").encode())
    for line in infile.readlines():
        if line[:length] != comment:
            outfile.write(line)
sys.exit(0)
| 27.470588
| 67
| 0.601713
| 71
| 467
| 3.957746
| 0.521127
| 0.049822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 0.186296
| 467
| 16
| 68
| 29.1875
| 0.728947
| 0
| 0
| 0
| 0
| 0
| 0.044968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9736fc25869ac44481082e255dc93e0f52aa441
| 9,015
|
py
|
Python
|
zen_knit/organizer/__init__.py
|
Zen-Reportz/zen_knit
|
104c2693d2cc61520657131da769f5d59d2df8e9
|
[
"MIT"
] | 30
|
2021-12-25T15:39:42.000Z
|
2022-02-25T04:53:44.000Z
|
zen_knit/organizer/__init__.py
|
Zen-Reportz/zen_knit
|
104c2693d2cc61520657131da769f5d59d2df8e9
|
[
"MIT"
] | 11
|
2022-01-02T22:10:07.000Z
|
2022-02-02T00:56:33.000Z
|
zen_knit/organizer/__init__.py
|
Zen-Reportz/zen_knit
|
104c2693d2cc61520657131da769f5d59d2df8e9
|
[
"MIT"
] | 2
|
2022-01-27T13:22:46.000Z
|
2022-01-30T05:01:59.000Z
|
import io
import os
import base64
from pathlib import Path
from nbconvert import filters
from pygments.formatters.latex import LatexFormatter
from zen_knit import formattor
from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData
from zen_knit.formattor.html_formatter import HTMLFormatter
# Maps notebook-output MIME types to the file extension used when saving
# figures to disk.
mime_extensions = {"image/png" : "png",
                   "image/jpg" : "jpg"}
class BaseOrganizer:
    """Organize executed chunk results into an ``OrganizedData`` tree.

    Walks every result produced by the executor, classifies each payload
    (raw code/sql/markdown text, stream/error output, rich text, HTML,
    plots), writes plot images into the figure folder, and finally merges
    consecutive chunks of the same type so formatters receive one block
    per run of identical content.

    The per-result payloads look like Jupyter/nbformat output messages
    (keys such as ``output_type``, ``text``, ``data`` — presumably coming
    from the kernel executor; confirm against the executor module).
    """

    def __init__(self, executed_data: ExecutedData):
        self.format_started = False
        self.collected_string = ""
        self.fig_folder = None  # absolute path, set by _create_fig_folder
        self.executed_data = executed_data
        self.formatted_doc = []
        self.organized_data = OrganizedData(
            global_options=self.executed_data.global_options,
            chunks=[]
        )
        # Order matters: folders must exist before _organize_doc saves
        # any figures; the output file name is derived last.
        self._create_output_folder_name()
        self._create_fig_folder()
        self._organize_doc()
        self._create_output_file_name()

    def _create_output_file_name(self):
        """Set ``output.file_name`` to '<input stem>.<output format>'."""
        global_options = self.organized_data.global_options
        global_options.output.file_name = (
            global_options.input.file_name.split(".")[0]
            + "."
            + global_options.output.format
        )

    def _create_output_folder_name(self):
        """Default the output directory to the input directory."""
        global_options = self.organized_data.global_options
        if global_options.output.dir is None:
            global_options.output.dir = global_options.input.dir

    def _create_fig_folder(self):
        """Create the output directory and its figure sub-directory."""
        output_folder = self.organized_data.global_options.output.dir
        Path(output_folder).mkdir(parents=True, exist_ok=True)
        fig_folder = os.path.join(
            output_folder, self.organized_data.global_options.output.fig_dir)
        self.fig_folder = fig_folder
        Path(fig_folder).mkdir(parents=True, exist_ok=True)

    def _parse_raw(self, data, output_type):
        """Capture raw chunk text as a code/sql/markdown chunk.

        Returns True when `data` carried a 'code_text_raw' payload (even
        if it was visually empty and therefore dropped), False otherwise.
        """
        if data.get("code_text_raw") is not None:
            if self._clean_up(data['code_text_raw']) is not None:
                # Fixed: `output_type in ("code")` was a substring test on
                # the *string* "code" (parentheses without a comma do not
                # make a tuple); use plain equality instead.
                if output_type == "code":
                    t = {"type": "code", "str_data": data['code_text_raw']}
                elif output_type == "sql":
                    t = {"type": "sql", "str_data": data['code_text_raw']}
                else:
                    t = {"type": "markdown", "str_data": data['code_text_raw']}
                self.organized_data.chunks.append(OrganizedChunk(**t))
            return True
        else:
            return False

    def _coder_string(self, data):
        """Capture kernel 'stream' output and 'error' tracebacks as
        'se_data' chunks. Returns True when the payload was handled."""
        if data["output_type"] is None:
            return False
        if data["output_type"] in ("stream", "error"):
            if data["output_type"] == "stream":
                if self._clean_up(data['text']) is not None:
                    t = {"type": "se_data", "str_data": data['text']}
                    self.organized_data.chunks.append(OrganizedChunk(**t))
            if data["output_type"] == "error":
                # Strip ANSI colour escapes from the joined traceback text.
                t = {"type": "se_data",
                     "str_data": data["evalue"]
                     + filters.strip_ansi("".join(data["traceback"]))}
                self.organized_data.chunks.append(OrganizedChunk(**t))
            return True
        return False

    def _raw_string(self, data):
        """Capture 'execute_result' payloads.

        HTML tables become 'html_data' chunks, other non-empty text/plain
        reprs become 'e_data' chunks; matplotlib reprs are dropped (the
        figure arrives separately as display_data). Returns True when the
        payload was an execute_result carrying data.
        """
        if data["output_type"] is None:
            return False
        if data["output_type"] == "execute_result":
            if data.get("data") is not None:
                text = data["data"]["text/plain"]
                if 'matplotlib' in text:
                    # Intentionally ignored — see docstring.
                    return True
                # Strip the surrounding quotes from quoted string reprs.
                # ([:1] instead of [0] so an empty repr cannot raise
                # IndexError — robustness fix.)
                if text[:1] in ("'", '"'):
                    temp = text[1:-1]
                else:
                    temp = text
                if "<table" in temp:
                    t = {"type": "html_data", "str_data": temp}
                    self.organized_data.chunks.append(OrganizedChunk(**t))
                    return True
                if self._clean_up(temp) is not None:
                    t = {"type": "e_data", "str_data": temp}
                    self.organized_data.chunks.append(OrganizedChunk(**t))
                    return True
                return True
        return False

    def _raw_plots(self, data, chunk_option: ChunkOption):
        """Capture 'display_data' images: save them into the figure folder
        and append one 'plot' chunk referencing the saved files."""
        if data["output_type"] is None:
            return False
        if data["output_type"] == "display_data":
            plot_infos = self._save_plots(data, chunk_option)
            t = {"type": "plot",
                 "complex_data": {"plots": plot_infos, "options": chunk_option}}
            self.organized_data.chunks.append(OrganizedChunk(**t))
            return True
        return False

    def _save_plots(self, data, chunk_option: ChunkOption):
        """Decode every base64 image payload present in `data` and write it
        into the figure folder. Returns the list of figure paths."""
        figs = []
        counter = 1
        for mime, extension in mime_extensions.items():
            if mime in data["data"]:
                fig_full_path, fig_relative_path = self._build_file(
                    extension, counter, chunk_option.fig_caption, chunk_option.name)
                figs.append(fig_relative_path)
                with open(fig_full_path, "wb") as f:
                    f.write(base64.b64decode(data["data"][mime]))
                counter += 1
        return figs

    def _build_file(self, extension, index, fig_caption=None, name=None):
        """Build the figure file name from caption/name/index.

        Returns a (full_path, relative_path) pair.
        NOTE(review): both returned values are currently the same absolute
        path; the second was presumably meant to be relative to the output
        dir — confirm with the formatters before changing it.
        """
        fig_name = ""
        if fig_caption is not None:
            fig_name += "_" + fig_caption
        if name is not None:
            fig_name += "_" + name
        fig_name += "_" + str(index)
        fig_name += "." + extension
        full_path = os.path.join(self.fig_folder, fig_name)
        return full_path, full_path

    def _interactive_plots(self, data):
        """Capture interactive HTML display output. Only legal when the
        requested output format is HTML; raises otherwise."""
        if data["output_type"] is None:
            return False
        if data["output_type"] == "display_data":
            if "text/html" in data["data"]:
                print(self.executed_data.global_options.output.format)
                if self.executed_data.global_options.output.format != "html":
                    raise Exception("output format is not HTML")
                else:
                    t = {"type": "html_data",
                         "str_data": data["data"]["text/html"]}
                    self.organized_data.chunks.append(OrganizedChunk(**t))
                    return True
        return False

    def _organize_doc(self):
        """Classify every result of every executed chunk, then merge
        consecutive chunks of the same type (plots excepted)."""
        for index, chunk in enumerate(self.executed_data.chunks):
            chunk_option = chunk.chunk.options
            if chunk_option.name:
                print(f"organizing {chunk_option.name}")
            else:
                print(f"organizing index {index}")

            results = chunk.results
            for result in results:
                data = result.data
                # First handler that recognises the payload wins.
                if self._parse_raw(data, result.output_type):
                    continue
                if self._coder_string(data):
                    continue
                if self._raw_string(data):
                    continue
                if self._interactive_plots(data):
                    continue
                if self._raw_plots(data, chunk_option):
                    continue
                print("not supported format", data)

        # Merge runs of same-typed chunks so the formatter sees one block
        # per run; plot chunks are never merged.
        merged = []
        for current in self.organized_data.chunks:
            last_chunk = merged[-1] if merged else None
            # Fixed: the original used bitwise `&` between booleans; `and`
            # expresses the intent (and short-circuits).
            if (last_chunk is not None
                    and current.type == last_chunk.type
                    and current.type != "plot"):
                last_chunk.str_data = last_chunk.str_data + "\n" + current.str_data
            else:
                merged.append(current)
        self.organized_data.chunks = merged

    @staticmethod
    def _clean_up(doc):
        """Return `doc` unchanged if it contains anything besides spaces
        and newlines, else None (drops visually-empty output)."""
        if doc.replace(" ", "").replace("\n", ""):
            return doc
        return None
# markdown_file = self.executed_data.global_options.input_file_name.split(".")[0] + ".md"
# markdown_file = os.path.join(self.executed_data.global_options.output_file_dir , markdown_file)
# with open(markdown_file, "w") as f:
# text = "\n".join(self.formatted_doc)
# f.write(text)
| 37.5625
| 139
| 0.533888
| 991
| 9,015
| 4.617558
| 0.158426
| 0.051136
| 0.055726
| 0.050262
| 0.429851
| 0.375219
| 0.328234
| 0.218094
| 0.17264
| 0.138549
| 0
| 0.002933
| 0.357072
| 9,015
| 240
| 140
| 37.5625
| 0.786577
| 0.058569
| 0
| 0.3
| 0
| 0
| 0.084464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072222
| false
| 0
| 0.05
| 0
| 0.244444
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b974558759b358f82c2d72d79bab9c7dc3e35a76
| 12,467
|
py
|
Python
|
qibullet/robot_virtual.py
|
mcaniot/qibullet
|
9c5e1b319a18dd289263eb82f9d7303429bcbe21
|
[
"Apache-2.0"
] | null | null | null |
qibullet/robot_virtual.py
|
mcaniot/qibullet
|
9c5e1b319a18dd289263eb82f9d7303429bcbe21
|
[
"Apache-2.0"
] | null | null | null |
qibullet/robot_virtual.py
|
mcaniot/qibullet
|
9c5e1b319a18dd289263eb82f9d7303429bcbe21
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import sys
import pybullet
from qibullet.camera import *
from qibullet.link import Link
from qibullet.joint import Joint
IS_VERSION_PYTHON_3 = sys.version_info[0] >= 3
class RobotVirtual:
    """
    Mother class representing a virtual robot
    """

    def __init__(self, description_file):
        """
        Constructor

        Parameters:
            description_file - The file giving the description of the virtual
            robot. For now, only URDF is handled
        """
        self.description_file = description_file
        self.physics_client = 0
        self.active_camera = None
        self.camera_dict = dict()
        self.joint_dict = dict()
        self.link_dict = dict()

    def loadRobot(self, translation, quaternion, physicsClientId=0):
        """
        Loads the robot into a simulation, loads the joints and the links
        descriptions. The joints are set to 0 rad.

        Parameters:
            translation - List containing 3 elements, the translation [x, y, z]
            of the robot in the WORLD frame
            quaternion - List containing 4 elements, the quaternion
            [x, y, z, q] of the robot in the WORLD frame
            physicsClientId - The id of the simulated instance in which the
            robot is supposed to be loaded

        Returns:
            boolean - True if the method ran correctly, False otherwise
        """
        try:
            self.physics_client = physicsClientId
            self.robot_model = pybullet.loadURDF(
                self.description_file,
                translation,
                quaternion,
                useFixedBase=False,
                globalScaling=1.0,
                physicsClientId=self.physics_client,
                flags=pybullet.URDF_USE_SELF_COLLISION |
                pybullet.URDF_USE_MATERIAL_COLORS_FROM_MTL)

        except pybullet.error as e:
            raise pybullet.error("Cannot load robot model: " + str(e))

        for i in range(pybullet.getNumJoints(
                self.robot_model,
                physicsClientId=self.physics_client)):
            joint_info = pybullet.getJointInfo(
                self.robot_model,
                i,
                physicsClientId=self.physics_client)

            # Python 3 returns names as bytes; decode so dict keys are str
            # on both major versions. (Refactored: the two version branches
            # previously duplicated the whole Link/Joint registration.)
            if IS_VERSION_PYTHON_3:
                link_name = joint_info[12].decode('utf-8')
                joint_name = joint_info[1].decode('utf-8')
            else:
                link_name = joint_info[12]
                joint_name = joint_info[1]

            self.link_dict[link_name] = Link(joint_info)

            # Only actuated (prismatic/revolute) joints are tracked.
            if joint_info[2] == pybullet.JOINT_PRISMATIC or\
                    joint_info[2] == pybullet.JOINT_REVOLUTE:
                self.joint_dict[joint_name] = Joint(joint_info)

    def getRobotModel(self):
        """
        Returns the pybullet model to which the module is associated.

        Returns:
            robot_model - The pybullet model of the robot
        """
        return self.robot_model

    def getPhysicsClientId(self):
        """
        Returns the id of the simulated instance in which the module is loaded.

        Returns:
            physics_client - The id of the simulation in which the robot
            (possessing the module) is spawned
        """
        return self.physics_client

    def setAngles(self, joint_names, joint_values, percentage_speeds):
        """
        Set angles on the robot's joints. Tests have to be performed by the
        child class to guarantee the validity of the input parameters.

        Parameters:
            joint_names - List of string containing the name of the joints
            to be controlled
            joint_values - List of values corresponding to the angles in
            radians to be applied
            percentage_speeds - Percentages of the max speed to be used for
            each joint, has to be strictly superior to 0 and inferior or equal
            to 1
        """
        try:
            assert len(joint_names) ==\
                len(joint_values) ==\
                len(percentage_speeds)

            assert all(
                speed >= 0.0 and speed <= 1.0 for speed in percentage_speeds)

        except AssertionError:
            raise pybullet.error("Error in the setAngles parameters")

        for joint_name, joint_value, percentage_speed in zip(
                joint_names,
                joint_values,
                percentage_speeds):
            joint_speed =\
                self.joint_dict[joint_name].getMaxVelocity() *\
                percentage_speed

            pybullet.setJointMotorControl2(
                self.robot_model,
                self.joint_dict[joint_name].getIndex(),
                pybullet.POSITION_CONTROL,
                targetPosition=joint_value,
                maxVelocity=joint_speed,
                force=self.joint_dict[joint_name].getMaxEffort(),
                physicsClientId=self.physics_client)

    def getAnglesPosition(self, joint_names):
        """
        Gets the position of the robot's joints in radians. If one of the joint
        doesn't exist, the method will raise a KeyError.

        Parameters:
            joint_names - List of string containing the names of the joints

        Returns:
            joint_positions - List of floats containing the joint's positions
        """
        joint_positions = list()

        for joint_name in joint_names:
            joint_positions.append(pybullet.getJointState(
                self.robot_model,
                self.joint_dict[joint_name].getIndex(),
                physicsClientId=self.physics_client)[0])

        return joint_positions

    def getAnglesVelocity(self, joint_names):
        """
        Gets the velocity of the robot's joints in rad/s. If one of the joint
        doesn't exist, the method will raise a KeyError.

        Parameters:
            joint_names - List of string containing the names of the joints

        Returns:
            joint_velocities - List of floats containing the joint's velocities
        """
        joint_velocities = list()

        for joint_name in joint_names:
            joint_velocities.append(pybullet.getJointState(
                self.robot_model,
                self.joint_dict[joint_name].getIndex(),
                physicsClientId=self.physics_client)[1])

        return joint_velocities

    def subscribeCamera(self, camera_id, resolution=Camera.K_QVGA):
        """
        Subscribe to the camera holding the camera id. WARNING: at the moment,
        only one camera can be subscribed.

        Parameters:
            camera_id - The id of the camera to be subscribed
            resolution - CameraResolution object, the resolution of the camera
        """
        try:
            self.active_camera = self.camera_dict[camera_id]
            self.active_camera.subscribe(resolution=resolution)

        except KeyError:
            print("This camera does not exist, use a valid camera id")

    def unsubscribeCamera(self, camera_id):
        """
        Unsubscribe from a camera, the one holding the camera id.

        Parameters:
            camera_id - The id of the camera to be unsubscribed
        """
        try:
            # If no active camera is found, nothing is unsubscribed
            assert self.active_camera is not None

            if self.active_camera.getCameraId() == camera_id:
                self.active_camera.unsubscribe()
                self.active_camera = None

        except KeyError:
            print("This camera does not exist, use a valid camera id")

        except AssertionError:
            pass

    def getCameraFrame(self):
        """
        Returns a camera frame. Be advised that the subscribeCamera method
        needs to be called beforehand, otherwise a pybullet error will be
        raised.

        Returns:
            frame - The current camera frame as a formatted numpy array,
            directly exploitable from OpenCV
        """
        try:
            assert self.active_camera is not None
            return self.active_camera.getFrame()

        except AssertionError:
            raise pybullet.error("No active camera, cannot retrieve any frame")

    def getCameraResolution(self):
        """
        Returns the resolution of the active camera. Be advised that the
        subscribeCamera method needs to be called beforehand, otherwise a
        pybullet error will be raised.

        Returns:
            resolution - a CameraResolution object describing the resolution of
            the active camera
        """
        try:
            assert self.active_camera is not None
            return self.active_camera.getResolution()

        # Fixed: previously caught KeyError, so the AssertionError raised
        # when no camera is active escaped instead of being converted into
        # a pybullet.error (getCameraFrame already handles it this way).
        except AssertionError:
            raise pybullet.error("No active camera, resolution unavailable")

    def getCameraLink(self):
        """
        Returns the link of the active camera. Be advised that the
        subscribeCamera method needs to be called beforehand, otherwise a
        pybullet error will be raised.

        Returns:
            resolution - a Link object describing the link to which the active
            camera is attached
        """
        try:
            assert self.active_camera is not None
            return self.active_camera.getCameraLink()

        # Fixed: same KeyError/AssertionError mismatch as in
        # getCameraResolution.
        except AssertionError:
            raise pybullet.error("No active camera, cannot retrieve any link")

    def getActiveCamera(self):
        """
        Returns the active camera of the robot.

        Returns:
            active_camera - Camera (CameraRgb or CameraDepth) object, the
            active camera of the robot. If there is no active camera, a None is
            returned
        """
        return self.active_camera

    def getPosition(self):
        """
        Gets the position of the robot's base in the world frame.

        Returns:
            x - The position of the robot's base on the x axis, in meters
            y - The positions of the robot's base on the y axis in meters
            theta - The rotation of the robot's base on the z axis in meters
        """
        position, quaternions = pybullet.getBasePositionAndOrientation(
            self.robot_model,
            physicsClientId=self.physics_client)

        theta = pybullet.getEulerFromQuaternion(quaternions)[2]
        return position[0], position[1], theta

    def isSelfColliding(self, link_names):
        """
        Specifies if a link is colliding with the rest of the virtual robot.

        Parameters:
            link_names - String or list of string containing the names of the
            links to be checked for self collision. WARNING: only the links
            with corresponding meshes should be used, otherwise the link cannot
            self collide

        Returns:
            self_colliding - Boolean, if True at least one of the links is self
            colliding
        """
        try:
            if type(link_names) is str:
                assert link_names in self.link_dict.keys()
                names = [link_names]
            else:
                assert set(link_names).issubset(self.link_dict.keys())
                names = list(link_names)

            for name in names:
                # Check the link both as bodyA-link and bodyB-link, since
                # getContactPoints filters on one side at a time.
                contact_tuple = pybullet.getContactPoints(
                    bodyA=self.robot_model,
                    bodyB=self.robot_model,
                    linkIndexA=self.link_dict[name].getIndex(),
                    physicsClientId=self.physics_client)

                contact_tuple += pybullet.getContactPoints(
                    bodyA=self.robot_model,
                    bodyB=self.robot_model,
                    linkIndexB=self.link_dict[name].getIndex(),
                    physicsClientId=self.physics_client)

                if len(contact_tuple) != 0:
                    return True

            return False

        except AssertionError:
            raise pybullet.error(
                "Unauthorized link checking for self collisions")
| 35.31728
| 79
| 0.593006
| 1,396
| 12,467
| 5.179799
| 0.198424
| 0.021435
| 0.030978
| 0.044254
| 0.428295
| 0.391094
| 0.364265
| 0.314203
| 0.285714
| 0.240354
| 0
| 0.004791
| 0.347076
| 12,467
| 352
| 80
| 35.417614
| 0.883538
| 0.338173
| 0
| 0.345679
| 0
| 0
| 0.046202
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.092593
| false
| 0.006173
| 0.030864
| 0
| 0.197531
| 0.012346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97645cb1bc48b7d30c6b37e139952912087b791
| 3,348
|
py
|
Python
|
pyMazeBacktrack.py
|
Dozed12/pyMazeBacktrack
|
aaa2a902fdca17dca6e2ee00e672b6bb38da5639
|
[
"MIT"
] | 2
|
2019-02-22T10:35:25.000Z
|
2020-08-11T01:25:12.000Z
|
pyMazeBacktrack.py
|
Dozed12/pyMazeBacktrack
|
aaa2a902fdca17dca6e2ee00e672b6bb38da5639
|
[
"MIT"
] | null | null | null |
pyMazeBacktrack.py
|
Dozed12/pyMazeBacktrack
|
aaa2a902fdca17dca6e2ee00e672b6bb38da5639
|
[
"MIT"
] | null | null | null |
import libtcodpy as libtcod
from random import randint
# Maze geometry: nSquares cells per side; each cell needs a wall tile
# beside it plus one closing border tile, hence 2*n + 1 console tiles.
nSquares = 30
nTiles = nSquares * 2 + 1
SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles
# 12x12 CP437 bitmap font (ASCII-in-row layout) and an OpenGL-rendered
# console window sized to exactly one tile per maze tile.
libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)
def CheckDir(x,y,size,direction,table):
    """Return 1 when a two-cell move in `direction` from (x, y) is legal.

    A move is illegal (returns 0) when the destination would leave the
    table bounds or when the destination cell is already carved (white).
    Directions: 1 = -y, 2 = +x, 3 = +y, 4 = -x; any other code is
    accepted unconditionally, mirroring the original behaviour.
    """
    offsets = {1: (0, -2), 2: (2, 0), 3: (0, 2), 4: (-2, 0)}
    move = offsets.get(direction)
    if move is None:
        return 1
    dx, dy = move
    nx, ny = x + dx, y + dy
    # Bounds check on the axis actually moved along (checked before the
    # table lookup so we never index out of range).
    if dx < 0 and nx <= 0:
        return 0
    if dx > 0 and nx >= size:
        return 0
    if dy < 0 and ny <= 0:
        return 0
    if dy > 0 and ny >= size:
        return 0
    if table[nx][ny] == white:
        return 0
    return 1
def Possible(x,y,table,size):
    """Return 1 if any in-bounds two-cell neighbour of (x, y) is still
    black (unvisited), else 0 — i.e. whether the backtracker can keep
    carving from the current cell."""
    candidates = (
        (x + 2 < size, x + 2, y),
        (x - 2 > 0, x - 2, y),
        (y + 2 < size, x, y + 2),
        (y - 2 > 0, x, y - 2),
    )
    for in_bounds, nx, ny in candidates:
        if in_bounds and table[nx][ny] == black:
            return 1
    return 0
# Colour aliases: black = solid/unvisited, white = carved passage.
black = libtcod.black
white = libtcod.white
# Start with every tile solid and draw the initial all-black grid.
Table = [[0 for i in range(nTiles)]for i in range(nTiles)]
for x in range(nTiles):
    for y in range(nTiles):
        Table[x][y] = black
        libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
# Recursive-backtracker state: Memory is the stack of moves taken so far,
# (CurrX, CurrY) the current cell (cells sit on odd coordinates).
Memory = []
CurrX = 1
CurrY = 1
Table[CurrX][CurrY] = white
end = 0
while end == 0:
    # Carve forward while the current cell has an unvisited neighbour.
    while Possible(CurrX,CurrY,Table,nTiles):
        # Pick random directions until one is legal (1=-y, 2=+x, 3=+y, 4=-x).
        Dir = randint(1,4)
        while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0:
            Dir = randint(1,4)
        # Knock down the wall tile between the two cells, then step two
        # tiles into the neighbouring cell.
        if Dir == 1:
            Table[CurrX][CurrY - 1] = white
            CurrY -= 2
            Table[CurrX][CurrY] = white
        elif Dir == 2:
            Table[CurrX + 1][CurrY] = white
            CurrX += 2
            Table[CurrX][CurrY] = white
        elif Dir == 3:
            Table[CurrX][CurrY + 1] = white
            CurrY += 2
            Table[CurrX][CurrY] = white
        elif Dir == 4:
            Table[CurrX - 1][CurrY] = white
            CurrX -= 2
            Table[CurrX][CurrY] = white
        Memory.append(Dir)
        # Redraw the whole maze after every carve step.
        #print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
        libtcod.console_flush()
    # Dead end: undo moves from the stack until a cell with an unvisited
    # neighbour is found, or we are back at the start (maze complete).
    while Possible(CurrX,CurrY,Table,nTiles) == 0:
        MemorySize = len(Memory)
        Dir = Memory[MemorySize-1]
        # Step back in the direction opposite to the recorded move.
        if Dir == 1:
            CurrY += 2
        elif Dir == 2:
            CurrX -= 2
        elif Dir == 3:
            CurrY -= 2
        elif Dir == 4:
            CurrX += 2
        del Memory[MemorySize-1]
        if CurrX == 1 and CurrY == 1:
            # Backtracked all the way home: every cell has been visited.
            end = 1
            break
        #print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
        libtcod.console_flush()
# Keep the finished maze on screen until a key is pressed.
libtcod.console_wait_for_keypress(True)
| 20.168675
| 106
| 0.496416
| 432
| 3,348
| 3.780093
| 0.168981
| 0.015922
| 0.039192
| 0.033068
| 0.560931
| 0.544397
| 0.477036
| 0.464176
| 0.437232
| 0.407838
| 0
| 0.046078
| 0.390681
| 3,348
| 165
| 107
| 20.290909
| 0.754412
| 0.002987
| 0
| 0.475728
| 0
| 0
| 0.009461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019417
| false
| 0
| 0.019417
| 0
| 0.174757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b978586a0e39802db346feaf3a0aa1c91c336f05
| 3,011
|
py
|
Python
|
source/tests/test_resources.py
|
aws-solutions/maintaining-personalized-experiences-with-machine-learning
|
3f6f1b0069df4828eae9b0835b717500189e4f71
|
[
"Apache-2.0"
] | 6
|
2021-09-23T16:33:24.000Z
|
2022-03-31T11:45:13.000Z
|
source/tests/test_resources.py
|
aws-solutions/maintaining-personalized-experiences-with-machine-learning
|
3f6f1b0069df4828eae9b0835b717500189e4f71
|
[
"Apache-2.0"
] | 4
|
2021-09-24T21:34:14.000Z
|
2022-01-27T22:11:08.000Z
|
source/tests/test_resources.py
|
aws-solutions/maintaining-personalized-experiences-with-machine-learning
|
3f6f1b0069df4828eae9b0835b717500189e4f71
|
[
"Apache-2.0"
] | 9
|
2021-09-23T23:24:46.000Z
|
2022-02-12T04:53:16.000Z
|
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
import pytest
from shared.resource import (
DatasetGroup,
Schema,
Dataset,
DatasetImportJob,
Solution,
SolutionVersion,
Campaign,
EventTracker,
BatchSegmentJob,
BatchInferenceJob,
)
@pytest.mark.parametrize(
    "klass,camel,dash,snake",
    [
        (DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
        (Schema, "schema", "schema", "schema"),
        (Dataset, "dataset", "dataset", "dataset"),
        (
            DatasetImportJob,
            "datasetImportJob",
            "dataset-import-job",
            "dataset_import_job",
        ),
        (Solution, "solution", "solution", "solution"),
        (SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
        (Campaign, "campaign", "campaign", "campaign"),
        (EventTracker, "eventTracker", "event-tracker", "event_tracker"),
        (
            BatchInferenceJob,
            "batchInferenceJob",
            "batch-inference-job",
            "batch_inference_job",
        ),
        (BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
    ],
    ids=[
        "DatasetGroup",
        "Schema",
        "Dataset",
        "DatasetImportJob",
        "Solution",
        "SolutionVersion",
        "Campaign",
        "EventTracker",
        "BatchInferenceJob",
        # Fixed: the id previously read "BatchSegmentJob," with a stray
        # trailing comma inside the string, which showed up verbatim in
        # the generated test ids.
        "BatchSegmentJob",
    ],
)
def test_resource_naming(klass, camel, dash, snake):
    """Every resource class exposes consistent camelCase, dash-case and
    snake_case spellings of its name."""
    assert klass().name.camel == camel
    assert klass().name.dash == dash
    assert klass().name.snake == snake
| 42.408451
| 120
| 0.454334
| 211
| 3,011
| 6.43128
| 0.483412
| 0.044215
| 0.033161
| 0.023581
| 0.123803
| 0.123803
| 0.123803
| 0.123803
| 0
| 0
| 0
| 0.002137
| 0.37828
| 3,011
| 70
| 121
| 43.014286
| 0.722756
| 0.378612
| 0
| 0.148148
| 0
| 0
| 0.321451
| 0.014004
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.018519
| false
| 0
| 0.148148
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97884a1b2bbd76cce01bb9efe2744d31832af25
| 2,182
|
py
|
Python
|
gradefiles-send.py
|
lapets/bu-gsubmit-grading
|
69c40a763908be1c954dce3e5e5aab854ac379ff
|
[
"MIT"
] | 3
|
2016-10-03T15:29:20.000Z
|
2019-06-28T17:33:06.000Z
|
gradefiles-send.py
|
lapets/bu-gsubmit-grading
|
69c40a763908be1c954dce3e5e5aab854ac379ff
|
[
"MIT"
] | null | null | null |
gradefiles-send.py
|
lapets/bu-gsubmit-grading
|
69c40a763908be1c954dce3e5e5aab854ac379ff
|
[
"MIT"
] | null | null | null |
#####################################################################
##
## gradefiles-send.py
##
## Script to send grade files by email to enrolled students; the
## input grade file names should correspond to the user names of
## the students.
##
##
from email.mime.text import MIMEText # For creating a message string.
from subprocess import Popen, PIPE # For sending email on linux.
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## Sending a simple email message.
##
def send(txt, courseNumber, task, sender, targets):
    """Email the grade text `txt` to every target BU account, CC'ing the
    sender, by piping a MIME message into the local sendmail binary."""
    message = MIMEText(txt)
    sender_address = sender + "@bu.edu"
    message["From"] = sender_address
    message["To"] = ",".join([target + "@bu.edu" for target in targets])
    message["Cc"] = sender_address
    message["Subject"] = "CS " + courseNumber + " " + task + " grade"
    # sendmail -t reads the recipient list from the message headers.
    mailer = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
    mailer.communicate(message.as_string().encode('UTF-8'))
#####################################################################
## Process the command line parameters.
##
# Expect exactly: <###> <Fall|Spring> <YYYY> <task> <sender-username>,
# with a three-digit course number (100-999) and a year in 2000-2099.
if len(sys.argv) == 6\
   and (int(sys.argv[1][0:3]) in range(100,1000))\
   and sys.argv[2] in ['Fall', 'Spring']\
   and int(sys.argv[3]) in range(2000,2100):
    courseNumber = sys.argv[1] # Accepts course names like "591 X1."
    season = sys.argv[2]
    year = sys.argv[3]
    task = sys.argv[4]
    sender = sys.argv[5]
else:
    print('\n  Usage:\n\n    % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
    exit()

#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
    print('No folder "data" containing grade files found. Exiting.')
    exit()

#####################################################################
## Send the grade files.
##
# Each grade file in ./data is named after the student user name(s);
# multiple recipients are joined by underscores (e.g. "alice_bob.txt").
for curdir, dirs, files in os.walk('./data/'):
    for file in files:
        txt = open('./data/'+file, 'r').read()
        targets = file.split('.')[0].split("_")
        send(txt, courseNumber, task, sender, targets)
        print('Sent grade file to ' + str(targets) + '.')

#eof
| 33.569231
| 112
| 0.519707
| 264
| 2,182
| 4.287879
| 0.481061
| 0.055654
| 0.028269
| 0.040636
| 0.063604
| 0.063604
| 0
| 0
| 0
| 0
| 0
| 0.017807
| 0.176444
| 2,182
| 65
| 113
| 33.569231
| 0.612131
| 0.211274
| 0
| 0.060606
| 0
| 0.030303
| 0.208676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.121212
| 0
| 0.151515
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9789c0f2981942a54633089abdf3245b58a73a3
| 1,227
|
py
|
Python
|
Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
|
GalAster/16
|
47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
|
[
"Unlicense"
] | 3
|
2019-10-03T01:51:38.000Z
|
2019-10-04T16:15:43.000Z
|
Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
|
GalAster/16
|
47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
|
[
"Unlicense"
] | null | null | null |
Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
|
GalAster/16
|
47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
|
[
"Unlicense"
] | 1
|
2020-03-17T12:58:52.000Z
|
2020-03-17T12:58:52.000Z
|
import os
import pickle
import tensorflow as tf
import wolframclient.serializers as wxf

# Pickled PGGAN checkpoint (file name indicates Karras et al., ICLR 2018,
# trained on CelebA-HQ at 1024x1024).
name = 'karras2018iclr-celebahq-1024x1024'
file = open(name + '.pkl', 'rb')
sess = tf.InteractiveSession()
# G, D: generator/discriminator; Gs: a third generator instance
# (presumably the smoothed/inference snapshot — confirm against the
# original PGGAN export code).
G, D, Gs = pickle.load(file)

# Re-save the unpickled graph variables as a regular TF checkpoint.
saver = tf.train.Saver()
save_path = "./target/" + name + "/"
model_name = 'model'
if not os.path.exists(save_path):
    os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full)

# Read every variable back from the checkpoint and export the complete
# name -> tensor mapping as a Wolfram WXF file.
ckpt = tf.train.get_checkpoint_state(save_path)
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
all_variables = list(reader.get_variable_to_shape_map().keys())
npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))
wxf.export(npy, name + '.wxf', target_format='wxf')

# Save as protobuf: freeze variables into constants so the graph is
# self-contained, keeping only the generator output node.
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess=sess,
        input_graph_def=sess.graph_def,
        # output_node_names=['G_paper_1/images_out']
        output_node_names=['G_paper_1/ToRGB_lod0/add']
    )
    with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file:  # save the model
        file.write(output_graph_def.SerializeToString())  # write the serialized GraphDef
| 34.083333
| 74
| 0.726976
| 180
| 1,227
| 4.694444
| 0.461111
| 0.066272
| 0.028402
| 0.03787
| 0.052071
| 0.052071
| 0
| 0
| 0
| 0
| 0
| 0.014178
| 0.137734
| 1,227
| 35
| 75
| 35.057143
| 0.784499
| 0.05705
| 0
| 0
| 0
| 0
| 0.085938
| 0.049479
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.137931
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b978dfcb152bc099b2de54896ed9a54dfbc29639
| 6,890
|
py
|
Python
|
src/moveGoogle.py
|
Quanta-Robotics/Robot-Blueberry
|
7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
|
[
"MIT"
] | 25
|
2021-06-08T07:09:30.000Z
|
2021-12-30T06:28:35.000Z
|
src/moveGoogle.py
|
ICT-CoU/Robot-Blueberry
|
d19fd1be037df9d67de64df57a87006d74cd6c43
|
[
"MIT"
] | 2
|
2021-05-23T12:54:51.000Z
|
2021-06-07T17:47:56.000Z
|
src/moveGoogle.py
|
ICT-CoU/Robot-Blueberry
|
d19fd1be037df9d67de64df57a87006d74cd6c43
|
[
"MIT"
] | 14
|
2021-06-08T13:02:28.000Z
|
2021-12-30T20:07:18.000Z
|
#!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from adafruit_servokit import ServoKit
# DC motor driver wiring (BCM numbering): EN is the PWM enable pin,
# input1/input2 select the rotation direction.
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
    GPIO.setup(Motor1[x], GPIO.OUT)
    GPIO.setup(Motor2[x], GPIO.OUT)
# 100 Hz software PWM on both enable pins, started at 0% duty (stopped).
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
# 16-channel I2C servo controller board driving the robot's servos.
hand = ServoKit(channels=16)
# Repository root: two directory levels above this file.
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
def readYaml():
    """Return the servo configuration parsed from src/configuration.yaml."""
    # NOTE: opened 'r+' although the file is only read here.
    with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
        servo = yaml.load(conf, Loader=yaml.FullLoader)
    return servo

def writeYaml(s=None):
    """Persist the global `servo` mapping (or the explicit mapping `s`)
    back to src/configuration.yaml."""
    with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:
        if s==None:
            yaml.dump(servo,conf)
        else:
            yaml.dump(s,conf)
servo = readYaml()
# If the main configuration is empty/unreadable, restore it from the
# backup copy and retry once; give up if that also fails.
if servo == None:
    with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
        servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
    writeYaml(servoBackUp)
    servo = readYaml()
    if servo == None:
        print('close')
        exit()
# Servo angle tables from the config: servos on the I2C controller and
# servos driven directly from GPIO pins are tracked separately.
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
    GPIO.setup(GpioPin[i], GPIO.OUT)
# One 50 Hz PWM object per GPIO-driven servo, started at 0% duty.
Servo = []
for i in range(0,6):
    Servo.append(GPIO.PWM(GpioPin[i],50))
    Servo[i].start(0)
def changeDegree(pin,newDegree,time1=0.05,update=5):
    """Sweep the I2C servos listed in `pin` to the angles in `newDegree`.

    All listed servos move together in steps of `update` degrees with
    `time1` seconds between steps; the reached position is written back
    to the YAML config after every step so a restart resumes from the
    physical position.
    """
    maxChange = 0
    pinSize = len(pin)
    # The servo with the longest distance to travel decides the step count.
    for i in range(0,pinSize):
        maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)
    for deg in range(0,maxChange,update):
        for i in range(0,pinSize):
            if Current[pin[i]]<newDegree[i]:
                Current[pin[i]] += update
            elif Current[pin[i]]>newDegree[i]:
                Current[pin[i]] -= update
        for i in range(0,pinSize):
            hand.servo[pin[i]].angle = Current[pin[i]]
            servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]
        writeYaml()
        time.sleep(time1)
def takePosition():
    """Return all I2C servos to the neutral 'ready' pose."""
    changeDegree([7,8],[180,0])
    changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])
def changeDegreeGpio(pin,degree,update,duration):
    """Sweep the GPIO-driven servos in `pin` to the angles in `degree`,
    one servo at a time, stepping `update` degrees every `duration` s.

    NOTE(review): `update` is negated in place when a servo must move
    downwards and stays negated for the remaining pins in the same call —
    confirm whether that is intended for mixed-direction moves.
    """
    pinSize = len(pin)
    for i in range(0,pinSize):
        p = pin[i]
        if CurrentGpio[p]>degree[i]:
            update = -update
        for deg in range(CurrentGpio[p],degree[i],update):
            # Map 0-180 degrees onto a 2-12% duty cycle (50 Hz servo PWM).
            duty = deg/18
            duty+=2
            Servo[p].ChangeDutyCycle(duty)
            time.sleep(duration)
        CurrentGpio[p]=degree[i]
        writeYaml()
def Run(a, b, c, d, x):
    """Drive the two wheel motors: a/b select Motor1's direction pins,
    c/d Motor2's, and x is the PWM duty cycle (0-100)."""
    # Clear every direction pin first so only the requested ones go HIGH.
    GPIO.output(Motor1['input1'], GPIO.LOW)
    GPIO.output(Motor1['input2'], GPIO.LOW)
    GPIO.output(Motor2['input1'], GPIO.LOW)
    GPIO.output(Motor2['input2'], GPIO.LOW)
    if a==1:
        GPIO.output(Motor1['input1'], GPIO.HIGH)
    if b==1:
        GPIO.output(Motor1['input2'], GPIO.HIGH)
    if c==1:
        GPIO.output(Motor2['input1'], GPIO.HIGH)
    if d==1:
        GPIO.output(Motor2['input2'], GPIO.HIGH)
    EN2.ChangeDutyCycle(x)
    EN1.ChangeDutyCycle(x)

def Stop():
    """Cut power to both motors (all pins LOW, 0% duty)."""
    Run(0,0,0,0,0)

def Start_Slow(a, b, c, d):
    """Ramp the motors up in 20% duty steps every 0.5 s.
    NOTE(review): range(0,100,20) tops out at 80%, never reaching full
    speed — confirm whether that is intended."""
    for i in range(0,100,20):
        Run(a,b,c,d,i)
        time.sleep(0.5)

def Stop_Slow(a,b,c,d):
    """Ramp the motors down in 20% duty steps every 0.5 s (the ramp ends
    at 20%; call Stop() afterwards to actually halt)."""
    for i in range(100,0,-20):
        Run(a,b,c,d,i)
        time.sleep(0.5)
def yes(times=3):
    """Nod the head servo (I2C channel 0) `times` times."""
    for i in range(0,times):
        changeDegree([0],[30])
        time.sleep(0.08)
        changeDegree([0],[0])
        time.sleep(0.08)

def no(times=3):
    """Shake servo 15 between 70 and 110 degrees `times` times, then
    re-centre it at 90."""
    for i in range(0,times):
        changeDegree([15],[70],5,0.05)
        time.sleep(0.2)
        changeDegree([15],[110],5,0.05)
        time.sleep(0.2)
    changeDegree([15],[90],5,0.05)

def move_head(times=3):
    """Wiggle the head using both the I2C servo 0 and GPIO servo 0,
    re-centring the GPIO servo at 90 afterwards."""
    for i in range(0,times):
        changeDegree([0],[20])
        changeDegreeGpio([0],[80],5,0.05)
        changeDegree([0],[0])
        changeDegreeGpio([0],[100],5,0.05)
    changeDegreeGpio([0],[90],10,0.01)
def random0():
    """Play one of three small random head gestures."""
    r = random.randrange(1,10000000)%3
    if(r==1):
        changeDegree([0],[20])
        changeDegree([0],[0])
    elif(r==2):
        changeDegreeGpio([0],[120],5,0.05)
        changeDegreeGpio([0],[90],5,0.05)
    else:
        changeDegreeGpio([0],[60],5,0.05)
        changeDegreeGpio([0],[90],5,0.05)

def random1():
    """Play one of three random head-plus-arm gesture sequences.
    NOTE(review): randrange(1,3) only yields 1 or 2, so the else branch
    (both arms together) is unreachable — confirm intent."""
    r = random.randrange(1,3)
    if(r==1):
        changeDegree([0],[20])
        changeDegree([0],[0])
        changeDegree([3],[50])
        changeDegree([9],[100])
        changeDegree([9],[60])
        changeDegree([3],[0])
    elif(r==2):
        changeDegree([0],[20])
        changeDegree([0],[0])
        changeDegree([4],[120])
        changeDegree([10],[140])
        changeDegree([10],[180])
        changeDegree([4],[170])
    else:
        changeDegree([3,4],[50,120])
        changeDegree([9,10],[100,140])
        changeDegree([9,10],[60,180])
        changeDegree([3,4],[0,180])
def random2():
    """Scripted arm dance: 15 moves over servos 7-10 following a fixed
    selection table, then return to the rest position."""
    changeDegree([3,4],[20,150])
    pin = [7,8,9,10]
    # Two alternative target angles per servo; ok[r] picks which one is next.
    deg = [[160,0,60,100],[180,20,100,140]]
    ok = [0,0,0,0]
    select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]
    for i in range(0,15):
        r = select[i%len(select)]%4
        print (' move ',r)
        changeDegree([pin[r]],[deg[ok[r]][r]])
        # Toggle so the same servo alternates between its two targets.
        ok[r]^=1
    takePosition()
def random3():
    """Random arm dance: 15 moves over servos 7-10 with randomly chosen servo,
    then return to the rest position.

    NOTE(review): unlike random2, ok[r] is never toggled here, so deg[0][r]
    is always used — confirm whether `ok[r] ^= 1` was intended in the loop.
    """
    changeDegree([3,4],[20,150])
    pin = [7,8,9,10]
    deg = [[160,0,60,100],[180,20,100,140]]
    ok = [0,0,0,0]
    for i in range(0,15):
        r = random.randrange(1,1000000)%4
        print (' move ',r)
        changeDegree([pin[r]],[deg[ok[r]][r]])
    takePosition()
def randomCall(t):
    """Gesture routine used while speaking: make t*3 pseudo-random servo moves
    over servos 5-8, then return to the rest position.

    Bug fix: `ln = len(select)` was computed but the loop indexed
    `select[i % 16]`, silently ignoring half the selection table; the
    modulus now uses `ln` as clearly intended.
    """
    changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])
    pin = [5,6,7,8]
    # Two alternative target angles per servo; ok[r] picks which one is next.
    deg = [[80,50,100,70],[110,90,110,90]]
    # Fixed pseudo-random selection table, cycled in full.
    select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]
    ok = [0,0,0,0]
    ln = len(select)
    for i in range(0,t*3):
        r = select[i%ln]%4
        changeDegree([pin[r]],[deg[ok[r]][r]])
        # Toggle so the same servo alternates between its two targets.
        ok[r]^=1
    takePosition()
def expression(t):
    """Dispatch to one of the canned gesture routines based on t;
    any t outside 0-3 falls through to randomCall(t)."""
    print (' i got value of t is : ',t)
    canned = {0: random0, 1: random1, 2: random2, 3: random3}
    routine = canned.get(t)
    if routine is not None:
        routine()
    else:
        randomCall(t)
def speakOnline(t):
    """Run the gesture routine for online speech; t selects/sizes the gesture."""
    expression(t)
def speakOffline(speech):
    """Speak `speech` while gesturing in a parallel process.

    The gesture budget t is estimated from the text length (one unit per
    ~15 characters) so gestures roughly span the utterance.
    """
    t = int(len(speech)/15)
    print ('Offline t value is : ',t)
    # Gestures run in a separate process so say() is not blocked.
    p1 = multiprocessing.Process(target=expression,args=[t])
    p1.start()
    say(speech)
| 25.330882
| 154
| 0.560377
| 1,039
| 6,890
| 3.699711
| 0.200192
| 0.009365
| 0.021852
| 0.040062
| 0.401145
| 0.30385
| 0.274454
| 0.241415
| 0.215401
| 0.111603
| 0
| 0.121689
| 0.243832
| 6,890
| 271
| 155
| 25.424354
| 0.616123
| 0.002903
| 0
| 0.302326
| 0
| 0
| 0.050532
| 0.011796
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.04186
| 0
| 0.139535
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b978fbbcd4002601ca1e2723cae4385002e671d8
| 2,063
|
py
|
Python
|
src/onegov/translator_directory/models/language.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/translator_directory/models/language.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/translator_directory/models/language.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from uuid import uuid4
from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
# Many-to-many link: which languages a translator speaks.
spoken_association_table = Table(
    'spoken_lang_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
# Many-to-many link: which languages a translator writes.
written_association_table = Table(
    'written_lang_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
# Many-to-many link: which languages are a translator's mother tongue.
mother_tongue_association_table = Table(
    'mother_tongue_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
class Language(Base):
    """A language, linked to translators through the spoken / written /
    mother-tongue association tables; deletable only when unreferenced."""

    __tablename__ = 'languages'

    # Language names must be unique.
    __table_args__ = (
        Index('unique_name', 'name', unique=True),
    )

    id = Column(UUID, primary_key=True, default=uuid4)
    name = Column(Text, nullable=False)

    @property
    def speakers_count(self):
        """Number of translators who speak this language."""
        session = object_session(self)
        return session.query(
            spoken_association_table).filter_by(lang_id=self.id).count()

    @property
    def writers_count(self):
        """Number of translators who write this language."""
        session = object_session(self)
        return session.query(
            written_association_table).filter_by(lang_id=self.id).count()

    @property
    def native_speakers_count(self):
        """Having it as mother tongue..."""
        session = object_session(self)
        return session.query(
            mother_tongue_association_table).filter_by(lang_id=self.id).count()

    @property
    def deletable(self):
        """True when no translator references this language in any way."""
        return (
            self.speakers_count
            + self.writers_count
            + self.native_speakers_count
        ) == 0
| 25.469136
| 79
| 0.650994
| 228
| 2,063
| 5.649123
| 0.245614
| 0.070652
| 0.074534
| 0.067547
| 0.544255
| 0.544255
| 0.544255
| 0.511646
| 0.511646
| 0.432453
| 0
| 0.001917
| 0.241396
| 2,063
| 80
| 80
| 25.7875
| 0.821086
| 0.014057
| 0
| 0.484375
| 0
| 0
| 0.115385
| 0.035503
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.078125
| 0.015625
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97a0b2a9f0b601569ce8973596517ed7d8790ec
| 3,588
|
py
|
Python
|
tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
|
djemeljanovs/tfjs
|
ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
|
[
"Apache-2.0"
] | null | null | null |
tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
|
djemeljanovs/tfjs
|
ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
|
[
"Apache-2.0"
] | null | null | null |
tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
|
djemeljanovs/tfjs
|
ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
# Custom op name for fused depthwise conv2d
FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'
# The grappler op name for fused MatMul which starts with '_'
FUSED_MATMUL = '_FusedMatMul'
def node_from_map(node_map, name):
    """Pull a node def from a dictionary for a given name.

    Args:
      node_map: Dictionary containing an entry indexed by name for every node.
      name: Identifies the node we want to find.

    Returns:
      NodeDef of the node with the given name.

    Raises:
      ValueError: If the node isn't present in the dictionary.
    """
    key = node_name_from_input(name)
    try:
        return node_map[key]
    except KeyError:
        raise ValueError("No node named '%s' found in map." % name) from None
def values_from_const(node_def):
    """Extracts the values from a const NodeDef as a numpy ndarray.

    Args:
      node_def: Const NodeDef that has the values we want to access.

    Returns:
      Numpy ndarray containing the values.

    Raises:
      ValueError: If the node isn't a Const.
    """
    if node_def.op != "Const":
        raise ValueError(
            "Node named '%s' should be a Const op for values_from_const." %
            node_def.name)
    input_tensor = node_def.attr["value"].tensor
    # Decode the serialized TensorProto into a numpy array.
    tensor_value = tensor_util.MakeNdarray(input_tensor)
    return tensor_value
def scale_after_normalization(node):
    """Return whether outputs should be scaled by gamma after normalization.

    Only the legacy BatchNormWithGlobalNormalization op carries an explicit
    flag; every other op scales unconditionally.
    """
    if node.op != "BatchNormWithGlobalNormalization":
        return True
    return node.attr["scale_after_normalization"].b
def node_name_from_input(node_name):
    """Strips off ports and other decorations to get the underlying node name."""
    # Drop a leading control-dependency marker, if any.
    name = node_name[1:] if node_name.startswith("^") else node_name
    # Drop a trailing ":<port>" suffix, if any.
    match = re.search(r"(.*):\d+$", name)
    return match.group(1) if match else name
def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):
    """Clean up the graph def by removing the skipped nodes and clean up the nodes
    with inputs that have been removed.

    Args:
      input_graph_def: GraphDef object to be cleaned.
      nodes_to_skip: Dict with node names to be skipped.
      inputs_to_remove: List of nodes to be removed from inputs of all nodes.

    Returns:
      GraphDef that has been cleaned.
    """
    result_graph_def = graph_pb2.GraphDef()
    for node in input_graph_def.node:
        if node.name in nodes_to_skip:
            continue
        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(node)
        # Rewire any input that referenced a removed node to that node's
        # own first input, bypassing it.
        for value in inputs_to_remove:
            for i, input_node in enumerate(new_node.input):
                if input_node == value.name:
                    new_node.input[i] = value.input[0]
        result_graph_def.node.extend([new_node])
    # Preserve the function library and version info of the original graph.
    result_graph_def.library.CopyFrom(input_graph_def.library)
    result_graph_def.versions.CopyFrom(input_graph_def.versions)
    return result_graph_def
| 33.849057
| 80
| 0.726031
| 540
| 3,588
| 4.651852
| 0.333333
| 0.038217
| 0.025876
| 0.012739
| 0.066879
| 0.023089
| 0.023089
| 0
| 0
| 0
| 0
| 0.006108
| 0.178651
| 3,588
| 105
| 81
| 34.171429
| 0.846284
| 0.479654
| 0
| 0
| 0
| 0
| 0.11573
| 0.046629
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.088889
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97af59ee4283114481f3e83dc8e3cf6244bb61c
| 1,014
|
py
|
Python
|
loss_fn/classification_loss_fns/binary_cross_entropy.py
|
apple/ml-cvnets
|
84d992f413e52c0468f86d23196efd9dad885e6f
|
[
"AML"
] | 209
|
2021-10-30T08:32:10.000Z
|
2022-03-31T16:18:03.000Z
|
loss_fn/classification_loss_fns/binary_cross_entropy.py
|
apple/ml-cvnets
|
84d992f413e52c0468f86d23196efd9dad885e6f
|
[
"AML"
] | 12
|
2021-12-04T10:47:11.000Z
|
2022-03-31T15:39:40.000Z
|
loss_fn/classification_loss_fns/binary_cross_entropy.py
|
apple/ml-cvnets
|
84d992f413e52c0468f86d23196efd9dad885e6f
|
[
"AML"
] | 50
|
2021-11-01T08:15:02.000Z
|
2022-03-29T08:17:34.000Z
|
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch.nn import functional as F
from torch import Tensor
import argparse
from . import register_classification_loss_fn
from .. import BaseCriteria
@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
    """Binary CE for classification tasks."""

    def __init__(self, opts, *args, **kwargs) -> None:
        # opts is accepted for registry-interface compatibility; unused here.
        super().__init__()

    def forward(
        self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
    ) -> Tensor:
        """Return the summed binary cross-entropy between prediction logits and target.

        If target arrives as class indices (one dim fewer than prediction),
        it is first expanded to a one-hot encoding over the last axis.
        input_sample is unused, kept for the BaseCriteria interface.
        """
        if target.dim() != prediction.dim():
            target = F.one_hot(target, num_classes=prediction.shape[-1])
        return F.binary_cross_entropy_with_logits(
            input=prediction,
            target=target.to(prediction.dtype),
            weight=None,
            reduction="sum",
        )

    def __repr__(self) -> str:
        return "{}()".format(self.__class__.__name__)
| 28.166667
| 87
| 0.667653
| 116
| 1,014
| 5.534483
| 0.586207
| 0.028037
| 0.080997
| 0.087227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006337
| 0.221893
| 1,014
| 35
| 88
| 28.971429
| 0.807351
| 0.129191
| 0
| 0
| 0
| 0
| 0.030892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.227273
| 0.045455
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97c7f15dd61f4851cffcb3982337f852b3b8da5
| 576
|
py
|
Python
|
Sorting/insertion_sort.py
|
lakshyarawal/pythonPractice
|
4b400342198a8270c5ac0c6306afb555f927c6c1
|
[
"MIT"
] | null | null | null |
Sorting/insertion_sort.py
|
lakshyarawal/pythonPractice
|
4b400342198a8270c5ac0c6306afb555f927c6c1
|
[
"MIT"
] | null | null | null |
Sorting/insertion_sort.py
|
lakshyarawal/pythonPractice
|
4b400342198a8270c5ac0c6306afb555f927c6c1
|
[
"MIT"
] | null | null | null |
""" Insertion Sort Algorithm:"""
"""Implementation"""
def insertion_sort(arr) -> list:
    """Sort `arr` in place via insertion sort (adjacent swaps) and return it."""
    for i in range(1, len(arr)):
        # Bubble arr[i] leftwards until it meets an element no larger.
        j = i
        while j > 0 and arr[j] < arr[j - 1]:
            arr[j], arr[j - 1] = arr[j - 1], arr[j]
            j -= 1
    return arr
def main():
    """Demo entry point: sort a sample list and print the result."""
    sample = [10, 5, 30, 1, 2, 5, 10, 10]
    print(insertion_sort(sample))
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 19.2
| 65
| 0.522569
| 80
| 576
| 3.5
| 0.425
| 0.160714
| 0.128571
| 0.107143
| 0.171429
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0.049738
| 0.336806
| 576
| 29
| 66
| 19.862069
| 0.683246
| 0.107639
| 0
| 0
| 0
| 0
| 0.016461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0
| 0
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97cd7905f5c596cb6d79b67c2c80e83907421d9
| 8,257
|
py
|
Python
|
network.py
|
tobloef/neural-network
|
bd05a8b9eccc0f5a973782247d39f9b5aa33156c
|
[
"MIT"
] | 3
|
2018-01-06T22:27:58.000Z
|
2018-08-12T20:29:51.000Z
|
network.py
|
tobloef/neural-network
|
bd05a8b9eccc0f5a973782247d39f9b5aa33156c
|
[
"MIT"
] | 1
|
2018-03-31T18:49:56.000Z
|
2018-04-19T04:52:33.000Z
|
network.py
|
tobloef/neural-network
|
bd05a8b9eccc0f5a973782247d39f9b5aa33156c
|
[
"MIT"
] | null | null | null |
import numpy as np
from mathUtils import *
class Network(object):
    """
    Model for a feedforward Neural Network that uses backpropagation with stochastic gradient descent.
    """
    def __init__(self, layerSizes, biasVectors, weightMatrices):
        """
        Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer.
        """
        self.layerSizes = layerSizes
        self.biasVectors = biasVectors
        self.weightMatrices = weightMatrices

    @staticmethod
    def generateRandomNetwork(layerSizes):
        """
        Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are generated using a Gaussian distribution, so the results are more probable to be around 0.
        """
        biasVectors = []
        """Generate biases for each neuron in each layer, except the input layer."""
        for size in layerSizes[1:]:
            """
            np.random.randn generates arrays of arrays of random numbers, based on the paramters.
            np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers.
            """
            biasVectors.append(np.random.randn(size, 1))
        """Generate weights for connections between layers."""
        # Each weight matrix has shape (next layer size, this layer size) so it
        # can left-multiply the activation column vector in getOutputs.
        weightMatrices = []
        for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]):
            weightMatrices.append(np.random.randn(prevSize, size))
        return Network(layerSizes, biasVectors, weightMatrices)

    def getOutputs(self, inputs):
        """Return a vector of the network's outputs based on the given inputs, using feedforward."""
        activations = inputs
        for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
            """
            For every layer, get the bias vector and the weight matrix. Then get dot product between the weight matrix and the output vector and add the bias vector. This is the activation vector for the current layer.
            """
            zVector = np.dot(weightMatrix, activations) + biasVector
            activations = sigmoid(zVector)
        return activations

    def train(self, data, epochs, batchSize, rate, testData=None):
        """
        Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training are used to reduce the training time. The training date is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch.
        """
        print("Training network with shape {}, batch size {} and learning rate {} for {} epochs...".format(self.layerSizes, batchSize, rate, epochs))
        for e in range(epochs):
            np.random.shuffle(data)
            batches = []
            for i in range(0, len(data), batchSize):
                batches.append(data[i:i+batchSize])
            for batch in batches:
                self._tuneNetwork(batch, rate)
            if (testData):
                result = self._evaluate(testData)
                # NOTE(review): this branch reports epochs 1-based (e+1), the
                # else branch 0-based (e) — confirm which is intended.
                print("Epoch #{} completed with {:.2f}% correctness.".format(e+1, 100/len(testData)*result))
            else:
                print("Epoch #{} completed.".format(e))

    def _tuneNetwork(self, batch, rate):
        """
        Tune the weights and biases of the network by using backpropagation with gradient descend.
        """
        """
        Setup matrix and vector based on the weight matrix and bias vector filled with zeroes. This is used for storing each change to make for each vector, for each set of training date.
        """
        sumBiasVectors = []
        for biasVector in self.biasVectors:
            sumBiasVectors.append(np.zeros(biasVector.shape))
        sumWeightMatrices = []
        for weightMatrix in self.weightMatrices:
            sumWeightMatrices.append(np.zeros(weightMatrix.shape))
        for inputs, expected in batch:
            """
            Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to a set of matrix/vector totalling the changes needed from all the training data.
            """
            deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected)
            newSumBiasVectors = []
            for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors):
                newSumBiasVectors.append(totalBiasVector + deltaBiasVector)
            sumBiasVectors = newSumBiasVectors
            newSumWeightMatrices = []
            for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices):
                newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix)
            sumWeightMatrices = newSumWeightMatrices
        """
        Take each change for each set of training data, get the average of these and subtract them from the current weights and biases. Then use these as the new weights and biases.
        """
        newBiasVectors = []
        for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors):
            newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector)
        newWeightMatrices = []
        for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices):
            newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix)
        self.biasVectors = newBiasVectors
        self.weightMatrices = newWeightMatrices

    def _backpropagate(self, inputs, expected):
        """
        Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data.
        """
        deltaBiasVectors = []
        for biasVector in self.biasVectors:
            deltaBiasVectors.append(np.zeros(biasVector.shape))
        deltaWeightMatrices = []
        for weightMatrix in self.weightMatrices:
            deltaWeightMatrices.append(np.zeros(weightMatrix.shape))
        """Store all activations for the entire network, starting with the input layer."""
        activationVector = inputs
        activationVectors = [inputs]
        """Find the z-vector for layer in the network"""
        zVectors = []
        for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
            zVector = np.dot(weightMatrix, activationVector) + biasVector
            zVectors.append(zVector)
            activationVector = sigmoid(zVector)
            activationVectors.append(activationVector)
        """
        * Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias.
        * Then move onto each hidden layer and the input layer.
        """
        deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1])
        deltaBiasVectors[-1] = deltaBiasVector
        deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose())
        # Walk backwards through the hidden layers.
        for l in range(-2, -len(self.layerSizes), -1):
            # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead
            weightMatrix = self.weightMatrices[l+1].transpose()
            sigmoidDeriv = sigmoidDerivative(zVectors[l])
            deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv
            deltaBiasVectors[l] = deltaBiasVector
            deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose())
        return (deltaBiasVectors, deltaWeightMatrices)

    def _evaluate(self, testData):
        """Test the network with the specified test data and return the number of correct guesses."""
        correctGuesses = 0
        for inputs, expected in testData:
            """Increment correct guesses if the most active output is the expected one."""
            outputs = self.getOutputs(inputs)
            guess = np.argmax(outputs)
            if (guess == expected):
                correctGuesses += 1
        return correctGuesses
| 53.270968
| 286
| 0.657987
| 900
| 8,257
| 6.025556
| 0.253333
| 0.026554
| 0.017702
| 0.011064
| 0.075973
| 0.023234
| 0.023234
| 0.023234
| 0.023234
| 0
| 0
| 0.005106
| 0.264745
| 8,257
| 155
| 287
| 53.270968
| 0.888157
| 0.162529
| 0
| 0.06383
| 0
| 0
| 0.028811
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074468
| false
| 0
| 0.021277
| 0
| 0.148936
| 0.031915
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97deb7d2bd255cd9a3d9f169d969333b63452ec
| 313
|
py
|
Python
|
sample/pizza.py
|
marianarmorgado/python-starter
|
8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
|
[
"MIT"
] | null | null | null |
sample/pizza.py
|
marianarmorgado/python-starter
|
8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
|
[
"MIT"
] | null | null | null |
sample/pizza.py
|
marianarmorgado/python-starter
|
8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
|
[
"MIT"
] | null | null | null |
# store information about a pizza being ordered
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra vegan cheese']
}

# summarize the order
# Bug fix: the two concatenated fragments had no separating space and
# printed "...pizzawith the following toppings:".
print("You ordered a " + pizza['crust'] + "-crust pizza " +
      "with the following toppings:")
for topping in pizza['toppings']:
    print("\t" + topping)
| 26.083333
| 59
| 0.645367
| 38
| 313
| 5.315789
| 0.657895
| 0.059406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198083
| 313
| 12
| 60
| 26.083333
| 0.804781
| 0.207668
| 0
| 0
| 0
| 0
| 0.463415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97e5feb1052b87d359d8e3d9f63ba930bff8e66
| 15,038
|
py
|
Python
|
dnnlib/submission/submit.py
|
gperdrizet/gansformer
|
c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
|
[
"MIT"
] | 1,172
|
2021-03-02T02:00:44.000Z
|
2022-03-31T02:46:45.000Z
|
dnnlib/submission/submit.py
|
gperdrizet/gansformer
|
c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
|
[
"MIT"
] | 37
|
2021-03-03T14:11:11.000Z
|
2022-03-12T15:40:15.000Z
|
dnnlib/submission/submit.py
|
gperdrizet/gansformer
|
c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
|
[
"MIT"
] | 138
|
2021-03-02T06:37:10.000Z
|
2022-03-30T14:59:09.000Z
|
# Submit a function to be run either locally or in a computing cluster.
# Compared to original StyleGAN implementation, we extend the support for automatic training resumption,
# and network recompilation.
import copy
import inspect
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import sys
import time
import traceback
from enum import Enum
from .. import util
from ..util import EasyDict
from . import internal
class SubmitTarget(Enum):
    """The target where the function should be run.

    LOCAL: Run it locally.
    """
    LOCAL = 1
class PathType(Enum):
    """Determines in which format should a path be formatted.

    WINDOWS: Format with Windows style.
    LINUX: Format with Linux/Posix style.
    AUTO: Use current OS type to select either WINDOWS or LINUX.
    """
    WINDOWS = 1
    LINUX = 2
    AUTO = 3
class PlatformExtras:
    """A mixed bag of values used by dnnlib heuristics.

    Attributes:
        data_reader_buffer_size: Used by DataReader to size internal shared memory buffers.
        data_reader_process_count: Number of worker processes to spawn
            (zero for single thread operation).
    """
    def __init__(self):
        self.data_reader_buffer_size = 1<<30    # 1 GB
        self.data_reader_process_count = 0      # single threaded default
_user_name_override = None
class SubmitConfig(util.EasyDict):
    """Strongly typed config dict needed to submit runs.

    Attributes:
        run_dir_root: Path to the run dir root. Can be optionally templated with tags.
            Needs to always be run through get_path_from_template.
        run_desc: Description of the run. Will be used in the run dir and task name.
        run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir.
        run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will
            be the src directory inside the run dir.
        submit_target: Submit target enum value. Used to select where the run is actually launched.
        num_gpus: Number of GPUs used/requested for the run.
        print_info: Whether to print debug information when submitting.
        local.do_not_copy_source_files: Do not copy source files from the working directory to the
            run dir.
        run_id: Automatically populated value during submit.
        run_name: Automatically populated value during submit.
        run_dir: Automatically populated value during submit.
        run_func_name: Automatically populated value during submit.
        run_func_kwargs: Automatically populated value during submit.
        user_name: Automatically populated value during submit. Can be set by the user which will then
            override the automatic value.
        task_name: Automatically populated value during submit.
        host_name: Automatically populated value during submit.
        platform_extras: Automatically populated values during submit. Used by various dnnlib libraries
            such as the DataReader class.
    """
    def __init__(self):
        super().__init__()

        # run (set these)
        self.run_dir_root = ""  # should always be passed through get_path_from_template
        self.run_desc = ""
        self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs",
                               ".vscode", "_cudacache"]
        self.run_dir_extra_files = []

        # submit (set these)
        self.submit_target = SubmitTarget.LOCAL
        self.num_gpus = 1
        self.print_info = False
        self.nvprof = False
        self.local = internal.local.TargetOptions()
        self.datasets = []

        # (automatically populated)
        self.run_id = None
        self.run_name = None
        self.run_dir = None
        self.run_func_name = None
        self.run_func_kwargs = None
        self.user_name = None
        self.task_name = None
        self.host_name = "localhost"
        self.platform_extras = PlatformExtras()
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
    """Replace tags in the given path template and return either Windows or Linux formatted path."""
    # automatically select path type depending on running OS
    if path_type == PathType.AUTO:
        if platform.system() == "Windows":
            path_type = PathType.WINDOWS
        elif platform.system() == "Linux":
            path_type = PathType.LINUX
        else:
            raise RuntimeError("Unknown platform")

    # <USERNAME> is the only tag substituted here.
    path_template = path_template.replace("<USERNAME>", get_user_name())

    # return correctly formatted path
    if path_type == PathType.WINDOWS:
        return str(pathlib.PureWindowsPath(path_template))
    elif path_type == PathType.LINUX:
        return str(pathlib.PurePosixPath(path_template))
    else:
        raise RuntimeError("Unknown platform")
def get_template_from_path(path: str) -> str:
    """Convert a normal path back to its template representation (forward slashes only)."""
    return path.replace("\\", "/")
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
    """Convert a normal path to template and then convert it back to a normal path with given path type."""
    path_template = get_template_from_path(path)
    path = get_path_from_template(path_template, path_type)
    return path
def set_user_name_override(name: str) -> None:
    """Set the global username override value used by get_user_name()."""
    global _user_name_override
    _user_name_override = name
def get_user_name():
    """Return the current user name: the explicit override if set, otherwise
    an OS-specific lookup.

    Raises:
        RuntimeError: On platforms other than Windows and Linux (when no
            override is set).
    """
    if _user_name_override is not None:
        return _user_name_override
    elif platform.system() == "Windows":
        return os.getlogin()
    elif platform.system() == "Linux":
        try:
            import pwd
            return pwd.getpwuid(os.geteuid()).pw_name
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed SystemExit /
            # KeyboardInterrupt. The uid may be unknown (e.g. in minimal
            # containers), so fall back to a placeholder.
            return "unknown"
    else:
        raise RuntimeError("Unknown platform")
def make_run_dir_path(*paths):
    """Make a path/filename that resides under the current submit run_dir.

    Args:
        *paths: Path components to be passed to os.path.join.

    Returns:
        A file/dirname rooted at submit_config.run_dir. If there's no
        submit_config or run_dir, the base directory is the current
        working directory.

    E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))`
    """
    import dnnlib
    if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):
        return os.path.join(os.getcwd(), *paths)
    return os.path.join(dnnlib.submit_config.run_dir, *paths)
def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str:
# Create a new run dir with increasing ID number at the start
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
os.makedirs(run_dir_root)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if not resume:
if os.path.exists(run_dir) and create_new:
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
if not os.path.exists(run_dir):
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
# Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id
# Assumes IDs are numbers at the start of the directory names
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:
    """Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable."""
    # Persist the config both as a pickle (for run.py) and as readable text.
    pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
    with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
        pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False)

    if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:
        return

    files = []

    # Collect the package containing the run function (walk up one dir per
    # dotted component past the module itself).
    run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
    assert "." in submit_config.run_func_name
    for _idx in range(submit_config.run_func_name.count(".") - 1):
        run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
    files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False)

    # Also ship the dnnlib package and any explicitly requested extra files.
    dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
    files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True)

    files += submit_config.run_dir_extra_files

    files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
    files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))]

    util.copy_files_and_create_dirs(files)
def run_wrapper(submit_config: SubmitConfig) -> None:
    """Wrap the actual run function call for handling logging, exceptions, typing, etc."""
    is_local = submit_config.submit_target == SubmitTarget.LOCAL

    # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
    if is_local:
        logger = util.Logger(file_name = os.path.join(submit_config.run_dir, "log.txt"), file_mode="a", should_flush = True)
    else:  # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
        logger = util.Logger(file_name = None, should_flush = True)

    import dnnlib
    dnnlib.submit_config = submit_config

    exit_with_errcode = False
    try:
        print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
        start_time = time.time()

        run_func_obj = util.get_obj_by_name(submit_config.run_func_name)
        assert callable(run_func_obj)
        sig = inspect.signature(run_func_obj)
        # Pass the config through only if the run function declares it.
        if "submit_config" in sig.parameters:
            run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs)
        else:
            run_func_obj(**submit_config.run_func_kwargs)

        print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
    except:
        if is_local:
            raise
        else:
            traceback.print_exc()

            log_src = os.path.join(submit_config.run_dir, "log.txt")
            log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
            shutil.copyfile(log_src, log_dst)

            # Defer sys.exit(1) to happen after we close the logs and create a _finished.txt
            exit_with_errcode = True
    finally:
        # The _finished.txt marker is created even on failure.
        open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()

    dnnlib.RunContext.get().close()
    dnnlib.submit_config = None
    logger.close()

    # If we hit an error, get out of the script now and signal the error
    # to whatever process that started this script.
    if exit_with_errcode:
        sys.exit(1)

    return submit_config
def open_file_or_url(file_or_url):
    """Open *file_or_url* for reading.

    Local paths are opened in binary mode; URLs are fetched through
    ``util.open_url`` with responses cached under ``.stylegan2-cache``.
    """
    if not util.is_url(file_or_url):
        return open(file_or_url, "rb")
    return util.open_url(file_or_url, cache_dir = ".stylegan2-cache")
def load_pkl(file_or_url):
    """Deserialize a pickle stored at a local path or URL.

    Uses latin1 decoding so Python 2 era pickles load cleanly.
    """
    with open_file_or_url(file_or_url) as stream:
        return pickle.load(stream, encoding = "latin1")
def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False,
               resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None:
    """Prepare a run directory and launch ``run_func_name`` on the chosen target.

    # Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.
    # create_newdir: enforces the creation of a new run directory
    # resume: resumes a prior experiment using its existing run directory
    # load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters

    NOTE(review): annotated ``-> None`` but the last statement returns
    ``farm.submit(...)``'s result — verify the intended return contract.
    """
    # Work on a private copy so the caller's config object is never mutated.
    submit_config = copy.deepcopy(submit_config)
    submit_target = submit_config.submit_target
    farm = None
    if submit_target == SubmitTarget.LOCAL:
        farm = internal.local.Target()
    assert farm is not None # unknown target
    # Disallow submitting jobs with zero num_gpus
    if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):
        raise RuntimeError("submit_config.num_gpus must be set to a non-zero value")
    if submit_config.user_name is None:
        submit_config.user_name = get_user_name()
    submit_config.run_func_name = run_func_name
    submit_config.run_func_kwargs = run_func_kwargs
    #--------------------------------------------------------------------
    # Prepare submission by populating the run dir
    #--------------------------------------------------------------------
    host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir)
    # Task name must satisfy Docker's container-name rules (validated below).
    submit_config.task_name = "{}-{:05d}-{}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
    docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
    if not re.match(docker_valid_name_regex, submit_config.task_name):
        raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name)
    # Farm specific preparations for a submit
    farm.finalize_submit_config(submit_config, host_run_dir)
    # In case of resumption, load_config = True to load the prior submit_config file from the directory
    # (so to maintain the original configuration of the experiment rather than the newly provided
    # command-line arguments.
    if load_config:
        config_file = os.path.join(host_run_dir, "submit_config.pkl")
        if os.path.exists(config_file):
            old_submit_config = submit_config
            submit_config = load_pkl(config_file)
            # Carry over the identifiers of the current invocation; everything
            # else comes from the pickled (original) configuration.
            submit_config["run_id"] = old_submit_config["run_id"]
            submit_config["run_name"] = old_submit_config["run_name"]
            # Resume checkpoint/kimg, when given on this invocation, override the stored ones.
            if "resume_pkl" in old_submit_config["run_func_kwargs"]:
                submit_config["run_func_kwargs"]["resume_pkl"] = old_submit_config["run_func_kwargs"]["resume_pkl"]
                submit_config["run_func_kwargs"]["resume_kimg"] = old_submit_config["run_func_kwargs"]["resume_kimg"]
    _populate_run_dir(submit_config, host_run_dir)
    return farm.submit(submit_config, host_run_dir)
| 43.337176
| 238
| 0.691847
| 2,135
| 15,038
| 4.62623
| 0.196253
| 0.095981
| 0.054672
| 0.028855
| 0.273666
| 0.190341
| 0.122709
| 0.048598
| 0.035233
| 0.020047
| 0
| 0.003058
| 0.217117
| 15,038
| 346
| 239
| 43.462428
| 0.835896
| 0.324644
| 0
| 0.110577
| 0
| 0.004808
| 0.082804
| 0.007248
| 0
| 0
| 0
| 0
| 0.014423
| 1
| 0.072115
| false
| 0
| 0.091346
| 0
| 0.288462
| 0.028846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97f4f2077af2e6d4198d160e8fea133c49dee89
| 4,187
|
py
|
Python
|
pyecharts/custom/grid.py
|
zilong305/pycharts
|
6cf1bb7f17001a36da6a766615a78b1dbef5918f
|
[
"MIT"
] | null | null | null |
pyecharts/custom/grid.py
|
zilong305/pycharts
|
6cf1bb7f17001a36da6a766615a78b1dbef5918f
|
[
"MIT"
] | null | null | null |
pyecharts/custom/grid.py
|
zilong305/pycharts
|
6cf1bb7f17001a36da6a766615a78b1dbef5918f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
from pyecharts.option import grid
class Grid(object):
    """Lay several pyecharts charts out on one canvas via ECharts grid components.

    The first chart added becomes the host; every later chart's series, axes,
    legend and title are merged into the host's option dict.
    """

    def __init__(self):
        # Host chart (first one added); its _option dict accumulates everything.
        self._chart = None
        self._js_dependencies = set()

    def add(self, chart,
            grid_width=None,
            grid_height=None,
            grid_top=None,
            grid_bottom=None,
            grid_left=None,
            grid_right=None):
        """
        :param chart:
            chart instance
        :param grid_width:
            Width of grid component. Adaptive by default.
        :param grid_height:
            Height of grid component. Adaptive by default.
        :param grid_top:
            Distance between grid component and the top side of the container.
        :param grid_bottom:
            Distance between grid component and the bottom side of the container.
        :param grid_left:
            Distance between grid component and the left side of the container.
        :param grid_right:
            Distance between grid component and the right side of the container.
        :return:
        """
        if self._chart is None:
            # First chart: adopt it as the host and attach an empty grid list.
            self._chart = chart
            self._chart._option.update(grid=[])
            self._js_dependencies = chart._js_dependencies
            _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)
            if _grid:
                # One grid entry per series of the host chart.
                for _ in range(len(self._chart._option.get('series'))):
                    self._chart._option.get('grid').append(_grid)
        else:
            _series = (
                chart._option.get('series'),
                chart._option.get('xAxis', None),
                chart._option.get('yAxis', None),
                chart._option.get('legend')[0],
                chart._option.get('title')[0]
            )
            _index, _index_once, _xaxis, _yaxis, _legend, _title = self.__custom(_series)
            self._chart._option.get('legend').append(_legend)
            self._chart._option.get('title').append(_title)
            # BUG FIX: original read `if _xaxis and _yaxis is not None`, which
            # parses as `_xaxis and (_yaxis is not None)`; the intent is that
            # both axes exist before wiring them to the new grid index.
            if _xaxis is not None and _yaxis is not None:
                try:
                    _xaxis[0].update(gridIndex=_index - 1)
                    _yaxis[0].update(gridIndex=_index - 1)
                    self._chart._option.get('xAxis').append(_xaxis[0])
                    self._chart._option.get('yAxis').append(_yaxis[0])
                except Exception:
                    # Charts without a conventional cartesian axis layout are skipped.
                    pass
            # indexflag is only identify for every series
            _flag = self._chart._option.get('series')[0].get('indexflag')
            _series_index = 0
            for s in self._chart._option.get('series'):
                if _flag == s.get('indexflag'):
                    s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)
                else:
                    _series_index += 1
                    s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)
                _flag = s.get('indexflag')
            _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)
            for _ in range(_index_once):
                self._chart._option.get('grid').append(_grid)
            # BUG FIX: was `self._js_dependencies.union(...)` — set.union returns
            # a NEW set and the result was discarded, so dependencies were never
            # merged. update() mutates in place.
            self._js_dependencies.update(chart._js_dependencies)

    def __custom(self, series):
        """Append the incoming chart's series to the host chart.

        :param series:
            tuple of (series list, xAxis, yAxis, first legend, first title)
        :return:
            (total series count, count just added, xAxis, yAxis, legend, title)
        """
        _series, _xaxis, _yaxis, _legend, _title = series
        for s in _series:
            self._chart._option.get('series').append(s)
        return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title

    def render(self, path="render.html"):
        """Render the combined chart to an HTML file at *path*."""
        self._chart.render(path)

    def render_embed(self):
        """Return the combined chart as an embeddable HTML snippet."""
        return self._chart.render_embed()

    def show_config(self):
        """Pretty-print the merged ECharts option dict (debug aid)."""
        import pprint
        return pprint.pprint(self._chart._option)

    @property
    def chart(self):
        """The underlying host chart instance."""
        return self._chart

    def _repr_html_(self):
        """Jupyter rich-display hook; delegates to the host chart."""
        return self._chart._repr_html_()
| 31.961832
| 100
| 0.540482
| 449
| 4,187
| 4.721604
| 0.195991
| 0.089151
| 0.10566
| 0.093396
| 0.430189
| 0.300472
| 0.172642
| 0.142453
| 0.057547
| 0.057547
| 0
| 0.004449
| 0.355863
| 4,187
| 130
| 101
| 32.207692
| 0.781609
| 0.1722
| 0
| 0.115942
| 0
| 0
| 0.039253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0.014493
| 0.028986
| 0
| 0.231884
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b97f78c59a8296809ae879f2d6f8355b0f8c52d0
| 4,588
|
py
|
Python
|
smooch/conversations.py
|
devinmcgloin/smooch
|
c9561c3e7f1546efc58daa472b70f738d0d35e13
|
[
"MIT"
] | 3
|
2016-07-04T12:02:03.000Z
|
2017-03-20T19:39:36.000Z
|
smooch/conversations.py
|
devinmcgloin/smooch
|
c9561c3e7f1546efc58daa472b70f738d0d35e13
|
[
"MIT"
] | 41
|
2019-05-28T09:54:04.000Z
|
2020-02-20T05:34:19.000Z
|
smooch/conversations.py
|
devinmcgloin/smooch
|
c9561c3e7f1546efc58daa472b70f738d0d35e13
|
[
"MIT"
] | 2
|
2016-07-20T14:31:45.000Z
|
2016-11-18T12:19:38.000Z
|
import logging
from .endpoint import ask
def send_message(user_id, message, sent_by_maker=True):
    """Post a plain text message into the user's conversation."""
    if not valid_args(user_id, message):
        logging.warning("send message called with invalid args user_id={} message={}".format(user_id, message))
        return
    logging.debug("Sending message: user_id={0} message={1} sent_by_maker={2}".format(user_id, message, sent_by_maker))
    # Message attribution: app maker by default, otherwise the app user.
    role = "appMaker" if sent_by_maker else "appUser"
    payload = {"text": message, "role": role}
    return ask('appusers/{0}/conversation/messages'.format(user_id), payload, 'post')
def get_conversation(user_id):
    """Fetch the full conversation history for *user_id*."""
    if user_id:
        logging.debug("Get conversation: user_id={}".format(user_id))
        return ask('appusers/{0}/conversation'.format(user_id), {}, 'get')
    logging.warning("get conversation called with invalid arg user_id={}".format(user_id))
    return None
def request_payment(user_id, message, options):
    """Send a message carrying "buy" buttons.

    Note that amount is a integer which specifies the amount of cents in the
    transaction; Smooch will default to the currency specified in your
    account settings. *options* holds (short_text, amount) pairs.
    """
    if not valid_args(user_id, message, options):
        logging.warning("request payment called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return
    buttons = [{"type": "buy", "text": short_text, "amount": result}
               for short_text, result in options]
    payload = {"text": message, "role": "appMaker", "actions": buttons}
    return ask('appusers/{0}/conversation/messages'.format(user_id), payload, 'post')
def send_links(user_id, message, options):
    """Sends a series of links. The options field holds (description, uri)
    pairs rendered as link buttons."""
    if not valid_args(user_id, message, options):
        logging.warning("send links called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return
    buttons = [{"type": "link", "text": short_text, "uri": result}
               for short_text, result in options]
    payload = {"text": message, "role": "appMaker", "actions": buttons}
    return ask('appusers/{0}/conversation/messages'.format(user_id), payload, 'post')
def send_postbacks(user_id, message, options):
    """Sends a series of options that you can listen for on your webhook.

    *options* holds (description, postback_payload) pairs. You need to set up
    a webhook to listen for the postback.
    """
    if not valid_args(user_id, message, options):
        logging.warning("send postback called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return
    buttons = [{"type": "postback", "text": short_text, "payload": result}
               for short_text, result in options]
    payload = {"text": message, "role": "appMaker", "actions": buttons}
    return ask('appusers/{0}/conversation/messages'.format(user_id), payload, 'post')
def send_buttons(user_id, message, options):
    """Options is a list of tuples in which the first element is the short
    text, second the type of the button, and third the result for the
    specified type."""
    if not valid_args(user_id, message, options):
        logging.warning("send buttons called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return
    buttons = [{"type": kind, "text": text, "payload": result}
               for text, kind, result in options]
    payload = {"text": message, "role": "appMaker", "actions": buttons}
    return ask('appusers/{0}/conversation/messages'.format(user_id), payload, 'post')
def valid_args(user_id, message, options=None):
    """Return True when the argument combination is usable for an API call.

    ``user_id`` and ``message`` must both be truthy. When *options* is given
    it must additionally be a non-empty list.

    Improvements over the original: guard clauses instead of nested branches,
    and ``isinstance`` instead of ``type(...) is list`` so list subclasses are
    accepted (backward-compatible generalization).
    """
    if not (user_id and message):
        return False
    if options is None:
        return True
    return isinstance(options, list) and bool(options)
| 30.586667
| 129
| 0.598954
| 556
| 4,588
| 4.829137
| 0.194245
| 0.082682
| 0.106518
| 0.126629
| 0.640223
| 0.609311
| 0.550838
| 0.528119
| 0.502793
| 0.502793
| 0
| 0.002766
| 0.290759
| 4,588
| 149
| 130
| 30.791946
| 0.822372
| 0.13993
| 0
| 0.660194
| 0
| 0
| 0.225377
| 0.049885
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067961
| false
| 0
| 0.019417
| 0
| 0.242718
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b980ab008a2dab6e2778edec1d7d9e24b2315a73
| 1,086
|
py
|
Python
|
cifar/evalit.py
|
Sharkbyteprojects/IRIS-ML_and_Deep-Learning
|
f0e053cf7a0e69019bbba36e6da3e60d76105fe9
|
[
"MIT"
] | null | null | null |
cifar/evalit.py
|
Sharkbyteprojects/IRIS-ML_and_Deep-Learning
|
f0e053cf7a0e69019bbba36e6da3e60d76105fe9
|
[
"MIT"
] | null | null | null |
cifar/evalit.py
|
Sharkbyteprojects/IRIS-ML_and_Deep-Learning
|
f0e053cf7a0e69019bbba36e6da3e60d76105fe9
|
[
"MIT"
] | null | null | null |
import keras
from keras.models import load_model
from PIL import Image
import matplotlib.pylab as plt
import numpy as np
import zipfile
# Unpack the bundled test images (asset/*.jpg) into the working directory.
print("Extract")
zip_ref = zipfile.ZipFile("./asset.zip", 'r')
zip_ref.extractall(".")
zip_ref.close()
# Load the pre-trained CIFAR-10 Keras model from disk.
print("Load Model")
model=load_model("cifar-model.h5")
# Class labels in the order of the model's output units.
CIFAR_10_CLASSES=["Plane","Car","bird","cat","deer","dog","frog","horse","ship","truck"]
def calc(imname):
    """Classify asset/<imname> with the CIFAR-10 model and display the image
    with the predicted class as the plot title."""
    test_image =Image.open("asset/"+imname)
    # Model expects 32x32 RGB input scaled to [0, 1].
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 — confirm the
    # pinned Pillow version or switch to Image.LANCZOS.
    test_image=test_image.resize((32,32),Image.ANTIALIAS)
    test_image=np.array(test_image,dtype="float32")
    test_image/=255
    test_image=test_image.reshape(-1,32,32,3)
    predictions=model.predict(test_image)
    index_max_pred=np.argmax(predictions)
    plt.title("Complete: {}".format(CIFAR_10_CLASSES[index_max_pred]))
    plt.imshow(test_image[0].reshape(32,32,3))
    print(predictions)
    plt.show()
# Run the classifier over the bundled sample images (each call blocks on plt.show()).
print("START TEST")
calc("lkw-image.jpg")
calc("cat.jpg")
calc("frog.jpg")
calc("fog.jpg")
calc("lfog.jpg")
calc("d.jpg")
calc("b.jpg")
calc("bs.jpg")
calc("plapper.jpg")
calc("ds.jpg")
print("Complete")
print("End")
quit(0)
| 27.15
| 88
| 0.710866
| 172
| 1,086
| 4.354651
| 0.44186
| 0.12016
| 0.037383
| 0.048064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.096685
| 1,086
| 39
| 89
| 27.846154
| 0.735984
| 0
| 0
| 0
| 0
| 0
| 0.189687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.153846
| 0
| 0.179487
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98238142a5e4442e3c9fdd220f6bde9274299de
| 570
|
py
|
Python
|
TwitterImage2JPG.py
|
Tymec/Playground
|
5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
|
[
"MIT"
] | null | null | null |
TwitterImage2JPG.py
|
Tymec/Playground
|
5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
|
[
"MIT"
] | null | null | null |
TwitterImage2JPG.py
|
Tymec/Playground
|
5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
|
[
"MIT"
] | 1
|
2019-02-19T10:32:07.000Z
|
2019-02-19T10:32:07.000Z
|
import glob
import os
def main():
    """Rename Twitter-style image downloads in F:/Downloads to plain .jpg.

    Twitter saves images with suffixes like ``.jpg_large``; this strips the
    suffix and appends ``.jpg`` for every matching file in the directory.
    """
    os.chdir("F:/Downloads")
    extensions = ["*.jpg_large", "*.png_large", "*.jpg_orig"]
    file_list = list()
    for extension in extensions:
        file_list = file_list + glob.glob(extension)
    for file in file_list:
        for extension in extensions:
            new_extension = extension.replace('*', '')
            if file.endswith(new_extension):
                new_name = file.replace(new_extension, '') + ".jpg"
                os.rename(file, new_name)
    print("Done!")


# BUG FIX: the original guard was `if __name__ == __name__:`, which compares
# the variable to itself and is therefore always True — main() ran even on
# import. The canonical check compares against the string "__main__".
if __name__ == "__main__":
    main()
| 22.8
| 67
| 0.585965
| 67
| 570
| 4.686567
| 0.38806
| 0.101911
| 0.101911
| 0.11465
| 0.178344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278947
| 570
| 24
| 68
| 23.75
| 0.76399
| 0
| 0
| 0.117647
| 0
| 0
| 0.094737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b982943f0b8c226209550f8c7f62a0e03d0b5ff5
| 6,405
|
py
|
Python
|
Data Analysis/classification.py
|
Riccardo95Facchini/DIL-2019
|
febeda55fd647943a1b8c49b3c5192fcd69fdaf5
|
[
"MIT"
] | null | null | null |
Data Analysis/classification.py
|
Riccardo95Facchini/DIL-2019
|
febeda55fd647943a1b8c49b3c5192fcd69fdaf5
|
[
"MIT"
] | null | null | null |
Data Analysis/classification.py
|
Riccardo95Facchini/DIL-2019
|
febeda55fd647943a1b8c49b3c5192fcd69fdaf5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
# EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB (bank marketing dataset, ';'-separated)
input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv'
dataset = pd.read_csv(input_file, sep=';', header = 0)
dataset.head()
# DELETE NEXT-CALLS DATA: drop columns only known after/about the call itself,
# which would leak information into the prediction task.
dataset = dataset.drop("contact", axis=1)
dataset = dataset.drop("day", axis=1)
dataset = dataset.drop("month", axis=1)
dataset = dataset.drop("duration", axis=1)
dataset = dataset.drop("campaign", axis=1)
dataset = dataset.drop("pdays", axis=1)
dataset = dataset.drop("previous", axis=1)
dataset = dataset.drop("poutcome", axis=1)
dataset.head()
# FEATURE ENGINEERING: map categorical strings to integer codes.
cleanup_nums = {"marital":     {"married": 1, "single": 0, "divorced":-1},
                "education": {"primary": 1, "secondary": 2, "tertiary": 3},
                "default":     {"yes": 1, "no": 0},
                "housing":     {"yes": 1, "no": 0},
                "loan":     {"yes": 1, "no": 0},
                "y":     {"yes": 1, "no": 0}}
dataset.replace(cleanup_nums, inplace=True)
dataset.head()
dataset.dtypes
# Drop rows with unknown job/education, then make education a proper int column.
dataset = dataset[dataset.job != 'unknown']
dataset = dataset[dataset.education != 'unknown']
dataset['education'] = dataset['education'].astype(int)
# CORRELATION MATRIX (heatmap of pairwise feature correlations)
plt.figure(figsize=(12,10))
cor = dataset.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()
# CLASSIFICATION: first 7 columns are features, column 7 is the target y;
# one-hot encode the remaining categorical "job" column.
X = dataset.iloc[:, 0:7]
y = dataset.iloc[:, 7]
X = pd.get_dummies(X, columns=["job"], prefix=["job"])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# DECISION TREE (baseline, unbalanced data)
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
clf_dt = DecisionTreeClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)
plt.hist(esito)
# RANDOM FOREST (baseline, unbalanced data)
from sklearn.ensemble import RandomForestClassifier
clf_dt = RandomForestClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)
plt.hist(esito)
# K-NEAREST NEIGHBOURS
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# TRAINING - TEST split (fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# SCALING: KNN is distance-based, so features must be standardized.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# FITTING: 5 neighbours, Minkowski metric with p=2 (Euclidean distance)
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train)
# PREDICTION
y_pred = classifier.predict(X_test)
# CONFUSION MATRIX
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, y_pred,target_names=target_names))
print(cm)
plt.hist(y_pred)
# UNDERSAMPLING: rebalance classes by downsampling the majority class.
from sklearn.utils import resample
dataset_sample = pd.get_dummies(dataset, columns=["job"], prefix=["job"])
# SPLIT FEATURE AND TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
# TRAIN TEST (resampling is applied to the training portion only)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
X = pd.concat([X_train, y_train], axis=1)
# SELECTING TARGET CLASSES
not_sub = X[X.y==0]
sub = X[X.y==1]
not_sub_downsampled = resample(not_sub,
                               replace = False,
                               n_samples = len(sub),
                               random_state = 27)
# COMBINE MINORITY AND DOWNSAMPLED MAJORITY
downsampled = pd.concat([not_sub_downsampled, sub])
# DECISION TREE on the balanced training set
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)
clf_dt = DecisionTreeClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
# RANDOM FOREST on the balanced training set
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)
clf_dt = RandomForestClassifier()
clt_dt = clf_dt.fit(X_train,y_train)
esito = clf_dt.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))
# SMOTE - DECISION TREE: synthetic oversampling of the minority class.
from imblearn.over_sampling import SMOTE
# SPLIT FEATURE TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
# TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# SMOTE — NOTE(review): the `ratio` argument is deprecated/removed in newer
# imbalanced-learn releases (renamed `sampling_strategy`); confirm pinned version.
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)
clf_dt = DecisionTreeClassifier()
# FIT
smote = clf_dt.fit(X_train,y_train)
# PREDICTION
smote_pred = smote.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))
# SMOTE - RANDOM FOREST
from imblearn.over_sampling import SMOTE
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)
clf_dt = RandomForestClassifier()
smote = clf_dt.fit(X_train,y_train)
smote_pred = smote.predict(X_test)
target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))
# RECAP on RECALL: hard-coded scores from the runs above (DT vs RF per strategy)
x = np.arange(3)
plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='upper right')
# RECAP on F1: hard-coded scores from the runs above
x = np.arange(3)
plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='lower right')
| 25.722892
| 98
| 0.721624
| 986
| 6,405
| 4.497972
| 0.217039
| 0.02841
| 0.01894
| 0.032469
| 0.584893
| 0.519278
| 0.49876
| 0.49876
| 0.487486
| 0.487486
| 0
| 0.020279
| 0.137705
| 6,405
| 249
| 99
| 25.722892
| 0.782727
| 0.081343
| 0
| 0.554745
| 0
| 0.007299
| 0.085909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153285
| 0
| 0.153285
| 0.072993
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b982c2b4e976b723dfa3208c1bc1e4ea51b77ac9
| 5,562
|
py
|
Python
|
tools/c7n_azure/tests/test_route_table.py
|
anastasiia-zolochevska/cloud-custodian
|
f25315a01bec808c16ab0e2d433d6151cf5769e4
|
[
"Apache-2.0"
] | 2
|
2020-01-20T19:46:28.000Z
|
2020-08-19T14:20:27.000Z
|
tools/c7n_azure/tests/test_route_table.py
|
anastasiia-zolochevska/cloud-custodian
|
f25315a01bec808c16ab0e2d433d6151cf5769e4
|
[
"Apache-2.0"
] | 79
|
2019-03-20T12:27:06.000Z
|
2019-08-14T14:07:04.000Z
|
tools/c7n_azure/tests/test_route_table.py
|
anastasiia-zolochevska/cloud-custodian
|
f25315a01bec808c16ab0e2d433d6151cf5769e4
|
[
"Apache-2.0"
] | 2
|
2019-04-22T15:20:23.000Z
|
2019-08-27T12:37:51.000Z
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure_common import BaseTest, arm_template
class RouteTableTest(BaseTest):
    """Cloud Custodian policy tests for the ``azure.routetable`` resource.

    The ARM template 'route-table-and-vnet.json' provisions one route table
    associated with the "allowed" subnet (and not the "disallowed" one);
    these tests assert that value filters can find and qualify it.
    """

    # Fixture resource names, matching the ARM template.
    route_table_name = 'cctestroutetable'
    vnet_name = 'ccroutetablevnet'
    allowed_subnet_name = 'cctestsubnet1'
    disallowed_subnet_name = 'cctestsubnet2'

    @staticmethod
    def _subnet_id_suffix(subnet):
        # Tail of an Azure subnet resource id: "<vnet>/subnets/<subnet>".
        return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)

    def test_route_table_schema_validate(self):
        # The policy schema itself must validate without contacting Azure.
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-route-table',
                'resource': 'azure.routetable'
            }, validate=True)
            self.assertTrue(p)

    @arm_template('route-table-and-vnet.json')
    def test_find_route_table_by_name(self):
        # A simple name equality filter should locate exactly the fixture table.
        p = self.load_policy({
            'name': 'test-find-route-table-by-name',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                }
            ]
        })
        resources = p.run()
        self._assert_only_route_table_in_resources(resources)

    @arm_template('route-table-and-vnet.json')
    def test_detect_route_table_is_routing_to_correct_subnet(self):
        # JMESPath filter: the subnets list must contain an id ending with the
        # allowed subnet's suffix.
        p = self.load_policy({
            'name': 'test-detect-route-table-is-routing-to-correct-subnet',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                },
                {
                    'type': 'value',
                    'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
                        RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
                    ),
                    'value': 'not-null'
                }
            ]
        })
        resources = p.run()
        self._assert_only_route_table_in_resources(resources)

    @arm_template('route-table-and-vnet.json')
    def test_detect_route_table_not_routing_to_incorrect_subnet(self):
        # Same shape as above but against the disallowed subnet: the filter
        # must match nothing.
        p = self.load_policy({
            'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                },
                {
                    'type': 'value',
                    'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
                        RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)
                    ),
                    'value': 'not-null'
                }
            ]
        })
        resources = p.run()
        self.assertEqual(len(resources), 0, "A route table is routing to a disallowed subnet")

    @arm_template('route-table-and-vnet.json')
    def test_detect_route_only_routes_to_specific_subnets(self):
        # Routes to the allowed subnet AND has exactly one subnet overall.
        p = self.load_policy({
            'name': 'test-detect-route-only-routes-to-specific-subnets',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                },
                {
                    'type': 'value',
                    'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
                        RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
                    ),
                    'value': 'not-null'
                },
                {
                    'type': 'value',
                    'key': 'length(properties.subnets)',
                    'op': 'eq',
                    'value': 1
                }
            ]
        })
        resources = p.run()
        self._assert_only_route_table_in_resources(resources)

    def _assert_only_route_table_in_resources(self, resources):
        # Shared assertion: exactly the fixture route table, wired to exactly
        # the allowed subnet.
        self.assertEqual(len(resources), 1, "Only one route table should be found")
        route_table = resources[0]
        self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),
                         "The wrong route table was found")
        properties = route_table.get('properties')
        self.assertIsNotNone(properties, "Missing properties")
        subnets = properties.get('subnets')
        self.assertIsNotNone(subnets, "Missing subnets")
        self.assertEqual(1, len(subnets), "There should only be one subnet")
        subnet = subnets[0]
        self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), "Incorrect subnet")
| 35.426752
| 95
| 0.538655
| 548
| 5,562
| 5.260949
| 0.257299
| 0.097121
| 0.033299
| 0.026015
| 0.515088
| 0.507804
| 0.475199
| 0.465834
| 0.447104
| 0.379813
| 0
| 0.006319
| 0.345559
| 5,562
| 156
| 96
| 35.653846
| 0.785714
| 0.101223
| 0
| 0.504202
| 0
| 0
| 0.22347
| 0.08666
| 0
| 0
| 0
| 0
| 0.10084
| 1
| 0.058824
| false
| 0
| 0.008403
| 0.008403
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98531b0567b9e4719006397ec461d3fa4999e4b
| 11,730
|
py
|
Python
|
proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
|
pkthein/sparts_all_fam
|
ff162e4ea8c3919a197dc0cc13fde6b32da113c7
|
[
"Apache-2.0"
] | 1
|
2019-04-03T18:31:36.000Z
|
2019-04-03T18:31:36.000Z
|
proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
|
pkthein/sparts_all_fam
|
ff162e4ea8c3919a197dc0cc13fde6b32da113c7
|
[
"Apache-2.0"
] | null | null | null |
proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
|
pkthein/sparts_all_fam
|
ff162e4ea8c3919a197dc0cc13fde6b32da113c7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Intel Corporation
# Copyright 2017 Wind River
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
################################################################################
# LIBRARIES & DEPENDENCIES #
################################################################################
import hashlib
import logging
import json
from collections import OrderedDict
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.processor.handler import TransactionHandler
LOGGER = logging.getLogger(__name__)
################################################################################
# HANDLER OBJ #
################################################################################
class ArtifactTransactionHandler:
    """
    Class for handling the Transaction Family : Artifact

    Attributes:
        namespace_prefix (str): The namespace prefix of the transaction family
    """

    def __init__(self, namespace_prefix):
        """
        Constructs the ArtifactTransactionHandler object.

        Args:
            namespace_prefix (str):
                The namespace prefix of the transaction family
        """
        self._namespace_prefix = namespace_prefix

    @property
    def family_name(self):
        """str: The family name of the handler object."""
        return "artifact"

    @property
    def family_versions(self):
        """list of str: The family versions supported by the handler object."""
        return ["1.0"]

    @property
    def encodings(self):
        """list of str: The encoding scheme used for the payload data."""
        return ["csv-utf8"]

    @property
    def namespaces(self):
        """list of str: The namespaces associated with the handler object."""
        return [self._namespace_prefix]

    ############################################################################
    #                                FUNCTIONS                                 #
    ############################################################################
    def apply(self, transaction, context):
        """
        Applies the payload from transaction onto the state storage.

        Args:
            transaction (Transaction): The transaction pertaining the payload
            context (State): The current state of the ledger

        Returns:
            type: State
            The new state of the ledger, which includes the data from the
            transaction, is returned to be stored on the state storage.

        Raises:
            InvalidTransaction:
                * If deserialization for payload from transaction failed
                * If "create" was called on non-unique uuid
                * If "amend" was called on non-existing uuid
                * If "Add..." were called on non-existing uuid
                * If invalid operation was called
            InternalError:
                * If deserialization of State.data failed
        """
        # Parsing required fields from transaction payload.  A missing key
        # (KeyError) is just as malformed as invalid JSON (ValueError), so
        # both are reported as an invalid payload instead of escaping as an
        # unhandled exception.
        try:
            payload = json.loads(transaction.payload.decode())
            artifact_id = payload["uuid"]
            artifact_alias = payload["alias"]
            artifact_name = payload["name"]
            artifact_type = payload["content_type"]
            artifact_checksum = payload["checksum"]
            artifact_label = payload["label"]
            artifact_openchain = payload["openchain"]
            action = payload["action"]
            prev = payload["prev_block"]
            cur = payload["cur_block"]
            timestamp = payload["timestamp"]
            artifact_list = payload["artifact_list"]
            uri_list = payload["uri_list"]
        except (ValueError, KeyError):
            raise InvalidTransaction("Invalid payload serialization")

        # Soft sanity check and loading required data
        validate_transaction(artifact_id, action)
        data_address = make_artifact_address(self._namespace_prefix,
                                                artifact_id)
        state_entries = context.get_state([data_address])

        # Hard sanity check before creating final payload for the state storage
        if len(state_entries) != 0:
            try:
                stored_artifact = json.loads(state_entries[0].data.decode())
                stored_artifact_id = stored_artifact["uuid"]
            except ValueError:
                raise InternalError("Failed to deserialize data.")
        else:
            stored_artifact_id = stored_artifact = None

        if action == "create":
            # "create" demands a brand-new uuid.
            if stored_artifact_id is not None:
                raise InvalidTransaction(
                    "Invalid Action-artifact already exists.")
            artifact = create_artifact(artifact_id, artifact_alias,
                        artifact_name, artifact_type, artifact_checksum,
                        artifact_label, artifact_openchain,
                        prev, cur, timestamp)
        else:
            # "amend", "AddArtifact" and "AddURI" all require an existing
            # artifact.  NOTE: the previous version silently fell through for
            # "amend" on a missing uuid, leaving `artifact` unbound and
            # crashing with NameError below; now it raises as documented.
            if stored_artifact_id is None:
                raise InvalidTransaction(
                    "Invalid Action-requires an existing artifact."
                )
            artifact = create_artifact(artifact_id, artifact_alias,
                        artifact_name, artifact_type, artifact_checksum,
                        artifact_label, artifact_openchain,
                        prev, cur, timestamp, artifact_list, uri_list)

        # Adding the final payload to the state storage
        data = json.dumps(artifact).encode()
        addresses = context.set_state({data_address: data})
        return addresses
################################################################################
# HELPER FUNCTIONS #
################################################################################
def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,
                    artifact_checksum, artifact_label, artifact_openchain,
                    prev, cur, timestamp, artifact_list=None, uri_list=None):
    """
    Constructs the payload to be stored in the state storage.

    Args:
        artifact_id (str): The uuid of the artifact
        artifact_alias (str): The alias of the artifact
        artifact_name (str): The name of the artifact
        artifact_type (str): The type of the artifact
        artifact_checksum (str): The checksum of the artifact
        artifact_label (str): The label of the artifact
        artifact_openchain (str): The openchain of the artifact
        prev (str): The previous block id of the transaction (default "0")
        cur (str): The current block id of the transaction
        timestamp (str): The UTC time for when the transaction was submitted
        artifact_list (list of dict):
            The list of the artifact uuid associated with the artifact
            (default None, stored as [])
        uri_list (list of dict):
            The list of the uri associated with the artifact
            (default None, stored as [])

    Returns:
        type: dict
        The dictionary pertaining all the params is created and returned to
        be stored on the state storage.
    """
    # None sentinels instead of mutable default arguments: a shared `[]`
    # default would be referenced by every returned payload and a mutation
    # through one payload would silently corrupt all later ones.
    return {
        "uuid"          : artifact_id,
        "alias"         : artifact_alias,
        "name"          : artifact_name,
        "content_type"  : artifact_type,
        "checksum"      : artifact_checksum,
        "label"         : artifact_label,
        "openchain"     : artifact_openchain,
        "prev_block"    : prev,
        "cur_block"     : cur,
        "timestamp"     : timestamp,
        "artifact_list" : artifact_list if artifact_list is not None else [],
        "uri_list"      : uri_list if uri_list is not None else []
    }
def validate_transaction(artifact_id, action):
    """
    Performs a soft sanity check in order to improve runtime by eliminating
    the obvious exception errors.

    Args:
        artifact_id (str): The uuid of the artifact
        action (str): The command to be performed

    Raises:
        InvalidTransaction:
            If the uuid or the action are not passed in or the
            action is not a valid action.
    """
    allowed_actions = {"create", "amend", "AddArtifact", "AddURI"}
    if not artifact_id:
        raise InvalidTransaction("Artifact ID is required")
    if not action:
        raise InvalidTransaction("Action is required")
    if action not in allowed_actions:
        raise InvalidTransaction("Invalid action: {}".format(action))
def make_artifact_address(namespace_prefix, artifact_id):
    """
    Creates an artifact address which will be used to recover the associated
    UUID if the artifact already exists in the state storage; or, used as a
    key to store the new data into the state storage.

    Args:
        namespace_prefix (str):
            The prefix associating with the transaction family
        artifact_id (str): The uuid of the artifact

    Returns:
        type: str
        The address-to-be, which associates the uuid and the namespace prefix.
    """
    # First 64 hex characters of the uuid's SHA-512 digest, appended to the
    # family's namespace prefix.
    uuid_digest = hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()
    return "{}{}".format(namespace_prefix, uuid_digest[:64])
def _display(msg):
    """
    Logs the message to the debug logger, framed in a simple ASCII box.

    Args:
        msg (str): The message that is to be logged into the debug logger
    """
    # Multi-line messages are boxed to the width of their longest line.
    lines = msg.split("\n") if "\n" in msg else [msg]
    width = max(len(line) for line in lines)
    border = "+" + "-" * (width + 2) + "+"
    LOGGER.debug(border)
    for line in lines:
        LOGGER.debug("+ " + line.center(width) + " +")
    LOGGER.debug(border)
################################################################################
# #
################################################################################
| 39.897959
| 82
| 0.521313
| 1,107
| 11,730
| 5.400181
| 0.229449
| 0.016728
| 0.021746
| 0.021077
| 0.254433
| 0.175811
| 0.150217
| 0.1273
| 0.095851
| 0.095851
| 0
| 0.003494
| 0.341262
| 11,730
| 293
| 83
| 40.03413
| 0.770157
| 0.370247
| 0
| 0.179487
| 0
| 0
| 0.087176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08547
| false
| 0
| 0.059829
| 0
| 0.213675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9877d896f97460bc5a35787da6277925368bc9f
| 764
|
py
|
Python
|
ReviewsCollector.py
|
fsandx/moodybooks
|
5c13fe43849e4fa861a163c74411e9f796518bc9
|
[
"MIT"
] | null | null | null |
ReviewsCollector.py
|
fsandx/moodybooks
|
5c13fe43849e4fa861a163c74411e9f796518bc9
|
[
"MIT"
] | null | null | null |
ReviewsCollector.py
|
fsandx/moodybooks
|
5c13fe43849e4fa861a163c74411e9f796518bc9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
STEP 2
Takes the list of urls in the json files and downloads the html files to local drive
Start with: scrapy runspider ReviewsCollector.py
"""
import scrapy
import json
class ReviewsCollector(scrapy.Spider):
    """Spider that fetches the review HTML page for every book url listed
    in ``data/books.json`` and saves it under ``data/reviews/``."""

    def start_requests(self):
        # Load the url list produced by the previous pipeline step.
        with open("data/books.json") as source:
            self.data = json.load(source)
        for entry in self.data:
            if (entry['url'] is not None):
                yield scrapy.Request(
                    url=entry['url'],
                    headers={'Referer': 'http://www.google.com/'},
                    callback=self.parse,
                )

    def parse(self, response):
        # Name each saved file after the last path segment of the url.
        filename = response.url.split("/")[-1] + '.html'
        with open('data/reviews/' + filename, 'wb+') as sink:
            sink.write(response.body)
| 29.384615
| 124
| 0.611257
| 104
| 764
| 4.480769
| 0.634615
| 0.034335
| 0.051502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00692
| 0.243456
| 764
| 26
| 125
| 29.384615
| 0.799308
| 0.240838
| 0
| 0
| 0
| 0
| 0.126095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98b6f0b6e5f35ef44fd272ec1f3a99b4d72acf0
| 1,293
|
py
|
Python
|
PolymorphismPYTHON/Polypy.py
|
cadeng23/oop-cjgustafson
|
cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
|
[
"MIT"
] | null | null | null |
PolymorphismPYTHON/Polypy.py
|
cadeng23/oop-cjgustafson
|
cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
|
[
"MIT"
] | null | null | null |
PolymorphismPYTHON/Polypy.py
|
cadeng23/oop-cjgustafson
|
cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
|
[
"MIT"
] | null | null | null |
import random
class Family:
    """A family member identified by first/last name and hair colour, whose
    eye colour is decided randomly (50/50 brown vs. blue)."""

    def __init__(self, first, last, hair):
        self.first = first
        self.last = last
        self.hair = hair

    def fullname(self):
        """Return the member's full name as 'First Last'."""
        return '{} {}'.format(self.first, self.last)

    def eyefind(self):
        """Randomly determine and record the eye colour in ``self.EYES``.

        Using the Punnett square in genetics we know that a donor with blue
        eyes and one with brown makes it 50/50 odds that the child's eyes
        will be brown or blue.
        """
        if random.choice([1, 2]) == 1:
            self.EYES = ("Brown")
        else:
            self.EYES = ("Blue")
        return self.EYES

    def Apply_eyes(self):
        """Copy the determined eye colour onto ``self.eyes``.

        Bug fix: previously this raised AttributeError when ``eyefind`` had
        never been called (``self.EYES`` missing); now the colour is
        determined on demand.
        """
        if not hasattr(self, 'EYES'):
            self.eyefind()
        self.eyes = self.EYES
Daughter = Family('Ashley', 'Smith', 'Brown')
Son = Family('Kevin', 'Smith', 'Brown')
# Bug fix: the eye colour must be determined (eyefind) and applied
# (Apply_eyes) before it can be read -- the original printed Daughter.eyes
# before either attribute existed, raising AttributeError.
Daughter.eyefind()
Daughter.Apply_eyes()
Son.eyefind()
Son.Apply_eyes()
print(Daughter.eyes)
print(Son.eyes)

# with the kids being born it will define what color hair and eyes
# they may randomly get through inheritance
class Kids(Family):
    pass

# Eyes are unknown until randomly determined for each kid; hair colors are
# brown because brown is the dominant hair color.
Daughter = Kids('Danielle', 'Smith', 'Brown')
Son = Kids('Kevin', 'Smith', 'Brown')
Daughter.eyefind()
Daughter.Apply_eyes()
Son.eyefind()
Son.Apply_eyes()
print(Daughter.eyes)
print(Son.eyes)
| 23.089286
| 66
| 0.618716
| 178
| 1,293
| 4.455056
| 0.438202
| 0.050441
| 0.064313
| 0.083228
| 0.147541
| 0.147541
| 0.110971
| 0.110971
| 0.110971
| 0
| 0
| 0.007431
| 0.271462
| 1,293
| 56
| 67
| 23.089286
| 0.834395
| 0.292343
| 0
| 0.193548
| 0
| 0
| 0.085809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0.032258
| 0.032258
| 0.032258
| 0.354839
| 0.193548
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98c3a1636cff18e5244db1f52b8e6e89e2c99b5
| 1,494
|
py
|
Python
|
homeassistant/components/device_tracker/owntracks.py
|
evancohen/home-assistant
|
dafc0ced6b07025c03417d8e7a2c0133b4c622fc
|
[
"MIT"
] | 14
|
2015-11-10T07:57:43.000Z
|
2021-08-29T13:45:26.000Z
|
homeassistant/components/device_tracker/owntracks.py
|
evancohen/home-assistant
|
dafc0ced6b07025c03417d8e7a2c0133b4c622fc
|
[
"MIT"
] | null | null | null |
homeassistant/components/device_tracker/owntracks.py
|
evancohen/home-assistant
|
dafc0ced6b07025c03417d8e7a2c0133b4c622fc
|
[
"MIT"
] | 8
|
2015-11-14T16:40:41.000Z
|
2020-02-17T19:48:08.000Z
|
"""
homeassistant.components.device_tracker.owntracks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OwnTracks platform for the device tracker.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import homeassistant.components.mqtt as mqtt
DEPENDENCIES = ['mqtt']
LOCATION_TOPIC = 'owntracks/+/+'
def setup_scanner(hass, config, see):
    """ Set up a OwnTracksks tracker. """

    def owntracks_location_update(topic, payload, qos):
        """ MQTT message received. """
        # Docs on available data:
        # http://owntracks.org/booklet/tech/json/#_typelocation
        try:
            data = json.loads(payload)
        except ValueError:
            # If invalid JSON
            logging.getLogger(__name__).error(
                'Unable to parse payload as JSON: %s', payload)
            return

        # Only dict payloads tagged as a location update are of interest.
        if not isinstance(data, dict) or data.get('_type') != 'location':
            return

        # Topic shape is owntracks/<user>/<device>.
        segments = topic.split('/')
        kwargs = {
            'dev_id': '{}_{}'.format(segments[1], segments[2]),
            'host_name': segments[1],
            'gps': (data['lat'], data['lon']),
        }
        # Optional fields are forwarded only when present.
        for source_key, target_key in (('acc', 'gps_accuracy'),
                                       ('batt', 'battery')):
            if source_key in data:
                kwargs[target_key] = data[source_key]

        see(**kwargs)

    mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
    return True
| 27.666667
| 74
| 0.582999
| 162
| 1,494
| 5.259259
| 0.561728
| 0.045775
| 0.053991
| 0.075117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003623
| 0.261044
| 1,494
| 53
| 75
| 28.188679
| 0.768116
| 0.287818
| 0
| 0.071429
| 0
| 0
| 0.122959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.107143
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98c6a6e2a07073f4614093d6ae5d6469afd6835
| 48,027
|
py
|
Python
|
src/models/end_to_end_event_coreference.py
|
luyaojie/E3C
|
4b2f33da4629211fd6a3738077794f821c7f7c8b
|
[
"MIT"
] | 2
|
2022-02-20T15:13:11.000Z
|
2022-03-22T03:47:21.000Z
|
src/models/end_to_end_event_coreference.py
|
luyaojie/E3C
|
4b2f33da4629211fd6a3738077794f821c7f7c8b
|
[
"MIT"
] | null | null | null |
src/models/end_to_end_event_coreference.py
|
luyaojie/E3C
|
4b2f33da4629211fd6a3738077794f821c7f7c8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by Roger on 2019-09-10
# Mostly by AllenNLP
import logging
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Pruner
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder
from allennlp.modules.similarity_functions import DotProductSimilarity
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import Average
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from src.metrics.event_coref_scores import EventCorefScores
from src.metrics.mention_f1 import TopSpanMentionTypeF1
from src.utils.cluster_decoding_utils import node_decode
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("end-to-end-event-coreference")
class End2EndEventCoreferenceResolver(Model):
"""
This ``Model`` implements the coreference resolution model described "End-to-end Neural
Coreference Resolution"
<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>
by Lee et al., 2017.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward: ``FeedForward``
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
max_span_width: ``int``
The maximum width of candidate spans.
spans_per_word: float, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: int, required.
For each mention which survives the pruning stage, we consider this many antecedents.
lexical_dropout: ``int``
The probability of dropping out dimensions of the embedded text.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 mention_feedforward: FeedForward,
                 antecedent_feedforward: FeedForward,
                 feature_size: int,
                 context_layer: Seq2SeqEncoder = None,
                 max_span_width: int = 1,
                 spans_per_word: float = 0.1,
                 max_antecedents: int = 50,
                 lexical_dropout: float = 0.2,
                 pretrain_ed: bool = False,
                 pretrain_coref: bool = False,
                 coref_loss_weight: float = 1.0,
                 bce_loss_weight: float = 1.0,
                 bce_pos_weight: float = None,
                 local_window_size: int = 10,
                 attention_type: str = 'dot',
                 decoding: str = 'type-guided',
                 type_threshold: float = -1.,
                 type_refine: bool = True,
                 type_match_in_eval: bool = True,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        """
        Build all sub-modules of the end-to-end event coreference model.

        NOTE(review): the ``initializer`` default is a mutable default
        argument (one shared ``InitializerApplicator()`` instance across all
        constructions); harmless only if the applicator is stateless --
        confirm before refactoring.
        """
        super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer)
        logger.info(vocab)
        self._text_field_embedder = text_field_embedder
        self._context_layer = context_layer
        self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
        # Scores each candidate span; also reused by the pruner below.
        self._event_scorer = torch.nn.Sequential(
            TimeDistributed(mention_feedforward),
            TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))
        )
        self._pretrain_ed = pretrain_ed
        self._pretrain_coref = pretrain_coref
        self._mention_pruner = Pruner(self._event_scorer)
        self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))
        self._local_window_size = local_window_size
        self._attention_type = attention_type
        self._decoding = decoding
        self._type_threshold = type_threshold
        logger.info(vocab.get_token_from_index(0, "labels"))
        if context_layer is not None:
            # With a contextualizer: endpoint span features come from the
            # encoder output, attentive span features from the raw embeddings.
            endpoint_span_extractor_dim = context_layer.get_output_dim()
            attentive_span_extractor_dim = text_field_embedder.get_output_dim()
            self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
                                                                  combination="x,y",
                                                                  num_width_embeddings=max_span_width,
                                                                  span_width_embedding_dim=feature_size)
            self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
            span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim()
            # Optional local (windowed) intra-sentence attention; disabled
            # when local_window_size <= 0.
            if self._local_window_size <= 0:
                self._attention_layer = None
            else:
                if self._attention_type == 'dot':
                    similarity_function = DotProductSimilarity(scale_output=True)
                    num_head = 1
                else:
                    raise NotImplementedError('Attention Type: %s' % self._attention_type)
                self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
                                                                      similarity_function=similarity_function,
                                                                      combination='2',
                                                                      num_attention_heads=num_head
                                                                      )
        else:
            # Without a contextualizer, both extractors work directly on the
            # embedder output; the endpoint extractor is only needed for
            # multi-token spans.
            attentive_span_extractor_dim = text_field_embedder.get_output_dim()
            if max_span_width > 1:
                endpoint_span_extractor_dim = text_field_embedder.get_output_dim()
                self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
                                                                      combination="x,y",
                                                                      num_width_embeddings=max_span_width,
                                                                      span_width_embedding_dim=feature_size)
            else:
                self._endpoint_span_extractor = None
            self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
            if self._local_window_size <= 0:
                self._attention_layer = None
            else:
                if self._attention_type == 'dot':
                    similarity_function = DotProductSimilarity(scale_output=True)
                    num_head = 1
                else:
                    raise NotImplementedError('Attention Type: %s' % self._attention_type)
                self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
                                                                      similarity_function=similarity_function,
                                                                      combination='2',
                                                                      num_attention_heads=num_head
                                                                      )
            if self._endpoint_span_extractor is not None:
                span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim()
            else:
                span_embedding_size = self._attentive_span_extractor.get_output_dim()
        # Gate that mixes span embeddings with event-type embeddings
        # (see _type_refine_embedding).
        if type_refine:
            self._type_refine_gate = torch.nn.Sequential(
                TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)),
                torch.nn.Sigmoid()
            )
        else:
            self._type_refine_gate = None
        # NIL for Unified Event
        self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'),
                                          embedding_dim=span_embedding_size)
        self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2,
                                                    self._event_embedding.get_output_dim())
        # Label index 0 is reserved for NIL, hence "- 1".
        self._positive_label_size = vocab.get_vocab_size('labels') - 1
        # 10 possible distance buckets.
        self._num_distance_buckets = 10
        self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
        # Loss weighting knobs.
        self._coref_loss_weight = coref_loss_weight
        self._bce_loss_weight = bce_loss_weight
        self._bce_pos_weight = bce_pos_weight
        self._max_span_width = max_span_width
        self._spans_per_word = spans_per_word
        self._max_antecedents = max_antecedents
        # Metrics tracked during training/evaluation.
        self._mention_f1_score = TopSpanMentionTypeF1()
        self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval)
        self._type_loss_metric = Average()
        self._realis_loss_metric = Average()
        self._coref_loss_metric = Average()
        self._coref_label_metric = Average()
        self._type_label_metric = Average()
        self._nil_label_metric = Average()
        if self._bce_pos_weight:
            self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight))
        else:
            self._bce_loss = BCEWithLogitsLoss(reduction='none')
        # Word-level dropout on the embedded text (identity if disabled).
        if lexical_dropout > 0:
            self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
        else:
            self._lexical_dropout = lambda x: x
        initializer(self)
    def _get_event_embedding(self, span_mask):
        """
        Build one embedding per positive event type by concatenating the NIL
        (index 0) embedding with each positive label's embedding and mapping
        the pair back to embedding size.

        :param span_mask:
            (batch, top_span_size, 1) -- used only for its batch size and
            device, not its values
        :return:
            (batch, positive_label_size, span_embedding_size)
            NOTE(review): the original docstring claimed
            (batch, top_span_size, positive_label_size), which does not match
            the unsqueeze/expand at the end -- confirm against callers.
        """
        # Positive label indices are 1..positive_label_size (0 is NIL).
        event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1
        # Pair each positive index with the NIL index 0:
        # (positive_label_size, 2).
        event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1)
        # NOTE(review): this expand uses the tensor's own sizes, so it is a
        # no-op -- presumably a leftover.
        event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)])
        event_embeddings = self._event_embedding(event_indices)
        # Flatten the (NIL, positive) pair into one vector per label.
        event_embeddings = event_embeddings.reshape(event_embeddings.size(0),
                                                    event_embeddings.size(1) * event_embeddings.size(2))
        event_embeddings = self._event_embedding_map.forward(event_embeddings)
        # Broadcast the label embeddings across the batch dimension.
        event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0),
                                                                event_embeddings.size(0),
                                                                event_embeddings.size(1),
                                                                )
        return event_embeddings
    def _get_type_antecedent_labels(self, top_event_type_labels):
        """
        Convert integer event-type labels into a one-hot matrix over the
        label vocabulary (including NIL at index 0).

        :param top_event_type_labels:
            (batch, top_span_size) integer label ids
        :return:
            (batch, top_span_size, num_labels) float one-hot tensor
            NOTE(review): the original docstring said positive_label_size,
            but ``event_indices`` ranges over the full 'labels' vocab size.
        """
        # 0..num_labels-1, on the same device as the labels.
        event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'),
                                              device=util.get_device_of(top_event_type_labels))
        # Broadcast labels against every candidate index ...
        top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0),
                                                                            top_event_type_labels.size(1),
                                                                            event_indices.size(0)])
        # ... and mark matches with 1.0 (one-hot over labels).
        type_antecedent_labels = (top_event_type_labels == event_indices).float()
        return type_antecedent_labels
def _type_refine_embedding(self, top_embeddings, event_embeddings):
# (batch, top_span_size, emb_size) bmm
event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2))
shape = [event_prob.size(0), event_prob.size(1), 1]
dummy_scores = event_prob.new_zeros(*shape)
event_prob = torch.cat([dummy_scores, event_prob], -1)
event_prob = torch.softmax(event_prob, -1)
event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings
refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1))
top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep
return top_embeddings
    def _local_attention(self, raw_contextualized_embeddings, text_mask):
        """
        Re-encode token embeddings with intra-sentence attention restricted
        to a band of +/- ``self._local_window_size`` positions around each
        token.

        :param raw_contextualized_embeddings:
            (batch, seq_len, emb_size) -- assumed; TODO confirm
        :param text_mask: (batch, seq_len) 0/1 padding mask
        :return: attention-encoded embeddings from self._attention_layer
        """
        device = util.get_device_of(raw_contextualized_embeddings)
        # util.get_device_of returns -1 for CPU tensors; torch wants 'cpu'.
        if device < 0:
            device = 'cpu'
        attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device)
        # attention_mask = attention_mask - torch.eye(text_mask.size(1),
        #                                             device=util.get_device_of(contextualized_embeddings))
        # Zero out padded positions, then keep only the diagonal band of
        # width local_window_size (tril of the triu).
        new_attention_mask = text_mask[:, :, None] * attention_mask
        new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size),
                                        -self._local_window_size)
        new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings,
                                                              new_attention_mask)
        return new_contextualized_embeddings
    @overrides
    def forward(self, # type: ignore
                text: Dict[str, torch.LongTensor],
                spans: torch.IntTensor,
                coref_labels: torch.IntTensor = None,
                event_type_labels: torch.IntTensor = None,
                realis_labels: torch.IntTensor = None,
                metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Run the full pipeline: embed the document, extract span representations,
        prune to the top mentions, score antecedents (real spans plus event-type
        pseudo-antecedents), and — when labels are given — compute the combined
        coreference + event-detection loss.

        Parameters
        ----------
        text : ``Dict[str, torch.LongTensor]``, required.
            The output of a ``TextField`` representing the text of
            the document.
        spans : ``torch.IntTensor``, required.
            A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
            indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
            indices into the text of the document.
        coref_labels : ``torch.IntTensor``, optional (default = None).
            A tensor of shape (batch_size, num_spans), representing the cluster ids
            of each span, or -1 for those which do not appear in any clusters.
        event_type_labels : ``torch.IntTensor``, optional (default = None).
            A tensor of shape (batch_size, num_spans), representing the event label of the specific span.
        realis_labels : ``torch.IntTensor``, optional (default = None).
            A tensor of shape (batch_size, num_spans), representing the realis label of the specific span.
            NOTE(review): accepted but never read in this method — confirm it is consumed elsewhere.
        metadata : ``List[Dict[str, Any]]``, optional (default = None).
            A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys
            from this dictionary, which respectively have the original text and the annotated gold coreference
            clusters for that instance.

        Returns
        -------
        An output dictionary consisting of:
        top_spans : ``torch.IntTensor``
            A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
            the start and end word indices of the top spans that survived the pruning stage.
        antecedent_indices : ``torch.IntTensor``
            A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
            the index (with respect to top_spans) of the possible antecedents the model considered.
        predicted_antecedents : ``torch.IntTensor``
            A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
            index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
            was no predicted link.
        loss : ``torch.FloatTensor``, optional
            A scalar loss to be optimised.
        """
        # Shape: (batch_size, document_length, embedding_size)
        text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
        document_length = text_embeddings.size(1)
        num_spans = spans.size(1)
        # Shape: (batch_size, document_length)
        text_mask = util.get_text_field_mask(text).float()
        # Shape: (batch_size, num_spans)
        # NOTE(review): ``.squeeze(-1)`` is a no-op on the (batch_size, num_spans)
        # comparison result unless num_spans == 1, in which case it would drop the
        # span dimension — confirm this is intended.
        span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
        # SpanFields return -1 when they are used as padding. As we do
        # some comparisons based on span widths when we attend over the
        # span representations that we generate from these indices, we
        # need them to be <= 0. This is only relevant in edge cases where
        # the number of spans we consider after the pruning stage is >= the
        # total number of spans, because in this case, it is possible we might
        # consider a masked span.
        # Shape: (batch_size, num_spans, 2)
        spans = F.relu(spans.float()).long()
        # Two encoder configurations: with a contextualizer (self._context_layer)
        # span features come from endpoint + attentive extractors over its output;
        # without one, the raw token embeddings feed the extractors directly.
        if self._context_layer:
            # Shape: (batch_size, document_length, encoding_dim)
            raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
            if self._attention_layer is not None:
                new_contextualized_embeddings = self._local_attention(
                    raw_contextualized_embeddings=raw_contextualized_embeddings,
                    text_mask=text_mask
                )
            else:
                new_contextualized_embeddings = raw_contextualized_embeddings
            # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
            endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)
            # Shape: (batch_size, num_spans, embedding_size)
            attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
            # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
            span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
        else:
            raw_contextualized_embeddings = text_embeddings
            if self._attention_layer is not None:
                new_contextualized_embeddings = self._local_attention(
                    raw_contextualized_embeddings=raw_contextualized_embeddings,
                    text_mask=text_mask
                )
            else:
                new_contextualized_embeddings = raw_contextualized_embeddings
            span_embeddings_list = list()
            attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)
            span_embeddings_list += [attended_span_embeddings]
            if self._endpoint_span_extractor is not None:
                # Shape: (batch_size, num_spans, embedding_size)
                endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
                span_embeddings_list += [endpoint_span_embeddings]
            span_embeddings = torch.cat(span_embeddings_list, -1)
        # Prune based on mention scores.
        num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))
        (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,
                                                                                   span_mask,
                                                                                   num_spans_to_keep_according_doc_len,
                                                                                   )
        # NOTE(review): presumably one embedding per event-type label, used below as
        # pseudo-antecedents for type classification — confirm in _get_event_embedding.
        event_embeddings = self._get_event_embedding(span_mask)
        top_mask = top_mask.unsqueeze(-1)
        # Shape: (batch_size * num_spans_to_keep)
        # torch.index_select only accepts 1D indices, but here
        # we need to select spans for each element in the batch.
        # This reformats the indices to take into account their
        # index into the batch. We precompute this here to make
        # the multiple calls to util.batched_index_select below more efficient.
        flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)
        # Compute final predictions for which spans to consider as mentions.
        # Shape: (batch_size, num_spans_to_keep, 2)
        top_spans = util.batched_index_select(spans,
                                              top_indices,
                                              flat_top_span_indices)
        # Compute indices for antecedent spans to consider.
        max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)
        # Now that we have our variables in terms of num_spans_to_keep, we need to
        # compare span pairs to decide each span's antecedent. Each span can only
        # have prior spans as antecedents, and we only consider up to max_antecedents
        # prior spans. So the first thing we do is construct a matrix mapping a span's
        # index to the indices of its allowed antecedents. Note that this is independent
        # of the batch dimension - it's just a function of the span's position in
        # top_spans. The spans are in document order, so we can just use the relative
        # index of the spans to know which other spans are allowed antecedents.
        # Once we have this matrix, we reformat our variables again to get embeddings
        # for all valid antecedents for each span. This gives us variables with shapes
        # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
        # we can use to make coreference decisions between valid span pairs.
        # Shapes:
        # (num_spans_to_keep, max_antecedents),
        # (1, max_antecedents),
        # (1, num_spans_to_keep, max_antecedents)
        valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \
            _generate_valid_antecedents(num_spans_to_keep_according_doc_len,
                                        max_antecedents,
                                        util.get_device_of(text_mask))
        if self._type_refine_gate is not None:
            # Optionally refine the kept span embeddings with the event-type embeddings.
            top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)
        # Select tensors relating to the antecedent spans.
        # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
        candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,
                                                                      valid_antecedent_indices)
        # Shape: (batch_size, num_spans_to_keep, max_antecedents)
        candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,
                                                                          valid_antecedent_indices).squeeze(-1)
        # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
        candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(
            event_embeddings,
            candidate_antecedent_embeddings)
        # Compute antecedent scores.
        # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
        span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,
                                                                  candidate_antecedent_embeddings,
                                                                  valid_antecedent_offsets)
        # (batch_size, event_type_size, 1)
        event_type_prior_scores = self._event_scorer(event_embeddings)
        # (batch_size, num_spans_to_keep, event_type_size)
        event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(
            candidate_antecedent_mention_scores.size(0),
            candidate_antecedent_mention_scores.size(1),
            -1)
        # (batch_size, num_spans_to_keep, event_type_size + max_antecedents)
        candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,
                                                         candidate_antecedent_mention_scores],
                                                        -1)
        # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)
        coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
                                                              top_scores,
                                                              candidate_antecedent_mention_scores,
                                                              valid_antecedent_log_mask)
        # We now have, for each span which survived the pruning stage,
        # a predicted antecedent. This implies a clustering if we group
        # mentions which refer to each other in a chain.
        # Shape: (batch_size, num_spans_to_keep)
        _, predicted_antecedents = coreference_scores.max(2)
        # Subtract one here because index 0 is the "no antecedent" class,
        # so this makes the indices line up with actual spans if the prediction
        # is greater than -1.
        predicted_antecedents -= 1
        output_dict = {"top_spans": top_spans,
                       "antecedent_indices": valid_antecedent_indices,
                       "predicted_antecedents": predicted_antecedents,
                       "coreference_scores": coreference_scores,
                       }
        if coref_labels is not None and event_type_labels is not None:
            pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)
            type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)
            # Find the gold labels for the spans which we kept.
            pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),
                                                           top_indices,
                                                           flat_top_span_indices)
            antecedent_labels = util.flattened_index_select(pruned_gold_labels,
                                                            valid_antecedent_indices).squeeze(-1)
            # Invalid antecedent positions carry -inf in the log mask; adding its
            # long() cast drives their labels to a huge negative value so they can
            # never equal a real cluster id.
            antecedent_labels += valid_antecedent_log_mask.long()
            # Compute labels.
            # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
            gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
                                                                          type_antecedent_labels,
                                                                          antecedent_labels)
            # Event-detection loss: per-span binary "is this span an event trigger"
            # (target is event_type_labels > 0), masked to real (non-padding) spans.
            bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),
                                              (event_type_labels > 0).float()) * span_mask
            bce_loss = bce_loss.sum() * self._bce_loss_weight
            # Now, compute the loss using the negative marginal log-likelihood.
            # This is equal to the log of the sum of the probabilities of all antecedent predictions
            # that would be consistent with the data, in the sense that we are minimising, for a
            # given span, the negative marginal log likelihood of all antecedents which are in the
            # same gold cluster as the span we are currently considering. Each span i predicts a
            # single antecedent j, but there might be several prior mentions k in the same
            # coreference cluster that would be valid antecedents. Our loss is the sum of the
            # probability assigned to all valid antecedents. This is a valid objective for
            # clustering as we don't mind which antecedent is predicted, so long as they are in
            # the same coreference cluster.
            if self._pretrain_ed:
                # All antecedent mask is 0
                top_mask = top_mask.expand_as(coreference_scores).clone()
                top_mask[:, :, self._positive_label_size + 2:] = 0
            coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)
            correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
            negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
            coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight
            output_dict["loss"] = coref_loss + bce_loss
            # Decode during training as well, so mention/coref metrics are updated.
            decoded_result = self.decode(output_dict)
            pred_label_spans_list = decoded_result['pred_label_spans']
            gold_label_spans_list = [m['gold_label_spans'] for m in metadata]
            self._mention_f1_score(pred_label_spans_list,
                                   gold_label_spans_list,
                                   )
            self._conll_coref_scores(decoded_result['clusters'],
                                     metadata,
                                     pred_label_spans_list,
                                     gold_label_spans_list)
            self._type_loss_metric(bce_loss.item())
            self._coref_loss_metric(negative_marginal_log_likelihood.item())
        else:
            self._coref_loss_metric(0.)
        if metadata is not None:
            output_dict["document"] = [x["original_text"] for x in metadata]
            output_dict["offset"] = [x["token_offset"] for x in metadata]
            output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata]
        return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
Returns
-------
The same output dictionary, but with an additional ``clusters`` key:
clusters : ``List[List[List[Tuple[int, int]]]]``
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
return node_decode(output_dict,
self.vocab, decoding_algorithm=self._decoding,
positive_label_size=self._positive_label_size,
type_threshold=self._type_threshold)
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
mention_result = self._mention_f1_score.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
return {"c_p": coref_precision,
"c_r": coref_recall,
"c_f1": coref_f1,
"m_p": mention_result['precision'],
"m_r": mention_result['recall'],
"m_f1": mention_result['f1-score'],
"nil": self._nil_label_metric.get_metric(reset),
"type": self._type_label_metric.get_metric(reset),
"coref": self._coref_label_metric.get_metric(reset),
"t_l": self._type_loss_metric.get_metric(reset),
"c_l": self._coref_loss_metric.get_metric(reset),
"a_f1": (mention_result['f1-score'] + coref_f1) / 2.}
@staticmethod
def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor):
"""
event_embeddings: ``torch.FloatTensor``, required.
Embedding representations of the event types. Has shape
(batch_size, event_type_size, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
return:
(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
"""
event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
event_embeddings.size(1),
antecedent_embeddings.size(3),))
return torch.cat([event_embeddings, antecedent_embeddings], 2)
    def _compute_span_pair_embeddings(self,
                                      top_span_embeddings: torch.FloatTensor,
                                      antecedent_embeddings: torch.FloatTensor,
                                      antecedent_offsets: torch.FloatTensor):
        """
        Computes an embedding representation of pairs of spans for the pairwise scoring function
        to consider. This includes both the original span representations, the element-wise
        similarity of the span representations, and an embedding representation of the distance
        between the two spans.

        Parameters
        ----------
        top_span_embeddings : ``torch.FloatTensor``, required.
            Embedding representations of the top spans. Has shape
            (batch_size, num_spans_to_keep, embedding_size).
        antecedent_embeddings : ``torch.FloatTensor``, required.
            Embedding representations of the antecedent spans we are considering
            for each top span, with the event-type pseudo-antecedents prepended.
            Has shape
            (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size).
        antecedent_offsets : ``torch.IntTensor``, required.
            The offsets between each top span and its antecedent spans in terms
            of spans we are considering. Has shape (1, max_antecedents).

        Returns
        -------
        span_pair_embeddings : ``torch.FloatTensor``
            Embedding representation of the pair of spans to consider. Has shape
            (batch_size, num_spans_to_keep, event_type_size + max_antecedents, pair_embedding_size),
            where the last dimension concatenates the target span embedding, the
            antecedent embedding, their element-wise product, and a distance embedding.
        """
        # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
        target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
        # Bucket the span-offset distances into a fixed number of distance buckets.
        # Shape: (1, max_antecedents)
        bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)
        # Zero bucket for the event-type pseudo-antecedents, which have no real distance.
        # (1, event_type)
        label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size))
        # NOTE(review): ``antecedent_embeddings`` has the event types FIRST along its
        # antecedent axis, but here the real-distance buckets come first and the zero
        # label buckets last — confirm the ordering is intentional and the distance
        # features align with the right antecedent slots.
        # Shape: (1, max_antecedents + event_type_size, embedding_size)
        antecedent_distance_embeddings = self._distance_embedding(
            torch.cat([bucket_values, label_bucket_values], 1)
        )
        # Shape: (1, 1, max_antecedents + event_type_size, embedding_size)
        antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)
        expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),
                                              antecedent_embeddings.size(1),
                                              antecedent_embeddings.size(2),
                                              antecedent_distance_embeddings.size(-1))
        # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
        antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)
        # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
        span_pair_embeddings = torch.cat([target_embeddings,
                                          antecedent_embeddings,
                                          antecedent_embeddings * target_embeddings,
                                          antecedent_distance_embeddings], -1)
        return span_pair_embeddings
    def _compute_antecedent_gold_labels(self,
                                        top_span_labels: torch.IntTensor,
                                        type_antecedent_labels: torch.IntTensor,
                                        antecedent_labels: torch.IntTensor):
        """
        Generates a binary indicator for every pair of spans. This label is one if and
        only if the pair of spans belong to the same cluster. The labels are augmented
        with the event-type antecedent labels at the front, which represent the
        prediction that a span links to an event type rather than a prior span.
        Also updates the coref/nil/type label-count metrics as a side effect.

        Parameters
        ----------
        top_span_labels : ``torch.IntTensor``, required.
            The cluster id label for every span. The id is arbitrary,
            as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
        type_antecedent_labels : ``torch.IntTensor``, required.
            Per-span event-type antecedent labels; index 0 is the nil label and
            indices 1..positive_label_size the event types (see the metric updates below).
        antecedent_labels : ``torch.IntTensor``, required.
            The cluster id label for every antecedent span. The id is arbitrary,
            as we just care about the clustering. Has shape
            (batch_size, num_spans_to_keep, max_antecedents). Invalid antecedent
            slots carry a large negative value so they never match.

        Returns
        -------
        pairwise_labels_with_dummy_label : ``torch.FloatTensor``
            A binary tensor representing whether a given pair of spans belong to
            the same cluster in the gold clustering.
            Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1).
        """
        # Shape: (batch_size, num_spans_to_keep, max_antecedents)
        target_labels = top_span_labels.expand_as(antecedent_labels)
        same_cluster_indicator = (target_labels == antecedent_labels).float()
        # Spans with label -1 belong to no cluster; exclude them from positives.
        non_dummy_indicator = (target_labels >= 0).float()
        pairwise_labels = same_cluster_indicator * non_dummy_indicator
        if self._pretrain_ed:
            # Event-detection pretraining: suppress all coreference supervision.
            pairwise_labels = pairwise_labels * 0
        else:
            # for pairwise_labels without type_antecedent_labels
            # A span with at least one true span antecedent should not also be
            # supervised towards an event-type antecedent.
            pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float()
            type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator)
        # Track how much of each supervision signal this batch carries.
        self._coref_label_metric(torch.sum(pairwise_labels).item())
        self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item())
        self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item())
        # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1)
        pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1)
        return pairwise_labels_with_dummy_label
    def _compute_coreference_scores(self,
                                    pairwise_embeddings: torch.FloatTensor,
                                    top_span_mention_scores: torch.FloatTensor,
                                    antecedent_mention_scores: torch.FloatTensor,
                                    antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:
        """
        Computes scores for every pair of spans. Additionally, a dummy label is included,
        representing the decision that the span is not coreferent with anything. For the dummy
        label, the score is always zero. For the true antecedent spans, the score consists of
        the pairwise antecedent score and the unary mention scores for the span and its
        antecedent. The factoring allows the model to blame many of the absent links on bad
        spans, enabling the pruning strategy used in the forward pass.

        Parameters
        ----------
        pairwise_embeddings: ``torch.FloatTensor``, required.
            Embedding representations of pairs of spans. Has shape
            (batch_size, num_spans_to_keep, event_type_size + max_antecedents, encoding_dim)
        top_span_mention_scores: ``torch.FloatTensor``, required.
            Mention scores for every span.
        antecedent_mention_scores: ``torch.FloatTensor``, required.
            Mention scores for every antecedent, with the event-type prior scores
            prepended. Has shape
            (batch_size, num_spans_to_keep, event_type_size + max_antecedents).
        antecedent_log_mask: ``torch.FloatTensor``, required.
            The log of the mask for valid antecedents (0 for valid, -inf for invalid).
        Returns
        -------
        coreference_scores: ``torch.FloatTensor``
            A tensor of shape
            (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents),
            representing the unormalised score for each (span, antecedent) pair
            we considered. Index 0 is the dummy "no antecedent" score (always 0).
        """
        # Event-type pseudo-antecedents are always valid: pad the log mask with zeros
        # in front of the real-antecedent mask.
        antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0),
                                                                        antecedent_log_mask.size(1),
                                                                        self._positive_label_size)),
                                         antecedent_log_mask],
                                        -1)
        # Shape: (batch_size, num_spans_to_keep, max_antecedents)
        antecedent_scores = self._antecedent_scorer(
            self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)
        # Factored score: pairwise score + both unary mention scores.
        antecedent_scores += top_span_mention_scores + antecedent_mention_scores
        # Invalid antecedents are driven to -inf so softmax gives them zero mass.
        antecedent_scores += antecedent_log_mask
        # Shape: (batch_size, num_spans_to_keep, 1)
        shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
        dummy_scores = antecedent_scores.new_zeros(*shape)
        # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
        coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
        return coreference_scores
def _generate_valid_antecedents(num_spans_to_keep: int,
                                max_antecedents: int,
                                device: int) -> Tuple[torch.IntTensor,
                                                      torch.IntTensor,
                                                      torch.FloatTensor]:
    """
    Build, once per batch, the candidate-antecedent index matrix for the kept spans.

    Because spans are in document order, span ``i`` may only take spans
    ``i - 1, i - 2, ..., i - max_antecedents`` as antecedents; this is purely a
    function of position, so the result is shared across the whole batch.

    Parameters
    ----------
    num_spans_to_keep : ``int``, required.
        The number of spans that were kept while pruning.
    max_antecedents : ``int``, required.
        The maximum number of antecedent spans to consider for every span.
    device: ``int``, required.
        The CUDA device to use.

    Returns
    -------
    valid_antecedent_indices : ``torch.IntTensor``
        Index of every candidate antecedent with respect to the top spans,
        clamped to 0 where no valid antecedent exists.
        Shape ``(num_spans_to_keep, max_antecedents)``.
    valid_antecedent_offsets : ``torch.IntTensor``
        Distance (in kept spans, not words) between a span and each candidate.
        Shape ``(1, max_antecedents)``.
    valid_antecedent_log_mask : ``torch.FloatTensor``
        Log-space validity mask: 0 for valid candidates, -inf for positions
        that point before the start of the document.
        Shape ``(1, num_spans_to_keep, max_antecedents)``.
    """
    # One row per kept span: 0 .. num_spans_to_keep - 1.
    # Shape: (num_spans_to_keep, 1)
    span_positions = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
    # Offsets 1 .. max_antecedents, shared by every span.
    # Shape: (1, max_antecedents)
    valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
    # Broadcasted subtraction: entry (i, j) is i - (j + 1).
    # Shape: (num_spans_to_keep, max_antecedents)
    candidate_indices = span_positions - valid_antecedent_offsets
    # Negative entries point before the first span and must never receive
    # probability mass, so the mask is built in log space (0 / -inf) for use
    # inside a downstream softmax.
    # Shape: (1, num_spans_to_keep, max_antecedents)
    valid_antecedent_log_mask = (candidate_indices >= 0).float().unsqueeze(0).log()
    # Clamp the invalid (negative) indices to 0 so they are safe to gather with;
    # the log mask removes their contribution later.
    # Shape: (num_spans_to_keep, max_antecedents)
    valid_antecedent_indices = F.relu(candidate_indices.float()).long()
    return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
| 54.514188
| 134
| 0.629271
| 5,379
| 48,027
| 5.30396
| 0.115821
| 0.018787
| 0.018577
| 0.026008
| 0.390431
| 0.305293
| 0.264774
| 0.237855
| 0.193831
| 0.165545
| 0
| 0.006786
| 0.303496
| 48,027
| 880
| 135
| 54.576136
| 0.846103
| 0.363483
| 0
| 0.182033
| 0
| 0
| 0.012454
| 0.00169
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030733
| false
| 0
| 0.047281
| 0
| 0.108747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98ccbb0c859fdccad6b30924e5845122d497aa5
| 1,964
|
py
|
Python
|
week2/7litersProblem.py
|
vietanhtran2710/ArtificialIntelligenceHomework
|
f4da761016d67477b50856cadf1e2560230d3f79
|
[
"MIT"
] | 3
|
2021-09-20T08:32:23.000Z
|
2021-09-25T08:11:48.000Z
|
week2/7litersProblem.py
|
vietanhtran2710/ArtificialIntelligenceHomework
|
f4da761016d67477b50856cadf1e2560230d3f79
|
[
"MIT"
] | null | null | null |
week2/7litersProblem.py
|
vietanhtran2710/ArtificialIntelligenceHomework
|
f4da761016d67477b50856cadf1e2560230d3f79
|
[
"MIT"
] | null | null | null |
"""
Given 3 bottles of capacities 3, 5, and 9 liters,
count number of all possible solutions to get 7 liters
"""
current_path = [[0, 0, 0]]  # stack of states on the current DFS path (cycle check)
CAPACITIES = (3, 5, 9)      # bottle capacities in liters
solutions_count = 0         # running total of solution paths found so far


def _successor_states(current_state):
    """Yield every state reachable from ``current_state`` in one move.

    A move empties one bottle, fills one bottle to capacity, or pours one
    bottle into another until the source is empty or the target is full.
    """
    # Empty bottle
    for i in range(3):
        if current_state[i] != 0:
            new_state = list(current_state)
            new_state[i] = 0
            yield new_state
    # Fill bottle
    for i in range(3):
        if current_state[i] != CAPACITIES[i]:
            new_state = list(current_state)
            new_state[i] = CAPACITIES[i]
            yield new_state
    # Pour from one bottle to another
    for i in range(3):
        for j in range(3):
            if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:
                new_state = list(current_state)
                liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])
                new_state[j] += liters_change
                new_state[i] -= liters_change
                yield new_state


def move_to_new_state(current_state):
    """Depth-first search counting every simple path that reaches 7 liters.

    A state is a solution as soon as any bottle holds exactly 7 liters.
    Increments the global ``solutions_count`` (the original interface) and
    additionally returns the number of solutions found in this subtree, so
    callers can use the return value directly.
    """
    global solutions_count
    if 7 in current_state:
        solutions_count += 1
        return 1
    found = 0
    for new_state in _successor_states(current_state):
        # Only extend the path with states not already on it (simple paths only).
        if new_state not in current_path:
            current_path.append(new_state)
            found += move_to_new_state(new_state)
            current_path.pop()
    return found
if __name__ == "__main__":
    try:
        current_state = [0, 0, 0]
        move_to_new_state(current_state)
        print(solutions_count)
    except KeyboardInterrupt:
        # The search space is enormous and may never finish; allow Ctrl-C to
        # stop the search and still report how many solutions were found so far.
        print(solutions_count)
# Result: at least 44900799 solution
| 35.709091
| 91
| 0.548371
| 244
| 1,964
| 4.122951
| 0.233607
| 0.166998
| 0.044732
| 0.069583
| 0.478131
| 0.442346
| 0.390656
| 0.390656
| 0.32505
| 0.32505
| 0
| 0.026016
| 0.373727
| 1,964
| 54
| 92
| 36.37037
| 0.79187
| 0.100815
| 0
| 0.47619
| 0
| 0
| 0.004579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0
| 0
| 0.02381
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b98d02f62eca1818cb1fb297d1c8644dd35ff288
| 8,263
|
py
|
Python
|
st2common/st2common/bootstrap/rulesregistrar.py
|
avezraj/st2
|
519c7f6819e52fb289c440bb7d1df7b558bb9ed7
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/bootstrap/rulesregistrar.py
|
avezraj/st2
|
519c7f6819e52fb289c440bb7d1df7b558bb9ed7
|
[
"Apache-2.0"
] | null | null | null |
st2common/st2common/bootstrap/rulesregistrar.py
|
avezraj/st2
|
519c7f6819e52fb289c440bb7d1df7b558bb9ed7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.rule import RuleAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.rule import Rule
from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count
from st2common.exceptions.db import coditationDBObjectNotFoundError
import st2common.content.utils as content_utils
# Public API of this module.
__all__ = [
    'RulesRegistrar',
    'register_rules'
]
# Module-level logger.
LOG = logging.getLogger(__name__)
class RulesRegistrar(ResourceRegistrar):
    """Registrar which discovers rule metadata files in packs and registers
    the corresponding Rule models in the database."""

    ALLOWED_EXTENSIONS = ALLOWED_EXTS

    def register_from_packs(self, base_dirs):
        """
        Discover and register rules from all packs found under ``base_dirs``.

        :return: Number of rules registered.
        :rtype: ``int``
        """
        # Register packs first
        self.register_packs(base_dirs=base_dirs)
        registered_count = 0
        content = self._pack_loader.get_content(base_dirs=base_dirs,
                                                content_type='rules')
        for pack, rules_dir in six.iteritems(content):
            if not rules_dir:
                LOG.debug('Pack %s does not contain rules.', pack)
                continue
            try:
                LOG.debug('Registering rules from pack: %s', pack)
                rules = self._get_rules_from_pack(rules_dir)
                count = self._register_rules_from_pack(pack, rules)
                registered_count += count
            except Exception:
                if self._fail_on_failure:
                    # Bare raise preserves the original traceback ("raise e" would not).
                    raise
                LOG.exception('Failed registering all rules from pack: %s', rules_dir)
        return registered_count

    def register_from_pack(self, pack_dir):
        """
        Register all the rules from the provided pack.

        :return: Number of rules registered.
        :rtype: ``int``
        """
        pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
        _, pack = os.path.split(pack_dir)
        rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
                                                            content_type='rules')
        # Register pack first
        self.register_pack(pack_name=pack, pack_dir=pack_dir)
        registered_count = 0
        if not rules_dir:
            return registered_count
        LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)
        try:
            rules = self._get_rules_from_pack(rules_dir=rules_dir)
            registered_count = self._register_rules_from_pack(pack=pack, rules=rules)
        except Exception:
            if self._fail_on_failure:
                # Bare raise preserves the original traceback.
                raise
            LOG.exception('Failed registering all rules from pack: %s', rules_dir)
        return registered_count

    def _get_rules_from_pack(self, rules_dir):
        """Return paths of the rule metadata files found in ``rules_dir``."""
        return self.get_resources_from_pack(resources_dir=rules_dir)

    def _register_rules_from_pack(self, pack, rules):
        """Load, validate and persist each rule file; return how many succeeded."""
        registered_count = 0
        # TODO: Refactor this monstrosity
        for rule in rules:
            LOG.debug('Loading rule from %s.', rule)
            try:
                content = self._meta_loader.load(rule)
                pack_field = content.get('pack', None)
                if not pack_field:
                    content['pack'] = pack
                    pack_field = pack
                if pack_field != pack:
                    raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
                                    (pack, pack_field))
                metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,
                                                                             file_path=rule,
                                                                             use_pack_cache=True)
                content['metadata_file'] = metadata_file
                rule_api = RuleAPI(**content)
                rule_api.validate()
                rule_db = RuleAPI.to_model(rule_api)
                # Migration from rule without pack to rule with pack.
                # There might be a rule with same name but in pack `default`
                # generated in migration script. In this case, we want to
                # delete so we don't have duplicates.
                if pack_field != DEFAULT_PACK_NAME:
                    try:
                        rule_ref = ResourceReference.to_string_reference(name=content['name'],
                                                                         pack=DEFAULT_PACK_NAME)
                        LOG.debug('Looking for rule %s in pack %s', content['name'],
                                  DEFAULT_PACK_NAME)
                        existing = Rule.get_by_ref(rule_ref)
                        LOG.debug('Existing = %s', existing)
                        if existing:
                            LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)
                            Rule.delete(existing)
                    except Exception:
                        # Was a bare "except:", which also swallowed SystemExit /
                        # KeyboardInterrupt; narrow it to Exception.
                        LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)
                # Bug fix: without this initialisation, ``existing`` was unbound at
                # the cleanup check below whenever the lookup in the next try block
                # raised, turning the failure into a NameError.
                existing = None
                try:
                    rule_ref = ResourceReference.to_string_reference(name=content['name'],
                                                                     pack=content['pack'])
                    existing = Rule.get_by_ref(rule_ref)
                    if existing:
                        rule_db.id = existing.id
                        LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)
                except coditationDBObjectNotFoundError:
                    LOG.debug('Rule %s not found. Creating new one.', rule)
                try:
                    rule_db = Rule.add_or_update(rule_db)
                    increment_trigger_ref_count(rule_api=rule_api)
                    extra = {'rule_db': rule_db}
                    LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)
                except Exception:
                    LOG.exception('Failed to create rule %s.', rule_api.name)
                # If there was an existing rule then the ref count was updated in
                # to_model so it needs to be adjusted down here. Also, update could
                # lead to removal of a Trigger so now is a good time for book-keeping.
                if existing:
                    cleanup_trigger_db_for_rule(existing)
            except Exception as e:
                if self._fail_on_failure:
                    msg = ('Failed to register rule "%s" from pack "%s": %s' % (rule, pack,
                                                                                six.text_type(e)))
                    raise ValueError(msg)
                LOG.exception('Failed registering rule from %s.', rule)
            else:
                registered_count += 1
        return registered_count
def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
                   fail_on_failure=False):
    """
    Register rules from a single pack or from all packs on the base paths.

    :param packs_base_paths: Optional list of directories in which to look for
                             packs. Defaults to the system packs base paths.
    :type packs_base_paths: ``list`` of ``str``
    :param pack_dir: If provided, only register rules from this pack directory.
    :type pack_dir: ``str``
    :param use_pack_cache: True to consult the pack cache when resolving pack files.
    :type use_pack_cache: ``bool``
    :param fail_on_failure: True to raise on the first rule which fails to register.
    :type fail_on_failure: ``bool``
    :return: Number of rules which were successfully registered.
    :rtype: ``int``
    :raises TypeError: If ``packs_base_paths`` is provided but is not a list.
    """
    if packs_base_paths is not None and not isinstance(packs_base_paths, list):
        # Raise explicitly instead of using "assert" so the validation is not
        # stripped out when running under "python -O".
        raise TypeError('packs_base_paths must be a list, got %s' %
                        type(packs_base_paths).__name__)
    if not packs_base_paths:
        packs_base_paths = content_utils.get_packs_base_paths()
    registrar = RulesRegistrar(use_pack_cache=use_pack_cache,
                               fail_on_failure=fail_on_failure)
    if pack_dir:
        return registrar.register_from_pack(pack_dir=pack_dir)
    return registrar.register_from_packs(base_dirs=packs_base_paths)
| 41.109453
| 98
| 0.57824
| 952
| 8,263
| 4.782563
| 0.230042
| 0.026356
| 0.028553
| 0.0123
| 0.217
| 0.177246
| 0.167582
| 0.110037
| 0.095541
| 0.087415
| 0
| 0.004294
| 0.351809
| 8,263
| 200
| 99
| 41.315
| 0.84578
| 0.143894
| 0
| 0.272727
| 0
| 0
| 0.096118
| 0
| 0
| 0
| 0
| 0.005
| 0.007576
| 1
| 0.037879
| false
| 0
| 0.098485
| 0.007576
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9912797a8155d6800745fe804b93206d95de8ac
| 91,819
|
py
|
Python
|
sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 1
|
2021-09-07T18:43:20.000Z
|
2021-09-07T18:43:20.000Z
|
sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
|
aiven/azure-sdk-for-python
|
8764dc07423beca46ed0b51212d81289d9e52c60
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
|
msyyc/azure-sdk-for-python
|
e2dba75181f8b4336ae57e75aa391322c12c3123
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._cost_management_client_enums import *
class Resource(msrest.serialization.Model):
    """Base model shared by tracked Azure resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "tags": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, **kwargs):
        """All fields are read-only and filled in by the service."""
        super().__init__(**kwargs)
        for _attr in ("id", "name", "type", "tags"):
            setattr(self, _attr, None)
class Alert(Resource):
    """A single cost-management alert.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :param definition: defines the type of alert.
    :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition
    :param description: Alert description.
    :type description: str
    :param source: Source of alert. Possible values include: "Preset", "User".
    :type source: str or ~azure.mgmt.costmanagement.models.AlertSource
    :param details: Alert details.
    :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails
    :param cost_entity_id: related budget.
    :type cost_entity_id: str
    :param status: alert status. Possible values include: "None", "Active", "Overridden",
     "Resolved", "Dismissed".
    :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus
    :param creation_time: dateTime in which alert was created.
    :type creation_time: str
    :param close_time: dateTime in which alert was closed.
    :type close_time: str
    :param modification_time: dateTime in which alert was last modified.
    :type modification_time: str
    :param status_modification_user_name:
    :type status_modification_user_name: str
    :param status_modification_time: dateTime in which the alert status was last modified.
    :type status_modification_time: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "tags": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "definition": {"key": "properties.definition", "type": "AlertPropertiesDefinition"},
        "description": {"key": "properties.description", "type": "str"},
        "source": {"key": "properties.source", "type": "str"},
        "details": {"key": "properties.details", "type": "AlertPropertiesDetails"},
        "cost_entity_id": {"key": "properties.costEntityId", "type": "str"},
        "status": {"key": "properties.status", "type": "str"},
        "creation_time": {"key": "properties.creationTime", "type": "str"},
        "close_time": {"key": "properties.closeTime", "type": "str"},
        "modification_time": {"key": "properties.modificationTime", "type": "str"},
        "status_modification_user_name": {"key": "properties.statusModificationUserName", "type": "str"},
        "status_modification_time": {"key": "properties.statusModificationTime", "type": "str"},
    }

    def __init__(
        self,
        *,
        definition: Optional["AlertPropertiesDefinition"] = None,
        description: Optional[str] = None,
        source: Optional[Union[str, "AlertSource"]] = None,
        details: Optional["AlertPropertiesDetails"] = None,
        cost_entity_id: Optional[str] = None,
        status: Optional[Union[str, "AlertStatus"]] = None,
        creation_time: Optional[str] = None,
        close_time: Optional[str] = None,
        modification_time: Optional[str] = None,
        status_modification_user_name: Optional[str] = None,
        status_modification_time: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.definition = definition
        self.description = description
        self.source = source
        self.details = details
        self.cost_entity_id = cost_entity_id
        self.status = status
        self.creation_time = creation_time
        self.close_time = close_time
        self.modification_time = modification_time
        self.status_modification_user_name = status_modification_user_name
        self.status_modification_time = status_modification_time
class AlertPropertiesDefinition(msrest.serialization.Model):
    """Describes what kind of alert this is.

    :param type: type of alert. Possible values include: "Budget", "Invoice", "Credit", "Quota",
     "General", "xCloud", "BudgetForecast".
    :type type: str or ~azure.mgmt.costmanagement.models.AlertType
    :param category: Alert category. Possible values include: "Cost", "Usage", "Billing", "System".
    :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory
    :param criteria: Criteria that triggered alert. Possible values include:
     "CostThresholdExceeded", "UsageThresholdExceeded", "CreditThresholdApproaching",
     "CreditThresholdReached", "QuotaThresholdApproaching", "QuotaThresholdReached",
     "MultiCurrency", "ForecastCostThresholdExceeded", "ForecastUsageThresholdExceeded",
     "InvoiceDueDateApproaching", "InvoiceDueDateReached", "CrossCloudNewDataAvailable",
     "CrossCloudCollectionError", "GeneralThresholdError".
    :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "criteria": {"key": "criteria", "type": "str"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "AlertType"]] = None,
        category: Optional[Union[str, "AlertCategory"]] = None,
        criteria: Optional[Union[str, "AlertCriteria"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.category = category
        self.criteria = criteria
class AlertPropertiesDetails(msrest.serialization.Model):
    """Detail payload carried by an alert.

    :param time_grain_type: Type of timegrain cadence. Possible values include: "None", "Monthly",
     "Quarterly", "Annually", "BillingMonth", "BillingQuarter", "BillingAnnual".
    :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType
    :param period_start_date: datetime of periodStartDate.
    :type period_start_date: str
    :param triggered_by: notificationId that triggered this alert.
    :type triggered_by: str
    :param resource_group_filter: array of resourceGroups to filter by.
    :type resource_group_filter: list[object]
    :param resource_filter: array of resources to filter by.
    :type resource_filter: list[object]
    :param meter_filter: array of meters to filter by.
    :type meter_filter: list[object]
    :param tag_filter: tags to filter by.
    :type tag_filter: object
    :param threshold: notification threshold percentage as a decimal which activated this alert.
    :type threshold: float
    :param operator: operator used to compare currentSpend with amount. Possible values include:
     "None", "EqualTo", "GreaterThan", "GreaterThanOrEqualTo", "LessThan", "LessThanOrEqualTo".
    :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator
    :param amount: budget threshold amount.
    :type amount: float
    :param unit: unit of currency being used.
    :type unit: str
    :param current_spend: current spend.
    :type current_spend: float
    :param contact_emails: list of emails to contact.
    :type contact_emails: list[str]
    :param contact_groups: list of action groups to broadcast to.
    :type contact_groups: list[str]
    :param contact_roles: list of contact roles.
    :type contact_roles: list[str]
    :param overriding_alert: overriding alert.
    :type overriding_alert: str
    """

    _attribute_map = {
        "time_grain_type": {"key": "timeGrainType", "type": "str"},
        "period_start_date": {"key": "periodStartDate", "type": "str"},
        "triggered_by": {"key": "triggeredBy", "type": "str"},
        "resource_group_filter": {"key": "resourceGroupFilter", "type": "[object]"},
        "resource_filter": {"key": "resourceFilter", "type": "[object]"},
        "meter_filter": {"key": "meterFilter", "type": "[object]"},
        "tag_filter": {"key": "tagFilter", "type": "object"},
        "threshold": {"key": "threshold", "type": "float"},
        "operator": {"key": "operator", "type": "str"},
        "amount": {"key": "amount", "type": "float"},
        "unit": {"key": "unit", "type": "str"},
        "current_spend": {"key": "currentSpend", "type": "float"},
        "contact_emails": {"key": "contactEmails", "type": "[str]"},
        "contact_groups": {"key": "contactGroups", "type": "[str]"},
        "contact_roles": {"key": "contactRoles", "type": "[str]"},
        "overriding_alert": {"key": "overridingAlert", "type": "str"},
    }

    def __init__(
        self,
        *,
        time_grain_type: Optional[Union[str, "AlertTimeGrainType"]] = None,
        period_start_date: Optional[str] = None,
        triggered_by: Optional[str] = None,
        resource_group_filter: Optional[List[object]] = None,
        resource_filter: Optional[List[object]] = None,
        meter_filter: Optional[List[object]] = None,
        tag_filter: Optional[object] = None,
        threshold: Optional[float] = None,
        operator: Optional[Union[str, "AlertOperator"]] = None,
        amount: Optional[float] = None,
        unit: Optional[str] = None,
        current_spend: Optional[float] = None,
        contact_emails: Optional[List[str]] = None,
        contact_groups: Optional[List[str]] = None,
        contact_roles: Optional[List[str]] = None,
        overriding_alert: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.time_grain_type = time_grain_type
        self.period_start_date = period_start_date
        self.triggered_by = triggered_by
        self.resource_group_filter = resource_group_filter
        self.resource_filter = resource_filter
        self.meter_filter = meter_filter
        self.tag_filter = tag_filter
        self.threshold = threshold
        self.operator = operator
        self.amount = amount
        self.unit = unit
        self.current_spend = current_spend
        self.contact_emails = contact_emails
        self.contact_groups = contact_groups
        self.contact_roles = contact_roles
        self.overriding_alert = overriding_alert
class AlertsResult(msrest.serialization.Model):
    """A page of alert results.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: List of alerts.
    :vartype value: list[~azure.mgmt.costmanagement.models.Alert]
    :ivar next_link: URL to get the next set of alerts results if there are any.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Alert]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Both fields are read-only and filled in by the service."""
        super().__init__(**kwargs)
        self.value = None
        self.next_link = None
class CommonExportProperties(msrest.serialization.Model):
    """Properties shared by all export resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param format: The format of the export being delivered. Currently only 'Csv' is supported.
     Possible values include: "Csv".
    :type format: str or ~azure.mgmt.costmanagement.models.FormatType
    :param delivery_info: Required. Has delivery information for the export.
    :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
    :param definition: Required. Has the definition for the export.
    :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
    :param run_history: If requested, has the most recent execution history for the export.
    :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
    :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
     next execution time.
    :vartype next_run_time_estimate: ~datetime.datetime
    """

    _validation = {
        "delivery_info": {"required": True},
        "definition": {"required": True},
        "next_run_time_estimate": {"readonly": True},
    }

    _attribute_map = {
        "format": {"key": "format", "type": "str"},
        "delivery_info": {"key": "deliveryInfo", "type": "ExportDeliveryInfo"},
        "definition": {"key": "definition", "type": "ExportDefinition"},
        "run_history": {"key": "runHistory", "type": "ExportExecutionListResult"},
        "next_run_time_estimate": {"key": "nextRunTimeEstimate", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        delivery_info: "ExportDeliveryInfo",
        definition: "ExportDefinition",
        format: Optional[Union[str, "FormatType"]] = None,
        run_history: Optional["ExportExecutionListResult"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.format = format
        self.delivery_info = delivery_info
        self.definition = definition
        self.run_history = run_history
        # Server-computed; never sent by the client.
        self.next_run_time_estimate = None
class Dimension(Resource):
    """A cost-management dimension resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar description: Dimension description.
    :vartype description: str
    :ivar filter_enabled: Filter enabled.
    :vartype filter_enabled: bool
    :ivar grouping_enabled: Grouping enabled.
    :vartype grouping_enabled: bool
    :param data:
    :type data: list[str]
    :ivar total: Total number of data for the dimension.
    :vartype total: int
    :ivar category: Dimension category.
    :vartype category: str
    :ivar usage_start: Usage start.
    :vartype usage_start: ~datetime.datetime
    :ivar usage_end: Usage end.
    :vartype usage_end: ~datetime.datetime
    :ivar next_link: The link (url) to the next page of results.
    :vartype next_link: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "tags": {"readonly": True},
        "description": {"readonly": True},
        "filter_enabled": {"readonly": True},
        "grouping_enabled": {"readonly": True},
        "total": {"readonly": True},
        "category": {"readonly": True},
        "usage_start": {"readonly": True},
        "usage_end": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "description": {"key": "properties.description", "type": "str"},
        "filter_enabled": {"key": "properties.filterEnabled", "type": "bool"},
        "grouping_enabled": {"key": "properties.groupingEnabled", "type": "bool"},
        "data": {"key": "properties.data", "type": "[str]"},
        "total": {"key": "properties.total", "type": "int"},
        "category": {"key": "properties.category", "type": "str"},
        "usage_start": {"key": "properties.usageStart", "type": "iso-8601"},
        "usage_end": {"key": "properties.usageEnd", "type": "iso-8601"},
        "next_link": {"key": "properties.nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        data: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # "data" is the only client-settable field; everything else is
        # populated by the service and starts out empty.
        for _attr in ("description", "filter_enabled", "grouping_enabled",
                      "total", "category", "usage_start", "usage_end",
                      "next_link"):
            setattr(self, _attr, None)
        self.data = data
class DimensionsListResult(msrest.serialization.Model):
    """The set of dimensions returned by a list operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of dimensions.
    :vartype value: list[~azure.mgmt.costmanagement.models.Dimension]
    """

    _validation = {
        "value": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Dimension]"},
    }

    def __init__(self, **kwargs):
        """``value`` is read-only and filled in by the service."""
        super().__init__(**kwargs)
        self.value = None
class DismissAlertPayload(msrest.serialization.Model):
    """Request body used when updating (dismissing) an alert.

    :param definition: defines the type of alert.
    :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition
    :param description: Alert description.
    :type description: str
    :param source: Source of alert. Possible values include: "Preset", "User".
    :type source: str or ~azure.mgmt.costmanagement.models.AlertSource
    :param details: Alert details.
    :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails
    :param cost_entity_id: related budget.
    :type cost_entity_id: str
    :param status: alert status. Possible values include: "None", "Active", "Overridden",
     "Resolved", "Dismissed".
    :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus
    :param creation_time: dateTime in which alert was created.
    :type creation_time: str
    :param close_time: dateTime in which alert was closed.
    :type close_time: str
    :param modification_time: dateTime in which alert was last modified.
    :type modification_time: str
    :param status_modification_user_name:
    :type status_modification_user_name: str
    :param status_modification_time: dateTime in which the alert status was last modified.
    :type status_modification_time: str
    """

    _attribute_map = {
        "definition": {"key": "properties.definition", "type": "AlertPropertiesDefinition"},
        "description": {"key": "properties.description", "type": "str"},
        "source": {"key": "properties.source", "type": "str"},
        "details": {"key": "properties.details", "type": "AlertPropertiesDetails"},
        "cost_entity_id": {"key": "properties.costEntityId", "type": "str"},
        "status": {"key": "properties.status", "type": "str"},
        "creation_time": {"key": "properties.creationTime", "type": "str"},
        "close_time": {"key": "properties.closeTime", "type": "str"},
        "modification_time": {"key": "properties.modificationTime", "type": "str"},
        "status_modification_user_name": {"key": "properties.statusModificationUserName", "type": "str"},
        "status_modification_time": {"key": "properties.statusModificationTime", "type": "str"},
    }

    def __init__(
        self,
        *,
        definition: Optional["AlertPropertiesDefinition"] = None,
        description: Optional[str] = None,
        source: Optional[Union[str, "AlertSource"]] = None,
        details: Optional["AlertPropertiesDetails"] = None,
        cost_entity_id: Optional[str] = None,
        status: Optional[Union[str, "AlertStatus"]] = None,
        creation_time: Optional[str] = None,
        close_time: Optional[str] = None,
        modification_time: Optional[str] = None,
        status_modification_user_name: Optional[str] = None,
        status_modification_time: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.definition = definition
        self.description = description
        self.source = source
        self.details = details
        self.cost_entity_id = cost_entity_id
        self.status = status
        self.creation_time = creation_time
        self.close_time = close_time
        self.modification_time = modification_time
        self.status_modification_user_name = status_modification_user_name
        self.status_modification_time = status_modification_time
class ErrorDetails(msrest.serialization.Model):
    """Detail information for a service error.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Both fields are read-only and filled in by the service."""
        super().__init__(**kwargs)
        self.code = None
        self.message = None
class ErrorResponse(msrest.serialization.Model):
    """Error response indicates that the service is not able to process the incoming request. The reason is provided in the error message.

    Some Error responses:

    *
      429 TooManyRequests - Request is throttled. Retry after waiting for the time specified in the "x-ms-ratelimit-microsoft.consumption-retry-after" header.

    *
      503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the time specified in the "Retry-After" header.

    :param error: The details of the error.
    :type error: ~azure.mgmt.costmanagement.models.ErrorDetails
    """

    _attribute_map = {
        "error": {"key": "error", "type": "ErrorDetails"},
    }

    def __init__(
        self,
        *,
        error: Optional["ErrorDetails"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.error = error
class ProxyResource(msrest.serialization.Model):
    """Base model for proxy resources that carry an eTag.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
     used to determine whether the user is updating the latest version or not.
    :type e_tag: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "e_tag": {"key": "eTag", "type": "str"},
    }

    def __init__(
        self,
        *,
        e_tag: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # id/name/type are read-only; only the eTag is client-settable.
        for _attr in ("id", "name", "type"):
            setattr(self, _attr, None)
        self.e_tag = e_tag
class Export(ProxyResource):
    """A cost-management export resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be
     used to determine whether the user is updating the latest version or not.
    :type e_tag: str
    :param format: The format of the export being delivered. Currently only 'Csv' is supported.
     Possible values include: "Csv".
    :type format: str or ~azure.mgmt.costmanagement.models.FormatType
    :param delivery_info: Has delivery information for the export.
    :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
    :param definition: Has the definition for the export.
    :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
    :param run_history: If requested, has the most recent execution history for the export.
    :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
    :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the
     next execution time.
    :vartype next_run_time_estimate: ~datetime.datetime
    :param schedule: Has schedule information for the export.
    :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "next_run_time_estimate": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "e_tag": {"key": "eTag", "type": "str"},
        "format": {"key": "properties.format", "type": "str"},
        "delivery_info": {"key": "properties.deliveryInfo", "type": "ExportDeliveryInfo"},
        "definition": {"key": "properties.definition", "type": "ExportDefinition"},
        "run_history": {"key": "properties.runHistory", "type": "ExportExecutionListResult"},
        "next_run_time_estimate": {"key": "properties.nextRunTimeEstimate", "type": "iso-8601"},
        "schedule": {"key": "properties.schedule", "type": "ExportSchedule"},
    }

    def __init__(
        self,
        *,
        e_tag: Optional[str] = None,
        format: Optional[Union[str, "FormatType"]] = None,
        delivery_info: Optional["ExportDeliveryInfo"] = None,
        definition: Optional["ExportDefinition"] = None,
        run_history: Optional["ExportExecutionListResult"] = None,
        schedule: Optional["ExportSchedule"] = None,
        **kwargs
    ):
        # The eTag is handled by the ProxyResource base class.
        super().__init__(e_tag=e_tag, **kwargs)
        self.format = format
        self.delivery_info = delivery_info
        self.definition = definition
        self.run_history = run_history
        # Server-computed; never sent by the client.
        self.next_run_time_estimate = None
        self.schedule = schedule
class ExportDataset(msrest.serialization.Model):
    """Describes the data contained in the export.

    :param granularity: The granularity of rows in the export. Currently only 'Daily' is supported.
     Possible values include: "Daily".
    :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
    :param configuration: The export dataset configuration.
    :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration
    """

    _attribute_map = {
        "granularity": {"key": "granularity", "type": "str"},
        "configuration": {"key": "configuration", "type": "ExportDatasetConfiguration"},
    }

    def __init__(
        self,
        *,
        granularity: Optional[Union[str, "GranularityType"]] = None,
        configuration: Optional["ExportDatasetConfiguration"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.granularity = granularity
        self.configuration = configuration
class ExportDatasetConfiguration(msrest.serialization.Model):
    """The export dataset configuration. Allows columns to be selected for the export. If not provided then the export will include all available columns.

    :param columns: Array of column names to be included in the export. If not provided then the
     export will include all available columns. The available columns can vary by customer channel
     (see examples).
    :type columns: list[str]
    """

    _attribute_map = {
        "columns": {"key": "columns", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        columns: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.columns = columns
class ExportDefinition(msrest.serialization.Model):
    """Defines what an export contains and the period it covers.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the export. Note that 'Usage' is equivalent to 'ActualCost'
     and is applicable to exports that do not yet provide data for charges or amortization for
     service reservations. Possible values include: "Usage", "ActualCost", "AmortizedCost".
    :type type: str or ~azure.mgmt.costmanagement.models.ExportType
    :param timeframe: Required. The time frame for pulling data for the export. If custom, then a
     specific time period must be provided. Possible values include: "MonthToDate",
     "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
    :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType
    :param time_period: Has time period for pulling data for the export.
    :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod
    :param data_set: The definition for data in the export.
    :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset
    """

    _validation = {
        "type": {"required": True},
        "timeframe": {"required": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "timeframe": {"key": "timeframe", "type": "str"},
        "time_period": {"key": "timePeriod", "type": "ExportTimePeriod"},
        "data_set": {"key": "dataSet", "type": "ExportDataset"},
    }

    def __init__(
        self,
        *,
        type: Union[str, "ExportType"],
        timeframe: Union[str, "TimeframeType"],
        time_period: Optional["ExportTimePeriod"] = None,
        data_set: Optional["ExportDataset"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.timeframe = timeframe
        self.time_period = time_period
        self.data_set = data_set
class ExportDeliveryDestination(msrest.serialization.Model):
    """The destination information for the delivery of the export. To allow access to a storage account, you must register the account's subscription with the Microsoft.CostManagementExports resource provider. This is required once per subscription. When creating an export in the Azure portal, it is done automatically, however API users need to register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services .

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The resource id of the storage account where exports will be
     delivered.
    :type resource_id: str
    :param container: Required. The name of the container where exports will be uploaded.
    :type container: str
    :param root_folder_path: The name of the directory where exports will be uploaded.
    :type root_folder_path: str
    """

    _validation = {
        "resource_id": {"required": True},
        "container": {"required": True},
    }

    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
        "container": {"key": "container", "type": "str"},
        "root_folder_path": {"key": "rootFolderPath", "type": "str"},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        container: str,
        root_folder_path: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.resource_id = resource_id
        self.container = container
        self.root_folder_path = root_folder_path
class ExportDeliveryInfo(msrest.serialization.Model):
    """Where and how a generated export is delivered.

    All required parameters must be populated in order to send to Azure.

    :param destination: Required. Has destination for the export being delivered.
    :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination
    """

    _validation = {
        "destination": {"required": True},
    }

    _attribute_map = {
        "destination": {"key": "destination", "type": "ExportDeliveryDestination"},
    }

    def __init__(
        self,
        *,
        destination: "ExportDeliveryDestination",
        **kwargs
    ):
        super().__init__(**kwargs)
        self.destination = destination
class ExportExecution(Resource):
    """An export execution.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :param execution_type: The type of the export execution. Possible values include: "OnDemand",
     "Scheduled".
    :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType
    :param status: The last known status of the export execution. Possible values include:
     "Queued", "InProgress", "Completed", "Failed", "Timeout", "NewDataNotAvailable",
     "DataNotAvailable".
    :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus
    :param submitted_by: The identifier for the entity that executed the export. For OnDemand
     executions it is the user email. For scheduled executions it is 'System'.
    :type submitted_by: str
    :param submitted_time: The time when export was queued to be executed.
    :type submitted_time: ~datetime.datetime
    :param processing_start_time: The time when export was picked up to be executed.
    :type processing_start_time: ~datetime.datetime
    :param processing_end_time: The time when the export execution finished.
    :type processing_end_time: ~datetime.datetime
    :param file_name: The name of the exported file.
    :type file_name: str
    :param run_settings: The export settings that were in effect for this execution.
    :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties
    :param error: The details of any error.
    :type error: ~azure.mgmt.costmanagement.models.ErrorDetails
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "tags": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "execution_type": {"key": "properties.executionType", "type": "str"},
        "status": {"key": "properties.status", "type": "str"},
        "submitted_by": {"key": "properties.submittedBy", "type": "str"},
        "submitted_time": {"key": "properties.submittedTime", "type": "iso-8601"},
        "processing_start_time": {"key": "properties.processingStartTime", "type": "iso-8601"},
        "processing_end_time": {"key": "properties.processingEndTime", "type": "iso-8601"},
        "file_name": {"key": "properties.fileName", "type": "str"},
        "run_settings": {"key": "properties.runSettings", "type": "CommonExportProperties"},
        "error": {"key": "properties.error", "type": "ErrorDetails"},
    }

    def __init__(
        self,
        *,
        execution_type: Optional[Union[str, "ExecutionType"]] = None,
        status: Optional[Union[str, "ExecutionStatus"]] = None,
        submitted_by: Optional[str] = None,
        submitted_time: Optional[datetime.datetime] = None,
        processing_start_time: Optional[datetime.datetime] = None,
        processing_end_time: Optional[datetime.datetime] = None,
        file_name: Optional[str] = None,
        run_settings: Optional["CommonExportProperties"] = None,
        error: Optional["ErrorDetails"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.execution_type = execution_type
        self.status = status
        self.submitted_by = submitted_by
        self.submitted_time = submitted_time
        self.processing_start_time = processing_start_time
        self.processing_end_time = processing_end_time
        self.file_name = file_name
        self.run_settings = run_settings
        self.error = error
class ExportExecutionListResult(msrest.serialization.Model):
    """Result of listing the execution history of an export.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: A list of export executions.
    :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution]
    """

    _validation = {
        "value": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[ExportExecution]"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated; always starts out empty on the client.
        self.value = None
class ExportListResult(msrest.serialization.Model):
    """Result of listing exports. It contains a list of available exports in the scope provided.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of exports.
    :vartype value: list[~azure.mgmt.costmanagement.models.Export]
    """

    _validation = {
        "value": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Export]"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated; always starts out empty on the client.
        self.value = None
class ExportProperties(CommonExportProperties):
    """The properties of the export.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param format: The format of the export being delivered. Currently only 'Csv' is supported.
     Possible values include: "Csv".
    :type format: str or ~azure.mgmt.costmanagement.models.FormatType
    :param delivery_info: Required. Has delivery information for the export.
    :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo
    :param definition: Required. Has the definition for the export.
    :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition
    :param run_history: If requested, has the most recent execution history for the export.
    :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
    :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of
     the next execution time.
    :vartype next_run_time_estimate: ~datetime.datetime
    :param schedule: Has schedule information for the export.
    :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule
    """

    _validation = {
        "delivery_info": {"required": True},
        "definition": {"required": True},
        "next_run_time_estimate": {"readonly": True},
    }

    _attribute_map = {
        "format": {"key": "format", "type": "str"},
        "delivery_info": {"key": "deliveryInfo", "type": "ExportDeliveryInfo"},
        "definition": {"key": "definition", "type": "ExportDefinition"},
        "run_history": {"key": "runHistory", "type": "ExportExecutionListResult"},
        "next_run_time_estimate": {"key": "nextRunTimeEstimate", "type": "iso-8601"},
        "schedule": {"key": "schedule", "type": "ExportSchedule"},
    }

    def __init__(
        self,
        *,
        delivery_info: "ExportDeliveryInfo",
        definition: "ExportDefinition",
        format: Optional[Union[str, "FormatType"]] = None,
        run_history: Optional["ExportExecutionListResult"] = None,
        schedule: Optional["ExportSchedule"] = None,
        **kwargs
    ):
        # Everything except `schedule` is owned by the base class.
        super().__init__(
            format=format,
            delivery_info=delivery_info,
            definition=definition,
            run_history=run_history,
            **kwargs
        )
        self.schedule = schedule
class ExportRecurrencePeriod(msrest.serialization.Model):
    """The start and end date for recurrence schedule.

    All required parameters must be populated in order to send to Azure.

    :param from_property: Required. The start date of recurrence.
    :type from_property: ~datetime.datetime
    :param to: The end date of recurrence.
    :type to: ~datetime.datetime
    """

    _validation = {
        "from_property": {"required": True},
    }

    # 'from' is a Python keyword, hence the from_property attribute name.
    _attribute_map = {
        "from_property": {"key": "from", "type": "iso-8601"},
        "to": {"key": "to", "type": "iso-8601"},
    }

    def __init__(
        self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs
    ):
        super().__init__(**kwargs)
        self.from_property = from_property
        self.to = to
class ExportSchedule(msrest.serialization.Model):
    """The schedule associated with the export.

    All required parameters must be populated in order to send to Azure.

    :param status: The status of the export's schedule. If 'Inactive', the export's schedule is
     paused. Possible values include: "Active", "Inactive".
    :type status: str or ~azure.mgmt.costmanagement.models.StatusType
    :param recurrence: Required. The schedule recurrence. Possible values include: "Daily",
     "Weekly", "Monthly", "Annually".
    :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType
    :param recurrence_period: Has start and end date of the recurrence. The start date must be in
     future. If present, the end date must be greater than start date.
    :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod
    """

    _validation = {
        "recurrence": {"required": True},
    }

    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "recurrence": {"key": "recurrence", "type": "str"},
        "recurrence_period": {"key": "recurrencePeriod", "type": "ExportRecurrencePeriod"},
    }

    def __init__(
        self,
        *,
        recurrence: Union[str, "RecurrenceType"],
        status: Optional[Union[str, "StatusType"]] = None,
        recurrence_period: Optional["ExportRecurrencePeriod"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.status = status
        self.recurrence = recurrence
        self.recurrence_period = recurrence_period
class ExportTimePeriod(msrest.serialization.Model):
    """The date range for data in the export. This should only be specified with timeFrame set to
    'Custom'. The maximum date range is 3 months.

    All required parameters must be populated in order to send to Azure.

    :param from_property: Required. The start date for export data.
    :type from_property: ~datetime.datetime
    :param to: Required. The end date for export data.
    :type to: ~datetime.datetime
    """

    _validation = {
        "from_property": {"required": True},
        "to": {"required": True},
    }

    # 'from' is a Python keyword, hence the from_property attribute name.
    _attribute_map = {
        "from_property": {"key": "from", "type": "iso-8601"},
        "to": {"key": "to", "type": "iso-8601"},
    }

    def __init__(self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs):
        super().__init__(**kwargs)
        self.from_property = from_property
        self.to = to
class ForecastDataset(msrest.serialization.Model):
    """The definition of data present in the forecast.

    :param granularity: The granularity of rows in the forecast. Possible values include: "Daily".
    :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
    :param configuration: Has configuration information for the data in the export. The
     configuration will be ignored if aggregation and grouping are provided.
    :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
    :param aggregation: Dictionary of aggregation expression to use in the forecast. The key of
     each item in the dictionary is the alias for the aggregated column. Forecast can have up to 2
     aggregation clauses.
    :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
    :param filter: Has filter expression to use in the forecast.
    :type filter: ~azure.mgmt.costmanagement.models.QueryFilter
    """

    _attribute_map = {
        "granularity": {"key": "granularity", "type": "str"},
        "configuration": {"key": "configuration", "type": "QueryDatasetConfiguration"},
        "aggregation": {"key": "aggregation", "type": "{QueryAggregation}"},
        "filter": {"key": "filter", "type": "QueryFilter"},
    }

    def __init__(
        self,
        *,
        granularity: Optional[Union[str, "GranularityType"]] = None,
        configuration: Optional["QueryDatasetConfiguration"] = None,
        aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
        filter: Optional["QueryFilter"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.granularity = granularity
        self.configuration = configuration
        self.aggregation = aggregation
        self.filter = filter
class ForecastDefinition(msrest.serialization.Model):
    """The definition of a forecast.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the forecast. Possible values include: "Usage",
     "ActualCost", "AmortizedCost".
    :type type: str or ~azure.mgmt.costmanagement.models.ForecastType
    :param timeframe: Required. The time frame for pulling data for the forecast. If custom, then
     a specific time period must be provided. Possible values include: "MonthToDate",
     "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
    :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType
    :param time_period: Has time period for pulling data for the forecast.
    :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
    :param dataset: Has definition for data in this forecast.
    :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset
    :param include_actual_cost: A boolean determining if actualCost will be included.
    :type include_actual_cost: bool
    :param include_fresh_partial_cost: A boolean determining if FreshPartialCost will be included.
    :type include_fresh_partial_cost: bool
    """

    _validation = {
        "type": {"required": True},
        "timeframe": {"required": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "timeframe": {"key": "timeframe", "type": "str"},
        "time_period": {"key": "timePeriod", "type": "QueryTimePeriod"},
        "dataset": {"key": "dataset", "type": "ForecastDataset"},
        "include_actual_cost": {"key": "includeActualCost", "type": "bool"},
        "include_fresh_partial_cost": {"key": "includeFreshPartialCost", "type": "bool"},
    }

    def __init__(
        self,
        *,
        type: Union[str, "ForecastType"],
        timeframe: Union[str, "ForecastTimeframeType"],
        time_period: Optional["QueryTimePeriod"] = None,
        dataset: Optional["ForecastDataset"] = None,
        include_actual_cost: Optional[bool] = None,
        include_fresh_partial_cost: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.timeframe = timeframe
        self.time_period = time_period
        self.dataset = dataset
        self.include_actual_cost = include_actual_cost
        self.include_fresh_partial_cost = include_fresh_partial_cost
class KpiProperties(msrest.serialization.Model):
    """Each KPI must contain a 'type' and 'enabled' key.

    :param type: KPI type (Forecast, Budget). Possible values include: "Forecast", "Budget".
    :type type: str or ~azure.mgmt.costmanagement.models.KpiType
    :param id: ID of resource related to metric (budget).
    :type id: str
    :param enabled: Show the KPI in the UI?.
    :type enabled: bool
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "id": {"key": "id", "type": "str"},
        "enabled": {"key": "enabled", "type": "bool"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "KpiType"]] = None,
        id: Optional[str] = None,
        enabled: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.id = id
        self.enabled = enabled
class Operation(msrest.serialization.Model):
    """A Cost management REST API operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Operation name: {provider}/{resource}/{operation}.
    :vartype name: str
    :param display: The object that represents the operation.
    :type display: ~azure.mgmt.costmanagement.models.OperationDisplay
    """

    _validation = {
        "name": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "display": {"key": "display", "type": "OperationDisplay"},
    }

    def __init__(self, *, display: Optional["OperationDisplay"] = None, **kwargs):
        super().__init__(**kwargs)
        # `name` is read-only and populated by the service.
        self.name = None
        self.display = display
class OperationDisplay(msrest.serialization.Model):
    """The object that represents the operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provider: Service provider: Microsoft.CostManagement.
    :vartype provider: str
    :ivar resource: Resource on which the operation is performed: Dimensions, Query.
    :vartype resource: str
    :ivar operation: Operation type: Read, write, delete, etc.
    :vartype operation: str
    """

    _validation = {
        "provider": {"readonly": True},
        "resource": {"readonly": True},
        "operation": {"readonly": True},
    }

    _attribute_map = {
        "provider": {"key": "provider", "type": "str"},
        "resource": {"key": "resource", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All attributes are read-only and populated by the service.
        self.provider = None
        self.resource = None
        self.operation = None
class OperationListResult(msrest.serialization.Model):
    """Result of listing cost management operations. It contains a list of operations and a URL
    link to get the next set of results.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: List of cost management operations supported by the Microsoft.CostManagement
     resource provider.
    :vartype value: list[~azure.mgmt.costmanagement.models.Operation]
    :ivar next_link: URL to get the next set of operation list results if there are any.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Operation]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both attributes are read-only and populated by the service.
        self.value = None
        self.next_link = None
class PivotProperties(msrest.serialization.Model):
    """Each pivot must contain a 'type' and 'name'.

    :param type: Data type to show in view. Possible values include: "Dimension", "TagKey".
    :type type: str or ~azure.mgmt.costmanagement.models.PivotType
    :param name: Data field to show in view.
    :type name: str
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(
        self,
        *,
        type: Optional[Union[str, "PivotType"]] = None,
        name: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.name = name
class QueryAggregation(msrest.serialization.Model):
    """The aggregation expression to be used in the query.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the column to aggregate.
    :type name: str
    :param function: Required. The name of the aggregation function to use. Possible values
     include: "Sum".
    :type function: str or ~azure.mgmt.costmanagement.models.FunctionType
    """

    _validation = {
        "name": {"required": True},
        "function": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "function": {"key": "function", "type": "str"},
    }

    def __init__(self, *, name: str, function: Union[str, "FunctionType"], **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.function = function
class QueryColumn(msrest.serialization.Model):
    """A column returned by a query.

    :param name: The name of column.
    :type name: str
    :param type: The type of column.
    :type type: str
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, *, name: Optional[str] = None, type: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.type = type
class QueryComparisonExpression(msrest.serialization.Model):
    """The comparison expression to be used in the query.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the column to use in comparison.
    :type name: str
    :param operator: Required. The operator to use for comparison. Possible values include: "In",
     "Contains".
    :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType
    :param values: Required. Array of values to use for comparison.
    :type values: list[str]
    """

    _validation = {
        "name": {"required": True},
        "operator": {"required": True},
        "values": {"required": True, "min_items": 1},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "operator": {"key": "operator", "type": "str"},
        "values": {"key": "values", "type": "[str]"},
    }

    def __init__(
        self, *, name: str, operator: Union[str, "OperatorType"], values: List[str], **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.operator = operator
        self.values = values
class QueryDataset(msrest.serialization.Model):
    """The definition of data present in the query.

    :param granularity: The granularity of rows in the query. Possible values include: "Daily".
    :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType
    :param configuration: Has configuration information for the data in the export. The
     configuration will be ignored if aggregation and grouping are provided.
    :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration
    :param aggregation: Dictionary of aggregation expression to use in the query. The key of each
     item in the dictionary is the alias for the aggregated column. Query can have up to 2
     aggregation clauses.
    :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]
    :param grouping: Array of group by expression to use in the query. Query can have up to 2
     group by clauses.
    :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping]
    :param filter: Has filter expression to use in the query.
    :type filter: ~azure.mgmt.costmanagement.models.QueryFilter
    """

    _validation = {
        "grouping": {"max_items": 2, "min_items": 0},
    }

    _attribute_map = {
        "granularity": {"key": "granularity", "type": "str"},
        "configuration": {"key": "configuration", "type": "QueryDatasetConfiguration"},
        "aggregation": {"key": "aggregation", "type": "{QueryAggregation}"},
        "grouping": {"key": "grouping", "type": "[QueryGrouping]"},
        "filter": {"key": "filter", "type": "QueryFilter"},
    }

    def __init__(
        self,
        *,
        granularity: Optional[Union[str, "GranularityType"]] = None,
        configuration: Optional["QueryDatasetConfiguration"] = None,
        aggregation: Optional[Dict[str, "QueryAggregation"]] = None,
        grouping: Optional[List["QueryGrouping"]] = None,
        filter: Optional["QueryFilter"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.granularity = granularity
        self.configuration = configuration
        self.aggregation = aggregation
        self.grouping = grouping
        self.filter = filter
class QueryDatasetConfiguration(msrest.serialization.Model):
    """The configuration of dataset in the query.

    :param columns: Array of column names to be included in the query. Any valid query column name
     is allowed. If not provided, then query includes all columns.
    :type columns: list[str]
    """

    _attribute_map = {
        "columns": {"key": "columns", "type": "[str]"},
    }

    def __init__(self, *, columns: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.columns = columns
class QueryDefinition(msrest.serialization.Model):
    """The definition of a query.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the query. Possible values include: "Usage", "ActualCost",
     "AmortizedCost".
    :type type: str or ~azure.mgmt.costmanagement.models.ExportType
    :param timeframe: Required. The time frame for pulling data for the query. If custom, then a
     specific time period must be provided. Possible values include: "MonthToDate",
     "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom".
    :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType
    :param time_period: Has time period for pulling data for the query.
    :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod
    :param dataset: Has definition for data in this query.
    :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset
    """

    _validation = {
        "type": {"required": True},
        "timeframe": {"required": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "timeframe": {"key": "timeframe", "type": "str"},
        "time_period": {"key": "timePeriod", "type": "QueryTimePeriod"},
        "dataset": {"key": "dataset", "type": "QueryDataset"},
    }

    def __init__(
        self,
        *,
        type: Union[str, "ExportType"],
        timeframe: Union[str, "TimeframeType"],
        time_period: Optional["QueryTimePeriod"] = None,
        dataset: Optional["QueryDataset"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.type = type
        self.timeframe = timeframe
        self.time_period = time_period
        self.dataset = dataset
class QueryFilter(msrest.serialization.Model):
    """The filter expression to be used in the export.

    :param and_property: The logical "AND" expression. Must have at least 2 items.
    :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter]
    :param or_property: The logical "OR" expression. Must have at least 2 items.
    :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter]
    :param not_property: The logical "NOT" expression.
    :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter
    :param dimension: Has comparison expression for a dimension.
    :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression
    :param tag: Has comparison expression for a tag.
    :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression
    """

    _validation = {
        "and_property": {"min_items": 2},
        "or_property": {"min_items": 2},
    }

    # 'and', 'or' and 'not' are Python keywords, hence the *_property attribute names.
    _attribute_map = {
        "and_property": {"key": "and", "type": "[QueryFilter]"},
        "or_property": {"key": "or", "type": "[QueryFilter]"},
        "not_property": {"key": "not", "type": "QueryFilter"},
        "dimension": {"key": "dimension", "type": "QueryComparisonExpression"},
        "tag": {"key": "tag", "type": "QueryComparisonExpression"},
    }

    def __init__(
        self,
        *,
        and_property: Optional[List["QueryFilter"]] = None,
        or_property: Optional[List["QueryFilter"]] = None,
        not_property: Optional["QueryFilter"] = None,
        dimension: Optional["QueryComparisonExpression"] = None,
        tag: Optional["QueryComparisonExpression"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.and_property = and_property
        self.or_property = or_property
        self.not_property = not_property
        self.dimension = dimension
        self.tag = tag
class QueryGrouping(msrest.serialization.Model):
    """The group by expression to be used in the query.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Has type of the column to group. Possible values include: "Tag",
     "Dimension".
    :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType
    :param name: Required. The name of the column to group.
    :type name: str
    """

    _validation = {
        "type": {"required": True},
        "name": {"required": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(self, *, type: Union[str, "QueryColumnType"], name: str, **kwargs):
        super().__init__(**kwargs)
        self.type = type
        self.name = name
class QueryResult(Resource):
    """Result of query. It contains all columns listed under groupings and aggregation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :param next_link: The link (url) to the next page of results.
    :type next_link: str
    :param columns: Array of columns.
    :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn]
    :param rows: Array of rows.
    :type rows: list[list[object]]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "tags": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "next_link": {"key": "properties.nextLink", "type": "str"},
        "columns": {"key": "properties.columns", "type": "[QueryColumn]"},
        "rows": {"key": "properties.rows", "type": "[[object]]"},
    }

    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        columns: Optional[List["QueryColumn"]] = None,
        rows: Optional[List[List[object]]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.next_link = next_link
        self.columns = columns
        self.rows = rows
class QueryTimePeriod(msrest.serialization.Model):
    """The start and end date for pulling data for the query.

    All required parameters must be populated in order to send to Azure.

    :param from_property: Required. The start date to pull data from.
    :type from_property: ~datetime.datetime
    :param to: Required. The end date to pull data to.
    :type to: ~datetime.datetime
    """

    _validation = {
        "from_property": {"required": True},
        "to": {"required": True},
    }

    # 'from' is a Python keyword, hence the from_property attribute name.
    _attribute_map = {
        "from_property": {"key": "from", "type": "iso-8601"},
        "to": {"key": "to", "type": "iso-8601"},
    }

    def __init__(self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs):
        super().__init__(**kwargs)
        self.from_property = from_property
        self.to = to
class ReportConfigAggregation(msrest.serialization.Model):
    """The aggregation expression to be used in the report.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the column to aggregate.
    :type name: str
    :param function: Required. The name of the aggregation function to use. Possible values
     include: "Sum".
    :type function: str or ~azure.mgmt.costmanagement.models.FunctionType
    """

    _validation = {
        "name": {"required": True},
        "function": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "function": {"key": "function", "type": "str"},
    }

    def __init__(self, *, name: str, function: Union[str, "FunctionType"], **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.function = function
class ReportConfigComparisonExpression(msrest.serialization.Model):
    """The comparison expression to be used in the report.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the column to use in comparison.
    :type name: str
    :param operator: Required. The operator to use for comparison. Possible values include: "In",
     "Contains".
    :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType
    :param values: Required. Array of values to use for comparison.
    :type values: list[str]
    """

    _validation = {
        "name": {"required": True},
        "operator": {"required": True},
        "values": {"required": True, "min_items": 1},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "operator": {"key": "operator", "type": "str"},
        "values": {"key": "values", "type": "[str]"},
    }

    def __init__(
        self, *, name: str, operator: Union[str, "OperatorType"], values: List[str], **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.operator = operator
        self.values = values
class ReportConfigDataset(msrest.serialization.Model):
    """The definition of data present in the report.

    :param granularity: The granularity of rows in the report. Possible values include: "Daily",
     "Monthly".
    :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType
    :param configuration: Has configuration information for the data in the report. The
     configuration will be ignored if aggregation and grouping are provided.
    :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration
    :param aggregation: Dictionary of aggregation expression to use in the report. The key of each
     item in the dictionary is the alias for the aggregated column. Report can have up to 2
     aggregation clauses.
    :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]
    :param grouping: Array of group by expression to use in the report. Report can have up to 2
     group by clauses.
    :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]
    :param sorting: Array of order by expression to use in the report.
    :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]
    :param filter: Has filter expression to use in the report.
    :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter
    """

    _validation = {
        "grouping": {"max_items": 2, "min_items": 0},
    }

    _attribute_map = {
        "granularity": {"key": "granularity", "type": "str"},
        "configuration": {"key": "configuration", "type": "ReportConfigDatasetConfiguration"},
        "aggregation": {"key": "aggregation", "type": "{ReportConfigAggregation}"},
        "grouping": {"key": "grouping", "type": "[ReportConfigGrouping]"},
        "sorting": {"key": "sorting", "type": "[ReportConfigSorting]"},
        "filter": {"key": "filter", "type": "ReportConfigFilter"},
    }

    def __init__(
        self,
        *,
        granularity: Optional[Union[str, "ReportGranularityType"]] = None,
        configuration: Optional["ReportConfigDatasetConfiguration"] = None,
        aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None,
        grouping: Optional[List["ReportConfigGrouping"]] = None,
        sorting: Optional[List["ReportConfigSorting"]] = None,
        filter: Optional["ReportConfigFilter"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.granularity = granularity
        self.configuration = configuration
        self.aggregation = aggregation
        self.grouping = grouping
        self.sorting = sorting
        self.filter = filter
class ReportConfigDatasetAutoGenerated(msrest.serialization.Model):
    """Describes the data present in the report.

    Identical to ReportConfigDataset except that the filter uses the
    auto-generated filter type.

    :param granularity: Row granularity: "Daily" or "Monthly".
    :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType
    :param configuration: Configuration for the report data; ignored when
     aggregation and grouping are supplied.
    :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration
    :param aggregation: Aggregation expressions keyed by the alias of the
     aggregated column; at most 2 clauses.
    :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]
    :param grouping: Group-by expressions; at most 2 clauses.
    :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]
    :param sorting: Order-by expressions.
    :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]
    :param filter: Filter expression applied to the report.
    :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated
    """

    _validation = {
        "grouping": {"max_items": 2, "min_items": 0},
    }

    _attribute_map = {
        "granularity": {"key": "granularity", "type": "str"},
        "configuration": {"key": "configuration", "type": "ReportConfigDatasetConfiguration"},
        "aggregation": {"key": "aggregation", "type": "{ReportConfigAggregation}"},
        "grouping": {"key": "grouping", "type": "[ReportConfigGrouping]"},
        "sorting": {"key": "sorting", "type": "[ReportConfigSorting]"},
        "filter": {"key": "filter", "type": "ReportConfigFilterAutoGenerated"},
    }

    def __init__(
        self,
        *,
        granularity: Optional[Union[str, "ReportGranularityType"]] = None,
        configuration: Optional["ReportConfigDatasetConfiguration"] = None,
        aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None,
        grouping: Optional[List["ReportConfigGrouping"]] = None,
        sorting: Optional[List["ReportConfigSorting"]] = None,
        filter: Optional["ReportConfigFilterAutoGenerated"] = None,
        **kwargs
    ):
        super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs)
        self.filter = filter
        self.sorting = sorting
        self.grouping = grouping
        self.aggregation = aggregation
        self.configuration = configuration
        self.granularity = granularity
class ReportConfigDatasetConfiguration(msrest.serialization.Model):
    """Configuration of the dataset in the report.

    :param columns: Column names to include in the report; any valid report
     column name is allowed. When omitted, the report includes all columns.
    :type columns: list[str]
    """

    _attribute_map = {
        "columns": {"key": "columns", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        columns: Optional[List[str]] = None,
        **kwargs
    ):
        super(ReportConfigDatasetConfiguration, self).__init__(**kwargs)
        self.columns = columns
class ReportConfigDefinition(msrest.serialization.Model):
    """The definition of a report config.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the report; actual usage and
     forecasted data are differentiated by dates. Possible values include:
     "Usage".
    :type type: str or ~azure.mgmt.costmanagement.models.ReportType
    :param timeframe: Required. Time frame for pulling data; a "Custom"
     timeframe requires a specific time_period. Possible values include:
     "WeekToDate", "MonthToDate", "YearToDate", "Custom".
    :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
    :param time_period: Time period for pulling data for the report.
    :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
    :param dataset: Definition of the data in this report config.
    :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated
    """

    _validation = {
        "type": {"required": True},
        "timeframe": {"required": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "timeframe": {"key": "timeframe", "type": "str"},
        "time_period": {"key": "timePeriod", "type": "ReportConfigTimePeriod"},
        "dataset": {"key": "dataset", "type": "ReportConfigDatasetAutoGenerated"},
    }

    def __init__(
        self,
        *,
        type: Union[str, "ReportType"],
        timeframe: Union[str, "ReportTimeframeType"],
        time_period: Optional["ReportConfigTimePeriod"] = None,
        dataset: Optional["ReportConfigDatasetAutoGenerated"] = None,
        **kwargs
    ):
        super(ReportConfigDefinition, self).__init__(**kwargs)
        self.dataset = dataset
        self.time_period = time_period
        self.timeframe = timeframe
        self.type = type
class ReportConfigFilter(msrest.serialization.Model):
    """The filter expression to be used in the report.

    :param and_property: The logical "AND" expression; needs at least 2 items.
    :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]
    :param or_property: The logical "OR" expression; needs at least 2 items.
    :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]
    :param not_property: The logical "NOT" expression.
    :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter
    :param dimension: Comparison expression for a dimension.
    :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
    :param tag: Comparison expression for a tag.
    :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
    """

    _validation = {
        "and_property": {"min_items": 2},
        "or_property": {"min_items": 2},
    }

    _attribute_map = {
        # Wire names "and"/"or"/"not" are Python keywords, hence *_property.
        "and_property": {"key": "and", "type": "[ReportConfigFilter]"},
        "or_property": {"key": "or", "type": "[ReportConfigFilter]"},
        "not_property": {"key": "not", "type": "ReportConfigFilter"},
        "dimension": {"key": "dimension", "type": "ReportConfigComparisonExpression"},
        "tag": {"key": "tag", "type": "ReportConfigComparisonExpression"},
    }

    def __init__(
        self,
        *,
        and_property: Optional[List["ReportConfigFilter"]] = None,
        or_property: Optional[List["ReportConfigFilter"]] = None,
        not_property: Optional["ReportConfigFilter"] = None,
        dimension: Optional["ReportConfigComparisonExpression"] = None,
        tag: Optional["ReportConfigComparisonExpression"] = None,
        **kwargs
    ):
        super(ReportConfigFilter, self).__init__(**kwargs)
        self.tag = tag
        self.dimension = dimension
        self.not_property = not_property
        self.or_property = or_property
        self.and_property = and_property
class ReportConfigFilterAutoGenerated(msrest.serialization.Model):
    """The filter expression to be used in the report.

    :param and_property: The logical "AND" expression; needs at least 2 items.
    :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]
    :param or_property: The logical "OR" expression; needs at least 2 items.
    :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]
    :param not_property: The logical "NOT" expression.
    :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated
    :param dimension: Comparison expression for a dimension.
    :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
    :param tag: Comparison expression for a tag.
    :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression
    """

    _validation = {
        "and_property": {"min_items": 2},
        "or_property": {"min_items": 2},
    }

    _attribute_map = {
        # Wire names "and"/"or"/"not" are Python keywords, hence *_property.
        "and_property": {"key": "and", "type": "[ReportConfigFilterAutoGenerated]"},
        "or_property": {"key": "or", "type": "[ReportConfigFilterAutoGenerated]"},
        "not_property": {"key": "not", "type": "ReportConfigFilterAutoGenerated"},
        "dimension": {"key": "dimension", "type": "ReportConfigComparisonExpression"},
        "tag": {"key": "tag", "type": "ReportConfigComparisonExpression"},
    }

    def __init__(
        self,
        *,
        and_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None,
        or_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None,
        not_property: Optional["ReportConfigFilterAutoGenerated"] = None,
        dimension: Optional["ReportConfigComparisonExpression"] = None,
        tag: Optional["ReportConfigComparisonExpression"] = None,
        **kwargs
    ):
        super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs)
        self.tag = tag
        self.dimension = dimension
        self.not_property = not_property
        self.or_property = or_property
        self.and_property = and_property
class ReportConfigGrouping(msrest.serialization.Model):
    """A group-by expression for the report.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Kind of column to group by: "Tag" or "Dimension".
    :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType
    :param name: Required. Name of the column to group by. This version
     supports subscription lowest possible grain.
    :type name: str
    """

    _validation = {
        "type": {"required": True},
        "name": {"required": True},
    }

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(
        self,
        *,
        type: Union[str, "ReportConfigColumnType"],
        name: str,
        **kwargs
    ):
        super(ReportConfigGrouping, self).__init__(**kwargs)
        self.name = name
        self.type = type
class ReportConfigSorting(msrest.serialization.Model):
    """An order-by expression for the report.

    All required parameters must be populated in order to send to Azure.

    :param direction: Sort direction: "Ascending" or "Descending".
    :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection
    :param name: Required. Name of the column to sort by.
    :type name: str
    """

    _validation = {
        "name": {"required": True},
    }

    _attribute_map = {
        "direction": {"key": "direction", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: str,
        direction: Optional[Union[str, "ReportConfigSortingDirection"]] = None,
        **kwargs
    ):
        super(ReportConfigSorting, self).__init__(**kwargs)
        self.name = name
        self.direction = direction
class ReportConfigTimePeriod(msrest.serialization.Model):
    """The start and end date for pulling data for the report.

    All required parameters must be populated in order to send to Azure.

    :param from_property: Required. The start date to pull data from.
    :type from_property: ~datetime.datetime
    :param to: Required. The end date to pull data to.
    :type to: ~datetime.datetime
    """

    _validation = {
        "from_property": {"required": True},
        "to": {"required": True},
    }

    _attribute_map = {
        # The wire name is "from", a Python keyword, hence from_property.
        "from_property": {"key": "from", "type": "iso-8601"},
        "to": {"key": "to", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        from_property: datetime.datetime,
        to: datetime.datetime,
        **kwargs
    ):
        super(ReportConfigTimePeriod, self).__init__(**kwargs)
        self.to = to
        self.from_property = from_property
class View(ProxyResource):
    """States and configurations of Cost Analysis.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param e_tag: eTag of the resource, used to determine whether the user is
     updating the latest version in concurrent-update scenarios.
    :type e_tag: str
    :param display_name: User input name of the view. Required.
    :type display_name: str
    :param scope: Cost Management scope to save the view on: a subscription,
     resource group, billing account, department, enrollment account, billing
     profile, invoice section, management group, external billing account or
     external subscription resource id.
    :type scope: str
    :ivar created_on: Date the user created this view.
    :vartype created_on: ~datetime.datetime
    :ivar modified_on: Date when the user last modified this view.
    :vartype modified_on: ~datetime.datetime
    :param chart: Chart type of the main view in Cost Analysis. Required.
     Possible values include: "Area", "Line", "StackedColumn",
     "GroupedColumn", "Table".
    :type chart: str or ~azure.mgmt.costmanagement.models.ChartType
    :param accumulated: Show costs accumulated over time: "true" or "false".
    :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType
    :param metric: Metric to use when displaying costs: "ActualCost",
     "AmortizedCost" or "AHUB".
    :type metric: str or ~azure.mgmt.costmanagement.models.MetricType
    :param kpis: List of KPIs to show in Cost Analysis UI.
    :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties]
    :param pivots: Configuration of 3 sub-views in the Cost Analysis UI.
    :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties]
    :param type_properties_query_type: The type of the report; actual usage
     and forecasted data are differentiated by dates. Possible values
     include: "Usage".
    :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType
    :param timeframe: The time frame for pulling data for the report; a
     "Custom" timeframe requires a specific time period. Possible values
     include: "WeekToDate", "MonthToDate", "YearToDate", "Custom".
    :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType
    :param time_period: Has time period for pulling data for the report.
    :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod
    :param dataset: Has definition for data in this report config.
    :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "created_on": {"readonly": True},
        "modified_on": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "e_tag": {"key": "eTag", "type": "str"},
        "display_name": {"key": "properties.displayName", "type": "str"},
        "scope": {"key": "properties.scope", "type": "str"},
        "created_on": {"key": "properties.createdOn", "type": "iso-8601"},
        "modified_on": {"key": "properties.modifiedOn", "type": "iso-8601"},
        "chart": {"key": "properties.chart", "type": "str"},
        "accumulated": {"key": "properties.accumulated", "type": "str"},
        "metric": {"key": "properties.metric", "type": "str"},
        "kpis": {"key": "properties.kpis", "type": "[KpiProperties]"},
        "pivots": {"key": "properties.pivots", "type": "[PivotProperties]"},
        "type_properties_query_type": {"key": "properties.query.type", "type": "str"},
        "timeframe": {"key": "properties.query.timeframe", "type": "str"},
        "time_period": {"key": "properties.query.timePeriod", "type": "ReportConfigTimePeriod"},
        "dataset": {"key": "properties.query.dataset", "type": "ReportConfigDataset"},
    }

    def __init__(
        self,
        *,
        e_tag: Optional[str] = None,
        display_name: Optional[str] = None,
        scope: Optional[str] = None,
        chart: Optional[Union[str, "ChartType"]] = None,
        accumulated: Optional[Union[str, "AccumulatedType"]] = None,
        metric: Optional[Union[str, "MetricType"]] = None,
        kpis: Optional[List["KpiProperties"]] = None,
        pivots: Optional[List["PivotProperties"]] = None,
        type_properties_query_type: Optional[Union[str, "ReportType"]] = None,
        timeframe: Optional[Union[str, "ReportTimeframeType"]] = None,
        time_period: Optional["ReportConfigTimePeriod"] = None,
        dataset: Optional["ReportConfigDataset"] = None,
        **kwargs
    ):
        # e_tag is owned by the ProxyResource base class.
        super(View, self).__init__(e_tag=e_tag, **kwargs)
        self.display_name = display_name
        self.scope = scope
        # Server-populated timestamps; never sent by the client.
        self.created_on = None
        self.modified_on = None
        self.chart = chart
        self.accumulated = accumulated
        self.metric = metric
        self.kpis = kpis
        self.pivots = pivots
        self.type_properties_query_type = type_properties_query_type
        self.timeframe = timeframe
        self.time_period = time_period
        self.dataset = dataset
class ViewListResult(msrest.serialization.Model):
    """Result of listing views. It contains a list of available views.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: The list of views.
    :vartype value: list[~azure.mgmt.costmanagement.models.View]
    :ivar next_link: The link (url) to the next page of results.
    :vartype next_link: str
    """

    _validation = {
        "value": {"readonly": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[View]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ViewListResult, self).__init__(**kwargs)
        # Both fields are server-populated; clients never set them.
        self.next_link = None
        self.value = None
| 38.16251
| 498
| 0.649005
| 9,864
| 91,819
| 5.916363
| 0.066809
| 0.018472
| 0.045717
| 0.057643
| 0.675474
| 0.643088
| 0.60952
| 0.580853
| 0.564215
| 0.555064
| 0
| 0.001518
| 0.224964
| 91,819
| 2,405
| 499
| 38.178378
| 0.818557
| 0.424618
| 0
| 0.633308
| 0
| 0
| 0.241078
| 0.060967
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042017
| false
| 0
| 0.00382
| 0
| 0.16272
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9921ebf7fdd9b5fb1dd763092a97ae1888e730f
| 3,860
|
py
|
Python
|
test/test_simple_compression.py
|
jayvdb/brotlipy
|
ffddf2ea5adc584c8c353d246bb1077b7e781b63
|
[
"MIT"
] | null | null | null |
test/test_simple_compression.py
|
jayvdb/brotlipy
|
ffddf2ea5adc584c8c353d246bb1077b7e781b63
|
[
"MIT"
] | null | null | null |
test/test_simple_compression.py
|
jayvdb/brotlipy
|
ffddf2ea5adc584c8c353d246bb1077b7e781b63
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_simple_compression
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for compression of single chunks.
"""
import brotli
import pytest
from hypothesis import given
from hypothesis.strategies import binary, integers, sampled_from, one_of
def test_roundtrip_compression_with_files(simple_compressed_file):
    """
    Roundtripping data through the compressor works correctly.
    """
    uncompressed_path = simple_compressed_file[0]
    with open(uncompressed_path, 'rb') as f:
        original = f.read()

    recovered = brotli.decompress(brotli.compress(original))
    assert recovered == original
@given(
    chunk_size=integers(min_value=1, max_value=2**12),
    mode=sampled_from(list(brotli.BrotliEncoderMode)),
    quality=integers(min_value=0, max_value=11),
    lgwin=integers(min_value=10, max_value=24),
    lgblock=one_of(
        integers(min_value=0, max_value=0),
        integers(min_value=16, max_value=24)
    ),
)
def test_streaming_compression(one_compressed_file,
                               chunk_size,
                               mode,
                               quality,
                               lgwin,
                               lgblock):
    """
    Confirm that the streaming compressor works as expected.
    """
    compressor = brotli.Compressor(
        mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
    )

    chunks = []
    with open(one_compressed_file, 'rb') as f:
        # Feed the file to the compressor in chunk_size pieces.
        for piece in iter(lambda: f.read(chunk_size), b''):
            chunks.append(compressor.compress(piece))
    chunks.append(compressor.finish())

    with open(one_compressed_file, 'rb') as f:
        assert brotli.decompress(b''.join(chunks)) == f.read()
@given(
    chunk_size=integers(min_value=1, max_value=2**12),
    mode=sampled_from(list(brotli.BrotliEncoderMode)),
    quality=integers(min_value=0, max_value=11),
    lgwin=integers(min_value=10, max_value=24),
    lgblock=one_of(
        integers(min_value=0, max_value=0),
        integers(min_value=16, max_value=24)
    ),
)
def test_streaming_compression_flush(one_compressed_file,
                                     chunk_size,
                                     mode,
                                     quality,
                                     lgwin,
                                     lgblock):
    """
    Confirm that the streaming compressor works as expected, including flushes
    after each chunk.
    """
    compressor = brotli.Compressor(
        mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
    )

    chunks = []
    with open(one_compressed_file, 'rb') as f:
        for piece in iter(lambda: f.read(chunk_size), b''):
            chunks.append(compressor.compress(piece))
            # Flushing mid-stream must not corrupt the output.
            chunks.append(compressor.flush())
    chunks.append(compressor.finish())

    with open(one_compressed_file, 'rb') as f:
        assert brotli.decompress(b''.join(chunks)) == f.read()
@given(binary())
def test_compressed_data_roundtrips(s):
    """Arbitrary byte strings survive a compress/decompress round trip."""
    round_tripped = brotli.decompress(brotli.compress(s))
    assert round_tripped == s
@given(binary(), binary())
def test_compressed_data_with_dictionaries(s, dictionary):
    """Round trips also work when a custom dictionary is supplied."""
    decompressor = brotli.Decompressor(dictionary)
    recovered = decompressor.decompress(
        brotli.compress(s, dictionary=dictionary)
    )
    assert recovered == s
@pytest.mark.parametrize(
    "params",
    [
        {"mode": 52},
        {"quality": 52},
        {"lgwin": 52},
        {"lgblock": 52},
    ]
)
@pytest.mark.parametrize("exception_cls", [brotli.Error, brotli.error])
def test_bad_compressor_parameters(params, exception_cls):
    """Each out-of-range encoder parameter raises under both error aliases."""
    with pytest.raises(exception_cls):
        brotli.Compressor(**params)
| 29.692308
| 78
| 0.615803
| 431
| 3,860
| 5.306265
| 0.232019
| 0.048098
| 0.069961
| 0.050284
| 0.652383
| 0.597289
| 0.597289
| 0.597289
| 0.597289
| 0.597289
| 0
| 0.015686
| 0.273316
| 3,860
| 129
| 79
| 29.922481
| 0.799643
| 0.08342
| 0
| 0.580645
| 0
| 0
| 0.014938
| 0
| 0
| 0
| 0
| 0
| 0.053763
| 1
| 0.064516
| false
| 0
| 0.043011
| 0
| 0.107527
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9957182927ee0480e35dd837a4d9ee2d8587462
| 3,207
|
py
|
Python
|
nuitka/codegen/LoopCodes.py
|
RESP3CT88/Nuitka
|
0fcc25d9f00c4fc78c79a863c4b7987f573962e1
|
[
"Apache-2.0"
] | 1
|
2021-05-25T12:48:28.000Z
|
2021-05-25T12:48:28.000Z
|
venv/Lib/site-packages/nuitka/codegen/LoopCodes.py
|
matthijsvanvliet/raytracing-python
|
73d692b47330ab94eedde579a51063e3a907e92b
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/nuitka/codegen/LoopCodes.py
|
matthijsvanvliet/raytracing-python
|
73d692b47330ab94eedde579a51063e3a907e92b
|
[
"MIT"
] | null | null | null |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop codes.
Code generation for loops, breaking them, or continuing them. In Nuitka, there
are no for-loops or while-loops at this point. They have been re-formulated in
a simpler loop without a condition, and statements there-in that break under
certain conditions.
See Developer Manual for how the CPython loops are mapped to these nodes.
"""
from .CodeHelpers import generateStatementSequenceCode
from .ErrorCodes import getErrorExitBoolCode
from .ExceptionCodes import getExceptionUnpublishedReleaseCode
from .LabelCodes import getGotoCode, getLabelCode
def generateLoopBreakCode(statement, emit, context):
    """Emit a jump to the innermost loop's break target."""
    # Functions used for generation all accept statement, but this one does
    # not use it. pylint: disable=unused-argument

    # Any not-yet-published exception must be released before leaving the loop.
    getExceptionUnpublishedReleaseCode(emit, context)

    getGotoCode(context.getLoopBreakTarget(), emit)
def generateLoopContinueCode(statement, emit, context):
    """Emit a jump to the innermost loop's continue target."""
    # Functions used for generation all accept statement, but this one does
    # not use it. pylint: disable=unused-argument

    # Any not-yet-published exception must be released before re-entering the loop.
    getExceptionUnpublishedReleaseCode(emit, context)

    getGotoCode(context.getLoopContinueTarget(), emit)
def generateLoopCode(statement, emit, context):
    """Emit code for a loop statement: start label, body, re-loop goto, end label."""
    loop_start_label = context.allocateLabel("loop_start")

    # A body that always aborts can never reach "break", so no end label is
    # needed in that case.
    if not statement.isStatementAborting():
        loop_end_label = context.allocateLabel("loop_end")
    else:
        loop_end_label = None

    getLabelCode(loop_start_label, emit)

    # Install this loop's break/continue targets, remembering the enclosing
    # loop's targets so they can be restored afterwards.
    old_loop_break = context.setLoopBreakTarget(loop_end_label)
    old_loop_continue = context.setLoopContinueTarget(loop_start_label)

    generateStatementSequenceCode(
        statement_sequence=statement.subnode_loop_body,
        allow_none=True,
        emit=emit,
        context=context,
    )

    context.setLoopBreakTarget(old_loop_break)
    context.setLoopContinueTarget(old_loop_continue)

    # Note: We are using the wrong line here, but it's an exception, it's
    # unclear what line it would be anyway.
    old_source_ref = context.setCurrentSourceCodeReference(
        statement.getSourceReference()
    )

    # NOTE(review): CONSIDER_THREADING() presumably checks for pending thread
    # switches/interrupts between iterations — confirm against the C runtime.
    getErrorExitBoolCode(
        condition="CONSIDER_THREADING() == false", emit=emit, context=context
    )

    context.setCurrentSourceCodeReference(old_source_ref)

    getGotoCode(loop_start_label, emit)
    if loop_end_label is not None:
        getLabelCode(loop_end_label, emit)
| 34.858696
| 111
| 0.752728
| 391
| 3,207
| 6.069054
| 0.455243
| 0.032448
| 0.025284
| 0.013485
| 0.157606
| 0.133165
| 0.133165
| 0.133165
| 0.133165
| 0.133165
| 0
| 0.003051
| 0.182413
| 3,207
| 91
| 112
| 35.241758
| 0.901983
| 0.443717
| 0
| 0.051282
| 0
| 0
| 0.026781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.102564
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b995831c9a98c5b05882c5bbcc4b241cd51503bd
| 4,837
|
py
|
Python
|
3_module/C_BloomFilter.py
|
L4mborg1n1-D14610/Algoritms_and_DataStructure
|
f61b7434dbc600da02e8ec38648fa84beb160f17
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
3_module/C_BloomFilter.py
|
L4mborg1n1-D14610/Algoritms_and_DataStructure
|
f61b7434dbc600da02e8ec38648fa84beb160f17
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
3_module/C_BloomFilter.py
|
L4mborg1n1-D14610/Algoritms_and_DataStructure
|
f61b7434dbc600da02e8ec38648fa84beb160f17
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
import math
from sys import exit
# итак, n - приблизительное число элементов в массиве, P - вероятность ложноположительного ответа, тогда размер
# структуры m = -(nlog2P) / ln2 (2 - основание), количество хеш-функций будет равно -log2P
# хеш-функции используются вида: (((i + 1)*x + p(i+1)) mod M) mod m,где - x - ключ, i - номер хэш-функции,
# pi - i-тое по счету простое число, а M - 31ое число Мерсенна, M = 2^31 - 1, M = 2 147 483 647, M - простое число.
# При подсчёте хеш-функций необходимо знать первые k простых чисел. Посчитаем их один раз в конструкторе BloomFilter
# и будем хранить в структуре данных.
# Также нам необходимо создать битовый массив размера m, однако по умолчанию в питоне битовый массив отсутствует,
# поэтому будем использовать байтовый массив. Реализуем для удобства отдельную СД, из методов необходимо: изменить
# указанный бит на 1, проверить является ли указанный бит 1 и напечатать (вернуть) сам массив
# 31st Mersenne number, used as the hash modulus: M = 2^31 - 1 (a prime).
Mersen_31 = 2147483647
class BitArray:
    """A fixed-size bit set backed by a bytearray.

    Python has no built-in bit array, so bits are packed eight per byte:
    bit i lives in byte i // 8 at position i % 8, most significant bit first.
    """

    def __init__(self, size):
        # ceil(size / 8) bytes are enough to hold `size` bits, all zero.
        self.__array = bytearray(int(math.ceil(size / 8)))
        self.__size = size

    def add_bit(self, i):
        """Set bit i to 1."""
        self.__array[i // 8] |= 2 ** (7 - (i % 8))

    def check_bit(self, i):
        """Return True when bit i is set, False otherwise."""
        return (self.__array[i // 8] & (2 ** (7 - (i % 8)))) != 0

    def print(self):
        """Return the bits rendered as a '0'/'1' string of length `size`."""
        pieces = []
        for byte in self.__array:
            pieces.append(bin(byte)[2:].rjust(8, '0'))
        return "".join(pieces)[:self.__size]
class BloomFilter:
    """Bloom filter over integer keys.

    Sizing follows the standard formulas: m = -n * log2(P) / ln 2 bits and
    k = -log2(P) hash functions, where n is the expected element count and
    P the target false-positive probability. Hash i is
    (((i + 1) * x + p_(i+1)) mod M) mod m with p_j the j-th prime and M the
    Mersenne prime 2^31 - 1.
    """

    def __init__(self, n: int, p: float):
        self.size = int(-round(n * math.log2(p) / math.log(2)))
        self.hash_numbers = int(-round(math.log2(p)))
        self.__prime_numbers = []
        self.__get_prime(self.hash_numbers + 1)
        self.__bitarray = BitArray(self.size)

    def __get_prime(self, prime_size):
        """Fill __prime_numbers with the first prime_size primes."""
        self.__prime_numbers.append(2)
        if prime_size == 1:
            return
        candidate = 3
        while len(self.__prime_numbers) < prime_size:
            # Trial division by the odd primes found so far; index 0 holds 2,
            # which can never divide an odd candidate, so it is skipped.
            if all(candidate % q for q in self.__prime_numbers[1:]):
                self.__prime_numbers.append(candidate)
            candidate += 2

    def __get_hash(self, x, i):
        """Value of the i-th hash function for key x."""
        return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size

    def add(self, key: int):
        """Insert key by setting all k hash positions."""
        for i in range(self.hash_numbers):
            self.__bitarray.add_bit(self.__get_hash(key, i))

    def search(self, key: int):
        """Return True when key is possibly present (no false negatives)."""
        return all(
            self.__bitarray.check_bit(self.__get_hash(key, i))
            for i in range(self.hash_numbers)
        )

    def print(self):
        """Return the underlying bit array as a '0'/'1' string."""
        return self.__bitarray.print()
# Read commands from stdin until a valid "set <n> <P>" line arrives, then
# build the filter; every malformed line answers "error".
bloom_filter = 0
while True:
    try:
        line = input().split()
        if len(line) == 0:
            # Blank lines are silently skipped.
            continue
        else:
            if line[0] == "set":
                try:
                    elements_number = int(line[1])
                    probability = float(line[2])
                    # n must be positive and P strictly inside (0, 1).
                    if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
                        print("error")
                        continue
                    bloom_filter = BloomFilter(elements_number, probability)
                    # Near-degenerate parameters can round to a zero-size
                    # filter or zero hash functions; reject those too.
                    if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
                        print("error")
                        continue
                    break
                except TypeError:
                    print("error")
                    continue
            else:
                print("error")
                continue
    except EOFError:
        # Input exhausted before a valid "set": terminate the program.
        exit()
# Report the chosen filter parameters, then serve commands until EOF.
print(bloom_filter.size, bloom_filter.hash_numbers)
while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        elif line[0] == "print":
            # Dump the raw bit array.
            print(bloom_filter.print())
        elif (line[0] == "add") & (line[1].isnumeric()):
            bloom_filter.add(int(line[1]))
        elif (line[0] == "search") & (line[1].isnumeric()):
            # Prints 1 for "possibly present", 0 for "definitely absent".
            print(int(bloom_filter.search(int(line[1]))))
        else:
            print("error")
    except EOFError:
        break
| 34.798561
| 116
| 0.551995
| 603
| 4,837
| 4.252073
| 0.334992
| 0.031591
| 0.049922
| 0.025741
| 0.120905
| 0.085803
| 0.071763
| 0.071763
| 0.060062
| 0.060062
| 0
| 0.028536
| 0.340707
| 4,837
| 138
| 117
| 35.050725
| 0.775478
| 0.247674
| 0
| 0.409524
| 0
| 0
| 0.011862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.019048
| 0.019048
| 0.209524
| 0.114286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9982b7f935a0931c3a9dc4e8ec48b12b5523acb
| 22,060
|
py
|
Python
|
lingvo/core/inference_graph_exporter.py
|
RunzheYang/lingvo
|
1291e29812f9ee9836f9cacbb05db9ec6b095234
|
[
"Apache-2.0"
] | 1
|
2021-09-02T18:04:13.000Z
|
2021-09-02T18:04:13.000Z
|
lingvo/core/inference_graph_exporter.py
|
RunzheYang/lingvo
|
1291e29812f9ee9836f9cacbb05db9ec6b095234
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/inference_graph_exporter.py
|
RunzheYang/lingvo
|
1291e29812f9ee9836f9cacbb05db9ec6b095234
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for exporting an InferenceGraph proto from model params."""
import collections
import contextlib
import re
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import bfloat16_variables
from lingvo.core import inference_graph_pb2
from lingvo.core import py_utils
import six
from google.protobuf import text_format
FLAGS = tf.flags.FLAGS
# InferenceDeviceOptions contains options to configure inference on the device.
# device: Device to infer on.
# retain_device_placement: If true, the specified device in the generated
#   inference graph nodes will be retained. Otherwise, the specified device
#   will be cleared, so that the runtime can choose automatically.
# var_options: Options on handling variables. For TPUs, variables can be
#   either placed on device through 'ON_DEVICE' option, or treated as
#   constants with AS_CONSTANTS.
# gen_init_op: Whether to serialize initialization ops for the device. For TPUs,
#   servers can be initialized globally once, in which case this should be
#   turned off to avoid tripping initialization checks.
# dtype_override: Whether to override the dtype to use for activations and
#   weights in the model. Options supported are None or tf.bfloat16.
# fprop_dtype_override: Like dtype_override but, per its use below, applies to
#   activations (fprop) only; mutually exclusive with dtype_override.
InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [
    'device', 'retain_device_placement', 'var_options', 'gen_init_op',
    'dtype_override', 'fprop_dtype_override'
])

# Module-level flag driven by the scopes below. None = no scope active;
# True/False = const-guaranteeing explicitly enabled/disabled.
_CONST_GUARANTEE = None
@contextlib.contextmanager
def NoConstGuaranteeScope():
  """Disallows const-guaranteeing of variables within this scope.

  Saves and restores both the module-level _CONST_GUARANTEE flag and the
  current variable scope's caching device, so it can safely nest inside a
  ConstGuaranteeScope.
  """
  global _CONST_GUARANTEE
  var_scope = tf.get_variable_scope()
  # Remember previous state so it can be restored on exit.
  old_caching_device = var_scope.caching_device
  old_val = _CONST_GUARANTEE
  var_scope.set_caching_device(None)
  _CONST_GUARANTEE = False
  yield
  # Restore prior state.
  _CONST_GUARANTEE = old_val
  var_scope.set_caching_device(old_caching_device)
# Marks variables as constants for compilation.
def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):
  """Custom variable getter that wraps reads in tf.guarantee_const.

  Only active while the module-level _CONST_GUARANTEE flag is truthy (set by
  ConstGuaranteeScope); otherwise it defers to the underlying getter
  unchanged.
  """
  global _CONST_GUARANTEE
  if _CONST_GUARANTEE:
    # Clear any control dependencies so the GuaranteeConst op is not tied to
    # the surrounding execution context.
    with tf.control_dependencies(None):
      return tf.guarantee_const(
          getter(name, *args, **kwargs), name=name + '/GuaranteeConst')
  else:
    return getter(name, *args, **kwargs)
@contextlib.contextmanager
def ConstGuaranteeScope():
  """Treats all variables under this scope as constants.

  Installs MaybeGuaranteeConstGetter as the variable scope's custom getter and
  caches variable reads on their own device; restores the previous getter,
  caching device, and _CONST_GUARANTEE flag on exit.
  """
  global _CONST_GUARANTEE
  var_scope = tf.get_variable_scope()
  # Save prior scope state for restoration.
  old_custom_getter = var_scope.custom_getter
  old_caching_device = var_scope.caching_device
  old_val = _CONST_GUARANTEE
  var_scope.set_custom_getter(MaybeGuaranteeConstGetter)
  var_scope.set_caching_device(lambda op: op.device)
  _CONST_GUARANTEE = True
  yield
  # Restore prior scope state.
  _CONST_GUARANTEE = old_val
  var_scope.set_custom_getter(old_custom_getter)
  var_scope.set_caching_device(old_caching_device)
@contextlib.contextmanager
def _DummyScope():
  """No-op context manager; placeholder when no const scope is needed."""
  yield None
def _GetVarName(v):
return v.name[:-len(':0')]
def _MakeVariableDictionary(variables):
  """Returns a dictionary with name -> tf.Variable() mapping."""
  # Key each variable by its ':0'-stripped name.
  return {_GetVarName(var): var for var in variables}
def IsTpu(device_options):
  """Returns True iff the inference target device is a TPU."""
  return 'tpu' == device_options.device
def ShouldForceBfloat16ForWeightsAndActivations(device_options):
  """Returns True iff weights AND activations must be forced to bfloat16."""
  return tf.bfloat16 == device_options.dtype_override
def ShouldForceBfloat16ForActivations(device_options):
  """Returns True iff activations (fprop) alone must be forced to bfloat16."""
  return tf.bfloat16 == device_options.fprop_dtype_override
def ConvertSubgraphDictToProto(subgraphs_dict):
  """Converts dict of subgraphs/feeds/fetches to InferenceGraph.

  Args:
    subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a
      NestedMap.

  Returns:
    Equivalent InferenceGraph.
  """
  proto = inference_graph_pb2.InferenceGraph()
  for subgraph_name, tensors in subgraphs_dict.items():
    fetches, feeds = tensors[0], tensors[1]
    # The proto stores tensor names, not Tensor instances; drop None entries.
    named_fetches = {k: v.name for k, v in fetches.items() if v is not None}
    named_feeds = {k: v.name for k, v in feeds.items() if v is not None}
    subgraph = proto.subgraphs[subgraph_name]
    subgraph.fetches.update(named_fetches)
    subgraph.feeds.update(named_feeds)
  return proto
def GetOutputOpNames(graph,
                     inference_graph_proto,
                     subgraphs=None,
                     preserve_colocation_nodes=True,
                     preserve_saver_restore_nodes=False,
                     preserve_extra_ops=None):
  """Gets output op names from an inference graph.

  Args:
    graph: The tf graph.
    inference_graph_proto: an InferenceGraph proto.
    subgraphs: an optional list of subgraph names. If provided, only output ops
      from these subgraphs are preserved. Otherwise, all subgraphs are included.
    preserve_colocation_nodes: a Python bool, default to True. Preserves nodes
      colocating with the closure of output ops in the returned array.
    preserve_saver_restore_nodes: a Python bool, default to False. Preserves
      nodes for restoring according to inference_graph_proto.saver_def.
    preserve_extra_ops: an optional list of extra op names to preserve as long
      as they present in the graph.

  Returns:
    Array of tf op names that should be preserved in the graph.
  """
  output_op_names = set()

  def _GetOpName(tensor_or_op_name):
    """Returns the op name of the given node name."""
    # Tensor names have format <op_name>:<output_index>. Some inference
    # graphs put tensors and others put ops in the feeds/fetches (depends
    # on how it is used). We differentiate here. We still do the lookup in
    # the graph to sanity check (versus relying on the text manipulation).
    # If this logic ever breaks, TensorFlow will raise a ValueError with
    # a description of the syntax of each.
    if re.search(r':[0-9]+$', tensor_or_op_name):
      # Tensor-name.
      t = graph.get_tensor_by_name(tensor_or_op_name)
      return t.op.name
    else:
      op = graph.get_operation_by_name(tensor_or_op_name)
      return op.name

  # Collect the op behind every feed and fetch of each (selected) subgraph.
  for subgraph_name, subgraph in inference_graph_proto.subgraphs.items():
    if subgraphs and subgraph_name not in subgraphs:
      tf.logging.info('Skip subgraph %s.', subgraph_name)
      continue
    # Sometimes feeds aren't connected to any outputs but keep them in the graph
    # anyways to avoid errors.
    for tensor_or_op_name in (list(subgraph.feeds.values()) +
                              list(subgraph.fetches.values())):
      output_op_names.add(_GetOpName(tensor_or_op_name))

  if preserve_saver_restore_nodes:
    # Only nodes for restoring is preserved. saver_def.save_tensor_name is
    # skipped because it's only used for saving.
    saver_def = inference_graph_proto.saver_def
    for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]:
      try:
        output_op_names.add(_GetOpName(op_name))
      except KeyError:
        tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name)

  if not preserve_colocation_nodes and not preserve_extra_ops:
    return sorted(list(output_op_names))

  # We also need to preserve any nodes that are used for colocation.
  # E.g., a node may have this attr:
  #   attr {
  #     key: "_class"
  #     value {
  #       list {
  #         s: "loc:@inference/embedding_lookup/Read/ReadVariableOp"
  #       }
  #     }
  #   }
  #
  # In this case, we need to make sure the node
  # inference/embedding_lookup/Read/ReadVariableOp is not pruned.
  #
  # TODO(zhifengc): It's possible that it's better to fix in
  # tf.graph_util.extract_sub_graph.
  graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),
                                              list(output_op_names))
  reachable_vars = [node.name for node in graph_def.node]

  for node in graph.get_operations():
    if preserve_extra_ops and node.name in preserve_extra_ops:
      output_op_names.add(node.name)
    elif preserve_colocation_nodes and '_class' in node.node_def.attr:
      # '_class' attr values look like 'loc:@<op_name>'.
      for loc in node.node_def.attr['_class'].list.s:
        loc = six.ensure_text(loc, 'utf-8')
        if loc.startswith('loc:@'):
          loc_name = loc[5:]
          if loc_name not in reachable_vars:
            # Skip nodes that cannot be reached from the pruned graph.
            continue
          output_op_names.add(node.name)

  return sorted(list(output_op_names))
def _ParamExists(param_obj, param_name):
"""Tests whether param_name is contained in param_obj."""
if not param_obj:
return
for k, _ in param_obj.IterParams():
if k == param_name:
return True
return False
def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names):
  """Freezes a graph from a checkpoint.

  Args:
    graph: tf.Graph.
    saver: The tf.Saver to use for restoration.
    checkpoint: The checkpoint to restore.
    output_op_names: Names of output ops.

  Returns:
    Resulting tf.GraphDef.
  """
  # Bug fix: the session was previously created without ever being closed,
  # leaking its resources. Use it as a context manager (matching
  # _FreezeDefaults) so it is always released.
  with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:
    saver.restore(sess, checkpoint)
    return tf.graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), output_op_names)
def _FreezeDefaults(graph, output_op_names):
  """Default initializes a graph and freezes it.

  Args:
    graph: tf.Graph.
    output_op_names: Names of output ops.

  Returns:
    Resulting tf.GraphDef.
  """
  with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:
    # Run the initializer op registered under 'init_all_variables', then
    # fold the (default) variable values into constants.
    init_op = graph.get_operation_by_name('init_all_variables')
    sess.run(init_op)
    return tf.graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), output_op_names)
class InferenceGraphExporter:
  """Class for exporting inference graphs."""

  @classmethod
  def Export(cls,
             model_cfg,
             model_task_name=None,
             device_options=InferenceDeviceOptions(
                 device='',
                 retain_device_placement=False,
                 var_options=None,
                 gen_init_op=True,
                 dtype_override=None,
                 fprop_dtype_override=None),
             freeze_checkpoint=None,
             freeze_defaults=False,
             export_path=None,
             subgraph_filter=None,
             random_seed=None,
             disable_packed_input=True):
    """Exports a InferenceGraph proto with piecewise subgraphs.

    Sets FLAGS.enable_asserts to False unless user explicitly sets it to True.

    Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing
    and multi-core inference on TPUs work properly.

    Args:
      model_cfg: a Params instance as returned by
        model_registry.GetParams(modelname, 'Test') or model_params.Model().
      model_task_name: The task to generate an inference graph for. Should be
        None for single-task models.
      device_options: Device options for the accelerator used for serving.
      freeze_checkpoint: The checkpoint to load. Loads and freezes the model if
        given.
      freeze_defaults: Default initializes the graph and freeze. Useful for
        early testing of downstream tools without having a checkpoint.
      export_path: If not None, write the inference graph in ASCII to this path.
      subgraph_filter: A string or a list of subgraph names. If not None or
        empty, export only this list of inference subgraphs.
      random_seed: Fixes the random seed in the exported inference graph.
      disable_packed_input: Disable packed input for inference writing purposes.

    Returns:
      InferenceGraph proto.

    Raises:
      ValueError: if the model does not support the listed subgraphs.
    """
    assert issubclass(model_cfg.cls, base_model.BaseModel)
    # dtype_override and fprop_dtype_override are mutually exclusive.
    if device_options.dtype_override and device_options.fprop_dtype_override:
      raise ValueError(
          'device_options{dtype_override,fprop_dtype_override) can not both be'
          'set.')
    if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)):
      subgraph_filter = [subgraph_filter]

    # Disable assertions unless user explicitly enables it.
    if FLAGS['enable_asserts'].using_default_value:
      FLAGS.enable_asserts = False

    # TODO(laurenzo): Work out how much we need to specify here in terms of
    # cluster configuration.
    cls._SetClusterParams(model_cfg.cluster, device_options)

    # Configure the model.
    model_cfg.random_seed = random_seed
    model_cfg.is_inference = True

    if disable_packed_input:

      def _DisablePackedInput(task):
        # Turn off packed_input on encoder/decoder params when present.
        if (_ParamExists(task, 'encoder') and
            _ParamExists(task.encoder, 'packed_input')):
          task.encoder.packed_input = False
        if (_ParamExists(task, 'decoder') and
            _ParamExists(task.decoder, 'packed_input')):
          task.decoder.packed_input = False

      if issubclass(model_cfg.cls, base_model.MultiTaskModel):
        for _, task_param in model_cfg.task_params.IterParams():
          _DisablePackedInput(task_param)
      else:
        _DisablePackedInput(model_cfg.task)

    tf.logging.debug('Model %s params:', model_cfg.name)
    for line in model_cfg.ToText().split('\n'):
      tf.logging.debug('%s', line)

    # Instantiate the graph.
    graph = tf.Graph()
    with graph.as_default():
      tf.random.set_seed(random_seed)
      cluster = model_cfg.cluster.Instantiate()
      device = cluster.GetPlacer()
      tpu_const_scope = _DummyScope()
      if (IsTpu(device_options) and
          device_options.var_options == 'AS_CONSTANTS'):
        # Do not specify devices for variables if we are marking them as
        # constants.
        device = ''
        tpu_const_scope = ConstGuaranteeScope()

      with cluster, tf.device(device), tpu_const_scope:

        bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations(
            device_options)

        if bfloat16_override:
          py_utils.UpdateDtype(model_cfg, tf.bfloat16)
          py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)

        act_bfloat16_override = ShouldForceBfloat16ForActivations(
            device_options)
        if act_bfloat16_override:
          py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)

        # Hard-code TPU-related flags prior to instantiating model.
        old_enable_asserts = FLAGS.enable_asserts
        old_xla_device = FLAGS.xla_device
        if IsTpu(device_options):
          FLAGS.enable_asserts = False
          FLAGS.xla_device = 'tpu'

        try:
          mdl = model_cfg.Instantiate()
          task = mdl.GetTask(model_task_name)

          # Restore EMA-shadowed variables when the model tracks EMA,
          # otherwise the raw global variables.
          variables_to_restore = (
              _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else
              mdl.ema.variables_to_restore(mdl.variables_for_ema))

          if bfloat16_override:
            saver_var_spec = (
                bfloat16_variables
                .get_saver_spec_for_variables_with_bf16_overrides(
                    variables_to_restore))
          else:
            saver_var_spec = variables_to_restore

          saver = tf.train.Saver(saver_var_spec)
          tf.variables_initializer(
              tf.global_variables(), name='init_all_variables')
          if IsTpu(device_options) and device_options.gen_init_op:
            tf.group(tf.tpu.initialize_system(), name='tpu_init_op')

          if freeze_checkpoint or freeze_defaults:
            # Replace variables with tensors using tf.identity in theta before
            # freezing to avoid the graph referencing types of DT_RESOURCE.
            def AddIdentityToTheta(layer):
              layer._private_theta = layer._private_theta.Transform(tf.identity)  # pylint: disable=protected-access
              layer.children.Transform(AddIdentityToTheta)

            AddIdentityToTheta(task)

          inference_graph_proto = inference_graph_pb2.InferenceGraph()
          subgraphs_proto = task.Inference()
          if isinstance(subgraphs_proto, dict):
            subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto)
          for name, subgraph in subgraphs_proto.subgraphs.items():
            if not subgraph_filter or name in subgraph_filter:
              inference_graph_proto.subgraphs[name].CopyFrom(subgraph)

          # Yes, graph collections are bad, however this seems to be the
          # easiest way to get this assets registered from
          # TextFileInitializer.
          assets_collection = tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.ASSET_FILEPATHS)
          for asset in assets_collection:
            if asset.op.type == 'Const' and asset.op.get_attr(
                'dtype') == tf.dtypes.string:
              constant_value = asset.op.get_attr('value')
              if constant_value.string_val:
                tf.logging.info('Found asset file_path: %s',
                                constant_value.string_val[0])
                asset_file_def = inference_graph_proto.asset_file_def.add()
                asset_file_def.tensor_info.name = asset.name
                asset_file_def.filename = constant_value.string_val[0]

          # Add a table init op and global variable init op to the graph.
          # Tables can be declared anywhere in the graph, so this op has to be
          # added last.
          tf.tables_initializer(name='init_all_tables')
        finally:
          # Reset TPU-related flags after model instantiation.
          FLAGS.enable_asserts = old_enable_asserts
          FLAGS.xla_device = old_xla_device

    tf.logging.info('Graph contains ops: %r',
                    [op.name for op in graph.get_operations()])

    # Collection defs
    if not tf.executing_eagerly():
      meta_graph = tf.train.export_meta_graph(graph=graph)
      for key in meta_graph.collection_def:
        tf.logging.info('copying collection %s', key)
        inference_graph_proto.collection_def[key].CopyFrom(
            meta_graph.collection_def[key])
    else:
      tf.logging.warning('Not exporting collection defs '
                         'since operating in eager mode.')

    # Freezing.
    if freeze_defaults or freeze_checkpoint:
      output_op_names = GetOutputOpNames(
          graph,
          inference_graph_proto,
          preserve_colocation_nodes=False,
          preserve_saver_restore_nodes=False)
      if cls._DeviceSupportsFreezing(device_options):
        raise ValueError('freeze_checkpoint cannot be used with device ' +
                         device_options.device)
      if freeze_checkpoint:
        tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint)
        graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint,
                                               output_op_names)
      elif freeze_defaults:
        tf.logging.info('Default initializing graph and freezing.')
        graph_def = _FreezeDefaults(graph, output_op_names)
    else:
      inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def())
      output_op_names = GetOutputOpNames(graph, inference_graph_proto)

      # Prune the graph to just the parts we need.
      # To support restoring, we have to not prune out the restore node.
      output_op_names.append('init_all_tables')
      output_op_names.append('init_all_variables')
      output_op_names.append('save/control_dependency')
      output_op_names.append('save/restore_all')
      if IsTpu(device_options) and device_options.gen_init_op:
        output_op_names.append('tpu_init_op')
      graph_def = graph.as_graph_def()
      tf.logging.info('Pruning graph to output ops: %r', output_op_names)
      graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names)

    if not device_options.retain_device_placement:
      # Clear the device so that the runtime can choose.
      tf.logging.info('Clearing device placement for: %s',
                      device_options.device)
      for node in graph_def.node:
        node.ClearField('device')
      for function in graph_def.library.function:
        for node_def in function.node_def:
          node_def.ClearField('device')

    inference_graph_proto.graph_def.CopyFrom(graph_def)

    if export_path:
      # Write the proto in text format for easy inspection.
      with tf.io.gfile.GFile(export_path, 'w') as f:
        f.write(text_format.MessageToString(inference_graph_proto))

    return inference_graph_proto

  @classmethod
  def _SetClusterParams(cls, cluster_params, device_options):
    """Sets cluster params.

    Args:
      cluster_params: Model().cluster config.
      device_options: InferenceDeviceOptions.
    """

    def Update(p):
      """Update cluster params `p`."""
      p.name = '/job:localhost'
      p.replicas = 1
      p.tpus_per_replica = 1 if IsTpu(device_options) else 0
      p.gpus_per_replica = 0
      p.devices_per_split = 1

    cluster_params.mode = 'sync'
    cluster_params.job = 'decoder'
    cluster_params.add_summary = False
    cluster_params.do_eval = True
    Update(cluster_params.controller)
    Update(cluster_params.worker)
    Update(cluster_params.ps)
    Update(cluster_params.evaler)
    Update(cluster_params.decoder)
    Update(cluster_params.input)

  @classmethod
  def _DeviceSupportsFreezing(cls, device_options):
    """Returns True iff freezing is unsupported for the device (TPU)."""
    return IsTpu(device_options)
| 38.100173
| 116
| 0.694334
| 2,820
| 22,060
| 5.204255
| 0.201064
| 0.02746
| 0.023031
| 0.005724
| 0.199305
| 0.127215
| 0.090147
| 0.069978
| 0.04504
| 0.039725
| 0
| 0.003759
| 0.228151
| 22,060
| 578
| 117
| 38.16609
| 0.858166
| 0.309565
| 0
| 0.167683
| 0
| 0
| 0.059147
| 0.007989
| 0
| 0
| 0
| 0.00346
| 0.018293
| 1
| 0.064024
| false
| 0
| 0.030488
| 0.015244
| 0.155488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9982e3e4e7a4b4799e5780bd7629d5235cc1b40
| 1,836
|
py
|
Python
|
src/preprocessing/annual_hc_by_crime_loc.py
|
VijayKalmath/USCrimeAnalysis
|
14c96aae52547a4f7ea140395c62a621a97def50
|
[
"MIT"
] | null | null | null |
src/preprocessing/annual_hc_by_crime_loc.py
|
VijayKalmath/USCrimeAnalysis
|
14c96aae52547a4f7ea140395c62a621a97def50
|
[
"MIT"
] | null | null | null |
src/preprocessing/annual_hc_by_crime_loc.py
|
VijayKalmath/USCrimeAnalysis
|
14c96aae52547a4f7ea140395c62a621a97def50
|
[
"MIT"
] | null | null | null |
#! usr/env/bin python
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
def main():
    """Builds and saves the annual hate-crime count-by-place table."""
    # Collect the yearly spreadsheets and order them chronologically; the
    # year is encoded in the last four characters of the file stem.
    paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls')
    paths.sort(key=lambda p: int(p[-8:-4]))
    # Seed the result with the first year, then left-join each later year
    # on the "Place" column.
    result = get_place_crime_count(paths[0])
    for path in tqdm(paths[1:]):
        yearly = get_place_crime_count(path)
        result = pd.merge(result, yearly, on="Place", how="left")
    # Persist the combined table.
    result.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False)
def get_place_crime_count(path: str) -> pd.DataFrame:
    """Extracts per-place incident counts from one annual UCR spreadsheet.

    Args:
        path: File path containing 'Table', '_Incidents' and a '<year>.xls'
            suffix; the sheet name and year are parsed from the path itself.

    Returns:
        A two-column DataFrame ('Place', '<year>'). If the spreadsheet cannot
        be read or lacks the expected marker rows, a 47-row all-NaN frame is
        returned so downstream merges still line up.
    """
    # Extract the table name and the year from the given file path.
    t_name = " ".join(path[path.index("Table"):path.index("_Incidents")].split("_"))
    t_year = path[path.index(".xls")-4:path.index(".xls")]
    try:
        # Read the Excel spreadsheet
        df = pd.read_excel(path,sheet_name=t_name)
        # Rows of interest lie between the "Total" row and the
        # "Multiple locations" row.
        start = df.index[df[t_name] == "Total"][0] + 1
        end = df.index[df[t_name] == "Multiple locations"][0]
        # Slice the dataset to the place/count columns only.
        df = df.iloc[start:end,0:2]
        # Reset the index for the reduced dataframe
        df.reset_index(drop = True, inplace = True)
        # Rename the columns
        df.rename(columns={t_name: "Place", "Unnamed: 1": t_year}, inplace = True)
        return df
    except Exception:
        # Bug fix: previously a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt. Missing/malformed data now yields an
        # all-NaN frame with the expected shape.
        i_list = list(range(0,47))
        return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year])
# Script entry point.
if __name__ == '__main__':
    main()
| 33.381818
| 84
| 0.6378
| 285
| 1,836
| 3.933333
| 0.42807
| 0.040143
| 0.03479
| 0.048171
| 0.024978
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010007
| 0.238017
| 1,836
| 54
| 85
| 34
| 0.791279
| 0.264706
| 0
| 0
| 0
| 0
| 0.130204
| 0.065859
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b998534e368ce74be309448b790e384f839c6d4a
| 1,672
|
py
|
Python
|
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
|
ethanjperez/allennlp
|
e520993f16f0da7e2c40f6e44b8dc56338f46b57
|
[
"Apache-2.0"
] | 24
|
2019-09-16T00:10:54.000Z
|
2021-09-08T19:31:51.000Z
|
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
|
ethanjperez/allennlp
|
e520993f16f0da7e2c40f6e44b8dc56338f46b57
|
[
"Apache-2.0"
] | null | null | null |
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
|
ethanjperez/allennlp
|
e520993f16f0da7e2c40f6e44b8dc56338f46b57
|
[
"Apache-2.0"
] | 7
|
2019-09-16T02:37:31.000Z
|
2021-09-01T06:06:17.000Z
|
# pylint: disable=no-self-use,invalid-name
import numpy as np
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
    """Unit tests for BagOfWordCountsTokenEmbedder."""

    def setUp(self):
        # Build a tiny vocabulary of four tokens ("1".."4") shared by tests.
        super(TestBagOfWordCountsTokenEmbedder, self).setUp()
        self.vocab = Vocabulary()
        self.vocab.add_token_to_namespace("1")
        self.vocab.add_token_to_namespace("2")
        self.vocab.add_token_to_namespace("3")
        self.vocab.add_token_to_namespace("4")

    def test_forward_calculates_bow_properly(self):
        # With default params the embedder emits raw per-token counts over the
        # full vocabulary (6 slots: 4 tokens + padding/unknown, per the
        # expected tensor below).
        params = Params({})
        embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
        numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
        inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
        embedder_output = embedder(inputs)
        numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
        manual_output = torch.from_numpy(numpy_tensor).float()
        assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())

    def test_projects_properly(self):
        # With projection_dim set, the output width must equal that dimension.
        params = Params({"projection_dim": 50})
        embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
        numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]])
        inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
        embedder_output = embedder(inputs)
        assert embedder_output.shape[1] == 50
| 45.189189
| 93
| 0.70634
| 216
| 1,672
| 5.268519
| 0.296296
| 0.063269
| 0.013181
| 0.059754
| 0.404218
| 0.369069
| 0.27065
| 0.27065
| 0.27065
| 0.27065
| 0
| 0.027596
| 0.176435
| 1,672
| 36
| 94
| 46.444444
| 0.798838
| 0.023923
| 0
| 0.193548
| 0
| 0
| 0.012883
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.096774
| false
| 0
| 0.225806
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b998e92d411833a80bc4657adf0243c90d5c6084
| 5,457
|
py
|
Python
|
demo/demo_shapenet.py
|
hengkaiz/meshrcnn
|
eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
|
[
"BSD-3-Clause"
] | null | null | null |
demo/demo_shapenet.py
|
hengkaiz/meshrcnn
|
eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
|
[
"BSD-3-Clause"
] | null | null | null |
demo/demo_shapenet.py
|
hengkaiz/meshrcnn
|
eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import logging
import multiprocessing as mp
import logging
import os
from detectron2.evaluation import inference_context
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from fvcore.common.file_io import PathManager
from pathlib import Path
from pytorch3d.io import save_obj
from shapenet.config.config import get_shapenet_cfg
from shapenet.data.utils import imagenet_preprocess
from shapenet.modeling.heads import voxel_head
from shapenet.modeling.mesh_arch import build_model
from shapenet.utils.checkpoint import clean_state_dict
import torchvision.transforms as T
import glob
from PIL import Image
import trimesh
import pyvista as pv
import pyacvd
import numpy as np
logger = logging.getLogger('demo')
def setup_cfgs(args):
    """Builds a frozen ShapeNet config from parsed CLI arguments.

    File-based options are merged first, then command-line overrides from
    `args.opts`; the config is frozen before returning.
    """
    cfg = get_shapenet_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
def get_parser():
    """Builds the argparse parser for the MeshRCNN ShapeNet demo."""
    p = argparse.ArgumentParser(description="MeshRCNN Demo")
    p.add_argument("--config-file",
                   default="configs/shapenet/voxmesh_R50.yaml",
                   metavar="FILE",
                   help="path to config file")
    p.add_argument("--input", help="A path to an input main folder")
    # parser.add_argument("--output", help="A directory to save output visualizations")
    p.add_argument("--focal-length",
                   type=float,
                   default=20.0,
                   help="Focal length for the image")
    p.add_argument("--onlyhighest",
                   action="store_true",
                   help="will return only the highest scoring detection")
    # Trailing positional catch-all: config overrides in KEY VALUE form.
    p.add_argument("opts",
                   help="Modify model config options using the command-line",
                   default=None,
                   nargs=argparse.REMAINDER)
    return p
def resample_mesh(mesh, count=2466):
    """Resamples a mesh to roughly `count` uniformly distributed vertices.

    Uses pyacvd clustering on a pyvista-wrapped copy of the input mesh.

    Args:
        mesh: A mesh object pyvista can wrap (e.g. a trimesh mesh).
        count: Target number of clusters, i.e. output vertices.

    Returns:
        The remeshed pyvista mesh.
    """
    pv_mesh = pv.wrap(mesh)
    # logger.info('Original mesh:')
    # print(pv_mesh)
    clus = pyacvd.Clustering(pv_mesh)
    # Subdivide first so clustering has enough surface resolution to place
    # `count` clusters evenly.
    clus.subdivide(3)
    clus.cluster(count)
    # remesh
    remesh = clus.create_mesh()
    # verts = remesh.points
    # faces = remesh.faces.reshape((-1, 4))[:, 1:]
    return remesh
if __name__ == "__main__":
    # 'spawn' start method is required for CUDA-safe subprocesses.
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    device = torch.device("cuda:%d" % 0)
    logger = setup_logger(name="demo shapenet")
    logger.info("Arguments: " + str(args))

    cfg = setup_cfgs(args)

    # load checkpoing and build model
    if cfg.MODEL.CHECKPOINT == "":
        raise ValueError("Invalid checkpoing provided")
    logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
    cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
    state_dict = clean_state_dict(cp["best_states"]["model"])
    model = build_model(cfg)
    model.load_state_dict(state_dict)
    logger.info("Model loaded")
    model.to(device)

    # Iterate every sub-folder of the input root; each is expected to hold
    # .png images to run inference on.
    sub_dir = sorted(os.listdir(args.input))

    for sd in sub_dir:
        curr_path = os.path.join(args.input, sd)
        images = glob.glob(curr_path + "/*.png")

        for img_dir in images:
            # load image: ToTensor + ImageNet normalization, then add a
            # batch dimension and move to the GPU.
            transform = [T.ToTensor()]
            transform.append(imagenet_preprocess())
            transform = T.Compose(transform)

            im_name = img_dir.split("/")[-1].split(".")[0]

            with PathManager.open(img_dir, "rb") as f:
                img = Image.open(f).convert("RGB")

            img = transform(img)
            img = img[None, :, :, :]
            img = img.to(device)

            # inference_context puts the model in eval mode with no grad.
            with inference_context(model):
                img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)

            # Save voxel_score
            voxel_odir = os.path.join(curr_path, "voxel_score")
            if not Path(voxel_odir).is_dir():
                os.mkdir(voxel_odir)
            voxel_file = os.path.join(voxel_odir, "%s.pt" % (im_name))
            torch.save(voxel_scores, voxel_file)

            # Save image features
            imgfeat_odir = os.path.join(curr_path, "img_feat")
            if not Path(imgfeat_odir).is_dir():
                os.mkdir(imgfeat_odir)
            img_feat_file = os.path.join(imgfeat_odir, "%s.pt" % (im_name))
            torch.save(img_feats, img_feat_file)

            # Save P (presumably the projection/camera matrix — confirm
            # against the model's forward signature).
            p_odir = os.path.join(curr_path, "P")
            if not Path(p_odir).is_dir():
                os.mkdir(p_odir)
            p_file = os.path.join(p_odir, "%s.pt" % (im_name))
            torch.save(P, p_file)

            # Save cubified mesh (the coarse voxel-derived mesh); only the
            # final refinement stage ([-1]) is exported.
            cmesh_odir = os.path.join(curr_path, "cube_mesh")
            if not Path(cmesh_odir).is_dir():
                os.mkdir(cmesh_odir)
            cube_mesh_file = os.path.join(cmesh_odir, "%s_cube.obj" % (im_name))
            c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)
            save_obj(cube_mesh_file, c_verts, c_faces)

            # Save predicted mesh (final refined prediction).
            mesh_odir = os.path.join(curr_path, "final_mesh")
            if not Path(mesh_odir).is_dir():
                os.mkdir(mesh_odir)
            save_file = os.path.join(mesh_odir, "%s.obj" % (im_name))
            verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)
            save_obj(save_file, verts, faces)

            logger.info("Predictions saved for %s/%s" % (curr_path.split('/')[-1], im_name))
| 31.912281
| 99
| 0.637713
| 726
| 5,457
| 4.589532
| 0.289256
| 0.019808
| 0.033013
| 0.021008
| 0.092437
| 0.068427
| 0.035414
| 0.015606
| 0
| 0
| 0
| 0.006101
| 0.249038
| 5,457
| 170
| 100
| 32.1
| 0.806979
| 0.05974
| 0
| 0.04918
| 0
| 0
| 0.103576
| 0.006449
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02459
| false
| 0
| 0.213115
| 0
| 0.262295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b998f6994cf6e83702b501cd661bb37f91b59317
| 7,854
|
py
|
Python
|
proglearn/voters.py
|
jshin13/progressive-learning
|
dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
|
[
"Apache-2.0"
] | null | null | null |
proglearn/voters.py
|
jshin13/progressive-learning
|
dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
|
[
"Apache-2.0"
] | null | null | null |
proglearn/voters.py
|
jshin13/progressive-learning
|
dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
# from sklearn.ensemble import BaggingClassifier
# from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.validation import (
check_X_y,
check_array,
NotFittedError,
)
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from .base import BaseVoter
from tensorflow import keras
from keras import layers
class TreeClassificationVoter(BaseVoter):
    """Votes class posteriors keyed on decision-tree leaf ids.

    `fit` maps each leaf id observed in X to the empirical class (or label)
    posterior of the training examples routed to that leaf; `vote` replays
    those posteriors, falling back to a uniform posterior for unseen leaves.
    """

    def __init__(self, finite_sample_correction=False):
        """Initializes the voter.

        Args:
            finite_sample_correction: If True, smooth zero-probability
                posterior entries toward uniform (see
                _finite_sample_correction).
        """
        self.finite_sample_correction = finite_sample_correction
        self._is_fitted = False
        self.multilabel = False

    def fit(self, X, y):
        """Learns leaf-id -> posterior mapping from leaf ids X and labels y.

        Dispatches to fit_multilabel for multilabel-indicator targets.
        Returns self.
        """
        check_classification_targets(y)

        if type_of_target(y) == 'multilabel-indicator':
            # Fit multilabel binary task.
            self.multilabel = True
            return self.fit_multilabel(X, y)

        num_classes = len(np.unique(y))
        # Fallback posterior for leaves never seen during training.
        self.uniform_posterior = np.ones(num_classes) / num_classes

        self.leaf_to_posterior = {}

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            class_counts = [
                len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)
            ]
            posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))

            if self.finite_sample_correction:
                posteriors = self._finite_sample_correction(
                    posteriors, len(idxs_in_leaf), len(np.unique(y))
                )

            self.leaf_to_posterior[leaf_id] = posteriors

        self._is_fitted = True
        return self

    def fit_multilabel(self, X, y):
        """Learns per-leaf binary label probabilities for multilabel y."""
        num_labels = y.shape[1]
        self.uniform_posterior = y.sum(axis=0) / len(y)

        # Each posterior is now a num_labels size vector or binary probabilities.
        self.leaf_to_posterior = {}

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            label_counts = [
                len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)
            ]
            posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))

            # TODO: multilabel finite sample correction.
            self.leaf_to_posterior[leaf_id] = posteriors

        self._is_fitted = True
        return self

    def vote(self, X):
        """Returns an array of posteriors, one row per leaf id in X.

        Raises:
            NotFittedError: If fit() has not been called.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this voter."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        votes_per_example = []
        for x in X:
            if x in list(self.leaf_to_posterior.keys()):
                votes_per_example.append(self.leaf_to_posterior[x])
            else:
                # Unseen leaf -> uniform posterior.
                votes_per_example.append(self.uniform_posterior)
        return np.array(votes_per_example)

    def is_fitted(self):
        """Returns True iff fit() has completed."""
        return self._is_fitted

    @staticmethod
    def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):
        """Encourages posteriors to approach uniform when there is low data.

        Bug fix: declared as @staticmethod. Previously this was an instance
        method with no `self` parameter, so the call
        `self._finite_sample_correction(posteriors, n, k)` passed `self` as
        `posteriors` and raised TypeError whenever
        finite_sample_correction=True.
        """
        correction_constant = 1 / (num_classes * num_points_in_partition)

        zero_posterior_idxs = np.where(posteriors == 0)[0]
        posteriors[zero_posterior_idxs] = correction_constant

        posteriors /= sum(posteriors)

        return posteriors
class KNNClassificationVoter(BaseVoter):
    """Voter backed by a scikit-learn k-nearest-neighbors classifier.

    Parameters
    ----------
    k : int
        Number of neighbors.
    kwargs : dict, optional
        Extra keyword arguments forwarded to ``KNeighborsClassifier``.
    """

    def __init__(self, k, kwargs=None):
        self._is_fitted = False
        self.k = k
        # BUG FIX: the original used a mutable default (kwargs={}), which is
        # shared across all instances and mutable by callers.
        self.kwargs = {} if kwargs is None else kwargs

    def fit(self, X, y):
        """Fit the underlying KNN classifier. Returns self."""
        X, y = check_X_y(X, y)
        self.knn = KNeighborsClassifier(self.k, **self.kwargs)
        self.knn.fit(X, y)
        self._is_fitted = True
        return self

    def vote(self, X):
        """Return class probability estimates for X.

        Raises NotFittedError when called before fit.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this transformer."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        X = check_array(X)
        return self.knn.predict_proba(X)

    def is_fitted(self):
        """Return True once fit has completed."""
        return self._is_fitted
class NeuralRegressionVoter(BaseVoter):
    """Voter that trains a single linear Dense unit mapping features to a vote."""

    def __init__(
        self, validation_split=0.25, loss="mse", epochs=100, lr=1e-4, verbose=False,
    ):
        """Store the training hyperparameters; the network is built in fit."""
        self._is_fitted = False
        self.validation_split = validation_split
        self.loss = loss
        self.epochs = epochs
        self.lr = lr
        self.verbose = verbose

    def fit(self, X, y):
        """Build and train the one-unit linear model on (X, y). Returns self."""
        X, y = check_X_y(X, y)

        output_layer = layers.Dense(
            1,
            activation="linear",
            input_shape=(X.shape[1],),
            name="transform_to_vote",
        )
        model = keras.Sequential()
        model.add(output_layer)
        model.compile(
            loss=self.loss, metrics=["mae"], optimizer=keras.optimizers.Adam(self.lr)
        )

        early_stopping = keras.callbacks.EarlyStopping(patience=20, monitor="val_loss")
        model.fit(
            X,
            y,
            epochs=self.epochs,
            callbacks=[early_stopping],
            verbose=self.verbose,
            validation_split=self.validation_split,
            shuffle=True,
        )

        self.voter = model
        self._is_fitted = True
        return self

    def vote(self, X):
        """Return the trained model's predictions for X.

        Raises NotFittedError when called before fit.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this transformer."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        return self.voter.predict(check_array(X))

    def is_fitted(self):
        """Return True once fit has completed."""
        return self._is_fitted
class TreeRegressionVoter(BaseVoter):
    """Voter that maps tree leaf ids to the mean target of that leaf."""

    def __init__(self):
        self._is_fitted = False

    def fit(self, X, y):
        """Record the mean of y per leaf id in X. Returns self.

        ``global_yhat`` (the overall mean) is the fallback prediction for
        leaf ids never observed during fit.
        """
        self.leaf_to_yhat = {}
        self.global_yhat = np.mean(y)

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            # nan_to_num guards against an empty selection producing NaN.
            self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf]))

        self._is_fitted = True
        return self

    def vote(self, X):
        """Return the stored per-leaf mean for each leaf id in X.

        Raises NotFittedError when called before fit.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this voter."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        # dict.get avoids the O(n) scan of list(keys()) per example.
        return np.array(
            [self.leaf_to_yhat.get(x, self.global_yhat) for x in X]
        )

    def is_fitted(self):
        """Return True once fit has completed."""
        return self._is_fitted
| 26.805461
| 99
| 0.556277
| 918
| 7,854
| 4.528322
| 0.175381
| 0.040414
| 0.049074
| 0.027424
| 0.521049
| 0.479192
| 0.474621
| 0.436372
| 0.413038
| 0.405581
| 0
| 0.004652
| 0.343137
| 7,854
| 293
| 100
| 26.805461
| 0.801124
| 0.086071
| 0
| 0.431138
| 0
| 0
| 0.073872
| 0
| 0
| 0
| 0
| 0.003413
| 0
| 1
| 0.107784
| false
| 0
| 0.041916
| 0
| 0.263473
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9993aa0d134cc4869bfe49fd1ecd6dc8c6b0b96
| 23,640
|
py
|
Python
|
rotkehlchen/exchanges/coinbase.py
|
vnavascues/rotki
|
8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
|
[
"BSD-3-Clause"
] | null | null | null |
rotkehlchen/exchanges/coinbase.py
|
vnavascues/rotki
|
8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
|
[
"BSD-3-Clause"
] | null | null | null |
rotkehlchen/exchanges/coinbase.py
|
vnavascues/rotki
|
8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
|
[
"BSD-3-Clause"
] | null | null | null |
import hashlib
import hmac
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import asset_from_coinbase
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset
from rotkehlchen.exchanges.data_structures import AssetMovement, Trade
from rotkehlchen.exchanges.exchange import ExchangeInterface
from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_asset_amount_force_positive,
deserialize_asset_movement_category,
deserialize_fee,
deserialize_timestamp_from_date,
deserialize_trade_type,
)
from rotkehlchen.typing import (
ApiKey,
ApiSecret,
AssetMovementCategory,
Fee,
Location,
Price,
Timestamp,
TradePair,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock
from rotkehlchen.utils.serialization import rlk_jsonloads_dict
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]:
    """Turn a coinbase transaction dict into a rotkehlchen Trade.

    https://developers.coinbase.com/api/v2?python#buys

    Returns None when the coinbase transaction is not a completed trade.

    Throws:
        - UnknownAsset due to Asset instantiation
        - DeserializationError due to unexpected format of dict entries
        - KeyError due to dict entires missing an expected entry
    """
    if raw_trade['status'] != 'completed':
        # Only completed trades are of interest
        return None

    raw_time = raw_trade['created_at'] if raw_trade['instant'] else raw_trade['payout_at']
    timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase')
    trade_type = deserialize_trade_type(raw_trade['resource'])

    base_amount = deserialize_asset_amount(raw_trade['amount']['amount'])
    base_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp)
    quote_amount = deserialize_asset_amount(raw_trade['subtotal']['amount'])
    quote_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp)

    # in coinbase you are buying/selling base_asset for quote_asset
    pair = TradePair(f'{base_asset.identifier}_{quote_asset.identifier}')
    # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency
    rate = Price(quote_amount / base_amount)

    fee_amount = deserialize_fee(raw_trade['fee']['amount'])
    fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp)

    return Trade(
        timestamp=timestamp,
        location=Location.COINBASE,
        pair=pair,
        trade_type=trade_type,
        amount=base_amount,
        rate=rate,
        fee=fee_amount,
        fee_currency=fee_asset,
        link=str(raw_trade['id']),
    )
class CoinbasePermissionError(Exception):
    """Raised when a coinbase API query returns HTTP 403, i.e. the API key
    lacks the read permission required by the queried endpoint."""
    pass
class Coinbase(ExchangeInterface):
    """Interface to the Coinbase exchange REST API (v2).

    Handles HMAC-SHA256 request signing, pagination, and deserialization of
    balances, trades and deposits/withdrawals into rotki data structures.
    """

    def __init__(
            self,
            api_key: ApiKey,
            secret: ApiSecret,
            database: 'DBHandler',
            msg_aggregator: MessagesAggregator,
    ):
        super().__init__('coinbase', api_key, secret, database)
        self.apiversion = 'v2'
        self.base_uri = 'https://api.coinbase.com'
        self.msg_aggregator = msg_aggregator

    def first_connection(self) -> None:
        self.first_connection_made = True

    def _validate_single_api_key_action(
            self,
            method_str: str,
            ignore_pagination: bool = False,
    ) -> Tuple[Optional[List[Any]], str]:
        """Query one endpoint, translating permission/auth failures to messages.

        Returns (result, '') on success or (None, error_message) on failure.
        """
        try:
            result = self._api_query(method_str, ignore_pagination=ignore_pagination)

        except CoinbasePermissionError:
            if 'transactions' in method_str:
                permission = 'wallet:transactions:read'
            elif 'buys' in method_str:
                permission = 'wallet:buys:read'
            elif 'sells' in method_str:
                permission = 'wallet:sells:read'
            elif 'deposits' in method_str:
                permission = 'wallet:deposits:read'
            elif 'withdrawals' in method_str:
                permission = 'wallet:withdrawals:read'
            elif 'trades' in method_str:
                permission = 'wallet:trades:read'
            # the accounts elif should be at the end since the word appears
            # in other endpoints
            elif 'accounts' in method_str:
                permission = 'wallet:accounts:read'
            else:
                raise AssertionError(
                    f'Unexpected coinbase method {method_str} at API key validation',
                )
            msg = (
                f'Provided Coinbase API key needs to have {permission} permission activated. '
                f'Please log into your coinbase account and set all required permissions: '
                f'wallet:accounts:read, wallet:transactions:read, '
                f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, '
                f'wallet:deposits:read, wallet:trades:read'
            )
            return None, msg
        except RemoteError as e:
            error = str(e)
            if 'invalid signature' in error:
                return None, 'Failed to authenticate with the Provided API key/secret'
            elif 'invalid api key' in error:
                return None, 'Provided API Key is invalid'
            else:
                # any other remote error
                return None, error

        return result, ''

    def validate_api_key(self) -> Tuple[bool, str]:
        """Validates that the Coinbase API key is good for usage in Rotki

        Makes sure that the following permissions are given to the key:
        wallet:accounts:read, wallet:transactions:read,
        wallet:buys:read, wallet:sells:read, wallet:withdrawals:read,
        wallet:deposits:read
        """
        result, msg = self._validate_single_api_key_action('accounts')
        if result is None:
            return False, msg

        # now get the account ids
        account_ids = self._get_account_ids(result)

        if len(account_ids) != 0:
            # Query each per-account endpoint once to confirm the key has
            # the corresponding read permission (same order as before).
            for endpoint in ('transactions', 'buys', 'sells', 'deposits', 'withdrawals'):
                method = f'accounts/{account_ids[0]}/{endpoint}'
                result, msg = self._validate_single_api_key_action(method)
                if result is None:
                    return False, msg

        return True, ''

    def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]:
        """Gets the account ids out of the accounts response"""
        account_ids = []
        for account_data in accounts:
            if 'id' not in account_data:
                self.msg_aggregator.add_error(
                    'Found coinbase account entry without an id key. Skipping it. ',
                )
                continue

            if not isinstance(account_data['id'], str):
                self.msg_aggregator.add_error(
                    f'Found coinbase account entry with a non string id: '
                    f'{account_data["id"]}. Skipping it. ',
                )
                continue

            account_ids.append(account_data['id'])

        return account_ids

    def _api_query(
            self,
            endpoint: str,
            options: Optional[Dict[str, Any]] = None,
            pagination_next_uri: Optional[str] = None,
            ignore_pagination: bool = False,
    ) -> List[Any]:
        """Performs a coinbase API Query for endpoint

        You can optionally provide extra arguments to the endpoint via the options argument.
        If this is an ongoing paginating call then provide pagination_next_uri.
        If you want just the first results then set ignore_pagination to True.
        """
        request_verb = "GET"
        if pagination_next_uri:
            request_url = pagination_next_uri
        else:
            request_url = f'/{self.apiversion}/{endpoint}'
            if options:
                # BUG FIX: the query string must be separated from the path
                # with '?'; without it both the signed message and the final
                # URL are malformed.
                request_url += '?' + urlencode(options)

        timestamp = str(int(time.time()))
        message = timestamp + request_verb + request_url

        signature = hmac.new(
            self.secret,
            message.encode(),
            hashlib.sha256,
        ).hexdigest()
        log.debug('Coinbase API query', request_url=request_url)

        self.session.headers.update({
            'CB-ACCESS-SIGN': signature,
            'CB-ACCESS-TIMESTAMP': timestamp,
            'CB-ACCESS-KEY': self.api_key,
            # This is needed to guarantee the up to the given date
            # API version response.
            'CB-VERSION': '2019-08-25',
        })
        full_url = self.base_uri + request_url
        try:
            response = self.session.get(full_url)
        except requests.exceptions.RequestException as e:
            raise RemoteError(f'Coinbase API request failed due to {str(e)}') from e

        if response.status_code == 403:
            raise CoinbasePermissionError(f'API key does not have permission for {endpoint}')

        if response.status_code != 200:
            raise RemoteError(
                f'Coinbase query {full_url} responded with error status code: '
                f'{response.status_code} and text: {response.text}',
            )

        try:
            json_ret = rlk_jsonloads_dict(response.text)
        except JSONDecodeError as e:
            raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}') from e

        if 'data' not in json_ret:
            raise RemoteError(f'Coinbase json response does not contain data: {response.text}')

        final_data = json_ret['data']

        # If we got pagination and this is the first query, gather all the subsequent queries
        if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination:
            if 'next_uri' not in json_ret['pagination']:
                raise RemoteError('Coinbase json response contained no "next_uri" key')
            next_uri = json_ret['pagination']['next_uri']
            if not next_uri:
                # As per the docs: https://developers.coinbase.com/api/v2?python#pagination
                # once we get an empty next_uri we are done
                return final_data

            additional_data = self._api_query(
                endpoint=endpoint,
                options=options,
                pagination_next_uri=next_uri,
            )
            final_data.extend(additional_data)

        return final_data

    @protect_with_lock()
    @cache_response_timewise()
    def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]:
        """Query all non-zero account balances and their current USD value.

        Returns (balances, '') on success or (None, error_message) if the
        API could not be reached. Per-entry errors are reported through the
        msg_aggregator and the entry is skipped.
        """
        try:
            resp = self._api_query('accounts')
        except RemoteError as e:
            msg = (
                'Coinbase API request failed. Could not reach coinbase due '
                'to {}'.format(e)
            )
            log.error(msg)
            return None, msg

        returned_balances: Dict[Asset, Dict[str, Any]] = {}
        for account in resp:
            try:
                if not account['balance']:
                    continue

                amount = deserialize_asset_amount(account['balance']['amount'])

                # ignore empty balances. Coinbase returns zero balances for everything
                # a user does not own
                if amount == ZERO:
                    continue

                asset = asset_from_coinbase(account['balance']['currency'])
                try:
                    usd_price = Inquirer().find_usd_price(asset=asset)
                except RemoteError as e:
                    self.msg_aggregator.add_error(
                        f'Error processing coinbase balance entry due to inability to '
                        f'query USD price: {str(e)}. Skipping balance entry',
                    )
                    continue

                # An asset can appear in multiple accounts; aggregate amounts.
                if asset in returned_balances:
                    amount = returned_balances[asset]['amount'] + amount
                else:
                    returned_balances[asset] = {}

                returned_balances[asset]['amount'] = amount
                usd_value = returned_balances[asset]['amount'] * usd_price
                returned_balances[asset]['usd_value'] = usd_value

            except UnknownAsset as e:
                self.msg_aggregator.add_warning(
                    f'Found coinbase balance result with unknown asset '
                    f'{e.asset_name}. Ignoring it.',
                )
                continue
            except UnsupportedAsset as e:
                self.msg_aggregator.add_warning(
                    f'Found coinbase balance result with unsupported asset '
                    f'{e.asset_name}. Ignoring it.',
                )
                continue
            except (DeserializationError, KeyError) as e:
                msg = str(e)
                if isinstance(e, KeyError):
                    msg = f'Missing key entry for {msg}.'
                self.msg_aggregator.add_error(
                    'Error processing a coinbase account balance. Check logs '
                    'for details. Ignoring it.',
                )
                log.error(
                    'Error processing a coinbase account balance',
                    account_balance=account,
                    error=msg,
                )
                continue

        return returned_balances, ''

    def query_online_trade_history(
            self,
            start_ts: Timestamp,
            end_ts: Timestamp,
    ) -> List[Trade]:
        """Query buys/sells of all accounts and return trades in the range."""
        account_data = self._api_query('accounts')
        # now get the account ids and for each one query buys/sells
        # Looking at coinbase's API no other type of transaction
        # https://developers.coinbase.com/api/v2?python#list-transactions
        # consitutes something that Rotkehlchen would need to return in query_trade_history
        account_ids = self._get_account_ids(account_data)

        raw_data = []
        for account_id in account_ids:
            raw_data.extend(self._api_query(f'accounts/{account_id}/buys'))
            raw_data.extend(self._api_query(f'accounts/{account_id}/sells'))
        log.debug('coinbase buys/sells history result', results_num=len(raw_data))

        trades = []
        for raw_trade in raw_data:
            try:
                trade = trade_from_coinbase(raw_trade)
            except UnknownAsset as e:
                self.msg_aggregator.add_warning(
                    f'Found coinbase transaction with unknown asset '
                    f'{e.asset_name}. Ignoring it.',
                )
                continue
            except UnsupportedAsset as e:
                self.msg_aggregator.add_warning(
                    f'Found coinbase trade with unsupported asset '
                    f'{e.asset_name}. Ignoring it.',
                )
                continue
            except (DeserializationError, KeyError) as e:
                msg = str(e)
                if isinstance(e, KeyError):
                    msg = f'Missing key entry for {msg}.'
                self.msg_aggregator.add_error(
                    'Error processing a coinbase trade. Check logs '
                    'for details. Ignoring it.',
                )
                log.error(
                    'Error processing a coinbase trade',
                    trade=raw_trade,
                    error=msg,
                )
                continue

            # limit coinbase trades in the requested time range here since there
            # is no argument in the API call
            if trade and start_ts <= trade.timestamp <= end_ts:
                trades.append(trade)

        return trades

    def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]:
        """Processes a single deposit/withdrawal from coinbase and deserializes it

        Can log error/warning and return None if something went wrong at deserialization
        """
        try:
            if raw_data['status'] != 'completed':
                return None

            payout_date = raw_data.get('payout_at', None)
            if payout_date:
                timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase')
            else:
                timestamp = deserialize_timestamp_from_date(
                    raw_data['created_at'],
                    'iso8601',
                    'coinbase',
                )

            # Only get address/transaction id for "send" type of transactions
            address = None
            transaction_id = None
            # movement_category: Union[Literal['deposit'], Literal['withdrawal']]
            if 'type' in raw_data:
                # Then this should be a "send" which is the way Coinbase uses to send
                # crypto outside of the exchange
                # https://developers.coinbase.com/api/v2?python#transaction-resource
                msg = 'Non "send" type found in coinbase deposit/withdrawal processing'
                assert raw_data['type'] == 'send', msg
                movement_category = AssetMovementCategory.WITHDRAWAL
                # Can't see the fee being charged from the "send" resource

                amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])
                asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)
                # Fees dont appear in the docs but from an experiment of sending ETH
                # to an address from coinbase there is the network fee in the response
                fee = Fee(ZERO)
                raw_network = raw_data.get('network', None)
                if raw_network:
                    raw_fee = raw_network.get('transaction_fee', None)

                if raw_fee:
                    # Since this is a withdrawal the fee should be the same as the moved asset
                    if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp):
                        # If not we set ZERO fee and ignore
                        log.error(
                            f'In a coinbase withdrawal of {asset.identifier} the fee'
                            f'is denoted in {raw_fee["currency"]}',
                        )
                    else:
                        fee = deserialize_fee(raw_fee['amount'])

                if 'network' in raw_data:
                    transaction_id = get_key_if_has_val(raw_data['network'], 'hash')
                if 'to' in raw_data:
                    address = deserialize_asset_movement_address(raw_data['to'], 'address', asset)
            else:
                movement_category = deserialize_asset_movement_category(raw_data['resource'])
                amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])
                fee = deserialize_fee(raw_data['fee']['amount'])
                asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)

            return AssetMovement(
                location=Location.COINBASE,
                category=movement_category,
                address=address,
                transaction_id=transaction_id,
                timestamp=timestamp,
                asset=asset,
                amount=amount,
                fee_asset=asset,
                fee=fee,
                link=str(raw_data['id']),
            )
        except UnknownAsset as e:
            self.msg_aggregator.add_warning(
                f'Found coinbase deposit/withdrawal with unknown asset '
                f'{e.asset_name}. Ignoring it.',
            )
        except UnsupportedAsset as e:
            self.msg_aggregator.add_warning(
                f'Found coinbase deposit/withdrawal with unsupported asset '
                f'{e.asset_name}. Ignoring it.',
            )
        except (DeserializationError, KeyError) as e:
            msg = str(e)
            if isinstance(e, KeyError):
                msg = f'Missing key entry for {msg}.'
            self.msg_aggregator.add_error(
                'Unexpected data encountered during deserialization of a coinbase '
                'asset movement. Check logs for details and open a bug report.',
            )
            log.error(
                f'Unexpected data encountered during deserialization of coinbase '
                f'asset_movement {raw_data}. Error was: {str(e)}',
            )

        return None

    def query_online_deposits_withdrawals(
            self,
            start_ts: Timestamp,
            end_ts: Timestamp,
    ) -> List[AssetMovement]:
        """Query deposits, withdrawals and outgoing "send" transactions."""
        account_data = self._api_query('accounts')
        account_ids = self._get_account_ids(account_data)

        raw_data = []
        for account_id in account_ids:
            raw_data.extend(self._api_query(f'accounts/{account_id}/deposits'))
            raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals'))
            # also get transactions to get the "sends", which in Coinbase is the
            # way to send Crypto out of the exchange
            txs = self._api_query(f'accounts/{account_id}/transactions')
            for tx in txs:
                if 'type' not in tx:
                    continue
                if tx['type'] == 'send':
                    raw_data.append(tx)
        log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data))

        movements = []
        for raw_movement in raw_data:
            movement = self._deserialize_asset_movement(raw_movement)
            # limit coinbase deposit/withdrawals in the requested time range
            # here since there is no argument in the API call
            if movement and start_ts <= movement.timestamp <= end_ts:
                movements.append(movement)

        return movements
| 40.688468
| 98
| 0.592047
| 2,639
| 23,640
| 5.133005
| 0.151194
| 0.015503
| 0.016315
| 0.017717
| 0.319504
| 0.280969
| 0.232393
| 0.210247
| 0.201757
| 0.189207
| 0
| 0.002593
| 0.331049
| 23,640
| 580
| 99
| 40.758621
| 0.85399
| 0.140398
| 0
| 0.297968
| 0
| 0
| 0.180838
| 0.029701
| 0
| 0
| 0
| 0
| 0.004515
| 1
| 0.024831
| false
| 0.002257
| 0.051919
| 0
| 0.13544
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9994eb6b47f29e07dc9f474ab82878fdc8ae029
| 3,533
|
py
|
Python
|
lib/python3.7/site-packages/ldap/controls/deref.py
|
aonrobot/MSC-thug-auth-provider
|
aef37ef5a000586b8502cc536244f31e08b9c2db
|
[
"Apache-2.0"
] | 1
|
2019-06-21T11:51:26.000Z
|
2019-06-21T11:51:26.000Z
|
lib/python3.7/site-packages/ldap/controls/deref.py
|
aonrobot/MSC-thug-auth-provider
|
aef37ef5a000586b8502cc536244f31e08b9c2db
|
[
"Apache-2.0"
] | 13
|
2019-07-03T21:28:31.000Z
|
2022-02-26T10:42:05.000Z
|
lib/python3.7/site-packages/ldap/controls/deref.py
|
aonrobot/MSC-thug-auth-provider
|
aef37ef5a000586b8502cc536244f31e08b9c2db
|
[
"Apache-2.0"
] | 2
|
2020-02-11T09:34:39.000Z
|
2020-11-10T14:41:32.000Z
|
# -*- coding: utf-8 -*-
"""
ldap.controls.deref - classes for
(see https://tools.ietf.org/html/draft-masarati-ldap-deref)
See https://www.python-ldap.org/ for project details.
"""
__all__ = [
'DEREF_CONTROL_OID',
'DereferenceControl',
]
import ldap.controls
from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS
import pyasn1_modules.rfc2251
from pyasn1.type import namedtype,univ,tag
from pyasn1.codec.ber import encoder,decoder
from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue
# OID of the dereference control as registered in draft-masarati-ldap-deref
DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'

# Request types
#---------------------------------------------------------------------------

# For compatibility with ASN.1 declaration in I-D
AttributeList = AttributeDescriptionList
class DerefSpec(univ.Sequence):
  """Single request item: the attribute to dereference plus the attributes
  to return from the dereferenced entry.

  DerefSpec ::= SEQUENCE {
      derefAttr   AttributeDescription,
      attributes  AttributeList }
  """
  componentType = namedtype.NamedTypes(
    namedtype.NamedType(
      'derefAttr',
      AttributeDescription()
    ),
    namedtype.NamedType(
      'attributes',
      AttributeList()
    ),
  )
class DerefSpecs(univ.SequenceOf):
  """Request control value: SEQUENCE OF DerefSpec."""
  componentType = DerefSpec()
# Response types
#---------------------------------------------------------------------------
class AttributeValues(univ.SetOf):
  """SET OF AttributeValue, used inside PartialAttribute."""
  componentType = AttributeValue()
class PartialAttribute(univ.Sequence):
  """Attribute type together with its (possibly partial) set of values."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('type', AttributeDescription()),
    namedtype.NamedType('vals', AttributeValues()),
  )
class PartialAttributeList(univ.SequenceOf):
  """SEQUENCE OF PartialAttribute, implicitly tagged [0] per the I-D."""
  componentType = PartialAttribute()
  tagSet = univ.Sequence.tagSet.tagImplicitly(
    tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
  )
class DerefRes(univ.Sequence):
  """One dereference result: the attribute, the DN it pointed to, and the
  optionally returned attribute values of the dereferenced entry."""
  componentType = namedtype.NamedTypes(
    namedtype.NamedType('derefAttr', AttributeDescription()),
    namedtype.NamedType('derefVal', LDAPDN()),
    namedtype.OptionalNamedType('attrVals', PartialAttributeList()),
  )
class DerefResultControlValue(univ.SequenceOf):
  """Response control value: SEQUENCE OF DerefRes."""
  componentType = DerefRes()
class DereferenceControl(LDAPControl):
  """LDAP Dereference control (draft-masarati-ldap-deref).

  derefSpecs maps an attribute name to the list of attributes to pull
  from the entry its values point to; derefRes (set after decoding a
  response) maps attribute names to (DN, partial-attributes-dict) pairs.
  """
  controlType = DEREF_CONTROL_OID

  def __init__(self, criticality=False, derefSpecs=None):
    LDAPControl.__init__(self, self.controlType, criticality)
    self.derefSpecs = derefSpecs or {}

  def _derefSpecs(self):
    """Build the DerefSpecs ASN.1 structure from self.derefSpecs."""
    specs = DerefSpecs()
    for pos, (attr_name, wanted_attrs) in enumerate(self.derefSpecs.items()):
      attr_list = AttributeList()
      for idx, wanted in enumerate(wanted_attrs):
        attr_list.setComponentByPosition(idx, wanted)
      spec = DerefSpec()
      spec.setComponentByName('derefAttr', AttributeDescription(attr_name))
      spec.setComponentByName('attributes', attr_list)
      specs.setComponentByPosition(pos, spec)
    return specs

  def encodeControlValue(self):
    return encoder.encode(self._derefSpecs())

  def decodeControlValue(self, encodedControlValue):
    """Decode the response value into the self.derefRes dict."""
    decodedValue, _ = decoder.decode(encodedControlValue, asn1Spec=DerefResultControlValue())
    self.derefRes = {}
    for deref_res in decodedValue:
      deref_attr, deref_val, deref_vals = deref_res[0], deref_res[1], deref_res[2]
      partial_attrs_dict = {
        str(tv[0]): [str(v) for v in tv[1]]
        for tv in deref_vals or []
      }
      entry = (str(deref_val), partial_attrs_dict)
      self.derefRes.setdefault(str(deref_attr), []).append(entry)
KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl
| 29.441667
| 102
| 0.711577
| 362
| 3,533
| 6.770718
| 0.348066
| 0.044064
| 0.01836
| 0.041616
| 0.155855
| 0.136271
| 0.114239
| 0.088943
| 0.088943
| 0.088943
| 0
| 0.012871
| 0.142372
| 3,533
| 119
| 103
| 29.689076
| 0.79604
| 0.112652
| 0
| 0.088608
| 0
| 0
| 0.04196
| 0.008008
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050633
| false
| 0
| 0.075949
| 0.012658
| 0.367089
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b99b2da4f2ac2ca37d2ded7c72545cef1cab4228
| 5,356
|
py
|
Python
|
scripts/summaryPlot.py
|
Hespian/ParFastKer
|
5ddf1685c0652e73c889cfc64c7ec1fd827f905c
|
[
"BSD-3-Clause",
"MIT"
] | 3
|
2019-08-10T08:24:19.000Z
|
2019-08-12T07:16:03.000Z
|
scripts/summaryPlot.py
|
Hespian/ParFastKer
|
5ddf1685c0652e73c889cfc64c7ec1fd827f905c
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
scripts/summaryPlot.py
|
Hespian/ParFastKer
|
5ddf1685c0652e73c889cfc64c7ec1fd827f905c
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
import get_data_ours
import get_data_akiba
import get_data_NearLinear
import get_data_LinearTime
import os
import matplotlib.pyplot as plt
# Benchmark instances (the RHG instance is excluded from this run):
# graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "RHG-100000000-nodes-2000000000-edges", "delaunay_n24", "del26"]
graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "delaunay_n24", "del26"]
# Result/log directories for each competing algorithm
linearTimeDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs"
partitioningDir = "../../LinearTimeKernels/partitions"
ourTimeDir = "../../results/LinearTimeKernelsScalingAll"
nearLinearDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear"
akibaDir = "../../akiba_vertex_cover/results"
def getOurTimeAndSizeSequential(graph):
    """Sequential quasikernel time (incl. linear-time kernel) and kernel size."""
    data = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
    return {
        "time": data["sequential_quasikernel_time"] + data["lineartime_time"],
        "size": data["sequential_quasikernel_size"],
    }
def getOurTimeAndSizeParallel(graph):
    """Parallel quasikernel time (incl. linear-time kernel and partitioning) and size."""
    data = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
    return {
        "time": data["parallel_quasikernel_time"] + data["lineartime_time"] + data["partitioning_time"],
        "size": data["parallel_quasikernel_size"],
    }
def getAkibaTimeAndSize(graph):
    """Time and kernel size for Akiba's VCSolver on the given graph."""
    return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)
def getNearLinearTimeAndSize(graph):
    """Time and kernel size for the NearLinear algorithm on the given graph."""
    return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)
def getLinearTimeTimeAndSize(graph):
    """Time and kernel size for the LinearTime algorithm on the given graph."""
    return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)
def minProperty(graph, prop):
    """Smallest non-negative value of *prop* ('time' or 'size') over all solvers.

    Negative values mark missing data and are ignored; an all-zero minimum is
    mapped to 1 so callers can divide by the result.
    """
    candidates = [
        getOurTimeAndSizeSequential(graph)[prop],
        getOurTimeAndSizeParallel(graph)[prop],
        getAkibaTimeAndSize(graph)[prop],
        getNearLinearTimeAndSize(graph)[prop],
        getLinearTimeTimeAndSize(graph)[prop],
    ]
    smallest = min(value for value in candidates if value >= 0)
    return 1 if smallest == 0 else smallest
# ---- Collect per-graph results, normalised against the VCSolver (Akiba) baseline ----
oursizeSequential = []
ourtimeSequential = []
oursizeParallel = []
ourtimeParallel = []
akibasize = []
akibatime = []
nearlinearsize = []
nearlineartime = []
lineartimesize = []
lineartimetime = []
for graph in graphs:
    # Each reader is called once per graph (previously each was called twice).
    akiba = getAkibaTimeAndSize(graph)
    minsize = akiba["size"]
    mintime = akiba["time"]
    # Negative values signal missing data, so only keep pairs where both
    # normalised coordinates are strictly positive.
    ours_seq = getOurTimeAndSizeSequential(graph)
    oss = ours_seq["size"] / minsize
    ots = ours_seq["time"] / mintime
    if oss > 0 and ots > 0:
        oursizeSequential.append(oss)
        ourtimeSequential.append(ots)
    ours_par = getOurTimeAndSizeParallel(graph)
    osp = ours_par["size"] / minsize
    otp = ours_par["time"] / mintime
    if osp > 0 and otp > 0:
        oursizeParallel.append(osp)
        ourtimeParallel.append(otp)
    aks = akiba["size"] / minsize
    akt = akiba["time"] / mintime
    if aks > 0 and akt > 0:
        akibasize.append(aks)
        akibatime.append(akt)
    near = getNearLinearTimeAndSize(graph)
    nls = near["size"] / minsize
    nlt = near["time"] / mintime
    if nls > 0 and nlt > 0:
        nearlinearsize.append(nls)
        nearlineartime.append(nlt)
    linear = getLinearTimeTimeAndSize(graph)
    lts = linear["size"] / minsize
    ltt = linear["time"] / mintime
    # BUG FIX: this condition previously tested nls/nlt (copy-paste from the
    # NearLinear branch above), so LinearTime data points were added or
    # dropped based on NearLinear's validity instead of their own.
    if lts > 0 and ltt > 0:
        lineartimesize.append(lts)
        lineartimetime.append(ltt)

# ---- Scatter plot: normalised running time vs. normalised kernel size (log-log) ----
plt.rc('font', size=14)
fig = plt.figure(figsize=(3.2, 2.4))
ax = fig.add_subplot(1,1,1)
plt.title("Summary", fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
ax.scatter(ourtimeSequential, oursizeSequential, label="FastKer", marker="x", color="green")
ax.scatter(ourtimeParallel, oursizeParallel, label="ParFastKer", marker="+", color="black")
ax.scatter(nearlineartime, nearlinearsize, label="NearLinear", marker="o", edgecolors="red", facecolors="none")
ax.scatter(lineartimetime, lineartimesize, label="LinearTime", marker="^", edgecolors="magenta", facecolors="none")
plt.xlabel("time / VCSolver time")
plt.ylabel("size / VCSolver size")
plt.xticks([0.0001, 0.01, 1])
ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode="expand")
plt.savefig("summaryplot_vcsolver_baseline.pdf", bbox_inches="tight")
| 39.094891
| 234
| 0.720127
| 578
| 5,356
| 6.577855
| 0.314879
| 0.01657
| 0.021042
| 0.023672
| 0.216202
| 0.147291
| 0.147291
| 0.126249
| 0.126249
| 0.110994
| 0
| 0.03031
| 0.131441
| 5,356
| 136
| 235
| 39.382353
| 0.786973
| 0.161875
| 0
| 0.084211
| 0
| 0
| 0.173271
| 0.086859
| 0.010526
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0
| 0.063158
| 0.031579
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b99c2305beceab596bedee8ad399b6faa3216070
| 3,587
|
py
|
Python
|
bouncer/cli/base.py
|
lrnt/git-bouncer
|
3015e11a5d2c90986124de73bf1fd0f5a8563360
|
[
"MIT"
] | null | null | null |
bouncer/cli/base.py
|
lrnt/git-bouncer
|
3015e11a5d2c90986124de73bf1fd0f5a8563360
|
[
"MIT"
] | null | null | null |
bouncer/cli/base.py
|
lrnt/git-bouncer
|
3015e11a5d2c90986124de73bf1fd0f5a8563360
|
[
"MIT"
] | null | null | null |
import configparser
import sys
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter
def opt(*args, **kwargs):
    """Decorator factory: record an argparse option spec on the decorated method.

    Each application appends an ``(args, kwargs)`` tuple to the method's
    ``options`` list, creating the list on first use.
    """
    def wrap(method):
        try:
            method.options.append((args, kwargs))
        except AttributeError:
            method.options = [(args, kwargs)]
        return method
    return wrap
def noopts(method):
    """Mark *method* as a sub-command that takes no extra CLI options."""
    method.options = []
    return method
class HelpMixin(object):
    """Mixin providing a fallback ``help`` listing for command containers."""

    def help(self):
        """Print one line per known command and return exit status 1."""
        print('available commands:')
        for cmd_name, cmd in self.commands.items():
            doc = str(cmd.__doc__ or '').strip('\n')
            print('  ', cmd_name.ljust(10), doc)
        return 1
class SubParser(HelpMixin):
    """Dispatch ``sys.argv`` to one of several named command objects."""

    def __init__(self, commands):
        self.commands = self._commands(commands)

    def _commands(self, commands):
        """Map each command's ``_name`` to the command, tagging each with the program name."""
        prog = sys.argv[0]
        named = {}
        for command in commands:
            cmd_name = getattr(command, '_name', None)
            if cmd_name:
                command.prog = prog
                named[cmd_name] = command
        return named

    def run(self):
        """Invoke the first recognised command found in ``sys.argv``; otherwise show help."""
        args = sys.argv[1:]
        for index, arg in enumerate(args):
            if arg in self.commands:
                args.pop(index)
                return self.commands[arg](args)
        return self.help()
class Command(HelpMixin):
    """Base class for a named CLI command whose sub-commands are decorated methods.

    Any method carrying an ``options`` attribute (see the ``opt``/``noopts``
    decorators) is exposed as a sub-command. Parsed CLI options that match the
    method's parameter names are passed to it; the rest are set on ``self``
    before ``pre_command`` runs.
    """

    def __init__(self):
        self.global_options = []
        self.commands = self._methods_with_opts()

    def _methods_with_opts(self):
        """Return a name -> bound-method map of all methods tagged with ``options``."""
        result = {}
        for name in dir(self):
            if name.startswith('__'):
                continue
            method = getattr(self, name)
            if not hasattr(method, 'options'):
                continue
            result[name] = method
        return result

    def _parse_args(self, method, args):
        """Parse *args* with an ArgumentParser built from the method's and global options."""
        prog = '{} {} {}'.format(self.prog, self._name, method.__name__)
        parser = ArgumentParser(
            prog=prog,
            description=(method.__doc__ or ''),
            formatter_class=RawDescriptionHelpFormatter
        )
        for flags, kwargs in method.options + self.global_options:
            parser.add_argument(*flags, **kwargs)
        return vars(parser.parse_args(args))

    def _call_method(self, method, args):
        # Find out which arguments the method expects.
        # BUG FIX: inspect.getargspec() was removed in Python 3.11;
        # getfullargspec() is the drop-in replacement for this use.
        expected_args = inspect.getfullargspec(method).args
        expected_args.remove('self')
        self_args = self._parse_args(method, args)
        method_args = {}
        # Get the expected method arguments, ignore rest.
        # BUG FIX: the membership test and pop were previously done on *args*
        # (the raw CLI string list, where list.pop(name) raises TypeError)
        # instead of the parsed self_args dict.
        for name in expected_args:
            if name in self_args:
                method_args[name] = self_args.pop(name)
        # Put rest of the arguments in self
        for name, value in self_args.items():
            setattr(self, name, value)
        self.pre_command()
        return method(**method_args)

    def __call__(self, args):
        """Dispatch *args* to the first recognised sub-command, or print help."""
        for index, arg in enumerate(args):
            if arg in self.commands.keys():
                args.pop(index)
                return self._call_method(self.commands[arg], args)
        return self.help()

    def opt(self, *args, **kwargs):
        """Register a global option applied to every sub-command's parser."""
        self.global_options.append((args, kwargs))

    def pre_command(self):
        """Hook run after options are attached to self, before the sub-command body."""
        pass
class BaseCommand(Command):
    """Command with a shared ``-c`` configuration-file option."""

    def __init__(self):
        super(BaseCommand, self).__init__()
        self.opt(
            '-c', dest='config_path', help='Configuration file',
            default='~/.test.conf'
        )

    def pre_command(self):
        """Read the configuration file and print its section names."""
        parsed = configparser.ConfigParser()
        parsed.read(self.config_path)
        print(parsed.sections())
| 27.381679
| 72
| 0.578199
| 395
| 3,587
| 5.063291
| 0.263291
| 0.06
| 0.021
| 0.018
| 0.127
| 0.102
| 0.102
| 0.071
| 0.071
| 0.071
| 0
| 0.002849
| 0.315026
| 3,587
| 130
| 73
| 27.592308
| 0.811152
| 0.034569
| 0
| 0.255102
| 0
| 0
| 0.028621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163265
| false
| 0.010204
| 0.040816
| 0
| 0.367347
| 0.030612
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b99c4d9fb380e0635cac67dff2a6820b500bf34f
| 13,728
|
py
|
Python
|
Examples/ExampleCodes_ssccoorriinngg.py
|
MahdadJafarzadeh/ssccoorriinngg
|
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
|
[
"MIT"
] | 2
|
2020-04-28T12:50:26.000Z
|
2020-05-13T08:52:42.000Z
|
Examples/ExampleCodes_ssccoorriinngg.py
|
MahdadJafarzadeh/ssccoorriinngg
|
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
|
[
"MIT"
] | null | null | null |
Examples/ExampleCodes_ssccoorriinngg.py
|
MahdadJafarzadeh/ssccoorriinngg
|
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
|
[
"MIT"
] | 1
|
2020-07-14T13:48:56.000Z
|
2020-07-14T13:48:56.000Z
|
#%% Import libs
# NOTE(review): numpy and cross_validate are each imported twice below;
# harmless, but the duplicates can be removed.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score
import h5py
import time
from ssccoorriinngg import ssccoorriinngg
import numpy as np
from sklearn.model_selection import cross_validate
#%% Picking featureset of interest and apply classification
# Helper object for loading/saving feature sets and running classifiers.
# fs/T are presumably the sampling rate (Hz) and epoch length (s) -- confirm
# against the ssccoorriinngg library documentation.
Object = ssccoorriinngg(filename='', channel='', fs = 200, T = 30)
path = 'C:/PhD/ML in depression/'
fname = 'feat42_Fp1-Fp2_train'
feats = 'featureset'
labels = 'labels'
# Train set
X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels)
# Test set
fname = 'feat42_Fp1-Fp2_test'
X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels)
# Define the scoring criteria:
scoring = {'accuracy' : make_scorer(accuracy_score),
           'precision' : make_scorer(precision_score),
           'recall' : make_scorer(recall_score),
           'f1_score' : make_scorer(f1_score)}
# Cross-validation using logistic Random Forests
y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10)
# NOTE(review): Acc/Recall/prec/f1_sc are overwritten by the XGBoost call
# below -- copy them first if the RF scores are needed afterwards.
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF)
# Cross-validation using XGBoost
y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000,
                                  cv = 10 , max_depth=3, learning_rate=.1)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb)
#%% Outcome measures
# Define required metrics here:
Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score']
# NOTE(review): results_RF / results_xgb / results_SVM / results_LR are not
# defined anywhere in this example file (the cells above produce y_pred_*);
# this cell assumes cross_validate() result dicts from earlier runs -- verify
# before executing.
for metric in Metrics:
    # RF
    r1 = results_RF[metric].mean()
    std1 = results_RF[metric].std()
    print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}')
    # xgb
    r2 = results_xgb[metric].mean()
    std2 = results_xgb[metric].std()
    print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}')
    # SVM
    r3 = results_SVM[metric].mean()
    std3 = results_SVM[metric].std()
    print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}')
    # LR
    r4 = results_LR[metric].mean()
    std4 = results_LR[metric].std()
    print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}')
#%% Applying Randomized grid search to find the best config. of RF
# NOTE(review): X and y are not defined in this example file -- they are
# expected to be a full feature matrix and label vector loaded beforehand.
BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y,
                      estimator = RandomForestClassifier(), scoring = scoring,
                      n_estimators = [int(x) for x in np.arange(10, 500, 20)],
                      max_features = ['log2', 'sqrt'],
                      max_depth = [int(x) for x in np.arange(10, 100, 30)],
                      min_samples_split = [2, 5, 10],
                      min_samples_leaf = [1, 2, 4],
                      bootstrap = [True, False],
                      n_iter = 100, cv = 10)
#%% Test feature selection methods ##
# PCA
PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5)
# Boruta
ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7)
# Lasso
Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1)
# ANOVA
Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80)
# Recursive (RFE)
ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20)
#### NOW TEST CLASSIFIERS WITH SELECTED FEATS
results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv = 10)
#%% Example save featureset
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3')
#%% Example load features:
X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/',
                            fname = 'feat42_N3_fp2-M1',
                            feats = 'featureset',
                            labels = 'labels')
#%% Combining some REM and SWS epochs
Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',
                     ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1',
                     REM_fname = 'tr90_fp1-M2_fp2-M1',
                     saving = True, fname_save = 'tr90_N3&REM_fp1-M2')
#%% How to save some results?
# Store per-fold cross-validation metrics for all four classifiers in one
# HDF5 file (one dataset per classifier/metric pair).
# NOTE(review): results_SVM / results_LR / results_xgb are not defined in this
# example file; run the corresponding modelling cells first.
directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/'
fname = '42feats_N3'
with h5py.File((directory+fname + '.h5'), 'w') as wf:
    # Accuracies
    dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy'])
    dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy'])
    dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy'])
    dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy'])
    # Precision
    dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision'])
    dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision'])
    dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision'])
    dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision'])
    # Recall
    dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall'])
    dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall'])
    dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall'])
    dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall'])
    # f1-score
    dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score'])
    dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score'])
    dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score'])
    dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score'])
#%% Extracting features from more than one channel:
# NOTE(review): the cells below use ML_Depression(...), which is neither
# imported nor defined in this file (earlier cells use ssccoorriinngg).
# Confirm the intended class name before running.
tic = time.time()
########### Central electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
fname_C_N3 = (main_path+"tr90_N3_C3-M2_C4-M1.h5")
fname_C_REM = (main_path+"tr90_REM_C3-M2_C4-M1.h5")
ch_C4 = 'C4-M1'
ch_C3 = 'C3-M2'
# For each channel/stage pair: extract features and persist them to disk.
Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30)
X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction()
Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM')
Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30)
X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction()
Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM')
Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30)
X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction()
Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3')
Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30)
X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction()
Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3')
########### Occipital electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_O_N3 = (main_path+"tr90_N3_O1-M2_O2-M1.h5")
fname_O_REM = (main_path+"tr90_REM_O1-M2_O2-M1.h5")
ch_O2 = 'O2-M1'
ch_O1 = 'O1-M2'
Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30)
X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction()
Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM')
Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T = 30)
X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction()
Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM')
Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30)
X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction()
Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3')
Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30)
X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction()
Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3')
########### Fp electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_fp_N3 = (main_path+"tr90_N3_fp1-M2_fp2-M1.h5")
fname_fp_REM = (main_path+"tr90_REM_fp1-M2_fp2-M1.h5")
ch_fp2 = 'fp2-M1'
ch_fp1 = 'fp1-M2'
Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30)
X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction()
Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM')
Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30)
X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction()
Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM')
Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30)
X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction()
Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3')
Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30)
X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction()
Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3')
toc = time.time()
print(f'time taken: {toc - tic}')
########## Concatenate all features #########
# Stack channels column-wise to build per-hemisphere feature matrices.
# RIGHT hemisphere - REM
X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM))
X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM))
# RIGHT hemisphere - N3
X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3))
X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3))
# LEFT hemisphere - REM
X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM))
X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM))
# LEFT hemisphere - N3
X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3))
X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3))
# Both sides - REM
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Both sides - N3
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
# Combine SWS and REM
X_SWS_REM = np.row_stack((X_N3, X_REM))
y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))
# SAVE ALL COMBINATIONS
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
# one hemisphere
Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM')
Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM')
Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3')
Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3')
# Both hemisphere
# NOTE(review): the REM combination below is saved with the N3 labels
# (y=y_fp2_N3) -- likely intended to be y_fp2_REM; verify before use.
Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM')
# Both hemispheres- SWS &REM combination
Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM')
#%% Load features from different brain regions, sleep stage and combine them
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
feats = 'featureset'
labels = 'labels'
# Pick right hemisphere N3
fname_rh_N3 = 'feat42_rh_N3'
X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)
# Pick left hemisphere N3
fname_lh_N3 = 'feat42_lh_N3'
X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels)
# Pick right hemisphere REM
fname_rh_REM = 'feat42_rh_REM'
X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels)
# Pick LEFT hemisphere REM
fname_lh_REM = 'feat42_lh_REM'
X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels)
# Combine them
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Save combination
Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path, filename = 'feat42_l&rh_REM')
| 53.209302
| 127
| 0.682984
| 2,105
| 13,728
| 4.104038
| 0.119715
| 0.021299
| 0.045839
| 0.048617
| 0.536983
| 0.418798
| 0.330594
| 0.273064
| 0.227341
| 0.13196
| 0
| 0.059051
| 0.18954
| 13,728
| 257
| 128
| 53.416342
| 0.717419
| 0.084135
| 0
| 0.119318
| 0
| 0.022727
| 0.171937
| 0.052217
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051136
| 0
| 0.051136
| 0.028409
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b99d08420cae81be117acdda96af821aba38eea2
| 6,891
|
py
|
Python
|
igibson/examples/behavior/behavior_demo_collection.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | null | null | null |
igibson/examples/behavior/behavior_demo_collection.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | null | null | null |
igibson/examples/behavior/behavior_demo_collection.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | null | null | null |
"""
Main BEHAVIOR demo collection entrypoint
"""
import argparse
import copy
import datetime
import os
import bddl
import numpy as np
import igibson
from igibson.activity.activity_base import iGBEHAVIORActivityInstance
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings
from igibson.simulator import Simulator
from igibson.utils.ig_logging import IGLogWriter
# Extra simulator steps recorded after task success (lets the scene settle on film).
POST_TASK_STEPS = 200
# Initial steps during which VR actions are zeroed so physics can warm up.
PHYSICS_WARMING_STEPS = 200
def parse_args():
    """Build and parse the command-line arguments for BEHAVIOR demo collection."""
    scene_choices = [
        "Beechwood_0_int",
        "Beechwood_1_int",
        "Benevolence_0_int",
        "Benevolence_1_int",
        "Benevolence_2_int",
        "Ihlen_0_int",
        "Ihlen_1_int",
        "Merom_0_int",
        "Merom_1_int",
        "Pomaria_0_int",
        "Pomaria_1_int",
        "Pomaria_2_int",
        "Rs_int",
        "Wainscott_0_int",
        "Wainscott_1_int",
    ]
    task_id_choices = [0, 1]
    parser = argparse.ArgumentParser(description="Run and collect an ATUS demo")
    # (flags, add_argument keyword spec) pairs, registered in order below.
    arg_specs = [
        (("--task",), dict(type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl.")),
        (("--task_id",), dict(type=int, required=True, choices=task_id_choices, nargs="?", help="BDDL integer ID, matching suffix of bddl.")),
        (("--vr_log_path",), dict(type=str, help="Path (and filename) of vr log")),
        (("--scene",), dict(type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes.")),
        (("--disable_save",), dict(action="store_true", help="Whether to disable saving logfiles.")),
        (("--disable_scene_cache",), dict(action="store_true", help="Whether to disable using pre-initialized scene caches.")),
        (("--profile",), dict(action="store_true", help="Whether to print profiling data.")),
        (("--no_vr",), dict(action="store_true", help="Whether to turn off VR recording and save random actions.")),
        (("--max_steps",), dict(type=int, default=-1, help="Maximum number of steps to record before stopping.")),
    ]
    for flags, spec in arg_specs:
        parser.add_argument(*flags, **spec)
    return parser.parse_args()
def main():
    """Entry point: parse CLI arguments and launch demo collection."""
    args = parse_args()
    bddl.set_backend("iGibson")
    collect_demo(
        task=args.task,
        task_id=args.task_id,
        scene=args.scene,
        vr_log_path=args.vr_log_path,
        disable_save=args.disable_save,
        max_steps=args.max_steps,
        no_vr=args.no_vr,
        disable_scene_cache=args.disable_scene_cache,
        profile=args.profile,
    )
def collect_demo(
    task,
    task_id,
    scene,
    vr_log_path=None,
    disable_save=False,
    max_steps=-1,
    no_vr=False,
    disable_scene_cache=False,
    profile=False,
):
    """Run one BEHAVIOR activity in the simulator and optionally log it to HDF5.

    Args:
        task: ATUS activity name (matching a bddl parent folder).
        task_id: BDDL definition ID for the activity.
        scene: iGibson interactive scene name/ID.
        vr_log_path: output log file; auto-generated from task/scene/timestamp
            when None (and saving is enabled).
        disable_save: when True, no IGLogWriter is created.
        max_steps: stop after this many simulator steps; < 0 means unlimited.
        no_vr: run headless with scripted/random actions instead of VR input.
        disable_scene_cache: sample the activity online instead of loading a
            pre-initialized fixed-furniture scene URDF cache.
        profile: print per-step profiling statistics.
    """
    # HDR files for PBR rendering
    hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
    hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
    light_modulation_map_filename = os.path.join(
        igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
    )
    background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
    # VR rendering settings
    vr_rendering_settings = MeshRendererSettings(
        optimized=True,
        fullscreen=False,
        env_texture_filename=hdr_texture,
        env_texture_filename2=hdr_texture2,
        env_texture_filename3=background_texture,
        light_modulation_map_filename=light_modulation_map_filename,
        enable_shadow=True,
        enable_pbr=True,
        msaa=False,
        light_dimming_factor=1.0,
    )
    # VR system settings: physics runs at 300 Hz, rendering at 30 Hz.
    mode = "headless" if no_vr else "vr"
    s = Simulator(
        mode=mode,
        rendering_settings=vr_rendering_settings,
        vr_settings=VrSettings(use_vr=True),
        physics_timestep=1 / 300.0,
        render_timestep=1 / 30.0,
    )
    igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)
    # Prefer the cached, pre-sampled scene URDF unless the caller opted out.
    scene_kwargs = None
    online_sampling = True
    if not disable_scene_cache:
        scene_kwargs = {
            "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id),
        }
        online_sampling = False
    igbhvr_act_inst.initialize_simulator(
        simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling
    )
    vr_agent = igbhvr_act_inst.simulator.robots[0]
    if not no_vr:
        # Overlay that shows/iterates task instructions inside the headset.
        vr_cs = VrConditionSwitcher(
            igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction
        )
    log_writer = None
    if not disable_save:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        if vr_log_path is None:
            vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp)
        log_writer = IGLogWriter(
            s,
            log_filepath=vr_log_path,
            task=igbhvr_act_inst,
            store_vr=False if no_vr else True,
            vr_robot=vr_agent,
            profiling_mode=profile,
            filter_objects=True,
        )
        log_writer.set_up_data_storage()
    satisfied_predicates_cached = {}
    post_task_steps = copy.deepcopy(POST_TASK_STEPS)
    physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)
    steps = 0
    # Main record loop: step physics, compute an action, log the frame.
    while max_steps < 0 or steps < max_steps:
        igbhvr_act_inst.simulator.step(print_stats=profile)
        task_done, satisfied_predicates = igbhvr_act_inst.check_success()
        if no_vr:
            # Headless mode: two fixed warm-up actions, then small random noise.
            # Action vector length 28 and indices 19/27 presumably match the
            # BehaviorRobot action space -- TODO confirm.
            if steps < 2:
                action = np.zeros((28,))
                action[19] = 1
                action[27] = 1
            else:
                action = np.random.uniform(-0.01, 0.01, size=(28,))
        else:
            action = igbhvr_act_inst.simulator.gen_vr_robot_action()
            # Suppress input until physics has warmed up.
            if steps < physics_warming_steps:
                action = np.zeros_like(action)
        vr_agent.update(action)
        if not no_vr:
            # Refresh the instruction overlay when predicate state changes,
            # and react to controller overlay-toggle events.
            if satisfied_predicates != satisfied_predicates_cached:
                vr_cs.refresh_condition(switch=False)
                satisfied_predicates_cached = satisfied_predicates
            if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"):
                vr_cs.refresh_condition()
            if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"):
                vr_cs.toggle_show_state()
        if log_writer and not disable_save:
            log_writer.process_frame()
        # After success, keep recording POST_TASK_STEPS frames, then stop.
        if task_done:
            post_task_steps -= 1
            if post_task_steps == 0:
                break
        steps += 1
    if log_writer and not disable_save:
        log_writer.end_log_session()
    s.disconnect()
# Script entry point.
if __name__ == "__main__":
    main()
| 31.465753
| 120
| 0.652881
| 855
| 6,891
| 4.929825
| 0.278363
| 0.025623
| 0.037011
| 0.031317
| 0.160854
| 0.130961
| 0.117675
| 0.081613
| 0.055991
| 0.038434
| 0
| 0.013749
| 0.250617
| 6,891
| 218
| 121
| 31.610092
| 0.802479
| 0.015963
| 0
| 0.061111
| 0
| 0
| 0.155959
| 0.010781
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.066667
| 0
| 0.088889
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b99e3b0ee335439a781ae231769595415a1dc6ec
| 546
|
py
|
Python
|
wagtail/wagtailadmin/menu.py
|
digitalmarmalade/wagtail
|
ac4d23172ff3f42746625630583b17d243fb9822
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T18:02:04.000Z
|
2015-11-05T18:02:04.000Z
|
wagtail/wagtailadmin/menu.py
|
digitalmarmalade/wagtail
|
ac4d23172ff3f42746625630583b17d243fb9822
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailadmin/menu.py
|
digitalmarmalade/wagtail
|
ac4d23172ff3f42746625630583b17d243fb9822
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils.text import slugify
from django.utils.html import format_html
class MenuItem(object):
    """A single admin menu entry rendered as an ``<li>`` link.

    ``name`` defaults to a slug of the label; ``order`` controls sort position.
    """

    def __init__(self, label, url, name=None, classnames='', order=1000):
        self.label = label
        self.url = url
        self.classnames = classnames
        # BUG FIX: the Python 2-only builtin unicode() raised NameError under
        # Python 3; interpolating into a unicode literal coerces the label to
        # text equivalently on both Python 2 and 3.
        self.name = (name or slugify(u'%s' % label))
        self.order = order

    def render_html(self):
        """Return the menu item as an HTML list item (values escaped by format_html)."""
        return format_html(
            u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
            self.name, self.url, self.classnames, self.label)
| 32.117647
| 79
| 0.611722
| 73
| 546
| 4.479452
| 0.465753
| 0.082569
| 0.091743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019185
| 0.236264
| 546
| 16
| 80
| 34.125
| 0.764988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0.076923
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a14f8cda479b51cbe9296c63d8ae7397078bc7
| 760
|
py
|
Python
|
robotframework_iperf3/__main__.py
|
scathaig/robotframework-iperf3
|
cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
|
[
"Apache-2.0"
] | null | null | null |
robotframework_iperf3/__main__.py
|
scathaig/robotframework-iperf3
|
cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
|
[
"Apache-2.0"
] | null | null | null |
robotframework_iperf3/__main__.py
|
scathaig/robotframework-iperf3
|
cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from robotremoteserver import RobotRemoteServer
from .iperf3 import Iperf3
if __name__ == '__main__':
    # Command-line interface for the Iperf3 remote-server wrapper.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.prog = 'python3 -m robotframework_iperf3'

    # Listen address/port for the Robot Framework remote server.
    parser.add_argument(
        "-a",
        "--address",
        type=str,
        help="server listen address",
        default='0.0.0.0')
    parser.add_argument(
        "-p",
        "--port",
        type=int,
        help="server listen port",
        default=8270)

    options = parser.parse_args()

    # Expose the Iperf3 keyword library over the remote-server protocol.
    remote_server = RobotRemoteServer(
        Iperf3(),
        host=options.address,
        port=options.port
    )
    remote_server.serve()
| 21.111111
| 92
| 0.619737
| 76
| 760
| 6.026316
| 0.513158
| 0.0131
| 0.074236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023593
| 0.275
| 760
| 35
| 93
| 21.714286
| 0.807623
| 0.057895
| 0
| 0.08
| 0
| 0
| 0.147265
| 0.029453
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a1dbb5125acea57356714e95e66c8e3a612e30
| 1,101
|
py
|
Python
|
FluentPython/dynamic_attr_and_prop/frozen_json.py
|
xu6148152/Binea_Python_Project
|
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
|
[
"MIT"
] | null | null | null |
FluentPython/dynamic_attr_and_prop/frozen_json.py
|
xu6148152/Binea_Python_Project
|
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
|
[
"MIT"
] | null | null | null |
FluentPython/dynamic_attr_and_prop/frozen_json.py
|
xu6148152/Binea_Python_Project
|
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from collections import abc
from keyword import iskeyword
class FronzenJSON:
    """Read-only facade for navigating a JSON-like structure via attributes.

    Mappings are wrapped lazily on attribute access, lists are converted
    element-wise, and scalar values are returned unchanged. Keys colliding
    with Python keywords get a trailing underscore (e.g. ``class`` ->
    ``class_``).
    """

    def __init__(self, mapping):
        self._data = {}
        for key, value in mapping.items():
            if iskeyword(key):
                key += '_'  # make keyword keys reachable as attributes
            self._data[key] = value

    def __getattr__(self, name):
        # Delegate real dict API (keys, items, ...) to the underlying dict;
        # anything else is treated as a JSON key and wrapped on demand.
        if hasattr(self._data, name):
            return getattr(self._data, name)
        else:
            return FronzenJSON(self._data[name])

    @classmethod
    def build(cls, obj):
        """Wrap *obj*: mapping -> cls, sequence -> list of wrapped items, else as-is."""
        if isinstance(obj, abc.Mapping):
            return cls(obj)
        # BUG FIX: this branch tested abc.MutableMapping, which can never be
        # reached after the Mapping check above; lists must be matched as
        # MutableSequence.
        elif isinstance(obj, abc.MutableSequence):
            return [cls.build(item) for item in obj]
        else:
            return obj

    def __new__(cls, arg):
        # Mirrors build(): only mappings produce an actual instance.
        if isinstance(arg, abc.Mapping):
            return super().__new__(cls)
        elif isinstance(arg, abc.MutableSequence):
            # BUG FIX: was `cls[item]` (class subscription -> TypeError);
            # each element must be wrapped by *calling* the class.
            return [cls(item) for item in arg]
        else:
            return arg
| 27.525
| 56
| 0.561308
| 124
| 1,101
| 4.790323
| 0.354839
| 0.094276
| 0.080808
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002721
| 0.332425
| 1,101
| 39
| 57
| 28.230769
| 0.805442
| 0.104451
| 0
| 0.103448
| 0
| 0
| 0.001018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.068966
| 0
| 0.517241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a20089dfb3f5c8a3472d1f3be189af236d4d44
| 4,062
|
py
|
Python
|
pomdp_problems/tag/models/transition_model.py
|
Semanti1/pomdp_findit
|
b96c1c06aab4b485fa005654cf6438ff63718083
|
[
"MIT"
] | null | null | null |
pomdp_problems/tag/models/transition_model.py
|
Semanti1/pomdp_findit
|
b96c1c06aab4b485fa005654cf6438ff63718083
|
[
"MIT"
] | null | null | null |
pomdp_problems/tag/models/transition_model.py
|
Semanti1/pomdp_findit
|
b96c1c06aab4b485fa005654cf6438ff63718083
|
[
"MIT"
] | null | null | null |
"""The Tag problem. Implemented according to the paper `Anytime Point-Based
Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_.
Transition model: the robot moves deterministically. The target's movement
depends on the robot; With Pr=0.8 the target moves away from the robot,
and with Pr=0.2, the target stays at the same place. The target never
moves closer to the robot.
"""
import copy
import pomdp_py
import pomdp_problems.util as util
import pomdp_problems.tag.constants as constants
from pomdp_problems.tag.domain.action import *
class TagTransitionModel(pomdp_py.TransitionModel):
    """Tag transition dynamics: deterministic robot motion combined with a
    stochastic target motion policy (see the module docstring)."""

    def __init__(self, grid_map, target_motion_policy):
        self._grid_map = grid_map
        self.target_motion_policy = target_motion_policy

    @classmethod
    def if_move_by(cls, grid_map, position, action):
        """Position reached by applying *action* at *position*; non-motion
        actions and moves off the map are no-ops."""
        if not isinstance(action, MotionAction):
            return position
        dx, dy = action.motion
        candidate = (position[0] + dx, position[1] + dy)
        return candidate if grid_map.valid_pose(candidate) else position

    def probability(self, next_state, state, action, **kwargs):
        """Probability of *next_state* given *state* and *action*."""
        # The robot moves deterministically, so any mismatch gets ~zero mass.
        expected = TagTransitionModel.if_move_by(
            self._grid_map, state.robot_position, action)
        if expected != next_state.robot_position:
            return constants.EPSILON

        if isinstance(action, TagAction):
            # Tagging succeeds exactly when robot and target coincide; the
            # target_found flag must agree with that outcome.
            on_target = next_state.target_position == next_state.robot_position
            if bool(on_target) == bool(next_state.target_found):
                return 1.0 - constants.EPSILON
            return constants.EPSILON

        # Otherwise the target moves according to its motion policy.
        valid_moves = self._grid_map.valid_motions(state.target_position)
        return self.target_motion_policy.probability(
            next_state.target_position,
            state.target_position,
            state.robot_position,
            valid_moves)

    def sample(self, state, action, argmax=False):
        """Sample the next state (or take the mode when argmax=True)."""
        next_state = copy.deepcopy(state)
        next_state.robot_position = TagTransitionModel.if_move_by(
            self._grid_map, state.robot_position, action)

        if isinstance(action, TagAction):
            # A Tag attempt never moves the target; it only flips the flag
            # when the robot already sits on an untagged target.
            if not state.target_found \
                    and state.robot_position == state.target_position:
                next_state.target_found = True
            return next_state

        valid_moves = self._grid_map.valid_motions(state.target_position)
        chooser = (self.target_motion_policy.mpe
                   if argmax else self.target_motion_policy.random)
        next_state.target_position = chooser(
            state.robot_position, state.target_position, valid_moves)
        return next_state

    def argmax(self, state, action, **kwargs):
        """Most likely next state (deterministic variant of sample)."""
        return self.sample(state, action, argmax=True)
| 45.640449
| 103
| 0.537912
| 394
| 4,062
| 5.281726
| 0.246193
| 0.079289
| 0.091302
| 0.057665
| 0.396444
| 0.29457
| 0.24988
| 0.24988
| 0.206631
| 0.206631
| 0
| 0.007475
| 0.407189
| 4,062
| 88
| 104
| 46.159091
| 0.856728
| 0.115953
| 0
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078125
| false
| 0
| 0.078125
| 0.015625
| 0.34375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a21ff5a8c4fcb07930580d031f6847ecfaed43
| 4,731
|
py
|
Python
|
packit/fedpkg.py
|
bocekm/packit
|
b5da23c0fa3f205537551b9ed212d8f77d00d705
|
[
"MIT"
] | null | null | null |
packit/fedpkg.py
|
bocekm/packit
|
b5da23c0fa3f205537551b9ed212d8f77d00d705
|
[
"MIT"
] | null | null | null |
packit/fedpkg.py
|
bocekm/packit
|
b5da23c0fa3f205537551b9ed212d8f77d00d705
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from typing import Optional
from packit.exceptions import PackitCommandFailedError
from packit.utils import commands # so we can mock utils
from packit.utils.logging import logger
class FedPKG:
    """
    Wrapper around the `fedpkg` command line tool (`fedpkg-stage` when
    targeting the staging infrastructure).

    Part of the code is from release-bot:
    https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py
    """

    def __init__(
        self, fas_username: str = None, directory: str = None, stage: bool = False
    ):
        self.fas_username = fas_username
        self.directory = directory
        self.stage = stage
        # Staging uses a separately named executable.
        self.fedpkg_exec = "fedpkg-stage" if stage else "fedpkg"

    def __repr__(self):
        return (
            "FedPKG("
            f"fas_username='{self.fas_username}', "
            f"directory='{self.directory}', "
            f"stage='{self.stage}')"
        )

    def new_sources(self, sources="", fail=True):
        """
        Upload sources to the lookaside cache (`fedpkg new-sources`).

        :param sources: path of the source archive to upload
        :param fail: whether the underlying command should raise on failure
        :raises Exception: when `self.directory` is not an existing directory
        """
        if not Path(self.directory).is_dir():
            # BUG FIX: the message used to end with a dangling colon and never
            # said which path was inaccessible; include the directory.
            raise Exception(f"Cannot access fedpkg repository: {self.directory}")

        return commands.run_command_remote(
            cmd=[self.fedpkg_exec, "new-sources", sources],
            cwd=self.directory,
            error_message="Adding new sources failed:",
            fail=fail,
        )

    def build(
        self,
        scratch: bool = False,
        nowait: bool = False,
        koji_target: Optional[str] = None,
        srpm_path: Optional[Path] = None,
    ):
        """
        build in koji

        :param scratch: scratch (temporary) build or not?
        :param nowait: False == wait for the build to finish
        :param koji_target: koji target to build in (`koji list-targets`)
        :param srpm_path: use selected SRPM for build, not dist-git repo & ref
        :return:
        """
        cmd = [self.fedpkg_exec, "build"]
        if scratch:
            cmd.append("--scratch")
        if nowait:
            cmd.append("--nowait")
        if koji_target:
            cmd += ["--target", koji_target]
        if srpm_path:
            cmd += ["--srpm", str(srpm_path)]
        try:
            commands.run_command_remote(
                cmd=cmd,
                cwd=self.directory,
                error_message="Submission of build to koji failed.",
                fail=True,
            )
        except PackitCommandFailedError as ex:
            # fail on the fedpkg side, the build is triggered anyway; detect
            # the known crash signature and treat it as success.
            if (
                "watch_tasks() got an unexpected keyword argument 'ki_handler'"
                in ex.stderr_output
            ):
                logger.info(
                    "The 'fedpkg build' command crashed which is a known issue: "
                    "the build is submitted in koji anyway."
                )
                logger.debug(ex.stdout_output)
            else:
                raise

    def clone(self, package_name: str, target_path: str, anonymous: bool = False):
        """
        clone a dist-git repo; this has to be done in current env
        b/c we don't have the keytab in sandbox

        :param package_name: dist-git package to clone
        :param target_path: where to clone it
        :param anonymous: clone without authentication (`-a`)
        """
        cmd = [self.fedpkg_exec]
        if self.fas_username:
            cmd += ["--user", self.fas_username]
        cmd += ["-q", "clone"]
        if anonymous:
            cmd += ["-a"]
        cmd += [package_name, target_path]

        error_msg = (
            f"Packit failed to clone the repository {package_name}; "
            "please make sure that you are authorized to clone repositories "
            "from Fedora dist-git - this may require SSH keys set up or "
            "Kerberos ticket being active."
        )
        commands.run_command(cmd=cmd, error_message=error_msg)
| 35.044444
| 82
| 0.609808
| 590
| 4,731
| 4.80678
| 0.410169
| 0.03103
| 0.026446
| 0.017983
| 0.038787
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001214
| 0.303741
| 4,731
| 134
| 83
| 35.30597
| 0.859745
| 0.34052
| 0
| 0.063291
| 0
| 0
| 0.211055
| 0.028476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0
| 0.063291
| 0.012658
| 0.164557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a524c2d76717a70aa199aeb8c04e4579e1a276
| 2,217
|
py
|
Python
|
src/models/text_node.py
|
moevm/nosql1h19-text-graph
|
410f156ad4f232f8aa060d43692ab020610ddfd4
|
[
"MIT"
] | null | null | null |
src/models/text_node.py
|
moevm/nosql1h19-text-graph
|
410f156ad4f232f8aa060d43692ab020610ddfd4
|
[
"MIT"
] | null | null | null |
src/models/text_node.py
|
moevm/nosql1h19-text-graph
|
410f156ad4f232f8aa060d43692ab020610ddfd4
|
[
"MIT"
] | null | null | null |
from neomodel import StructuredNode, StringProperty, JSONProperty, \
Relationship, IntegerProperty
import numpy as np
import re
from models.text_relation import TextRelation
__all__ = ['TextNode']
class TextNode(StructuredNode):
    """Graph node holding one text fragment and its analysis results."""

    # Position of the fragment within the document (unique per document).
    order_id = IntegerProperty(required=True, unique_index=True)
    label = StringProperty(required=True)
    text = StringProperty(required=True)
    # Serialized output of the text-analysis algorithms.
    alg_results = JSONProperty()
    # Edges to other fragments produced by the algorithms.
    link = Relationship('TextNode', 'ALG', model=TextRelation)

    def short(self):
        """Return the first five words of the text, each followed by a space."""
        res = ''.join([word.strip() + ' '
                       for word in re.split(r'[\n ]', self.text, maxsplit=5)[:5]])
        return res

    def describe(self):
        """Return an HTML summary table (Russian UI strings) for this node."""
        return f"""
            <h1>Фрагмент: {self.order_id} </h1>
            <table border="1" width=100%>
                <caption>
                    Информация о вершине
                </caption>
                <tr>
                    <th>Количество символов</th>
                    <td>{self.character_num()}</td>
                </tr>
                <tr>
                    <th>Количество слов</th>
                    <td>{self.words_num()}</td>
                </tr>
                <tr>
                    <th>Количество предложений</th>
                    <td>{self.sentences_num()}</td>
                </tr>
                <tr>
                    <th>Количество связей</th>
                    <td>{len(self.link)}</td>
                </tr>
            </table>
        """

    def preview(self, frag_num=0):
        """One-line preview: zero-padded order id, label, first words.

        :param frag_num: total number of fragments; used only to size the
            zero padding of the id (defaults to 3 digits when <= 0)
        """
        leading = 3
        if frag_num > 0:
            leading = int(np.floor(np.log10(frag_num))) + 1
        # BUG FIX: the original if/else compared str(order_id) to str(label)
        # but both branches returned the identical string, followed by an
        # unreachable third return; collapsed to a single return.
        return f"{str(self.order_id).zfill(leading)}: " \
               + f"[{self.label}] {self.short()}..."

    def words_num(self):
        """Number of whitespace-separated words in the text."""
        return len(self.text.split())

    def character_num(self):
        """Number of characters in the text."""
        return len(self.text)

    def sentences_num(self):
        """Number of '.'-separated chunks longer than 2 characters."""
        return len([s for s in self.text.split('.') if len(s) > 2])
| 31.671429
| 73
| 0.488498
| 232
| 2,217
| 4.577586
| 0.362069
| 0.032957
| 0.041431
| 0.025424
| 0.220339
| 0.202448
| 0.097928
| 0.097928
| 0.097928
| 0.097928
| 0
| 0.010722
| 0.368967
| 2,217
| 69
| 74
| 32.130435
| 0.748392
| 0
| 0
| 0.206897
| 0
| 0
| 0.438881
| 0.083897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0.068966
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a5362ea01805df4bb2ad83d0b9f037b0c75078
| 481
|
py
|
Python
|
lib/fmdplugins/list_records.py
|
GonzaloAlvarez/py-ga-sysadmin
|
fbbbbcad36df9f1b3e40328ff48c22bad13a56f4
|
[
"MIT"
] | 2
|
2018-01-05T15:32:06.000Z
|
2021-06-02T13:15:05.000Z
|
lib/fmdplugins/list_records.py
|
GonzaloAlvarez/devops-tools
|
fbbbbcad36df9f1b3e40328ff48c22bad13a56f4
|
[
"MIT"
] | 67
|
2017-01-09T19:39:19.000Z
|
2018-02-28T05:33:40.000Z
|
lib/fmdplugins/list_records.py
|
GonzaloAlvarez/devops-tools
|
fbbbbcad36df9f1b3e40328ff48c22bad13a56f4
|
[
"MIT"
] | null | null | null |
from lib.fmd.namedentity import NamedEntity
from lib.fmd.decorators import Action, ListStage, GetStage
from lib.exceptions.workflow import EntryException
@Action(ListStage.DATAGATHERING)
def list_records(context, output):
    """Fetch DB entries for the list workflow's data-gathering stage.

    :param context: workflow context providing ``ddb``, ``log`` and
        optionally a ``filter`` attribute
    :param output: stage output placeholder required by the Action protocol;
        unused here (the original clobbered it with ``[]`` and never read it)
    :return: NamedEntity wrapping the fetched records
    """
    if hasattr(context, 'filter'):
        context.log.debug('Using filter [%s]' % context.filter)
        entries = context.ddb.list(context.filter)
    else:
        entries = context.ddb.list()
    return NamedEntity('records', entries)
| 30.0625
| 63
| 0.719335
| 56
| 481
| 6.160714
| 0.517857
| 0.06087
| 0.057971
| 0.121739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170478
| 481
| 15
| 64
| 32.066667
| 0.864662
| 0
| 0
| 0
| 0
| 0
| 0.06237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a5aa9a635301ab37ae92c6395e50231bd81a4b
| 6,033
|
py
|
Python
|
pysoa/server/action/switched.py
|
zetahernandez/pysoa
|
006e55ba877196a42c64f2ff453583d366082d55
|
[
"Apache-2.0"
] | null | null | null |
pysoa/server/action/switched.py
|
zetahernandez/pysoa
|
006e55ba877196a42c64f2ff453583d366082d55
|
[
"Apache-2.0"
] | null | null | null |
pysoa/server/action/switched.py
|
zetahernandez/pysoa
|
006e55ba877196a42c64f2ff453583d366082d55
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import (
absolute_import,
unicode_literals,
)
import abc
import six
from pysoa.server.internal.types import is_switch
__all__ = (
'SwitchedAction',
)
def _len(item):
# Safe length that won't raise an error on values that don't support length
return getattr(item, '__len__', lambda *_: -1)()
class _DefaultAction(object):
def __int__(self):
d = id(self)
return d if d < 0 else -d
def __eq__(self, other):
return getattr(other, '__class__', None) == _DefaultAction
class _SwitchedActionMetaClass(abc.ABCMeta):
    def __new__(mcs, name, bases, body):
        """
        Validate switch_to_action_map once, at class-creation time, rather
        than on every instantiation: problems surface on import and the
        per-call validation cost disappears.
        """
        cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body)

        def entry_is_invalid(entry):
            # Each entry must be indexable, exactly two elements long, and
            # form a (switch, callable-action) pair.
            return (
                not hasattr(entry, '__getitem__') or
                _len(entry) != 2 or
                not is_switch(entry[0]) or
                not callable(entry[1])
            )

        # The base SwitchedAction class itself is exempt from validation.
        # noinspection PyUnresolvedReferences
        if bases[0] is not object and (
            not cls.switch_to_action_map or
            not hasattr(cls.switch_to_action_map, '__iter__') or
            _len(cls.switch_to_action_map) < 2 or
            any(True for entry in cls.switch_to_action_map if entry_is_invalid(entry))
        ):
            raise ValueError(
                'Class attribute switch_to_action_map must be an iterable of at least two indexable items, each '
                'with exactly two indexes, where the first element is a switch and the second element is an action '
                '(callable).'
            )
        return cls
@six.add_metaclass(_SwitchedActionMetaClass)
class SwitchedAction(object):
    """
    A specialized action that defers to other, concrete actions based on request switches.

    Subclasses must not override any methods; they override only
    `switch_to_action_map`, an iterable providing `__len__` (a tuple is
    recommended) of at least two items. Each item is itself an indexable
    two-element object: the first element is a switch — something providing
    `__int__`, or providing a `value` attribute that does — and the second is
    an action: an action class (e.g. one extending `Action`) or any callable
    that accepts server settings and returns a callable mapping an
    `ActionRequest` to an `ActionResponse` (or raising an `ActionError`).

    Items are scanned in order, and the first whose switch is active in the
    request wins. When no switch matches, the very last action in the map is
    used — so the last item acts as the default, conventionally keyed by
    `SwitchedAction.DEFAULT_ACTION` (a real switch works there too; it is
    still the fallback when nothing else matched).

    Example usage:

    .. code-block:: python

        class UserActionV1(Action):
            ...

        class UserActionV2(Action):
            ...

        class UserTransitionAction(SwitchedAction):
            switch_to_action_map = (
                (USER_VERSION_2_ENABLED, UserActionV2),
                (SwitchedAction.DEFAULT_ACTION, UserActionV1),
            )
    """

    DEFAULT_ACTION = _DefaultAction()

    switch_to_action_map = ()

    def __init__(self, settings=None):
        """
        Construct a new action. Concrete classes should not override this.

        :param settings: The server settings object
        :type settings: dict
        """
        if self.__class__ is SwitchedAction:
            raise TypeError('Cannot instantiate abstract SwitchedAction')
        self.settings = settings

    def get_uninitialized_action(self, action_request):
        """
        Return the raw action (action class or base callable) selected by the
        request's switches, without instantiating or calling it. Falls back
        to the default action, or failing that the last non-default action,
        when no switch matches.

        :param action_request: The request object
        :type action_request: EnrichedActionRequest

        :return: The action
        :rtype: callable
        """
        fallback = matched = default = None
        for switch, candidate in self.switch_to_action_map:
            if switch == self.DEFAULT_ACTION:
                default = candidate
            elif switch and action_request.switches.is_active(switch):
                matched = candidate
                break
            else:
                fallback = candidate
        return matched or default or fallback

    def __call__(self, action_request):
        """
        Main entry point for actions from the `Server` (or potentially from
        tests). Selects the concrete action for this request, initializes it
        with the server settings, and invokes it with the request object,
        returning its response directly.

        :param action_request: The request object
        :type action_request: EnrichedActionRequest

        :return: The response object
        :rtype: ActionResponse

        :raise: ActionError, ResponseValidationError
        """
        action = self.get_uninitialized_action(action_request)
        return action(self.settings)(action_request)
| 38.673077
| 117
| 0.673463
| 781
| 6,033
| 5.003841
| 0.300896
| 0.028659
| 0.050154
| 0.060901
| 0.113869
| 0.05783
| 0.05783
| 0.05783
| 0.041453
| 0.041453
| 0
| 0.002705
| 0.264711
| 6,033
| 155
| 118
| 38.922581
| 0.878269
| 0.539698
| 0
| 0
| 0
| 0
| 0.121299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118644
| false
| 0
| 0.084746
| 0.033898
| 0.389831
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a6e1263697c6f30d94bde78d6313fed9c57e76
| 542
|
py
|
Python
|
Seeder/settings/tests.py
|
WebarchivCZ/Seeder
|
1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8
|
[
"MIT"
] | 8
|
2017-08-16T19:18:57.000Z
|
2022-01-24T10:08:19.000Z
|
Seeder/settings/tests.py
|
WebarchivCZ/Seeder
|
1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8
|
[
"MIT"
] | 242
|
2017-02-03T19:15:52.000Z
|
2022-03-25T08:02:52.000Z
|
Seeder/settings/tests.py
|
WebarchivCZ/Seeder
|
1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8
|
[
"MIT"
] | 2
|
2019-03-06T12:36:29.000Z
|
2019-07-08T12:52:20.000Z
|
# Django settings overrides used when running the test suite.
from .base import *

# Fixed throwaway key — fine for tests, never for production.
SECRET_KEY = 'test'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1']

# Local on-disk SQLite database keeps the test run self-contained.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'sqlite3.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
    },
}

# Print outgoing mail to stdout instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# In-memory search backend — no external search service needed for tests.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}
| 19.357143
| 66
| 0.605166
| 58
| 542
| 5.551724
| 0.775862
| 0.055901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.232472
| 542
| 28
| 67
| 19.357143
| 0.754808
| 0.116236
| 0
| 0.1
| 0
| 0
| 0.389121
| 0.24477
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.05
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a7d44b00e1b419e797c8637498d8abc23d4def
| 13,322
|
bzl
|
Python
|
java/image.bzl
|
Springworks/rules_docker
|
b943cd1fe3bf1c6c5fdac1889e952408599cffff
|
[
"Apache-2.0"
] | null | null | null |
java/image.bzl
|
Springworks/rules_docker
|
b943cd1fe3bf1c6c5fdac1889e952408599cffff
|
[
"Apache-2.0"
] | null | null | null |
java/image.bzl
|
Springworks/rules_docker
|
b943cd1fe3bf1c6c5fdac1889e952408599cffff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rule for creating a Java container image.
The signature of java_image is compatible with java_binary.
The signature of war_image is compatible with java_library.
"""
load(
"//container:container.bzl",
"container_pull",
_repositories = "repositories",
)
# Load the resolved digests.
load(
":java.bzl",
_JAVA_DIGESTS = "DIGESTS",
)
load(
":jetty.bzl",
_JETTY_DIGESTS = "DIGESTS",
)
def repositories():
    """Pull the base images and jars needed by java_image / war_image."""

    # Call the core "repositories" function to reduce boilerplate.
    # This is idempotent if folks call it themselves.
    _repositories()

    excludes = native.existing_rules().keys()

    # (rule name, distroless repository, pinned digest) per base image.
    base_images = [
        ("java_image_base", "distroless/java", _JAVA_DIGESTS["latest"]),
        ("java_debug_image_base", "distroless/java", _JAVA_DIGESTS["debug"]),
        ("jetty_image_base", "distroless/java/jetty", _JETTY_DIGESTS["latest"]),
        ("jetty_debug_image_base", "distroless/java/jetty", _JETTY_DIGESTS["debug"]),
    ]
    for rule_name, repo, digest in base_images:
        if rule_name not in excludes:
            container_pull(
                name = rule_name,
                registry = "gcr.io",
                repository = repo,
                digest = digest,
            )

    if "servlet_api" not in excludes:
        native.maven_jar(
            name = "javax_servlet_api",
            artifact = "javax.servlet:javax.servlet-api:3.0.1",
        )
# Pick the Java base image by compilation mode; debug mode maps to the
# *_debug_* base, every other mode uses the regular base.
DEFAULT_JAVA_BASE = select({
    "@io_bazel_rules_docker//:fastbuild": "@java_image_base//image",
    "@io_bazel_rules_docker//:debug": "@java_debug_image_base//image",
    "@io_bazel_rules_docker//:optimized": "@java_image_base//image",
    "//conditions:default": "@java_image_base//image",
})

# Same mode-based selection for the Jetty base used by war_image.
DEFAULT_JETTY_BASE = select({
    "@io_bazel_rules_docker//:fastbuild": "@jetty_image_base//image",
    "@io_bazel_rules_docker//:debug": "@jetty_debug_image_base//image",
    "@io_bazel_rules_docker//:optimized": "@jetty_image_base//image",
    "//conditions:default": "@jetty_image_base//image",
})
load(
"//container:container.bzl",
_container = "container",
)
def java_files(f):
    """Collect the runtime jars of a target: its transitive runtime jars when
    it carries the Java provider, plus its direct files (e.g. a plain jar)."""
    collected = []
    if java_common.provider in f:
        collected += list(f[java_common.provider].transitive_runtime_jars)
    if hasattr(f, "files"):  # a jar file
        collected += list(f.files)
    return collected
load(
"//lang:image.bzl",
"dep_layer_impl",
"layer_file_path",
)
def _jar_dep_layer_impl(ctx):
    """Appends a layer for a single dependency's runfiles."""
    # Delegates to the shared lang implementation, using java_files to
    # enumerate the jars contributed by ctx.attr.dep.
    return dep_layer_impl(ctx, runfiles = java_files)
# Rule: one image layer containing a single dependency's runtime jars,
# stacked between the base image and the final app layer.
jar_dep_layer = rule(
    attrs = dict(_container.image.attrs.items() + {
        # The base image on which to overlay the dependency layers.
        "base": attr.label(mandatory = True),
        # The dependency whose runfiles we're appending.
        "dep": attr.label(mandatory = True),
        # Whether to lay out each dependency in a manner that is agnostic
        # of the binary in which it is participating. This can increase
        # sharing of the dependency's layer across images, but requires a
        # symlink forest in the app layers.
        "agnostic_dep_layout": attr.bool(default = True),
        # Override the defaults.
        "directory": attr.string(default = "/app"),
        # https://github.com/bazelbuild/bazel/issues/2176
        "data_path": attr.string(default = "."),
    }.items()),
    executable = True,
    outputs = _container.image.outputs,
    implementation = _jar_dep_layer_impl,
)
def _jar_app_layer_impl(ctx):
    """Appends the app layer with all remaining runfiles."""
    # Jars already provided by the per-dependency layers beneath us.
    available = depset()
    for jar in ctx.attr.jar_layers:
        available += java_files(jar)

    # We compute the set of unavailable stuff by walking deps
    # in the same way, adding in our binary and then subtracting
    # out what it available.
    unavailable = depset()
    for jar in ctx.attr.deps + ctx.attr.runtime_deps:
        unavailable += java_files(jar)

    unavailable += java_files(ctx.attr.binary)
    unavailable = [x for x in unavailable if x not in available]

    # Full classpath covers both the shared layers and this layer's jars.
    classpath = ":".join([
        layer_file_path(ctx, x)
        for x in available + unavailable
    ])

    # Classpaths can grow long and there is a limit on the length of a
    # command line, so mitigate this by always writing the classpath out
    # to a file instead.
    classpath_file = ctx.new_file(ctx.attr.name + ".classpath")
    ctx.actions.write(classpath_file, classpath)

    # NOTE(review): binary_path is computed but never used below — confirm
    # whether it can be dropped.
    binary_path = layer_file_path(ctx, ctx.files.binary[0])
    classpath_path = layer_file_path(ctx, classpath_file)

    entrypoint = [
        "/usr/bin/java",
        "-cp",
        # Support optionally passing the classpath as a file.
        "@" + classpath_path if ctx.attr._classpath_as_file else classpath,
    ] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args

    # Only ship the jars no lower layer already provides, plus the
    # generated classpath file.
    file_map = {
        layer_file_path(ctx, f): f
        for f in unavailable + [classpath_file]
    }

    return _container.image.implementation(
        ctx,
        # We use all absolute paths.
        directory = "/",
        file_map = file_map,
        entrypoint = entrypoint,
    )
# Rule: the final app layer of a java_image — the binary, its not-yet-layered
# deps, and the generated classpath file, plus the image entrypoint.
jar_app_layer = rule(
    attrs = dict(_container.image.attrs.items() + {
        # The binary target for which we are synthesizing an image.
        "binary": attr.label(mandatory = True),
        # The full list of dependencies that have their own layers
        # factored into our base.
        "jar_layers": attr.label_list(),
        # The rest of the dependencies.
        "deps": attr.label_list(),
        "runtime_deps": attr.label_list(),
        "jvm_flags": attr.string_list(),
        # The base image on which to overlay the dependency layers.
        "base": attr.label(mandatory = True),
        # The main class to invoke on startup.
        "main_class": attr.string(mandatory = True),
        # Whether to lay out each dependency in a manner that is agnostic
        # of the binary in which it is participating. This can increase
        # sharing of the dependency's layer across images, but requires a
        # symlink forest in the app layers.
        "agnostic_dep_layout": attr.bool(default = True),
        # Whether the classpath should be passed as a file.
        "_classpath_as_file": attr.bool(default = False),
        # Override the defaults.
        "directory": attr.string(default = "/app"),
        # https://github.com/bazelbuild/bazel/issues/2176
        "data_path": attr.string(default = "."),
        "legacy_run_behavior": attr.bool(default = False),
    }.items()),
    executable = True,
    outputs = _container.image.outputs,
    implementation = _jar_app_layer_impl,
)
def java_image(
        name,
        base = None,
        main_class = None,
        deps = [],
        runtime_deps = [],
        layers = [],
        jvm_flags = [],
        **kwargs):
    """Builds a container image overlaying the java_binary.

    Args:
      layers: Augments "deps" with dependencies that should be put into
              their own layers.
      **kwargs: See java_binary.
    """
    binary_target = name + ".binary"

    # If the rule is turning a JAR built with java_library into
    # a binary, then it will appear in runtime_deps. We are
    # not allowed to pass deps (even []) if there is no srcs kwarg.
    native.java_binary(
        name = binary_target,
        main_class = main_class,
        deps = (deps + layers) or None,
        runtime_deps = runtime_deps,
        jvm_flags = jvm_flags,
        **kwargs
    )

    # Chain one intermediate image layer per entry in `layers`, each built
    # on top of the previous one.
    current_base = base or DEFAULT_JAVA_BASE
    for i, layer_dep in enumerate(layers):
        layer_name = "%s.%d" % (name, i)
        jar_dep_layer(name = layer_name, base = current_base, dep = layer_dep)
        current_base = layer_name

    jar_app_layer(
        name = name,
        base = current_base,
        binary = binary_target,
        main_class = main_class,
        jvm_flags = jvm_flags,
        deps = deps,
        runtime_deps = runtime_deps,
        jar_layers = layers,
        visibility = kwargs.get("visibility", None),
        args = kwargs.get("args"),
    )
def _war_dep_layer_impl(ctx):
    """Appends a layer for a single dependency's runfiles."""
    # Unlike the jar variant, these files land (flattened) under the rule's
    # `directory` default, the Jetty webapp's WEB-INF/lib.

    # TODO(mattmoor): Today we run the risk of filenames colliding when
    # they get flattened. Instead of just flattening and using basename
    # we should use a file_map based scheme.
    return _container.image.implementation(
        ctx,
        files = java_files(ctx.attr.dep),
    )
# Rule: one image layer holding a single WAR dependency's jars, flattened
# into the Jetty webapp's WEB-INF/lib.
_war_dep_layer = rule(
    attrs = dict(_container.image.attrs.items() + {
        # The base image on which to overlay the dependency layers.
        "base": attr.label(mandatory = True),
        # The dependency whose runfiles we're appending.
        "dep": attr.label(mandatory = True),
        # Whether to lay out each dependency in a manner that is agnostic
        # of the binary in which it is participating. This can increase
        # sharing of the dependency's layer across images, but requires a
        # symlink forest in the app layers.
        "agnostic_dep_layout": attr.bool(default = True),
        # Override the defaults.
        "directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"),
        # WE WANT PATHS FLATTENED
        # "data_path": attr.string(default = "."),
    }.items()),
    executable = True,
    outputs = _container.image.outputs,
    implementation = _war_dep_layer_impl,
)
def _war_app_layer_impl(ctx):
    """Appends the app layer with all remaining runfiles."""
    # Jars already provided by the per-dependency layers beneath us.
    available = depset()
    for jar in ctx.attr.jar_layers:
        available += java_files(jar)

    # This is based on rules_appengine's WAR rules.
    transitive_deps = depset()
    transitive_deps += java_files(ctx.attr.library)

    # TODO(mattmoor): Handle data files.

    # If we start putting libs in servlet-agnostic paths,
    # then consider adding symlinks here.
    # Ship only the jars no earlier layer already provides.
    files = [d for d in transitive_deps if d not in available]

    return _container.image.implementation(ctx, files = files)
# Rule: the final app layer of a war_image — the library's remaining jars on
# top of the stacked dependency layers, laid out for Jetty.
_war_app_layer = rule(
    attrs = dict(_container.image.attrs.items() + {
        # The library target for which we are synthesizing an image.
        "library": attr.label(mandatory = True),
        # The full list of dependencies that have their own layers
        # factored into our base.
        "jar_layers": attr.label_list(),
        # The base image on which to overlay the dependency layers.
        "base": attr.label(mandatory = True),
        "entrypoint": attr.string_list(default = []),
        # Whether to lay out each dependency in a manner that is agnostic
        # of the binary in which it is participating. This can increase
        # sharing of the dependency's layer across images, but requires a
        # symlink forest in the app layers.
        "agnostic_dep_layout": attr.bool(default = True),
        # Override the defaults.
        "directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"),
        # WE WANT PATHS FLATTENED
        # "data_path": attr.string(default = "."),
        "legacy_run_behavior": attr.bool(default = False),
    }.items()),
    executable = True,
    outputs = _container.image.outputs,
    implementation = _war_app_layer_impl,
)
def war_image(name, base = None, deps = [], layers = [], **kwargs):
    """Builds a container image overlaying the java_library as an exploded WAR.

    TODO(mattmoor): For `bazel run` of this to be useful, we need to be able
    to ctrl-C it and have the container actually terminate. More information:
    https://github.com/bazelbuild/bazel/issues/3519

    Args:
      name: name of the resulting image target.
      base: the image on which to overlay the layers; defaults to
        DEFAULT_JETTY_BASE when unset.
      deps: java dependencies bundled into the final app layer.
      layers: Augments "deps" with dependencies that should be put into
             their own layers.
      **kwargs: See java_library.
    """
    library_name = name + ".library"

    # The WAR's classes come from an ordinary java_library; `layers` entries
    # are also deps so compilation sees them.
    native.java_library(name = library_name, deps = deps + layers, **kwargs)

    base = base or DEFAULT_JETTY_BASE

    # Stack one intermediate image per entry in `layers`, chaining each as
    # the base of the next, so shared jars land in reusable layers.
    for index, dep in enumerate(layers):
        this_name = "%s.%d" % (name, index)
        _war_dep_layer(name = this_name, base = base, dep = dep)
        base = this_name

    visibility = kwargs.get("visibility", None)
    tags = kwargs.get("tags", None)
    _war_app_layer(
        name = name,
        base = base,
        library = library_name,
        jar_layers = layers,
        visibility = visibility,
        tags = tags,
    )
| 34.246787
| 79
| 0.642546
| 1,702
| 13,322
| 4.866628
| 0.190364
| 0.017385
| 0.013522
| 0.021248
| 0.574188
| 0.529156
| 0.499698
| 0.490764
| 0.462031
| 0.426053
| 0
| 0.002419
| 0.255367
| 13,322
| 388
| 80
| 34.335052
| 0.83256
| 0.337712
| 0
| 0.378723
| 0
| 0
| 0.158374
| 0.077517
| 0
| 0
| 0
| 0.005155
| 0
| 1
| 0.034043
| false
| 0
| 0
| 0
| 0.055319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9a831ae9aec7e87ced37e12721727df9e75bb48
| 17,427
|
py
|
Python
|
cupyx/jit/_builtin_funcs.py
|
khushi-411/cupy
|
b5221a478c800c5e60eef65545467de9eb00c0d9
|
[
"MIT"
] | null | null | null |
cupyx/jit/_builtin_funcs.py
|
khushi-411/cupy
|
b5221a478c800c5e60eef65545467de9eb00c0d9
|
[
"MIT"
] | null | null | null |
cupyx/jit/_builtin_funcs.py
|
khushi-411/cupy
|
b5221a478c800c5e60eef65545467de9eb00c0d9
|
[
"MIT"
] | null | null | null |
import warnings
import cupy
from cupy_backends.cuda.api import runtime
from cupy.cuda import device
from cupyx.jit import _cuda_types
from cupyx.jit._internal_types import BuiltinFunc
from cupyx.jit._internal_types import Data
from cupyx.jit._internal_types import Constant
from cupyx.jit._internal_types import Range
from cupyx.jit import _compile
from functools import reduce
class RangeFunc(BuiltinFunc):
    # JIT replacement for the built-in `range`, with optional `#pragma unroll`
    # emission when all loop bounds are compile-time constants.

    def __call__(self, *args, unroll=None):
        """Range with loop unrolling support.
        Args:
            start (int):
                Same as that of built-in :obj:`range`.
            stop (int):
                Same as that of built-in :obj:`range`.
            step (int):
                Same as that of built-in :obj:`range`.
            unroll (int or bool or None):
                - If `True`, add ``#pragma unroll`` directive before the
                    loop.
                - If `False`, add ``#pragma unroll(1)`` directive before
                    the loop to disable unrolling.
                - If an `int`, add ``#pragma unroll(n)`` directive before
                    the loop, where the integer ``n`` means the number of
                    iterations to unroll.
                - If `None` (default), leave the control of loop unrolling
                    to the compiler (no ``#pragma``).
        .. seealso:: `#pragma unroll`_
        .. _#pragma unroll:
            https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll
        """
        super().__call__()

    def call(self, env, *args, unroll=None):
        # Normalize the (start, stop, step) triple exactly like built-in range.
        if len(args) == 0:
            raise TypeError('range expected at least 1 argument, got 0')
        elif len(args) == 1:
            start, stop, step = Constant(0), args[0], Constant(1)
        elif len(args) == 2:
            start, stop, step = args[0], args[1], Constant(1)
        elif len(args) == 3:
            start, stop, step = args
        else:
            raise TypeError(
                f'range expected at most 3 argument, got {len(args)}')
        if unroll is not None:
            # Unrolling is a compile-time #pragma, so every part of the range
            # (and the unroll count itself) must be a compile-time Constant.
            if not all(isinstance(x, Constant)
                       for x in (start, stop, step, unroll)):
                raise TypeError(
                    'loop unrolling requires constant start, stop, step and '
                    'unroll value')
            unroll = unroll.obj
            if not (isinstance(unroll, int) or isinstance(unroll, bool)):
                raise TypeError(
                    'unroll value expected to be of type int, '
                    f'got {type(unroll).__name__}')
            if unroll is False:
                unroll = 1
            if not (unroll is True or 0 < unroll < 1 << 31):
                warnings.warn(
                    'loop unrolling is ignored as the unroll value is '
                    'non-positive or greater than INT_MAX')
        # The step's sign decides the loop's stop condition; leave it
        # undetermined (None) when it cannot be inferred at compile time.
        if isinstance(step, Constant):
            step_is_positive = step.obj >= 0
        elif step.ctype.dtype.kind == 'u':
            step_is_positive = True
        else:
            step_is_positive = None
        stop = Data.init(stop, env)
        start = Data.init(start, env)
        step = Data.init(step, env)
        if start.ctype.dtype.kind not in 'iu':
            raise TypeError('range supports only for integer type.')
        if stop.ctype.dtype.kind not in 'iu':
            raise TypeError('range supports only for integer type.')
        if step.ctype.dtype.kind not in 'iu':
            raise TypeError('range supports only for integer type.')
        if env.mode == 'numpy':
            # NumPy-semantics mode: loop variable is a plain Python int.
            ctype = _cuda_types.Scalar(int)
        elif env.mode == 'cuda':
            # CUDA-semantics mode: loop variable keeps the stop value's dtype.
            ctype = stop.ctype
        else:
            assert False
        return Range(start, stop, step, ctype, step_is_positive, unroll=unroll)
class LenFunc(BuiltinFunc):
    """Implements the built-in ``len()`` inside JIT-compiled kernels."""

    def call(self, env, *args, **kwds):
        # len() takes exactly one positional argument and no keywords.
        if len(args) != 1:
            raise TypeError(f'len() expects only 1 argument, got {len(args)}')
        if kwds:
            raise TypeError('keyword arguments are not supported')
        (target,) = args
        ctype = target.ctype
        if not isinstance(ctype, _cuda_types.CArray):
            raise TypeError('len() supports only array type')
        if not ctype.ndim:
            raise TypeError('len() of unsized array')
        # The length is the extent of the leading axis, widened to long long.
        code = f'static_cast<long long>({target.code}.shape()[0])'
        return Data(code, _cuda_types.Scalar('q'))
class MinFunc(BuiltinFunc):
    """Implements the built-in ``min()`` inside JIT-compiled kernels."""

    def call(self, env, *args, **kwds):
        if len(args) < 2:
            raise TypeError(
                f'min() expects at least 2 arguments, got {len(args)}')
        if kwds:
            raise TypeError('keyword arguments are not supported')
        # Fold the arguments pairwise through cupy.minimum, left to right,
        # mirroring Python's variadic min().
        result = args[0]
        for operand in args[1:]:
            result = _compile._call_ufunc(
                cupy.minimum, (result, operand), None, env)
        return result
class MaxFunc(BuiltinFunc):
    """Implements the built-in ``max()`` inside JIT-compiled kernels."""

    def call(self, env, *args, **kwds):
        if len(args) < 2:
            raise TypeError(
                f'max() expects at least 2 arguments, got {len(args)}')
        if kwds:
            raise TypeError('keyword arguments are not supported')
        # Fold the arguments pairwise through cupy.maximum, left to right,
        # mirroring Python's variadic max().
        result = args[0]
        for operand in args[1:]:
            result = _compile._call_ufunc(
                cupy.maximum, (result, operand), None, env)
        return result
class SyncThreads(BuiltinFunc):

    def __call__(self):
        """Calls ``__syncthreads()``.
        .. seealso:: `Synchronization functions`_
        .. _Synchronization functions:
            https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
        """
        super().__call__()

    def call_const(self, env):
        # Emit the block-wide barrier intrinsic verbatim.
        code = '__syncthreads()'
        return Data(code, _cuda_types.void)
class SyncWarp(BuiltinFunc):

    def __call__(self, *, mask=0xffffffff):
        """Calls ``__syncwarp()``.
        Args:
            mask (int): Active threads in a warp. Default is 0xffffffff.
        .. seealso:: `Synchronization functions`_
        .. _Synchronization functions:
            https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
        """
        super().__call__()

    def call(self, env, *, mask=None):
        if runtime.is_hip:
            # HIP's __syncwarp() takes no mask; drop a user-supplied one.
            if mask is not None:
                warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
            mask = None
        if mask:
            if isinstance(mask, Constant):
                # Validate compile-time masks eagerly: 32 bits, one per lane.
                if not (0x0 <= mask.obj <= 0xffffffff):
                    raise ValueError('mask is out of range')
            mask = _compile._astype_scalar(
                mask, _cuda_types.int32, 'same_kind', env)
            mask = Data.init(mask, env)
            code = f'__syncwarp({mask.code})'
        else:
            code = '__syncwarp()'
        return Data(code, _cuda_types.void)
class SharedMemory(BuiltinFunc):

    def __call__(self, dtype, size, alignment=None):
        """Allocates shared memory and returns it as a 1-D array.
        Args:
            dtype (dtype):
                The dtype of the returned array.
            size (int or None):
                If ``int`` type, the size of static shared memory.
                If ``None``, declares the shared memory with extern specifier.
            alignment (int or None): Enforce the alignment via __align__(N).
        """
        super().__call__()

    def call_const(self, env, dtype, size, alignment=None):
        name = env.get_fresh_variable_name(prefix='_smem')
        child_type = _cuda_types.Scalar(dtype)
        # Keep drawing names until we find one not already declared in env.
        while env[name] is not None:
            name = env.get_fresh_variable_name(prefix='_smem') # retry
        var = Data(name, _cuda_types.SharedMem(child_type, size, alignment))
        # Register the declaration so codegen emits it; the user-visible
        # value is a pointer to the element type, not the declaration itself.
        env.decls[name] = var
        env.locals[name] = var
        return Data(name, _cuda_types.Ptr(child_type))
class AtomicOp(BuiltinFunc):
    # Emits calls to the CUDA atomic* device functions (atomicAdd,
    # atomicCAS, ...). One instance per operation, parameterized by the
    # set of dtype names the intrinsic accepts.

    def __init__(self, op, dtypes):
        self._op = op
        self._name = 'atomic' + op
        self._dtypes = dtypes
        doc = f"""Calls the ``{self._name}`` function to operate atomically on
        ``array[index]``. Please refer to `Atomic Functions`_ for detailed
        explanation.
        Args:
            array: A :class:`cupy.ndarray` to index over.
            index: A valid index such that the address to the corresponding
                array element ``array[index]`` can be computed.
            value: Represent the value to use for the specified operation. For
                the case of :obj:`atomic_cas`, this is the value for
                ``array[index]`` to compare with.
            alt_value: Only used in :obj:`atomic_cas` to represent the value
                to swap to.
        .. seealso:: `Numba's corresponding atomic functions`_
        .. _Atomic Functions:
            https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
        .. _Numba's corresponding atomic functions:
            https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations
        """
        self.__doc__ = doc

    def __call__(self, array, index, value, alt_value=None):
        super().__call__()

    def call(self, env, array, index, value, value2=None):
        name = self._name
        op = self._op
        array = Data.init(array, env)
        if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)):
            raise TypeError('The first argument must be of array type.')
        # Resolve array[index] into an addressable element.
        target = _compile._indexing(array, index, env)
        ctype = target.ctype
        if ctype.dtype.name not in self._dtypes:
            raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
        # On HIP, 'e' is not supported and we will never reach here
        if (op == 'Add' and ctype.dtype.char == 'e'
                and runtime.runtimeGetVersion() < 10000):
            raise RuntimeError(
                'float16 atomic operation is not supported before CUDA 10.0.')
        value = _compile._astype_scalar(value, ctype, 'same_kind', env)
        value = Data.init(value, env)
        if op == 'CAS':
            assert value2 is not None
            # On HIP, 'H' is not supported and we will never reach here
            if ctype.dtype.char == 'H':
                if runtime.runtimeGetVersion() < 10010:
                    raise RuntimeError(
                        'uint16 atomic operation is not supported before '
                        'CUDA 10.1')
                if int(device.get_compute_capability()) < 70:
                    raise RuntimeError(
                        'uint16 atomic operation is not supported before '
                        'sm_70')
            value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env)
            value2 = Data.init(value2, env)
            # atomicCAS takes both the compare value and the swap value.
            code = f'{name}(&{target.code}, {value.code}, {value2.code})'
        else:
            assert value2 is None
            code = f'{name}(&{target.code}, {value.code})'
        return Data(code, ctype)
class GridFunc(BuiltinFunc):
    # Numba-style jit.grid / jit.gridsize helpers; mode picks which C
    # expression template is emitted per axis.

    def __init__(self, mode):
        if mode == 'grid':
            self._desc = 'Compute the thread index in the grid.'
            self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x'
            self._link = 'numba.cuda.grid'
            self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}'
        elif mode == 'gridsize':
            self._desc = 'Compute the grid size.'
            self._eq = 'jit.blockDim.x * jit.gridDim.x'
            self._link = 'numba.cuda.gridsize'
            self._code = 'blockDim.{n} * gridDim.{n}'
        else:
            raise ValueError('unsupported function')
        doc = f""" {self._desc}
        Computation of the first integer is as follows::
            {self._eq}
        and for the other two integers the ``y`` and ``z`` attributes are used.
        Args:
            ndim (int): The dimension of the grid. Only 1, 2, or 3 is allowed.
        Returns:
            int or tuple:
                If ``ndim`` is 1, an integer is returned, otherwise a tuple.
        .. note::
            This function follows the convention of Numba's
            :func:`{self._link}`.
        """
        self.__doc__ = doc

    def __call__(self, ndim):
        super().__call__()

    def call_const(self, env, ndim):
        if not isinstance(ndim, int):
            raise TypeError('ndim must be an integer')
        # Numba convention: for 1D we return a single variable,
        # otherwise a tuple
        if ndim == 1:
            return Data(self._code.format(n='x'), _cuda_types.uint32)
        elif ndim == 2:
            dims = ('x', 'y')
        elif ndim == 3:
            dims = ('x', 'y', 'z')
        else:
            raise ValueError('Only ndim=1,2,3 are supported')
        # Emit one expression per axis, bundled into a thrust tuple.
        elts_code = ', '.join(self._code.format(n=n) for n in dims)
        ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim)
        return Data(f'thrust::make_tuple({elts_code})', ctype)
class WarpShuffleOp(BuiltinFunc):
    # Emits calls to the __shfl_*_sync family of warp shuffle intrinsics.

    def __init__(self, op, dtypes):
        # op is '' (plain shfl), 'up', 'down' or 'xor'.
        self._op = op
        self._name = '__shfl_' + (op + '_' if op else '') + 'sync'
        self._dtypes = dtypes
        doc = f"""Calls the ``{self._name}`` function. Please refer to
        `Warp Shuffle Functions`_ for detailed explanation.
        .. _Warp Shuffle Functions:
            https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions
        """
        self.__doc__ = doc

    def __call__(self, mask, var, val_id, *, width=32):
        super().__call__()

    def call(self, env, mask, var, val_id, *, width=None):
        name = self._name
        var = Data.init(var, env)
        ctype = var.ctype
        if ctype.dtype.name not in self._dtypes:
            raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
        try:
            # mask must be a compile-time Constant; unwrap its Python value.
            mask = mask.obj
        except Exception:
            raise TypeError('mask must be an integer')
        if runtime.is_hip:
            # HIP's shuffle intrinsics take no mask argument.
            warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
        elif not (0x0 <= mask <= 0xffffffff):
            raise ValueError('mask is out of range')
        # val_id refers to "delta" for shfl_{up, down}, "srcLane" for shfl, and
        # "laneMask" for shfl_xor
        if self._op in ('up', 'down'):
            val_id_t = _cuda_types.uint32
        else:
            val_id_t = _cuda_types.int32
        val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env)
        val_id = Data.init(val_id, env)
        if width:
            if isinstance(width, Constant):
                if width.obj not in (2, 4, 8, 16, 32):
                    raise ValueError('width needs to be power of 2')
        else:
            # Default width is the hardware warp size (64 lanes on AMD).
            width = Constant(64) if runtime.is_hip else Constant(32)
        width = _compile._astype_scalar(
            width, _cuda_types.int32, 'same_kind', env)
        width = Data.init(width, env)
        code = f'{name}({hex(mask)}, {var.code}, {val_id.code}'
        code += f', {width.code})'
        return Data(code, ctype)
class LaneID(BuiltinFunc):

    def __call__(self):
        """Returns the lane ID of the calling thread, ranging in
        ``[0, jit.warpsize)``.
        .. note::
            Unlike :obj:`numba.cuda.laneid`, this is a callable function
            instead of a property.
        """
        super().__call__()

    def _get_preamble(self):
        # Build the C++ helper that the generated kernel calls; it is
        # injected once into the module preamble by call_const().
        preamble = '__device__ __forceinline__ unsigned int LaneId() {'
        if not runtime.is_hip:
            # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419
            preamble += """
    unsigned int ret;
    asm ("mov.u32 %0, %%laneid;" : "=r"(ret) );
    return ret; }
"""
        else:
            # defined in hip/hcc_detail/device_functions.h
            preamble += """
    return __lane_id(); }
"""
        return preamble

    def call_const(self, env):
        env.generated.add_code(self._get_preamble())
        return Data('LaneId()', _cuda_types.uint32)
# Map Python built-ins to their JIT-aware replacements; the compiler consults
# this table when a bare `range`/`len`/`min`/`max` appears in kernel source.
builtin_functions_dict = {
    range: RangeFunc(),
    len: LenFunc(),
    min: MinFunc(),
    max: MaxFunc(),
}
# Public singletons re-exported as cupyx.jit.* helpers.
range_ = RangeFunc()
syncthreads = SyncThreads()
syncwarp = SyncWarp()
shared_memory = SharedMemory()
grid = GridFunc('grid')
gridsize = GridFunc('gridsize')
laneid = LaneID()
# atomic functions
# Each op lists the dtype names accepted by the corresponding CUDA atomic*
# intrinsic; the float16/uint16 extensions are unavailable on HIP.
atomic_add = AtomicOp(
    'Add',
    ('int32', 'uint32', 'uint64', 'float32', 'float64')
    + (() if runtime.is_hip else ('float16',)))
atomic_sub = AtomicOp(
    'Sub', ('int32', 'uint32'))
atomic_exch = AtomicOp(
    'Exch', ('int32', 'uint32', 'uint64', 'float32'))
atomic_min = AtomicOp(
    'Min', ('int32', 'uint32', 'uint64'))
atomic_max = AtomicOp(
    'Max', ('int32', 'uint32', 'uint64'))
atomic_inc = AtomicOp(
    'Inc', ('uint32',))
atomic_dec = AtomicOp(
    'Dec', ('uint32',))
atomic_cas = AtomicOp(
    'CAS',
    ('int32', 'uint32', 'uint64')
    + (() if runtime.is_hip else ('uint16',)))
atomic_and = AtomicOp(
    'And', ('int32', 'uint32', 'uint64'))
atomic_or = AtomicOp(
    'Or', ('int32', 'uint32', 'uint64'))
atomic_xor = AtomicOp(
    'Xor', ('int32', 'uint32', 'uint64'))
# warp-shuffle functions
_shfl_dtypes = (
    ('int32', 'uint32', 'int64', 'float32', 'float64')
    + (() if runtime.is_hip else ('uint64', 'float16')))
shfl_sync = WarpShuffleOp('', _shfl_dtypes)
shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes)
shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes)
shfl_xor_sync = WarpShuffleOp('xor', _shfl_dtypes)
| 35.206061
| 115
| 0.569748
| 2,094
| 17,427
| 4.582139
| 0.178128
| 0.029182
| 0.017196
| 0.018343
| 0.309536
| 0.279208
| 0.240542
| 0.216988
| 0.203544
| 0.173528
| 0
| 0.0164
| 0.310725
| 17,427
| 494
| 116
| 35.277328
| 0.782384
| 0.131348
| 0
| 0.236994
| 0
| 0.008671
| 0.287642
| 0.012954
| 0
| 0
| 0.002442
| 0
| 0.008671
| 1
| 0.066474
| false
| 0
| 0.031792
| 0.00289
| 0.17341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9acae3f6c9a11754c72065d93acff3857609af2
| 5,423
|
py
|
Python
|
toontown/estate/DistributedHouseDoor.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
toontown/estate/DistributedHouseDoor.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/estate/DistributedHouseDoor.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from toontown.toonbase.ToonBaseGlobal import *
from panda3d.core import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.distributed import DistributedObject
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.MessengerGlobal import messenger
from direct.fsm import ClassicFSM
from toontown.building import DistributedDoor
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.building import FADoorCodes
from toontown.building import DoorTypes
from toontown.estate.DistributedHouse import DistributedHouse
class DistributedHouseDoor(DistributedDoor.DistributedDoor):
    """Client-side door on an estate house (exterior and interior variants).

    Extends the generic DistributedDoor with house-specific lookup of the
    building NodePath and trigger names keyed on the house's doId.
    """

    def __init__(self, cr):
        DistributedDoor.DistributedDoor.__init__(self, cr)

    def disable(self):
        # Stop listening to all accepted events on top of the base teardown.
        DistributedDoor.DistributedDoor.disable(self)
        self.ignoreAll()

    def setZoneIdAndBlock(self, zoneId, block):
        # For house doors the "block" field carries the house's doId.
        self.houseId = block
        DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block)

    def getTriggerName(self):
        # Trigger names are keyed on the house rather than the street block.
        return 'door_trigger_' + str(self.houseId)

    def hideDoorParts(self):
        # NOTE(review): bare except silently swallows any failure here —
        # presumably tolerated for models lacking hole geometry; confirm.
        try:
            self.findDoorNode('doorFrameHoleRight').hide()
            self.findDoorNode('doorFrameHoleLeft').hide()
        except:
            pass

    def announceGenerate(self):
        # Defer post-announce setup until the related house (exterior) or
        # interior model has finished loading.
        DistributedObject.DistributedObject.announceGenerate(self)
        if self.doorType == DoorTypes.EXT_STANDARD:
            house = base.cr.doId2do.get(self.houseId)
            # NOTE(review): when the house is not yet generated, `house` is
            # None and notify.error fires with 'NoneType' — confirm intended.
            if not isinstance(house, DistributedHouse):
                self.notify.error('tried to use {0} as house'.format(house.__class__.__name__))
            if house and house.house_loaded:
                self.__gotRelatedHouse()
            else:
                self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse)
        elif self.doorType == DoorTypes.INT_STANDARD:
            door = render.find('**/leftDoor;+s')
            if door.isEmpty():
                self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse)
            else:
                self.__gotRelatedHouse()

    def __gotRelatedHouse(self):
        # The house geometry is present: finish generate-time setup and wire
        # up the collision trigger events.
        self.doPostAnnounceGenerate()
        self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()
        self.hideDoorParts()
        building = self.getBuilding()
        doorTrigger = building.find('**/door_trigger*')
        doorTrigger.setName(self.getTriggerName())
        self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
        self.acceptOnce('clearOutToonInterior', self.doorTrigger)
        self.zoneDoneLoading = 0

    def getBuilding(self, allowEmpty = False):
        # Lazily resolve and cache the NodePath of the building owning this
        # door; cached on self.building once found.
        if 'building' not in self.__dict__:
            if self.doorType == DoorTypes.INT_STANDARD:
                door = render.find('**/leftDoor;+s')
                self.building = door.getParent()
            elif self.doorType == DoorTypes.EXT_STANDARD:
                if self.houseId:
                    self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None)
                if allowEmpty:
                    return self.building
        return self.building

    def isInterior(self):
        # 1 when this door leads out of a house interior, else 0.
        if self.doorType == DoorTypes.INT_STANDARD:
            return 1
        return 0

    def getDoorNodePath(self):
        # Locate the node the door-swing animation is parented to.
        if self.doorType == DoorTypes.INT_STANDARD:
            otherNP = render.find('**/door_origin')
        elif self.doorType == DoorTypes.EXT_STANDARD:
            building = self.getBuilding()
            otherNP = building.find('**/door')
            if otherNP.isEmpty():
                otherNP = building.find('**/door_origin')
        else:
            self.notify.error('No such door type as ' + str(self.doorType))
        return otherNP

    def enterClosing(self, ts):
        # Play the door-close animation; if this avatar walked through
        # ('done' attribute set), also issue the zone-change request.
        doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
        if doorFrameHoleRight.isEmpty():
            self.notify.warning('enterClosing(): did not find doorFrameHoleRight')
            return
        rightDoor = self.findDoorNode('rightDoor')
        if rightDoor.isEmpty():
            self.notify.warning('enterClosing(): did not find rightDoor')
            return
        otherNP = self.getDoorNodePath()
        trackName = 'doorClose-%d' % self.doId
        # Swing direction depends on the door's handedness.
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName)
        self.doorTrack.start(ts)
        if hasattr(self, 'done'):
            base.cr.playGame.hood.loader.setHouse(self.houseId)
            zoneId = self.otherZoneId
            if self.doorType == DoorTypes.EXT_STANDARD:
                whereTo = 'house'
            else:
                whereTo = 'estate'
            request = {'loader': 'safeZoneLoader',
             'where': whereTo,
             'how': 'doorIn',
             'hoodId': ToontownGlobals.MyEstate,
             'zoneId': zoneId,
             'shardId': None,
             'avId': -1,
             'allowRedirect': 0,
             'doorDoId': self.otherDoId}
            messenger.send('doorDoneEvent', [request])
        return
| 40.17037
| 299
| 0.638761
| 521
| 5,423
| 6.564299
| 0.318618
| 0.031579
| 0.049123
| 0.033626
| 0.123977
| 0.122807
| 0.081287
| 0.059064
| 0.032164
| 0.032164
| 0
| 0.005984
| 0.260372
| 5,423
| 134
| 300
| 40.470149
| 0.846672
| 0
| 0
| 0.191667
| 0
| 0
| 0.089434
| 0.004057
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091667
| false
| 0.008333
| 0.125
| 0.008333
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|