hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b5c5b472f23085a3ff9231afa8359301b567e4e4
| 280
|
py
|
Python
|
managedtenants/__init__.py
|
sugarraysam/managed-tenants-cli
|
59609583a86d1674f2c7376b601ce9ddfeee48de
|
[
"Apache-2.0"
] | null | null | null |
managedtenants/__init__.py
|
sugarraysam/managed-tenants-cli
|
59609583a86d1674f2c7376b601ce9ddfeee48de
|
[
"Apache-2.0"
] | null | null | null |
managedtenants/__init__.py
|
sugarraysam/managed-tenants-cli
|
59609583a86d1674f2c7376b601ce9ddfeee48de
|
[
"Apache-2.0"
] | null | null | null |
from managedtenants.core.tasks_loader.pre_task import PreTask
from managedtenants.core.tasks_loader.task import Task
from managedtenants.core.tasks_loader.post_task import PostTask
from managedtenants.core.status import Status
__all__ = ["PreTask", "Task", "PostTask", "Status"]
| 40
| 63
| 0.828571
| 37
| 280
| 6.027027
| 0.351351
| 0.32287
| 0.394619
| 0.363229
| 0.443946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082143
| 280
| 6
| 64
| 46.666667
| 0.867704
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b5c9cf0c17d966b3aae6fd6682985612ea488ed8
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/progress/counter.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/progress/counter.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/progress/counter.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/33/39/f2/06bbcf5ab3a519ee0c64094651bf6adda3837bafda35878013f54da180
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 96
| 1
| 96
| 96
| 0.489583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b5ce66011c5fc39562943c59574216cdb9423e27
| 359
|
py
|
Python
|
bookorbooks/quiz/api/serializers/__init__.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | 1
|
2021-08-10T22:24:17.000Z
|
2021-08-10T22:24:17.000Z
|
bookorbooks/quiz/api/serializers/__init__.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | null | null | null |
bookorbooks/quiz/api/serializers/__init__.py
|
talhakoylu/SummerInternshipBackend
|
4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3
|
[
"MIT"
] | null | null | null |
from .quiz_serializers import QuizStandardSerializer
from .question_serializers import QuestionWithQuizSerializer
from .taking_quiz_serializers import TakingQuizDetailsForParentSerializer, TakingQuizDetailsForInstructorSerializer, TakingQuizDetailsForSpecificClassSerializer, TakingQuizSerializer, TakingQuizCreateSerializer, TakingQuizAnswerCreateSerializer
| 89.75
| 244
| 0.930362
| 21
| 359
| 15.714286
| 0.666667
| 0.154545
| 0.127273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047354
| 359
| 3
| 245
| 119.666667
| 0.964912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bd03c649cd8538bae3977c7077acbd80c6968632
| 23
|
py
|
Python
|
src/yfoption/__init__.py
|
socbase/yfoption
|
31ca5231b52fcf73c6edb56419e1ccba06523c45
|
[
"MIT"
] | null | null | null |
src/yfoption/__init__.py
|
socbase/yfoption
|
31ca5231b52fcf73c6edb56419e1ccba06523c45
|
[
"MIT"
] | null | null | null |
src/yfoption/__init__.py
|
socbase/yfoption
|
31ca5231b52fcf73c6edb56419e1ccba06523c45
|
[
"MIT"
] | null | null | null |
from .yfoption import *
| 23
| 23
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ff43a9fd30e8ba7733635b940c6018e8e740b9e
| 4,864
|
py
|
Python
|
CIM15/IEC61970/Informative/InfOperations/__init__.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 58
|
2015-04-22T10:41:03.000Z
|
2022-03-29T16:04:34.000Z
|
CIM15/IEC61970/Informative/InfOperations/__init__.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 12
|
2015-08-26T03:57:23.000Z
|
2020-12-11T20:14:42.000Z
|
CIM15/IEC61970/Informative/InfOperations/__init__.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 35
|
2015-01-10T12:21:03.000Z
|
2020-09-09T08:18:16.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""TODO: The following has been copied from a very old version of draft Part 11, so the references are wrong, but we store the knowledge here to reuse later: 'The Documentation package is used for the modeling of business documents. Some of these may be electronic realizations of legacy paper document, and some may be electronic information exchanges or collections. Documents will typically reference or describe one or more CIM objects. The DataSets package is used to describe documents tyically used for exchange of collections of object descriptions (e.g., NetworkDataSet). The operational package is used to define documents related to distribution operations business processes (e.g., OperationalRestriction, SwitchingSchedule). TroubleTickets are used by Customers to report problems related to the elctrical distribution network. TroubleTickets may be grouped and be related to a PlannedOutage, OutageNotification and/or PowerSystemResource. The Outage package defines classes related to outage management (OutageStep, OutageRecord, OutageReport).'
"""
from CIM15.IEC61970.Informative.InfOperations.OutageRecord import OutageRecord
from CIM15.IEC61970.Informative.InfOperations.OutageReport import OutageReport
from CIM15.IEC61970.Informative.InfOperations.ChangeItem import ChangeItem
from CIM15.IEC61970.Informative.InfOperations.PSREvent import PSREvent
from CIM15.IEC61970.Informative.InfOperations.PlannedOutage import PlannedOutage
from CIM15.IEC61970.Informative.InfOperations.CircuitSection import CircuitSection
from CIM15.IEC61970.Informative.InfOperations.SafetyDocument import SafetyDocument
from CIM15.IEC61970.Informative.InfOperations.OperationalRestriction import OperationalRestriction
from CIM15.IEC61970.Informative.InfOperations.ChangeSet import ChangeSet
from CIM15.IEC61970.Informative.InfOperations.SwitchingSchedule import SwitchingSchedule
from CIM15.IEC61970.Informative.InfOperations.Circuit import Circuit
from CIM15.IEC61970.Informative.InfOperations.NetworkDataSet import NetworkDataSet
from CIM15.IEC61970.Informative.InfOperations.OutageStep import OutageStep
from CIM15.IEC61970.Informative.InfOperations.OrgPsrRole import OrgPsrRole
from CIM15.IEC61970.Informative.InfOperations.OutageCode import OutageCode
from CIM15.IEC61970.Informative.InfOperations.IncidentCode import IncidentCode
from CIM15.IEC61970.Informative.InfOperations.LandBase import LandBase
from CIM15.IEC61970.Informative.InfOperations.ErpPersonScheduleStepRole import ErpPersonScheduleStepRole
from CIM15.IEC61970.Informative.InfOperations.SwitchingStep import SwitchingStep
from CIM15.IEC61970.Informative.InfOperations.CallBack import CallBack
from CIM15.IEC61970.Informative.InfOperations.TroubleTicket import TroubleTicket
from CIM15.IEC61970.Informative.InfOperations.IncidentRecord import IncidentRecord
from CIM15.IEC61970.Informative.InfOperations.OutageNotification import OutageNotification
from CIM15.IEC61970.Informative.InfOperations.OutageStepPsrRole import OutageStepPsrRole
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15#InfOperations"
nsPrefix = "cimInfOperations"
class SwitchingStepStatusKind(str):
"""Values are: instructed, confirmed, proposed, aborted, skipped
"""
pass
class CircuitConnectionKind(str):
"""Values are: electricallyConnected, nominallyConnected, asBuilt, other
"""
pass
class PSREventKind(str):
"""Values are: pendingRemove, pendingReplace, outOfService, pendingAdd, unknown, inService, other
"""
pass
class TroubleReportingKind(str):
"""Values are: email, call, letter, other
"""
pass
class ChangeItemKind(str):
"""Values are: add, modify, delete
"""
pass
class OutageKind(str):
"""Values are: fixed, flexible, forced
"""
pass
| 59.317073
| 1,059
| 0.822368
| 584
| 4,864
| 6.849315
| 0.400685
| 0.054
| 0.102
| 0.168
| 0.252
| 0.0265
| 0
| 0
| 0
| 0
| 0
| 0.043407
| 0.119038
| 4,864
| 81
| 1,060
| 60.049383
| 0.890082
| 0.51028
| 0
| 0.157895
| 0
| 0
| 0.030082
| 0
| 0
| 0
| 0
| 0.012346
| 0
| 1
| 0
| false
| 0.157895
| 0.631579
| 0
| 0.789474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
952508992d00f41db42bb219cce85ea667ef78c3
| 84
|
py
|
Python
|
graspy/simulations/__init__.py
|
dfrancisco1998/graspy
|
241c2f4586d9d44bc7f2e6a7451c9383ad8ff841
|
[
"Apache-2.0"
] | null | null | null |
graspy/simulations/__init__.py
|
dfrancisco1998/graspy
|
241c2f4586d9d44bc7f2e6a7451c9383ad8ff841
|
[
"Apache-2.0"
] | null | null | null |
graspy/simulations/__init__.py
|
dfrancisco1998/graspy
|
241c2f4586d9d44bc7f2e6a7451c9383ad8ff841
|
[
"Apache-2.0"
] | null | null | null |
from .simulations import *
from .simulations_corr import *
from .rdpg_corr import *
| 21
| 31
| 0.785714
| 11
| 84
| 5.818182
| 0.454545
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 84
| 3
| 32
| 28
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1f3b17cb56282c648dc1bf9e9abd6d150ac84e80
| 48
|
py
|
Python
|
model/module/__init__.py
|
nihalsid/dcm-net
|
343608e2cdc565137d6d2958be8bbd751ef20f7d
|
[
"MIT"
] | 100
|
2020-03-20T03:02:21.000Z
|
2022-03-24T10:09:22.000Z
|
model/module/__init__.py
|
nihalsid/dcm-net
|
343608e2cdc565137d6d2958be8bbd751ef20f7d
|
[
"MIT"
] | 13
|
2020-06-10T09:12:54.000Z
|
2021-12-02T19:22:09.000Z
|
model/module/__init__.py
|
nihalsid/dcm-net
|
343608e2cdc565137d6d2958be8bbd751ef20f7d
|
[
"MIT"
] | 11
|
2020-05-08T20:00:52.000Z
|
2021-12-07T07:10:07.000Z
|
from .edge_conv_translation_invariance import *
| 24
| 47
| 0.875
| 6
| 48
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1f580d7a15c44c6de3253f646b3af1d226bcddee
| 26
|
py
|
Python
|
piston/__init__.py
|
Roshan-Here/Eval-Code
|
3def7f40ac84991e347a622ec6063d8827bf379d
|
[
"Apache-2.0"
] | 14
|
2021-09-10T08:06:14.000Z
|
2022-01-11T16:57:05.000Z
|
piston/__init__.py
|
Roshan-Here/Eval-Code
|
3def7f40ac84991e347a622ec6063d8827bf379d
|
[
"Apache-2.0"
] | null | null | null |
piston/__init__.py
|
Roshan-Here/Eval-Code
|
3def7f40ac84991e347a622ec6063d8827bf379d
|
[
"Apache-2.0"
] | 19
|
2021-09-10T08:06:03.000Z
|
2022-01-29T11:28:45.000Z
|
from .client import Piston
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1f61b9ad17613f0d7d39364abf31b03beb7a5580
| 43
|
py
|
Python
|
kidcoords.py
|
coolreader18/imagemapping
|
abe418df5524a86e1e39cf98e06d4776ffb79325
|
[
"MIT"
] | 2
|
2019-08-07T04:53:00.000Z
|
2019-08-07T06:42:17.000Z
|
kidcoords.py
|
coolreader18/imagemapping
|
abe418df5524a86e1e39cf98e06d4776ffb79325
|
[
"MIT"
] | null | null | null |
kidcoords.py
|
coolreader18/imagemapping
|
abe418df5524a86e1e39cf98e06d4776ffb79325
|
[
"MIT"
] | null | null | null |
[(0, 0), (336, 19), (325, 201), (24, 225)]
| 21.5
| 42
| 0.418605
| 8
| 43
| 2.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.514286
| 0.186047
| 43
| 1
| 43
| 43
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1f7a40f93c39f287d754d1ba4871d02ddb3c839e
| 140
|
py
|
Python
|
dependencies/panda/Panda3D-1.10.0-x64/panda3d/dtoolconfig.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dependencies/panda/Panda3D-1.10.0-x64/panda3d/dtoolconfig.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dependencies/panda/Panda3D-1.10.0-x64/panda3d/dtoolconfig.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
# This file is automatically generated by makepanda.py. Do not modify.
from __future__ import absolute_import
from .interrogatedb import *
| 35
| 71
| 0.814286
| 19
| 140
| 5.736842
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 140
| 3
| 72
| 46.666667
| 0.908333
| 0.492857
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f2768fc74fbfe7beeb3641d89a9e2adf6bcc2f9
| 66
|
py
|
Python
|
Computer Vision/imports/__init__.py
|
leommiranda/Practical-Deep-Learning-for-Coders-2.0
|
b13076975abb9d2d01e48a573b89dce751cef0f0
|
[
"MIT"
] | 87
|
2020-09-01T04:23:40.000Z
|
2021-03-12T14:44:07.000Z
|
Computer Vision/imports/__init__.py
|
leommiranda/Practical-Deep-Learning-for-Coders-2.0
|
b13076975abb9d2d01e48a573b89dce751cef0f0
|
[
"MIT"
] | 55
|
2020-09-04T05:46:38.000Z
|
2021-03-21T12:00:05.000Z
|
Computer Vision/imports/__init__.py
|
leommiranda/Practical-Deep-Learning-for-Coders-2.0
|
b13076975abb9d2d01e48a573b89dce751cef0f0
|
[
"MIT"
] | 10
|
2020-08-31T13:02:06.000Z
|
2021-01-26T17:28:23.000Z
|
from .model import *
from .utils import *
from .metrics import *
| 22
| 22
| 0.712121
| 9
| 66
| 5.222222
| 0.555556
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 66
| 3
| 22
| 22
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f54fda349bc4aea6c820994ffa8ceba96eee12d
| 34
|
py
|
Python
|
easybot/__init__.py
|
BlizardWizard/Easybot
|
7ef1f4eb156e1a263f4a12d310f3bb79c06d3d5b
|
[
"MIT"
] | null | null | null |
easybot/__init__.py
|
BlizardWizard/Easybot
|
7ef1f4eb156e1a263f4a12d310f3bb79c06d3d5b
|
[
"MIT"
] | 1
|
2019-02-13T01:58:10.000Z
|
2019-11-06T08:02:32.000Z
|
easybot/__init__.py
|
BlizardWizard/easybot
|
7ef1f4eb156e1a263f4a12d310f3bb79c06d3d5b
|
[
"MIT"
] | null | null | null |
from easybot.Client import Client
| 17
| 33
| 0.852941
| 5
| 34
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f961fb625534be2b1866490a6c744458e63c16d
| 6,129
|
py
|
Python
|
tests/test_protocol_integration.py
|
samuelcolvin/em2
|
a587eaa80c09a2b44d9c221d09a563aad5b05d78
|
[
"MIT"
] | 5
|
2019-03-20T19:07:45.000Z
|
2020-10-03T01:16:05.000Z
|
tests/test_protocol_integration.py
|
samuelcolvin/em2
|
a587eaa80c09a2b44d9c221d09a563aad5b05d78
|
[
"MIT"
] | 51
|
2019-03-12T16:19:46.000Z
|
2021-03-09T00:52:24.000Z
|
tests/test_protocol_integration.py
|
samuelcolvin/em2
|
a587eaa80c09a2b44d9c221d09a563aad5b05d78
|
[
"MIT"
] | 1
|
2019-05-31T14:41:18.000Z
|
2019-05-31T14:41:18.000Z
|
from arq import Worker
from pytest_toolbox.comparison import CloseToNow
from em2.core import Action, ActionTypes, construct_conv
from .conftest import Factory
async def test_publish_em2(factory: Factory, worker: Worker, alt_cli, alt_db_conn, alt_conns):
await factory.create_user(email='testing@local.example.com')
recipient = 'recipient@alt.example.com'
await alt_db_conn.fetchval("insert into auth_users (email, account_status) values ($1, 'active')", recipient)
assert await alt_db_conn.fetchval('select count(*) from conversations') == 0
conv = await factory.create_conv(participants=[{'email': recipient}], publish=True)
assert await worker.run_check(max_burst_jobs=2) == 2
assert await alt_db_conn.fetchval('select count(*) from conversations') == 1
user_id = await alt_db_conn.fetchval('select id from users where email=$1', recipient)
conv = await construct_conv(alt_conns, user_id, conv.key)
assert conv == {
'subject': 'Test Subject',
'created': CloseToNow(),
'messages': [
{
'ref': 3,
'author': 'testing@local.example.com',
'body': 'Test Message',
'created': CloseToNow(),
'format': 'markdown',
'active': True,
}
],
'participants': {'testing@local.example.com': {'id': 1}, 'recipient@alt.example.com': {'id': 2}},
}
async def test_em2_second_message(factory: Factory, worker: Worker, alt_factory: Factory, conns, alt_conns):
a = 'testing@local.example.com'
await factory.create_user(email=a)
recip = 'recipient@alt.example.com'
await alt_factory.create_user(email=recip)
assert await alt_conns.main.fetchval('select count(*) from users') == 1
conv = await factory.create_conv(participants=[{'email': recip}], publish=True)
assert await worker.run_check() == 3
assert await conns.main.fetchval('select count(*) from conversations') == 1
assert await conns.main.fetchval('select count(*) from actions') == 4
assert await alt_conns.main.fetchval('select count(*) from conversations') == 1
assert await alt_conns.main.fetchval('select count(*) from actions') == 4
action = Action(actor_id=factory.user.id, act=ActionTypes.msg_add, body='msg 2')
await factory.act(conv.id, action)
assert await worker.run_check() == 6
conv_summary = await construct_conv(conns, factory.user.id, conv.key)
assert conv_summary == {
'subject': 'Test Subject',
'created': CloseToNow(),
'messages': [
{
'ref': 3,
'author': a,
'body': 'Test Message',
'created': CloseToNow(),
'format': 'markdown',
'active': True,
},
{'ref': 5, 'author': a, 'body': 'msg 2', 'created': CloseToNow(), 'format': 'markdown', 'active': True},
],
'participants': {'testing@local.example.com': {'id': 1}, 'recipient@alt.example.com': {'id': 2}},
}
alt_conv_summary = await construct_conv(alt_conns, alt_factory.user.id, conv.key)
assert conv_summary == alt_conv_summary
async def test_em2_reply(factory: Factory, worker: Worker, alt_factory: Factory, conns, alt_conns, alt_worker: Worker):
sender = 'sender@local.example.com'
await factory.create_user(email=sender)
recip = 'recipient@alt.example.com'
await alt_factory.create_user(email=recip)
assert await conns.main.fetchval('select count(*) from conversations') == 0
assert await alt_conns.main.fetchval('select count(*) from conversations') == 0
conv = await factory.create_conv(participants=[{'email': recip}], publish=True)
assert await conns.main.fetchval('select count(*) from conversations') == 1
assert await conns.main.fetchval('select count(*) from actions') == 4
assert await alt_conns.main.fetchval('select count(*) from conversations') == 0
assert await worker.run_check() == 3
assert await conns.main.fetchval('select count(*) from conversations') == 1
assert await conns.main.fetchval('select count(*) from actions') == 4
assert await alt_conns.main.fetchval('select count(*) from conversations') == 1
assert await alt_conns.main.fetchval('select count(*) from actions') == 4
assert await alt_worker.run_check() == 1
action = Action(actor_id=alt_factory.user.id, act=ActionTypes.msg_add, body='msg 3')
alt_conv_id = await alt_conns.main.fetchval('select id from conversations where key=$1', conv.key)
await alt_factory.act(alt_conv_id, action)
assert await conns.main.fetchval('select count(*) from actions') == 4
assert await alt_conns.main.fetchval('select count(*) from actions') == 4
assert await alt_worker.run_check() == 2
assert await conns.main.fetchval('select count(*) from actions') == 5
assert await alt_conns.main.fetchval('select count(*) from actions') == 4
assert await worker.run_check() == 6
assert await conns.main.fetchval('select count(*) from actions') == 5
assert await alt_conns.main.fetchval('select count(*) from actions') == 5
conv_summary = await construct_conv(conns, factory.user.id, conv.key)
assert conv_summary == {
'subject': 'Test Subject',
'created': CloseToNow(),
'messages': [
{
'ref': 3,
'author': 'sender@local.example.com',
'body': 'Test Message',
'created': CloseToNow(),
'format': 'markdown',
'active': True,
},
{
'ref': 5,
'author': 'recipient@alt.example.com',
'body': 'msg 3',
'created': CloseToNow(),
'format': 'markdown',
'active': True,
},
],
'participants': {'sender@local.example.com': {'id': 1}, 'recipient@alt.example.com': {'id': 2}},
}
alt_conv_summary = await construct_conv(alt_conns, alt_factory.user.id, conv.key)
assert conv_summary == alt_conv_summary
| 41.979452
| 119
| 0.630935
| 744
| 6,129
| 5.068548
| 0.114247
| 0.084593
| 0.110846
| 0.134182
| 0.838239
| 0.806417
| 0.765579
| 0.741978
| 0.719438
| 0.674887
| 0
| 0.011019
| 0.230054
| 6,129
| 145
| 120
| 42.268966
| 0.788091
| 0
| 0
| 0.57265
| 0
| 0
| 0.262849
| 0.060695
| 0
| 0
| 0
| 0
| 0.290598
| 1
| 0
| false
| 0
| 0.034188
| 0
| 0.034188
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
85cc79f23a2b33ce9494952d7dd4dc50acc9bd8c
| 45
|
py
|
Python
|
modules/2.79/bpy/types/ShaderNodeBsdfTransparent.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/ShaderNodeBsdfTransparent.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/ShaderNodeBsdfTransparent.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
class ShaderNodeBsdfTransparent:
pass
| 7.5
| 32
| 0.755556
| 3
| 45
| 11.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 45
| 5
| 33
| 9
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
85d950f9708a9c7836ddbacadf382323526fd692
| 203
|
py
|
Python
|
roseltorg_parser/get_tenders.py
|
ServerHack-The-First-Law-Of-Robotics/data_engineering
|
4651d69615eda1eaac518a86b8c9f94b8912146e
|
[
"MIT"
] | null | null | null |
roseltorg_parser/get_tenders.py
|
ServerHack-The-First-Law-Of-Robotics/data_engineering
|
4651d69615eda1eaac518a86b8c9f94b8912146e
|
[
"MIT"
] | null | null | null |
roseltorg_parser/get_tenders.py
|
ServerHack-The-First-Law-Of-Robotics/data_engineering
|
4651d69615eda1eaac518a86b8c9f94b8912146e
|
[
"MIT"
] | null | null | null |
from parser import TenderParser
from config import mapping
TenderParser().go_through_pages(mapping['metal'], 'metal_tenders.txt')
TenderParser().go_through_pages(mapping['metal'], 'rubber_tenders.txt')
| 33.833333
| 71
| 0.807882
| 26
| 203
| 6.076923
| 0.5
| 0.177215
| 0.265823
| 0.329114
| 0.481013
| 0.481013
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064039
| 203
| 5
| 72
| 40.6
| 0.831579
| 0
| 0
| 0
| 0
| 0
| 0.221675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
85ed7ab8eec9d97fb72895a8a4f28ebd9a5d50a7
| 48
|
py
|
Python
|
amazon/kth_smallest_in_sorted_matrix.py
|
dsrao711/DSA-101-HacktoberFest
|
0d04e2aecee224080c34146e327ff6de15d9ba16
|
[
"MIT"
] | 16
|
2021-10-02T20:10:51.000Z
|
2022-03-06T10:31:11.000Z
|
amazon/kth_smallest_in_sorted_matrix.py
|
dsrao711/DSA-101-HacktoberFest
|
0d04e2aecee224080c34146e327ff6de15d9ba16
|
[
"MIT"
] | 55
|
2021-10-02T07:31:41.000Z
|
2021-10-30T06:19:26.000Z
|
amazon/kth_smallest_in_sorted_matrix.py
|
dsrao711/DSA-101-HacktoberFest
|
0d04e2aecee224080c34146e327ff6de15d9ba16
|
[
"MIT"
] | 36
|
2021-10-02T18:00:08.000Z
|
2022-01-03T18:50:35.000Z
|
m = [1,5,9,10,11,13,12,13,15]
m.sort()
print(m)
| 12
| 29
| 0.5625
| 14
| 48
| 1.928571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.348837
| 0.104167
| 48
| 3
| 30
| 16
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c83ae4819a8eac4e0ff39464e6d7f657dc54b3bb
| 16,696
|
py
|
Python
|
training_pipeline/model.py
|
Elucidation/ChessboardDetect
|
a5d2a2c2ab2434e4e041b4f384f3cd7d6884d2c4
|
[
"MIT"
] | 43
|
2016-10-28T02:13:26.000Z
|
2022-02-16T14:20:32.000Z
|
training_pipeline/model.py
|
AnkaChan/ChessboardDetect
|
a5d2a2c2ab2434e4e041b4f384f3cd7d6884d2c4
|
[
"MIT"
] | 3
|
2016-11-15T19:04:46.000Z
|
2020-08-26T20:41:29.000Z
|
training_pipeline/model.py
|
AnkaChan/ChessboardDetect
|
a5d2a2c2ab2434e4e041b4f384f3cd7d6884d2c4
|
[
"MIT"
] | 12
|
2018-08-22T22:33:21.000Z
|
2021-08-20T08:40:42.000Z
|
# CNN model, based off of the Tensorflow CNN Mnist Classifier tutorial.
import tensorflow as tf
def cnn_model(features, labels, mode, params):
  """Model function for CNN: conv(5x5)-pool-conv(5x5)-pool-dense-dropout-logits.

  Grayscale winsize=10 patches (21x21x1).

  Args:
    features: dict with key "x"; flattened per-example data, reshaped below to
      [-1, 21, 21, 1].
    labels: optional 0/1 labels, used only to split the TensorBoard image
      summaries into Input_Good / Input_Bad; may be None (e.g. at predict time).
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: dict; params['filter_sizes'][0..2] give conv1 filters, conv2
      filters and dense-layer units respectively.

  Returns:
    [batch, 2] logits tensor (binary classifier).
  """
  # Input Layer: reshape flat features into an NHWC image batch.
  input_layer = tf.reshape(features["x"], [-1, 21, 21, 1])
  input_layer = tf.cast(input_layer, tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  if labels is not None:
    # Log a few positive and negative training examples to TensorBoard.
    bool_labels = tf.cast(labels, tf.bool)
    tf.summary.image('Input_Good',
        tf.boolean_mask(input_layer, bool_labels), max_outputs=10)
    tf.summary.image('Input_Bad',
        tf.boolean_mask(input_layer, tf.logical_not(bool_labels)), max_outputs=10)
  # Convolutional Layer #1 ('same' padding keeps the 21x21 spatial size).
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=params['filter_sizes'][0],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1: 21x21 -> 10x10.
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  # Convolutional Layer #2 and Pooling Layer #2: 10x10 -> 5x5.
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=params['filter_sizes'][1],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  # Dense Layer on the flattened 5x5 feature map.
  pool2_flat = tf.reshape(pool2, [-1, 5 * 5 * params['filter_sizes'][1]])
  dense = tf.layers.dense(inputs=pool2_flat, units=params['filter_sizes'][2], activation=tf.nn.relu)
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer: 2 classes.
  logits = tf.layers.dense(inputs=dropout, units=2)
  return logits
def cnn_model_rgb(features, labels, mode, params):
  """Model function for CNN on RGB patches; same topology as cnn_model.

  RGB winsize=10 input (Nx15x15x3).

  Args:
    features: dict with key "x"; reshaped below to [-1, 15, 15, 3].
    labels: unused here (summaries are not split by label).
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: dict; params['filter_sizes'][0..2] give conv1 filters, conv2
      filters and dense-layer units respectively.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 15, 15, 3], name='ReshapeinModel1')
  input_layer = tf.cast(input_layer, tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  tf.summary.image('Input', input_layer, max_outputs=5)
  # Convolutional Layer #1 ('same' padding keeps 15x15).
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=params['filter_sizes'][0],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1: 15x15 -> 7x7.
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  # Visualize the first 4 feature maps of pool1.
  tf.summary.image('pool1', pool1[:,:,:,:4], max_outputs=5)
  # Convolutional Layer #2 and Pooling Layer #2: 7x7 -> 3x3.
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=params['filter_sizes'][1],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  tf.summary.image('pool2', pool2[:,:,:,:4], max_outputs=5)
  # Dense Layer on the flattened 3x3 map.
  pool2_flat = tf.reshape(pool2, [-1, 3 * 3 * params['filter_sizes'][1]], name='ReshapePool2_flat')
  dense = tf.layers.dense(inputs=pool2_flat, units=params['filter_sizes'][2], activation=tf.nn.relu)
  # Lighter dropout (0.1) than the grayscale variant's 0.4.
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.1, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout, units=2)
  return logits
def cnn_model_rgb_small(features, labels, mode, params):
  """Smaller RGB CNN: one conv(3x3) + pool, then dense, dropout and logits.

  RGB winsize=10 input (Nx15x15x3).

  Args:
    features: dict with key "x"; reshaped below to [-1, 15, 15, 3].
    labels: unused.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: params['filter_sizes'][0] -> conv filters,
      params['filter_sizes'][2] -> dense units.
      NOTE(review): index 1 is unused here — presumably so one params dict
      works across all model variants; confirm.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 15, 15, 3], name='ReshapeinModel1')
  input_layer = tf.cast(input_layer, tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  tf.summary.image('Input', input_layer, max_outputs=5)
  # Convolutional Layer #1 ('same' padding keeps 15x15).
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=params['filter_sizes'][0],
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1: 15x15 -> 7x7.
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  tf.summary.image('pool1', pool1[:,:,:,:4], max_outputs=5)
  # Dense Layer on the flattened 7x7 map.
  pool1_flat = tf.reshape(pool1, [-1, 7 * 7 * params['filter_sizes'][0]], name='ReshapePool1_flat')
  dense = tf.layers.dense(inputs=pool1_flat, units=params['filter_sizes'][2], activation=tf.nn.relu)
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout, units=2)
  return logits
def cnn_model_small(features, labels, mode, params):
  """Small grayscale CNN: one conv(3x3) + pool, then dense, dropout, logits.

  Assumes 21x21x1 input patches.

  Args:
    features: dict with key "x"; reshaped below to [-1, 21, 21, 1].
    labels: optional 0/1 labels, used only to split the image summaries into
      Input_Good / Input_Bad; may be None.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: params['filter_sizes'][0] -> conv filters,
      params['filter_sizes'][1] -> dense units.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 21, 21, 1])
  input_layer = tf.cast(input_layer, tf.float32)
  if labels is not None:
    # Log a few positive and negative examples to TensorBoard.
    bool_labels = tf.cast(labels, tf.bool)
    tf.summary.image('Input_Good',
        tf.boolean_mask(input_layer, bool_labels), max_outputs=10)
    tf.summary.image('Input_Bad',
        tf.boolean_mask(input_layer, tf.logical_not(bool_labels)), max_outputs=10)
  # Convolutional Layer #1 ('same' padding keeps 21x21).
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=params['filter_sizes'][0],
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1: 21x21 -> 10x10.
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  # Dense Layer on the flattened 10x10 map.
  pool1_flat = tf.reshape(pool1, [-1, 10 * 10 * params['filter_sizes'][0]])
  dense = tf.layers.dense(inputs=pool1_flat, units=params['filter_sizes'][1], activation=tf.nn.relu)
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout, units=2)
  return logits
def cnn_model_ultrasmall(features, labels, mode, params):
  """Grayscale CNN for 15x15 patches: two conv(5x5)+pool stages, dense, logits.

  Assumes 15x15x1 input patches.

  Args:
    features: dict with key "x"; reshaped below to [-1, 15, 15, 1].
    labels: optional 0/1 labels, used only to split the image summaries into
      Input_Good / Input_Bad; may be None.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: params['filter_sizes'][0..2] -> conv1 filters, conv2 filters,
      dense units.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 15, 15, 1])
  input_layer = tf.cast(input_layer, tf.float32)
  if labels is not None:
    # Log a few positive and negative examples to TensorBoard.
    bool_labels = tf.cast(labels, tf.bool)
    tf.summary.image('Input_Good',
        tf.boolean_mask(input_layer, bool_labels), max_outputs=5)
    tf.summary.image('Input_Bad',
        tf.boolean_mask(input_layer, tf.logical_not(bool_labels)), max_outputs=5)
  # Convolutional Layer #1 ('same' padding keeps 15x15).
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=params['filter_sizes'][0],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1: 15x15 -> 7x7.
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  tf.summary.image('pool1', pool1[:,:,:,:3], max_outputs=5)
  # Convolutional Layer #2 and Pooling Layer #2: 7x7 -> 3x3.
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=params['filter_sizes'][1],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  tf.summary.image('pool2', pool2[:,:,:,:3], max_outputs=5)
  # Dense Layer on the flattened 3x3 map.
  pool2_flat = tf.reshape(pool2, [-1, 3 * 3 * params['filter_sizes'][1]])
  dense = tf.layers.dense(inputs=pool2_flat, units=params['filter_sizes'][2], activation=tf.nn.relu)
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout, units=2)
  return logits
def cnn_model_big(features, labels, mode, params):
  """Two-channel CNN variant (gradient images Gx/Gy), 15x15 input.

  Args:
    features: dict with key "x"; reshaped below to [-1, 15, 15, channels].
    labels: optional 0/1 labels; used only for the Gx/Gy image summaries.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: params['filter_sizes'][0..2] -> conv1 filters, conv2 filters,
      dense units.

  Returns:
    [batch, 2] logits tensor.
  """
  # NOTE(review): an older comment here said "r g b gx gy" (5 channels) but
  # the value is 2, and the summaries below only read channels 0 (Gx) and
  # 1 (Gy), so 2 looks intentional — confirm.
  channels = 2  # Gx, Gy
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 15, 15, channels])
  input_layer = tf.cast(input_layer, tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # Note, potentially expensive
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  if labels is not None:
    bool_labels = tf.cast(labels, tf.bool)
    # tf.summary.image('Input_Good',
    #   tf.boolean_mask(input_layer[:,:,:,:3], bool_labels), max_outputs=10)
    # tf.summary.image('Gx Good',
    #   tf.boolean_mask(tf.expand_dims(input_layer[:,:,:,3], axis=-1), bool_labels), max_outputs=10)
    # tf.summary.image('Gy Good',
    #   tf.boolean_mask(tf.expand_dims(input_layer[:,:,:,4], axis=-1), bool_labels), max_outputs=10)
    # tf.summary.image('Input_Bad',
    #   tf.boolean_mask(input_layer[:,:,:,:3], tf.logical_not(bool_labels)), max_outputs=10)
    # Summaries of the positive examples' gradient channels.
    tf.summary.image('Gx Good',
        tf.boolean_mask(tf.expand_dims(input_layer[:,:,:,0], axis=-1), bool_labels), max_outputs=10)
    tf.summary.image('Gy Good',
        tf.boolean_mask(tf.expand_dims(input_layer[:,:,:,1], axis=-1), bool_labels), max_outputs=10)
  # Convolutional Layer #1
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=params['filter_sizes'][0],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # 15x15 output ('same' padding). NOTE(review): a stale comment said 31x31,
  # likely from an earlier larger-window variant.
  # Pooling Layer #1: 15x15 -> 7x7.
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  # Convolutional Layer #2 and Pooling Layer #2
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=params['filter_sizes'][1],
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # 7x7 -> 3x3 after pooling; consistent with the 3*3 flatten below.
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  # Dense Layer
  pool2_flat = tf.reshape(pool2, [-1, 3 * 3 * params['filter_sizes'][1]])
  dense = tf.layers.dense(inputs=pool2_flat, units=params['filter_sizes'][2], activation=tf.nn.relu)
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout, units=2)
  return logits
def dnn_model_rgb(features, labels, mode, params):
  """Model function for DNN on RGB patches.

  Applies three dense layers; tf.layers.dense on a rank-4 tensor acts on the
  last axis (per-pixel), then the result is flattened and mapped to 2 logits.

  Args:
    features: dict with key "x". NOTE(review): used without a reshape, so it
      is assumed to already be rank-4 Nx15x15x3 — confirm with callers.
    labels: unused.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: params['filter_sizes'][0..2] -> units of the three dense layers.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.cast(features['x'], tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  tf.summary.image('Input', input_layer, max_outputs=4)
  # Dense Layer #1
  dense1 = tf.layers.dense(
      inputs=input_layer,
      units=params['filter_sizes'][0],
      activation=tf.nn.relu)
  # Dense Layer #2
  dense2 = tf.layers.dense(
      inputs=dense1,
      units=params['filter_sizes'][1],
      activation=tf.nn.relu)
  # Dense Layer #3
  dense3 = tf.layers.dense(
      inputs=dense2,
      units=params['filter_sizes'][2],
      activation=tf.nn.relu)
  dropout = tf.layers.dropout(
      inputs=dense3, rate=0.1, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Flatten input Nx15x15xparams['filter_sizes'][2]
  dropout_flat = tf.reshape(dropout, [-1, 15*15*params['filter_sizes'][2]], name='flat_dropout')
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout_flat, units=2)
  return logits
def cnn_model_rgb_v2(features, labels, mode, params):
  """RGB CNN v2: two valid-padding convs, one pool, two dense layers.

  RGB winsize=10 input (Nx15x15x3). Layer widths are hard-coded here;
  ``params`` is unused in this variant.

  Args:
    features: dict with key "x"; reshaped below to [-1, 15, 15, 3].
    labels: unused.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: unused.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 15, 15, 3], name='ReshapeinModel1')
  input_layer = tf.cast(input_layer, tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  tf.summary.image('Input', input_layer, max_outputs=4)
  # Convolutional Layer #1 (default 'valid' padding: 15x15 -> 13x13).
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=128,
      kernel_size=[3, 3],
      activation=tf.nn.relu)
  tf.summary.image('conv1', conv1[:,:,:,:3], max_outputs=4)
  # Nx13x13x128
  # Convolutional Layer #2 ('valid': 13x13 -> 11x11).
  conv2 = tf.layers.conv2d(
      inputs=conv1,
      filters=64,
      kernel_size=[3, 3],
      activation=tf.nn.relu)
  tf.summary.image('conv2', conv2[:,:,:,:3], max_outputs=4)
  # Nx11x11x64
  # Pooling Layer #1: 11x11 -> 5x5.
  pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  # Nx5x5x64
  # Dense Layer 1
  pool1_flat = tf.reshape(pool1, [-1, 5 * 5 * 64])
  dense1 = tf.layers.dense(inputs=pool1_flat, units=128, activation=tf.nn.relu)
  # Dense Layer 2
  dense2 = tf.layers.dense(inputs=dense1, units=128, activation=tf.nn.relu)
  dropout1 = tf.layers.dropout(
      inputs=dense2, rate=0.1, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout1, units=2)
  return logits
def cnn_model_rgb_v3(features, labels, mode, params):
  """RGB CNN v3: VGG-like stack of 3x3 'same' convs with one 3x3 pool.

  RGB winsize=10 input (Nx15x15x3). Layer widths are hard-coded;
  ``params`` is unused in this variant.

  Args:
    features: dict with key "x"; reshaped below to [-1, 15, 15, 3].
    labels: unused.
    mode: a tf.estimator.ModeKeys value; dropout is active only in TRAIN.
    params: unused.

  Returns:
    [batch, 2] logits tensor.
  """
  # Input Layer
  input_layer = tf.reshape(features["x"], [-1, 15, 15, 3], name='ReshapeinModel1')
  input_layer = tf.cast(input_layer, tf.float32)
  # Convert batch of images from uint8 to float64 normalized.
  # input_layer = tf.map_fn(lambda img: tf.image.per_image_standardization(img), input_layer)
  tf.summary.image('Input', input_layer, max_outputs=4)
  # Nx15x15xK
  # Convolutional Layer #1
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=64,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # tf.summary.image('conv1', conv1[:,:,:,:3], max_outputs=4)
  # Convolutional Layer #2
  conv2 = tf.layers.conv2d(
      inputs=conv1,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # tf.summary.image('conv2', conv2[:,:,:,:3], max_outputs=4)
  # NOTE(review): conv3 and conv4 below are built but never consumed —
  # pool1 takes conv2 as input, so their outputs are dead ends (their
  # variables are still created and trained gradients-free). Possibly
  # pool1 was intended to take conv4; confirm before changing, since
  # rewiring alters the graph and invalidates existing checkpoints.
  conv3 = tf.layers.conv2d(
      inputs=conv2,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # tf.summary.image('conv3', conv3[:,:,:,:3], max_outputs=4)
  conv4 = tf.layers.conv2d(
      inputs=conv3,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # tf.summary.image('conv4', conv4[:,:,:,:3], max_outputs=4)
  # Pooling Layer #1: 15x15 -> 5x5 (pool 3, stride 3).
  pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[3, 3], strides=3)
  # Nx5x5xK
  # Convolutional Layer #5
  conv5 = tf.layers.conv2d(
      inputs=pool1,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # Convolutional Layer #6
  conv6 = tf.layers.conv2d(
      inputs=conv5,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  conv7 = tf.layers.conv2d(
      inputs=conv6,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # Convolutional Layer #8
  conv8 = tf.layers.conv2d(
      inputs=conv7,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
  # tf.summary.image('conv4', conv4[:,:,:,:3], max_outputs=4)
  # Dense Layer 1 on the flattened 5x5x128 map.
  conv8_flat = tf.reshape(conv8, [-1, 5 * 5 * 128])
  dense1 = tf.layers.dense(inputs=conv8_flat, units=512, activation=tf.nn.relu)
  dropout1 = tf.layers.dropout(inputs=dense1, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits Layer
  logits = tf.layers.dense(inputs=dropout1, units=2)
  return logits
def cnn_model_fn(features, labels, mode, params):
  """Estimator model_fn: builds the net, predictions, loss, train op, metrics.

  Delegates graph construction to one of the cnn_model* functions (currently
  cnn_model_rgb_v3) and implements the tf.estimator contract for the
  PREDICT / TRAIN / EVAL modes.

  Args:
    features: dict with key "x" (forwarded to the model builder).
    labels: integer class labels; may be None in PREDICT mode.
    mode: tf.estimator.ModeKeys value.
    params: forwarded to the model builder.

  Returns:
    tf.estimator.EstimatorSpec appropriate for ``mode``.
  """
  # logits = cnn_model(features, labels, mode, params)
  logits = cnn_model_rgb_v3(features, labels, mode, params)
  # logits = cnn_model_rgb_small(features, labels, mode, params)
  predictions = {
      # Generate predictions (for PREDICT and EVAL mode)
      "classes": tf.argmax(input=logits, axis=1),
      # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
      # `logging_hook`.
      "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
  }
  export_output = {'predict': tf.estimator.export.PredictOutput(predictions)}
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions,
                                      export_outputs=export_output)
  # If not predict, then labels is not none
  labels = tf.cast(labels, tf.int32)
  # Calculate Loss (for both TRAIN and EVAL modes)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  # Configure the Training Op (for TRAIN mode)
  if mode == tf.estimator.ModeKeys.TRAIN:
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    optimizer = tf.train.AdamOptimizer()
    train_op = optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  # Add evaluation metrics (for EVAL mode)
  eval_metric_ops = {
      "accuracy": tf.metrics.accuracy(
          labels=labels, predictions=predictions["classes"])}
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
| 34.073469
| 101
| 0.678126
| 2,381
| 16,696
| 4.627467
| 0.082738
| 0.067163
| 0.043565
| 0.052278
| 0.851516
| 0.839535
| 0.82583
| 0.816119
| 0.7721
| 0.738156
| 0
| 0.047422
| 0.173994
| 16,696
| 490
| 102
| 34.073469
| 0.751505
| 0.240896
| 0
| 0.691489
| 0
| 0
| 0.054955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035461
| false
| 0
| 0.003546
| 0
| 0.08156
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c0870c14227b354df85e47c7ff718cbf3263d0e0
| 43
|
py
|
Python
|
scvelo/pp.py
|
WeilerP/scvelo
|
1805ab4a72d3f34496f0ef246500a159f619d3a2
|
[
"BSD-3-Clause"
] | 272
|
2018-08-21T08:59:11.000Z
|
2022-03-30T11:24:19.000Z
|
scvelo/pp.py
|
theislab/scvelo
|
1805ab4a72d3f34496f0ef246500a159f619d3a2
|
[
"BSD-3-Clause"
] | 570
|
2018-08-21T14:04:03.000Z
|
2022-03-30T08:48:04.000Z
|
scvelo/pp.py
|
WeilerP/scvelo
|
1805ab4a72d3f34496f0ef246500a159f619d3a2
|
[
"BSD-3-Clause"
] | 105
|
2018-09-04T14:08:58.000Z
|
2022-03-17T16:20:14.000Z
|
from scvelo.preprocessing import * # noqa
| 21.5
| 42
| 0.767442
| 5
| 43
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 1
| 43
| 43
| 0.916667
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c0b377b2357e0be284d72aa83332a32b24a133d2
| 74
|
py
|
Python
|
model/__init__.py
|
phykn/xai_tree
|
66f5cb4ea77686364478b1f16f937678b2e544a8
|
[
"Apache-2.0"
] | 1
|
2022-02-06T17:49:26.000Z
|
2022-02-06T17:49:26.000Z
|
model/__init__.py
|
phykn/xai_tree
|
66f5cb4ea77686364478b1f16f937678b2e544a8
|
[
"Apache-2.0"
] | null | null | null |
model/__init__.py
|
phykn/xai_tree
|
66f5cb4ea77686364478b1f16f937678b2e544a8
|
[
"Apache-2.0"
] | null | null | null |
from .split_data import split_data
from .best_model import get_best_model
| 37
| 38
| 0.864865
| 13
| 74
| 4.538462
| 0.538462
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 38
| 37
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
23ef81393604236ea3044f4542a267131cb4a5c6
| 102
|
py
|
Python
|
pygecko/lib/cg_api_simple/__init__.py
|
SrJMaia/coin-gecko-api
|
2f011f23b104b0ee3a0561e68c6ec974536a59ec
|
[
"MIT"
] | null | null | null |
pygecko/lib/cg_api_simple/__init__.py
|
SrJMaia/coin-gecko-api
|
2f011f23b104b0ee3a0561e68c6ec974536a59ec
|
[
"MIT"
] | null | null | null |
pygecko/lib/cg_api_simple/__init__.py
|
SrJMaia/coin-gecko-api
|
2f011f23b104b0ee3a0561e68c6ec974536a59ec
|
[
"MIT"
] | null | null | null |
from .price import get_simple_price_from_api
from .vs_currency import get_supported_vs_currencies_api
| 34
| 56
| 0.901961
| 17
| 102
| 4.882353
| 0.588235
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 102
| 2
| 57
| 51
| 0.882979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1b2f079b4b85a4cd5be372290326e4f4a707f94
| 37
|
py
|
Python
|
rxsci_river/evaluate/__init__.py
|
maki-nage/rxsci-river
|
19fc66e67aff8dfa7efbf4107c228de15fd75d3a
|
[
"MIT"
] | 2
|
2021-11-26T20:59:38.000Z
|
2022-03-14T10:10:00.000Z
|
rxsci_river/evaluate/__init__.py
|
maki-nage/rxsci-river
|
19fc66e67aff8dfa7efbf4107c228de15fd75d3a
|
[
"MIT"
] | null | null | null |
rxsci_river/evaluate/__init__.py
|
maki-nage/rxsci-river
|
19fc66e67aff8dfa7efbf4107c228de15fd75d3a
|
[
"MIT"
] | null | null | null |
from .prequential import prequential
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 2
| 36
| 18.5
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1c9e6f419532ebc43d314bbb70920fedab8b857
| 168
|
py
|
Python
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/utils/__init__.py
|
bluetiger9/Vitis-AI
|
a7728733bbcfc292ff3afa46b9c8b03e94b740b3
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/utils/__init__.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/utils/__init__.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
from .torch_op_attr import *
from .nndct2torch_op_map import *
from .op_register import *
from .torch_const import *
from .tensor_utils import *
from .schema import *
| 21
| 33
| 0.779762
| 25
| 168
| 4.96
| 0.48
| 0.403226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.14881
| 168
| 7
| 34
| 24
| 0.86014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1e6e51701ba7bf48115ecd46633dd44d4e3ca54
| 86,590
|
py
|
Python
|
sharpy/structure/utils/lagrangeconstraints.py
|
AntonioWR/sharpy
|
c922be8d5a1831c4624b22f39264e2f417a03deb
|
[
"BSD-3-Clause"
] | null | null | null |
sharpy/structure/utils/lagrangeconstraints.py
|
AntonioWR/sharpy
|
c922be8d5a1831c4624b22f39264e2f417a03deb
|
[
"BSD-3-Clause"
] | null | null | null |
sharpy/structure/utils/lagrangeconstraints.py
|
AntonioWR/sharpy
|
c922be8d5a1831c4624b22f39264e2f417a03deb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
LagrangeConstraints library
Library used to create the matrices associated to boundary conditions through
the method of Lagrange Multipliers. The source code includes four different sections.
* Basic structures: basic functions and variables needed to organise the library with different Lagrange Constraints to enhance the interaction with this library.
* Auxiliary functions: basic queries that are performed repeatedly.
* Equations: functions that generate the equations associated to the constraint of basic degrees of freedom.
* Lagrange Constraints: different available Lagrange Constraints. They typically use the basic functions in "Equations" to assemble the required set of equations.
Attributes:
dict_of_lc (dict): Dictionary including the available Lagrange Constraint identifier
(``_lc_id``) and the associated ``BaseLagrangeConstraint`` class
Notes:
To use this library: import sharpy.structure.utils.lagrangeconstraints as lagrangeconstraints
Args:
lc_list (list): list of all the defined constraints
MBdict (dict): dictionary with the MultiBody and LagrangeMultipliers information
MB_beam (list): list of :class:`~sharpy.structure.models.beam.Beam` of each of the bodies that form the system
MB_tstep (list): list of :class:`~sharpy.utils.datastructures.StructTimeStepInfo` of each of the bodies that form the system
num_LM_eq (int): number of new equations needed to define the boundary boundary conditions
sys_size (int): total number of degrees of freedom of the multibody system
dt (float): time step
Lambda (np.ndarray): list of Lagrange multipliers values
Lambda_dot (np.ndarray): list of the first derivative of the Lagrange multipliers values
dynamic_or_static (str): string defining if the computation is dynamic or static
LM_C (np.ndarray): Damping matrix associated to the Lagrange Multipliers equations
LM_K (np.ndarray): Stiffness matrix associated to the Lagrange Multipliers equations
LM_Q (np.ndarray): Vector of independent terms associated to the Lagrange Multipliers equations
"""
from abc import ABCMeta, abstractmethod
import sharpy.utils.cout_utils as cout
import os
import ctypes as ct
import numpy as np
import sharpy.utils.algebra as ag
###############################################################################
# Basic structures
###############################################################################
# Registry mapping constraint identifiers (``_lc_id``) to their classes.
dict_of_lc = {}
lc = {}  # for internal working


# decorator
def lagrangeconstraint(arg):
    """
    Decorator registering a constraint class in ``dict_of_lc``.

    The class must expose an ``_lc_id`` attribute, which becomes its key in
    the registry linking identifiers to ``BaseLagrangeConstraint`` classes.
    """
    global dict_of_lc
    try:
        identifier = arg._lc_id
    except AttributeError:
        raise AttributeError('Class defined as lagrange constraint has no _lc_id attribute')
    dict_of_lc[identifier] = arg
    return arg
def print_available_lc():
    """
    Prints the available Lagrange Constraints
    """
    cout.cout_wrap('The available lagrange constraints on this session are:', 2)
    for registered in dict_of_lc.values():
        cout.cout_wrap('%s ' % registered._lc_id, 2)
def lc_from_string(string):
    """
    Returns the ``BaseLagrangeConstraint`` class associated to a constraint id (``_lc_id``)

    Args:
        string (str): constraint identifier (``_lc_id``).

    Returns:
        type: the registered constraint class.

    Raises:
        KeyError: if no constraint with that identifier has been registered.
    """
    return dict_of_lc[string]
def lc_list_from_path(cwd):
    """Return the module names of the Python files found in ``cwd``.

    Lists the regular files in ``cwd``, keeps those whose name ends in
    ``.py`` (except ``__init__.py``) and strips the extension.

    Args:
        cwd (str): directory to scan.

    Returns:
        list: file names without the ``.py`` suffix, in ``os.listdir`` order.
    """
    files = []
    for name in os.listdir(cwd):
        if not os.path.isfile(os.path.join(cwd, name)):
            continue
        # endswith avoids the false positives the old substring test
        # (".py" in name) let through, e.g. 'foo.pyc' or 'a.py.bak'.
        if name.endswith('.py') and name != '__init__.py':
            files.append(name[:-3])
    return files
def initialise_lc(lc_name, print_info=True):
    """
    Initialises the Lagrange Constraints

    Looks up the constraint class registered under ``lc_name`` and returns
    a new instance of it, optionally announcing the creation.
    """
    if print_info:
        cout.cout_wrap('Generating an instance of %s' % lc_name, 2)
    return lc_from_string(lc_name)()
class BaseLagrangeConstraint(metaclass=ABCMeta):
    __doc__ = """
    BaseLagrangeConstraint

    Base class for LagrangeConstraints showing the methods required. They will
    be inherited by all the Lagrange Constraints

    Attributes:
        _n_eq (int): Number of equations required by a LagrangeConstraint
        _ieq (int): Number of the first equation associated to the Lagrange Constraint in the whole set of Lagrange equations
    """
    _lc_id = 'BaseLagrangeConstraint'

    def __init__(self):
        """
        Initialisation
        """
        # Both are set by subclasses / initialise(); None until then.
        self._n_eq = None
        self._ieq = None

    @abstractmethod
    def get_n_eq(self):
        """
        Returns the number of equations required by the Lagrange Constraint
        """
        return self._n_eq

    @abstractmethod
    # def initialise(self, **kwargs):
    def initialise(self, MBdict_entry, ieq):
        """
        Initialisation

        Records the index of this constraint's first equation and returns the
        index right after its last one, for chaining across constraints.
        """
        self._ieq = ieq
        return self._ieq + self._n_eq

    @abstractmethod
    # def staticmat(self, **kwargs):
    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        """
        Generates the structural matrices (damping, stiffness) and the independent vector
        associated to the LagrangeConstraint in a static simulation
        """
        return np.zeros((6, 6))

    @abstractmethod
    # def dynamicmat(self, **kwargs):
    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        """
        Generates the structural matrices (damping, stiffness) and the independent vector
        associated to the LagrangeConstraint in a dynamic simulation
        """
        return np.zeros((10, 10))

    @abstractmethod
    # def staticpost(self, **kwargs):
    def staticpost(self, lc_list, MB_beam, MB_tstep):
        """
        Postprocess operations needed by the LagrangeConstraint in a static simulation
        """
        return

    @abstractmethod
    # def dynamicpost(self, **kwargs):
    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        """
        Postprocess operations needed by the LagrangeConstraint in a dynamic simulation
        """
        return
################################################################################
# Auxiliary functions
################################################################################
def define_node_dof(MB_beam, node_body, num_node):
    """
    define_node_dof

    Define the position of the first degree of freedom associated to a certain node

    Args:
        MB_beam(list): list of :class:`~sharpy.structure.models.beam.Beam`
        node_body(int): body to which the node belongs
        num_node(int): number of the node within the body

    Returns:
        node_dof(int): first degree of freedom associated to the node
    """
    # Offset by the unknowns of every body preceding the node's own body.
    offset = 0
    for preceding in MB_beam[:node_body]:
        offset += preceding.num_dof.value
        if preceding.FoR_movement == 'free':
            # A free frame of reference contributes 10 extra unknowns.
            offset += 10
    # Each vertex dof index spans 6 entries within its body.
    return offset + 6 * MB_beam[node_body].vdof[num_node]
def define_FoR_dof(MB_beam, FoR_body):
    """
    define_FoR_dof

    Define the position of the first degree of freedom associated to a certain frame of reference

    Args:
        MB_beam(list): list of :class:`~sharpy.structure.models.beam.Beam`
        FoR_body(int): body whose frame of reference is being located

    Returns:
        FoR_dof(int): first degree of freedom associated to the frame of reference
    """
    FoR_dof = 0
    # Skip over the unknowns of every body preceding FoR_body.
    for ibody in range(FoR_body):
        FoR_dof += MB_beam[ibody].num_dof.value
        if MB_beam[ibody].FoR_movement == 'free':
            # A free frame of reference contributes 10 extra unknowns.
            FoR_dof += 10
    # The FoR unknowns sit after the structural dofs of its own body.
    FoR_dof += MB_beam[FoR_body].num_dof.value
    return FoR_dof
def set_value_or_default(dictionary, key, default_val):
    """Return ``dictionary[key]``, or ``default_val`` if the key is missing.

    Args:
        dictionary (dict): settings dictionary to query.
        key: key to look up.
        default_val: value returned when ``key`` is absent.

    Returns:
        The stored value, or ``default_val``.
    """
    # dict.get does the lookup-with-fallback in one call, replacing the
    # original try/except KeyError.
    return dictionary.get(key, default_val)
################################################################################
# Equations
################################################################################
def equal_pos_node_FoR(MB_tstep, MB_beam, FoR_body, node_body, inode_in_body, node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda, scalingFactor, penaltyFactor, ieq, LM_K, LM_C, LM_Q):
    """
    This function generates the stiffness and damping matrices and the independent vector associated to a constraint that
    imposes equal positions between a node and a frame of reference

    See ``LagrangeConstraints`` for the description of variables

    Args:
        node_FoR_dof (int): position of the first degree of freedom of the FoR to which the "node" belongs
        node_dof (int): position of the first degree of freedom associated to the "node"
        FoR_body (int): body number of the "FoR"
        FoR_dof (int): position of the first degree of freedom associated to the "FoR"

    Returns:
        int: ``ieq`` advanced past the three equations added by this constraint.
    """
    # This constraint adds three scalar equations (x, y, z position equality).
    num_LM_eq_specific = 3
    # Fortran-ordered c_double buffers (interface convention of the solver).
    # NOTE(review): Bnh is allocated but never used in this function.
    Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
    B = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')

    # Simplify notation
    node_cga = MB_tstep[node_body].cga()
    node_pos = MB_tstep[node_body].pos[inode_in_body, :]
    node_FoR_pos = MB_tstep[node_body].for_pos[0:3]
    FoR_pos = MB_tstep[FoR_body].for_pos[0:3]

    # Constraint Jacobian blocks: node FoR origin (+I), node position rotated
    # by the body orientation (C^ga), constrained FoR origin (-I).
    # if MB_beam[node_body].FoR_movement == 'free':
    B[:, node_FoR_dof:node_FoR_dof+3] = np.eye(3)
    B[:, node_dof:node_dof+3] = node_cga
    B[:, FoR_dof:FoR_dof+3] = -np.eye(3)

    # Scatter B and B^T into the augmented stiffness matrix; accumulate the
    # multiplier force and the position residual into LM_Q.
    LM_K[sys_size + ieq : sys_size + ieq + num_LM_eq_specific, :sys_size] += scalingFactor*B
    LM_K[:sys_size, sys_size + ieq : sys_size + ieq + num_LM_eq_specific] += scalingFactor*np.transpose(B)
    LM_Q[:sys_size] += scalingFactor*np.dot(np.transpose(B), Lambda[ieq:ieq+num_LM_eq_specific])
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*(node_FoR_pos +
                                                                         np.dot(node_cga, node_pos) -
                                                                         FoR_pos)

    # Derivative of the rotated multiplier w.r.t. the node body quaternion.
    LM_C[node_dof:node_dof+3, node_FoR_dof+6:node_FoR_dof+10] += scalingFactor*ag.der_CquatT_by_v(MB_tstep[node_body].quat, Lambda[ieq : ieq + num_LM_eq_specific])

    if penaltyFactor:
        # Penalty contribution: the blocks below expand B^T B (and its
        # quaternion derivatives) into the global matrices.
        q = np.zeros((sys_size, ))
        q[node_FoR_dof:node_FoR_dof+3] = node_FoR_pos
        q[node_dof:node_dof+3] = node_pos
        q[FoR_dof:FoR_dof+3] = FoR_pos

        LM_Q[:sys_size] += penaltyFactor*np.dot(B.T, np.dot(B, q))

        # Rows associated with the node's FoR translation dofs.
        LM_K[node_FoR_dof:node_FoR_dof+3, node_FoR_dof:node_FoR_dof+3] += penaltyFactor*np.eye(3)
        LM_K[node_FoR_dof:node_FoR_dof+3, node_dof:node_dof+3] += penaltyFactor*node_cga
        LM_K[node_FoR_dof:node_FoR_dof+3, FoR_dof:FoR_dof+3] += -penaltyFactor*np.eye(3)
        LM_C[node_FoR_dof:node_FoR_dof+3, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*ag.der_Cquat_by_v(MB_tstep[node_body].quat, node_pos)

        # Rows associated with the node's structural translation dofs.
        LM_K[node_dof:node_dof+3, node_FoR_dof:node_FoR_dof+3] += penaltyFactor*node_cga.T
        LM_K[node_dof:node_dof+3, node_dof:node_dof+3] += penaltyFactor*np.eye(3)
        LM_K[node_dof:node_dof+3, FoR_dof:FoR_dof+3] += -penaltyFactor*node_cga.T
        LM_C[node_dof:node_dof+3, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*(ag.der_CquatT_by_v(MB_tstep[node_body].quat, node_FoR_pos - FoR_pos))

        # Rows associated with the constrained FoR translation dofs.
        LM_K[FoR_dof:FoR_dof+3, node_FoR_dof:node_FoR_dof+3] += -penaltyFactor*np.eye(3)
        LM_K[FoR_dof:FoR_dof+3, node_dof:node_dof+3] += -penaltyFactor*node_cga.T
        LM_K[FoR_dof:FoR_dof+3, FoR_dof:FoR_dof+3] += penaltyFactor*np.eye(3)
        LM_C[FoR_dof:FoR_dof+3, node_FoR_dof+6:node_FoR_dof+10] += -penaltyFactor*ag.der_Cquat_by_v(MB_tstep[node_body].quat, node_pos)

    # Advance the equation counter past the three equations just written.
    ieq += 3
    return ieq
def equal_lin_vel_node_FoR(MB_tstep, MB_beam, FoR_body, node_body, node_number, node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, scalingFactor, penaltyFactor, ieq, LM_K, LM_C, LM_Q):
    """
    This function generates the stiffness and damping matrices and the independent vector associated to a constraint that
    imposes equal linear velocities between a node and a frame of reference

    See ``LagrangeConstraints`` for the description of variables

    Args:
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        node_FoR_dof (int): position of the first degree of freedom of the FoR to which the "node" belongs
        node_dof (int): position of the first degree of freedom associated to the "node"
        FoR_body (int): body number of the "FoR"
        FoR_dof (int): position of the first degree of freedom associated to the "FoR"

    Returns:
        int: first free equation number after the three equations added here (``ieq + 3``)
    """
    num_LM_eq_specific = 3
    # Non-holonomic constraint matrix. (The unused holonomic matrix ``B`` of
    # the previous implementation has been removed.)
    Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
    # Simplify notation
    node_cga = MB_tstep[node_body].cga()
    node_FoR_va = MB_tstep[node_body].for_vel[0:3]
    node_FoR_wa = MB_tstep[node_body].for_vel[3:6]
    node_Ra = MB_tstep[node_body].pos[node_number,:]
    node_dot_Ra = MB_tstep[node_body].pos_dot[node_number,:]
    FoR_cga = MB_tstep[FoR_body].cga()
    FoR_va = MB_tstep[FoR_body].for_vel[0:3]
    # Velocity of the "FoR" minus velocity of the "node", both rotated to the
    # inertial (G) frame.
    Bnh[:, FoR_dof:FoR_dof+3] = FoR_cga
    Bnh[:, node_dof:node_dof+3] = -1.0*node_cga
    if MB_beam[node_body].FoR_movement == 'free':
        # The node's own FoR moves: add its translational and rotational terms.
        Bnh[:, node_FoR_dof:node_FoR_dof+3] = -1.0*node_cga
        Bnh[:, node_FoR_dof+3:node_FoR_dof+6] = np.dot(node_cga,ag.skew(node_Ra))
    # Lagrange-multiplier blocks and constraint residual.
    LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += scalingFactor*Bnh
    LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*np.transpose(Bnh)
    LM_Q[:sys_size] += scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*(np.dot(FoR_cga, FoR_va) +
                                                                         -1.0*np.dot(node_cga,
                                                                                     node_dot_Ra +
                                                                                     node_FoR_va +
                                                                                     -1.0*np.dot(ag.skew(node_Ra), node_FoR_wa)))
    # Linearisation of Bnh^T Lambda_dot with respect to the quaternions/positions.
    LM_C[FoR_dof:FoR_dof+3, FoR_dof+6:FoR_dof+10] += scalingFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat, Lambda_dot[ieq:ieq+num_LM_eq_specific])
    if MB_beam[node_body].FoR_movement == 'free':
        LM_C[node_dof:node_dof+3,node_FoR_dof+6:node_FoR_dof+10] -= scalingFactor*ag.der_CquatT_by_v(MB_tstep[node_body].quat, Lambda_dot[ieq:ieq+num_LM_eq_specific])
        LM_C[node_FoR_dof:node_FoR_dof+3,node_FoR_dof+6:node_FoR_dof+10] -= scalingFactor*ag.der_CquatT_by_v(MB_tstep[node_body].quat,Lambda_dot[ieq:ieq+num_LM_eq_specific])
        LM_C[node_FoR_dof+3:node_FoR_dof+6,node_FoR_dof+6:node_FoR_dof+10] += scalingFactor*np.dot(ag.skew(node_Ra).T,
                                                                                                   ag.der_CquatT_by_v(MB_tstep[node_body].quat,
                                                                                                                      Lambda_dot[ieq:ieq+num_LM_eq_specific]))
        LM_K[node_FoR_dof+3:node_FoR_dof+6,node_dof:node_dof+3] += scalingFactor*ag.skew(np.dot(node_cga.T,Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    if penaltyFactor:
        # Penalty contribution: Bnh^T Bnh q forcing and its linearisation.
        q = np.zeros((sys_size))
        q[FoR_dof:FoR_dof+3] = FoR_va
        q[node_dof:node_dof+3] = node_dot_Ra
        if MB_beam[node_body].FoR_movement == 'free':
            q[node_FoR_dof:node_FoR_dof+3] = node_FoR_va
            q[node_FoR_dof+3:node_FoR_dof+6] = node_FoR_wa
        LM_Q[:sys_size] += penaltyFactor*np.dot(np.dot(Bnh.T, Bnh), q)
        LM_C[:sys_size, :sys_size] += penaltyFactor*np.dot(Bnh.T, Bnh)
        # Derivatives wrt the FoR quaternion
        # NOTE(review): the sign of the np.dot(ag.skew(node_Ra), node_FoR_wa)
        # term below is opposite to the one in the residual above — confirm
        # against the derivation.
        LM_C[FoR_dof:FoR_dof+3, FoR_dof+6:FoR_dof+10] -= penaltyFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                          np.dot(node_cga, node_dot_Ra + node_FoR_va +
                                                                                                 np.dot(ag.skew(node_Ra), node_FoR_wa)))
        LM_C[node_dof:node_dof+3, FoR_dof+6:FoR_dof+10] -= penaltyFactor*np.dot(node_cga.T, ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                                               FoR_va))
        if MB_beam[node_body].FoR_movement == 'free':
            LM_C[node_FoR_dof:node_FoR_dof+3, FoR_dof+6:FoR_dof+10] -= penaltyFactor*np.dot(node_cga.T, ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                                                           FoR_va))
            mat = ag.multiply_matrices(ag.skew(node_Ra).T, node_cga.T)
            LM_C[node_FoR_dof+3:node_FoR_dof+6, FoR_dof+6:FoR_dof+10] += penaltyFactor*np.dot(mat, ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                                                      FoR_va))
        # Derivatives wrt the node quaternion
        if MB_beam[node_body].FoR_movement == 'free':
            vec = -node_dot_Ra - node_FoR_va + np.dot(ag.skew(node_Ra), node_FoR_wa)
            LM_C[FoR_dof:FoR_dof+3, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*np.dot(FoR_cga.T, ag.der_Cquat_by_v(MB_tstep[node_body].quat, vec))
            derivative = -ag.der_CquatT_by_v(MB_tstep[node_body].quat, np.dot(FoR_cga, FoR_va))
            LM_C[node_dof:node_dof+3, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*derivative
            LM_C[node_FoR_dof:node_FoR_dof+3, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*derivative
            LM_C[node_FoR_dof+3:node_FoR_dof+6, node_FoR_dof+6:node_FoR_dof+10] -= penaltyFactor*np.dot(ag.skew(node_Ra), derivative)
        # Derivatives wrt the node Ra
        LM_K[FoR_dof:FoR_dof+3, node_dof:node_dof+3] -= penaltyFactor*ag.multiply_matrices(FoR_cga.T, node_cga, ag.skew(node_FoR_wa))
        LM_K[node_dof:node_dof+3, node_dof:node_dof+3] += penaltyFactor*ag.skew(node_FoR_wa)
        if MB_beam[node_body].FoR_movement == 'free':
            LM_K[node_FoR_dof:node_FoR_dof+3, node_dof:node_dof+3] += penaltyFactor*ag.skew(node_FoR_wa)
            vec = ag.multiply_matrices(node_cga.T, FoR_cga, FoR_va) - node_dot_Ra - node_FoR_va
            LM_K[node_FoR_dof+3:node_FoR_dof+6, node_dof:node_dof+3] += penaltyFactor*ag.skew(vec)
            LM_K[node_FoR_dof+3:node_FoR_dof+6, node_dof:node_dof+3] -= penaltyFactor*ag.der_skewp_skewp_v(node_Ra, node_FoR_wa)
    ieq += 3
    return ieq
def def_rot_axis_FoR_wrt_node_general(MB_tstep, MB_beam, FoR_body, node_body, node_number, node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, rot_axisB, scalingFactor, penaltyFactor, ieq, LM_K, LM_C, LM_Q, indep):
    """
    This function generates the stiffness and damping matrices and the independent vector associated to a joint that
    forces the rotation axis of a FoR to be parallel to a certain direction. This direction is defined in the
    B FoR of a node and, thus, might change along the simulation.

    See ``LagrangeConstraints`` for the description of variables

    Args:
        rot_axisB (np.ndarray): Rotation axis with respect to the node B FoR
        indep (list): Numbers of the two equations used as independent; filled in
            on the first call (when empty) and reused on subsequent calls
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        node_FoR_dof (int): position of the first degree of freedom of the FoR to which the "node" belongs
        node_dof (int): position of the first degree of freedom associated to the "node"
        FoR_body (int): body number of the "FoR"
        FoR_dof (int): position of the first degree of freedom associated to the "FoR"

    Returns:
        int: first free equation number after the two equations added here (``ieq + 2``)
    """
    ielem, inode_in_elem = MB_beam[node_body].node_master_elem[node_number]
    # Simplify notation
    cab = ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:])
    node_cga = MB_tstep[node_body].cga()
    FoR_cga = MB_tstep[FoR_body].cga()
    FoR_wa = MB_tstep[FoR_body].for_vel[3:6]
    if not indep:
        # Only two of the three scalar equations are independent. Drop the
        # worst-conditioned (smallest-norm) row of the constraint matrix and
        # keep the other two, in ascending order.
        aux_Bnh = ag.multiply_matrices(ag.skew(rot_axisB),
                                       cab.T,
                                       node_cga.T,
                                       FoR_cga)
        row_norms = np.linalg.norm(aux_Bnh, axis=1)
        # np.argsort also resolves exact ties between row norms; the previous
        # chain of strict "<" comparisons left ``indep`` empty on a tie and
        # crashed when indexing it below.
        indep[:] = sorted(np.argsort(row_norms)[1:])
    new_Lambda_dot = np.zeros(3)
    new_Lambda_dot[indep[0]] = Lambda_dot[ieq]
    new_Lambda_dot[indep[1]] = Lambda_dot[ieq+1]
    num_LM_eq_specific = 2
    # Non-holonomic constraint matrix. (The unused holonomic matrix ``B`` of
    # the previous implementation has been removed.)
    Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
    Bnh[:, FoR_dof+3:FoR_dof+6] = ag.multiply_matrices(ag.skew(rot_axisB),
                                                       cab.T,
                                                       node_cga.T,
                                                       FoR_cga)[indep,:]
    # Constrain angular velocities
    LM_Q[:sys_size] += scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*ag.multiply_matrices(ag.skew(rot_axisB),
                                                                                             cab.T,
                                                                                             node_cga.T,
                                                                                             FoR_cga,
                                                                                             FoR_wa)[indep]
    LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += scalingFactor*Bnh
    LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*np.transpose(Bnh)
    if MB_beam[node_body].FoR_movement == 'free':
        LM_C[FoR_dof+3:FoR_dof+6,node_FoR_dof+6:node_FoR_dof+10] += scalingFactor*np.dot(FoR_cga.T,
                                                                                         ag.der_Cquat_by_v(MB_tstep[node_body].quat,
                                                                                                           ag.multiply_matrices(cab,
                                                                                                                                ag.skew(rot_axisB).T,
                                                                                                                                new_Lambda_dot)))
    LM_C[FoR_dof+3:FoR_dof+6,FoR_dof+6:FoR_dof+10] += scalingFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                       ag.multiply_matrices(node_cga,
                                                                                                            cab,
                                                                                                            ag.skew(rot_axisB).T,
                                                                                                            new_Lambda_dot))
    LM_K[FoR_dof+3:FoR_dof+6,node_dof+3:node_dof+6] += scalingFactor*ag.multiply_matrices(FoR_cga.T,
                                                                                          node_cga,
                                                                                          cab,
                                                                                          ag.skew(rot_axisB).T,
                                                                                          new_Lambda_dot)
    if penaltyFactor:
        # Penalty contribution: Bnh^T Bnh q forcing and its linearisation.
        q = np.zeros((sys_size,))
        q[FoR_dof+3:FoR_dof+6] = MB_tstep[FoR_body].for_vel[3:6]
        LM_Q[:sys_size] += penaltyFactor*np.dot(Bnh.T, np.dot(Bnh, q))
        LM_C[:sys_size, :sys_size] += penaltyFactor*np.dot(Bnh.T, Bnh)
        sq_rot_axisB = np.dot(ag.skew(rot_axisB).T, ag.skew(rot_axisB))
        # Derivatives with the quaternion of the FoR
        vec = ag.multiply_matrices(node_cga,
                                   cab,
                                   sq_rot_axisB,
                                   cab.T,
                                   node_cga.T,
                                   FoR_cga,
                                   FoR_wa)
        LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+6:FoR_dof+10] += penaltyFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat, vec)
        mat = ag.multiply_matrices(FoR_cga.T,
                                   node_cga,
                                   cab,
                                   sq_rot_axisB,
                                   cab.T,
                                   node_cga.T)
        LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+6:FoR_dof+10] += penaltyFactor*np.dot(mat, ag.der_Cquat_by_v(MB_tstep[FoR_body].quat, FoR_wa))
        if MB_beam[node_body].FoR_movement == 'free':
            # Derivatives with the quaternion of the FoR of the node
            vec = ag.multiply_matrices(cab,
                                       sq_rot_axisB,
                                       cab.T,
                                       node_cga.T,
                                       FoR_cga,
                                       FoR_wa)
            LM_C[FoR_dof+3:FoR_dof+6, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*np.dot(FoR_cga.T,
                                                                                              ag.der_Cquat_by_v(MB_tstep[node_body].quat, vec))
            mat = ag.multiply_matrices(FoR_cga.T,
                                       node_cga,
                                       cab,
                                       sq_rot_axisB,
                                       cab.T)
            vec = np.dot(FoR_cga, FoR_wa)
            LM_C[FoR_dof+3:FoR_dof+6, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*np.dot(mat, ag.der_CquatT_by_v(MB_tstep[node_body].quat, vec))
        # Derivatives with the CRV
        mat = np.dot(FoR_cga.T, node_cga)
        vec = ag.multiply_matrices(sq_rot_axisB,
                                   cab.T,
                                   node_cga.T,
                                   FoR_cga,
                                   FoR_wa)
        LM_K[FoR_dof+3:FoR_dof+6, node_dof+3:node_dof+6] += penaltyFactor*np.dot(mat, ag.der_Ccrv_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:], vec))
        mat = ag.multiply_matrices(FoR_cga.T,
                                   node_cga,
                                   cab,
                                   sq_rot_axisB)
        vec = ag.multiply_matrices(node_cga.T,
                                   FoR_cga,
                                   FoR_wa)
        LM_K[FoR_dof+3:FoR_dof+6, node_dof+3:node_dof+6] += penaltyFactor*np.dot(mat, ag.der_CcrvT_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:], vec))
    ieq += 2
    return ieq
def def_rot_axis_FoR_wrt_node_xyz(MB_tstep, MB_beam, FoR_body, node_body, node_number, node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, rot_axisB, scalingFactor, penaltyFactor, ieq, LM_K, LM_C, LM_Q, zero_comp):
    """
    This function generates the stiffness and damping matrices and the independent vector associated to a joint that
    forces the rotation axis of a FoR to be parallel to a certain direction. This direction is defined in the
    B FoR of a node and parallel to x, y or z

    See ``LagrangeConstraints`` for the description of variables

    Args:
        rot_axisB (np.ndarray): Rotation axis with respect to the node B FoR
        zero_comp (np.ndarray): indices of the two rotation components to be constrained to zero
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        node_FoR_dof (int): position of the first degree of freedom of the FoR to which the "node" belongs
        node_dof (int): position of the first degree of freedom associated to the "node"
        FoR_body (int): body number of the "FoR"
        FoR_dof (int): position of the first degree of freedom associated to the "FoR"

    Returns:
        int: first free equation number after the two equations added here (``ieq + 2``)
    """
    ielem, inode_in_elem = MB_beam[node_body].node_master_elem[node_number]
    num_LM_eq_specific = 2
    # Non-holonomic constraint matrix. (The unused holonomic matrix ``B`` of
    # the previous implementation has been removed.)
    Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
    # Simplify notation
    cab = ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:])
    node_cga = MB_tstep[node_body].cga()
    FoR_cga = MB_tstep[FoR_body].cga()
    FoR_wa = MB_tstep[FoR_body].for_vel[3:6]
    psi = MB_tstep[node_body].psi[ielem,inode_in_elem,:]
    psi_dot = MB_tstep[node_body].psi_dot[ielem,inode_in_elem,:]
    # Components to be zero: Z selects the two rows given by ``zero_comp``.
    Z = np.zeros((2,3))
    Z[:, zero_comp] = np.eye(2)
    Bnh[:, FoR_dof+3:FoR_dof+6] += ag.multiply_matrices(Z, cab.T, node_cga.T, FoR_cga)
    Bnh[:, node_dof+3:node_dof+6] -= ag.multiply_matrices(Z, ag.crv2tan(psi))
    Bnh[:, node_FoR_dof+3:node_FoR_dof+6] -= ag.multiply_matrices(Z, cab.T)
    # Constrain angular velocities
    LM_Q[:sys_size] += scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*ag.multiply_matrices(Z, cab.T, node_cga.T, FoR_cga, FoR_wa)
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] -= scalingFactor*ag.multiply_matrices(Z, ag.crv2tan(psi), psi_dot)
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] -= scalingFactor*ag.multiply_matrices(Z, cab.T, MB_tstep[node_body].for_vel[3:6])
    LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += scalingFactor*Bnh
    LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*np.transpose(Bnh)
    # Linearisation of Bnh^T Lambda_dot.
    vec = ag.multiply_matrices(node_cga, cab, Z.T, Lambda_dot[ieq:ieq+num_LM_eq_specific])
    LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+6:FoR_dof+10] += scalingFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat, vec)
    if MB_beam[node_body].FoR_movement == 'free':
        vec = ag.multiply_matrices(cab, Z.T, Lambda_dot[ieq:ieq+num_LM_eq_specific])
        LM_C[FoR_dof+3:FoR_dof+6, node_FoR_dof+6:node_FoR_dof+10] += scalingFactor*ag.multiply_matrices(FoR_cga.T, ag.der_Cquat_by_v(MB_tstep[node_body].quat, vec))
    LM_K[FoR_dof+3:FoR_dof+6, node_dof+3:node_dof+6] += scalingFactor*ag.multiply_matrices(FoR_cga.T, node_cga, ag.der_Ccrv_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:],
                                                                                                                                np.dot(Z.T, Lambda_dot[ieq:ieq+num_LM_eq_specific])))
    LM_K[node_dof+3:node_dof+6, node_dof+3:node_dof+6] -= scalingFactor*ag.der_TanT_by_xv(psi, ag.multiply_matrices(Z.T, Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    LM_K[node_FoR_dof+3:node_FoR_dof+6, node_dof+3:node_dof+6] -= scalingFactor*ag.der_Ccrv_by_v(psi, ag.multiply_matrices(Z.T, Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    if penaltyFactor:
        # Penalty contribution: Bnh^T Bnh q forcing and its linearisation.
        q = np.zeros((sys_size,))
        q[FoR_dof+3:FoR_dof+6] = FoR_wa
        LM_Q[:sys_size] += penaltyFactor*np.dot(Bnh.T, np.dot(Bnh, q))
        LM_C[:sys_size, :sys_size] += penaltyFactor*np.dot(Bnh.T, Bnh)
        ZTZ = np.dot(Z.T, Z)
        # Derivatives with the quaternion of the FoR
        vec = ag.multiply_matrices(node_cga,
                                   cab,
                                   ZTZ,
                                   cab.T,
                                   node_cga.T,
                                   FoR_cga,
                                   FoR_wa)
        LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+6:FoR_dof+10] += penaltyFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat, vec)
        mat = ag.multiply_matrices(FoR_cga.T,
                                   node_cga,
                                   cab,
                                   ZTZ,
                                   cab.T,
                                   node_cga.T)
        LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+6:FoR_dof+10] += penaltyFactor*np.dot(mat, ag.der_Cquat_by_v(MB_tstep[FoR_body].quat, FoR_wa))
        if MB_beam[node_body].FoR_movement == 'free':
            # Derivatives with the quaternion of the FoR of the node
            vec = ag.multiply_matrices(cab,
                                       ZTZ,
                                       cab.T,
                                       node_cga.T,
                                       FoR_cga,
                                       FoR_wa)
            LM_C[FoR_dof+3:FoR_dof+6, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*np.dot(FoR_cga.T,
                                                                                              ag.der_Cquat_by_v(MB_tstep[node_body].quat, vec))
            mat = ag.multiply_matrices(FoR_cga.T,
                                       node_cga,
                                       cab,
                                       ZTZ,
                                       cab.T)
            vec = np.dot(FoR_cga, FoR_wa)
            LM_C[FoR_dof+3:FoR_dof+6, node_FoR_dof+6:node_FoR_dof+10] += penaltyFactor*np.dot(mat, ag.der_CquatT_by_v(MB_tstep[node_body].quat, vec))
        # Derivatives with the CRV
        mat = np.dot(FoR_cga.T, node_cga)
        vec = ag.multiply_matrices(ZTZ,
                                   cab.T,
                                   node_cga.T,
                                   FoR_cga,
                                   FoR_wa)
        LM_K[FoR_dof+3:FoR_dof+6, node_dof+3:node_dof+6] += penaltyFactor*np.dot(mat, ag.der_Ccrv_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:], vec))
        mat = ag.multiply_matrices(FoR_cga.T,
                                   node_cga,
                                   cab,
                                   ZTZ)
        vec = ag.multiply_matrices(node_cga.T,
                                   FoR_cga,
                                   FoR_wa)
        LM_K[FoR_dof+3:FoR_dof+6, node_dof+3:node_dof+6] += penaltyFactor*np.dot(mat, ag.der_CcrvT_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:], vec))
    ieq += 2
    return ieq
def def_rot_vel_FoR_wrt_node(MB_tstep, MB_beam, FoR_body, node_body, node_number, node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, rot_axisB, rot_vel, scalingFactor, penaltyFactor, ieq, LM_K, LM_C, LM_Q):
    """
    This function generates the stiffness and damping matrices and the independent vector associated to a joint that
    forces the rotation velocity of a FoR with respect to a node

    See ``LagrangeConstraints`` for the description of variables

    Args:
        rot_axisB (np.ndarray): Rotation axis with respect to the node B FoR
        rot_vel (float): Rotation velocity
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        node_FoR_dof (int): position of the first degree of freedom of the FoR to which the "node" belongs
        node_dof (int): position of the first degree of freedom associated to the "node"
        FoR_body (int): body number of the "FoR"
        FoR_dof (int): position of the first degree of freedom associated to the "FoR"

    Returns:
        int: first free equation number after the single equation added here (``ieq + 1``)
    """
    num_LM_eq_specific = 1
    # Non-holonomic constraint matrix. (The unused holonomic matrix ``B`` of
    # the previous implementation has been removed.)
    Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
    ielem, inode_in_elem = MB_beam[node_body].node_master_elem[node_number]
    # Project the FoR angular velocity onto the rotation axis expressed in B.
    Bnh[:, FoR_dof+3:FoR_dof+6] = ag.multiply_matrices(rot_axisB,
                                                       ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:]).T,
                                                       MB_tstep[node_body].cga().T,
                                                       MB_tstep[FoR_body].cga())
    # Constrain angular velocities
    LM_Q[:sys_size] += scalingFactor*np.dot(np.transpose(Bnh),Lambda_dot[ieq:ieq+num_LM_eq_specific])
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*ag.multiply_matrices(rot_axisB,
                                                                                             ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:]).T,
                                                                                             MB_tstep[node_body].cga().T,
                                                                                             MB_tstep[FoR_body].cga(),
                                                                                             MB_tstep[FoR_body].for_vel[3:6]) - scalingFactor*rot_vel
    LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += scalingFactor*Bnh
    LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*np.transpose(Bnh)
    # Linearisation of Bnh^T Lambda_dot.
    if MB_beam[node_body].FoR_movement == 'free':
        LM_C[FoR_dof+3:FoR_dof+6,node_FoR_dof+6:node_FoR_dof+10] += scalingFactor*np.dot(MB_tstep[FoR_body].cga().T,
                                                                                         ag.der_Cquat_by_v(MB_tstep[node_body].quat,
                                                                                                           ag.multiply_matrices(ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:]),
                                                                                                                                rot_axisB.T*Lambda_dot[ieq:ieq+num_LM_eq_specific])))
    LM_C[FoR_dof+3:FoR_dof+6,FoR_dof+6:FoR_dof+10] += scalingFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                       ag.multiply_matrices(MB_tstep[node_body].cga(),
                                                                                                            ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:]).T,
                                                                                                            rot_axisB.T*Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    LM_K[FoR_dof+3:FoR_dof+6,node_dof+3:node_dof+6] += scalingFactor*ag.multiply_matrices(MB_tstep[FoR_body].cga().T,
                                                                                          MB_tstep[node_body].cga(),
                                                                                          ag.der_Ccrv_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:],
                                                                                                           rot_axisB.T*Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    ieq += 1
    return ieq
def def_rot_vect_FoR_wrt_node(MB_tstep, MB_beam, FoR_body, node_body, node_number, node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, rot_vect, scalingFactor, penaltyFactor, ieq, LM_K, LM_C, LM_Q):
    """
    This function fixes the rotation velocity VECTOR of a FOR equal to a velocity vector defined in the B FoR of a node

    This function is a new implementation that combines and simplifies the use of 'def_rot_vel_FoR_wrt_node' and 'def_rot_axis_FoR_wrt_node' together

    See ``LagrangeConstraints`` for the description of variables

    Args:
        rot_vect (np.ndarray): prescribed rotation velocity vector in the node B FoR
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        node_FoR_dof (int): position of the first degree of freedom of the FoR to which the "node" belongs
        node_dof (int): position of the first degree of freedom associated to the "node"
        FoR_body (int): body number of the "FoR"
        FoR_dof (int): position of the first degree of freedom associated to the "FoR"

    Returns:
        int: first free equation number after the three equations added here (``ieq + 3``)
    """
    num_LM_eq_specific = 3
    Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
    # Simplify notation
    ielem, inode_in_elem = MB_beam[node_body].node_master_elem[node_number]
    node_cga = MB_tstep[node_body].cga()
    cab = ag.crv2rotation(MB_tstep[node_body].psi[ielem,inode_in_elem,:])
    FoR_cga = MB_tstep[FoR_body].cga()
    FoR_wa = MB_tstep[FoR_body].for_vel[3:6]
    # FoR angular velocity rotated into the node B frame.
    Bnh[:, FoR_dof+3:FoR_dof+6] = ag.multiply_matrices(cab.T,
                                                       node_cga.T,
                                                       FoR_cga)
    # Constrain angular velocities
    LM_Q[:sys_size] += scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
    # Residual: B-frame angular velocity of the FoR minus the prescribed vector.
    LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*(np.dot(Bnh[:, FoR_dof+3:FoR_dof+6], FoR_wa) -
                                                                         rot_vect)
    LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += scalingFactor*Bnh
    LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += scalingFactor*np.transpose(Bnh)
    # Linearisation of Bnh^T Lambda_dot with respect to quaternions and CRV.
    if MB_beam[node_body].FoR_movement == 'free':
        LM_C[FoR_dof+3:FoR_dof+6,node_FoR_dof+6:node_FoR_dof+10] += scalingFactor*np.dot(FoR_cga.T,
                                                                                         ag.der_Cquat_by_v(MB_tstep[node_body].quat,
                                                                                                           np.dot(cab, Lambda_dot[ieq:ieq+num_LM_eq_specific])))
    LM_C[FoR_dof+3:FoR_dof+6,FoR_dof+6:FoR_dof+10] += scalingFactor*ag.der_CquatT_by_v(MB_tstep[FoR_body].quat,
                                                                                       ag.multiply_matrices(node_cga, cab, Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    LM_K[FoR_dof+3:FoR_dof+6,node_dof+3:node_dof+6] += scalingFactor*ag.multiply_matrices(FoR_cga.T,
                                                                                          node_cga,
                                                                                          ag.der_Ccrv_by_v(MB_tstep[node_body].psi[ielem,inode_in_elem,:],
                                                                                                           Lambda_dot[ieq:ieq+num_LM_eq_specific]))
    if penaltyFactor:
        # Penalty term: assuming cga()/crv2rotation return proper rotation
        # matrices, the Bnh block over the FoR rotational dofs is orthogonal,
        # so Bnh.T @ Bnh reduces to the identity there — TODO confirm.
        LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+3:FoR_dof+6] += penaltyFactor*np.eye(3)
        q = np.zeros((sys_size))
        q[FoR_dof+3:FoR_dof+6] = FoR_wa
        LM_Q[:sys_size] += penaltyFactor*np.dot(np.dot(Bnh.T, Bnh), q)
    ieq += 3
    return ieq
################################################################################
# Lagrange constraints
################################################################################
@lagrangeconstraint
class hinge_node_FoR(BaseLagrangeConstraint):
    __doc__ = """
    hinge_node_FoR

    This constraint forces a hinge behaviour between a node and a FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        FoR_body (int): body number of the "FoR"
        rot_axisB (np.ndarray): Rotation axis with respect to the node B FoR
    """
    _lc_id = 'hinge_node_FoR'

    def __init__(self):
        self.required_parameters = ['node_in_body', 'body', 'body_FoR', 'rot_axisB']
        self._n_eq = 5

    def get_n_eq(self):
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        # Cache the joint definition for use at every time step.
        self.node_number = MBdict_entry['node_in_body']
        self.node_body = MBdict_entry['body']
        self.FoR_body = MBdict_entry['body_FoR']
        self.rot_axisB = MBdict_entry['rot_axisB']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)

        # Classify the hinge axis: aligned with a B-frame axis (the two other
        # components are zero) or a general direction.
        for axis_name, other_comp in (('x', [1, 2]), ('y', [0, 2]), ('z', [0, 1])):
            if (self.rot_axisB[other_comp] == 0).all():
                self.rot_dir = axis_name
                self.zero_comp = np.array(other_comp, dtype=int)
                break
        else:
            self.rot_dir = 'general'
            self.indep = []
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # No static contribution for this constraint.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # First dofs of the node, of its parent FoR and of the linked FoR.
        node_dof = define_node_dof(MB_beam, self.node_body, self.node_number)
        node_FoR_dof = define_FoR_dof(MB_beam, self.node_body)
        FoR_dof = define_FoR_dof(MB_beam, self.FoR_body)

        # Equal translational velocity at the joint ...
        next_eq = equal_lin_vel_node_FoR(MB_tstep, MB_beam, self.FoR_body, self.node_body, self.node_number,
                                         node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot,
                                         self.scalingFactor, self.penaltyFactor, self._ieq, LM_K, LM_C, LM_Q)
        # ... plus rotation restricted to the hinge axis.
        if self.rot_dir == 'general':
            def_rot_axis_FoR_wrt_node_general(MB_tstep, MB_beam, self.FoR_body, self.node_body, self.node_number,
                                              node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, self.rot_axisB,
                                              self.scalingFactor, self.penaltyFactor, next_eq, LM_K, LM_C, LM_Q, self.indep)
        else:
            def_rot_axis_FoR_wrt_node_xyz(MB_tstep, MB_beam, self.FoR_body, self.node_body, self.node_number,
                                          node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, self.rot_axisB,
                                          self.scalingFactor, self.penaltyFactor, next_eq, LM_K, LM_C, LM_Q, self.zero_comp)
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # Pin the linked FoR origin to the current global position of the node.
        node_cga = MB_tstep[self.node_body].cga()
        node_pos_a = MB_tstep[self.node_body].pos[self.node_number, :]
        MB_tstep[self.FoR_body].for_pos[0:3] = np.dot(node_cga, node_pos_a) + MB_tstep[self.node_body].for_pos[0:3]
        return
@lagrangeconstraint
class hinge_node_FoR_constant_vel(BaseLagrangeConstraint):
    __doc__ = """
    hinge_node_FoR_constant_vel

    This constraint forces a hinge behaviour between a node and a FoR and
    a constant rotation velocity at the join

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        FoR_body (int): body number of the "FoR"
        rot_vect (np.ndarray): Rotation velocity vector in the node B FoR
    """
    _lc_id = 'hinge_node_FoR_constant_vel'

    def __init__(self):
        self.required_parameters = ['node_in_body', 'body', 'body_FoR', 'rot_vect']
        self._n_eq = 6

    def get_n_eq(self):
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        # Cache the joint definition for use at every time step.
        self.node_number = MBdict_entry['node_in_body']
        self.node_body = MBdict_entry['body']
        self.FoR_body = MBdict_entry['body_FoR']
        self.rot_vect = MBdict_entry['rot_vect']
        self._ieq = ieq
        self.indep = []
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # No static contribution for this constraint.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # First dofs of the node, of its parent FoR and of the linked FoR.
        node_dof = define_node_dof(MB_beam, self.node_body, self.node_number)
        node_FoR_dof = define_FoR_dof(MB_beam, self.node_body)
        FoR_dof = define_FoR_dof(MB_beam, self.FoR_body)

        # Equal translational velocity at the joint, then impose the
        # prescribed rotation velocity vector.
        next_eq = equal_lin_vel_node_FoR(MB_tstep, MB_beam, self.FoR_body, self.node_body, self.node_number,
                                         node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot,
                                         self.scalingFactor, self.penaltyFactor, self._ieq, LM_K, LM_C, LM_Q)
        def_rot_vect_FoR_wrt_node(MB_tstep, MB_beam, self.FoR_body, self.node_body, self.node_number,
                                  node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot, self.rot_vect,
                                  self.scalingFactor, self.penaltyFactor, next_eq, LM_K, LM_C, LM_Q)
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # Pin the linked FoR origin to the node position and express the
        # prescribed rotation velocity in the linked FoR's A frame.
        node_cga = MB_tstep[self.node_body].cga()
        node_pos_a = MB_tstep[self.node_body].pos[self.node_number, :]
        MB_tstep[self.FoR_body].for_pos[0:3] = np.dot(node_cga, node_pos_a) + MB_tstep[self.node_body].for_pos[0:3]

        ielem, inode_in_elem = MB_beam[self.node_body].node_master_elem[self.node_number]
        cab = ag.crv2rotation(MB_tstep[self.node_body].psi[ielem, inode_in_elem, :])
        FoR_cga = MB_tstep[self.FoR_body].cga()
        rot_vect_A = ag.multiply_matrices(FoR_cga.T, node_cga, cab, self.rot_vect)
        MB_tstep[self.FoR_body].for_vel[3:6] = rot_vect_A.copy()
        return
@lagrangeconstraint
class spherical_node_FoR(BaseLagrangeConstraint):
    __doc__ = """
    spherical_node_FoR

    This constraint forces a spherical join between a node and a FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        FoR_body (int): body number of the "FoR"
    """
    _lc_id = 'spherical_node_FoR'

    def __init__(self):
        self.required_parameters = ['node_in_body', 'body', 'body_FoR']
        self._n_eq = 3

    def get_n_eq(self):
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        # Cache the joint definition for use at every time step.
        self.node_number = MBdict_entry['node_in_body']
        self.node_body = MBdict_entry['body']
        self.FoR_body = MBdict_entry['body_FoR']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # No static contribution for this constraint.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # First dofs of the node, of its parent FoR and of the linked FoR.
        node_dof = define_node_dof(MB_beam, self.node_body, self.node_number)
        node_FoR_dof = define_FoR_dof(MB_beam, self.node_body)
        FoR_dof = define_FoR_dof(MB_beam, self.FoR_body)

        # A spherical joint only couples translational velocities; rotation
        # about the joint remains free.
        equal_lin_vel_node_FoR(MB_tstep, MB_beam, self.FoR_body, self.node_body, self.node_number,
                               node_FoR_dof, node_dof, FoR_dof, sys_size, Lambda_dot,
                               self.scalingFactor, self.penaltyFactor, self._ieq, LM_K, LM_C, LM_Q)
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # Pin the linked FoR origin to the current global position of the node.
        node_cga = MB_tstep[self.node_body].cga()
        node_pos_a = MB_tstep[self.node_body].pos[self.node_number, :]
        MB_tstep[self.FoR_body].for_pos[0:3] = np.dot(node_cga, node_pos_a) + MB_tstep[self.node_body].for_pos[0:3]
        return
@lagrangeconstraint
class free(BaseLagrangeConstraint):
    # Placeholder constraint: links nothing and contributes zero Lagrange
    # multiplier equations, so every assembly hook below is a no-op.
    _lc_id = 'free'
    __doc__ = _lc_id
    def __init__(self):
        self.required_parameters = []
        self._n_eq = 0
    def get_n_eq(self):
        # Number of constraint equations contributed (always zero).
        return self._n_eq
    def initialise(self, MBdict_entry, ieq, print_info=True):
        self._ieq = ieq
        return self._ieq + self._n_eq
    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # Nothing to assemble for an unconstrained body.
        return
    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # Nothing to assemble for an unconstrained body.
        return
    def staticpost(self, lc_list, MB_beam, MB_tstep):
        return
    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        return
@lagrangeconstraint
class spherical_FoR(BaseLagrangeConstraint):
    __doc__ = """
    spherical_FoR

    This constraint forces a spherical join at a FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        body_FoR (int): body number of the "FoR"
    """
    _lc_id = 'spherical_FoR'

    def __init__(self):
        self.required_parameters = ['body_FoR']
        self._n_eq = 3

    def get_n_eq(self):
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.body_FoR = MBdict_entry['body_FoR']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # Zero the translational velocity of the FoR through three Lagrange
        # multipliers. (The unused matrix ``B`` and the dead trailing
        # ``ieq += 3`` of the previous implementation have been removed.)
        num_LM_eq_specific = self._n_eq
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
        # Define the position of the first degree of freedom associated to the FoR
        FoR_dof = define_FoR_dof(MB_beam, self.body_FoR)
        ieq = self._ieq
        Bnh[:3, FoR_dof:FoR_dof+3] = 1.0*np.eye(3)
        LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += self.scalingFactor*Bnh
        LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += self.scalingFactor*np.transpose(Bnh)
        LM_Q[:sys_size] += self.scalingFactor*np.dot(np.transpose(Bnh),Lambda_dot[ieq:ieq+num_LM_eq_specific])
        # Residual: current translational velocity of the constrained FoR.
        LM_Q[sys_size+ieq:sys_size+ieq+3] += self.scalingFactor*MB_tstep[self.body_FoR].for_vel[0:3].astype(dtype=ct.c_double, copy=True, order='F')
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        return
@lagrangeconstraint
class hinge_FoR(BaseLagrangeConstraint):
    __doc__ = """
    hinge_FoR

    This constraint forces a hinge at a FoR: translational velocities are
    zeroed and rotation is allowed only about ``rot_axis_AFoR``.

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        body_FoR (int): body number of the "FoR"
        rot_axis_AFoR (np.ndarray): Rotation axis with respect to the node A FoR
    """
    _lc_id = 'hinge_FoR'

    def __init__(self):
        self.required_parameters = ['body_FoR', 'rot_axis_AFoR']
        self._n_eq = 5

    def get_n_eq(self):
        # Three translational + two rotational constraint equations.
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.body_FoR = MBdict_entry['body_FoR']
        self.rot_axis = MBdict_entry['rot_axis_AFoR']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)

        # Classify the rotation axis: axis-aligned hinges get a simpler,
        # better-conditioned constraint matrix than a general axis.
        if (self.rot_axis[[1, 2]] == 0).all():
            self.rot_dir = 'x'
            self.zero_comp = np.array([1, 2], dtype=int)
        elif (self.rot_axis[[0, 2]] == 0).all():
            self.rot_dir = 'y'
            self.zero_comp = np.array([0, 2], dtype=int)
        elif (self.rot_axis[[0, 1]] == 0).all():
            self.rot_dir = 'z'
            self.zero_comp = np.array([0, 1], dtype=int)
        else:
            self.rot_dir = 'general'

        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # This constraint contributes nothing to the static system.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        """Assemble the non-holonomic hinge constraint on the FoR velocities."""
        num_LM_eq_specific = self._n_eq
        # Non-holonomic constraint matrix. (The unused ``B`` array formerly
        # allocated here has been removed.)
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the FoR
        FoR_dof = define_FoR_dof(MB_beam, self.body_FoR)
        ieq = self._ieq

        # Translational velocities of the FoR are fully constrained.
        Bnh[:3, FoR_dof:FoR_dof+3] = np.eye(3)

        if self.rot_dir == 'general':
            # Only two rows of skew(rot_axis) are linearly independent: keep
            # the two with the largest norms (drop the smallest).
            skew_rot_axis = ag.skew(self.rot_axis)
            n0 = np.linalg.norm(skew_rot_axis[0, :])
            n1 = np.linalg.norm(skew_rot_axis[1, :])
            n2 = np.linalg.norm(skew_rot_axis[2, :])
            if (n0 < n1) and (n0 < n2):
                row0, row1 = 1, 2
            elif (n1 < n0) and (n1 < n2):
                row0, row1 = 0, 2
            else:
                # BUGFIX: the original chain of strict inequalities left
                # row0/row1 undefined when the norms tie (e.g. axis ~ [1,1,1]).
                # This branch covers n2 strictly smallest AND all tie cases.
                row0, row1 = 0, 1
            Bnh[3:5, FoR_dof+3:FoR_dof+6] = skew_rot_axis[[row0, row1], :]
        else:
            # Axis-aligned hinge: directly zero the two off-axis components.
            Bnh[3:5, FoR_dof+3+self.zero_comp] = np.eye(2)

        LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific, :sys_size] += self.scalingFactor*Bnh
        LM_C[:sys_size, sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += self.scalingFactor*np.transpose(Bnh)

        LM_Q[:sys_size] += self.scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
        LM_Q[sys_size+ieq:sys_size+ieq+3] += self.scalingFactor*MB_tstep[self.body_FoR].for_vel[0:3].astype(dtype=ct.c_double, copy=True, order='F')
        if self.rot_dir == 'general':
            LM_Q[sys_size+ieq+3:sys_size+ieq+5] += self.scalingFactor*np.dot(skew_rot_axis[[row0, row1], :], MB_tstep[self.body_FoR].for_vel[3:6])
        else:
            LM_Q[sys_size+ieq+3:sys_size+ieq+5] += self.scalingFactor*MB_tstep[self.body_FoR].for_vel[3 + self.zero_comp]

        if self.penaltyFactor:
            # Penalty terms reinforcing the same velocity constraints.
            LM_Q[FoR_dof:FoR_dof+3] += self.penaltyFactor*MB_tstep[self.body_FoR].for_vel[0:3]
            LM_C[FoR_dof:FoR_dof+3, FoR_dof:FoR_dof+3] += self.penaltyFactor*np.eye(3)

            if self.rot_dir == 'general':
                sq_rot_axis = np.dot(ag.skew(self.rot_axis).T, ag.skew(self.rot_axis))
                LM_Q[FoR_dof+3:FoR_dof+6] += self.penaltyFactor*np.dot(sq_rot_axis, MB_tstep[self.body_FoR].for_vel[3:6])
                LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+3:FoR_dof+6] += self.penaltyFactor*sq_rot_axis
            else:
                LM_Q[FoR_dof+3:FoR_dof+6] += self.penaltyFactor*MB_tstep[self.body_FoR].for_vel[3:6]
                LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+3:FoR_dof+6] += self.penaltyFactor*np.eye(3)

        ieq += 5
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return
@lagrangeconstraint
class hinge_FoR_wrtG(BaseLagrangeConstraint):
    __doc__ = """
    hinge_FoR_wrtG

    This constraint forces a hinge at a FoR, with the translational
    constraint expressed in the inertial (G) frame.

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        body_FoR (int): body number of the "FoR"
        rot_axis_AFoR (np.ndarray): Rotation axis with respect to the node G FoR
    """
    _lc_id = 'hinge_FoR_wrtG'

    def __init__(self):
        self.required_parameters = ['body_FoR', 'rot_axis_AFoR']
        self._n_eq = 5

    def get_n_eq(self):
        # Three translational + two rotational constraint equations.
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.body_FoR = MBdict_entry['body_FoR']
        self.rot_axis = MBdict_entry['rot_axis_AFoR']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        # NOTE(review): penaltyFactor is read for interface consistency but is
        # not used by this constraint's dynamicmat.
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # This constraint contributes nothing to the static system.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        """Assemble the hinge constraint with translations expressed in the G frame."""
        num_LM_eq_specific = self._n_eq
        # Non-holonomic constraint matrix. (The unused ``B`` array formerly
        # allocated here has been removed.)
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the FoR
        FoR_dof = define_FoR_dof(MB_beam, self.body_FoR)
        ieq = self._ieq

        # Translational velocities, rotated into the inertial frame.
        Bnh[:3, FoR_dof:FoR_dof+3] = MB_tstep[self.body_FoR].cga()

        # Only two rows of skew(rot_axis) are linearly independent: keep the
        # two with the largest norms (drop the smallest).
        skew_rot_axis = ag.skew(self.rot_axis)
        n0 = np.linalg.norm(skew_rot_axis[0, :])
        n1 = np.linalg.norm(skew_rot_axis[1, :])
        n2 = np.linalg.norm(skew_rot_axis[2, :])
        if (n0 < n1) and (n0 < n2):
            row0, row1 = 1, 2
        elif (n1 < n0) and (n1 < n2):
            row0, row1 = 0, 2
        else:
            # BUGFIX: the original chain of strict inequalities left row0/row1
            # undefined when the norms tie (e.g. axis ~ [1,1,1]). This branch
            # covers n2 strictly smallest AND all tie cases.
            row0, row1 = 0, 1
        Bnh[3:5, FoR_dof+3:FoR_dof+6] = skew_rot_axis[[row0, row1], :]

        LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific, :sys_size] += self.scalingFactor*Bnh
        LM_C[:sys_size, sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += self.scalingFactor*np.transpose(Bnh)
        # Linearisation of the quaternion-dependent rotation in Bnh.
        LM_C[FoR_dof:FoR_dof+3, FoR_dof+6:FoR_dof+10] += self.scalingFactor*ag.der_CquatT_by_v(MB_tstep[self.body_FoR].quat, Lambda_dot[ieq:ieq+3])

        LM_Q[:sys_size] += self.scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
        LM_Q[sys_size+ieq:sys_size+ieq+3] += self.scalingFactor*np.dot(MB_tstep[self.body_FoR].cga(), MB_tstep[self.body_FoR].for_vel[0:3])
        LM_Q[sys_size+ieq+3:sys_size+ieq+5] += self.scalingFactor*np.dot(skew_rot_axis[[row0, row1], :], MB_tstep[self.body_FoR].for_vel[3:6])

        ieq += 5
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return
@lagrangeconstraint
class fully_constrained_node_FoR(BaseLagrangeConstraint):
    __doc__ = """
    fully_constrained_node_FoR

    This constraint forces linear and angular displacements between a node
    and a FoR to be the same

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        node_number (int): number of the "node" within its own body
        node_body (int): body number of the "node"
        FoR_body (int): body number of the "FoR"
    """
    _lc_id = 'fully_constrained_node_FoR'

    def __init__(self):
        self.required_parameters = ['node_in_body', 'body', 'body_FoR']
        # Six equations: three linear + three angular velocity constraints.
        self._n_eq = 6

    def get_n_eq(self):
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        # The implementation of dynamicmat hard-codes several body/node indices
        # (see the NOTE(review) comments below), hence this warning.
        cout.cout_wrap("WARNING: do not use fully_constrained_node_FoR. It is outdated. Definetly not working if 'body' has velocity", 3)
        self.node_number = MBdict_entry['node_in_body']
        self.node_body = MBdict_entry['body']
        self.FoR_body = MBdict_entry['body_FoR']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # This constraint contributes nothing to the static system.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # Assemble the non-holonomic constraint equating node and FoR velocities.
        num_LM_eq_specific = self._n_eq
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')
        # NOTE(review): ``B`` is allocated but never used in this method.
        B = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order = 'F')

        node_dof = define_node_dof(MB_beam, self.node_body, self.node_number)
        FoR_dof = define_FoR_dof(MB_beam, self.FoR_body)
        ieq = self._ieq

        # Option with non holonomic constraints
        # BC for linear velocities
        Bnh[:3, node_dof:node_dof+3] = -1.0*np.eye(3)
        quat = ag.quat_bound(MB_tstep[self.FoR_body].quat)
        Bnh[:3, FoR_dof:FoR_dof+3] = ag.quat2rotation(quat)

        # BC for angular velocities
        Bnh[3:6,FoR_dof+3:FoR_dof+6] = -1.0*ag.quat2rotation(quat)
        # NOTE(review): ``MB_beam[0]`` / ``MB_tstep[0]`` hard-code body 0 instead
        # of ``self.node_body`` -- consistent with the "outdated" warning above.
        ielem, inode_in_elem = MB_beam[0].node_master_elem[self.node_number]
        Bnh[3:6,node_dof+3:node_dof+6] = ag.crv2tan(MB_tstep[0].psi[ielem, inode_in_elem, :])

        LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific,:sys_size] += self.scalingFactor*Bnh
        LM_C[:sys_size,sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += self.scalingFactor*np.transpose(Bnh)

        LM_Q[:sys_size] += self.scalingFactor*np.dot(np.transpose(Bnh),Lambda_dot[ieq:ieq+num_LM_eq_specific])
        # NOTE(review): ``pos_dot[-1,:]`` assumes the constrained node is the last
        # node of body 0 and ``MB_tstep[1]`` hard-codes body 1; the second term is
        # also missing the scalingFactor applied elsewhere -- confirm before reuse.
        LM_Q[sys_size+ieq:sys_size+ieq+3] += -self.scalingFactor*MB_tstep[0].pos_dot[-1,:] + np.dot(ag.quat2rotation(quat),MB_tstep[1].for_vel[0:3])
        LM_Q[sys_size+ieq+3:sys_size+ieq+6] += self.scalingFactor*(np.dot(ag.crv2tan(MB_tstep[0].psi[ielem, inode_in_elem, :]),MB_tstep[0].psi_dot[ielem, inode_in_elem, :]) -
                                                                   np.dot(ag.quat2rotation(quat), MB_tstep[self.FoR_body].for_vel[3:6]))

        #LM_K[FoR_dof:FoR_dof+3,FoR_dof+6:FoR_dof+10] = ag.der_CquatT_by_v(MB_tstep[body_FoR].quat,Lambda_dot)
        # Linearisation of the quaternion-dependent rotations in Bnh.
        LM_C[FoR_dof:FoR_dof+3,FoR_dof+6:FoR_dof+10] += self.scalingFactor*ag.der_CquatT_by_v(quat,Lambda_dot[ieq:ieq+3])
        LM_C[FoR_dof+3:FoR_dof+6,FoR_dof+6:FoR_dof+10] -= self.scalingFactor*ag.der_CquatT_by_v(quat,Lambda_dot[ieq+3:ieq+6])
        # Linearisation of the CRV tangent operator with respect to psi.
        LM_K[node_dof+3:node_dof+6,node_dof+3:node_dof+6] += self.scalingFactor*ag.der_TanT_by_xv(MB_tstep[0].psi[ielem, inode_in_elem, :],Lambda_dot[ieq+3:ieq+6])

        ieq += 6
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # MB_tstep[self.FoR_body].for_pos[0:3] = np.dot(ag.quat2rotation(MB_tstep[self.node_body].quat), MB_tstep[self.node_body].pos[self.node_number,:]) + MB_tstep[self.node_body].for_pos[0:3]
        return
@lagrangeconstraint
class constant_rot_vel_FoR(BaseLagrangeConstraint):
    __doc__ = """
    constant_rot_vel_FoR

    This constraint forces a constant rotation velocity of a FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        FoR_body (int): body number of the "FoR"
        rot_vel (np.ndarray): 3 components of the desired rotation velocity
    """
    _lc_id = 'constant_rot_vel_FoR'

    def __init__(self):
        self.required_parameters = ['FoR_body', 'rot_vel']
        self._n_eq = 3

    def get_n_eq(self):
        # One equation per angular velocity component.
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.rot_vel = MBdict_entry['rot_vel']
        self.FoR_body = MBdict_entry['FoR_body']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        # NOTE(review): penaltyFactor is read for interface consistency but is
        # not used by this constraint's dynamicmat.
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # This constraint contributes nothing to the static system.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        """Constrain the FoR angular velocity to equal ``rot_vel``."""
        num_LM_eq_specific = self._n_eq
        # Non-holonomic constraint matrix. (The unused ``B`` array formerly
        # allocated here has been removed.)
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the FoR
        FoR_dof = define_FoR_dof(MB_beam, self.FoR_body)
        ieq = self._ieq

        # Select the three FoR angular velocity DoFs.
        Bnh[:3, FoR_dof+3:FoR_dof+6] = np.eye(3)

        LM_C[sys_size+ieq:sys_size+ieq+num_LM_eq_specific, :sys_size] += self.scalingFactor*Bnh
        LM_C[:sys_size, sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += self.scalingFactor*np.transpose(Bnh)

        LM_Q[:sys_size] += self.scalingFactor*np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq+num_LM_eq_specific])
        # Residual: difference between current and target angular velocity.
        LM_Q[sys_size+ieq:sys_size+ieq+num_LM_eq_specific] += self.scalingFactor*(MB_tstep[self.FoR_body].for_vel[3:6] - self.rot_vel)

        ieq += 3
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return
@lagrangeconstraint
class constant_vel_FoR(BaseLagrangeConstraint):
    __doc__ = """
    constant_vel_FoR

    This constraint forces a constant velocity of a FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        FoR_body (int): body number of the "FoR"
        vel (np.ndarray): 6 components of the desired velocity
    """
    _lc_id = 'constant_vel_FoR'

    def __init__(self):
        self.required_parameters = ['FoR_body', 'vel']
        self._n_eq = 6

    def get_n_eq(self):
        # Six equations: three linear + three angular velocity components.
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.vel = MBdict_entry['vel']
        self.FoR_body = MBdict_entry['FoR_body']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        # NOTE(review): penaltyFactor is read for interface consistency but is
        # not used by this constraint's dynamicmat.
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # This constraint contributes nothing to the static system.
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        """Constrain all six FoR velocity components to equal ``vel``."""
        num_LM_eq_specific = self._n_eq
        # Non-holonomic constraint matrix. (The unused ``B`` array formerly
        # allocated here has been removed.)
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the FoR
        FoR_dof = define_FoR_dof(MB_beam, self.FoR_body)
        ieq = self._ieq

        # Select all six FoR velocity DoFs.
        Bnh[:num_LM_eq_specific, FoR_dof:FoR_dof+6] = np.eye(6)

        LM_C[sys_size + ieq:sys_size + ieq + num_LM_eq_specific, :sys_size] += self.scalingFactor * Bnh
        LM_C[:sys_size, sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor * np.transpose(Bnh)

        LM_Q[:sys_size] += self.scalingFactor * np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq + num_LM_eq_specific])
        # Residual: difference between current and target velocity.
        LM_Q[sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor*(MB_tstep[self.FoR_body].for_vel - self.vel)

        ieq += 6
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return
@lagrangeconstraint
class lin_vel_node_wrtA(BaseLagrangeConstraint):
    __doc__ = """
    lin_vel_node_wrtA

    This constraint forces the linear velocity of a node to have a
    certain value with respect to the A FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        node_number (int): number of the "node" within its own body
        body_number (int): body number of the "node"
        vel (np.ndarray): 3 components of the desired velocity with respect to
            the A FoR; a 2D array is interpreted as a time history with one
            row per time step
    """
    _lc_id = 'lin_vel_node_wrtA'

    def __init__(self):
        self.required_parameters = ['velocity', 'body_number', 'node_number']
        self._n_eq = 3

    def get_n_eq(self):
        # One equation per linear velocity component.
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.vel = MBdict_entry['velocity']
        self.body_number = MBdict_entry['body_number']
        self.node_number = MBdict_entry['node_number']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        # NOTE(review): penaltyFactor is read for interface consistency but is
        # not used by this constraint.
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        """Holonomic constraint pinning the node to its initial position."""
        num_LM_eq_specific = self._n_eq
        B = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the node
        node_dof = define_node_dof(MB_beam, self.body_number, self.node_number)
        ieq = self._ieq

        B[:num_LM_eq_specific, node_dof:node_dof+3] = np.eye(3)

        LM_K[sys_size + ieq:sys_size + ieq + num_LM_eq_specific, :sys_size] += self.scalingFactor * B
        LM_K[:sys_size, sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor * np.transpose(B)

        LM_Q[:sys_size] += self.scalingFactor * np.dot(np.transpose(B), Lambda[ieq:ieq + num_LM_eq_specific])
        # Residual: displacement of the node from its initial position.
        LM_Q[sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor*(MB_tstep[self.body_number].pos[self.node_number, :] -
                                                                                        MB_beam[self.body_number].ini_info.pos[self.node_number, :])

        ieq += 3
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        """Non-holonomic constraint driving the node velocity to ``vel``."""
        # A 2D ``vel`` stores one prescribed velocity per time step.
        if len(self.vel.shape) > 1:
            current_vel = self.vel[ts-1, :]
        else:
            current_vel = self.vel

        num_LM_eq_specific = self._n_eq
        # Non-holonomic constraint matrix. (The unused ``B`` array formerly
        # allocated here has been removed.)
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the node
        node_dof = define_node_dof(MB_beam, self.body_number, self.node_number)
        ieq = self._ieq

        Bnh[:num_LM_eq_specific, node_dof:node_dof+3] = np.eye(3)

        LM_C[sys_size + ieq:sys_size + ieq + num_LM_eq_specific, :sys_size] += self.scalingFactor * Bnh
        LM_C[:sys_size, sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor * np.transpose(Bnh)

        LM_Q[:sys_size] += self.scalingFactor * np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq + num_LM_eq_specific])
        # Residual: difference between current and prescribed node velocity.
        LM_Q[sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor*(MB_tstep[self.body_number].pos_dot[self.node_number, :] - current_vel)

        ieq += 3
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return
@lagrangeconstraint
class lin_vel_node_wrtG(BaseLagrangeConstraint):
    __doc__ = """
    lin_vel_node_wrtG

    This constraint forces the linear velocity of a node to have a
    certain value with respect to the G FoR

    See ``LagrangeConstraints`` for the description of variables

    Attributes:
        node_number (int): number of the "node" within its own body
        body_number (int): body number of the "node"
        vel (np.ndarray): 3 components of the desired velocity with respect to
            the G FoR; a 2D array is interpreted as a time history with one
            row per time step
    """
    _lc_id = 'lin_vel_node_wrtG'

    def __init__(self):
        self.required_parameters = ['velocity', 'body_number', 'node_number']
        self._n_eq = 3

    def get_n_eq(self):
        # One equation per linear velocity component.
        return self._n_eq

    def initialise(self, MBdict_entry, ieq, print_info=True):
        self.vel = MBdict_entry['velocity']
        self.body_number = MBdict_entry['body_number']
        self.node_number = MBdict_entry['node_number']
        self._ieq = ieq
        self.scalingFactor = set_value_or_default(MBdict_entry, "scalingFactor", 1.)
        # NOTE(review): penaltyFactor is read for interface consistency but is
        # not used by this constraint.
        self.penaltyFactor = set_value_or_default(MBdict_entry, "penaltyFactor", 0.)
        return self._ieq + self._n_eq

    def staticmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                  sys_size, dt, Lambda, Lambda_dot):
        # Holonomic constraint: pin the node's inertial-frame position to its
        # initial inertial-frame position.
        num_LM_eq_specific = self._n_eq
        B = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the node
        node_dof = define_node_dof(MB_beam, self.body_number, self.node_number)
        ieq = self._ieq

        # Rotate the node position DoFs into the inertial frame.
        B[:num_LM_eq_specific, node_dof:node_dof+3] = MB_tstep[self.body_number].cga()

        LM_K[sys_size + ieq:sys_size + ieq + num_LM_eq_specific, :sys_size] += self.scalingFactor * B
        LM_K[:sys_size, sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor * np.transpose(B)

        LM_Q[:sys_size] += self.scalingFactor * np.dot(np.transpose(B), Lambda[ieq:ieq + num_LM_eq_specific])
        # Residual: current minus initial inertial position of the node.
        LM_Q[sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor*(np.dot(MB_tstep[self.body_number].cga(), MB_tstep[self.body_number].pos[self.node_number,:]) +
                                                                                        MB_tstep[self.body_number].for_pos)
        LM_Q[sys_size + ieq:sys_size + ieq + num_LM_eq_specific] -= self.scalingFactor*(np.dot(MB_beam[self.body_number].ini_info.cga(), MB_beam[self.body_number].ini_info.pos[self.node_number,:]) +
                                                                                        MB_beam[self.body_number].ini_info.for_pos)

        ieq += 3
        return

    def dynamicmat(self, LM_C, LM_K, LM_Q, MB_beam, MB_tstep, ts, num_LM_eq,
                   sys_size, dt, Lambda, Lambda_dot):
        # Non-holonomic constraint: drive the node's inertial-frame velocity
        # (rigid-body FoR contribution plus local deformation rate) to ``vel``.
        # A 2D ``vel`` stores one prescribed velocity per time step.
        if len(self.vel.shape) > 1:
            current_vel = self.vel[ts-1, :]
        else:
            current_vel = self.vel

        num_LM_eq_specific = self._n_eq
        Bnh = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')
        # NOTE(review): ``B`` is allocated but never used in this method.
        B = np.zeros((num_LM_eq_specific, sys_size), dtype=ct.c_double, order='F')

        # Define the position of the first degree of freedom associated to the node
        FoR_dof = define_FoR_dof(MB_beam, self.body_number)
        node_dof = define_node_dof(MB_beam, self.body_number, self.node_number)
        ieq = self._ieq

        # FoR velocity contributions only exist when the body FoR is free.
        if MB_beam[self.body_number].FoR_movement == 'free':
            Bnh[:num_LM_eq_specific, FoR_dof:FoR_dof+3] = MB_tstep[self.body_number].cga()
            Bnh[:num_LM_eq_specific, FoR_dof+3:FoR_dof+6] = -np.dot(MB_tstep[self.body_number].cga(), ag.skew(MB_tstep[self.body_number].pos[self.node_number,:]))
        Bnh[:num_LM_eq_specific, node_dof:node_dof+3] = MB_tstep[self.body_number].cga()

        LM_C[sys_size + ieq:sys_size + ieq + num_LM_eq_specific, :sys_size] += self.scalingFactor * Bnh
        LM_C[:sys_size, sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor * np.transpose(Bnh)

        if MB_beam[self.body_number].FoR_movement == 'free':
            # Linearisation of the quaternion-dependent rotations in Bnh.
            LM_C[FoR_dof:FoR_dof+3, FoR_dof+6:FoR_dof+10] += self.scalingFactor*ag.der_CquatT_by_v(MB_tstep[self.body_number].quat,Lambda_dot[ieq:ieq + num_LM_eq_specific])
            LM_C[node_dof:node_dof+3, FoR_dof+6:FoR_dof+10] += self.scalingFactor*ag.der_CquatT_by_v(MB_tstep[self.body_number].quat,Lambda_dot[ieq:ieq + num_LM_eq_specific])
            LM_C[FoR_dof+3:FoR_dof+6, FoR_dof+6:FoR_dof+10] += self.scalingFactor*np.dot(ag.skew(MB_tstep[self.body_number].pos[self.node_number,:]), ag.der_CquatT_by_v(MB_tstep[self.body_number].quat,Lambda_dot[ieq:ieq + num_LM_eq_specific]))

            # Stiffness contribution from the dependence of Bnh on the node position.
            LM_K[FoR_dof+3:FoR_dof+6, node_dof:node_dof+3] -= self.scalingFactor*ag.skew(np.dot(MB_tstep[self.body_number].cga().T, Lambda_dot[ieq:ieq + num_LM_eq_specific]))

        LM_Q[:sys_size] += self.scalingFactor * np.dot(np.transpose(Bnh), Lambda_dot[ieq:ieq + num_LM_eq_specific])
        # Residual: inertial-frame node velocity minus the prescribed velocity.
        LM_Q[sys_size + ieq:sys_size + ieq + num_LM_eq_specific] += self.scalingFactor*(np.dot( MB_tstep[self.body_number].cga(), (
                                                                    MB_tstep[self.body_number].for_vel[0:3] +
                                                                    np.dot(ag.skew(MB_tstep[self.body_number].for_vel[3:6]), MB_tstep[self.body_number].pos[self.node_number,:]) +
                                                                    MB_tstep[self.body_number].pos_dot[self.node_number,:])) -
                                                                    current_vel)

        ieq += 3
        return

    def staticpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return

    def dynamicpost(self, lc_list, MB_beam, MB_tstep):
        # No postprocessing needed.
        return
################################################################################
# Funtions to interact with this Library
################################################################################
def initialize_constraints(MBdict):
    """Create and initialise every Lagrange constraint described in ``MBdict``.

    Constraints are read from the entries ``constraint_00``, ``constraint_01``,
    ... and each one is assigned its starting equation index in sequence.
    """
    lc_list = []
    index_eq = 0
    for iconstraint in range(MBdict['num_constraints']):
        entry = MBdict["constraint_%02d" % iconstraint]
        constraint = lc_from_string(entry['behaviour'])()
        index_eq = constraint.initialise(entry, index_eq)
        lc_list.append(constraint)
    return lc_list
def define_num_LM_eq(lc_list):
    """
    define_num_LM_eq

    Define the number of equations needed to define the boundary conditions

    Args:
        lc_list(): list of all the defined constraints

    Returns:
        num_LM_eq(int): number of new equations needed to define the boundary conditions

    Examples:
        num_LM_eq = lagrangeconstraints.define_num_LM_eq(lc_list)
    """
    # Total equations is simply the sum over every constraint's contribution.
    return sum(lc.get_n_eq() for lc in lc_list)
def generate_lagrange_matrix(lc_list, MB_beam, MB_tstep, ts, num_LM_eq, sys_size, dt, Lambda, Lambda_dot, dynamic_or_static):
    """
    generate_lagrange_matrix

    Generates the matrices associated to the Lagrange multipliers boundary conditions

    Args:
        lc_list(): list of all the defined constraints
        MB_beam(list): list of 'beams' of each of the bodies that form the system
        MB_tstep(list): list of 'StructTimeStepInfo' of each of the bodies that form the system
        ts(int): time step number
        num_LM_eq(int): number of new equations needed to define the boundary conditions
        sys_size(int): total number of degrees of freedom of the multibody system
        dt(float): time step
        Lambda(np.ndarray): list of Lagrange multipliers values
        Lambda_dot(np.ndarray): list of the first derivative of the Lagrange multipliers values
        dynamic_or_static (str): string defining if the computation is dynamic or static

    Returns:
        LM_C (np.ndarray): Damping matrix associated to the Lagrange Multipliers equations
        LM_K (np.ndarray): Stiffness matrix associated to the Lagrange Multipliers equations
        LM_Q (np.ndarray): Vector of independent terms associated to the Lagrange Multipliers equations
    """
    # Augmented system size: structural DoFs plus one row/column per multiplier.
    n_total = sys_size + num_LM_eq
    LM_C = np.zeros((n_total, n_total), dtype=ct.c_double, order='F')
    LM_K = np.zeros((n_total, n_total), dtype=ct.c_double, order='F')
    LM_Q = np.zeros((n_total,), dtype=ct.c_double, order='F')

    # Let every constraint accumulate its contribution in place. Each
    # constraint knows its own starting equation index (set at initialise).
    mode = dynamic_or_static.lower()
    for lc in lc_list:
        if mode == "static":
            lc.staticmat(LM_C=LM_C,
                         LM_K=LM_K,
                         LM_Q=LM_Q,
                         MB_beam=MB_beam,
                         MB_tstep=MB_tstep,
                         ts=ts,
                         num_LM_eq=num_LM_eq,
                         sys_size=sys_size,
                         dt=dt,
                         Lambda=Lambda,
                         Lambda_dot=Lambda_dot)
        elif mode == "dynamic":
            lc.dynamicmat(LM_C=LM_C,
                          LM_K=LM_K,
                          LM_Q=LM_Q,
                          MB_beam=MB_beam,
                          MB_tstep=MB_tstep,
                          ts=ts,
                          num_LM_eq=num_LM_eq,
                          sys_size=sys_size,
                          dt=dt,
                          Lambda=Lambda,
                          Lambda_dot=Lambda_dot)

    return LM_C, LM_K, LM_Q
def postprocess(lc_list, MB_beam, MB_tstep, dynamic_or_static):
    """
    Run the postprocess of all the Lagrange Constraints in the system
    """
    mode = dynamic_or_static.lower()
    for lc in lc_list:
        if mode == "static":
            lc.staticpost(lc_list=lc_list, MB_beam=MB_beam, MB_tstep=MB_tstep)
        elif mode == "dynamic":
            lc.dynamicpost(lc_list=lc_list, MB_beam=MB_beam, MB_tstep=MB_tstep)
    return
def remove_constraint(MBdict, constraint):
    """
    Removes a constraint from the list.

    This function is thought to release constraints at some point during
    a dynamic simulation
    """
    try:
        MBdict.pop(constraint)  # raises KeyError when the entry is absent
        MBdict['num_constraints'] -= 1
    except KeyError:
        # The entry did not exist: leave num_constraints untouched.
        pass
################################################################################
################################################################################
################################################################################
# Module import side effect: print the catalogue of registered Lagrange
# constraints when this module is imported.
print_available_lc()
| 45.911983
| 265
| 0.613997
| 12,666
| 86,590
| 3.881099
| 0.033081
| 0.043452
| 0.026201
| 0.042109
| 0.878738
| 0.854286
| 0.834045
| 0.814537
| 0.800643
| 0.782396
| 0
| 0.01157
| 0.271325
| 86,590
| 1,885
| 266
| 45.93634
| 0.767529
| 0.156623
| 0
| 0.684885
| 0
| 0.000854
| 0.079805
| 0.006047
| 0
| 0
| 0
| 0.000531
| 0
| 1
| 0.093937
| false
| 0.000854
| 0.005124
| 0.037575
| 0.213493
| 0.013664
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f1e9fa42fc35499cd5aacf042367790b6b15fae9
| 25
|
py
|
Python
|
zeva12can/__init__.py
|
sectioncritical/zeva12can
|
7dbb426a18b8ded8d6c118df998c1cad2d2fdd67
|
[
"MIT"
] | null | null | null |
zeva12can/__init__.py
|
sectioncritical/zeva12can
|
7dbb426a18b8ded8d6c118df998c1cad2d2fdd67
|
[
"MIT"
] | null | null | null |
zeva12can/__init__.py
|
sectioncritical/zeva12can
|
7dbb426a18b8ded8d6c118df998c1cad2d2fdd67
|
[
"MIT"
] | null | null | null |
from .bms12 import BMS12
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.16
| 25
| 1
| 25
| 25
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9e3eabcab4943507ec1525b88097448a94ad5a1a
| 3,376
|
py
|
Python
|
app/src/main/python/film.py
|
108360224/watch_video
|
bfbcd0fbe617eceb974d8c1e9c976f47ad7b0814
|
[
"MIT"
] | null | null | null |
app/src/main/python/film.py
|
108360224/watch_video
|
bfbcd0fbe617eceb974d8c1e9c976f47ad7b0814
|
[
"MIT"
] | null | null | null |
app/src/main/python/film.py
|
108360224/watch_video
|
bfbcd0fbe617eceb974d8c1e9c976f47ad7b0814
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 10:45:29 2020
@author: max
"""
from bs4 import BeautifulSoup
import requests
import re
from make_first_page import make_first_page
import numpy as np
import cv2
class Film():
    """Scraper for film listing pages on www.99kubo.tv.

    Every public method refreshes ``self.film_list``, a tuple
    ``(url_list, title_list, img_list)`` of three parallel tuples scraped
    from the current listing page. The same scraping loop was previously
    duplicated in four methods; it now lives in ``_fetch_and_scrape``.
    """

    _BASE = 'http://www.99kubo.tv'

    def __init__(self, URL):
        # Resolve the listing URL from the category page's third anchor.
        html = requests.get(self._BASE + URL).text
        soup = BeautifulSoup(html, 'lxml')
        a = soup.select_one('body > div.main > div.list > div.listlf > dl > span > a:nth-child(3)')
        self.URL = self._BASE + '/' + a['href']
        self._fetch_and_scrape(self.URL)

    def get_film_list(self):
        """Return the last scraped ``(url_list, title_list, img_list)`` tuple."""
        return self.film_list

    def sort_by(self, sort):
        """Reload the listing with a different 'order-...' sort key."""
        self.URL = re.sub(r'order.+%20desc', 'order-' + sort + '%20desc', self.URL)
        self._fetch_and_scrape(self.URL)

    def goto_area(self, area):
        """Reload the listing filtered by region ('area-...-tag')."""
        self.URL = re.sub(r'area.+tag', 'area-' + area + '-tag', self.URL)
        self._fetch_and_scrape(self.URL)

    def load_new_film(self):
        """Follow the last pagination anchor of the current page and rescrape.

        NOTE: matches the original behaviour -- ``self.URL`` is deliberately
        NOT updated here.
        """
        tag = self.soup.select_one('body > div.main > div.list > div.listlf > div')
        a = tag.find_all('a')[-1]
        self._fetch_and_scrape(self._BASE + '/' + a['href'])

    def _fetch_and_scrape(self, url):
        # Download ``url``, keep the parsed soup and refresh the film list.
        html = requests.get(url).text
        self.soup = BeautifulSoup(html, 'lxml')
        self.film_list = self._scrape_film_list()

    def _scrape_film_list(self):
        # Collect (url, title, image) for every <li> entry of the listing.
        url_list = ()
        img_list = ()
        title_list = ()
        ul = self.soup.select_one('body > div.main > div.list > div.listlf > ul')
        for li in ul.select('li'):
            a = li.select('a')[0]
            url_list += (a['href'],)
            img = a.find_all('img')[0]
            img_list += (img['data-original'],)
            title_list += (img['alt'],)
        return (url_list, title_list, img_list)
| 32.152381
| 97
| 0.531991
| 473
| 3,376
| 3.649049
| 0.173362
| 0.048667
| 0.060255
| 0.078795
| 0.797219
| 0.793163
| 0.765353
| 0.730591
| 0.730591
| 0.730591
| 0
| 0.015101
| 0.293839
| 3,376
| 104
| 98
| 32.461538
| 0.708893
| 0.021327
| 0
| 0.753086
| 0
| 0.012346
| 0.18841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0
| 0.074074
| 0.012346
| 0.160494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9e4006cadb828a36d14809ba6e15176ea16ed232
| 8,391
|
py
|
Python
|
tests/test_meshes/test_mesh_topology.py
|
pysofe/pysofe
|
088d4061fcf194a85ff3332e7bdd3bde095e4f69
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_meshes/test_mesh_topology.py
|
pysofe/pysofe
|
088d4061fcf194a85ff3332e7bdd3bde095e4f69
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_meshes/test_mesh_topology.py
|
pysofe/pysofe
|
088d4061fcf194a85ff3332e7bdd3bde095e4f69
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for mesh topologies.
"""
import numpy as np
import pytest
from pysofe import meshes
# the 1D test mesh
#
# 1---(1)---2---(2)---3---(3)---4
class TestMeshTopology1D(object):
    """Incidence and boundary checks on the 3-cell 1D interval mesh.

    Vertices 1..4 joined by edges (1)-(3), as sketched in the comment
    above this class.
    """

    # connectivity array: each row lists the two vertex ids of one edge
    cells1D = np.array([[1, 2],
                        [2, 3],
                        [3, 4]])

    topo = meshes.topology.MeshTopology(cells=cells1D, dimension=1)

    def test_attributes(self):
        assert self.topo._dimension == 1
        assert np.array_equal(self.topo._n_vertices, [1, 2])

    def test_incidence_1_0_and_0_1(self):
        # edge -> vertex incidence; the reverse map is its transpose
        edge_vertex = np.array([[1, 1, 0, 0],
                                [0, 1, 1, 0],
                                [0, 0, 1, 1]])
        assert np.array_equal(self.topo.get_connectivity(1, 0).toarray(),
                              edge_vertex)
        assert np.array_equal(self.topo.get_connectivity(0, 1).toarray(),
                              edge_vertex.T)

    def test_incidence_1_1(self):
        # two edges are incident iff they share a vertex
        assert np.array_equal(self.topo.get_connectivity(1, 1).toarray(),
                              np.array([[0, 1, 0],
                                        [1, 0, 1],
                                        [0, 1, 0]]))

    def test_incidence_0_0(self):
        # vertex/vertex incidence degenerates to the identity
        assert np.array_equal(self.topo.get_connectivity(0, 0).toarray(),
                              np.eye(4))

    def test_boundary(self):
        # only the two end vertices / end edges lie on the boundary
        assert np.array_equal(self.topo.get_boundary(0), [1, 0, 0, 1])
        assert np.array_equal(self.topo.get_boundary(1), [1, 0, 1])
# the 2D test mesh
#
# 4---------3
# |\ /|
# | \ (3) / |
# | \ / |
# | \ / |
# |(4) 5 (2)|
# | / \ |
# | / \ |
# | / (1) \ |
# |/ \|
# 1---------2
class TestMeshTopology2D(object):
    """Incidence and boundary checks on a four-triangle 2D mesh.

    Four triangles share the central vertex 5 inside the square spanned
    by vertices 1-4 (see the sketch in the comment above this class).
    Entity indices in the expected matrices follow the ordering produced
    by ``MeshTopology`` itself — presumably lexicographic by vertex ids;
    TODO confirm against the implementation.
    """
    # the 2D test mesh connectivity array
    cells2D = np.array([[1,2,5],
                        [2,3,5],
                        [3,4,5],
                        [4,1,5]])
    topo = meshes.topology.MeshTopology(cells=cells2D, dimension=2)
    def test_attributes(self):
        # dimension and number of vertices per entity (vertex, edge, cell)
        assert self.topo._dimension == 2
        assert np.all(self.topo._n_vertices == [1, 2, 3])
    def test_incidence_2_0_and_0_2(self):
        # cell -> vertex incidence (rows: triangles, cols: vertices) ...
        assert np.all(self.topo.get_connectivity(2,0).toarray()
                      == np.array([[1,1,0,0,1],
                                   [0,1,1,0,1],
                                   [0,0,1,1,1],
                                   [1,0,0,1,1]]))
        # ... and its reverse map (vertex -> cell)
        assert np.all(self.topo.get_connectivity(0,2).toarray()
                      == np.array([[1,0,0,1],
                                   [1,1,0,0],
                                   [0,1,1,0],
                                   [0,0,1,1],
                                   [1,1,1,1]]))
    def test_incidence_1_0_and_0_1(self):
        # edge -> vertex incidence (8 edges, 5 vertices) ...
        assert np.all(self.topo.get_connectivity(1,0).toarray()
                      == np.array([[1,1,0,0,0],
                                   [1,0,0,1,0],
                                   [1,0,0,0,1],
                                   [0,1,1,0,0],
                                   [0,1,0,0,1],
                                   [0,0,1,1,0],
                                   [0,0,1,0,1],
                                   [0,0,0,1,1]]))
        # ... and vertex -> edge
        assert np.all(self.topo.get_connectivity(0,1).toarray()
                      == np.array([[1,1,1,0,0,0,0,0],
                                   [1,0,0,1,1,0,0,0],
                                   [0,0,0,1,0,1,1,0],
                                   [0,1,0,0,0,1,0,1],
                                   [0,0,1,0,1,0,1,1]]))
    def test_incidence_2_1_and_1_2(self):
        # cell -> edge incidence (each triangle owns three edges) ...
        assert np.all(self.topo.get_connectivity(2,1).toarray()
                      == np.array([[1,0,1,0,1,0,0,0],
                                   [0,0,0,1,1,0,1,0],
                                   [0,0,0,0,0,1,1,1],
                                   [0,1,1,0,0,0,0,1]]))
        # ... and edge -> cell (interior edges belong to two triangles)
        assert np.all(self.topo.get_connectivity(1,2).toarray()
                      == np.array([[1,0,0,0],
                                   [0,0,0,1],
                                   [1,0,0,1],
                                   [0,1,0,0],
                                   [1,1,0,0],
                                   [0,0,1,0],
                                   [0,1,1,0],
                                   [0,0,1,1]]))
    def test_incidence_2_2(self):
        # cell/cell adjacency: each triangle touches its two neighbours
        assert np.all(self.topo.get_connectivity(2,2).toarray()
                      == np.array([[0,1,0,1],
                                   [1,0,1,0],
                                   [0,1,0,1],
                                   [1,0,1,0]]))
    def test_incidence_1_1(self):
        # edge/edge adjacency via shared vertices
        assert np.all(self.topo.get_connectivity(1,1).toarray()
                      == np.array([[0,1,1,1,1,0,0,0],
                                   [1,0,1,0,0,1,0,1],
                                   [1,1,0,0,1,0,1,1],
                                   [1,0,0,0,1,1,1,0],
                                   [1,0,1,1,0,0,1,1],
                                   [0,1,0,1,0,0,1,1],
                                   [0,0,1,1,1,1,0,1],
                                   [0,1,1,0,1,1,1,0]]))
    def test_incidence_0_0(self):
        # vertex/vertex incidence degenerates to the identity
        assert np.all(self.topo.get_connectivity(0,0).toarray()
                      == np.eye(5))
    def test_boundary(self):
        # the centre vertex 5 and the four interior edges are not on the boundary
        assert np.all(self.topo.get_boundary(0)
                      == np.array([1,1,1,1,0]))
        assert np.all(self.topo.get_boundary(1)
                      == np.array([1,1,0,1,0,1,0,0]))
        assert np.all(self.topo.get_boundary(2)
                      == np.array([1,1,1,1]))
class TestMeshTopology3D(object):
    """Incidence checks on a minimal 3D mesh of two tetrahedra.

    The two cells share the triangular face {1, 3, 5}.  Entity indices in
    the expected matrices follow the ordering produced by ``MeshTopology``
    itself — presumably lexicographic by vertex ids; TODO confirm.
    """
    # the 3D test mesh connectivity array
    cells3D = np.array([[1,2,3,5],
                        [3,4,1,5]])
    topo = meshes.topology.MeshTopology(cells=cells3D, dimension=3)
    def test_attributes(self):
        # dimension and number of vertices per entity (vertex..cell)
        assert self.topo._dimension == 3
        assert np.all(self.topo._n_vertices == [1, 2, 3, 4])
    def test_incidence_3_0_and_0_3(self):
        # cell -> vertex incidence (2 tetrahedra, 5 vertices) ...
        assert np.all(self.topo.get_connectivity(3,0).toarray()
                      == np.array([[1,1,1,0,1],
                                   [1,0,1,1,1]]))
        # ... and vertex -> cell (vertices 1, 3, 5 belong to both cells)
        assert np.all(self.topo.get_connectivity(0,3).toarray()
                      == np.array([[1,1],
                                   [1,0],
                                   [1,1],
                                   [0,1],
                                   [1,1]]))
    def test_incidence_2_0_and_0_2(self):
        # face -> vertex incidence (7 triangular faces) ...
        assert np.all(self.topo.get_connectivity(2,0).toarray()
                      == np.array([[1,1,1,0,0],
                                   [1,1,0,0,1],
                                   [1,0,1,1,0],
                                   [1,0,1,0,1],
                                   [1,0,0,1,1],
                                   [0,1,1,0,1],
                                   [0,0,1,1,1]]))
        # ... and vertex -> face
        assert np.all(self.topo.get_connectivity(0,2).toarray()
                      == np.array([[1,1,1,1,1,0,0],
                                   [1,1,0,0,0,1,0],
                                   [1,0,1,1,0,1,1],
                                   [0,0,1,0,1,0,1],
                                   [0,1,0,1,1,1,1]]))
    def test_incidence_1_0_and_0_1(self):
        # edge -> vertex incidence (9 edges) ...
        assert np.all(self.topo.get_connectivity(1,0).toarray()
                      == np.array([[1,1,0,0,0],
                                   [1,0,1,0,0],
                                   [1,0,0,1,0],
                                   [1,0,0,0,1],
                                   [0,1,1,0,0],
                                   [0,1,0,0,1],
                                   [0,0,1,1,0],
                                   [0,0,1,0,1],
                                   [0,0,0,1,1]]))
        # ... and vertex -> edge
        assert np.all(self.topo.get_connectivity(0,1).toarray()
                      == np.array([[1,1,1,1,0,0,0,0,0],
                                   [1,0,0,0,1,1,0,0,0],
                                   [0,1,0,0,1,0,1,1,0],
                                   [0,0,1,0,0,0,1,0,1],
                                   [0,0,0,1,0,1,0,1,1]]))
if __name__ == '__main__':
    # When run directly (rather than via pytest), drop into an interactive
    # IPython shell so the fixtures above can be explored by hand.
    # Requires IPython to be installed.
    from IPython import embed as IPS
    IPS()
| 36.802632
| 67
| 0.342987
| 1,088
| 8,391
| 2.550551
| 0.05239
| 0.10018
| 0.075676
| 0.054775
| 0.833874
| 0.817297
| 0.803243
| 0.783423
| 0.664865
| 0.627387
| 0
| 0.156799
| 0.490764
| 8,391
| 227
| 68
| 36.964758
| 0.492628
| 0.039685
| 0
| 0.377246
| 0
| 0
| 0.000996
| 0
| 0
| 0
| 0
| 0
| 0.179641
| 1
| 0.101796
| false
| 0
| 0.023952
| 0
| 0.179641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9e9e26044eba959869c3dad33521581589e496a1
| 114
|
py
|
Python
|
vhoster/cli/__init__.py
|
GerardBalaoro/VHoster
|
991a10f30308103d7b187c8d5dba636f0e14b669
|
[
"MIT"
] | 2
|
2020-10-30T12:02:21.000Z
|
2020-12-11T23:42:12.000Z
|
vhoster/cli/__init__.py
|
GerardBalaoro/VHoster
|
991a10f30308103d7b187c8d5dba636f0e14b669
|
[
"MIT"
] | null | null | null |
vhoster/cli/__init__.py
|
GerardBalaoro/VHoster
|
991a10f30308103d7b187c8d5dba636f0e14b669
|
[
"MIT"
] | null | null | null |
"""Command Line Interface"""
from .core import *
from .site import *
from .config import *
from .services import *
| 22.8
| 28
| 0.719298
| 15
| 114
| 5.466667
| 0.6
| 0.365854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 114
| 5
| 29
| 22.8
| 0.854167
| 0.192982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7b90a1ab3033703f4758ac0994cb6b6d6c162b4b
| 155
|
py
|
Python
|
solutions/python3/293.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/293.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/293.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
class Solution:
    def generatePossibleNextMoves(self, s):
        """Return every state reachable by flipping one "++" pair to "--".

        Scans each adjacent pair in *s*; whenever both characters are
        '+', records a copy of the string with that pair replaced.
        Results are ordered by flip position, matching the original.
        """
        moves = []
        for idx in range(len(s) - 1):
            if s[idx] == "+" and s[idx + 1] == "+":
                moves.append(s[:idx] + "--" + s[idx + 2:])
        return moves
| 51.666667
| 95
| 0.516129
| 25
| 155
| 3.2
| 0.6
| 0.1
| 0.075
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.245161
| 155
| 3
| 95
| 51.666667
| 0.65812
| 0
| 0
| 0
| 1
| 0
| 0.019231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7ba574977a2fe82549fd52a48bfa1b4330c8c575
| 32
|
py
|
Python
|
test/output/011.py
|
EliRibble/pyfmt
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
[
"MIT"
] | null | null | null |
test/output/011.py
|
EliRibble/pyfmt
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
[
"MIT"
] | null | null | null |
test/output/011.py
|
EliRibble/pyfmt
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
| 16
| 31
| 0.875
| 5
| 32
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7baa30029056c36bdf044b8018674d3d2750c3f6
| 118
|
py
|
Python
|
utils/tensor_utils.py
|
Stelath/geoguessr-ai
|
08f5ae7ca8d1e50d586ee66222814589f4095a6d
|
[
"MIT"
] | null | null | null |
utils/tensor_utils.py
|
Stelath/geoguessr-ai
|
08f5ae7ca8d1e50d586ee66222814589f4095a6d
|
[
"MIT"
] | null | null | null |
utils/tensor_utils.py
|
Stelath/geoguessr-ai
|
08f5ae7ca8d1e50d586ee66222814589f4095a6d
|
[
"MIT"
] | null | null | null |
import torch
def round_tensor(tensor, decimals=4):
    """Round *tensor* element-wise to *decimals* decimal places.

    Scales up by 10**decimals, rounds to the nearest integer, and
    scales back down — the same computation as before, with the scale
    factor evaluated once.
    """
    scale = 10 ** decimals
    return torch.round(tensor * scale) / scale
| 29.5
| 66
| 0.70339
| 16
| 118
| 5.125
| 0.5625
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050505
| 0.161017
| 118
| 4
| 66
| 29.5
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
7bcd28f5615c722fe263714b607adff2637af99d
| 234
|
py
|
Python
|
orator/schema/grammars/__init__.py
|
HeathLee/sorator
|
271668865bf0d039908643e3df9b98c966b9d956
|
[
"MIT"
] | null | null | null |
orator/schema/grammars/__init__.py
|
HeathLee/sorator
|
271668865bf0d039908643e3df9b98c966b9d956
|
[
"MIT"
] | null | null | null |
orator/schema/grammars/__init__.py
|
HeathLee/sorator
|
271668865bf0d039908643e3df9b98c966b9d956
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .grammar import SchemaGrammar # noqa
from .sqlite_grammar import SQLiteSchemaGrammar # noqa
from .postgres_grammar import PostgresSchemaGrammar # noqa
from .mysql_grammar import MySQLSchemaGrammar # noqa
| 33.428571
| 58
| 0.799145
| 26
| 234
| 7.076923
| 0.538462
| 0.282609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004902
| 0.128205
| 234
| 6
| 59
| 39
| 0.897059
| 0.175214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c8a9ac03016a0b7c7bce32e3d85f02927fffc51c
| 83
|
py
|
Python
|
lib/python2.7/site-packages/weblib/system.py
|
Jatin-Nagpal/FlaskApp
|
e94184059810e22c82db812c658407a4fcbba4b5
|
[
"MIT"
] | 22
|
2015-04-18T19:07:17.000Z
|
2021-02-19T07:30:09.000Z
|
lib/python2.7/site-packages/weblib/system.py
|
Jatin-Nagpal/FlaskApp
|
e94184059810e22c82db812c658407a4fcbba4b5
|
[
"MIT"
] | 7
|
2015-05-18T06:39:39.000Z
|
2022-03-01T15:06:29.000Z
|
lib/python2.7/site-packages/weblib/system.py
|
Jatin-Nagpal/FlaskApp
|
e94184059810e22c82db812c658407a4fcbba4b5
|
[
"MIT"
] | 10
|
2015-04-27T11:23:59.000Z
|
2021-02-19T07:30:12.000Z
|
def check_ares_support():
    """Report whether the installed pycurl build uses the c-ares resolver.

    Inspects pycurl's version string for the 'c-ares' marker; the import
    stays local so merely loading this module does not require pycurl.
    """
    import pycurl

    version_string = pycurl.version
    return version_string.find('c-ares') > -1
| 16.6
| 37
| 0.710843
| 12
| 83
| 4.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204819
| 83
| 4
| 38
| 20.75
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c8e99fe28862e8a2bbc3a820038cd2d47ced7c31
| 11,889
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/plandefinition_target.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/plandefinition_target.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/plandefinition_target.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class PlanDefinition_TargetSchema:
    """
    This resource allows for the definition of various types of plans as a
    sharable, consumable, and executable artifact. The resource is general enough
    to support the description of a broad range of clinical artifacts such as
    clinical decision support rules, order sets and protocols.
    """

    # NOTE: the mutable list defaults below are intentional in this
    # generated code (hence the PyDefaultArgument suppression); callers
    # are not expected to mutate them.
    # noinspection PyDefaultArgument
    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: List[str] = [],
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = [
            "valueBoolean",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInteger",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueQuantity",
        ],
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
    ) -> Union[StructType, DataType]:
        """
        This resource allows for the definition of various types of plans as a
        sharable, consumable, and executable artifact. The resource is general enough
        to support the description of a broad range of clinical artifacts such as
        clinical decision support rules, order sets and protocols.
        id: unique id for the element within a resource (for internal references). This
        may be any string value that does not contain spaces.
        extension: May be used to represent additional information that is not part of the basic
        definition of the element. In order to make the use of extensions safe and
        manageable, there is a strict set of governance applied to the definition and
        use of extensions. Though any implementer is allowed to define an extension,
        there is a set of requirements that SHALL be met as part of the definition of
        the extension.
        measure: The parameter whose value is to be tracked, e.g. body weigth, blood pressure,
        or hemoglobin A1c level.
        detailQuantity: The target value of the measure to be achieved to signify fulfillment of the
        goal, e.g. 150 pounds or 7.0%. Either the high or low or both values of the
        range can be specified. Whan a low value is missing, it indicates that the
        goal is achieved at any value at or below the high value. Similarly, if the
        high value is missing, it indicates that the goal is achieved at any value at
        or above the low value.
        detailRange: The target value of the measure to be achieved to signify fulfillment of the
        goal, e.g. 150 pounds or 7.0%. Either the high or low or both values of the
        range can be specified. Whan a low value is missing, it indicates that the
        goal is achieved at any value at or below the high value. Similarly, if the
        high value is missing, it indicates that the goal is achieved at any value at
        or above the low value.
        detailCodeableConcept: The target value of the measure to be achieved to signify fulfillment of the
        goal, e.g. 150 pounds or 7.0%. Either the high or low or both values of the
        range can be specified. Whan a low value is missing, it indicates that the
        goal is achieved at any value at or below the high value. Similarly, if the
        high value is missing, it indicates that the goal is achieved at any value at
        or above the low value.
        due: Indicates the timeframe after the start of the goal in which the goal should
        be met.
        """
        # Function-local imports avoid circular imports between the many
        # generated schema modules.
        from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
            CodeableConceptSchema,
        )
        from spark_fhir_schemas.stu3.complex_types.quantity import QuantitySchema
        from spark_fhir_schemas.stu3.complex_types.range import RangeSchema
        from spark_fhir_schemas.stu3.complex_types.duration import DurationSchema

        # Recursion/nesting guard: once this type has appeared in the
        # ancestry max_recursion_limit times, or nesting is too deep,
        # degrade to a minimal struct containing only the "id" column.
        if (
            max_recursion_limit
            and nesting_list.count("PlanDefinition_Target") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["PlanDefinition_Target"]
        schema = StructType(
            [
                # unique id for the element within a resource (for internal references). This
                # may be any string value that does not contain spaces.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the element. In order to make the use of extensions safe and
                # manageable, there is a strict set of governance applied to the definition and
                # use of extensions. Though any implementer is allowed to define an extension,
                # there is a set of requirements that SHALL be met as part of the definition of
                # the extension.
                # NOTE(review): extension_depth is deliberately NOT incremented
                # here (unlike every other nested field below) — presumably
                # because this column IS the extension list itself; confirm
                # against the schema generator.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The parameter whose value is to be tracked, e.g. body weigth, blood pressure,
                # or hemoglobin A1c level.
                StructField(
                    "measure",
                    CodeableConceptSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # The target value of the measure to be achieved to signify fulfillment of the
                # goal, e.g. 150 pounds or 7.0%. Either the high or low or both values of the
                # range can be specified. Whan a low value is missing, it indicates that the
                # goal is achieved at any value at or below the high value. Similarly, if the
                # high value is missing, it indicates that the goal is achieved at any value at
                # or above the low value.
                StructField(
                    "detailQuantity",
                    QuantitySchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # The target value of the measure to be achieved to signify fulfillment of the
                # goal, e.g. 150 pounds or 7.0%. Either the high or low or both values of the
                # range can be specified. Whan a low value is missing, it indicates that the
                # goal is achieved at any value at or below the high value. Similarly, if the
                # high value is missing, it indicates that the goal is achieved at any value at
                # or above the low value.
                StructField(
                    "detailRange",
                    RangeSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # The target value of the measure to be achieved to signify fulfillment of the
                # goal, e.g. 150 pounds or 7.0%. Either the high or low or both values of the
                # range can be specified. Whan a low value is missing, it indicates that the
                # goal is achieved at any value at or below the high value. Similarly, if the
                # high value is missing, it indicates that the goal is achieved at any value at
                # or above the low value.
                StructField(
                    "detailCodeableConcept",
                    CodeableConceptSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Indicates the timeframe after the start of the goal in which the goal should
                # be met.
                StructField(
                    "due",
                    DurationSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
            ]
        )
        # When extensions are excluded, keep the "extension" column present
        # but replace its nested schema with a plain string so the overall
        # struct shape stays stable for consumers.
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| 51.24569
| 107
| 0.579359
| 1,338
| 11,889
| 5.006726
| 0.152466
| 0.051948
| 0.033587
| 0.028661
| 0.794298
| 0.786685
| 0.786685
| 0.759815
| 0.754142
| 0.754142
| 0
| 0.007282
| 0.37623
| 11,889
| 231
| 108
| 51.467532
| 0.896036
| 0.402809
| 0
| 0.521429
| 1
| 0
| 0.040883
| 0.009332
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007143
| false
| 0
| 0.05
| 0
| 0.078571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
74039dd5979ccc4ff4ff1300a81744549ed85a50
| 29
|
py
|
Python
|
clynmut/__init__.py
|
jeffhsu3/ClynMut
|
c215ea2f8263016a249d6556c762410b39165546
|
[
"MIT"
] | 18
|
2021-03-12T20:04:57.000Z
|
2022-01-11T03:16:31.000Z
|
clynmut/__init__.py
|
jeffhsu3/ClynMut
|
c215ea2f8263016a249d6556c762410b39165546
|
[
"MIT"
] | null | null | null |
clynmut/__init__.py
|
jeffhsu3/ClynMut
|
c215ea2f8263016a249d6556c762410b39165546
|
[
"MIT"
] | 2
|
2021-03-16T18:41:12.000Z
|
2021-06-04T02:03:01.000Z
|
from clynmut.clynmut import *
| 29
| 29
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cdc4d8df821da4eaa272c38d856f82fce45ca966
| 30
|
py
|
Python
|
code_summary/onmt/bin/__init__.py
|
Nrgeup/review_assistant
|
bf03d62773501b84069afcc8b3da66d6d7829218
|
[
"Apache-2.0"
] | 1
|
2020-01-17T00:41:51.000Z
|
2020-01-17T00:41:51.000Z
|
code_summary/onmt/bin/__init__.py
|
Nrgeup/review_assistant
|
bf03d62773501b84069afcc8b3da66d6d7829218
|
[
"Apache-2.0"
] | null | null | null |
code_summary/onmt/bin/__init__.py
|
Nrgeup/review_assistant
|
bf03d62773501b84069afcc8b3da66d6d7829218
|
[
"Apache-2.0"
] | null | null | null |
from code_summary import onmt
| 15
| 29
| 0.866667
| 5
| 30
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cdc978204c202ab5d5fdd20bd95c516cb51e35c3
| 25
|
py
|
Python
|
gypse/__init__.py
|
aeroxis/gypsy
|
bfcdb64e9ca61fac6a2b41780b11e87c7df759b2
|
[
"MIT"
] | 3
|
2019-04-10T22:02:36.000Z
|
2020-12-13T21:29:28.000Z
|
gypse/__init__.py
|
aeroxis/gypsy
|
bfcdb64e9ca61fac6a2b41780b11e87c7df759b2
|
[
"MIT"
] | null | null | null |
gypse/__init__.py
|
aeroxis/gypsy
|
bfcdb64e9ca61fac6a2b41780b11e87c7df759b2
|
[
"MIT"
] | null | null | null |
__version__ = "00.0.0.0"
| 12.5
| 24
| 0.64
| 5
| 25
| 2.4
| 0.6
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 0.12
| 25
| 1
| 25
| 25
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a810b613e673d036490a1cd9fdcdb5a455cafdaf
| 103
|
py
|
Python
|
app/models/k8s_resource/io/k8s/api/apps/__init__.py
|
zephyrxvxx7/KubeZephyr-Backend
|
242410bc236e1f7204c24d635eb3346b0c256dc8
|
[
"MIT"
] | 2
|
2021-04-25T01:49:45.000Z
|
2021-11-25T09:10:40.000Z
|
app/models/k8s_resource/io/k8s/apimachinery/pkg/apis/meta/__init__.py
|
zephyrxvxx7/KubeZephyr-Backend
|
242410bc236e1f7204c24d635eb3346b0c256dc8
|
[
"MIT"
] | null | null | null |
app/models/k8s_resource/io/k8s/apimachinery/pkg/apis/meta/__init__.py
|
zephyrxvxx7/KubeZephyr-Backend
|
242410bc236e1f7204c24d635eb3346b0c256dc8
|
[
"MIT"
] | null | null | null |
# generated by datamodel-codegen:
# filename: swagger.json
# timestamp: 2021-03-29T08:55:23+00:00
| 25.75
| 40
| 0.718447
| 15
| 103
| 4.933333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 0.145631
| 103
| 3
| 41
| 34.333333
| 0.636364
| 0.932039
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b552f9b13868fe566e3371852979afc588234dce
| 64
|
py
|
Python
|
multilingual_t5/baseline_ta/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
multilingual_t5/baseline_ta/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
multilingual_t5/baseline_ta/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
"""baseline_ta dataset."""
from .baseline_ta import BaselineTa
| 16
| 35
| 0.765625
| 8
| 64
| 5.875
| 0.75
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 3
| 36
| 21.333333
| 0.824561
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b58315fe1a2b68d34f59c37c70e8fbbacda9271c
| 47
|
py
|
Python
|
scripts/portal/in_cygnusGarden.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/portal/in_cygnusGarden.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/portal/in_cygnusGarden.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# 271030600
# Portal script for map 271030600 (Cygnus Garden entrance).
# Warp the player to map 271040000 — the second argument is presumably
# the target portal index; confirm against the script-manager API.
sm.warp(271040000, 5)
# Release the script manager so the client regains control.
sm.dispose()
| 11.75
| 21
| 0.723404
| 7
| 47
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.452381
| 0.106383
| 47
| 3
| 22
| 15.666667
| 0.357143
| 0.191489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a98b737a1373777fbb01ab298b247bc51454fb60
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/masonry/__init__.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/masonry/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/masonry/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/2c/01/5e/0dc808506426b1f51c286ea153e1fd17e20ffa8fbc785c857b3ee15787
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8d620bec9d775d32398986febe26c308f4849774
| 177
|
py
|
Python
|
MySchool/Dashboard/views.py
|
Bhavesh052/MySchool
|
7902ffd23b7ea82b3eb79943fe6fe80e02ec2580
|
[
"MIT"
] | null | null | null |
MySchool/Dashboard/views.py
|
Bhavesh052/MySchool
|
7902ffd23b7ea82b3eb79943fe6fe80e02ec2580
|
[
"MIT"
] | null | null | null |
MySchool/Dashboard/views.py
|
Bhavesh052/MySchool
|
7902ffd23b7ea82b3eb79943fe6fe80e02ec2580
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def Courses(request):
    """Return a hard-coded placeholder page for the courses view.

    Ignores *request* entirely and always serves the same HTML snippet.
    """
    placeholder_html = '<h1>Ths is my Home Page</h1>'
    return HttpResponse(placeholder_html)
| 29.5
| 55
| 0.768362
| 26
| 177
| 5.230769
| 0.807692
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.141243
| 177
| 6
| 55
| 29.5
| 0.881579
| 0.129944
| 0
| 0
| 0
| 0
| 0.183007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8d714e25c9a6296ebbf7e275a3de6d321bfb8fb4
| 216
|
py
|
Python
|
pyclue/tf1/models/engine/__init__.py
|
CLUEbenchmark/PyCLUE
|
c16af32dd7dc195e77f352b6b3d2d5b963e193ba
|
[
"MIT"
] | 122
|
2019-12-04T14:42:34.000Z
|
2022-03-01T08:12:30.000Z
|
pyclue/tf1/tasks/classification/multi_label/__init__.py
|
CLUEbenchmark/PyCLUE
|
c16af32dd7dc195e77f352b6b3d2d5b963e193ba
|
[
"MIT"
] | 9
|
2020-06-05T00:42:13.000Z
|
2022-02-09T23:39:31.000Z
|
pyclue/tf1/tasks/sentence_pair/siamese/__init__.py
|
CLUEbenchmark/PyCLUE
|
c16af32dd7dc195e77f352b6b3d2d5b963e193ba
|
[
"MIT"
] | 12
|
2019-12-06T01:58:31.000Z
|
2021-12-22T09:51:13.000Z
|
#!/usr/bin/python3
"""
@Author: Liu Shaoweihua
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 16.615385
| 40
| 0.740741
| 25
| 216
| 5.84
| 0.72
| 0.205479
| 0.328767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005556
| 0.166667
| 216
| 12
| 41
| 18
| 0.805556
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d9d97b1564f2c5e10dd73e6bf6ae9eb2d4399da
| 22
|
py
|
Python
|
orangecontrib/recommendation/evaluation/__init__.py
|
robertcv/orange3-recommendation
|
db421c32f85f123b1f3058865438df1b996772cd
|
[
"BSD-2-Clause"
] | 22
|
2016-09-11T11:40:17.000Z
|
2019-07-27T21:45:21.000Z
|
orangecontrib/recommendation/evaluation/__init__.py
|
robertcv/orange3-recommendation
|
db421c32f85f123b1f3058865438df1b996772cd
|
[
"BSD-2-Clause"
] | 14
|
2016-08-16T22:19:31.000Z
|
2020-12-17T00:03:34.000Z
|
orangecontrib/recommendation/evaluation/__init__.py
|
robertcv/orange3-recommendation
|
db421c32f85f123b1f3058865438df1b996772cd
|
[
"BSD-2-Clause"
] | 19
|
2016-08-16T20:06:57.000Z
|
2021-09-16T11:42:11.000Z
|
from .ranking import *
| 22
| 22
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a5c3edf7e2af07b8820adc1cf58a393f9d57d03f
| 39
|
py
|
Python
|
app/models/__init__.py
|
archit47/Cricket-Run-Chase-Simulator
|
9ccdc6ec459587ed6a8b4d742f15f4d6f72fefb9
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
archit47/Cricket-Run-Chase-Simulator
|
9ccdc6ec459587ed6a8b4d742f15f4d6f72fefb9
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
archit47/Cricket-Run-Chase-Simulator
|
9ccdc6ec459587ed6a8b4d742f15f4d6f72fefb9
|
[
"MIT"
] | null | null | null |
from app.models.players import Player
| 13
| 37
| 0.820513
| 6
| 39
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 2
| 38
| 19.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a5cf7fc19dc004e446539b34cce6621cb081e391
| 226
|
py
|
Python
|
examples/property_prediction/MTL/model/__init__.py
|
siboehm/dgl-lifesci
|
f8a176414b21b72c5ca1f8c7eb8d64702432ae24
|
[
"Apache-2.0"
] | 390
|
2020-06-05T13:16:18.000Z
|
2022-03-31T07:36:34.000Z
|
examples/property_prediction/MTL/model/__init__.py
|
siboehm/dgl-lifesci
|
f8a176414b21b72c5ca1f8c7eb8d64702432ae24
|
[
"Apache-2.0"
] | 71
|
2020-06-12T05:26:56.000Z
|
2022-03-29T06:26:39.000Z
|
examples/property_prediction/MTL/model/__init__.py
|
siboehm/dgl-lifesci
|
f8a176414b21b72c5ca1f8c7eb8d64702432ae24
|
[
"Apache-2.0"
] | 113
|
2020-06-08T18:48:18.000Z
|
2022-03-22T01:16:26.000Z
|
from .gcn import GCNRegressor, GCNRegressorBypass
from .gat import GATRegressor, GATRegressorBypass
from .mpnn import MPNNRegressor, MPNNRegressorBypass
from .attentivefp import AttentiveFPRegressor, AttentiveFPRegressorBypass
| 56.5
| 73
| 0.880531
| 20
| 226
| 9.95
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084071
| 226
| 4
| 73
| 56.5
| 0.961353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a5e4d2c172658de89500c497025ae8a81d6ceab5
| 90
|
py
|
Python
|
pollbot/display/__init__.py
|
tigerdar004/RweddingPoll
|
8617c63798dbebe6aee3ea7bd61d995a588fc048
|
[
"MIT"
] | 112
|
2019-06-11T17:52:57.000Z
|
2022-03-18T00:05:21.000Z
|
pollbot/display/__init__.py
|
tigerdar004/RweddingPoll
|
8617c63798dbebe6aee3ea7bd61d995a588fc048
|
[
"MIT"
] | 91
|
2019-05-28T11:33:40.000Z
|
2022-02-27T12:12:07.000Z
|
pollbot/display/__init__.py
|
tigerdar004/RweddingPoll
|
8617c63798dbebe6aee3ea7bd61d995a588fc048
|
[
"MIT"
] | 69
|
2019-07-10T16:58:06.000Z
|
2022-03-30T22:09:44.000Z
|
# Import for easier re-export
from .poll import * # noqa
from .settings import * # noqa
| 22.5
| 31
| 0.7
| 13
| 90
| 4.846154
| 0.692308
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211111
| 90
| 3
| 32
| 30
| 0.887324
| 0.411111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
938c1a29e3112a81c9ce52c6005bb5a287a585b3
| 4,210
|
py
|
Python
|
analysis/eval_phys_data.py
|
BoyuanChen/neural-state-variables
|
10483d93ac8c006f3786c434fb57d70d9ab465ec
|
[
"MIT"
] | 17
|
2021-12-29T16:48:46.000Z
|
2022-03-25T01:57:13.000Z
|
analysis/eval_phys_data.py
|
BoyuanChen/neural-state-variables
|
10483d93ac8c006f3786c434fb57d70d9ab465ec
|
[
"MIT"
] | null | null | null |
analysis/eval_phys_data.py
|
BoyuanChen/neural-state-variables
|
10483d93ac8c006f3786c434fb57d70d9ab465ec
|
[
"MIT"
] | 1
|
2022-01-22T11:26:09.000Z
|
2022-01-22T11:26:09.000Z
|
import os
import sys
import cv2
import numpy as np
from tqdm import tqdm
def eval_phys_data_single_pendulum(data_filepath, num_vids, num_frms, save_path):
from eval_phys_single_pendulum import eval_physics, phys_vars_list
phys = {p_var:[] for p_var in phys_vars_list}
for n in tqdm(range(num_vids)):
seq_filepath = os.path.join(data_filepath, str(n))
frames = []
for p in range(num_frms):
frame_p = cv2.imread(os.path.join(seq_filepath, str(p)+'.png'))
frames.append(frame_p)
phys_tmp = eval_physics(frames)
for p_var in phys_vars_list:
phys[p_var].append(phys_tmp[p_var])
for p_var in phys_vars_list:
phys[p_var] = np.array(phys[p_var])
np.save(save_path, phys)
def eval_phys_data_double_pendulum(data_filepath, num_vids, num_frms, save_path):
from eval_phys_double_pendulum import eval_physics, phys_vars_list
phys = {p_var:[] for p_var in phys_vars_list}
for n in tqdm(range(num_vids)):
seq_filepath = os.path.join(data_filepath, str(n))
frames = []
for p in range(num_frms):
frame_p = cv2.imread(os.path.join(seq_filepath, str(p)+'.png'))
frames.append(frame_p)
phys_tmp = eval_physics(frames)
for p_var in phys_vars_list:
phys[p_var].append(phys_tmp[p_var])
for p_var in phys_vars_list:
phys[p_var] = np.array(phys[p_var])
# remove outliers
thresh_1 = np.nanpercentile(np.abs(phys['vel_theta_1']), 98)
thresh_2 = np.nanpercentile(np.abs(phys['vel_theta_2']), 98)
for n in range(num_vids):
for p in range(num_frms):
if (not np.isnan(phys['vel_theta_1'][n, p]) and np.abs(phys['vel_theta_1'][n, p]) >= thresh_1) \
or (not np.isnan(phys['vel_theta_2'][n, p]) and np.abs(phys['vel_theta_2'][n, p]) >= thresh_2):
phys['vel_theta_1'][n, p] = np.nan
phys['vel_theta_2'][n, p] = np.nan
phys['kinetic energy'][n, p] = np.nan
phys['total energy'][n, p] = np.nan
np.save(save_path, phys)
def eval_phys_data_elastic_pendulum(data_filepath, num_vids, num_frms, save_path):
from eval_phys_elastic_pendulum import eval_physics, phys_vars_list
phys = {p_var:[] for p_var in phys_vars_list}
for n in tqdm(range(num_vids)):
seq_filepath = os.path.join(data_filepath, str(n))
frames = []
for p in range(num_frms):
frame_p = cv2.imread(os.path.join(seq_filepath, str(p)+'.png'))
frames.append(frame_p)
phys_tmp = eval_physics(frames)
for p_var in phys_vars_list:
phys[p_var].append(phys_tmp[p_var])
for p_var in phys_vars_list:
phys[p_var] = np.array(phys[p_var])
# remove outliers
thresh_1 = np.nanpercentile(np.abs(phys['vel_theta_1']), 98)
thresh_2 = np.nanpercentile(np.abs(phys['vel_theta_2']), 98)
thresh_z = np.nanpercentile(np.abs(phys['vel_z']), 98)
for n in range(num_vids):
for p in range(num_frms):
if (not np.isnan(phys['vel_theta_1'][n, p]) and np.abs(phys['vel_theta_1'][n, p]) >= thresh_1) \
or (not np.isnan(phys['vel_theta_2'][n, p]) and np.abs(phys['vel_theta_2'][n, p]) >= thresh_2) \
or (not np.isnan(phys['vel_z'][n, p]) and np.abs(phys['vel_z'][n, p]) >= thresh_z):
phys['vel_theta_1'][n, p] = np.nan
phys['vel_theta_2'][n, p] = np.nan
phys['vel_z'][n, p] = np.nan
phys['kinetic energy'][n, p] = np.nan
phys['total energy'][n, p] = np.nan
np.save(save_path, phys)
if __name__ == '__main__':
dataset = str(sys.argv[1])
data_filepath = str(sys.argv[2])
save_path = os.path.join(data_filepath, 'phys_vars.npy')
if dataset == 'single_pendulum':
eval_phys_data_single_pendulum(data_filepath, 1200, 60, save_path)
elif dataset == 'double_pendulum':
eval_phys_data_double_pendulum(data_filepath, 1100, 60, save_path)
elif dataset == 'elastic_pendulum':
eval_phys_data_elastic_pendulum(data_filepath, 1200, 60, save_path)
else:
assert False, 'Unknown system...'
| 40.480769
| 108
| 0.628979
| 682
| 4,210
| 3.589443
| 0.111437
| 0.039216
| 0.078431
| 0.04902
| 0.898284
| 0.873775
| 0.853758
| 0.775735
| 0.775735
| 0.756127
| 0
| 0.018052
| 0.236817
| 4,210
| 104
| 109
| 40.480769
| 0.743853
| 0.007363
| 0
| 0.670588
| 0
| 0
| 0.082356
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 1
| 0.035294
| false
| 0
| 0.094118
| 0
| 0.129412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9e13104a3aeae1a35ec0921ef99ae6343e2913b4
| 110
|
py
|
Python
|
blue_st_sdk/features/audio/adpcm/__init__.py
|
cchangeur/BlueSTSDK_Python
|
e5c6e4bc5a58680bad0d867633dd9d92012b9baf
|
[
"BSD-3-Clause"
] | 43
|
2019-03-08T08:03:19.000Z
|
2022-01-20T11:51:11.000Z
|
blue_st_sdk/features/audio/adpcm/__init__.py
|
cchangeur/BlueSTSDK_Python
|
e5c6e4bc5a58680bad0d867633dd9d92012b9baf
|
[
"BSD-3-Clause"
] | 24
|
2019-04-01T20:50:40.000Z
|
2022-03-16T17:00:54.000Z
|
blue_st_sdk/features/audio/adpcm/__init__.py
|
cchangeur/BlueSTSDK_Python
|
e5c6e4bc5a58680bad0d867633dd9d92012b9baf
|
[
"BSD-3-Clause"
] | 19
|
2019-02-20T08:41:20.000Z
|
2021-11-21T11:39:50.000Z
|
__all__ = [
'feature_audio_adpcm', \
'feature_audio_adpcm_sync', \
'bv_audio_sync_manager'
]
| 18.333333
| 34
| 0.645455
| 12
| 110
| 4.916667
| 0.583333
| 0.40678
| 0.576271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236364
| 110
| 5
| 35
| 22
| 0.702381
| 0
| 0
| 0
| 0
| 0
| 0.609524
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f537028c163dd63737f7765089510e55cca4f3bc
| 200
|
py
|
Python
|
experiments/training/__init__.py
|
selflein/manifold-flow
|
2cc91c7acf61c8b4df07a940f0311ee93c39f0c7
|
[
"MIT"
] | 199
|
2020-03-31T22:45:31.000Z
|
2022-03-18T14:57:23.000Z
|
experiments/training/__init__.py
|
selflein/manifold-flow
|
2cc91c7acf61c8b4df07a940f0311ee93c39f0c7
|
[
"MIT"
] | 4
|
2020-04-04T18:45:33.000Z
|
2022-01-05T03:16:07.000Z
|
experiments/training/__init__.py
|
selflein/manifold-flow
|
2cc91c7acf61c8b4df07a940f0311ee93c39f0c7
|
[
"MIT"
] | 25
|
2020-04-01T11:04:11.000Z
|
2022-03-30T17:21:44.000Z
|
from . import losses
from .trainer import ForwardTrainer, ConditionalForwardTrainer, AdversarialTrainer, ConditionalAdversarialTrainer, SCANDALForwardTrainer
from .alternate import AlternatingTrainer
| 50
| 136
| 0.885
| 15
| 200
| 11.8
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 200
| 3
| 137
| 66.666667
| 0.961957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f56481aa8feb289328d539c08da34d2c51019e5f
| 37
|
py
|
Python
|
study.py
|
GongBenWuZang123456/go_study
|
048b1d4124d3b9784668d1911650be29f3930cbd
|
[
"MIT"
] | null | null | null |
study.py
|
GongBenWuZang123456/go_study
|
048b1d4124d3b9784668d1911650be29f3930cbd
|
[
"MIT"
] | null | null | null |
study.py
|
GongBenWuZang123456/go_study
|
048b1d4124d3b9784668d1911650be29f3930cbd
|
[
"MIT"
] | null | null | null |
sum = 1
nnn = 2
mmm = 666666666
| 4.111111
| 15
| 0.540541
| 6
| 37
| 3.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.478261
| 0.378378
| 37
| 8
| 16
| 4.625
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f57fecf4e45ef37fa99cd8f2c73cf05ae15afdd7
| 13,706
|
py
|
Python
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/SOCRadar/Integrations/SOCRadarThreatFusion/SOCRadarThreatFusion_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import io
import pytest
from CommonServerPython import DemistoException, FeedIndicatorType, CommandResults
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
SOCRADAR_API_ENDPOINT = 'https://platform.socradar.com/api'
CALCULATE_DBOT_SCORE_INPUTS = [
(900, 3),
(800, 2),
(450, 2),
(300, 1),
(100, 1),
(0, 0),
]
def test_test_module(requests_mock):
"""Tests the test_module validation command.
"""
from SOCRadarThreatFusion import Client, test_module
mock_socradar_api_key = "APIKey"
suffix = f'threat/analysis/check/auth?key={mock_socradar_api_key}'
mock_response = util_load_json('test_data/check_auth_response.json')
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
response = test_module(client)
assert response == 'ok'
def test_test_module_handles_authorization_error(requests_mock):
"""Tests the test_module validation command authorization error.
"""
from SOCRadarThreatFusion import Client, test_module, MESSAGES
mock_socradar_api_key = "WrongAPIKey"
suffix = f'threat/analysis/check/auth?key={mock_socradar_api_key}'
mock_response = util_load_json('test_data/check_auth_response_auth_error.json')
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response, status_code=401)
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(DemistoException, match=MESSAGES['AUTHORIZATION_ERROR']):
test_module(client)
def test_ip_command(requests_mock):
"""Tests the ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, ip_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_ip_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'ip': '1.1.1.1'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = ip_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_ip_expected_output.json')
expected_context = util_load_json('test_data/score_ip_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for IP: 1.1.1.1' in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_ip_command_handles_incorrect_entity_type():
"""Tests the ip_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, ip_command
mock_socradar_api_key = "APIKey"
mock_args = {'ip': 'INCORRECT IP ADDRESS'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
ip_command(
client=client,
args=mock_args,
)
def test_domain_command(requests_mock):
"""Tests the domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, domain_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_domain_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'domain': 'paloaltonetworks.com'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = domain_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_domain_expected_output.json')
expected_context = util_load_json('test_data/score_domain_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for domain: paloaltonetworks.com' in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_domain_command_handles_incorrect_entity_type():
"""Tests the domain_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, domain_command
mock_socradar_api_key = "APIKey"
mock_args = {'domain': 'INCORRECT DOMAIN'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
domain_command(
client=client,
args=mock_args,
)
def test_file_command(requests_mock):
"""Tests the file_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, file_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_hash_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'file': '3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = file_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_hash_expected_output.json')
expected_context = util_load_json('test_data/score_hash_expected_context_generic_command.json')
assert isinstance(result, list)
assert result != []
assert '### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792' \
in result[0].readable_output
assert result[0].outputs == expected_context
assert result[0].raw_response == expected_output
def test_file_command_handles_incorrect_entity_type():
"""Tests the file_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, file_command
mock_socradar_api_key = "APIKey"
mock_args = {'file': 'INCORRECT HASH'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
file_command(
client=client,
args=mock_args,
)
def test_score_ip(requests_mock):
"""Tests the score_ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_ip_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_ip_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'ip': '1.1.1.1'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_ip_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_ip_expected_output.json')
expected_context = util_load_json('test_data/score_ip_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for IP: 1.1.1.1' in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_ip_handles_incorrect_entity_type():
"""Tests the score_ip_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_ip_command
mock_socradar_api_key = "APIKey"
mock_args = {'ip': 'INCORRECT IP ADDRESS'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_ip_command(
client=client,
args=mock_args,
)
def test_score_domain(requests_mock):
"""Tests the score_domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_domain_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_domain_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'domain': 'paloaltonetworks.com'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_domain_command(
client=client,
args=mock_args,
)
expected_output = util_load_json('test_data/score_domain_expected_output.json')
expected_context = util_load_json('test_data/score_domain_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for domain: paloaltonetworks.com' in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_domain_handles_incorrect_entity_type():
"""Tests the score_domain_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_domain_command
mock_socradar_api_key = "APIKey"
mock_args = {'domain': 'INCORRECT DOMAIN'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_domain_command(
client=client,
args=mock_args,
)
def test_score_hash(requests_mock):
"""Tests the score_hash_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from SOCRadarThreatFusion import Client, score_hash_command
mock_socradar_api_key = "APIKey"
mock_response = util_load_json('test_data/score_hash_response.json')
suffix = 'threat/analysis'
requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}', json=mock_response)
mock_args = {'hash': '3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
result = score_hash_command(
client=client,
args=mock_args
)
expected_output = util_load_json('test_data/score_hash_expected_output.json')
expected_context = util_load_json('test_data/score_hash_expected_context.json')
assert isinstance(result, CommandResults)
assert '### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792' \
in result.readable_output
assert result.outputs == expected_context
assert result.raw_response == expected_output
def test_score_hash_handles_incorrect_entity_type():
"""Tests the score_hash_command function incorrect entity type error.
"""
from SOCRadarThreatFusion import Client, score_hash_command
mock_socradar_api_key = "APIKey"
mock_args = {'hash': 'INCORRECT HASH'}
client = Client(
base_url=SOCRADAR_API_ENDPOINT,
api_key=mock_socradar_api_key,
verify=False,
proxy=False
)
with pytest.raises(ValueError):
score_hash_command(
client=client,
args=mock_args,
)
@pytest.mark.parametrize('socradar_score, dbot_score', CALCULATE_DBOT_SCORE_INPUTS)
def test_calculate_dbot_score(socradar_score, dbot_score):
from SOCRadarThreatFusion import calculate_dbot_score
assert calculate_dbot_score(socradar_score) == dbot_score
def test_map_indicator_type():
from SOCRadarThreatFusion import map_indicator_type
assert FeedIndicatorType.IP == map_indicator_type('ipv4')
assert FeedIndicatorType.IPv6 == map_indicator_type('ipv6')
assert FeedIndicatorType.Domain == map_indicator_type('hostname')
assert FeedIndicatorType.File == map_indicator_type('hash')
assert None is map_indicator_type('IP')
assert None is map_indicator_type('invalid')
| 31.292237
| 121
| 0.721509
| 1,672
| 13,706
| 5.593301
| 0.076555
| 0.06234
| 0.048118
| 0.057742
| 0.895958
| 0.879491
| 0.85864
| 0.823567
| 0.80849
| 0.770317
| 0
| 0.01944
| 0.196848
| 13,706
| 437
| 122
| 31.363844
| 0.830124
| 0.140522
| 0
| 0.629758
| 0
| 0
| 0.192822
| 0.124122
| 0
| 0
| 0
| 0
| 0.121107
| 1
| 0.058824
| false
| 0
| 0.069204
| 0
| 0.131488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1926dfeb97d8f0f20d83a85e8229ffec681ec175
| 112
|
py
|
Python
|
pytextgears/__init__.py
|
khmurakami/pytextgears
|
a45722382d5cec1586e5aaeab354ecea9e506f91
|
[
"MIT"
] | 1
|
2021-05-19T04:45:06.000Z
|
2021-05-19T04:45:06.000Z
|
pytextgears/__init__.py
|
khmurakami/pytextgears
|
a45722382d5cec1586e5aaeab354ecea9e506f91
|
[
"MIT"
] | null | null | null |
pytextgears/__init__.py
|
khmurakami/pytextgears
|
a45722382d5cec1586e5aaeab354ecea9e506f91
|
[
"MIT"
] | null | null | null |
from .pytextgears import TextGear
from .utils import *
from .json_parser import *
from .error_handling import *
| 22.4
| 33
| 0.794643
| 15
| 112
| 5.8
| 0.6
| 0.229885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 112
| 4
| 34
| 28
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
192aa9b796fd97e660c4692f673f70ec94c8773f
| 5,354
|
py
|
Python
|
tests/test_spc.py
|
andrewheusser/quail
|
fce1152a3f7dc983f4a3143698fdc3e27f61d1d2
|
[
"MIT"
] | 17
|
2017-04-12T15:45:37.000Z
|
2021-07-12T21:25:50.000Z
|
tests/test_spc.py
|
vishalbelsare/quail
|
6c847a49f31d953f3264294439576a23588b84d8
|
[
"MIT"
] | 80
|
2017-04-12T18:54:10.000Z
|
2021-06-05T17:28:33.000Z
|
tests/test_spc.py
|
vishalbelsare/quail
|
6c847a49f31d953f3264294439576a23588b84d8
|
[
"MIT"
] | 8
|
2018-02-01T18:53:46.000Z
|
2020-01-12T17:36:33.000Z
|
from quail.egg import Egg
import numpy as np
import pytest
def test_spc():
presented=[[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]]
recalled=[[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]]
egg = Egg(pres=presented,rec=recalled)
assert np.array_equal(egg.analyze('spc').data.values,[np.array([ 1., 1., 1., 1.]),np.array([ 1., 1., 0., 1.])])
def test_analysis_spc_multisubj():
presented=[[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']],[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]]
recalled=[[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']],[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]]
multisubj_egg = Egg(pres=presented,rec=recalled)
assert np.allclose(multisubj_egg.analyze('spc').data.values,np.array([[ 1., 1., 1., 1.],[ 1., 1., 0., 1.],[ 1., 1., 1., 1.],[ 1., 1., 0., 1.]]))
def test_spc_best_euclidean():
presented=[[[10, 20, 30, 40],[10, 20, 30, 40]]]
recalled=[[[20, 10, 40, 30],[20, 40, 10]]]
egg = Egg(pres=presented,rec=recalled)
assert np.allclose(egg.analyze('spc', match='best', distance='euclidean', features='item').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_euclidean():
presented = [[[{'item' : i, 'feature1' : i*10} for i in range(1, 5)] for i in range(2)]]
recalled=[[[{'item' : i, 'feature1' : i*10} for i in [2, 1, 4, 3]],[{'item' : i, 'feature1' : i*10} for i in [2, 4, 1]]]]
egg = Egg(pres=presented,rec=recalled)
assert np.allclose(egg.analyze('spc', match='best', distance='euclidean', features='feature1').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_euclidean_3d():
presented = [[[{'item' : i, 'feature1' : [i*10, 0, 0]} for i in range(1, 5)] for i in range(2)]]
recalled=[[[{'item' : i, 'feature1' : [i*10, 0, 0]} for i in [2, 1, 4, 3]],[{'item' : i, 'feature1' : [i*10, 0, 0]} for i in [2, 4, 1]]]]
egg = Egg(pres=presented,rec=recalled)
assert np.array_equal(egg.analyze('spc', match='best', distance='euclidean', features='feature1').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_euclidean_3d_2features():
presented = [[[{'item' : i, 'feature1' : [i*10, 0, 0], 'feature2' : [i*10, 0, 0]} for i in range(1, 5)] for i in range(2)]]
recalled=[[[{'item' : i, 'feature1' : [i*10, 0, 0], 'feature2': [i*10, 0, 0]} for i in [2, 1, 4, 3]],[{'item' : i, 'feature1' : [i*10, 0, 0], 'feature2': [i*10, 0, 0]} for i in [2, 4, 1]]]]
egg = Egg(pres=presented,rec=recalled)
assert np.array_equal(egg.analyze('spc', match='best', distance='euclidean', features=['feature1', 'feature2']).data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_euclidean_3d_features_not_set():
presented = [[[{'item' : i, 'feature1' : [i*10, 0, 0]} for i in range(1, 5)] for i in range(2)]]
recalled=[[[{'item' : i, 'feature1' : [i*10, 0, 0]} for i in [2, 1, 4, 3]],[{'item' : i, 'feature1' : [i*10, 0, 0]} for i in [2, 4, 1]]]]
egg = Egg(pres=presented,rec=recalled)
assert np.array_equal(egg.analyze('spc', match='best', distance='euclidean', features='feature1').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_euclidean_3d_exception_no_features():
presented=[[[[10, 0, 0], [20, 0, 0], [30, 0, 0], [40, 0, 0]],
[[10, 0, 0], [20, 0, 0], [30, 0, 0], [40, 0, 0]]]]
recalled=[[[[20, 0, 0], [10, 0, 0], [40, 0, 0], [30, 0, 0]],
[[20, 0, 0], [40, 0, 0], [10, 0, 0]]]]
egg = Egg(pres=presented,rec=recalled)
with pytest.raises(Exception):
assert np.array_equal(egg.analyze('spc', match='best', distance='euclidean').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_euclidean_3d_exception_item_specified():
presented=[[[[10, 0, 0], [20, 0, 0], [30, 0, 0], [40, 0, 0]],
[[10, 0, 0], [20, 0, 0], [30, 0, 0], [40, 0, 0]]]]
recalled=[[[[20, 0, 0], [10, 0, 0], [40, 0, 0], [30, 0, 0]],
[[20, 0, 0], [40, 0, 0], [10, 0, 0]]]]
egg = Egg(pres=presented,rec=recalled)
assert np.array_equal(egg.analyze('spc', match='best', distance='euclidean', features='item').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_best_correlation_3d():
presented=[[[[10, 0, 10], [20, 0, 0], [30, 0, -10], [40, 0, -20]],
[[10, 0, 10], [20, 0, 0], [30, 0, -10], [40, 0, -20]]]]
recalled=[[[[20, 0, 0], [10, 0, 10], [40, 0, -20], [30, 0, -10]],
[[20, 0, 0], [40, 0, -20], [10, 0, 10]]]]
egg = Egg(pres=presented,rec=recalled)
assert np.array_equal(egg.analyze('spc', match='best', distance='correlation', features='item').data.values,[np.array([1., 1., 1., 1.]),np.array([1., 1., 0., 1.])])
def test_spc_smooth_correlation_3d():
presented=[[[[10, 0, 10], [20, 0, 0], [30, 0, -10], [40, 0, -20]],
[[10, 0, 10], [20, 0, 0], [30, 0, -10], [40, 0, -20]]]]
recalled=[[[[20, 0, 0], [10, 0, 10], [40, 0, -20], [30, 0, -10]],
[[20, 0, 0], [40, 0, -20], [10, 0, 10]]]]
egg = Egg(pres=presented,rec=recalled)
egg.analyze('spc', match='smooth', distance='euclidean', features='item').data.values
| 67.772152
| 193
| 0.540344
| 875
| 5,354
| 3.241143
| 0.074286
| 0.035261
| 0.028561
| 0.060296
| 0.911142
| 0.911142
| 0.897391
| 0.88787
| 0.876234
| 0.841678
| 0
| 0.113961
| 0.177251
| 5,354
| 78
| 194
| 68.641026
| 0.529852
| 0
| 0
| 0.507463
| 0
| 0
| 0.103661
| 0
| 0
| 0
| 0
| 0
| 0.149254
| 1
| 0.164179
| false
| 0
| 0.044776
| 0
| 0.208955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1931e0564e7a7ea346e585ae507436d9fe70eebb
| 28
|
py
|
Python
|
plugins/plugin_huffman/__init__.py
|
G-AshwinKumar/experiment-notebook
|
aae1c5fb9ef8f84dce5d75989ed8975797282f37
|
[
"MIT"
] | null | null | null |
plugins/plugin_huffman/__init__.py
|
G-AshwinKumar/experiment-notebook
|
aae1c5fb9ef8f84dce5d75989ed8975797282f37
|
[
"MIT"
] | null | null | null |
plugins/plugin_huffman/__init__.py
|
G-AshwinKumar/experiment-notebook
|
aae1c5fb9ef8f84dce5d75989ed8975797282f37
|
[
"MIT"
] | null | null | null |
from . import huffman_codec
| 14
| 27
| 0.821429
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1996e266c1b531fd6cc4ed2ce80391b2c5054e51
| 201
|
py
|
Python
|
answers.py
|
DenisLozhnikov/iamhome
|
07d8b32c556ded345a915a7e61620b3c951d670d
|
[
"MIT"
] | 1
|
2021-11-14T16:50:18.000Z
|
2021-11-14T16:50:18.000Z
|
answers.py
|
DenisLozhnikov/iamhome
|
07d8b32c556ded345a915a7e61620b3c951d670d
|
[
"MIT"
] | 26
|
2021-09-22T14:18:23.000Z
|
2021-10-07T05:57:26.000Z
|
answers.py
|
DenisLozhnikov/iamhome
|
07d8b32c556ded345a915a7e61620b3c951d670d
|
[
"MIT"
] | 1
|
2021-09-24T11:37:39.000Z
|
2021-09-24T11:37:39.000Z
|
import random
POSITIVE_ANSWERS = ['Хорошо', 'Поняла', 'Отлично', 'Поняла Вас', 'Ясно', 'Понятно', 'Записала']
def add_positive_answer(text):
return random.choice(POSITIVE_ANSWERS) + '. ' + text
| 25.125
| 95
| 0.691542
| 23
| 201
| 5.869565
| 0.73913
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139303
| 201
| 7
| 96
| 28.714286
| 0.780347
| 0
| 0
| 0
| 0
| 0
| 0.248756
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
199f640cef151891e8177a4f7886d0fe16df1c6b
| 4,311
|
py
|
Python
|
output/models/nist_data/list_pkg/nmtokens/schema_instance/nistschema_sv_iv_list_nmtokens_enumeration_1_xsd/nistschema_sv_iv_list_nmtokens_enumeration_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/nmtokens/schema_instance/nistschema_sv_iv_list_nmtokens_enumeration_1_xsd/nistschema_sv_iv_list_nmtokens_enumeration_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/nmtokens/schema_instance/nistschema_sv_iv_list_nmtokens_enumeration_1_xsd/nistschema_sv_iv_list_nmtokens_enumeration_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-list-NMTOKENS-enumeration-1-NS"
class NistschemaSvIvListNmtokensEnumeration1Type(Enum):
IMPROVED_IS_STANDARDS_ADVENT_XML_THE_RETRIEVE_WITH_C_INVESTIGATION_SPECIFICATIONS_IS_ONLY_INFO_OF_ADDITIONALLY_TO_BASED_THE_EB_XML_PARTNERSHIPS_OASI_DEVELOPERS_ENGINEERIN_THE_DISCUSSIONS_EFFORTS_PA = (
"improved-is-standards.advent-XML_the_retrieve.with:C",
"investigation.specifications.is-Only.info",
"of.Additionally_to_based:The.ebXML_partnerships_OASI",
"Developers.engineerin",
"the_discussions:efforts_pa",
)
PROBLEMS_OF_DOM_FOR_DEFINE_INDUSTRY_HAS_I_TECHNOLOGIES_CHAIN_F_DISTRIBUTED_ENSURE_THE_ADDRESSING_A_INCLUDING_IN_THE_PROVIDE_DIAGNOS_FROM_TO_VARIETY_UN_THE_TECHNICAL_SCHEMAS_FOR_USE_PROJECTOR_AND_DISPLAYING_REVOL_FILE_COLLABORATING_DESCRIPTION_U_PARTNERSHIPS_THIS_TO_OASIS_PERVASIVE_THE_XML_THE_INTERNATIONAL_AND_KNOWN_AN_XML_TO_PAGES_DISCOVER_FED_DISSEMINATE_FOR_CAN_TO_UTILITIES_CHAINS_INTEROPERABILITY = (
"problems_of_DOM_for.define-industry:has_i",
"technologies.chain:f",
"distributed:ensure",
"the-addressing.a_including:in.the-provide-diagnos",
"from_to:variety.un",
"the_Technical_Schemas_for:use.projector_and.displaying.revol",
"file-collaborating_Description.u",
"partnerships_this:to-OASIS_Pervasive.the:",
"XML.the.international:and.known_an.XML-to.pages_discover:fed",
"disseminate-for:can:to_utilities.chains-interoperability:",
)
DEPENDABILITY_ENFORCEMENT_BETWEEN_PARTICIPATE_NIST_OF_THE_OF_WITH_GROUPS_COLLABORATE_DISCOVERY_THEIR_TO_GRAPHIC_REVIEWED_AND_LANGU_PROMISES_DOCUMENTS_SOLUTIONS_IN_THE_IN_SOFT = (
"dependability-enforcement.between:participate_NIST-of",
"the-of.with_Groups.collaborate.discovery:",
"their-to_graphic",
"reviewed.and.Langu",
"promises-documents-solutions.in-the_in:soft",
)
MANIPULATE_PERV_OFFER_TO_AND_E_WITH_REFE_TIME_THE_ANY_AMBIGUITIES_TO_ORGANIZATION_THE_LOCALIZED_JI_OF_PROVIDES_A_INTEROPERABILITY_BUSINESS_NIST_PERVASIVE_A_FIL_AND_IS_THE_BACK_XML_TEST_DOCUMENTS_AND_A_REFERENCE_IS_THE_P_AND_OUR_THE_ARE_AND_R = (
"manipulate:perv",
"offer:to.and:e-with.refe",
"time_the.any-ambiguities.to_organization-the-",
"localized_Ji",
"of.provides-a:interoperability-business-NIST-pervasive_a:fil",
"and-is.the_back-XML.test-documents_and.a-reference_is.the_p",
"and-our:the-are.and.r",
)
DEVELOPERS_S_A_BUILD_IS_ENABLING_THAT_T_ISSUES_SUCH_THE_THE_ASKED_RELATED_TO_BE_HAMPERED_O_AND_INFO_MUST_TO_WITH_THE_THE_B_VOCA_AROU_USING_VERTICAL_THIS_WIDELY_WITHI = (
"developers.s_a_build:is-enabling.that-t",
"issues_such:The_The.asked:related-to.be-hampered.o",
"and_info",
"must_to:with-the.the-b",
"voca",
"arou",
"using:vertical:This.widely:withi",
)
CAN_WITH_PROTOTYPE_TECHNOLOGIES_A_HELPING_ENFORCEMENT_CO_MANY_AND_EMBEDDED_SEMANTICS_PERVASIVE_UNAMBIGUO_LAW_THE_TODAY_SIGNATURES_DESKTOP_NEW_XML_A_OBJECTIVE_IT_THE_AND_FOR_MADE_KEY_ORGANIZATIONS_QUALITY_AB_MARKUP_ARE_ABOUT_OASI_THE_A_IS_ABILITY_TO_IS_FOR_SUCCESS_OF_INDUSTRY_DOM_PC_WHICH_THE_LED_AUTOMATIC = (
"can.with:prototype:technologies.a:helping.enforcement:_co",
"many.and:embedded-semantics.Pervasive.unambiguo",
"law.The_today_signatures:desktop:new.XML:A",
"objective.it",
"The:and-for-made-key:organizations.quality:ab",
"Markup-are-about:OASI",
"the.A_is.ability-to.is:for-success.of.industry-DOM.PC_which",
"The:led.automatic-",
)
@dataclass
class NistschemaSvIvListNmtokensEnumeration1:
class Meta:
name = "NISTSchema-SV-IV-list-NMTOKENS-enumeration-1"
namespace = "NISTSchema-SV-IV-list-NMTOKENS-enumeration-1-NS"
value: Optional[NistschemaSvIvListNmtokensEnumeration1Type] = field(
default=None,
metadata={
"required": True,
}
)
| 55.987013
| 411
| 0.742983
| 547
| 4,311
| 5.323583
| 0.334552
| 0.008242
| 0.014423
| 0.018544
| 0.899038
| 0.899038
| 0.899038
| 0.885989
| 0.845467
| 0.811813
| 0
| 0.001685
| 0.174206
| 4,311
| 76
| 412
| 56.723684
| 0.816292
| 0
| 0
| 0
| 0
| 0.014286
| 0.371376
| 0.33171
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.042857
| 0
| 0.185714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
19a199ba3c2923def0bd8f6bad9da5d847c4cf4f
| 180
|
py
|
Python
|
autoarray/inversion/regularization/__init__.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
autoarray/inversion/regularization/__init__.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
autoarray/inversion/regularization/__init__.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
from .constant import Constant
from .constant import ConstantSplit
from .adaptive_brightness import AdaptiveBrightness
from .adaptive_brightness import AdaptiveBrightnessSplit
| 36
| 57
| 0.866667
| 18
| 180
| 8.555556
| 0.444444
| 0.155844
| 0.233766
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 180
| 4
| 58
| 45
| 0.9625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
270d7b756a53b40a23192377c06dd1774d77b516
| 113
|
py
|
Python
|
utils/base.py
|
saisyam/scrapers
|
1e34c2e3d9b052f0516c72210f0bcbdb8f631d89
|
[
"Apache-2.0"
] | null | null | null |
utils/base.py
|
saisyam/scrapers
|
1e34c2e3d9b052f0516c72210f0bcbdb8f631d89
|
[
"Apache-2.0"
] | 5
|
2021-03-13T07:07:41.000Z
|
2021-03-23T11:28:21.000Z
|
utils/base.py
|
saisyam/scrapers
|
1e34c2e3d9b052f0516c72210f0bcbdb8f631d89
|
[
"Apache-2.0"
] | null | null | null |
class BaseScraper:
def __init__(self, url):
self.url = url
def scrape(self):
return {}
| 14.125
| 28
| 0.557522
| 13
| 113
| 4.538462
| 0.615385
| 0.237288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.336283
| 113
| 8
| 29
| 14.125
| 0.786667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
27213f0b38b74db7851c15191be45d4d09a8c6c5
| 284
|
py
|
Python
|
via_cms/model/internal/__init__.py
|
jeanjacquesp/via-cms
|
12b212f8005e3d667c23ffc4da831e4d3e653999
|
[
"MIT"
] | null | null | null |
via_cms/model/internal/__init__.py
|
jeanjacquesp/via-cms
|
12b212f8005e3d667c23ffc4da831e4d3e653999
|
[
"MIT"
] | null | null | null |
via_cms/model/internal/__init__.py
|
jeanjacquesp/via-cms
|
12b212f8005e3d667c23ffc4da831e4d3e653999
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Pax Syriana Foundation. Licensed under the Apache License, Version 2.0
#
from via_cms.model.internal._id_manager import *
from via_cms.model.internal.role_dao import *
from via_cms.model.internal.user_dao import *
from via_cms.model.internal.workflow_dao import *
| 35.5
| 88
| 0.809859
| 45
| 284
| 4.911111
| 0.555556
| 0.126697
| 0.180995
| 0.271493
| 0.524887
| 0.420814
| 0.289593
| 0
| 0
| 0
| 0
| 0.023904
| 0.116197
| 284
| 7
| 89
| 40.571429
| 0.856574
| 0.299296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2734575d3d6f659e2dac1b63b4c20102436119ef
| 137
|
py
|
Python
|
scripts/reactor/autogen_ludiquest2.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/reactor/autogen_ludiquest2.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/reactor/autogen_ludiquest2.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
# ParentID: 2202002
# Character field ID when accessed: 220020400
# ObjectID: 1000011
# Object Position X: -650
# Object Position Y: 162
| 22.833333
| 45
| 0.751825
| 18
| 137
| 5.722222
| 0.888889
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.254386
| 0.167883
| 137
| 5
| 46
| 27.4
| 0.649123
| 0.919708
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e0e3c048f76ae11f44a3a1cc7ad7ccd70653470
| 9,063
|
py
|
Python
|
quspin/basis/_reshape_subsys.py
|
anton-buyskikh/QuSpin
|
4e46b495e399414d9361d659e186492a1ac5b511
|
[
"BSD-3-Clause"
] | 195
|
2016-10-24T18:05:31.000Z
|
2022-03-29T10:11:56.000Z
|
quspin/basis/_reshape_subsys.py
|
cileeky/QuSpin
|
769d3817870f6ff55c4283af46f94e11c36f4121
|
[
"BSD-3-Clause"
] | 303
|
2016-10-25T20:08:11.000Z
|
2022-03-31T16:52:09.000Z
|
quspin/basis/_reshape_subsys.py
|
cileeky/QuSpin
|
769d3817870f6ff55c4283af46f94e11c36f4121
|
[
"BSD-3-Clause"
] | 54
|
2017-01-03T18:47:52.000Z
|
2022-03-16T06:54:33.000Z
|
import numpy as _np
import scipy.sparse as _sp
from ._basis_utils import _shuffle_sites
####################################################
# set of helper functions to implement the partial #
# trace of lattice density matrices. They do not #
# have any checks and states are assumed to be #
# in the non-symmetry reduced basis. #
####################################################
def _lattice_partial_trace_pure(psi,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a dense pure state psi over set of sites sub_sys_A and returns
reduced DM. Vectorisation available.
"""
psi_v=_lattice_reshape_pure(psi,sub_sys_A,L,sps)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
def _lattice_partial_trace_mixed(rho,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a set of dense mixed states rho over set of sites sub_sys_A
and returns reduced DM. Vectorisation available.
"""
rho_v=_lattice_reshape_mixed(rho,sub_sys_A,L,sps)
if return_rdm == "A":
return _np.einsum("...jlkl->...jk",rho_v),None
elif return_rdm == "B":
return None,_np.einsum("...ljlk->...jk",rho_v.conj())
elif return_rdm == "both":
return _np.einsum("...jlkl->...jk",rho_v),_np.einsum("...ljlk->...jk",rho_v.conj())
def _lattice_partial_trace_sparse_pure(psi,sub_sys_A,L,sps,return_rdm="A"):
"""
This function computes the partial trace of a sparse pure state psi over set of sites sub_sys_A and returns
reduced DM.
"""
psi=_lattice_reshape_sparse_pure(psi,sub_sys_A,L,sps)
if return_rdm == "A":
return psi.dot(psi.H),None
elif return_rdm == "B":
return None,psi.H.dot(psi)
elif return_rdm == "both":
return psi.dot(psi.H),psi.H.dot(psi)
def _lattice_reshape_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = psi.shape[:-1]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
T_tup = sub_sys_A+sub_sys_B
psi_v = _shuffle_sites(sps,T_tup,psi)
psi_v = psi_v.reshape(extra_dims+(Ns_A,Ns_B))
return psi_v
'''
def _lattice_reshape_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = psi.shape[:-1]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(n_dims + s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(L))
psi_v = psi.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
psi_v = psi_v.transpose(T_tup) # take transpose to reshuffle indices
psi_v = psi_v.reshape(extra_dims+(Ns_A,Ns_B))
return psi_v
'''
def _lattice_reshape_mixed(rho,sub_sys_A,L,sps):
"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = rho.shape[:-2]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
# T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-_tensor rho_{ik,jl} i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(T_tup) + tuple(L+s for s in T_tup)
rho = rho.reshape(extra_dims+(-1,))
rho_v = _shuffle_sites(sps,T_tup,rho)
return rho_v.reshape(extra_dims+(Ns_A,Ns_B,Ns_A,Ns_B))
'''
def _lattice_reshape_mixed(rho,sub_sys_A,L,sps):
"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = rho.shape[:-2]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
# T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-_tensor rho_{ik,jl} i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(s+n_dims for s in T_tup) + tuple(L+n_dims+s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(2*L))
rho_v = rho.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
rho_v = rho_v.transpose(T_tup) # take transpose to reshuffle indices
return rho_v.reshape(extra_dims+(Ns_A,Ns_B,Ns_A,Ns_B))
'''
def _lattice_reshape_sparse_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the sparse pure state psi over the Hilbert space defined by sub_sys_A and its complement.
"""
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
psi = psi.tocoo()
T_tup = sub_sys_A+sub_sys_B
# reshuffle indices for the sub-systems.
# j = sum( j[i]*(sps**i) for i in range(L))
# this reshuffles the j[i] similar to the transpose operation
# on the dense arrays psi_v.transpose(T_tup)
if T_tup != tuple(range(L)):
indx = _np.zeros(psi.col.shape,dtype=psi.col.dtype)
for i_old,i_new in enumerate(T_tup):
indx += ((psi.col//(sps**(L-i_new-1))) % sps)*(sps**(L-i_old-1))
else:
indx = psi.col
# A = _np.array([0,1,2,3,4,5,6,7,8,9,10,11])
# print("make shift way of reshaping array")
# print("A = {}".format(A))
# print("A.reshape((3,4)): \n {}".format(A.reshape((3,4))))
# print("rows: A.reshape((3,4))/4: \n {}".format(A.reshape((3,4))/4))
# print("cols: A.reshape((3,4))%4: \n {}".format(A.reshape((3,4))%4))
psi._shape = (Ns_A,Ns_B)
psi.row[:] = indx / Ns_B
psi.col[:] = indx % Ns_B
return psi.tocsr()
def _tensor_reshape_pure(psi,sub_sys_A,Ns_l,Ns_r):
extra_dims = psi.shape[:-1]
if sub_sys_A == "left":
return psi.reshape(extra_dims+(Ns_l,Ns_r))
else:
n_dims = len(extra_dims)
T_tup = tuple(range(n_dims))+(n_dims+1,n_dims)
psi_v = psi.reshape(extra_dims+(Ns_l,Ns_r))
return psi_v.transpose(T_tup)
def _tensor_reshape_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r):
psi = psi.tocoo()
# make shift way of reshaping array
# j = j_l + Ns_r * j_l
# j_l = j / Ns_r
# j_r = j % Ns_r
if sub_sys_A == "left":
psi._shape = (Ns_l,Ns_r)
psi.row[:] = psi.col / Ns_r
psi.col[:] = psi.col % Ns_r
return psi.tocsr()
else:
psi._shape = (Ns_l,Ns_r)
psi.row[:] = psi.col / Ns_r
psi.col[:] = psi.col % Ns_r
return psi.T.tocsr()
def _tensor_reshape_mixed(rho,sub_sys_A,Ns_l,Ns_r):
extra_dims = rho.shape[:-2]
if sub_sys_A == "left":
return rho.reshape(extra_dims+(Ns_l,Ns_r,Ns_l,Ns_r))
else:
n_dims = len(extra_dims)
T_tup = tuple(range(n_dims))+(n_dims+1,n_dims)+(n_dims+3,n_dims+2)
rho_v = rho.reshape(extra_dims+(Ns_l,Ns_r,Ns_l,Ns_r))
return rho_v.transpose(T_tup)
def _tensor_partial_trace_pure(psi,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
psi_v = _tensor_reshape_pure(psi,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ij,...kj->...ik",psi_v,psi_v.conj())),_np.squeeze(_np.einsum("...ji,...jk->...ik",psi_v.conj(),psi_v))
def _tensor_partial_trace_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
psi = _tensor_reshape_sparse_pure(psi,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return psi.dot(psi.H),None
elif return_rdm == "B":
return None,psi.H.dot(psi)
elif return_rdm == "both":
return psi.dot(psi.H),psi.H.dot(psi)
def _tensor_partial_trace_mixed(rho,sub_sys_A,Ns_l,Ns_r,return_rdm="A"):
rho_v = _tensor_reshape_mixed(rho,sub_sys_A,Ns_l,Ns_r)
if return_rdm == "A":
return _np.squeeze(_np.einsum("...ijkj->...ik",rho_v)),None
elif return_rdm == "B":
return None,_np.squeeze(_np.einsum("...jijk->...ik",rho_v.conj()))
elif return_rdm == "both":
return _np.squeeze(_np.einsum("...ijkj->...ik",rho_v)),_np.squeeze(_np.einsum("...jijk->...ik",rho_v.conj()))
| 31.578397
| 138
| 0.688624
| 1,778
| 9,063
| 3.197975
| 0.092801
| 0.098136
| 0.076328
| 0.025325
| 0.857545
| 0.83732
| 0.804784
| 0.780865
| 0.764685
| 0.714386
| 0
| 0.006417
| 0.140241
| 9,063
| 286
| 139
| 31.688811
| 0.723306
| 0.200265
| 0
| 0.607692
| 0
| 0
| 0.060241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0
| 0.023077
| 0
| 0.323077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fdfd689db547e2e4c0b8c5c6cfc4b95d54f0ad6a
| 1,053
|
py
|
Python
|
2019/16/part1.py
|
mihaip/adventofcode
|
3725668595bfcf619fe6c97d12e2f14b42e3f0cb
|
[
"Apache-2.0"
] | null | null | null |
2019/16/part1.py
|
mihaip/adventofcode
|
3725668595bfcf619fe6c97d12e2f14b42e3f0cb
|
[
"Apache-2.0"
] | null | null | null |
2019/16/part1.py
|
mihaip/adventofcode
|
3725668595bfcf619fe6c97d12e2f14b42e3f0cb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python3
INPUT = "59766299734185935790261115703620877190381824215209853207763194576128635631359682876612079355215350473577604721555728904226669021629637829323357312523389374096761677612847270499668370808171197765497511969240451494864028712045794776711862275853405465401181390418728996646794501739600928008413106803610665694684578514524327181348469613507611935604098625200707607292339397162640547668982092343405011530889030486280541249694798815457170337648425355693137656149891119757374882957464941514691345812606515925579852852837849497598111512841599959586200247265784368476772959711497363250758706490540128635133116613480058848821257395084976935351858829607105310340"
L = len(INPUT)
BASE_PATTERN = [0, 1, 0, -1]
def phase(input):
result = []
for i in range(L):
r = 0
for pi in range(L):
p = BASE_PATTERN[(pi + 1) // (i + 1) % 4]
r += input[pi] * p
result.append(abs(r) % 10)
return result
input = list(map(int, INPUT))
for i in range(100):
input = phase(input)
print("answer: %s" % "".join(map(str, input[:8])))
| 45.782609
| 661
| 0.834758
| 71
| 1,053
| 12.352113
| 0.521127
| 0.023945
| 0.013683
| 0.025086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.689834
| 0.08452
| 1,053
| 22
| 662
| 47.863636
| 0.219917
| 0.021842
| 0
| 0
| 0
| 0
| 0.641399
| 0.631681
| 0
| 1
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.125
| 0.0625
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e3016272421ec74b2f4005da3d3ead38f7f113ab
| 131
|
py
|
Python
|
screens/__init__.py
|
suchyDev/Kivy-Dynamic-Screens-Template
|
7fbab74c5693430ea486ac359fa7a032596c232b
|
[
"MIT"
] | 13
|
2016-11-13T17:56:31.000Z
|
2022-03-03T21:17:00.000Z
|
screens/__init__.py
|
suchyDev/Kivy-Dynamic-Screens-Template
|
7fbab74c5693430ea486ac359fa7a032596c232b
|
[
"MIT"
] | 4
|
2018-03-16T00:40:34.000Z
|
2020-10-26T19:51:02.000Z
|
screens/__init__.py
|
suchyDev/Kivy-Dynamic-Screens-Template
|
7fbab74c5693430ea486ac359fa7a032596c232b
|
[
"MIT"
] | 9
|
2019-03-26T19:19:05.000Z
|
2021-08-06T17:06:23.000Z
|
'''Screens package containing all the app screens.'''
from resource_registers import register_kv_and_data
register_kv_and_data()
| 21.833333
| 53
| 0.824427
| 19
| 131
| 5.315789
| 0.736842
| 0.19802
| 0.257426
| 0.336634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10687
| 131
| 5
| 54
| 26.2
| 0.863248
| 0.358779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e337de7410b876ca26bbd0376992a347aa78b144
| 36
|
py
|
Python
|
jsonPackage/__init__.py
|
Hammer2900/json_help_object
|
1154f87a53973d239c3a37d01b52410c2abc9101
|
[
"MIT"
] | null | null | null |
jsonPackage/__init__.py
|
Hammer2900/json_help_object
|
1154f87a53973d239c3a37d01b52410c2abc9101
|
[
"MIT"
] | 1
|
2021-03-25T21:48:46.000Z
|
2021-03-25T21:48:46.000Z
|
jsonPackage/__init__.py
|
Hammer2900/json_help_object
|
1154f87a53973d239c3a37d01b52410c2abc9101
|
[
"MIT"
] | null | null | null |
from . import main_core, fast_utils
| 18
| 35
| 0.805556
| 6
| 36
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 1
| 36
| 36
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b5ef5fd1128e81f186da18e36e1301932b9ed4c
| 31
|
py
|
Python
|
test.py
|
GCreaner/Week1
|
6709aafa8ccc962026319f7a9435f1e98d4548b5
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
GCreaner/Week1
|
6709aafa8ccc962026319f7a9435f1e98d4548b5
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
GCreaner/Week1
|
6709aafa8ccc962026319f7a9435f1e98d4548b5
|
[
"Apache-2.0"
] | null | null | null |
print "This is a test message"
| 15.5
| 30
| 0.741935
| 6
| 31
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 31
| 1
| 31
| 31
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8b9fbd4952fb354426b9eaf821c069013e84a0ae
| 35
|
py
|
Python
|
bbauto/api/router.py
|
BB-Auto-Detailing/backend
|
b60a5107ef0bb6716429ccedcacd8aa987acc866
|
[
"BSD-2-Clause"
] | 4
|
2018-07-20T15:37:01.000Z
|
2019-02-20T23:39:09.000Z
|
freenit/project/project/api/router.py
|
mekanix/flask-bootstrap-sql-rest
|
13b4e4dc093e268e40ec56caa25b03157634a087
|
[
"BSD-2-Clause"
] | 4
|
2020-01-31T12:12:56.000Z
|
2021-01-13T12:37:23.000Z
|
freenit/project/project/api/router.py
|
mekanix/flask-bootstrap-sql-rest
|
13b4e4dc093e268e40ec56caa25b03157634a087
|
[
"BSD-2-Clause"
] | 5
|
2018-06-19T19:32:27.000Z
|
2019-10-02T20:11:30.000Z
|
from freenit.api.router import api
| 17.5
| 34
| 0.828571
| 6
| 35
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ba24f2ead601e33028c7ee972070160c1370c9e
| 110
|
py
|
Python
|
fathom/residual/__init__.py
|
Aetf/fathom
|
1f0dafa9fe3b7988708522d93ecda7f282cb2090
|
[
"Apache-2.0"
] | 1
|
2021-06-30T04:59:22.000Z
|
2021-06-30T04:59:22.000Z
|
fathom/residual/__init__.py
|
Aetf/fathom
|
1f0dafa9fe3b7988708522d93ecda7f282cb2090
|
[
"Apache-2.0"
] | null | null | null |
fathom/residual/__init__.py
|
Aetf/fathom
|
1f0dafa9fe3b7988708522d93ecda7f282cb2090
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, print_function, division
from .residual import Residual, ResidualFwd
| 27.5
| 64
| 0.854545
| 13
| 110
| 6.769231
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 110
| 3
| 65
| 36.666667
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
8bcb060f9c9e9b1a05e7378d1176a09bd72f8629
| 12,884
|
py
|
Python
|
menpo/math/test/decomposition_base_test.py
|
jacksoncsy/menpo
|
3cac491fe30454935ed12fcaa89f453c5f6ec878
|
[
"BSD-3-Clause"
] | null | null | null |
menpo/math/test/decomposition_base_test.py
|
jacksoncsy/menpo
|
3cac491fe30454935ed12fcaa89f453c5f6ec878
|
[
"BSD-3-Clause"
] | null | null | null |
menpo/math/test/decomposition_base_test.py
|
jacksoncsy/menpo
|
3cac491fe30454935ed12fcaa89f453c5f6ec878
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from numpy.testing import assert_almost_equal
from menpo.math import eigenvalue_decomposition, \
principal_component_decomposition
# Positive semi-definite matrix
cov_matrix = np.array([[3, 1], [1, 3]])
# Data values taken from:
# http://www.cs.otago.ac.nz/cosc453/student_tutorials/principal_components.pdf
# Tested values are equal
large_samples_data_matrix = np.array([[2.5, 2.4],
[0.5, 0.7],
[2.2, 2.9],
[1.9, 2.2],
[3.1, 3.0],
[2.3, 2.7],
[2.0, 1.6],
[1.0, 1.1],
[1.5, 1.6],
[1.1, 0.9]])
centered_eigenvectors_s = np.array([[0.6778734, 0.73517866],
[-0.73517866, 0.6778734]])
non_centered_eigenvectors_s = np.array([[0.68647784, 0.72715072],
[-0.72715072, 0.68647784]])
mean_vector_s = np.array([1.81, 1.91])
eigenvalues_no_centre_no_bias_s = np.array([8.97738481, 0.04928186])
eigenvalues_centered_biased_s = np.array([1.15562494, 0.04417506])
eigenvalues_no_centre_biased_s = np.array([8.07964633, 0.04435367])
eigenvalues_centered_no_bias_s = np.array([1.28402771, 0.0490834])
centered_eigenvectors_f = np.array([[-0.09901475, 0.19802951, 0.69310328,
0.29704426, -0.09901475, 0.39605902,
-0.39605902, 0.09901475, 0.09901475,
-0.19802951]])
centered_eigenvectors_biased_f = np.array([[-0.13864839, 0.27729678,
0.97053872, 0.41594517,
-0.13864839, 0.55459355,
-0.55459355, 0.13864839,
0.13864839, -0.27729678]])
non_centered_eigenvectors_biased_f = np.array(
[[0.04284044, 0.01054804, 0.04479142, 0.03594266, 0.05333815,
0.0438411, 0.03139242, 0.01839615, 0.02714423, 0.01744583],
[-0.3840268, 0.26369659, 0.88167249, 0.29008842, -0.43904756,
0.40818153, -0.802497, 0.06307234, 0.0172217, -0.41041863]])
non_centered_eigenvectors_f = np.array(
[[0.38507927, 0.09481302, 0.40261598, 0.32307722, 0.4794398, 0.39407387,
0.28217662, 0.16535718, 0.24399096, 0.15681507],
[-0.25575629, 0.17561812, 0.58718113, 0.19319469, -0.29239933, 0.27184299,
-0.5344514, 0.04200527, 0.01146941, -0.27333287]])
mean_vector_f = np.array([2.45, 0.6, 2.55, 2.05, 3.05,
2.5, 1.8, 1.05, 1.55, 1.])
eigenvalues_no_centre_no_bias_f = np.array([80.79646326, 0.44353674])
eigenvalues_centered_biased_f = np.array([0.255])
eigenvalues_no_centre_biased_f = np.array([40.39823163, 0.22176837])
eigenvalues_centered_no_bias_f = np.array([0.51])
# whiten,centre,bias (samples)
# 000
def pcd_samples_nowhiten_nocentre_nobias_test():
output = principal_component_decomposition(large_samples_data_matrix,
centre=False, whiten=False,
bias=False)
eigenvectors, eigenvalues, mean_vector = output
assert_almost_equal(eigenvalues, eigenvalues_no_centre_no_bias_s)
assert_almost_equal(eigenvectors, non_centered_eigenvectors_s)
assert_almost_equal(mean_vector, [0.0, 0.0])
# 001
def pcd_samples_nowhiten_nocentre_yesbias_test():
output = principal_component_decomposition(large_samples_data_matrix,
centre=False, bias=True,
whiten=False)
eigenvectors, eigenvalues, mean_vector = output
assert_almost_equal(eigenvalues, eigenvalues_no_centre_biased_s)
assert_almost_equal(eigenvectors, non_centered_eigenvectors_s)
assert_almost_equal(mean_vector, [0.0, 0.0])
# 010
def pcd_samples_nowhiten_yescentre_nobias_test():
output = principal_component_decomposition(large_samples_data_matrix,
whiten=False, centre=True,
bias=False)
eigenvectors, eigenvalues, mean_vector = output
assert_almost_equal(eigenvalues, eigenvalues_centered_no_bias_s)
assert_almost_equal(eigenvectors, centered_eigenvectors_s)
assert_almost_equal(mean_vector, mean_vector_s)
# 011
def pcd_samples_nowhiten_yescentre_yesbias_test():
output = principal_component_decomposition(large_samples_data_matrix,
bias=True, centre=True,
whiten=False)
eigenvectors, eigenvalues, mean_vector = output
assert_almost_equal(eigenvalues, eigenvalues_centered_biased_s)
assert_almost_equal(eigenvectors, centered_eigenvectors_s)
assert_almost_equal(mean_vector, mean_vector_s)
# 100
def pcd_samples_yeswhiten_nocentre_nobias_test():
output = principal_component_decomposition(large_samples_data_matrix,
centre=False, whiten=True,
bias=False)
eigenvectors, eigenvalues, mean_vector = output
assert_almost_equal(eigenvalues, eigenvalues_no_centre_no_bias_s)
assert_almost_equal(eigenvectors.T / np.sqrt(1.0 / eigenvalues),
non_centered_eigenvectors_s.T)
assert_almost_equal(mean_vector, [0.0, 0.0])
# 101
def pcd_samples_yeswhiten_nocentre_yesbias_test():
output = principal_component_decomposition(large_samples_data_matrix,
bias=True, centre=False,
whiten=True)
eigenvectors, eigenvalues, mean_vector = output
assert_almost_equal(eigenvalues, eigenvalues_no_centre_biased_s)
assert_almost_equal(eigenvectors.T / np.sqrt(1.0 / eigenvalues),
non_centered_eigenvectors_s.T)
assert_almost_equal(mean_vector, [0.0, 0.0])
# 110
def pcd_samples_yeswhiten_yescentre_nobias_test():
    """PCD on the samples matrix: whitening on, centring on, bias off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix, centre=True, bias=False, whiten=True)
    assert_almost_equal(evals, eigenvalues_centered_no_bias_s)
    # Undo the whitening scale before comparing with the reference basis.
    unwhitened = evecs.T / np.sqrt(1.0 / evals)
    assert_almost_equal(unwhitened, centered_eigenvectors_s.T)
    assert_almost_equal(mean, mean_vector_s)
# 111
def pcd_samples_yeswhiten_yescentre_yesbias_test():
    """PCD on the samples matrix: whitening, centring and bias all on."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix, centre=True, bias=True, whiten=True)
    assert_almost_equal(evals, eigenvalues_centered_biased_s)
    # Undo the whitening scale before comparing with the reference basis.
    unwhitened = evecs.T / np.sqrt(1.0 / evals)
    assert_almost_equal(unwhitened, centered_eigenvectors_s.T)
    assert_almost_equal(mean, mean_vector_s)
# whiten,centre,bias (features)
# 000
def pcd_features_nowhiten_nocentre_nobias_test():
    """PCD on the transposed (features) matrix with all options off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=False, bias=False, whiten=False)
    assert_almost_equal(evals, eigenvalues_no_centre_no_bias_f)
    assert_almost_equal(evecs, non_centered_eigenvectors_f)
    assert_almost_equal(mean, np.zeros(10))
def pcd_features_nowhiten_nocentre_nobias_inplace_test():
    """As the non-inplace 000 features test, but with inplace=True."""
    # Copy first: inplace=True destructively modifies the input matrix,
    # and the shared fixture must stay intact for the other tests.
    data = large_samples_data_matrix.T.copy()
    evecs, evals, mean = principal_component_decomposition(
        data, centre=False, bias=False, whiten=False, inplace=True)
    assert_almost_equal(evals, eigenvalues_no_centre_no_bias_f)
    assert_almost_equal(evecs, non_centered_eigenvectors_f)
    assert_almost_equal(mean, np.zeros(10))
# 001
def pcd_features_nowhiten_nocentre_yesbias_test():
    """PCD on the features matrix: bias on, centring off, whitening off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=False, bias=True, whiten=False)
    assert_almost_equal(evals, eigenvalues_no_centre_biased_f)
    assert_almost_equal(evecs, non_centered_eigenvectors_f)
    assert_almost_equal(mean, np.zeros(10))
# 010
def pcd_features_nowhiten_yescentre_nobias_test():
    """PCD on the features matrix: centring on, bias off, whitening off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=True, bias=False, whiten=False)
    assert_almost_equal(evals, eigenvalues_centered_no_bias_f)
    assert_almost_equal(evecs, centered_eigenvectors_f)
    assert_almost_equal(mean, mean_vector_f)
# 011
def pcd_features_nowhiten_yescentre_yesbias_test():
    """PCD on the features matrix: centring on, bias on, whitening off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=True, bias=True, whiten=False)
    assert_almost_equal(evals, eigenvalues_centered_biased_f)
    assert_almost_equal(evecs, centered_eigenvectors_f)
    assert_almost_equal(mean, mean_vector_f)
# 100
def pcd_features_yeswhiten_nocentre_nobias_test():
    """PCD on the features matrix: whitening on, centring off, bias off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=False, bias=False, whiten=True)
    assert_almost_equal(evals, eigenvalues_no_centre_no_bias_f)
    # Undo the whitening scale before comparing with the reference basis.
    unwhitened = evecs.T / np.sqrt(1.0 / evals)
    assert_almost_equal(unwhitened, non_centered_eigenvectors_f.T)
    assert_almost_equal(mean, np.zeros(10))
# 101
def pcd_features_yeswhiten_nocentre_yesbias_test():
    """PCD on the features matrix: whitening on, bias on, centring off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=False, bias=True, whiten=True)
    assert_almost_equal(evals, eigenvalues_no_centre_biased_f)
    # NOTE(review): unlike the samples-mode whiten tests, this compares the
    # whitened eigenvectors directly; the *_biased_f fixture is presumably
    # stored pre-whitened — confirm against the fixture definitions.
    assert_almost_equal(evecs, non_centered_eigenvectors_biased_f)
    assert_almost_equal(mean, np.zeros(10))
# 110
def pcd_features_yeswhiten_yescentre_nobias_test():
    """PCD on the features matrix: whitening on, centring on, bias off."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=True, bias=False, whiten=True)
    assert_almost_equal(evals, eigenvalues_centered_no_bias_f)
    # Compared directly against the (presumably pre-whitened) fixture.
    assert_almost_equal(evecs, centered_eigenvectors_biased_f)
    assert_almost_equal(mean, mean_vector_f)
# 111
def pcd_features_yeswhiten_yescentre_yesbias_test():
    """PCD on the features matrix: whitening, centring and bias all on."""
    evecs, evals, mean = principal_component_decomposition(
        large_samples_data_matrix.T, centre=True, bias=True, whiten=True)
    assert_almost_equal(evals, eigenvalues_centered_biased_f)
    # Compared directly against the (presumably pre-whitened) fixture.
    assert_almost_equal(evecs, centered_eigenvectors_biased_f)
    assert_almost_equal(mean, mean_vector_f)
def eigenvalue_decomposition_default_epsilon_test():
    """With the default eps, both eigenpairs of the 2x2 covariance survive."""
    vecs, vals = eigenvalue_decomposition(cov_matrix)
    assert_almost_equal(vals, [4.0, 2.0])
    half_sqrt2 = np.sqrt(2.0) / 2.0
    expected = [[half_sqrt2, -half_sqrt2],
                [half_sqrt2, half_sqrt2]]
    assert_almost_equal(vecs, expected)
def eigenvalue_decomposition_large_epsilon_test():
    """A large eps discards the smaller eigenpair, keeping only lambda=4."""
    vecs, vals = eigenvalue_decomposition(cov_matrix, eps=0.5)
    assert_almost_equal(vals, [4.0])
    half_sqrt2 = np.sqrt(2.0) / 2.0
    assert_almost_equal(vecs, [[half_sqrt2], [half_sqrt2]])
| 44.891986
| 82
| 0.635827
| 1,442
| 12,884
| 5.29681
| 0.124827
| 0.087981
| 0.12464
| 0.051846
| 0.80885
| 0.762765
| 0.730034
| 0.712359
| 0.707646
| 0.694161
| 0
| 0.098455
| 0.286557
| 12,884
| 286
| 83
| 45.048951
| 0.732485
| 0.028873
| 0
| 0.600985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 1
| 0.093596
| false
| 0
| 0.014778
| 0
| 0.108374
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8bd1792860d88ade8bd38146c2ce1074d33e66b0
| 1,322
|
py
|
Python
|
Sprint 2/backend/src/controllers/envio_email.py
|
IsraelAugusto0110/API_5-SEM
|
c0d872453c1a0b6e34764810176525456dc6d559
|
[
"MIT"
] | null | null | null |
Sprint 2/backend/src/controllers/envio_email.py
|
IsraelAugusto0110/API_5-SEM
|
c0d872453c1a0b6e34764810176525456dc6d559
|
[
"MIT"
] | null | null | null |
Sprint 2/backend/src/controllers/envio_email.py
|
IsraelAugusto0110/API_5-SEM
|
c0d872453c1a0b6e34764810176525456dc6d559
|
[
"MIT"
] | null | null | null |
import smtplib
class Email:
    """Sends account-notification emails through Gmail's SMTP server.

    NOTE(review): both methods are declared without ``self`` and appear to
    be invoked as ``Email.email_cadastro(addr, pwd)``; the signatures are
    kept unchanged for backward compatibility.

    SECURITY: the sender address and password are hard-coded in source.
    They should be moved to environment variables or a secrets store.
    """

    def email_cadastro(a, c):
        """Email the 'registration successful' message to address ``a``,
        including the temporary password ``c``. Returns "200" on success.
        """
        SUBJECT = "Cadastro realizado com sucesso!!!"
        TO = a
        FROM = "contato.bycar@gmail.com"
        PASSWORD = "@bycarApp2021"
        text = f"SENHA TEMPORÁRIA: {c}"
        # Minimal RFC 5322 message: headers, blank line, body.
        BODY = "\r\n".join((
            f"From: {FROM}",
            f"To: {TO}",
            f"Subject: {SUBJECT}",
            "",
            text))
        server = smtplib.SMTP('smtp.gmail.com', 587)
        try:
            # FIX: previously the connection leaked if any of these raised;
            # the finally block now guarantees the session is closed.
            server.starttls()
            server.login(FROM, PASSWORD)
            print("Login funfou")
            server.sendmail(FROM, TO, BODY)
            print("Email enviado para", TO)
        finally:
            server.quit()
        return "200"

    def email_redefinicao(a):
        """Email the password-reset notification to address ``a``.
        Returns "200" on success.

        NOTE(review): the body text mentions a reset code but no code is
        interpolated into the message — confirm with the caller whether a
        code parameter is missing.
        """
        SUBJECT = "Solicitação de redefinição de senha aceita"
        TO = a
        print(a)
        FROM = "contato.bycar@gmail.com"
        PASSWORD = "@bycarApp2021"
        text = "use esse código para redefinir sua senha"
        BODY = "\r\n".join((
            f"From: {FROM}",
            f"To: {TO}",
            f"Subject: {SUBJECT}",
            "",
            text))
        server = smtplib.SMTP('smtp.gmail.com', 587)
        try:
            # Same leak fix as email_cadastro: always close the session.
            server.starttls()
            server.login(FROM, PASSWORD)
            print("Login funfou")
            server.sendmail(FROM, TO, BODY)
            print("Email enviado para", TO)
        finally:
            server.quit()
        return "200"
| 23.607143
| 62
| 0.520424
| 146
| 1,322
| 4.69863
| 0.342466
| 0.046647
| 0.034985
| 0.049563
| 0.708455
| 0.708455
| 0.708455
| 0.708455
| 0.708455
| 0.565598
| 0
| 0.022989
| 0.341906
| 1,322
| 55
| 63
| 24.036364
| 0.765517
| 0
| 0
| 0.790698
| 0
| 0
| 0.293313
| 0.034954
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.093023
| 0.023256
| 0
| 0.139535
| 0.116279
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8bd1c5796cbd6d14d55320acd635b285009c7322
| 16,729
|
py
|
Python
|
pyastbuilder/tests/sparqlparser/func_unittest.py
|
TNO/pyAST
|
f202b1cd2bcc7cf5c5ad9ef5402d5ebea490a496
|
[
"MIT"
] | 4
|
2017-12-05T12:43:46.000Z
|
2021-08-12T11:37:45.000Z
|
pyastbuilder/tests/sparqlparser/func_unittest.py
|
TNO/pyAST
|
f202b1cd2bcc7cf5c5ad9ef5402d5ebea490a496
|
[
"MIT"
] | 9
|
2017-08-24T08:55:17.000Z
|
2017-10-18T11:57:14.000Z
|
pyastbuilder/tests/sparqlparser/func_unittest.py
|
TNO/pyAST
|
f202b1cd2bcc7cf5c5ad9ef5402d5ebea490a496
|
[
"MIT"
] | null | null | null |
'''
Created on 20 apr. 2016
@author: jeroenbruijning
'''
import unittest
from parsertools.parsers.sparqlparser import SPARQLParser, SPARQLParseException
from parsertools.parsers.sparqlparser import stripComments, parseQuery, unescapeUcode
class Test(unittest.TestCase):
    """Unit tests for the parsertools SPARQL parser.

    Covers ParseStruct element behaviour (parse/check, copy, str, labeled
    dot-access, updateWith), tree navigation and search, the dump format,
    comment stripping, prefix/base bookkeeping, IRI expansion and unicode
    unescaping.

    NOTE(review): this block was recovered from a whitespace-mangled dump;
    indentation (and any blank lines inside the multi-line expected-value
    string literals) has been reconstructed and should be confirmed against
    the upstream repository before trusting exact-match assertions.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    # ParseStruct tests

    def testParse(self):
        # A valid RDFLiteral parses and passes its own check().
        s = "'work' ^^<work:>"
        r = SPARQLParser.RDFLiteral(s)
        assert r.check()

    def testCopy(self):
        # copy() yields an equal but distinct object.
        s = "'work' ^^<work:>"
        r = SPARQLParser.RDFLiteral(s)
        r_copy = r.copy()
        assert r_copy == r
        assert not r_copy is r

    def testStr(self):
        # str() normalises the whitespace around the ^^ datatype marker.
        s = "'work' ^^<work:>"
        r = SPARQLParser.RDFLiteral(s)
        assert r.__str__() == "'work' ^^ <work:>"

    def testLabelDotAccess(self):
        # Labeled children are readable via dot access but direct
        # assignment is rejected; updateWith() is the supported mutator.
        s = "'work' ^^<work:>"
        r = SPARQLParser.RDFLiteral(s)
        assert str(r.lexical_form) == "'work'", r.lexical_form
        r_copy = r.copy()
        try:
            r_copy.lexical_form = SPARQLParser.String("'work2'")
        except AttributeError as e:
            assert str(e) == 'Direct setting of attributes not allowed. To change an element e, try e.updateWith() instead.'
        s = '<c:check#22?> ( $var, ?var )'
        r = SPARQLParser.PrimaryExpression(s, postParseCheck=False)
        assert r.iriOrFunction.iri == SPARQLParser.iri('<c:check#22?>', postParseCheck=False)

    def testUpdateWith(self):
        # updateWith() mutates an element in place; equality tracks content.
        s = "'work' ^^<work:>"
        r = SPARQLParser.RDFLiteral(s)
        r_copy = r.copy()
        r_copy.lexical_form.updateWith("'work2'")
        assert r_copy != r
        r_copy.lexical_form.updateWith("'work'")
        assert r_copy == r
        q = '''
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?p WHERE
{
?p a foaf:Person
}
'''
        r = parseQuery(q)
        r.expandIris()
        # Second IRIREF in document order is foaf:Person, now expanded.
        subjpath = r.searchElements(element_type=SPARQLParser.IRIREF, value=None)[1]
        assert str(subjpath.getParent()) == '<http://xmlns.com/foaf/0.1/Person>'
        assert str(subjpath.getAncestors()) == '[iri("<http://xmlns.com/foaf/0.1/Person>"), GraphTerm("<http://xmlns.com/foaf/0.1/Person>"), VarOrTerm("<http://xmlns.com/foaf/0.1/Person>"), GraphNodePath("<http://xmlns.com/foaf/0.1/Person>"), ObjectPath("<http://xmlns.com/foaf/0.1/Person>"), ObjectListPath("<http://xmlns.com/foaf/0.1/Person>"), PropertyListPathNotEmpty("a <http://xmlns.com/foaf/0.1/Person>"), TriplesSameSubjectPath("?p a <http://xmlns.com/foaf/0.1/Person>"), TriplesBlock("?p a <http://xmlns.com/foaf/0.1/Person>"), GroupGraphPatternSub("?p a <http://xmlns.com/foaf/0.1/Person>"), GroupGraphPattern("{ ?p a <http://xmlns.com/foaf/0.1/Person> }"), WhereClause("WHERE { ?p a <http://xmlns.com/foaf/0.1/Person> }"), SelectQuery("SELECT ?p WHERE { ?p a <http://xmlns.com/foaf/0.1/Person> }"), Query("PREFIX foaf: <http://xmlns.com/foaf/0.1/> SELECT ?p WHERE { ?p a <http://xmlns.com/foaf/0.1/Person> }"), QueryUnit("PREFIX foaf: <http://xmlns.com/foaf/0.1/> SELECT ?p WHERE { ?p a <http://xmlns.com/foaf/0.1/Person> }")]'
        assert r.hasParentPointers()

    def testBranchAndAtom(self):
        # An RDFLiteral is a branch node; a bare IRIREF is an atom.
        s = "'work' ^^<work:>"
        r = SPARQLParser.RDFLiteral(s)
        assert r.isBranch()
        assert not r.isAtom()
        assert (SPARQLParser.IRIREF('<ftp://test>')).isAtom()

    def testDescend(self):
        # descend() walks down single-child chains until a branch or atom.
        s = '(DISTINCT "*Expression*", "*Expression*", "*Expression*" )'
        r = SPARQLParser.ArgList(s)
        assert r.descend() == r
        v = r.searchElements(element_type=SPARQLParser.STRING_LITERAL2)
        assert v[0].isAtom()
        assert not v[0].isBranch()
        e = r.searchElements(element_type=SPARQLParser.Expression)[0]
        d = e.descend()
        assert d.isAtom()

    def testStripComments(self):
        # stripComments takes a list of lines and must drop '#' comments
        # while leaving '#' inside string/IRI tokens untouched.
        s1 = """
<c:check#22?> ( $var, ?var )
# bla
'sdfasf# sdfsfd' # comment
"""[1:-1].split('\n')
        s2 = """
<c:check#22?> ( $var, ?var )
'sdfasf# sdfsfd'
"""[1:-1]
        assert stripComments(s1) == s2

    def testSearchElements(self):
        # searchElements with/without labeledOnly and value filters.
        s = '<c:check#22?> ( $var, ?var )'
        r = SPARQLParser.PrimaryExpression(s, postParseCheck=False)
        found = r.searchElements()
        assert len(found) == 32, len(found)
        found = r.searchElements(labeledOnly=False)
        assert len(found) == 32, len(found)
        found = r.searchElements(labeledOnly=True)
        assert len(found) == 4, len(found)
        found = r.searchElements(value='<c:check#22?>')
        assert len(found) == 2, len(found)
        assert type(found[0]) == SPARQLParser.iri
        assert found[0].getLabel() == 'iri'
        assert found[0].__str__() == '<c:check#22?>'

    def testGetChildOrAncestors(self):
        # getChildren()/getAncestors() navigation from a found ArgList.
        s = '<c:check#22?> ( $var, ?var )'
        r = SPARQLParser.PrimaryExpression(s, postParseCheck=False)
        found = r.searchElements(element_type=SPARQLParser.ArgList)
        arglist = found[0]
        assert(len(arglist.getChildren())) == 4, len(arglist.getChildren())
        ancestors = arglist.getAncestors()
        assert str(ancestors) == '[iriOrFunction("<c:check#22?> ( $var , ?var )"), PrimaryExpression("<c:check#22?> ( $var , ?var )")]', str(ancestors)

    def testParseQuery(self):
        # Smoke tests: both a query unit and an update unit must parse.
        s = 'BASE <work:22?> SELECT REDUCED $var1 ?var2 (("*Expression*") AS $var3) { SELECT * {} } GROUP BY ROUND ( "*Expression*") VALUES $S { <t:testIri> <t:testIri> }'
        parseQuery(s)
        s = 'BASE <prologue:22> PREFIX prologue: <prologue:33> LOAD <t:testIri> ; BASE <prologue2:42> PREFIX prologue2: <prologue3:33>'
        parseQuery(s)

    def testDump(self):
        # dump() must reproduce this exact tree rendering.
        s = '(DISTINCT "*Expression*", "*Expression*", "*Expression*" )'
        s_dump = '''
[ArgList] /( DISTINCT "*Expression*" , "*Expression*" , "*Expression*" )/
| [LPAR] /(/
| | (
| > distinct:
| [DISTINCT] /DISTINCT/
| | DISTINCT
| > argument:
| [Expression] /"*Expression*"/
| | [ConditionalOrExpression] /"*Expression*"/
| | | [ConditionalAndExpression] /"*Expression*"/
| | | | [ValueLogical] /"*Expression*"/
| | | | | [RelationalExpression] /"*Expression*"/
| | | | | | [NumericExpression] /"*Expression*"/
| | | | | | | [AdditiveExpression] /"*Expression*"/
| | | | | | | | [MultiplicativeExpression] /"*Expression*"/
| | | | | | | | | [UnaryExpression] /"*Expression*"/
| | | | | | | | | | [PrimaryExpression] /"*Expression*"/
| | | | | | | | | | | [RDFLiteral] /"*Expression*"/
| | | | | | | | | | | | > lexical_form:
| | | | | | | | | | | | [String] /"*Expression*"/
| | | | | | | | | | | | | [STRING_LITERAL2] /"*Expression*"/
| | | | | | | | | | | | | | "*Expression*"
| ,
| > argument:
| [Expression] /"*Expression*"/
| | [ConditionalOrExpression] /"*Expression*"/
| | | [ConditionalAndExpression] /"*Expression*"/
| | | | [ValueLogical] /"*Expression*"/
| | | | | [RelationalExpression] /"*Expression*"/
| | | | | | [NumericExpression] /"*Expression*"/
| | | | | | | [AdditiveExpression] /"*Expression*"/
| | | | | | | | [MultiplicativeExpression] /"*Expression*"/
| | | | | | | | | [UnaryExpression] /"*Expression*"/
| | | | | | | | | | [PrimaryExpression] /"*Expression*"/
| | | | | | | | | | | [RDFLiteral] /"*Expression*"/
| | | | | | | | | | | | > lexical_form:
| | | | | | | | | | | | [String] /"*Expression*"/
| | | | | | | | | | | | | [STRING_LITERAL2] /"*Expression*"/
| | | | | | | | | | | | | | "*Expression*"
| ,
| > argument:
| [Expression] /"*Expression*"/
| | [ConditionalOrExpression] /"*Expression*"/
| | | [ConditionalAndExpression] /"*Expression*"/
| | | | [ValueLogical] /"*Expression*"/
| | | | | [RelationalExpression] /"*Expression*"/
| | | | | | [NumericExpression] /"*Expression*"/
| | | | | | | [AdditiveExpression] /"*Expression*"/
| | | | | | | | [MultiplicativeExpression] /"*Expression*"/
| | | | | | | | | [UnaryExpression] /"*Expression*"/
| | | | | | | | | | [PrimaryExpression] /"*Expression*"/
| | | | | | | | | | | [RDFLiteral] /"*Expression*"/
| | | | | | | | | | | | > lexical_form:
| | | | | | | | | | | | [String] /"*Expression*"/
| | | | | | | | | | | | | [STRING_LITERAL2] /"*Expression*"/
| | | | | | | | | | | | | | "*Expression*"
| [RPAR] /)/
| | )
'''[1:]
        r = SPARQLParser.ArgList(s)
        assert r.dump() == s_dump

    def testPrefixesAndBase(self):
        # Every element reports its visible prefixes and base IRI; the
        # second answer run passes an external base ('ftp://nothing/')
        # which applies until the first in-query BASE declaration.
        s = 'BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>'
        r = parseQuery(s)
        answer1 = '''
UpdateUnit
BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[]
None
UpdateUnit
BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[]
None
Update
BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[]
None
Prologue
BASE <prologue:22/> PREFIX prologue1: <prologue:33>
[('prologue1:', 'prologue:33')]
prologue:22/
BaseDecl
BASE <prologue:22/>
[('prologue1:', 'prologue:33')]
prologue:22/
BASE
BASE
[('prologue1:', 'prologue:33')]
prologue:22/
IRIREF
<prologue:22/>
[('prologue1:', 'prologue:33')]
prologue:22/
PrefixDecl
PREFIX prologue1: <prologue:33>
[('prologue1:', 'prologue:33')]
prologue:22/
PREFIX
PREFIX
[('prologue1:', 'prologue:33')]
prologue:22/
PNAME_NS
prologue1:
[('prologue1:', 'prologue:33')]
prologue:22/
IRIREF
<prologue:33>
[('prologue1:', 'prologue:33')]
prologue:22/
Update1
LOAD <t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
Load
LOAD <t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
LOAD
LOAD
[('prologue1:', 'prologue:33')]
prologue:22/
iri
<t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
IRIREF
<t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
SEMICOL
;
[('prologue1:', 'prologue:33')]
prologue:22/
Update
BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[('prologue1:', 'prologue:33')]
prologue:22/
Prologue
BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BaseDecl
BASE <prologue:44>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BASE
BASE
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
IRIREF
<prologue:44>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BaseDecl
BASE </exttra>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BASE
BASE
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
IRIREF
</exttra>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
PrefixDecl
PREFIX prologue2: <prologue:55>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
PREFIX
PREFIX
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
PNAME_NS
prologue2:
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
IRIREF
<prologue:55>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
'''
        r_answer1 = ''
        for elt in r.searchElements():
            for e in [elt.__class__.__name__, elt, sorted(elt.getPrefixes().items()), elt.getBaseiri()]:
                r_answer1 += str(e) + '\n'
            r_answer1 += '\n'
        assert answer1.strip() == r_answer1.strip()
        r = parseQuery(s, base='ftp://nothing/')
        answer2 = '''
UpdateUnit
BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[]
ftp://nothing/
UpdateUnit
BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[]
ftp://nothing/
Update
BASE <prologue:22/> PREFIX prologue1: <prologue:33> LOAD <t:testIri> ; BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[]
ftp://nothing/
Prologue
BASE <prologue:22/> PREFIX prologue1: <prologue:33>
[('prologue1:', 'prologue:33')]
prologue:22/
BaseDecl
BASE <prologue:22/>
[('prologue1:', 'prologue:33')]
prologue:22/
BASE
BASE
[('prologue1:', 'prologue:33')]
prologue:22/
IRIREF
<prologue:22/>
[('prologue1:', 'prologue:33')]
prologue:22/
PrefixDecl
PREFIX prologue1: <prologue:33>
[('prologue1:', 'prologue:33')]
prologue:22/
PREFIX
PREFIX
[('prologue1:', 'prologue:33')]
prologue:22/
PNAME_NS
prologue1:
[('prologue1:', 'prologue:33')]
prologue:22/
IRIREF
<prologue:33>
[('prologue1:', 'prologue:33')]
prologue:22/
Update1
LOAD <t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
Load
LOAD <t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
LOAD
LOAD
[('prologue1:', 'prologue:33')]
prologue:22/
iri
<t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
IRIREF
<t:testIri>
[('prologue1:', 'prologue:33')]
prologue:22/
SEMICOL
;
[('prologue1:', 'prologue:33')]
prologue:22/
Update
BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[('prologue1:', 'prologue:33')]
prologue:22/
Prologue
BASE <prologue:44> BASE </exttra> PREFIX prologue2: <prologue:55>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BaseDecl
BASE <prologue:44>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BASE
BASE
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
IRIREF
<prologue:44>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BaseDecl
BASE </exttra>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
BASE
BASE
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
IRIREF
</exttra>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
PrefixDecl
PREFIX prologue2: <prologue:55>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
PREFIX
PREFIX
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
PNAME_NS
prologue2:
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
IRIREF
<prologue:55>
[('prologue1:', 'prologue:33'), ('prologue2:', 'prologue:55')]
prologue:/exttra
'''
        r_answer2 = ''
        for elt in r.searchElements():
            for e in [elt.__class__.__name__, elt, sorted(elt.getPrefixes().items()), elt.getBaseiri()]:
                r_answer2 += str(e) + '\n'
            r_answer2 += '\n'
        assert answer2.strip() == r_answer2.strip()

    def testExpandIris(self):
        # expandIris() rewrites prefixed names and relative IRIs to
        # absolute IRIREFs, honouring PREFIX and BASE declarations.
        s1 = '''
PREFIX dc: <http://purl.org/dc/elements/1.1/>
SELECT ?title
WHERE { <http://example.org/book/book1> dc:title ?title }
'''[1:-1]
        s2 = '''
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX : <http://example.org/book/>
SELECT $title
WHERE { :book1 dc:title $title }
'''[1:-1]
        s3 = '''
BASE <http://example.org/book/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
SELECT $title
WHERE { <book1> dc:title ?title }
'''[1:-1]
        r1 = parseQuery(s1)
        r2 = parseQuery(s2)
        r3 = parseQuery(s3)
        r1.expandIris()
        r2.expandIris()
        r3.expandIris()
        assert str(r1) == 'PREFIX dc: <http://purl.org/dc/elements/1.1/> SELECT ?title WHERE { <http://example.org/book/book1> <http://purl.org/dc/elements/1.1/title> ?title }'
        assert str(r2) == 'PREFIX dc: <http://purl.org/dc/elements/1.1/> PREFIX : <http://example.org/book/> SELECT $title WHERE { <http://example.org/book/book1> <http://purl.org/dc/elements/1.1/title> $title }'
        assert str(r3) == 'BASE <http://example.org/book/> PREFIX dc: <http://purl.org/dc/elements/1.1/> SELECT $title WHERE { <http://example.org/book/book1> <http://purl.org/dc/elements/1.1/title> ?title }'

    def testUnescapeUcode(self):
        # Both \\UXXXXXXXX and \\uXXXX escapes are decoded to characters.
        s = 'abra\\U000C00AAcada\\u00AAbr\u99DDa'
        assert unescapeUcode(s) == 'abracadaªbr駝a'
if __name__ == "__main__":
    # Run the full test suite when executed directly.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| 29.400703
| 1,038
| 0.58814
| 1,774
| 16,729
| 5.506201
| 0.124577
| 0.067568
| 0.122543
| 0.082924
| 0.765663
| 0.727273
| 0.70864
| 0.693898
| 0.687654
| 0.668714
| 0
| 0.045164
| 0.20587
| 16,729
| 569
| 1,039
| 29.400703
| 0.690026
| 0.006635
| 0
| 0.722101
| 0
| 0.030635
| 0.692595
| 0.028296
| 0
| 0
| 0
| 0
| 0.078775
| 1
| 0.037199
| false
| 0.004376
| 0.006565
| 0
| 0.045952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4744e9d3ab3b10fbf604ae8ab2a9fc793da2f674
| 2,547
|
py
|
Python
|
openprescribing/frontend/tests/test_managers.py
|
rebkwok/openprescribing
|
28c7500a7e4cb725fc6cda0f8c58b07ac7e916a4
|
[
"MIT"
] | null | null | null |
openprescribing/frontend/tests/test_managers.py
|
rebkwok/openprescribing
|
28c7500a7e4cb725fc6cda0f8c58b07ac7e916a4
|
[
"MIT"
] | null | null | null |
openprescribing/frontend/tests/test_managers.py
|
rebkwok/openprescribing
|
28c7500a7e4cb725fc6cda0f8c58b07ac7e916a4
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from frontend.models import MeasureValue
class MeasureValueManagerTests(TestCase):
    """Checks MeasureValue.objects.by_ccg / by_practice row counts for
    organisation code(s), measure_id and tags filters against the
    'one_month_of_measures' fixture."""

    fixtures = ['one_month_of_measures']

    def _ccg_count(self, orgs, **filters):
        # Number of rows the CCG-level query returns for these filters.
        return len(MeasureValue.objects.by_ccg(orgs, **filters))

    def _practice_count(self, orgs, **filters):
        # Number of rows the practice-level query returns for these filters.
        return len(MeasureValue.objects.by_practice(orgs, **filters))

    def test_by_ccg_with_no_org(self):
        self.assertEqual(self._ccg_count([]), 2)

    def test_by_ccg_with_org(self):
        self.assertEqual(self._ccg_count(['04D']), 1)

    def test_by_ccg_with_orgs(self):
        self.assertEqual(self._ccg_count(['04D', '02Q']), 2)

    def test_by_ccg_with_measure(self):
        self.assertEqual(self._ccg_count([], measure_id='cerazette'), 2)
        self.assertEqual(self._ccg_count([], measure_id='bananas'), 0)

    def test_by_ccg_with_tag(self):
        self.assertEqual(self._ccg_count([], tags=['core']), 2)
        self.assertEqual(self._ccg_count([], tags=['lowpriority']), 0)

    def test_by_ccg_with_tags(self):
        # Tags combine conjunctively: no row carries both.
        self.assertEqual(self._ccg_count([], tags=['core', 'lowpriority']), 0)

    def test_by_practice_with_no_org(self):
        self.assertEqual(self._practice_count([]), 9)

    def test_by_practice_with_pct_org(self):
        self.assertEqual(self._practice_count(['04D']), 1)

    def test_by_practice_with_practice_org(self):
        self.assertEqual(self._practice_count(['C83051']), 1)

    def test_by_practice_with_orgs(self):
        self.assertEqual(self._practice_count(['C83051', '02Q']), 8)

    def test_by_practice_with_measure(self):
        self.assertEqual(
            self._practice_count(['C83051'], measure_id='cerazette'), 1)
        self.assertEqual(
            self._practice_count(['C83051'], measure_id='bananas'), 0)

    def test_by_practice_with_tag(self):
        self.assertEqual(self._practice_count(['C83051'], tags=['core']), 1)
        self.assertEqual(
            self._practice_count(['C83051'], tags=['lowpriority']), 0)

    def test_by_practice_with_tags(self):
        self.assertEqual(
            self._practice_count(['C83051'], tags=['core', 'lowpriority']), 0)
| 33.513158
| 75
| 0.649784
| 321
| 2,547
| 4.906542
| 0.140187
| 0.161905
| 0.23746
| 0.259048
| 0.897778
| 0.846984
| 0.846984
| 0.649524
| 0.504762
| 0.417778
| 0
| 0.030861
| 0.211229
| 2,547
| 75
| 76
| 33.96
| 0.753111
| 0
| 0
| 0.345455
| 0
| 0
| 0.066745
| 0.008245
| 0
| 0
| 0
| 0
| 0.309091
| 1
| 0.236364
| false
| 0
| 0.036364
| 0
| 0.309091
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4776d8a97cc5e406953b981c6556a6d522c11b46
| 214
|
py
|
Python
|
api/words_vector/models.py
|
leandrocamposcardoso/VetorDePalavras
|
76d442d0343e85a0edc55ca91b76480c30b3127a
|
[
"MIT"
] | null | null | null |
api/words_vector/models.py
|
leandrocamposcardoso/VetorDePalavras
|
76d442d0343e85a0edc55ca91b76480c30b3127a
|
[
"MIT"
] | null | null | null |
api/words_vector/models.py
|
leandrocamposcardoso/VetorDePalavras
|
76d442d0343e85a0edc55ca91b76480c30b3127a
|
[
"MIT"
] | null | null | null |
from django.db import models
from custom_fields import ListField
class Logs(models.Model):
    """One record per processing run: the input files plus the derived
    vocabulary and word vectors.

    NOTE(review): ListField is a project-local custom field (see
    custom_fields); its storage format is not visible from here.
    """
    files = ListField()
    # Optional until the run has computed them (nullable/blank).
    vocabulary = ListField(null=True, blank=True)
    vectors = ListField(null=True, blank=True)
| 23.777778
| 49
| 0.742991
| 28
| 214
| 5.642857
| 0.607143
| 0.164557
| 0.21519
| 0.278481
| 0.329114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163551
| 214
| 8
| 50
| 26.75
| 0.882682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
47bb152600fc41de2ec8a1427ba054b5e6f16288
| 170
|
py
|
Python
|
University Admission Procedure/task/university_stage1.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | null | null | null |
University Admission Procedure/task/university_stage1.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | null | null | null |
University Admission Procedure/task/university_stage1.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | 3
|
2020-12-19T13:48:06.000Z
|
2021-08-12T18:36:33.000Z
|
def get_mean(a, b, c):
    """Return the arithmetic mean of the three scores."""
    return (a + b + c) / 3
# Read three exam scores from stdin, then print the mean followed by the
# acceptance message on its own line.
scores = [int(input()) for _ in range(3)]
mean = get_mean(*scores)
print(mean, "Congratulations, you are accepted!", sep="\n")
| 42.5
| 59
| 0.617647
| 29
| 170
| 3.551724
| 0.62069
| 0.23301
| 0.058252
| 0.31068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0.141176
| 170
| 4
| 59
| 42.5
| 0.69863
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
47dd782d49714e6fbc9cb1c79e2c4462f5851fb8
| 770
|
py
|
Python
|
contrib/python/Jinja2/jinja2/_identifier.py
|
timgates42/catboost
|
2fa492f5e32ba14c890dc4b3313cfe1024ca4839
|
[
"Apache-2.0"
] | 2
|
2021-01-29T04:27:28.000Z
|
2021-01-29T04:28:27.000Z
|
contrib/python/Jinja2/jinja2/_identifier.py
|
birichie/catboost
|
de75c6af12cf490700e76c22072fbdc15b35d679
|
[
"Apache-2.0"
] | 1
|
2021-12-09T23:08:25.000Z
|
2021-12-09T23:08:25.000Z
|
contrib/python/Jinja2/jinja2/_identifier.py
|
birichie/catboost
|
de75c6af12cf490700e76c22072fbdc15b35d679
|
[
"Apache-2.0"
] | 1
|
2021-04-27T23:40:09.000Z
|
2021-04-27T23:40:09.000Z
|
# -*- coding: utf-8 -*-
import re
# generated by scripts/generate_identifier_pattern.py
# Matches one or more identifier characters: \w plus the Unicode combining
# marks that \w alone excludes, so identifiers carrying diacritics are
# matched as a single run.
# NOTE(review): this character class is auto-generated (see the generator
# script referenced above) — regenerate it rather than editing by hand.
pattern = re.compile(
    r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
)
| 96.25
| 656
| 0.119481
| 185
| 770
| 3.005405
| 0.989189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005369
| 0.032468
| 770
| 7
| 657
| 110
| 0.115436
| 0.109091
| 0
| 0
| 1
| 0
| 0.931085
| 0.931085
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9a22867cccb5bc200a032cfb57d090ec6851db04
| 95
|
py
|
Python
|
Models/__init__.py
|
yasesf93/CrossGradientAggregation
|
276940b893fcc5f740dcb273efe7f06990c985b9
|
[
"BSD-3-Clause"
] | 4
|
2021-06-24T17:25:41.000Z
|
2021-11-16T07:08:30.000Z
|
Models/__init__.py
|
yasesf93/CrossGradientAggregation
|
276940b893fcc5f740dcb273efe7f06990c985b9
|
[
"BSD-3-Clause"
] | 2
|
2021-07-14T11:13:57.000Z
|
2021-07-16T21:50:31.000Z
|
Models/__init__.py
|
yasesf93/CrossGradientAggregation
|
276940b893fcc5f740dcb273efe7f06990c985b9
|
[
"BSD-3-Clause"
] | 1
|
2021-06-24T17:25:43.000Z
|
2021-06-24T17:25:43.000Z
|
from .preresnet import *
from .vgg import *
from .wide_resnet import *
from .resnet import *
| 23.75
| 27
| 0.726316
| 13
| 95
| 5.230769
| 0.461538
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189474
| 95
| 4
| 28
| 23.75
| 0.883117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7be3e1c3d4f1eed485f71b969c725501ed0213a3
| 50
|
py
|
Python
|
molsysmt/tools/networkx_Graph/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/networkx_Graph/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/networkx_Graph/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
from .is_networkx_Graph import is_networkx_Graph
| 16.666667
| 48
| 0.88
| 8
| 50
| 5
| 0.625
| 0.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 50
| 2
| 49
| 25
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d06f56cfab0a3093ce641eec454894d1a17df5af
| 41
|
py
|
Python
|
genomics_demo/__init__.py
|
nickdelgrosso/genomics_workshop_demo
|
9890017a4348d9a97eda8f5977a8a02ed24610c3
|
[
"MIT"
] | 1
|
2019-04-12T02:40:54.000Z
|
2019-04-12T02:40:54.000Z
|
genomics_demo/__init__.py
|
nickdelgrosso/genomics_workshop_demo
|
9890017a4348d9a97eda8f5977a8a02ed24610c3
|
[
"MIT"
] | 1
|
2018-10-01T13:11:51.000Z
|
2018-10-01T13:14:17.000Z
|
genomics_demo/__init__.py
|
nickdelgrosso/genomics_workshop_demo
|
9890017a4348d9a97eda8f5977a8a02ed24610c3
|
[
"MIT"
] | 12
|
2018-10-01T09:35:35.000Z
|
2018-10-01T09:49:27.000Z
|
from .dna import DNA
from .rna import RNA
| 20.5
| 20
| 0.780488
| 8
| 41
| 4
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 21
| 20.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d097793f041e36848acc849e03b9787f4833511e
| 136
|
py
|
Python
|
cw_wrapper/scope/__init__.py
|
BeneciaLee/cw_wrapper
|
a1562aa04e11acf9c1646777e2edc52981df9d2e
|
[
"MIT"
] | 3
|
2021-06-30T05:36:48.000Z
|
2021-07-01T10:24:59.000Z
|
cw_wrapper/scope/__init__.py
|
BeneciaLee/cw_wrapper
|
a1562aa04e11acf9c1646777e2edc52981df9d2e
|
[
"MIT"
] | 1
|
2021-07-12T12:11:35.000Z
|
2021-07-12T12:11:35.000Z
|
cw_wrapper/scope/__init__.py
|
BeneciaLee/cw_wrapper
|
a1562aa04e11acf9c1646777e2edc52981df9d2e
|
[
"MIT"
] | 2
|
2021-06-30T08:13:41.000Z
|
2021-07-01T09:18:04.000Z
|
__all__ = ['CWScope', 'cw_firmware_auto_update']
from .cw_scope import CWScope
from .cw_firmware_update import cw_firmware_auto_update
| 27.2
| 55
| 0.830882
| 20
| 136
| 5
| 0.45
| 0.3
| 0.28
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095588
| 136
| 4
| 56
| 34
| 0.813008
| 0
| 0
| 0
| 0
| 0
| 0.220588
| 0.169118
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d0a698bc9e8dbf90047ce807983a957fa1a05a85
| 94
|
py
|
Python
|
rawsec_cli/__init__.py
|
noraj/rawsec_cli
|
e671ad0d86d8627a0eff1ff13abf1227b6d26114
|
[
"MIT"
] | null | null | null |
rawsec_cli/__init__.py
|
noraj/rawsec_cli
|
e671ad0d86d8627a0eff1ff13abf1227b6d26114
|
[
"MIT"
] | null | null | null |
rawsec_cli/__init__.py
|
noraj/rawsec_cli
|
e671ad0d86d8627a0eff1ff13abf1227b6d26114
|
[
"MIT"
] | null | null | null |
# autogenerated
__version__ = "1.1.1"
__commit__ = "0403f8d920e7940a633d25f2b7a8a3e9a31af7f0"
| 23.5
| 55
| 0.819149
| 7
| 94
| 9.857143
| 0.714286
| 0.057971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.337209
| 0.085106
| 94
| 3
| 56
| 31.333333
| 0.465116
| 0.138298
| 0
| 0
| 1
| 0
| 0.56962
| 0.506329
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d0bce9ec07ed3815d6afb9d9ba75f8ec8084f54c
| 31
|
py
|
Python
|
pil_resize_aspect_ratio/enums/__init__.py
|
kkristof200/py_resize_image
|
33824c691481b2166ade18e7fa6b5583ceeaa4f6
|
[
"MIT"
] | null | null | null |
pil_resize_aspect_ratio/enums/__init__.py
|
kkristof200/py_resize_image
|
33824c691481b2166ade18e7fa6b5583ceeaa4f6
|
[
"MIT"
] | null | null | null |
pil_resize_aspect_ratio/enums/__init__.py
|
kkristof200/py_resize_image
|
33824c691481b2166ade18e7fa6b5583ceeaa4f6
|
[
"MIT"
] | null | null | null |
from .fill_type import FillType
| 31
| 31
| 0.870968
| 5
| 31
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d0cc4bfdff292b18362e240a3786014dd325e463
| 195
|
py
|
Python
|
tests/conftest.py
|
mmEissen/deptree
|
bc0fac36418ccfa805fc9f405442409cf1a28ade
|
[
"MIT"
] | 1
|
2018-06-13T16:54:38.000Z
|
2018-06-13T16:54:38.000Z
|
tests/conftest.py
|
mmEissen/deptree
|
bc0fac36418ccfa805fc9f405442409cf1a28ade
|
[
"MIT"
] | 6
|
2018-03-26T09:20:46.000Z
|
2018-04-23T09:50:05.000Z
|
tests/conftest.py
|
mmEissen/importgraph
|
bc0fac36418ccfa805fc9f405442409cf1a28ade
|
[
"MIT"
] | null | null | null |
import pytest
from unittest.mock import MagicMock
from importgraph import ImportAction
@pytest.fixture
def import_action():
return ImportAction('some.module.name', {}, [], 0, MagicMock())
| 19.5
| 67
| 0.753846
| 23
| 195
| 6.347826
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005917
| 0.133333
| 195
| 9
| 68
| 21.666667
| 0.857988
| 0
| 0
| 0
| 0
| 0
| 0.082051
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.833333
| 0.166667
| 1.166667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
d0db185a759b784bebfcd2771e58b1735fb88677
| 58
|
py
|
Python
|
parting/__init__.py
|
danfairs/django-parting
|
80f24781396ab8778960ed4b2a86e96dd9f18bb0
|
[
"BSD-3-Clause"
] | 8
|
2015-04-06T19:59:01.000Z
|
2019-01-22T16:39:26.000Z
|
parting/__init__.py
|
danfairs/django-parting
|
80f24781396ab8778960ed4b2a86e96dd9f18bb0
|
[
"BSD-3-Clause"
] | 1
|
2016-04-14T08:49:31.000Z
|
2016-04-14T08:49:31.000Z
|
parting/__init__.py
|
danfairs/django-parting
|
80f24781396ab8778960ed4b2a86e96dd9f18bb0
|
[
"BSD-3-Clause"
] | 1
|
2018-12-20T13:43:39.000Z
|
2018-12-20T13:43:39.000Z
|
from .models import PartitionForeignKey, PartitionManager
| 29
| 57
| 0.87931
| 5
| 58
| 10.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 58
| 1
| 58
| 58
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d0e95b95265adc4750b71c90b865388e389c5b3e
| 3,364
|
py
|
Python
|
src/store/baseviews.py
|
wqian94/retrodb
|
e0a5655a5986f46872230d0ace8a4a28dab71f28
|
[
"MIT"
] | null | null | null |
src/store/baseviews.py
|
wqian94/retrodb
|
e0a5655a5986f46872230d0ace8a4a28dab71f28
|
[
"MIT"
] | null | null | null |
src/store/baseviews.py
|
wqian94/retrodb
|
e0a5655a5986f46872230d0ace8a4a28dab71f28
|
[
"MIT"
] | null | null | null |
""" Library for base classes of retroactively-updatable views. """
import numbers
class NumericView(numbers.Integral):
""" Wrapper for numeric functions so that views can behave like numbers. """
# General magic functions
def __repr__(self):
return self._value.__repr__()
# Number-related functions
def __abs__(self, *args, **kwargs):
return self._value.__abs__(*args, **kwargs)
def __add__(self, *args, **kwargs):
return self._value.__add__(*args, **kwargs)
def __and__(self, *args, **kwargs):
return self._value.__and__(*args, **kwargs)
def __ceil__(self, *args, **kwargs):
return self._value.__ceil__(*args, **kwargs)
def __eq__(self, *args, **kwargs):
return self._value.__eq__(*args, **kwargs)
def __floor__(self, *args, **kwargs):
return self._value.__floor__(*args, **kwargs)
def __floordiv__(self, *args, **kwargs):
return self._value.__floordiv__(*args, **kwargs)
def __int__(self, *args, **kwargs):
return self._value.__int__(*args, **kwargs)
def __invert__(self, *args, **kwargs):
return self._value.__invert__(*args, **kwargs)
def __le__(self, *args, **kwargs):
return self._value.__le__(*args, **kwargs)
def __lshift__(self, *args, **kwargs):
return self._value.__lshift__(*args, **kwargs)
def __lt__(self, *args, **kwargs):
return self._value.__lt__(*args, **kwargs)
def __mod__(self, *args, **kwargs):
return self._value.__mod__(*args, **kwargs)
def __mul__(self, *args, **kwargs):
return self._value.__mul__(*args, **kwargs)
def __neg__(self, *args, **kwargs):
return self._value.__neg__(*args, **kwargs)
def __or__(self, *args, **kwargs):
return self._value.__or__(*args, **kwargs)
def __pos__(self, *args, **kwargs):
return self._value.__pos__(*args, **kwargs)
def __pow__(self, *args, **kwargs):
return self._value.__pow__(*args, **kwargs)
def __radd__(self, *args, **kwargs):
return self._value.__radd__(*args, **kwargs)
def __rand__(self, *args, **kwargs):
return self._value.__rand__(*args, **kwargs)
def __rfloordiv__(self, *args, **kwargs):
return self._value.__rfloordiv__(*args, **kwargs)
def __rlshift__(self, *args, **kwargs):
return self._value.__rlshift__(*args, **kwargs)
def __rmod__(self, *args, **kwargs):
return self._value.__rmod__(*args, **kwargs)
def __rmul__(self, *args, **kwargs):
return self._value.__rmul__(*args, **kwargs)
def __ror__(self, *args, **kwargs):
return self._value.__ror__(*args, **kwargs)
def __round__(self, *args, **kwargs):
return self._value.__round__(*args, **kwargs)
def __rpow__(self, *args, **kwargs):
return self._value.__rpow__(*args, **kwargs)
def __rrshift__(self, *args, **kwargs):
return self._value.__rrshift__(*args, **kwargs)
def __rshift__(self, *args, **kwargs):
return self._value.__rshift__(*args, **kwargs)
def __rtruediv__(self, *args, **kwargs):
return self._value.__rtruediv__(*args, **kwargs)
def __rxor__(self, *args, **kwargs):
return self._value.__rxor__(*args, **kwargs)
def __truediv__(self, *args, **kwargs):
return self._value.__truediv__(*args, **kwargs)
def __trunc__(self, *args, **kwargs):
return self._value.__trunc__(*args, **kwargs)
def __xor__(self, *args, **kwargs):
return self._value.__xor__(*args, **kwargs)
| 29.508772
| 78
| 0.672711
| 412
| 3,364
| 4.728155
| 0.160194
| 0.349076
| 0.269507
| 0.349076
| 0.50616
| 0.50616
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156956
| 3,364
| 113
| 79
| 29.769912
| 0.686883
| 0.05321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.486111
| false
| 0
| 0.013889
| 0.486111
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ef7ae166dd3c5655215dfc997707002c4e44707d
| 2,853
|
py
|
Python
|
tests/test_datasets.py
|
ricosjp/siml
|
8fc07d798cdedd77622c16221ee44a575d36bad0
|
[
"Apache-2.0"
] | 11
|
2020-12-28T16:22:33.000Z
|
2021-11-14T17:09:27.000Z
|
tests/test_datasets.py
|
ricosjp/siml
|
8fc07d798cdedd77622c16221ee44a575d36bad0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_datasets.py
|
ricosjp/siml
|
8fc07d798cdedd77622c16221ee44a575d36bad0
|
[
"Apache-2.0"
] | 2
|
2021-04-28T09:41:47.000Z
|
2021-07-01T21:18:51.000Z
|
import unittest
import numpy as np
import torch
import siml.datasets as datasets
class TestDatasets(unittest.TestCase):
def test_merge_sparse_tensors_square(self):
stripped_sparse_info = [
{
'size': [2, 2],
'row': torch.Tensor([0, 1, 1]),
'col': torch.Tensor([0, 0, 1]),
'values': torch.Tensor([1., 2., 3.]),
},
{
'size': [2, 2],
'row': torch.Tensor([0, 1, 1]),
'col': torch.Tensor([0, 0, 1]),
'values': torch.Tensor([10., 20., 30.]),
},
{
'size': [2, 2],
'row': torch.Tensor([0, 1, 1]),
'col': torch.Tensor([0, 0, 1]),
'values': torch.Tensor([100., 200., 300.]),
},
]
expected_sparse = np.array([
[1., 0., 0., 0., 0., 0.],
[2., 3., 0., 0., 0., 0.],
[0., 0., 10., 0., 0., 0.],
[0., 0., 20., 30., 0., 0.],
[0., 0., 0., 0., 100., 0.],
[0., 0., 0., 0., 200., 300.],
])
merged_sparse = datasets.merge_sparse_tensors(stripped_sparse_info)
np.testing.assert_almost_equal(
merged_sparse.to_dense().numpy(), expected_sparse)
def test_merge_sparse_tensors_rectangle(self):
stripped_sparse_info = [
{
'size': [2, 5],
'row': torch.Tensor([0, 1, 1, 1]),
'col': torch.Tensor([0, 0, 1, 4]),
'values': torch.Tensor([1., 2., 3., 4.]),
},
{
'size': [3, 4],
'row': torch.Tensor([0, 1, 1, 2]),
'col': torch.Tensor([0, 0, 1, 3]),
'values': torch.Tensor([10., 20., 30., 40.]),
},
{
'size': [4, 2],
'row': torch.Tensor([0, 1, 1, 3]),
'col': torch.Tensor([0, 0, 1, 1]),
'values': torch.Tensor([100., 200., 300., 400.]),
},
]
expected_sparse = np.array([
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[2., 3., 0., 0., 4., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 10., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 20., 30., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 40., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 100., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 200., 300.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 400.],
])
merged_sparse = datasets.merge_sparse_tensors(stripped_sparse_info)
np.testing.assert_almost_equal(
merged_sparse.to_dense().numpy(), expected_sparse)
| 36.113924
| 75
| 0.375745
| 357
| 2,853
| 2.907563
| 0.137255
| 0.206166
| 0.254335
| 0.292871
| 0.861272
| 0.813102
| 0.651252
| 0.587669
| 0.569364
| 0.563584
| 0
| 0.146597
| 0.397476
| 2,853
| 78
| 76
| 36.576923
| 0.457243
| 0
| 0
| 0.291667
| 0
| 0
| 0.033649
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.027778
| false
| 0
| 0.055556
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4bdd85033c224d066f356287d15f6c1d5c50ca18
| 119
|
py
|
Python
|
blues/datasets/__init__.py
|
Kageshimasu/blues
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
[
"MIT"
] | null | null | null |
blues/datasets/__init__.py
|
Kageshimasu/blues
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
[
"MIT"
] | null | null | null |
blues/datasets/__init__.py
|
Kageshimasu/blues
|
a808fb8da86224f2e597916b04bdbd29376af6bb
|
[
"MIT"
] | 1
|
2021-02-15T07:54:17.000Z
|
2021-02-15T07:54:17.000Z
|
from .classification_dataset import ClassificationDataset
from .object_detection_dataset import ObjectDetectionDataset
| 39.666667
| 60
| 0.915966
| 11
| 119
| 9.636364
| 0.727273
| 0.245283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067227
| 119
| 2
| 61
| 59.5
| 0.954955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4bf04507b422bc4361f0e650c85b766ee0d864c7
| 26
|
py
|
Python
|
hdcs_manager/source/hsm/hsm/db/__init__.py
|
isabella232/HDCS
|
1a5ae046e3ff947cc7a42219b9b1959766687612
|
[
"Apache-2.0"
] | null | null | null |
hdcs_manager/source/hsm/hsm/db/__init__.py
|
isabella232/HDCS
|
1a5ae046e3ff947cc7a42219b9b1959766687612
|
[
"Apache-2.0"
] | 1
|
2021-02-23T19:10:26.000Z
|
2021-02-23T19:10:26.000Z
|
hdcs_manager/source/hsm/hsm/db/__init__.py
|
isabella232/HDCS
|
1a5ae046e3ff947cc7a42219b9b1959766687612
|
[
"Apache-2.0"
] | null | null | null |
from hsm.db.api import *
| 8.666667
| 24
| 0.692308
| 5
| 26
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 26
| 2
| 25
| 13
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4bf065ebe291a982c00f2e079d033e93fc3f8ca4
| 2,889
|
py
|
Python
|
corpus/mnm_early.py
|
mnm-rnd/elsa-voice-asr
|
5c350b6610753629e7f5580468f54ec796f04e16
|
[
"MIT"
] | 1
|
2021-10-01T00:03:00.000Z
|
2021-10-01T00:03:00.000Z
|
corpus/mnm_early.py
|
mnm-rnd/elsa-voice-asr
|
5c350b6610753629e7f5580468f54ec796f04e16
|
[
"MIT"
] | null | null | null |
corpus/mnm_early.py
|
mnm-rnd/elsa-voice-asr
|
5c350b6610753629e7f5580468f54ec796f04e16
|
[
"MIT"
] | 1
|
2021-10-01T00:00:08.000Z
|
2021-10-01T00:00:08.000Z
|
from torch.utils.data import Dataset
from tqdm import tqdm
from pathlib import Path
def read_text(text_file):
with open(text_file, 'r', encoding='utf-8') as out:
return out.readlines()[0].strip()
class MnMAudioDataset(Dataset):
def __init__(self, path, manifest_csv_file, tokenizer, data_transformer, bucket_size, path_from_home=True):
if path_from_home:
main_path = Path.home()
else:
main_path = Path(".")
corpus_path = main_path.joinpath(path)
manifest_csv_path = corpus_path.joinpath(manifest_csv_file)
self.file_text_pair = []
self.data_transformer = data_transformer
self.tokenizer = tokenizer
self.bucket_size = bucket_size
with open(manifest_csv_path, 'r', encoding='utf-8') as mp:
for x in tqdm(mp):
str_vals = x.strip().split(",")
# Preprocess the text
text = read_text(str_vals[-1])
text = self.data_transformer(text)
text = self.tokenizer.encode(text)
self.file_text_pair.append((str_vals[0], text))
def __len__(self):
return len(self.file_text_pair)
def __getitem__(self, index):
if self.bucket_size > 1:
# Return a bucket
index = min(len(self) - self.bucket_size, index)
return self.file_text_pair[index:index+self.bucket_size]
# Return a single sample
return self.file_text_pair[index]
class MnMAudioTextDataset(Dataset):
def __init__(self, path, manifest_csv_file, tokenizer, data_transformer, bucket_size, path_from_home=True):
if path_from_home:
main_path = Path.home()
else:
main_path = Path(".")
corpus_path = main_path.joinpath(path)
manifest_csv_path = corpus_path.joinpath(manifest_csv_file)
self.texts = []
self.data_transformer = data_transformer
self.tokenizer = tokenizer
self.bucket_size = bucket_size
with open(manifest_csv_path, 'r', encoding='utf-8') as mp:
for x in tqdm(mp):
str_vals = x.strip().split(",")
# Preprocess the text
text = read_text(str_vals[-1])
text = self.data_transformer(text)
text = self.tokenizer.encode(text)
self.texts.append(text)
def __len__(self):
return len(self.texts)
def __getitem__(self, index):
if self.bucket_size > 1:
# Return a bucket
index = min(len(self) - self.bucket_size, index)
return self.texts[index:index+self.bucket_size]
# Return a single sample
return self.texts[index]
| 33.593023
| 111
| 0.578055
| 343
| 2,889
| 4.58309
| 0.186589
| 0.076336
| 0.071247
| 0.050891
| 0.833333
| 0.823791
| 0.805344
| 0.770992
| 0.770992
| 0.770992
| 0
| 0.004646
| 0.329526
| 2,889
| 85
| 112
| 33.988235
| 0.806918
| 0.040498
| 0
| 0.689655
| 0
| 0
| 0.007957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0
| 0.051724
| 0.034483
| 0.327586
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
326d44ee5dc00c3eaaea9db2bb520b0e1108e7ef
| 48
|
py
|
Python
|
django_profile_middleware/__init__.py
|
Pear0/django-profile-middleware
|
0d9d44cfac4351a4dea874165c95e792fbef16ae
|
[
"MIT"
] | 33
|
2017-06-29T15:04:20.000Z
|
2020-06-01T08:12:19.000Z
|
django_profile_middleware/__init__.py
|
inovizz/django-profile-middleware
|
0d9d44cfac4351a4dea874165c95e792fbef16ae
|
[
"MIT"
] | null | null | null |
django_profile_middleware/__init__.py
|
inovizz/django-profile-middleware
|
0d9d44cfac4351a4dea874165c95e792fbef16ae
|
[
"MIT"
] | 8
|
2017-08-23T21:50:35.000Z
|
2019-11-11T02:31:31.000Z
|
from middleware import *
from decorator import *
| 24
| 24
| 0.8125
| 6
| 48
| 6.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 25
| 24
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
327ca0932ce7b15e1a9b71e8093d4bcbafc1236f
| 66
|
py
|
Python
|
gobigger/agents/__init__.py
|
luanshaotong/GoBigger
|
00c347a89a660134677d633f39c39123c5ab3deb
|
[
"Apache-2.0"
] | 189
|
2021-10-08T07:55:10.000Z
|
2022-03-31T23:49:43.000Z
|
gobigger/agents/__init__.py
|
luanshaotong/GoBigger
|
00c347a89a660134677d633f39c39123c5ab3deb
|
[
"Apache-2.0"
] | 25
|
2021-11-01T06:59:30.000Z
|
2022-03-22T11:22:27.000Z
|
gobigger/agents/__init__.py
|
luanshaotong/GoBigger
|
00c347a89a660134677d633f39c39123c5ab3deb
|
[
"Apache-2.0"
] | 28
|
2021-10-14T12:23:14.000Z
|
2022-03-31T23:49:45.000Z
|
from .base_agent import BaseAgent
from .bot_agent import BotAgent
| 22
| 33
| 0.848485
| 10
| 66
| 5.4
| 0.7
| 0.407407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 34
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
328c30ddef776f4ee405a847595c2a217f3ab3a0
| 6,953
|
py
|
Python
|
tests/test_queries.py
|
openclimatefix/perceiver-pytorch
|
62c314b302aec95571796684732b2bcd0a81cc75
|
[
"MIT"
] | 7
|
2021-07-30T22:06:26.000Z
|
2022-02-24T09:39:02.000Z
|
tests/test_queries.py
|
openclimatefix/perceiver-pytorch
|
62c314b302aec95571796684732b2bcd0a81cc75
|
[
"MIT"
] | 16
|
2021-07-27T09:58:03.000Z
|
2021-12-16T12:26:53.000Z
|
tests/test_queries.py
|
openclimatefix/perceiver-pytorch
|
62c314b302aec95571796684732b2bcd0a81cc75
|
[
"MIT"
] | null | null | null |
import pytest
import torch
from perceiver_pytorch.queries import LearnableQuery
from perceiver_pytorch.perceiver_io import PerceiverIO
from perceiver_pytorch.utils import encode_position
import einops
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query(layer_shape):
query_creator = LearnableQuery(
channel_dim=32,
query_shape=(6, 16, 16),
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=128,
sine_only=False,
generate_fourier_features=True,
)
x = torch.randn((4, 6, 12, 16, 16))
out = query_creator(x)
# Output is flattened, so should be [B, T*H*W, C]
# Channels is from channel_dim + 3*(num_frequency_bands * 2 + 1)
# 32 + 3*(257) = 771 + 32 = 803
assert out.shape == (4, 16 * 16 * 6, 803)
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query_no_fourier(layer_shape):
query_creator = LearnableQuery(
channel_dim=32,
query_shape=(6, 16, 16),
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=128,
sine_only=False,
generate_fourier_features=False,
)
x = torch.randn((4, 6, 12, 16, 16))
out = query_creator(x)
assert out.shape == (4, 16 * 16 * 6, 32)
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query_qpplication(layer_shape):
output_shape = (6, 16, 16)
query_creator = LearnableQuery(
channel_dim=32,
query_shape=output_shape,
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=32,
sine_only=False,
generate_fourier_features=True,
)
with torch.no_grad():
query_creator.eval()
x = torch.randn((2, 6, 12, 16, 16))
out = query_creator(x)
model = PerceiverIO(depth=2, dim=100, queries_dim=query_creator.output_shape()[-1])
model.eval()
model_input = torch.randn((2, 256, 100))
model_out = model(model_input, queries=out)
# Reshape back to correct shape
model_out = einops.rearrange(
model_out,
"b (t h w) c -> b t c h w",
t=output_shape[0],
h=output_shape[1],
w=output_shape[2],
)
assert model_out.shape == (2, 6, 227, 16, 16)
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query_precomputed_fourier_only(layer_shape):
precomputed_features = encode_position(
1, # Batch size, 1 for this as it will be adapted in forward
axis=(10, 16, 16), # 4 history + 6 future steps
max_frequency=16.0,
num_frequency_bands=128,
sine_only=False,
)
# Only take future ones
precomputed_features = precomputed_features[:, 4:]
query_creator = LearnableQuery(
channel_dim=32,
query_shape=(6, 16, 16),
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=16,
sine_only=False,
precomputed_fourier=precomputed_features,
generate_fourier_features=False,
)
x = torch.randn((4, 6, 12, 16, 16))
out = query_creator(x)
# Output is flattened, so should be [B, T*H*W, C]
# Channels is from channel_dim + 3*(num_frequency_bands * 2 + 1)
# 32 + 3*(257) = 771 + 32 = 803
assert out.shape == (4, 16 * 16 * 6, 803)
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query_precomputed_and_generated_fourer(layer_shape):
precomputed_features = encode_position(
1, # Batch size, 1 for this as it will be adapted in forward
axis=(10, 16, 16), # 4 history + 6 future steps
max_frequency=16.0,
num_frequency_bands=128,
sine_only=False,
)
# Only take future ones
precomputed_features = precomputed_features[:, 4:]
query_creator = LearnableQuery(
channel_dim=32,
query_shape=(6, 16, 16),
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=128,
sine_only=False,
precomputed_fourier=precomputed_features,
generate_fourier_features=True,
)
x = torch.randn((4, 6, 12, 16, 16))
out = query_creator(x)
# Output is flattened, so should be [B, T*H*W, C]
# Channels is from channel_dim + 3*(num_frequency_bands * 2 + 1)
# 32 + 3*(257) = 771 + 32 = 803
# Then add 771 from the precomputed features, to get 803 + 771
assert out.shape == (4, 16 * 16 * 6, 803 + 771)
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query_pass_in_fourier(layer_shape):
precomputed_features = encode_position(
4,
axis=(10, 16, 16), # 4 history + 6 future steps
max_frequency=16.0,
num_frequency_bands=64,
sine_only=False,
)
# Only take future ones
precomputed_features = precomputed_features[:, 4:]
query_creator = LearnableQuery(
channel_dim=32,
query_shape=(6, 16, 16),
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=128,
sine_only=False,
generate_fourier_features=False,
)
x = torch.randn((4, 6, 12, 16, 16))
out = query_creator(x, precomputed_features)
# Output is flattened, so should be [B, T*H*W, C]
# Channels is from channel_dim + 3*(num_frequency_bands * 2 + 1)
# 3*(129) = 389 + 32 = 419
# Since this is less than what is passed to LearnableQuery, we know its using the passed in features
assert out.shape == (4, 16 * 16 * 6, 419)
@pytest.mark.parametrize("layer_shape", ["2d", "3d"])
def test_learnable_query_all_fouriers(layer_shape):
batch_ff = encode_position(
4,
axis=(10, 16, 16), # 4 history + 6 future steps
max_frequency=16.0,
num_frequency_bands=32,
sine_only=False,
)
# Only take future ones
batch_ff = batch_ff[:, 4:]
precomputed_features = encode_position(
1,
axis=(10, 16, 16), # 4 history + 6 future steps
max_frequency=16.0,
num_frequency_bands=64,
sine_only=False,
)
# Only take future ones
precomputed_features = precomputed_features[:, 4:]
query_creator = LearnableQuery(
channel_dim=32,
query_shape=(6, 16, 16),
conv_layer=layer_shape,
max_frequency=64.0,
num_frequency_bands=128,
sine_only=False,
precomputed_fourier=precomputed_features,
generate_fourier_features=True,
)
x = torch.randn((4, 6, 12, 16, 16))
out = query_creator(x, batch_ff)
# Output is flattened, so should be [B, T*H*W, C]
# Channels is from channel_dim + 3*(num_frequency_bands * 2 + 1)
# 3*(129) = 389 + 32 = 419 + 771 from the generated ones + 195 from the batch features
# Since this is less than what is passed to LearnableQuery, we know its using the passed in features
assert out.shape == (4, 16 * 16 * 6, 1385)
| 34.59204
| 104
| 0.633396
| 962
| 6,953
| 4.358628
| 0.133056
| 0.024803
| 0.068924
| 0.051514
| 0.820653
| 0.811352
| 0.804436
| 0.788934
| 0.759361
| 0.759361
| 0
| 0.080286
| 0.25658
| 6,953
| 200
| 105
| 34.765
| 0.730896
| 0.201352
| 0
| 0.68323
| 0
| 0
| 0.023365
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.043478
| false
| 0.006211
| 0.037267
| 0
| 0.080745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
328fda97178b8faeaff2544f59328b4292d8af0e
| 108
|
py
|
Python
|
uncertaintorch/__init__.py
|
jcreinhold/uncertaintorch
|
0cdc9f25fefad938c9f0bd3a6b40dfaa362dfca5
|
[
"Apache-2.0"
] | 1
|
2021-03-21T23:13:45.000Z
|
2021-03-21T23:13:45.000Z
|
uncertaintorch/__init__.py
|
jcreinhold/uncertaintorch
|
0cdc9f25fefad938c9f0bd3a6b40dfaa362dfca5
|
[
"Apache-2.0"
] | null | null | null |
uncertaintorch/__init__.py
|
jcreinhold/uncertaintorch
|
0cdc9f25fefad938c9f0bd3a6b40dfaa362dfca5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from .learn import *
from .models import *
from .plot import *
from .util import *
| 15.428571
| 23
| 0.638889
| 15
| 108
| 4.6
| 0.6
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.203704
| 108
| 6
| 24
| 18
| 0.790698
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
32c3a076a3172d0f2b9f90ac768cb2e513b9bf62
| 259
|
py
|
Python
|
catalog.py
|
TerenceFox/itemcatalog
|
88621ea3e0383d9b43fe9341fa355b3f4928ead4
|
[
"MIT"
] | null | null | null |
catalog.py
|
TerenceFox/itemcatalog
|
88621ea3e0383d9b43fe9341fa355b3f4928ead4
|
[
"MIT"
] | null | null | null |
catalog.py
|
TerenceFox/itemcatalog
|
88621ea3e0383d9b43fe9341fa355b3f4928ead4
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models.category import Category
from app.models.item import Item
from app.models.user import User
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Category': Category, 'Item': Item}
| 28.777778
| 71
| 0.756757
| 40
| 259
| 4.8
| 0.35
| 0.145833
| 0.203125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131274
| 259
| 8
| 72
| 32.375
| 0.853333
| 0
| 0
| 0
| 0
| 0
| 0.069498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.571429
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
088a698cce82a2d7ea09de5bb2721fbaf4baca7f
| 180
|
py
|
Python
|
estimators/gym-network_classification_env/gym_network_classification_env/envs/__init__.py
|
boyuruan/Anomaly-ReactionRL
|
a82da87e2da28ad333a7e19af5a0608390c3312c
|
[
"MIT"
] | 75
|
2018-06-12T10:51:50.000Z
|
2022-03-24T14:16:40.000Z
|
estimators/gym-network_classification_env/gym_network_classification_env/envs/__init__.py
|
draryan/Anomaly-ReactionRL
|
590fbc89dfa761be324c35e0dcf5d08f6086df77
|
[
"MIT"
] | 11
|
2018-07-21T17:56:29.000Z
|
2021-10-24T00:48:21.000Z
|
estimators/gym-network_classification_env/gym_network_classification_env/envs/__init__.py
|
draryan/Anomaly-ReactionRL
|
590fbc89dfa761be324c35e0dcf5d08f6086df77
|
[
"MIT"
] | 35
|
2018-09-27T06:03:14.000Z
|
2022-03-28T13:54:37.000Z
|
from gym_network_classification_env.envs.network_classification import NetworkClassificationEnv
from gym_network_classification_env.envs.helpers_data_preprocessing import data_cls
| 60
| 95
| 0.933333
| 22
| 180
| 7.181818
| 0.545455
| 0.398734
| 0.177215
| 0.35443
| 0.443038
| 0.443038
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 180
| 2
| 96
| 90
| 0.918605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
08ce7a9a118e4e8d9608c2ee615a3276f371115d
| 34
|
py
|
Python
|
src/sage/repl/all.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | 2
|
2015-08-11T05:05:47.000Z
|
2019-05-15T17:27:25.000Z
|
src/sage/repl/all.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/repl/all.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | 1
|
2020-07-24T11:56:55.000Z
|
2020-07-24T11:56:55.000Z
|
from interpreter import preparser
| 17
| 33
| 0.882353
| 4
| 34
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
08d8ac986f8bc438aa1bb82b8698c0c01f72132f
| 33
|
py
|
Python
|
bumpversion/__main__.py
|
jaap3/bump2version
|
22294dd1a1692d6aea4b826b590643c62b5b1eb9
|
[
"MIT"
] | 1,289
|
2015-01-03T02:42:58.000Z
|
2022-03-31T11:32:59.000Z
|
bumpversion/__main__.py
|
jaap3/bump2version
|
22294dd1a1692d6aea4b826b590643c62b5b1eb9
|
[
"MIT"
] | 151
|
2015-01-02T15:02:40.000Z
|
2022-03-15T19:57:12.000Z
|
bumpversion/__main__.py
|
jaap3/bump2version
|
22294dd1a1692d6aea4b826b590643c62b5b1eb9
|
[
"MIT"
] | 173
|
2015-01-09T21:43:44.000Z
|
2022-03-02T22:39:23.000Z
|
__import__('bumpversion').main()
| 16.5
| 32
| 0.757576
| 3
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 33
| 1
| 33
| 33
| 0.65625
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
08e53e4ffac3386be42f59d40de7c209c41f6103
| 33
|
py
|
Python
|
app/utils/__init__.py
|
hashtagSELFIE/memegen
|
3c50b21b557cb7f52b7a46ea33a6b16e3f10011b
|
[
"MIT"
] | null | null | null |
app/utils/__init__.py
|
hashtagSELFIE/memegen
|
3c50b21b557cb7f52b7a46ea33a6b16e3f10011b
|
[
"MIT"
] | null | null | null |
app/utils/__init__.py
|
hashtagSELFIE/memegen
|
3c50b21b557cb7f52b7a46ea33a6b16e3f10011b
|
[
"MIT"
] | null | null | null |
from . import html, images, text
| 16.5
| 32
| 0.727273
| 5
| 33
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 33
| 1
| 33
| 33
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3eb083bc4d39832694e8d5194fdca91c29d2e518
| 365
|
py
|
Python
|
dataent/patches/v11_0/rename_workflow_action_to_workflow_action_master.py
|
dataent/dataent
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
[
"MIT"
] | null | null | null |
dataent/patches/v11_0/rename_workflow_action_to_workflow_action_master.py
|
dataent/dataent
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
[
"MIT"
] | 6
|
2020-03-24T17:15:56.000Z
|
2022-02-10T18:41:31.000Z
|
dataent/patches/v11_0/rename_workflow_action_to_workflow_action_master.py
|
dataent/dataent
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import dataent
from dataent.model.rename_doc import rename_doc
def execute():
if dataent.db.table_exists("Workflow Action") and not dataent.db.table_exists("Workflow Action Master"):
rename_doc('DocType', 'Workflow Action', 'Workflow Action Master')
dataent.reload_doc('workflow', 'doctype', 'workflow_action_master')
| 36.5
| 105
| 0.794521
| 49
| 365
| 5.653061
| 0.44898
| 0.252708
| 0.216607
| 0.144404
| 0.245487
| 0.245487
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09863
| 365
| 9
| 106
| 40.555556
| 0.841945
| 0
| 0
| 0
| 0
| 0
| 0.323288
| 0.060274
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.428571
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3ed98593aeb222b0e161b15194d2d05f4a2cde78
| 66
|
py
|
Python
|
cookiecutter_mbam/scan/__init__.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
cookiecutter_mbam/scan/__init__.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
cookiecutter_mbam/scan/__init__.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
from . import views
from .models import Scan
from . import service
| 22
| 24
| 0.787879
| 10
| 66
| 5.2
| 0.6
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 66
| 3
| 25
| 22
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f5d1f09175afcf816183fc642f26ef92d92bb581
| 7,406
|
py
|
Python
|
tests/system/get_cars_positive_test.py
|
ikostan/REST_API_AUTOMATION
|
cdb4d30fbc7457b2a403b4dad6fe1efa2e754681
|
[
"Unlicense"
] | 8
|
2020-03-17T09:15:28.000Z
|
2022-01-29T19:50:45.000Z
|
tests/system/get_cars_positive_test.py
|
ikostan/REST_API_AUTOMATION
|
cdb4d30fbc7457b2a403b4dad6fe1efa2e754681
|
[
"Unlicense"
] | 1
|
2021-06-02T00:26:58.000Z
|
2021-06-02T00:26:58.000Z
|
tests/system/get_cars_positive_test.py
|
ikostan/REST_API_AUTOMATION
|
cdb4d30fbc7457b2a403b4dad6fe1efa2e754681
|
[
"Unlicense"
] | 1
|
2021-11-22T16:10:27.000Z
|
2021-11-22T16:10:27.000Z
|
#!/path/to/interpreter
"""
Flask App REST API testing: GET
"""
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import allure
import requests
from tests.system.base_test import BaseTestCase
from api.cars_app import CARS_LIST, USER_LIST
@allure.epic('Simple Flask App')
@allure.parent_suite('REST API')
@allure.suite("System Tests")
@allure.sub_suite("Positive Tests")
@allure.feature("GET")
@allure.story('Cars')
class GetCarsPositiveTestCase(BaseTestCase):
"""
Simple Flask App Positive Test: GET call > cars
"""
def setUp(self) -> None:
"""
Test data preparation
:return:
"""
with allure.step("Arrange expected results (cars list)"):
self.cars_url = '/cars'
self.CARS_HATCHBACK = [car for car in CARS_LIST
if car["car_type"] == 'hatchback']
self.CARS_SEDAN = [car for car in CARS_LIST
if car["car_type"] == 'sedan']
self.CARS_LIST = CARS_LIST
def test_get_list_of_cars_admin(self):
"""
Get full list of cars using admin user credentials.
:return:
"""
allure.dynamic.title("Get list of cars "
"using admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[0]['name']
password = USER_LIST[0]['password']
self.assertEqual("admin",
USER_LIST[0]['perm'])
with allure.step("Send GET request"):
response = requests.get(self.URL + self.cars_url,
auth=(username,
password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify 'successful' flag"):
self.assertTrue(response.json()['successful'])
with allure.step("Verify retrieved cars list"):
self.assertTrue(all(True for car in self.CARS_LIST
if car in response.json()['cars_list']))
def test_get_list_of_cars_non_admin(self):
"""
Get full list of cars using non admin user credentials.
:return:
"""
allure.dynamic.title("Get list of cars "
"using non admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[1]['name']
password = USER_LIST[1]['password']
self.assertEqual("non_admin",
USER_LIST[1]['perm'])
with allure.step("Send GET request"):
response = requests.get(self.URL + self.cars_url,
auth=(username,
password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify 'successful' flag"):
self.assertTrue(response.json()['successful'])
with allure.step("Verify retrieved cars list"):
self.assertTrue(all(True for car in self.CARS_LIST
if car in response.json()['cars_list']))
def test_get_list_of_cars_non_admin_sedan(self):
"""
Get full list of cars of type = 'sedan'
using non admin user credentials.
:return:
"""
allure.dynamic.title("Get list of cars of type = 'sedan' "
"using non admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[1]['name']
password = USER_LIST[1]['password']
self.assertEqual("non_admin",
USER_LIST[1]['perm'])
with allure.step("Send GET request"):
response = requests.get(self.URL +
self.cars_url +
'/filter/sedan',
auth=(username, password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify retrieved cars list of type sedan"):
self.assertTrue(all(True for car in self.CARS_SEDAN
if car in response.json()['cars']))
def test_get_list_of_cars_admin_hatchback(self):
"""
Get full list of cars from type = 'hatchback'
using admin user credentials.
:return:
"""
allure.dynamic.title("Get list of cars of type = 'hatchback' "
"using admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[0]['name']
password = USER_LIST[0]['password']
self.assertEqual("admin",
USER_LIST[0]['perm'])
with allure.step("Send GET request"):
response = requests.get(self.URL +
self.cars_url +
'/filter/hatchback',
auth=(username, password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify retrieved cars list of type hatchback"):
self.assertTrue(all(True for car in self.CARS_HATCHBACK
if car in response.json()['cars']))
def test_get_car_by_name_non_admin_swift(self):
"""
Get car data by name = 'swift'
using non admin user credentials.
:return:
"""
allure.dynamic.title("Get car data by name using "
"non admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[1]['name']
password = USER_LIST[1]['password']
self.assertEqual("non_admin",
USER_LIST[1]['perm'])
with allure.step("Prepare expected results"):
car = {"brand": "Maruti",
"car_type": "hatchback",
"name": "Swift",
"price_range": "3-5 lacs"}
with allure.step("Send GET request"):
response = requests.get(self.URL +
self.cars_url +
'/Swift',
auth=(username, password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify 'successful' flag"):
self.assertTrue(response.json()['successful'])
with allure.step("Verify retrieved car"):
self.assertTrue(car == response.json()['car'])
| 35.266667
| 73
| 0.530381
| 773
| 7,406
| 4.968952
| 0.137128
| 0.065087
| 0.091122
| 0.093726
| 0.791981
| 0.784431
| 0.770372
| 0.766467
| 0.746941
| 0.710492
| 0
| 0.006797
| 0.364299
| 7,406
| 209
| 74
| 35.435407
| 0.809048
| 0.082771
| 0
| 0.669291
| 0
| 0
| 0.185146
| 0
| 0
| 0
| 0
| 0
| 0.141732
| 1
| 0.047244
| false
| 0.07874
| 0.031496
| 0
| 0.086614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f5f5c6edab85cfd630b4745ea1e44b090793640b
| 49
|
py
|
Python
|
examples/i18nurls/__init__.py
|
adityavs/werkzeug
|
03bf010f239255049b62f41e37e2e53006ad2398
|
[
"BSD-3-Clause"
] | 4,200
|
2016-03-29T16:32:32.000Z
|
2022-03-30T15:37:03.000Z
|
examples/i18nurls/__init__.py
|
northernSage/werkzeug
|
048cdfd9b969c0c3a133d7ff43b8ad1ad6a673ec
|
[
"BSD-3-Clause"
] | 1,203
|
2016-03-29T15:46:57.000Z
|
2022-03-31T21:15:00.000Z
|
examples/i18nurls/__init__.py
|
northernSage/werkzeug
|
048cdfd9b969c0c3a133d7ff43b8ad1ad6a673ec
|
[
"BSD-3-Clause"
] | 1,403
|
2016-03-29T16:50:37.000Z
|
2022-03-29T09:18:38.000Z
|
from .application import Application as make_app
| 24.5
| 48
| 0.857143
| 7
| 49
| 5.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 49
| 1
| 49
| 49
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.