hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
78d3c60da3c69f85e10ce022f39e62f8c884ea9f | 21,512 | py | Python | spark_fhir_schemas/r4/complex_types/dosage.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | spark_fhir_schemas/r4/complex_types/dosage.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | spark_fhir_schemas/r4/complex_types/dosage.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
BooleanType,
DataType,
)
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class DosageSchema:
    """
    Spark schema for the FHIR R4 ``Dosage`` complex type.

    Indicates how the medication is/was taken or should be taken by the
    patient.
    """

    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: Optional[List[str]] = None,
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = None,
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
        include_modifierExtension: Optional[bool] = False,
        use_date_for: Optional[List[str]] = None,
        parent_path: Optional[str] = "",
    ) -> Union[StructType, DataType]:
        """
        Return the StructType describing a FHIR R4 ``Dosage`` element.

        Recursion is bounded by ``max_recursion_limit`` (occurrences of
        "Dosage" already present in ``nesting_list``) and by
        ``max_nesting_depth``; when either bound is reached only the ``id``
        field is emitted.

        Fields:
          id: unique id for the element within a resource (internal
            references); any string value without spaces.
          extension / modifierExtension: FHIR extension machinery; replaced
            by plain string columns unless ``include_extension`` /
            ``include_modifierExtension`` is True.  Modifier extensions
            SHALL NOT change the meaning of elements on
            Resource/DomainResource.
          sequence: order in which the dosage instructions should be applied
            or interpreted.
          text: free text dosage instructions e.g. SIG.
          additionalInstruction: supplemental instructions or warnings for
            the patient (e.g. "with meals", "may cause drowsiness").
          patientInstruction: instructions in terms understood by the
            patient or consumer.
          timing: when medication should be administered.
          asNeededBoolean / asNeededCodeableConcept: whether the medication
            is only taken when needed within a specific dosing schedule
            (Boolean option), or the precondition for taking it
            (CodeableConcept).
          site: body site to administer to.
          route: how drug should enter body.
          method: technique for administering medication.
          doseAndRate: the amount of medication administered.
          maxDosePerPeriod: upper limit on medication per unit of time.
          maxDosePerAdministration: upper limit per administration.
          maxDosePerLifetime: upper limit per lifetime of the patient.
        """
        # Avoid the shared-mutable-default pitfall the generated signature
        # had (``nesting_list: List[str] = []``); None now means "empty".
        if nesting_list is None:
            nesting_list = []
        if extension_fields is None:
            extension_fields = [
                "valueBoolean",
                "valueCode",
                "valueDate",
                "valueDateTime",
                "valueDecimal",
                "valueId",
                "valueInteger",
                "valuePositiveInt",
                "valueString",
                "valueTime",
                "valueUnsignedInt",
                "valueUri",
                "valueUrl",
                "valueReference",
                "valueCodeableConcept",
                "valueAddress",
            ]
        from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.r4.simple_types.integer import integerSchema
        from spark_fhir_schemas.r4.complex_types.codeableconcept import (
            CodeableConceptSchema,
        )
        from spark_fhir_schemas.r4.complex_types.timing import TimingSchema
        from spark_fhir_schemas.r4.complex_types.dosage_doseandrate import (
            Dosage_DoseAndRateSchema,
        )
        from spark_fhir_schemas.r4.complex_types.ratio import RatioSchema
        from spark_fhir_schemas.r4.complex_types.quantity import QuantitySchema

        # Stop recursing once the limits are reached: degrade to id-only.
        if (
            max_recursion_limit and nesting_list.count("Dosage") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["Dosage"]
        my_parent_path = parent_path + ".dosage" if parent_path else "dosage"

        # Keyword arguments shared by every nested get_schema() call below;
        # the per-call extension_depth and parent_path are passed explicitly.
        shared = dict(
            max_nesting_depth=max_nesting_depth,
            nesting_depth=nesting_depth + 1,
            nesting_list=my_nesting_list,
            max_recursion_limit=max_recursion_limit,
            include_extension=include_extension,
            extension_fields=extension_fields,
            max_extension_depth=max_extension_depth,
            include_modifierExtension=include_modifierExtension,
            use_date_for=use_date_for,
        )
        schema = StructType(
            [
                # Unique id for the element within a resource (for internal
                # references); any string value without spaces.
                StructField("id", StringType(), True),
                # Additional information that is not part of the basic
                # definition of the element.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            extension_depth=extension_depth,
                            parent_path=my_parent_path,
                            **shared,
                        )
                    ),
                    True,
                ),
                # Extensions that modify the understanding of the element;
                # SHALL NOT change the meaning of elements on
                # Resource/DomainResource.
                StructField(
                    "modifierExtension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            extension_depth=extension_depth,
                            parent_path=my_parent_path,
                            **shared,
                        )
                    ),
                    True,
                ),
                # Order in which the dosage instructions should be applied
                # or interpreted.
                StructField(
                    "sequence",
                    integerSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path + ".sequence",
                        **shared,
                    ),
                    True,
                ),
                # Free text dosage instructions e.g. SIG.
                StructField("text", StringType(), True),
                # Supplemental instructions to the patient (e.g. "with
                # meals") or warnings (e.g. "may cause drowsiness").
                StructField(
                    "additionalInstruction",
                    ArrayType(
                        CodeableConceptSchema.get_schema(
                            extension_depth=extension_depth,
                            parent_path=my_parent_path,
                            **shared,
                        )
                    ),
                    True,
                ),
                # Instructions in terms understood by the patient/consumer.
                StructField("patientInstruction", StringType(), True),
                # When medication should be administered.
                StructField(
                    "timing",
                    TimingSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # Whether the Medication is only taken when needed within a
                # specific dosing schedule (Boolean option) ...
                StructField("asNeededBoolean", BooleanType(), True),
                # ... or the precondition for taking it (CodeableConcept).
                StructField(
                    "asNeededCodeableConcept",
                    CodeableConceptSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # Body site to administer to.
                StructField(
                    "site",
                    CodeableConceptSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # How drug should enter body.
                StructField(
                    "route",
                    CodeableConceptSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # Technique for administering medication.
                StructField(
                    "method",
                    CodeableConceptSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # The amount of medication administered.
                StructField(
                    "doseAndRate",
                    ArrayType(
                        Dosage_DoseAndRateSchema.get_schema(
                            extension_depth=extension_depth,
                            parent_path=my_parent_path,
                            **shared,
                        )
                    ),
                    True,
                ),
                # Upper limit on medication per unit of time.
                StructField(
                    "maxDosePerPeriod",
                    RatioSchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # Upper limit on medication per administration.
                StructField(
                    "maxDosePerAdministration",
                    QuantitySchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
                # Upper limit on medication per lifetime of the patient.
                StructField(
                    "maxDosePerLifetime",
                    QuantitySchema.get_schema(
                        extension_depth=extension_depth + 1,
                        parent_path=my_parent_path,
                        **shared,
                    ),
                    True,
                ),
            ]
        )
        # When extensions are not requested, collapse them to plain string
        # columns.  NOTE(review): assigning to schema.fields directly mirrors
        # the original generated code; it presumably relies on pyspark
        # StructType internals -- confirm against the pyspark version in use.
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        if not include_modifierExtension:
            schema.fields = [
                c
                if c.name != "modifierExtension"
                else StructField("modifierExtension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| 49.681293 | 109 | 0.551971 | 1,956 | 21,512 | 5.81953 | 0.128834 | 0.06009 | 0.038215 | 0.054819 | 0.805851 | 0.779935 | 0.771765 | 0.742774 | 0.738821 | 0.733111 | 0 | 0.002729 | 0.403868 | 21,512 | 432 | 110 | 49.796296 | 0.884903 | 0.26455 | 0 | 0.676282 | 0 | 0 | 0.031347 | 0.004413 | 0 | 0 | 0 | 0 | 0 | 1 | 0.003205 | false | 0 | 0.028846 | 0 | 0.041667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
15346720abea14917430cdb63a3115b4e69b19da | 4,042 | py | Python | acouchbase/tests/cases/connection_t.py | dfresh613/couchbase-python-client | c77af56490ed4c6d364fcf8fc1a374570de0239b | [
"Apache-2.0"
] | 189 | 2015-01-07T18:34:31.000Z | 2022-03-21T17:41:56.000Z | acouchbase/tests/cases/connection_t.py | dfresh613/couchbase-python-client | c77af56490ed4c6d364fcf8fc1a374570de0239b | [
"Apache-2.0"
] | 24 | 2015-05-19T14:00:16.000Z | 2022-03-16T22:01:30.000Z | acouchbase/tests/cases/connection_t.py | dfresh613/couchbase-python-client | c77af56490ed4c6d364fcf8fc1a374570de0239b | [
"Apache-2.0"
] | 60 | 2015-03-10T22:12:50.000Z | 2022-03-07T21:57:40.000Z | import unittest
import asyncio
from unittest.mock import patch
from acouchbase.cluster import Cluster, ABucket, get_event_loop, close_event_loop
from acouchbase.asyncio_iops import IOPS
from couchbase.cluster import ClusterOptions
from couchbase.auth import PasswordAuthenticator
class TestAcouchbaseConnection(unittest.TestCase):
    """Validate the arguments the acouchbase ``Cluster`` wrapper forwards to
    the base ``couchbase.cluster.Cluster.__init__``, plus the IOPS/asyncio
    event-loop lifecycle helpers."""

    def _assert_common_cluster_kwargs(self, kwargs):
        """Assert the **kwargs every acouchbase Cluster() call must forward."""
        self.assertIn('bucket_factory', kwargs)
        # bucket_factory has not been instantiated at this moment
        self.assertEqual(kwargs['bucket_factory'].__name__, ABucket.__name__)
        self.assertIn('_flags', kwargs)
        self.assertEqual(40, kwargs['_flags'])
        self.assertIn('_iops', kwargs)
        self.assertIsInstance(kwargs['_iops'], IOPS)

    def _assert_options_positional_args(self, args, conn_string):
        """Assert the connection string + ClusterOptions(...) positional args."""
        self.assertEqual(conn_string, args[0])
        self.assertIsInstance(args[1], ClusterOptions)
        self.assertIn('authenticator', args[1])
        self.assertIsInstance(args[1]['authenticator'], PasswordAuthenticator)

    # TODO: possible normalize how to validate cluster __init__ args?
    @patch('couchbase.cluster.Cluster.__init__')
    def test_connection_basic(self, mock_cluster_init):
        mock_cluster_init.return_value = None
        # NOTE: "couchbaes" is a typo for "couchbase", but harmless here --
        # Cluster.__init__ is mocked and never parses the string.
        conn_string = "couchbaes://fake-host"
        _ = Cluster(conn_string, ClusterOptions(
            PasswordAuthenticator("Administrator", "password")))
        args = mock_cluster_init.call_args[0]
        kwargs = mock_cluster_init.call_args[1]
        # validate *args and **kwargs via the shared helpers
        self._assert_options_positional_args(args, conn_string)
        self._assert_common_cluster_kwargs(kwargs)

    @patch('couchbase.cluster.Cluster.__init__')
    def test_connection_kwargs(self, mock_cluster_init):
        mock_cluster_init.return_value = None
        conn_string = "couchbaes://fake-host"

        # Variant 1: everything passed as keyword arguments.
        _ = Cluster(connection_string=conn_string, authenticator=PasswordAuthenticator(
            "Administrator", "password"))
        args = mock_cluster_init.call_args[0]
        kwargs = mock_cluster_init.call_args[1]
        # validate *args
        self.assertEqual(conn_string, args[0])
        # validate **kwargs
        self.assertIn('authenticator', kwargs)
        self.assertIsInstance(kwargs['authenticator'], PasswordAuthenticator)
        self._assert_common_cluster_kwargs(kwargs)

        # Variant 2: authenticator wrapped in ClusterOptions as a keyword.
        _ = Cluster(conn_string, ClusterOptions(
            authenticator=PasswordAuthenticator("Administrator", "password")))
        args = mock_cluster_init.call_args[0]
        kwargs = mock_cluster_init.call_args[1]
        self._assert_options_positional_args(args, conn_string)
        self._assert_common_cluster_kwargs(kwargs)

    def test_loop_open_close(self):
        loop = get_event_loop()
        self.assertIsNotNone(IOPS._working_loop)
        # verify IOPS and asyncio event loops are the same
        self.assertEqual(id(loop), id(asyncio.get_event_loop()))
        close_event_loop()
        self.assertIsNone(IOPS._working_loop)
        new_loop = get_event_loop()
        # verify a new loop is not the same as the old
        self.assertNotEqual(id(loop), id(new_loop))
        # verify that after closing and recreating another loop, IOPS and
        # asyncio event loops are the same
        self.assertEqual(id(new_loop), id(asyncio.get_event_loop()))
        close_event_loop()
| 39.627451 | 87 | 0.68951 | 448 | 4,042 | 5.935268 | 0.191964 | 0.05378 | 0.056412 | 0.042873 | 0.717563 | 0.717563 | 0.707785 | 0.707785 | 0.670929 | 0.641595 | 0 | 0.006575 | 0.209797 | 4,042 | 101 | 88 | 40.019802 | 0.825924 | 0.128897 | 0 | 0.641791 | 0 | 0 | 0.114375 | 0.031375 | 0 | 0 | 0 | 0.009901 | 0.507463 | 1 | 0.044776 | false | 0.119403 | 0.104478 | 0 | 0.164179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
158221942c016bdb7106bc8d5b3ebb4d688fe90a | 6,470 | py | Python | well_plate_project/data_etl/well_plate_extract.py | MthBr/well-plate-light-driven-predictions | d313c5ff8f589516cb6f65f422626faed5bf6dd2 | [
"MIT"
] | null | null | null | well_plate_project/data_etl/well_plate_extract.py | MthBr/well-plate-light-driven-predictions | d313c5ff8f589516cb6f65f422626faed5bf6dd2 | [
"MIT"
] | null | null | null | well_plate_project/data_etl/well_plate_extract.py | MthBr/well-plate-light-driven-predictions | d313c5ff8f589516cb6f65f422626faed5bf6dd2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 10:20:09 2020
@author: enzo
"""
import pandas as pd
def map_dict_worksheet(basic_structure_df, version, dict_weel_plates):
    """Slice one worksheet grid into per-plate 8x12 DataFrames.

    The sheet holds well plates in pairs side by side; plate names follow
    the Italian alphabet (A..I then L..V, no J/K).  Each extracted plate is
    stored in ``dict_weel_plates`` under ``<plate letter><version>`` with
    rows labelled A-H and columns '1'-'12'.  The (mutated) dict is returned.
    """
    n_rows, n_cols = 8, 12
    row_labels = [chr(c) for c in range(ord('A'), ord('H') + 1)]
    col_labels = [str(i) for i in range(1, 13)]

    # Italian alphabet order: A..I then L..V (J and K do not exist).
    plate_names = [chr(c) for c in range(ord('A'), ord('I') + 1)]
    plate_names += [chr(c) for c in range(ord('L'), ord('V') + 1)]

    for pair_idx, (left, right) in enumerate(
            zip(plate_names[0::2], plate_names[1::2])):
        print(left, '+', right)
        top = 1 + pair_idx * 3 + pair_idx * n_rows
        bottom = top + n_rows

        # Left-hand plate occupies columns 1..12 of the grid.
        left_block = basic_structure_df.iloc[top:bottom, 1:1 + n_cols]
        left_block.columns = col_labels
        left_block.index = row_labels
        dict_weel_plates[left + version] = left_block

        # Right-hand plate starts two spacer columns after the left one.
        right_start = 1 + n_cols + 2
        right_block = basic_structure_df.iloc[top:bottom,
                                              right_start:right_start + n_cols]
        right_block.columns = col_labels
        right_block.index = row_labels
        dict_weel_plates[right + version] = right_block

    return dict_weel_plates
def read_excel(file_xls):
    """Read every worksheet of ``file_xls`` into a dict of DataFrames.

    Returns a mapping {sheet_name: DataFrame}; ``sheet_name=None`` tells
    pandas to load all sheets.  (The former function-local
    ``import pandas as pd`` was redundant -- pandas is already imported at
    module level.)
    """
    return pd.read_excel(file_xls, sheet_name=None)
def map_df_worksheet(basic_structure_df, version, df_weel_plates):
    """Parse one worksheet into long-format rows appended to df_weel_plates.

    The sheet holds 8x12 well plates in side-by-side pairs, preceded by a
    header row of class names; plate names follow the Italian alphabet
    (A..I then L..V, no J/K).  Each plate is melted into rows with columns
    ``well_plate_name, well_name, class_target, value_target`` and the
    accumulated DataFrame is returned (the input frame is not mutated).
    """
    import unittest
    case = unittest.TestCase()
    case.assertListEqual(
        df_weel_plates.columns.tolist(),
        ['well_plate_name', 'well_name', 'class_target', 'value_target'])

    row = 8
    col = 12
    row_name = list(map(chr, range(ord('A'), ord('H') + 1)))
    col_name = [str(each) for each in range(1, 13)]
    well_names = [f'{a}{b}' for a in row_name for b in col_name]

    last_letter = 'V'
    well_plate_names = list(map(chr, range(ord('A'), ord('I') + 1)))  # Italian order! -J,K
    well_plate_names.extend(list(map(chr, range(ord('L'), ord(last_letter) + 1))))

    def _stack_half(start_row, end_row, col_start, plate_name):
        """Melt one 8x12 half-grid into long-format rows for plate_name."""
        block = basic_structure_df.iloc[start_row:end_row, col_start:col_start + col]
        # The row just above the grid carries the per-column class names.
        block.columns = list(
            basic_structure_df.iloc[start_row - 1, col_start:col_start + col])
        block.index = row_name
        stacked = block.stack().reset_index()
        # Slice well_names defensively: stack() drops NaN cells, so the long
        # table can be shorter than the full 96 wells.  (The original code
        # sliced only for the right-hand half, which would raise a length
        # mismatch for a left-hand plate containing empty cells.)
        stacked.insert(loc=0, column='well_name',
                       value=well_names[:len(stacked)], allow_duplicates=False)
        stacked.insert(0, 'well_plate_name', plate_name)
        stacked = stacked.drop(['level_0'], axis=1)
        return stacked.rename(columns={"level_1": "class_target", 0: "value_target"})

    count = 0
    for x, y in zip(well_plate_names[0::2], well_plate_names[1::2]):
        print(x, '+', y)
        # Each plate pair takes 8 data rows plus 4 spacer/header rows.
        start_row = 1 + count * 4 + count * row
        end_row = start_row + row
        df_weel_plates = pd.concat(
            [df_weel_plates, _stack_half(start_row, end_row, 1, x + version)],
            ignore_index=True)
        df_weel_plates = pd.concat(
            [df_weel_plates,
             _stack_half(start_row, end_row, 1 + col + 2, y + version)],
            ignore_index=True)
        count += 1
    return df_weel_plates
#%% testings
#%% INIT
def clear_all():
    """Clears all the variables from the workspace of the application."""
    # Snapshot globals() so entries can be deleted safely while looping.
    gl = globals().copy()
    for var in gl:
        if var[0] == '_': continue  # keep names starting with underscore (incl. dunders)
        # Heuristic: the repr of functions contains "func" and of modules
        # "module", so both survive the purge; everything else is deleted.
        if 'func' in str(globals()[var]): continue
        if 'module' in str(globals()[var]): continue

        del globals()[var]
def load_test_file(file_name = 'Matrici multiwell.xlsx'):
    """Return the path to a raw matrix spreadsheet, asserting it exists."""
    from well_plate_project.config import data_dir
    xls_path = data_dir / 'raw' / 'matrix' / file_name
    assert xls_path.is_file()
    return xls_path
def test_dict():
    """Build the dict-of-DataFrames representation for the first two
    worksheets of the multiwell workbook and pickle it next to the raw data.

    Returns 0 on completion.
    """
    clear_all()
    file_name = 'Matrici multiwell.xlsx'
    xls_file = load_test_file(file_name)
    dict_df = read_excel(str(xls_file))
    keys = list(dict_df.keys())
    KEY_WORD = "Matrici "

    dict_weel_plates = {}
    # Process the first two worksheets; the trailing digit of the sheet
    # name (if any) selects the plate-name version suffix.
    for worksheet in keys[:2]:
        version = '1' if not worksheet[-1].isnumeric() else worksheet[-1]
        dict_weel_plates = map_dict_worksheet(
            dict_df[worksheet], version, dict_weel_plates)

    from well_plate_project.config import data_dir
    import pickle
    print("Saving...")
    target_filename = 'matrici_multiwell' + '_dict_df' + '.pkl'
    target_path = data_dir / 'raw' / 'matrix' / target_filename
    with open(str(target_path),"wb+") as file:
        pickle.dump(dict_weel_plates, file)
    print("Done")
    return 0
if __name__ == "__main__":
    # Script entry point: build the long-format DataFrame for the first two
    # worksheets of the multiwell workbook and pickle it next to the raw data.
    clear_all()
    file_name = 'Matrici multiwell.xlsx'
    xls_file = load_test_file(file_name)
    dict_df = read_excel(str(xls_file))
    keys = list(dict_df.keys())
    KEY_WORD = "Matrici "

    columns = ['well_plate_name', 'well_name', 'class_target', 'value_target']
    df_weel_plates = pd.DataFrame(columns=columns)
    # The trailing digit of the sheet name (if any) selects the version suffix.
    for worksheet in keys[:2]:
        version = '1' if not worksheet[-1].isnumeric() else worksheet[-1]
        df_weel_plates = map_df_worksheet(
            dict_df[worksheet], version, df_weel_plates)

    from well_plate_project.config import data_dir
    import pickle
    print("Saving...")
    target_filename = 'matrici_multiwell' + '_df' + '.pkl'
    target_path = data_dir / 'raw' / 'matrix' / target_filename
    with open(str(target_path),"wb+") as file:
        pickle.dump(df_weel_plates, file)
    print("Done")
| 33.697917 | 123 | 0.654096 | 942 | 6,470 | 4.208068 | 0.171975 | 0.068113 | 0.064581 | 0.037841 | 0.841826 | 0.786327 | 0.779516 | 0.749243 | 0.739405 | 0.691726 | 0 | 0.019204 | 0.211283 | 6,470 | 191 | 124 | 33.874346 | 0.757594 | 0.052241 | 0 | 0.611111 | 0 | 0 | 0.07482 | 0 | 0.015873 | 0 | 0 | 0 | 0.015873 | 1 | 0.047619 | false | 0 | 0.063492 | 0 | 0.150794 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1598e0104e455261882e7b022d2a38c5f949814b | 12,637 | py | Python | example/flare_analyze_injection.py | zhuchangzhan/TVOI | 3071a70383ce5d7770cd000a2b439f1e857d14bf | [
"MIT"
] | null | null | null | example/flare_analyze_injection.py | zhuchangzhan/TVOI | 3071a70383ce5d7770cd000a2b439f1e857d14bf | [
"MIT"
] | null | null | null | example/flare_analyze_injection.py | zhuchangzhan/TVOI | 3071a70383ce5d7770cd000a2b439f1e857d14bf | [
"MIT"
] | 1 | 2020-04-12T04:21:42.000Z | 2020-04-12T04:21:42.000Z | """
process injected flares
"""
import os,sys
import glob
import time
import pandas as pd
import matplotlib.pyplot as plt
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
import src.main.Lightcurve_io4 as LC_io2
from src.main.General_Catcher import *
def injection_test():
    """
    Run flare catching code over injected lightcurve given by Max
    """
    # Reads per-target CSV lightcurves from ./injected and writes flare
    # reports under ./deploy_injected.
    filepaths = glob.glob("injected/*.csv")
    Norm = 0
    counter = 0  # number of lightcurves with at least one detected flare
    start = time.time()
    total_time = time.time() -start
    print("Begin Injected Catching")
    savepath = "deploy_injected"
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    for count,filepath in enumerate(filepaths):
        if count > 84:
            # Only the first 85 files are processed.
            continue
        try:
            # Filename layout assumed: S<sector>_..._<TIC_ID>.csv — TODO confirm.
            TIC_ID = filepath.split("_")[-1].replace(".csv","")
            sector = int(filepath.split("/")[-1].split("_")[0][1:])
            df = pd.read_csv(filepath)
            times = df["# time"].values
            flux = df["flux"].values
            error = df["flux_err"].values
            Catcher = SPOC_Catcher_v3(None,None,TIC_ID,sector,Norm)
            Catcher.Load_Lightcurve_Data(True,[],times,flux,error)
            TVOI = Catcher.TVOI
            if Catcher.TVOI.num_flares > 0:
                output = Catcher.Create_Flare_Report(savepath,deploy=True)
                if counter%20 == 0:
                    # Periodically close figures to bound matplotlib memory use.
                    plt.close()
                counter +=1
                print(count,counter,TIC_ID,output,total_time)
            else:
                print(count,counter,TIC_ID,0,total_time)
        except:
            # Best-effort: log the failing target ("F") and keep going.
            # NOTE(review): if the failure happens before TIC_ID/sector are
            # assigned, this handler itself raises NameError — confirm intended.
            print(count,counter,TIC_ID,"F",total_time)
            with open(os.path.join(savepath,"a.sector%s_result.txt"%sector),"a") as outputfile:
                outputfile.write(",".join([str(count),str(counter),str(TIC_ID),"F",str(total_time),"\n"]))
        total_time=time.time()-start
    # Dead code kept for reference: earlier manual TVOI pipeline (string
    # literal, never executed).
    """
    TVOI = LC_io2.SPOC_TVOI(None,None,TIC_ID,sector,Norm)
    TVOI.load_user_input(time,flux,error)
    TVOI.calibrate_lightcurve()
    TVOI.detrend_lightcurve()
    TVOI.bin_lightcurve()
    #plt.plot(time,flux)
    plt.plot(TVOI.time_bin,TVOI.signal_bin)
    plt.plot(TVOI.time_bin,TVOI.signal_bin_detrended)
    plt.show()
    """
    return
def read_injection():
    """
    Re-run the flare catcher over injected lightcurves in ./injected2.

    NOTE(review): the ``if int(TIC) != 358108509: continue`` guard restricts
    processing to a single hard-coded TIC id; the ``if True:/else:``
    scaffolding looks like a converted try/except (the else branch is
    unreachable) — confirm before relying on this routine.
    """
    filepaths1 = glob.glob("injected2/S001*.csv")
    filepaths2 = glob.glob("injected2/S002*.csv")
    Norm = 0
    counter = 0
    start = time.time()
    total_time = time.time() - start
    savepath = "testing_temp2.1"
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    listss = glob.glob("injection_params2/*")
    for inject_param in listss:
        print(inject_param)
        # Sector digit is taken from the 4th character of the filename
        # (e.g. "S001..." -> 1).
        sector = int(inject_param.split("/")[-1][3])
        #if sector != 1:
        #    continue
        idf = pd.read_csv(inject_param)
        TIC_IDs = idf["ID"].values
        tpeaks = idf["tpeak"].values - 2457000.0  # convert JD to TESS BTJD offset
        amplitudes = idf["ampl"].values
        fwhms = idf["fwhm"].values
        count = 0
        counter = 0
        for TIC,tpeak,ampli,fwhms in zip(TIC_IDs,tpeaks,amplitudes,fwhms):
            #print(TIC,tpeak,ampli,fwhms)
            if True:
                TIC = str(int(TIC))
                if int(TIC) != 358108509:
                    continue
                print(TIC)
                count +=1
                if count%10 == 0:
                    # Periodically close figures to bound matplotlib memory use.
                    plt.close()
                if sector == 1:
                    filepath = [val for val in filepaths1 if TIC in val][0]
                else:
                    filepath = [val for val in filepaths2 if TIC in val][0]
                TIC_ID = filepath.split("_")[-1].replace(".csv","")
                #sector = int(filepath.split("/")[-1].split("_")[0][1:])
                df = pd.read_csv(filepath)
                times = df["# time"].values
                flux = df["flux"].values
                error = df["flux_err"].values
                Catcher = SPOC_Catcher_v3(None,None,TIC_ID,sector,Norm)
                # -1 sentinel in place of a TESS magnitude.
                Catcher.Load_Lightcurve_Data([],[times,flux,error,-1])
                TVOI = Catcher.TVOI
                if Catcher.TVOI.num_flares > 0:
                    counter +=1
                    output = Catcher.Create_Flare_Report(savepath,deploy=False,inject = True,
                                                         inject_param=[tpeak,ampli,fwhms,sector])
                    print(count,counter,TIC_ID,output,total_time)
                #else:
                #    print(count,counter,TIC_ID,0,total_time)
                total_time=time.time()-start
            else:
                #except:
                count+=1
                print(count,counter,TIC_ID,"F",total_time)
def read_dropbox_injection():
    """
    Run the flare catcher over injected lightcurves stored in Dropbox.

    Lightcurves are bz2-compressed CSVs under a hard-coded per-user Dropbox
    path; injection parameters (tpeak/ampl/fwhm) come from the matching
    injection_params files. Reports are written to ./i_boxcar_<name>.
    """
    name = "output_single"
    filepaths1 = glob.glob("/Users/azariven/Dropbox (Personal)/%s/csv/S001*.bz2"%name)
    filepaths2 = glob.glob("/Users/azariven/Dropbox (Personal)/%s/csv/S002*.bz2"%name)
    #filepaths1 = glob.glob("/Users/azariven/Dropbox (Personal)/output/csv/S001*.bz2")
    #filepaths2 = glob.glob("/Users/azariven/Dropbox (Personal)/output/csv/S002*.bz2")
    Norm = 0
    counter = 0
    start = time.time()
    total_time = time.time() - start
    savepath = "i_boxcar_%s"%name
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    listss = glob.glob("/Users/azariven/Dropbox (Personal)/%s/injection_params/*"%name)
    #listss = glob.glob("/Users/azariven/Dropbox (Personal)/output/injection_params/*")
    for inject_param in listss:
        print(inject_param)
        # Sector digit from the 4th character of the filename ("S001..." -> 1).
        sector = int(inject_param.split("/")[-1][3])
        # Dead filter code kept as a string literal (never executed).
        """
        if "FGK" not in inject_param:
            continue
        if sector == 2:
            continue
        """
        idf = pd.read_csv(inject_param)
        # Keep only the first injected flare per target.
        idf = idf[idf["flare_nr"]==0]
        TIC_IDs = idf["ID"].values
        tpeaks = idf["tpeak"].values - 2457000.0  # convert JD to TESS BTJD offset
        amplitudes = idf["ampl"].values
        fwhms = idf["fwhm"].values
        count = 0
        counter = 0
        for TIC_ID,tpeak,ampli,fwhms in zip(TIC_IDs,tpeaks,amplitudes,fwhms):
            #print(TIC,tpeak,ampli,fwhms)
            #try:
            if True:
                TIC_ID = str(TIC_ID)
                #if TIC_ID != "141154638":
                #    continue
                count +=1
                if count%10 == 0:
                    # Periodically close figures to bound matplotlib memory use.
                    plt.close()
                # Filename layout assumed: ..._<TIC_ID>_<TESSMAG>.csv.bz2 — TODO confirm.
                if sector == 1:
                    filepath = [val for val in filepaths1 if TIC_ID == val.split("_")[-2]][0]
                else:
                    filepath = [val for val in filepaths2 if TIC_ID == val.split("_")[-2]][0]
                TESSMAG = float(filepath.split("_")[-1].replace(".csv.bz2",""))
                TIC_ID = filepath.split("_")[-2]#
                df = pd.read_csv(filepath, compression='bz2', header=0, sep=',', quotechar='"')
                #df = df.rename(columns = {0:"# time",1:"flux",2:"flux_err"})
                times = df["# time"].values
                flux = df["flux"].values
                error = df["flux_err"].values
                #print(times)
                Catcher = SPOC_Catcher_v3(None,None,TIC_ID,sector,Norm)
                Catcher.Load_Lightcurve_Data([],[times,flux,error,TESSMAG])
                TVOI = Catcher.TVOI
                if Catcher.TVOI.num_flares > 0:
                    counter +=1
                    output = Catcher.Create_Flare_Report(savepath,deploy=True,inject = True,
                                                         inject_param=[tpeak,ampli,fwhms,sector])
                    print(count,counter,TIC_ID,output,total_time)
                #else:
                #    print(count,counter,TIC_ID,0,total_time)
                total_time=time.time()-start
            else:
                #except:
                count+=1
                print(count,counter,TIC_ID,"F",total_time)
def read_returns(path="returns.txt"):
    """
    Parse a returns log into sector digits and TIC ids.

    Each data line is expected to look like ``S00X... <tic_id> ...``: the
    4th character of the first space-separated token is the sector digit
    and the second token is the TIC id. The first (header) line is skipped.

    Fixes over the original: the parsed lists were computed and then
    discarded (the function implicitly returned None) — they are now
    returned; blank lines (e.g. a trailing newline) no longer raise
    IndexError; the file path is a parameter with the original value as
    its default, so existing callers are unaffected.

    Parameters
    ----------
    path : str
        Path to the log file. Defaults to ``"returns.txt"``.

    Returns
    -------
    tuple of (list of str, list of str)
        Sector digit strings and TIC id strings, in file order.
    """
    with open(path, "r") as f:
        info = f.read()
    # Skip the header line and any blank lines (trailing newline included).
    lines = [ln for ln in info.split("\n")[1:] if ln.strip()]
    sector = [ln.split(" ")[0][3] for ln in lines]
    bad_tic = [ln.split(" ")[1] for ln in lines]
    return sector, bad_tic
def read_dropbox_injection_fix():
    """
    Re-process only the targets listed in returns.txt (previous failures).

    Parses returns.txt into (sector digit, TIC id) pairs, then scans the
    Dropbox injection-parameter files and re-runs the catcher for the
    matching targets only. Reports go to ./injection_global_test_<name>.
    """
    with open("returns.txt","r") as f:
        info = f.read()
    # First line of returns.txt is a header; 4th char of the first token is
    # the sector digit, second token is the TIC id.
    sector_info = [x.split(" ")[0][3] for x in info.split("\n")][1:]
    bad_tic = [x.split(" ")[1] for x in info.split("\n")][1:]
    name = "output_outburst"
    filepaths1 = glob.glob("/Users/azariven/Dropbox (Personal)/%s/csv/S001*.bz2"%name)
    filepaths2 = glob.glob("/Users/azariven/Dropbox (Personal)/%s/csv/S002*.bz2"%name)
    #filepaths1 = glob.glob("/Users/azariven/Dropbox (Personal)/output/csv/S001*.bz2")
    #filepaths2 = glob.glob("/Users/azariven/Dropbox (Personal)/output/csv/S002*.bz2")
    Norm = 0
    counter = 0
    start = time.time()
    total_time = time.time() - start
    savepath = "injection_global_test_%s"%name
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    listss = glob.glob("/Users/azariven/Dropbox (Personal)/%s/injection_params/*"%name)
    #listss = glob.glob("/Users/azariven/Dropbox (Personal)/output/injection_params/*")
    for inject_param in listss:
        #print(inject_param)
        sector = int(inject_param.split("/")[-1][3])
        idf = pd.read_csv(inject_param)
        # Keep only the first injected flare per target.
        idf = idf[idf["flare_nr"]==0]
        TIC_IDs = idf["ID"].values
        tpeaks = idf["tpeak"].values - 2457000.0  # convert JD to TESS BTJD offset
        amplitudes = idf["ampl"].values
        fwhms = idf["fwhm"].values
        count = 0
        counter = 0
        for TIC_ID,tpeak,ampli,fwhms in zip(TIC_IDs,tpeaks,amplitudes,fwhms):
            #print(TIC,tpeak,ampli,fwhms)
            TIC_ID = str(TIC_ID)
            # Only re-run targets that appear in returns.txt with a matching sector.
            for a,b in zip(sector_info,bad_tic):
                if int(a) == sector and b == str(TIC_ID):
                    if sector == 1:
                        filepath = [val for val in filepaths1 if TIC_ID == val.split("_")[-2]][0]
                    else:
                        filepath = [val for val in filepaths2 if TIC_ID == val.split("_")[-2]][0]
                    #print(filepath)
                    # Filename layout assumed: ..._<TIC_ID>_<TESSMAG>.csv.bz2 — TODO confirm.
                    TESSMAG = float(filepath.split("_")[-1].replace(".csv.bz2",""))
                    TIC_ID = filepath.split("_")[-2]#
                    df = pd.read_csv(filepath, compression='bz2', header=0, sep=',', quotechar='"')
                    #df = df.rename(columns = {0:"# time",1:"flux",2:"flux_err"})
                    times = df["# time"].values
                    flux = df["flux"].values
                    error = df["flux_err"].values
                    #print(times)
                    Catcher = SPOC_Catcher_v3(None,None,TIC_ID,sector,Norm)
                    Catcher.Load_Lightcurve_Data([],[times,flux,error,TESSMAG])
                    TVOI = Catcher.TVOI
                    if Catcher.TVOI.num_flares > 0:
                        counter +=1
                        output = Catcher.Create_Flare_Report(savepath,deploy=False,inject=True,
                                                             inject_param=[tpeak,ampli,fwhms,sector])
                        print(count,counter,TIC_ID,output,total_time)
if __name__ == "__main__":
    # Entry point: exactly one routine is enabled at a time; the others are
    # kept commented out for manual switching.
    #run_all_files()
    #local_run_test()
    #injection_test()
    #read_injection()
    read_dropbox_injection()
    #read_returns()
    #read_dropbox_injection_fix()
| 32.237245 | 106 | 0.480573 | 1,344 | 12,637 | 4.374256 | 0.130952 | 0.027216 | 0.026535 | 0.042864 | 0.785678 | 0.775642 | 0.77156 | 0.764926 | 0.740092 | 0.726484 | 0 | 0.027034 | 0.394081 | 12,637 | 391 | 107 | 32.319693 | 0.74076 | 0.093218 | 0 | 0.748768 | 0 | 0 | 0.07009 | 0.032611 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024631 | false | 0 | 0.034483 | 0 | 0.064039 | 0.059113 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
15b3879ddd31f7bfe7c6c17603768f044768e247 | 119 | py | Python | rational_crowd/questionnaire/__init__.py | shirishgoyal/rational_crowd | 1d1ef7d1653a5c9db90bd930c56208d062085db8 | [
"MIT"
] | 1 | 2017-03-22T08:50:44.000Z | 2017-03-22T08:50:44.000Z | rational_crowd/questionnaire/__init__.py | shirishgoyal/rational_crowd | 1d1ef7d1653a5c9db90bd930c56208d062085db8 | [
"MIT"
] | null | null | null | rational_crowd/questionnaire/__init__.py | shirishgoyal/rational_crowd | 1d1ef7d1653a5c9db90bd930c56208d062085db8 | [
"MIT"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.contrib import admin
| 23.8 | 55 | 0.840336 | 18 | 119 | 5.444444 | 0.666667 | 0.306122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12605 | 119 | 4 | 56 | 29.75 | 0.942308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ec9cbec154194ed620b175ba32dfd6a3d298f9f6 | 10,766 | py | Python | pysem/startingvalues.py | planplus/pysem | 6effa2e1e468c889e89109ac4a7a486b0813f02d | [
"MIT"
] | 2 | 2021-12-10T04:20:58.000Z | 2022-01-07T06:57:17.000Z | pysem/startingvalues.py | planplus/pysem | 6effa2e1e468c889e89109ac4a7a486b0813f02d | [
"MIT"
] | null | null | null | pysem/startingvalues.py | planplus/pysem | 6effa2e1e468c889e89109ac4a7a486b0813f02d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains functions for stating values estimation."""
from scipy.stats import linregress
import numpy as np
def start_beta(model, lval: str, rval: str):
    """
    Starting value for a Beta parameter.

    Beta entries are conventionally initialised at zero; the arguments are
    accepted only so all ``start_*`` functions share one signature.

    Parameters
    ----------
    model : Model
        Model instance (unused).
    lval : str
        L-value name (unused).
    rval : str
        R-value name (unused).

    Returns
    -------
    float
        Always 0.0.
    """
    return 0.0
def start_lambda(model, lval: str, rval: str):
    """
    Starting value for a Lambda (factor loading) parameter.

    The manifest variable ``lval`` is regressed onto the first manifest
    counterpart of the latent factor ``rval``; the regression slope is the
    starting value. Falls back to 0.0 whenever no observed counterpart can
    be resolved or no data matrix is attached to the model.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    if rval not in model.vars['latent']:
        return 0.0
    observed = model.vars['observed']
    anchor = rval
    # Follow the first-manifest chain until an observed variable is reached.
    while anchor not in observed:
        try:
            anchor = model.first_manifs[anchor]
        except KeyError:
            return 0.0
        if anchor == rval:
            # Chain cycled back to the start: no usable manifest variable.
            return 0.0
    if anchor is None or not hasattr(model, 'mx_data'):
        return 0.0
    data = model.mx_data
    x = data[:, observed.index(anchor)]
    y = data[:, observed.index(lval)]
    keep = np.isfinite(x) & np.isfinite(y)
    return linregress(x[keep], y[keep]).slope
def start_psi(model, lval: str, rval: str):
    """
    Starting value for a Psi (covariance) parameter.

    Latent variances start at 0.05 and latent covariances at zero.
    Exogenous covariances are copied from the sample covariance matrix,
    other observed variances start at half their sample value, and all
    remaining entries start at zero.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    latents = model.vars['latent']
    if rval in latents or lval in latents:
        return 0.05 if rval == lval else 0.0
    exogenous = model.vars['exogenous']
    observed = model.vars['observed']
    i = observed.index(lval)
    j = observed.index(rval)
    if lval in exogenous:
        return model.mx_cov[i, j]
    if i == j:
        return model.mx_cov[i, j] / 2
    return 0.0
def start_theta(model, lval: str, rval: str):
    """
    Starting value for a Theta (measurement error) parameter.

    Variances start at half the observed sample variance; covariances
    start at zero.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    if lval == rval:
        idx = model.vars['observed'].index(lval)
        return model.mx_cov[idx, idx] / 2
    return 0.0
def start_gamma1(model, lval: str, rval: str):
    """
    Starting value for a Gamma1 parameter.

    Zero unless the regressor is the intercept column ``'1'``, in which
    case half of the variable's mean (NaNs ignored) is used.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    if rval != '1':
        return 0.0
    col = model.vars['observed'].index(lval)
    return np.nanmean(model.mx_data[:, col]) / 2
def start_gamma2(model, lval: str, rval: str):
    """
    Starting value for a Gamma2 parameter.

    Behaves exactly like ``start_gamma1``: zero unless the regressor is the
    intercept column ``'1'``, in which case half of the variable's mean
    (NaNs ignored) is used.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    if rval != '1':
        return 0.0
    col = model.vars['observed'].index(lval)
    return np.nanmean(model.mx_data[:, col]) / 2
def start_d(model, lval: str, rval: str):
    """
    Starting value for a D parameter.

    Diagonal entries use half the model's effect loading for ``lval``
    (0.1/2 when no loading is registered, 0.05 when the model carries no
    usable ``effects_loadings`` mapping); off-diagonal entries start at 0.

    Parameters
    ----------
    model : Model
        Model instance.
    lval : str
        L-value name.
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    if lval != rval:
        return 0.0
    try:
        return model.effects_loadings.get(lval, 0.1) / 2
    except (AttributeError, TypeError):
        return 0.05
def start_v(model, lval: str, rval: str):
    """
    Starting value for a V parameter.

    Always starts at one; the arguments exist only so every ``start_*``
    function shares the same signature.

    Parameters
    ----------
    model : Model
        Model instance (unused).
    lval : str
        L-value name (unused).
    rval : str
        R-value name (unused).

    Returns
    -------
    float
        Always 1.0.
    """
    return 1.0
'''---------------------------------IMPUTER---------------------------------'''
def start_data_imp(model, lval: str, rval: str):
    """
    Starting value for an imputed data entry.

    Returns the column mean (NaNs ignored) of the observed variable
    ``rval`` in the wrapped original model, or 0.0 if ``rval`` is not an
    observed variable.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value name (unused).
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    original = model.mod
    try:
        col = original.vars['observed'].index(rval)
    except ValueError:
        return 0.0
    return np.nanmean(original.mx_data[:, col])
def start_g_imp(model, lval: str, rval: str):
    """
    Starting value for an imputed G entry.

    Returns the mean (NaNs ignored) of the ``mx_g1`` row that corresponds
    to the observed-exogenous variable ``rval`` in the wrapped original
    model, or 0.0 if ``rval`` is not observed-exogenous.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value name (unused).
    rval : str
        R-value name.

    Returns
    -------
    float
        Starting value.
    """
    original = model.mod
    try:
        row = original.vars['observed_exogenous'].index(rval)
    except ValueError:
        return 0.0
    return np.nanmean(original.mx_g1[row, :])
def start_beta_imp(model, lval: str, rval: str):
    """
    Starting value for a Beta parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's Beta
    matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_beta
    return model.mod.mx_beta[row_names.index(lval), col_names.index(rval)]
def start_gamma1_imp(model, lval: str, rval: str):
    """
    Starting value for a Gamma1 parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's
    Gamma1 matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_gamma1
    return model.mod.mx_gamma1[row_names.index(lval), col_names.index(rval)]
def start_gamma2_imp(model, lval: str, rval: str):
    """
    Starting value for a Gamma2 parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's
    Gamma2 matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_gamma2
    return model.mod.mx_gamma2[row_names.index(lval), col_names.index(rval)]
def start_lambda_imp(model, lval: str, rval: str):
    """
    Starting value for a Lambda parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's
    Lambda matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_lambda
    return model.mod.mx_lambda[row_names.index(lval), col_names.index(rval)]
def start_psi_imp(model, lval: str, rval: str):
    """
    Starting value for a Psi parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's Psi
    matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_psi
    return model.mod.mx_psi[row_names.index(lval), col_names.index(rval)]
def start_theta_imp(model, lval: str, rval: str):
    """
    Starting value for a Theta parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's
    Theta matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_theta
    return model.mod.mx_theta[row_names.index(lval), col_names.index(rval)]
def start_d_imp(model, lval: str, rval: str):
    """
    Starting value for a D parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's D
    matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_d
    return model.mod.mx_d[row_names.index(lval), col_names.index(rval)]
def start_v_imp(model, lval: str, rval: str):
    """
    Starting value for a V parameter inside an Imputer.

    Copies the already-fitted value from the wrapped original model's V
    matrix, looked up by row/column parameter names.

    Parameters
    ----------
    model : Model
        Imputer instance wrapping the original model in ``model.mod``.
    lval : str
        L-value (row) name.
    rval : str
        R-value (column) name.

    Returns
    -------
    float
        Starting value copied from the original model.
    """
    row_names, col_names = model.mod.names_v
    return model.mod.mx_v[row_names.index(lval), col_names.index(rval)]
| 20.783784 | 79 | 0.560747 | 1,391 | 10,766 | 4.299784 | 0.10496 | 0.042133 | 0.036114 | 0.048152 | 0.818258 | 0.783648 | 0.777964 | 0.756897 | 0.756897 | 0.756897 | 0 | 0.008476 | 0.320546 | 10,766 | 517 | 80 | 20.823985 | 0.809159 | 0.493684 | 0 | 0.472868 | 0 | 0 | 0.023273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.015504 | 0 | 0.403101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ecba870f6a60962be764d90347afe750ca5962eb | 47 | py | Python | scripts/portal/in_guild.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/in_guild.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/in_guild.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # 200000300
# Warp the player to map 200000301 at spawn point 3.
sm.warp(200000301, 3)
# End the script session (presumably releases the script manager so the
# client is no longer locked — verify against the server's script API).
sm.dispose()
| 11.75 | 21 | 0.723404 | 7 | 47 | 4.857143 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.452381 | 0.106383 | 47 | 3 | 22 | 15.666667 | 0.357143 | 0.191489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ecd7b824c91daf5695bed416d593b5814c8cbfb5 | 172 | py | Python | aldryn_accounts/monkeypatches.py | conformist-mw/aldryn-accounts | e4bd60354547945a8e80cc692c0080582dd0d846 | [
"MIT"
] | null | null | null | aldryn_accounts/monkeypatches.py | conformist-mw/aldryn-accounts | e4bd60354547945a8e80cc692c0080582dd0d846 | [
"MIT"
] | 1 | 2019-05-29T03:49:39.000Z | 2019-05-29T09:40:04.000Z | aldryn_accounts/monkeypatches.py | conformist-mw/aldryn-accounts | e4bd60354547945a8e80cc692c0080582dd0d846 | [
"MIT"
] | 6 | 2019-03-05T15:19:26.000Z | 2021-12-16T20:50:21.000Z | # -*- coding: utf-8 -*-
def patch_user_unicode():
    """Monkey-patch ``User.__unicode__`` to use the project's ``user_display`` helper."""
    # NOTE(review): the imports are local, presumably so the patch can be
    # applied after Django's app registry is ready — confirm.
    from django.contrib.auth.models import User
    from .utils import user_display
    User.__unicode__ = user_display
| 21.5 | 47 | 0.709302 | 23 | 172 | 4.956522 | 0.652174 | 0.192982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007143 | 0.186047 | 172 | 7 | 48 | 24.571429 | 0.807143 | 0.122093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ece77bd0755236bfa86b41e2c2a4277a4516721f | 27 | py | Python | build/lib/NaMAZU/namadeco/__init__.py | NMZ0429/NaMAZU | 46ac3a5fab6fc21bbef323e16daadfd4111e2e68 | [
"Apache-2.0"
] | 5 | 2021-09-22T20:17:22.000Z | 2021-11-26T07:09:18.000Z | build/lib/NaMAZU/namadeco/__init__.py | NMZ0429/NaMAZU | 46ac3a5fab6fc21bbef323e16daadfd4111e2e68 | [
"Apache-2.0"
] | null | null | null | build/lib/NaMAZU/namadeco/__init__.py | NMZ0429/NaMAZU | 46ac3a5fab6fc21bbef323e16daadfd4111e2e68 | [
"Apache-2.0"
] | null | null | null | from .decorations import *
| 13.5 | 26 | 0.777778 | 3 | 27 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
01bcb700ffb7136681b5a0ddb62bcd1d7bd4639d | 147 | py | Python | ygo_client/connection/enums/__init__.py | hinihatetsu/ygo-client-python | 5220452417878757ed712ec95c9936004fdcb003 | [
"MIT"
] | null | null | null | ygo_client/connection/enums/__init__.py | hinihatetsu/ygo-client-python | 5220452417878757ed712ec95c9936004fdcb003 | [
"MIT"
] | null | null | null | ygo_client/connection/enums/__init__.py | hinihatetsu/ygo-client-python | 5220452417878757ed712ec95c9936004fdcb003 | [
"MIT"
] | null | null | null | from .ctos_message import CtosMessage
from .stoc_message import StocMessage
from .game_message import GameMessage
from .error_type import ErrorType | 36.75 | 37 | 0.870748 | 20 | 147 | 6.2 | 0.6 | 0.314516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102041 | 147 | 4 | 38 | 36.75 | 0.939394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
01db75c80dafcc2118c45db0d030c9857b3bf923 | 5,810 | py | Python | mp/models.py | luozhouyang/matchpyramid | fa436460acdd4c2c8edab92de9799ffbe9811422 | [
"Apache-2.0"
] | 2 | 2019-06-30T01:29:26.000Z | 2020-12-09T06:46:17.000Z | mp/models.py | luozhouyang/matchpyramid | fa436460acdd4c2c8edab92de9799ffbe9811422 | [
"Apache-2.0"
] | 3 | 2020-11-13T18:14:57.000Z | 2022-02-10T00:23:35.000Z | mp/models.py | luozhouyang/matchpyramid | fa436460acdd4c2c8edab92de9799ffbe9811422 | [
"Apache-2.0"
] | 1 | 2019-07-22T02:48:00.000Z | 2019-07-22T02:48:00.000Z | # Copyright 2019 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from mp.indicator import Indicator
# Default hyper-parameters shared by the MatchPyramid builders below.
model_config = {
    'query_max_len': 1000,   # padded token length of the query input
    'doc_max_len': 1000,     # padded token length of the document input
    'num_conv_layers': 3,    # number of Conv2D + MaxPool + BatchNorm stages
    'filters': [8, 16, 32],  # Conv2D filter count per stage
    'kernel_size': [[5, 5], [3, 3], [3, 3]],  # Conv2D kernel size per stage
    'pool_size': [[2, 2], [2, 2], [2, 2]],    # MaxPooling2D pool size per stage
    'dropout': 0.5,  # NOTE(review): defined but not read by any builder below
    'batch_size': 32,  # NOTE(review): not read by the builders; presumably used by training code
    'vocab_size': 100, # Important!!! update vocab_size
    'embedding_size': 128,  # embedding dimension shared by query and doc
}
def build_dot_model(config):
    """Using dot-product to produce match matrix, as described in the paper.

    Query/doc token ids are embedded with a shared Embedding layer, their
    pairwise dot products form a 2-D match matrix, and a stack of
    Conv2D + MaxPooling + BatchNorm layers feeds a sigmoid score head.
    Returns a compiled Keras model with two outputs: the match matrix
    (no loss attached — presumably exposed for inspection; confirm) and
    the scalar score ``out``.
    """
    q_input = tf.keras.layers.Input(shape=(config['query_max_len'],), name='q_input')
    d_input = tf.keras.layers.Input(shape=(config['doc_max_len'],), name='d_input')
    # One Embedding instance shared by both inputs.
    embedding = tf.keras.layers.Embedding(config['vocab_size'], config['embedding_size'], name='embedding')
    q_embedding = embedding(q_input)
    d_embedding = embedding(d_input)
    # Pairwise dot products of query/doc token embeddings.
    dot = tf.keras.layers.Dot(axes=-1, name='dot')([q_embedding, d_embedding])
    # reshape to [batch_size, query_max_len, doc_max_len, channel(1)]
    matrix = tf.keras.layers.Reshape((config['query_max_len'], config['doc_max_len'], 1), name='matrix')(dot)
    x = matrix
    # Convolutional pyramid over the match matrix.
    for i in range(config['num_conv_layers']):
        x = tf.keras.layers.Conv2D(
            filters=config['filters'][i],
            kernel_size=config['kernel_size'][i],
            padding='same',
            activation='relu',
            name='conv_%d' % i)(x)
        x = tf.keras.layers.MaxPooling2D(pool_size=tuple(config['pool_size'][i]), name='max_pooling_%d' % i)(x)
        x = tf.keras.layers.BatchNormalization()(x)
    flatten = tf.keras.layers.Flatten()(x)
    dense = tf.keras.layers.Dense(32, activation='relu')(flatten)
    out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dense)
    model = tf.keras.Model(inputs=[q_input, d_input], outputs=[matrix, out])
    # Loss/metrics are attached only to the 'out' head.
    model.compile(
        loss={
            'out': 'binary_crossentropy'
        },
        optimizer='sgd',
        metrics={
            'out': [tf.keras.metrics.Accuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]
        })
    return model
def build_cosine_model(config):
    """Using cosine to produce match matrix, as described in the paper.

    Identical to ``build_dot_model`` except the match matrix holds cosine
    similarities (``Dot(normalize=True)``) instead of raw dot products.
    Returns a compiled Keras model with outputs [matrix, out]; loss is
    attached only to the scalar score ``out``.
    """
    q_input = tf.keras.layers.Input(shape=(config['query_max_len'],), name='q_input')
    d_input = tf.keras.layers.Input(shape=(config['doc_max_len'],), name='d_input')
    # One Embedding instance shared by both inputs.
    embedding = tf.keras.layers.Embedding(config['vocab_size'], config['embedding_size'], name='embedding')
    q_embedding = embedding(q_input)
    d_embedding = embedding(d_input)
    # Pairwise cosine similarities (L2-normalized dot products).
    cosine = tf.keras.layers.Dot(axes=-1, normalize=True, name='cosine')([q_embedding, d_embedding])
    matrix = tf.keras.layers.Reshape((config['query_max_len'], config['doc_max_len'], 1), name='matrix')(cosine)
    x = matrix
    # Convolutional pyramid over the match matrix.
    for i in range(config['num_conv_layers']):
        x = tf.keras.layers.Conv2D(
            filters=config['filters'][i],
            kernel_size=config['kernel_size'][i],
            padding='same',
            activation='relu',
            name='conv_%d' % i)(x)
        x = tf.keras.layers.MaxPooling2D(pool_size=tuple(config['pool_size'][i]), name='max_pooling_%d' % i)(x)
        x = tf.keras.layers.BatchNormalization()(x)
    flatten = tf.keras.layers.Flatten()(x)
    dense = tf.keras.layers.Dense(32, activation='relu')(flatten)
    out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dense)
    model = tf.keras.Model(inputs=[q_input, d_input], outputs=[matrix, out])
    # Loss/metrics are attached only to the 'out' head.
    model.compile(
        loss={
            'out': 'binary_crossentropy'
        },
        optimizer='sgd',
        metrics={
            'out': [tf.keras.metrics.Accuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]
        })
    return model
def build_indicator_model(config):
    """Using indicator fn to produce match matrix, as described in the paper.

    The match matrix is produced by the custom ``Indicator`` layer applied
    directly to the token-id inputs (no embedding layer), then fed through
    the same Conv2D + MaxPooling + BatchNorm pyramid and sigmoid score head.
    """
    q_input = tf.keras.layers.Input(shape=(config['query_max_len'],), name='q_input')
    d_input = tf.keras.layers.Input(shape=(config['doc_max_len'],), name='d_input')
    m = Indicator(config['query_max_len'], config['doc_max_len'], name='matrix')((q_input, d_input))
    # Add a trailing channel dimension for the Conv2D stack.
    m2 = tf.keras.layers.Reshape((config['query_max_len'], config['doc_max_len'], 1), name='m2')(m)
    x = m2
    # Convolutional pyramid over the match matrix.
    for i in range(config['num_conv_layers']):
        x = tf.keras.layers.Conv2D(
            filters=config['filters'][i],
            kernel_size=config['kernel_size'][i],
            padding='same',
            activation='relu',
            name='conv_%d' % i)(x)
        x = tf.keras.layers.MaxPooling2D(pool_size=tuple(config['pool_size'][i]), name='max_pooling_%d' % i)(x)
        x = tf.keras.layers.BatchNormalization()(x)
    flatten = tf.keras.layers.Flatten()(x)
    dense = tf.keras.layers.Dense(32, activation='relu')(flatten)
    out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dense)
    # NOTE(review): outputs are ordered [out, m] here, unlike [matrix, out]
    # in the other builders — confirm downstream consumers expect this.
    model = tf.keras.Model(inputs=[q_input, d_input], outputs=[out, m])
    # Loss/metrics are attached only to the 'out' head.
    model.compile(
        loss={
            'out': 'binary_crossentropy'
        },
        optimizer='sgd',
        metrics={
            'out': [tf.keras.metrics.Accuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]
        })
    return model
| 39.256757 | 112 | 0.638382 | 797 | 5,810 | 4.505646 | 0.188206 | 0.083821 | 0.112225 | 0.035088 | 0.739627 | 0.736842 | 0.725146 | 0.725146 | 0.716235 | 0.716235 | 0 | 0.014602 | 0.198451 | 5,810 | 147 | 113 | 39.52381 | 0.756496 | 0.148193 | 0 | 0.730769 | 0 | 0 | 0.152735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028846 | false | 0 | 0.019231 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bf0a8343f424d0166744b8b2dc5c4e4ad04a7a53 | 12,302 | py | Python | python/cv_utils.py | shamindras/bttv-aistats2020 | 7a2d5136647519d2c4cc6b0735599abec9c2997a | [
"MIT"
] | 1 | 2020-08-20T09:51:10.000Z | 2020-08-20T09:51:10.000Z | python/cv_utils.py | shamindras/bttv-aistats2020 | 7a2d5136647519d2c4cc6b0735599abec9c2997a | [
"MIT"
] | 8 | 2020-02-13T04:48:29.000Z | 2020-02-20T05:33:49.000Z | python/cv_utils.py | shamindras/bttv-aistats2020 | 7a2d5136647519d2c4cc6b0735599abec9c2997a | [
"MIT"
] | 1 | 2021-09-16T14:07:31.000Z | 2021-09-16T14:07:31.000Z | import sys
import numpy as np
import scipy as sc
import scipy.linalg as spl
import grad_utils as model
import ks_utils as ks
import simulation_utils as si
import opt_utils as op
def loocv(data, lambdas_smooth, opt_fn,
          num_loocv=200, get_estimate=True,
          verbose='cv', out='terminal', **kwargs):
    '''
    Leave-one-out cross-validation over the smoothing penalty.
    ----------
    Input:
    data: TxNxN array of pairwise match counts
        (data[t, i, j] = number of times i beat j in period t — TODO confirm
        orientation against the optimizer)
    lambdas_smooth: a vector of candidate smoothing penalties
    opt_fn: a python function in a particular form of
        opt_fn(data, lambda_smooth, beta_init=None, **kwargs)
        returning a pair whose second element is the estimated beta;
        kwargs might contain hyperparameters (e.g., step size,
        max iteration, etc.) for the optimization function
    num_loocv: the number of random left-one-out cv samples
    get_estimate: kept for interface compatibility; full-data estimates
        are always computed here and reused as warm starts for cv fits
    verbose: 'cv' prints only cv progress; 'all' additionally lets
        opt_fn print. The default is 'cv'.
    out: 'terminal' (sys.__stdout__), 'notebook' (sys.stdout), 'file'
        ('cv_log.txt' in the working directory), or any custom writable
        stream. The default is 'terminal'.
    **kwargs: keyword arguments forwarded to opt_fn
    ----------
    Output:
    lambda_cv: lambda_smooth chosen after cross-validation
    nll_cv: average cross-validated negative loglikelihood
        (in increasing-lambda order)
    beta_cv: beta estimate for the chosen lambda
    '''
    # Fit once per lambda on the full data, in decreasing-lambda order so
    # each fit can warm-start from the previous (smoother) solution.
    lambdas_smooth = -np.sort(-lambdas_smooth.flatten())
    betas = [None] * lambdas_smooth.shape[0]
    last_beta = np.zeros(data.shape[:2])
    for i, lambda_smooth in enumerate(lambdas_smooth):
        _, beta = opt_fn(data, lambda_smooth, beta_init=last_beta, **kwargs)
        betas[i] = beta.reshape(data.shape[:2])
        last_beta = betas[i]
    # indices[k] is the (t, i, j) coordinate of flat cell k; cum_match lets
    # us map a uniformly drawn match id to the cell that contains it.
    indices = np.array(np.where(np.full(data.shape, True))).T
    cum_match = np.cumsum(data.flatten())
    close_out = False
    if out == 'terminal':
        out = sys.__stdout__
    elif out == 'notebook':
        out = sys.stdout
    elif out == 'file':
        out = open('cv_log.txt', 'w')
        close_out = True  # we own this handle, so close it on exit (leak fix)
    try:
        loglikes_loocv = np.zeros(lambdas_smooth.shape)
        for i in range(num_loocv):
            data_loocv = data.copy()
            # Draw one match uniformly (0-indexed id in [0, sum)).  Its cell
            # is the first index whose cumulative count EXCEEDS the id
            # (searchsorted side='right').  Off-by-one fix: the previous
            # '>=' comparison could pick an empty cell whenever the drawn id
            # landed on a cumulative-sum boundary, driving its count to -1.
            rand_match = np.random.randint(np.sum(data))
            rand_index = indices[int(np.searchsorted(cum_match, rand_match,
                                                     side='right'))]
            data_loocv[tuple(rand_index)] -= 1
            for j, lambda_smooth in enumerate(lambdas_smooth):
                _, beta_loocv = opt_fn(data_loocv, lambda_smooth,
                                       beta_init=betas[j],
                                       verbose=(verbose in ['all']),
                                       out=out, **kwargs)
                beta_loocv = beta_loocv.reshape(data.shape[:2])
                # Bradley-Terry log-likelihood of the held-out match
                # (t, winner, loser) = rand_index.
                win = beta_loocv[rand_index[0], rand_index[1]]
                lose = beta_loocv[rand_index[0], rand_index[2]]
                loglikes_loocv[j] += win - np.log(np.exp(win) + np.exp(lose))
            if verbose in ['cv', 'all']:
                out.write("%d-th cv done\n" % (i + 1))
                out.flush()
    finally:
        if close_out:
            out.close()
    best = np.argmax(loglikes_loocv)
    # nll is reversed so it lines up with the lambdas in increasing order.
    return (lambdas_smooth[best], -loglikes_loocv[::-1] / num_loocv,
            betas[best])
def loocv_ks(data, h_list, opt_fn,
             num_loocv=200, get_estimate=True, return_prob=True,
             verbose='cv', out='terminal', **kwargs):
    '''
    Leave-one-out cross-validation over the kernel bandwidth.
    ----------
    Input:
    data: TxNxN array of pairwise match counts
    h_list: a vector of candidate kernel bandwidths
    opt_fn: a python function in a particular form of
        opt_fn(ks_data, beta_init=None, **kwargs)
        returning a pair whose second element is the estimated beta;
        kwargs might contain optimizer hyperparameters
    num_loocv: the number of random left-one-out cv samples
    get_estimate: kept for interface compatibility; full-data estimates
        are always computed here and reused as warm starts for cv fits
    return_prob: if True, also return the average held-out
        misprediction probability
    verbose: 'cv' prints only cv progress; 'all' additionally lets
        opt_fn print. The default is 'cv'.
    out: 'terminal', 'notebook', 'file' ('cv_log.txt'), or any custom
        writable stream. The default is 'terminal'.
    **kwargs: keyword arguments forwarded to opt_fn
    ----------
    Output:
    h_cv: bandwidth chosen after cross-validation
    nll_cv: average cross-validated negative loglikelihood
        (in increasing-h order)
    beta_cv: beta estimate for the chosen bandwidth
    prob_cv (only if return_prob): average held-out misprediction
        probability (in increasing-h order)
    '''
    # Fit once per bandwidth on the full (kernel-smoothed) data,
    # warm-starting each fit from the previous solution.
    h_list = -np.sort(-h_list.flatten())
    betas = [None] * h_list.shape[0]
    last_beta = np.zeros(data.shape[:2])
    for i, h in enumerate(h_list):
        ks_data = ks.kernel_smooth(data, h)
        _, beta = opt_fn(ks_data, beta_init=last_beta, **kwargs)
        betas[i] = beta.reshape(data.shape[:2])
        last_beta = betas[i]
    indices = np.array(np.where(np.full(data.shape, True))).T
    cum_match = np.cumsum(data.flatten())
    close_out = False
    if out == 'terminal':
        out = sys.__stdout__
    elif out == 'notebook':
        out = sys.stdout
    elif out == 'file':
        out = open('cv_log.txt', 'w')
        close_out = True  # we own this handle, so close it on exit (leak fix)
    try:
        loglikes_loocv = np.zeros(h_list.shape)
        prob_loocv = np.zeros(h_list.shape)
        for i in range(num_loocv):
            data_loocv = data.copy()
            # Off-by-one fix: searchsorted(side='right') selects the first
            # cell whose cumulative count exceeds the drawn match id; the
            # previous '>=' test could pick an empty cell on a boundary.
            rand_match = np.random.randint(np.sum(data))
            rand_index = indices[int(np.searchsorted(cum_match, rand_match,
                                                     side='right'))]
            data_loocv[tuple(rand_index)] -= 1
            for j, h in enumerate(h_list):
                ks_data_loocv = ks.kernel_smooth(data_loocv, h)
                _, beta_loocv = opt_fn(ks_data_loocv, beta_init=betas[j],
                                       verbose=(verbose in ['all']),
                                       out=out, **kwargs)
                beta_loocv = beta_loocv.reshape(data.shape[:2])
                # Held-out Bradley-Terry log-likelihood and misprediction
                # probability for (t, winner, loser) = rand_index.
                win = beta_loocv[rand_index[0], rand_index[1]]
                lose = beta_loocv[rand_index[0], rand_index[2]]
                denom = np.exp(win) + np.exp(lose)
                loglikes_loocv[j] += win - np.log(denom)
                prob_loocv[j] += 1 - np.exp(win) / denom
            if verbose in ['cv', 'all']:
                out.write("%d-th cv done\n" % (i + 1))
                out.flush()
    finally:
        if close_out:
            out.close()
    best = np.argmax(loglikes_loocv)
    if return_prob:
        return (h_list[best], -loglikes_loocv[::-1] / num_loocv,
                betas[best], prob_loocv[::-1] / num_loocv)
    return (h_list[best], -loglikes_loocv[::-1] / num_loocv, betas[best])
def loo_DBT(data, h, opt_fn,
            num_loo=200, get_estimate=True, return_prob=True,
            verbose='cv', out='terminal', **kwargs):
    '''
    Leave-one-out evaluation of the dynamic Bradley-Terry estimator at a
    single kernel bandwidth.
    ----------
    Input:
    data: TxNxN array of pairwise match counts
    h: kernel bandwidth
    opt_fn: a python function in a particular form of
        opt_fn(ks_data, beta_init=None, **kwargs)
        returning a pair whose second element is the estimated beta;
        kwargs might contain optimizer hyperparameters
    num_loo: the number of random left-one-out samples
    get_estimate: kept for interface compatibility (the full-data
        estimate is always computed and returned)
    return_prob: if True, also return the average held-out
        misprediction probability
    verbose: 'cv' prints only cv progress; 'all' additionally lets
        opt_fn print. The default is 'cv'.
    out: 'terminal', 'notebook', 'file' ('cv_log.txt'), or any custom
        writable stream. The default is 'terminal'.
    **kwargs: keyword arguments forwarded to opt_fn
    ----------
    Output:
    nll_loo: average held-out negative loglikelihood
    beta: the full-data estimate (used as warm start for every refit)
    prob_loo (only if return_prob): average held-out misprediction
        probability
    '''
    # One fit on the full smoothed data; its beta warm-starts every refit.
    last_beta = np.zeros(data.shape[:2])
    ks_data = ks.kernel_smooth(data, h)
    _, beta = opt_fn(ks_data, beta_init=last_beta, **kwargs)
    beta = beta.reshape(data.shape[:2])
    indices = np.array(np.where(np.full(data.shape, True))).T
    cum_match = np.cumsum(data.flatten())
    close_out = False
    if out == 'terminal':
        out = sys.__stdout__
    elif out == 'notebook':
        out = sys.stdout
    elif out == 'file':
        out = open('cv_log.txt', 'w')
        close_out = True  # we own this handle, so close it on exit (leak fix)
    try:
        loglikes_loo = 0
        prob_loo = 0
        for i in range(num_loo):
            data_loo = data.copy()
            # Off-by-one fix: searchsorted(side='right') selects the first
            # cell whose cumulative count exceeds the drawn match id; the
            # previous '>=' test could pick an empty cell on a boundary.
            rand_match = np.random.randint(np.sum(data))
            rand_index = indices[int(np.searchsorted(cum_match, rand_match,
                                                     side='right'))]
            data_loo[tuple(rand_index)] -= 1
            ks_data_loo = ks.kernel_smooth(data_loo, h)
            _, beta_loo = opt_fn(ks_data_loo, beta_init=beta,
                                 verbose=(verbose in ['all']),
                                 out=out, **kwargs)
            beta_loo = beta_loo.reshape(data.shape[:2])
            # Held-out Bradley-Terry log-likelihood and misprediction
            # probability for (t, winner, loser) = rand_index.
            win = beta_loo[rand_index[0], rand_index[1]]
            lose = beta_loo[rand_index[0], rand_index[2]]
            denom = np.exp(win) + np.exp(lose)
            loglikes_loo += win - np.log(denom)
            prob_loo += 1 - np.exp(win) / denom
            if verbose in ['cv', 'all']:
                out.write("%d-th cv done\n" % (i + 1))
                out.flush()
    finally:
        if close_out:
            out.close()
    if return_prob:
        return (-loglikes_loo / num_loo, beta, prob_loo / num_loo)
    return (-loglikes_loo / num_loo, beta)
def loo_vBT(data, num_loo=200):
    '''
    Leave-one-out evaluation of the vanilla (per-period) Bradley-Terry
    estimator fitted via op.gd_bt.
    ----------
    Input:
    data: TxNxN array of pairwise match counts
    num_loo: the number of random left-one-out samples
    ----------
    Output:
    (nll_loo, prob_loo): average held-out negative loglikelihood and
    average held-out misprediction probability
    '''
    N = data.shape[1]
    # Full-data fit; each held-out refit warm-starts from its period's row.
    _, beta = op.gd_bt(data=data)
    indices = np.array(np.where(np.full(data.shape, True))).T
    cum_match = np.cumsum(data.flatten())
    loglikes_loo = 0
    prob_loo = 0
    for _ in range(num_loo):
        data_loo = data.copy()
        # Off-by-one fix: searchsorted(side='right') selects the first cell
        # whose cumulative count exceeds the drawn match id; the previous
        # '>=' test could pick an empty cell on a boundary.
        rand_match = np.random.randint(np.sum(data))
        rand_index = indices[int(np.searchsorted(cum_match, rand_match,
                                                 side='right'))]
        data_loo[tuple(rand_index)] -= 1
        # Refit only the affected period (the vanilla estimator treats
        # periods independently).  The dead write-back of the refitted row
        # into a full beta copy in the original was removed — it was never
        # read.
        period_data = data_loo[rand_index[0]].reshape((1, N, N))
        _, beta_loo_i = op.gd_bt(data=period_data,
                                 beta_init=beta[rand_index[0], :])
        beta_loo_i = beta_loo_i.reshape((N,))
        # Held-out Bradley-Terry log-likelihood and misprediction
        # probability for (winner, loser) = rand_index[1:].
        win = beta_loo_i[rand_index[1]]
        lose = beta_loo_i[rand_index[2]]
        denom = np.exp(win) + np.exp(lose)
        loglikes_loo += win - np.log(denom)
        prob_loo += 1 - np.exp(win) / denom
    return (-loglikes_loo / num_loo, prob_loo / num_loo)
def loo_winrate(data, num_loo=200):
    '''
    Leave-one-out evaluation of the naive win-rate baseline.
    ----------
    Input:
    data: TxNxN array of pairwise match counts
    num_loo: the number of random left-one-out samples
    ----------
    Output:
    (nll_loo, prob_loo).  NOTE(review): no log-likelihood is ever
    accumulated for this baseline, so the first element is always 0.0;
    it is kept only for interface parity with the other loo_* functions.
    '''
    indices = np.array(np.where(np.full(data.shape, True))).T
    cum_match = np.cumsum(data.flatten())
    loglikes_loo = 0
    prob_loo = 0
    for _ in range(num_loo):
        data_loo = data.copy()
        # Off-by-one fix: searchsorted(side='right') selects the first cell
        # whose cumulative count exceeds the drawn match id; the previous
        # '>=' test could pick an empty cell on a boundary.
        rand_match = np.random.randint(np.sum(data))
        rand_index = indices[int(np.searchsorted(cum_match, rand_match,
                                                 side='right'))]
        data_loo[tuple(rand_index)] -= 1
        winrate_loo = si.get_winrate(data=data_loo)
        # presumably winrate_loo[t, i] is player i's win rate in period t,
        # so this accumulates the predicted probability that the observed
        # winner loses — TODO confirm against si.get_winrate
        prob_loo += 1 - winrate_loo[rand_index[0], rand_index[1]]
    return (-loglikes_loo / num_loo, prob_loo / num_loo)
1755cb8685d3f608ef9e6fe4b94ff2185a3bf714 | 477 | py | Python | src/GridCal/Engine/IO/__init__.py | mzy2240/GridCal | 0352f0e9ce09a9c037722bf2f2afc0a31ccd2880 | [
"BSD-3-Clause"
] | 284 | 2016-01-31T03:20:44.000Z | 2022-03-17T21:16:52.000Z | src/GridCal/Engine/IO/__init__.py | mzy2240/GridCal | 0352f0e9ce09a9c037722bf2f2afc0a31ccd2880 | [
"BSD-3-Clause"
] | 94 | 2016-01-14T13:37:40.000Z | 2022-03-28T03:13:56.000Z | src/GridCal/Engine/IO/__init__.py | mzy2240/GridCal | 0352f0e9ce09a9c037722bf2f2afc0a31ccd2880 | [
"BSD-3-Clause"
] | 84 | 2016-03-29T10:43:04.000Z | 2022-02-22T16:26:55.000Z |
from GridCal.Engine.IO.cim.cim_parser import CIMImport, CIMExport
from GridCal.Engine.IO.dgs_parser import *
from GridCal.Engine.IO.dpx_parser import *
from GridCal.Engine.IO.ipa_parser import *
from GridCal.Engine.IO.json_parser import *
from GridCal.Engine.IO.matpower.matpower_parser import parse_matpower_file
from GridCal.Engine.IO.raw_parser import PSSeParser
from GridCal.Engine.IO.excel_interface import *
from GridCal.Engine.IO.file_handler import FileOpen, FileSave
| 43.363636 | 74 | 0.842767 | 73 | 477 | 5.356164 | 0.315068 | 0.253197 | 0.391304 | 0.43734 | 0.381074 | 0.317136 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081761 | 477 | 10 | 75 | 47.7 | 0.892694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
bd7a89bdfd2a3fd04c631eb098a7a6462fe218a2 | 103 | py | Python | plugin_scripts/pipeline_exceptions.py | jashparekh/bigquery-action | 2939c416d881dcf120e8f2cb35d6adc864bdbc77 | [
"MIT"
] | 8 | 2021-09-17T19:57:54.000Z | 2022-01-02T21:38:07.000Z | plugin_scripts/pipeline_exceptions.py | wayfair-incubator/bigquery-buildkite-plugin | 18bef97a3dbbb468cfa3b90b8fdda15a02388d37 | [
"MIT"
] | 95 | 2021-07-11T01:11:56.000Z | 2022-03-31T08:15:30.000Z | plugin_scripts/pipeline_exceptions.py | jashparekh/bigquery-action | 2939c416d881dcf120e8f2cb35d6adc864bdbc77 | [
"MIT"
] | null | null | null | class DatasetSchemaDirectoryNonExistent(Exception):
pass
class DeployFailed(Exception):
    """Raised to signal that a deploy operation failed."""
    pass
| 14.714286 | 51 | 0.786408 | 8 | 103 | 10.125 | 0.625 | 0.320988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15534 | 103 | 6 | 52 | 17.166667 | 0.931034 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
da0e78826aa8b5046a25f264e050ab8dd42dacba | 62 | py | Python | package/crud/celery/abc/__init__.py | derekmerck/pycrud | 065edd4f3ec1fda906772de7a20e8df16e31bfb2 | [
"MIT"
] | 15 | 2019-02-12T23:26:09.000Z | 2021-12-21T08:53:58.000Z | package/crud/celery/abc/__init__.py | derekmerck/pycrud | 065edd4f3ec1fda906772de7a20e8df16e31bfb2 | [
"MIT"
] | 2 | 2019-01-23T21:13:12.000Z | 2019-06-28T15:45:51.000Z | package/crud/celery/abc/__init__.py | derekmerck/pycrud | 065edd4f3ec1fda906772de7a20e8df16e31bfb2 | [
"MIT"
] | 6 | 2019-01-23T20:22:50.000Z | 2022-02-03T03:27:04.000Z | from .distributed import DistributedMixin, LockingGatewayMixin | 62 | 62 | 0.903226 | 5 | 62 | 11.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 62 | 1 | 62 | 62 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
da4c86202eb6297690f11159e4e604eb90cf7960 | 97 | py | Python | tests/test_loop.py | ChillyCider/swarmclientpy | 0872ba013182120aec23b46d6d3caccfa4ee6f62 | [
"Zlib"
] | 1 | 2021-09-10T21:45:44.000Z | 2021-09-10T21:45:44.000Z | tests/test_loop.py | ChillyCider/swarmclientpy | 0872ba013182120aec23b46d6d3caccfa4ee6f62 | [
"Zlib"
] | null | null | null | tests/test_loop.py | ChillyCider/swarmclientpy | 0872ba013182120aec23b46d6d3caccfa4ee6f62 | [
"Zlib"
] | null | null | null | from .context import swarmclientpy
def test_add():
    """Smoke test: the loop module's add should sum its two arguments."""
    total = swarmclientpy.loop.add(1, 3)
    assert total == 4
| 19.4 | 44 | 0.721649 | 14 | 97 | 4.928571 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 0.164948 | 97 | 4 | 45 | 24.25 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
da4f4815b376b184b345af8dfdae9831c4b1e0ad | 47 | py | Python | scripts/portal/move_elin.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/move_elin.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/move_elin.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # 222020400
# Portal script: send the player to map 300000100 at spawn point 1.
# NOTE(review): 'sm' is presumably the script manager injected by the game
# server runtime — confirm against the script host.
sm.warp(300000100, 1)
# Release the script context so the client is unblocked.
sm.dispose()
| 11.75 | 21 | 0.723404 | 7 | 47 | 4.857143 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.452381 | 0.106383 | 47 | 3 | 22 | 15.666667 | 0.357143 | 0.191489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
da8706b2443112e5cfb3fb145f0e47abdd4b8b83 | 401 | py | Python | CH7/function-module.py | yancqS/Python-tourial | 6d5e91a5fe5dc22a807e375eb444553a20837d5a | [
"MIT"
] | null | null | null | CH7/function-module.py | yancqS/Python-tourial | 6d5e91a5fe5dc22a807e375eb444553a20837d5a | [
"MIT"
] | null | null | null | CH7/function-module.py | yancqS/Python-tourial | 6d5e91a5fe5dc22a807e375eb444553a20837d5a | [
"MIT"
] | null | null | null | import pizza
# Demonstrates the different ways of importing and calling a module function.
# Style 1: qualified access through the module name.
pizza.make_pizza(16, 'a', 'b', 'c')
pizza.make_pizza(12, 'a', 'b')
# Style 2: import one specific function into the local namespace.
from pizza import make_pizza
make_pizza(16, 'a', 'b', 'c')
make_pizza(12, 'a', 'b')
# Style 3: import a function under an alias.
from pizza import make_pizza as mp
mp(16, 'a', 'b', 'c')
mp(12, 'a', 'b')
# Style 4: import the module itself under an alias.
import pizza as p
p.make_pizza(16, 'a', 'b', 'c')
p.make_pizza(12, 'a', 'b')
# Style 5: star import — brings every public name in (discouraged in real
# code because it pollutes the namespace).
from pizza import *
make_pizza(16, 'a', 'b', 'c')
make_pizza(12, 'a', 'b')
| 14.851852 | 35 | 0.586035 | 78 | 401 | 2.884615 | 0.153846 | 0.4 | 0.088889 | 0.111111 | 0.782222 | 0.782222 | 0.72 | 0.635556 | 0.635556 | 0.635556 | 0 | 0.059701 | 0.164589 | 401 | 26 | 36 | 15.423077 | 0.61194 | 0 | 0 | 0.266667 | 0 | 0 | 0.062344 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
16f4fe7217278987cd4e79cc92cb10723e9e337d | 12,404 | py | Python | aws-dev/awsdev8/venv/Lib/site-packages/amazondax/grammar/DynamoDbGrammarLexer.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 13 | 2020-02-02T13:53:50.000Z | 2022-03-20T19:50:02.000Z | aws-dev/awsdev8/venv/Lib/site-packages/amazondax/grammar/DynamoDbGrammarLexer.py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 2 | 2020-03-29T19:08:04.000Z | 2021-06-02T00:57:44.000Z | aws-dev/awsdev8/venv/Lib/site-packages/amazondax/grammar/DynamoDbGrammarLexer(1).py | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | 3f76e3d3df6797705b5b30ae574fe678250d5e92 | [
"MIT"
] | 10 | 2019-12-25T20:42:37.000Z | 2021-11-17T15:19:00.000Z | # Generated from grammar/DynamoDbGrammar.g4 by ANTLR 4.7
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2 ")
buf.write("\u013e\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6")
buf.write("\3\6\3\7\3\7\3\b\6\b\u0089\n\b\r\b\16\b\u008a\3\b\3\b")
buf.write("\3\t\3\t\3\n\3\n\3\n\3\13\3\13\3\f\3\f\3\f\3\r\3\r\3\16")
buf.write("\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\21\3\22\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\24")
buf.write("\3\24\3\24\3\24\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\27")
buf.write("\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\7\32\u00d1")
buf.write("\n\32\f\32\16\32\u00d4\13\32\5\32\u00d6\n\32\3\33\3\33")
buf.write("\7\33\u00da\n\33\f\33\16\33\u00dd\13\33\3\34\3\34\6\34")
buf.write("\u00e1\n\34\r\34\16\34\u00e2\3\35\3\35\6\35\u00e7\n\35")
buf.write("\r\35\16\35\u00e8\3\36\3\36\3\37\3\37\3 \3 \3!\3!\3\"")
buf.write("\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'\3\'\3(\3(\3)\3)\3*\3")
buf.write("*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3\60\3\61\3\61\3")
buf.write("\62\3\62\3\63\3\63\3\64\3\64\3\65\3\65\3\66\3\66\3\67")
buf.write("\3\67\38\38\39\39\3:\3:\3;\3;\3<\3<\3<\3<\7<\u012b\n<")
buf.write("\f<\16<\u012e\13<\3<\3<\3<\3<\3<\7<\u0135\n<\f<\16<\u0138")
buf.write("\13<\3<\5<\u013b\n<\3=\3=\4\u012c\u0136\2>\3\3\5\4\7\5")
buf.write("\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35")
buf.write("\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33")
buf.write("\65\34\67\359\36;\2=\2?\2A\2C\2E\2G\2I\2K\2M\2O\2Q\2S")
buf.write("\2U\2W\2Y\2[\2]\2_\2a\2c\2e\2g\2i\2k\2m\2o\2q\2s\2u\2")
buf.write("w\37y \3\2!\5\2\13\f\17\17\"\"\4\2C\\c|\6\2\62;C\\aac")
buf.write("|\3\2\63;\3\2\62;\4\2CCcc\4\2DDdd\4\2EEee\4\2FFff\4\2")
buf.write("GGgg\4\2HHhh\4\2IIii\4\2JJjj\4\2KKkk\4\2LLll\4\2MMmm\4")
buf.write("\2NNnn\4\2OOoo\4\2PPpp\4\2QQqq\4\2RRrr\4\2SSss\4\2TTt")
buf.write("t\4\2UUuu\4\2VVvv\4\2WWww\4\2XXxx\4\2YYyy\4\2ZZzz\4\2")
buf.write("[[{{\4\2\\\\||\2\u012a\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2")
buf.write("\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2")
buf.write("\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2")
buf.write("\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!")
buf.write("\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2")
buf.write("\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3")
buf.write("\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2w\3\2\2\2")
buf.write("\2y\3\2\2\2\3{\3\2\2\2\5}\3\2\2\2\7\177\3\2\2\2\t\u0081")
buf.write("\3\2\2\2\13\u0083\3\2\2\2\r\u0085\3\2\2\2\17\u0088\3\2")
buf.write("\2\2\21\u008e\3\2\2\2\23\u0090\3\2\2\2\25\u0093\3\2\2")
buf.write("\2\27\u0095\3\2\2\2\31\u0098\3\2\2\2\33\u009a\3\2\2\2")
buf.write("\35\u009d\3\2\2\2\37\u009f\3\2\2\2!\u00a1\3\2\2\2#\u00a4")
buf.write("\3\2\2\2%\u00ac\3\2\2\2\'\u00b0\3\2\2\2)\u00b4\3\2\2\2")
buf.write("+\u00b7\3\2\2\2-\u00bb\3\2\2\2/\u00bf\3\2\2\2\61\u00c6")
buf.write("\3\2\2\2\63\u00d5\3\2\2\2\65\u00d7\3\2\2\2\67\u00de\3")
buf.write("\2\2\29\u00e4\3\2\2\2;\u00ea\3\2\2\2=\u00ec\3\2\2\2?\u00ee")
buf.write("\3\2\2\2A\u00f0\3\2\2\2C\u00f2\3\2\2\2E\u00f4\3\2\2\2")
buf.write("G\u00f6\3\2\2\2I\u00f8\3\2\2\2K\u00fa\3\2\2\2M\u00fc\3")
buf.write("\2\2\2O\u00fe\3\2\2\2Q\u0100\3\2\2\2S\u0102\3\2\2\2U\u0104")
buf.write("\3\2\2\2W\u0106\3\2\2\2Y\u0108\3\2\2\2[\u010a\3\2\2\2")
buf.write("]\u010c\3\2\2\2_\u010e\3\2\2\2a\u0110\3\2\2\2c\u0112\3")
buf.write("\2\2\2e\u0114\3\2\2\2g\u0116\3\2\2\2i\u0118\3\2\2\2k\u011a")
buf.write("\3\2\2\2m\u011c\3\2\2\2o\u011e\3\2\2\2q\u0120\3\2\2\2")
buf.write("s\u0122\3\2\2\2u\u0124\3\2\2\2w\u013a\3\2\2\2y\u013c\3")
buf.write("\2\2\2{|\7.\2\2|\4\3\2\2\2}~\7*\2\2~\6\3\2\2\2\177\u0080")
buf.write("\7+\2\2\u0080\b\3\2\2\2\u0081\u0082\7\60\2\2\u0082\n\3")
buf.write("\2\2\2\u0083\u0084\7]\2\2\u0084\f\3\2\2\2\u0085\u0086")
buf.write("\7_\2\2\u0086\16\3\2\2\2\u0087\u0089\t\2\2\2\u0088\u0087")
buf.write("\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u0088\3\2\2\2\u008a")
buf.write("\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u008d\b\b\2\2")
buf.write("\u008d\20\3\2\2\2\u008e\u008f\7?\2\2\u008f\22\3\2\2\2")
buf.write("\u0090\u0091\7>\2\2\u0091\u0092\7@\2\2\u0092\24\3\2\2")
buf.write("\2\u0093\u0094\7>\2\2\u0094\26\3\2\2\2\u0095\u0096\7>")
buf.write("\2\2\u0096\u0097\7?\2\2\u0097\30\3\2\2\2\u0098\u0099\7")
buf.write("@\2\2\u0099\32\3\2\2\2\u009a\u009b\7@\2\2\u009b\u009c")
buf.write("\7?\2\2\u009c\34\3\2\2\2\u009d\u009e\7-\2\2\u009e\36\3")
buf.write("\2\2\2\u009f\u00a0\7/\2\2\u00a0 \3\2\2\2\u00a1\u00a2\5")
buf.write("S*\2\u00a2\u00a3\5]/\2\u00a3\"\3\2\2\2\u00a4\u00a5\5E")
buf.write("#\2\u00a5\u00a6\5K&\2\u00a6\u00a7\5i\65\2\u00a7\u00a8")
buf.write("\5o8\2\u00a8\u00a9\5K&\2\u00a9\u00aa\5K&\2\u00aa\u00ab")
buf.write("\5]/\2\u00ab$\3\2\2\2\u00ac\u00ad\5]/\2\u00ad\u00ae\5")
buf.write("_\60\2\u00ae\u00af\5i\65\2\u00af&\3\2\2\2\u00b0\u00b1")
buf.write("\5C\"\2\u00b1\u00b2\5]/\2\u00b2\u00b3\5I%\2\u00b3(\3\2")
buf.write("\2\2\u00b4\u00b5\5_\60\2\u00b5\u00b6\5e\63\2\u00b6*\3")
buf.write("\2\2\2\u00b7\u00b8\5g\64\2\u00b8\u00b9\5K&\2\u00b9\u00ba")
buf.write("\5i\65\2\u00ba,\3\2\2\2\u00bb\u00bc\5C\"\2\u00bc\u00bd")
buf.write("\5I%\2\u00bd\u00be\5I%\2\u00be.\3\2\2\2\u00bf\u00c0\5")
buf.write("I%\2\u00c0\u00c1\5K&\2\u00c1\u00c2\5Y-\2\u00c2\u00c3\5")
buf.write("K&\2\u00c3\u00c4\5i\65\2\u00c4\u00c5\5K&\2\u00c5\60\3")
buf.write("\2\2\2\u00c6\u00c7\5e\63\2\u00c7\u00c8\5K&\2\u00c8\u00c9")
buf.write("\5[.\2\u00c9\u00ca\5_\60\2\u00ca\u00cb\5m\67\2\u00cb\u00cc")
buf.write("\5K&\2\u00cc\62\3\2\2\2\u00cd\u00d6\7\62\2\2\u00ce\u00d2")
buf.write("\5? \2\u00cf\u00d1\5A!\2\u00d0\u00cf\3\2\2\2\u00d1\u00d4")
buf.write("\3\2\2\2\u00d2\u00d0\3\2\2\2\u00d2\u00d3\3\2\2\2\u00d3")
buf.write("\u00d6\3\2\2\2\u00d4\u00d2\3\2\2\2\u00d5\u00cd\3\2\2\2")
buf.write("\u00d5\u00ce\3\2\2\2\u00d6\64\3\2\2\2\u00d7\u00db\5;\36")
buf.write("\2\u00d8\u00da\5=\37\2\u00d9\u00d8\3\2\2\2\u00da\u00dd")
buf.write("\3\2\2\2\u00db\u00d9\3\2\2\2\u00db\u00dc\3\2\2\2\u00dc")
buf.write("\66\3\2\2\2\u00dd\u00db\3\2\2\2\u00de\u00e0\7%\2\2\u00df")
buf.write("\u00e1\5=\37\2\u00e0\u00df\3\2\2\2\u00e1\u00e2\3\2\2\2")
buf.write("\u00e2\u00e0\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e38\3\2\2")
buf.write("\2\u00e4\u00e6\7<\2\2\u00e5\u00e7\5=\37\2\u00e6\u00e5")
buf.write("\3\2\2\2\u00e7\u00e8\3\2\2\2\u00e8\u00e6\3\2\2\2\u00e8")
buf.write("\u00e9\3\2\2\2\u00e9:\3\2\2\2\u00ea\u00eb\t\3\2\2\u00eb")
buf.write("<\3\2\2\2\u00ec\u00ed\t\4\2\2\u00ed>\3\2\2\2\u00ee\u00ef")
buf.write("\t\5\2\2\u00ef@\3\2\2\2\u00f0\u00f1\t\6\2\2\u00f1B\3\2")
buf.write("\2\2\u00f2\u00f3\t\7\2\2\u00f3D\3\2\2\2\u00f4\u00f5\t")
buf.write("\b\2\2\u00f5F\3\2\2\2\u00f6\u00f7\t\t\2\2\u00f7H\3\2\2")
buf.write("\2\u00f8\u00f9\t\n\2\2\u00f9J\3\2\2\2\u00fa\u00fb\t\13")
buf.write("\2\2\u00fbL\3\2\2\2\u00fc\u00fd\t\f\2\2\u00fdN\3\2\2\2")
buf.write("\u00fe\u00ff\t\r\2\2\u00ffP\3\2\2\2\u0100\u0101\t\16\2")
buf.write("\2\u0101R\3\2\2\2\u0102\u0103\t\17\2\2\u0103T\3\2\2\2")
buf.write("\u0104\u0105\t\20\2\2\u0105V\3\2\2\2\u0106\u0107\t\21")
buf.write("\2\2\u0107X\3\2\2\2\u0108\u0109\t\22\2\2\u0109Z\3\2\2")
buf.write("\2\u010a\u010b\t\23\2\2\u010b\\\3\2\2\2\u010c\u010d\t")
buf.write("\24\2\2\u010d^\3\2\2\2\u010e\u010f\t\25\2\2\u010f`\3\2")
buf.write("\2\2\u0110\u0111\t\26\2\2\u0111b\3\2\2\2\u0112\u0113\t")
buf.write("\27\2\2\u0113d\3\2\2\2\u0114\u0115\t\30\2\2\u0115f\3\2")
buf.write("\2\2\u0116\u0117\t\31\2\2\u0117h\3\2\2\2\u0118\u0119\t")
buf.write("\32\2\2\u0119j\3\2\2\2\u011a\u011b\t\33\2\2\u011bl\3\2")
buf.write("\2\2\u011c\u011d\t\34\2\2\u011dn\3\2\2\2\u011e\u011f\t")
buf.write("\35\2\2\u011fp\3\2\2\2\u0120\u0121\t\36\2\2\u0121r\3\2")
buf.write("\2\2\u0122\u0123\t\37\2\2\u0123t\3\2\2\2\u0124\u0125\t")
buf.write(" \2\2\u0125v\3\2\2\2\u0126\u012c\7$\2\2\u0127\u0128\7")
buf.write("^\2\2\u0128\u012b\7$\2\2\u0129\u012b\13\2\2\2\u012a\u0127")
buf.write("\3\2\2\2\u012a\u0129\3\2\2\2\u012b\u012e\3\2\2\2\u012c")
buf.write("\u012d\3\2\2\2\u012c\u012a\3\2\2\2\u012d\u012f\3\2\2\2")
buf.write("\u012e\u012c\3\2\2\2\u012f\u013b\7$\2\2\u0130\u0136\7")
buf.write(")\2\2\u0131\u0132\7^\2\2\u0132\u0135\7)\2\2\u0133\u0135")
buf.write("\13\2\2\2\u0134\u0131\3\2\2\2\u0134\u0133\3\2\2\2\u0135")
buf.write("\u0138\3\2\2\2\u0136\u0137\3\2\2\2\u0136\u0134\3\2\2\2")
buf.write("\u0137\u0139\3\2\2\2\u0138\u0136\3\2\2\2\u0139\u013b\7")
buf.write(")\2\2\u013a\u0126\3\2\2\2\u013a\u0130\3\2\2\2\u013bx\3")
buf.write("\2\2\2\u013c\u013d\13\2\2\2\u013dz\3\2\2\2\16\2\u008a")
buf.write("\u00d2\u00d5\u00db\u00e2\u00e8\u012a\u012c\u0134\u0136")
buf.write("\u013a\3\2\3\2")
return buf.getvalue()
class DynamoDbGrammarLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
WS = 7
EQ = 8
NE = 9
LT = 10
LE = 11
GT = 12
GE = 13
PLUS = 14
MINUS = 15
IN = 16
BETWEEN = 17
NOT = 18
AND = 19
OR = 20
SET = 21
ADD = 22
DELETE = 23
REMOVE = 24
INDEX = 25
ID = 26
ATTRIBUTE_NAME_SUB = 27
LITERAL_SUB = 28
STRING_LITERAL = 29
UNKNOWN = 30
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"','", "'('", "')'", "'.'", "'['", "']'", "'='", "'<>'", "'<'",
"'<='", "'>'", "'>='", "'+'", "'-'" ]
symbolicNames = [ "<INVALID>",
"WS", "EQ", "NE", "LT", "LE", "GT", "GE", "PLUS", "MINUS", "IN",
"BETWEEN", "NOT", "AND", "OR", "SET", "ADD", "DELETE", "REMOVE",
"INDEX", "ID", "ATTRIBUTE_NAME_SUB", "LITERAL_SUB", "STRING_LITERAL",
"UNKNOWN" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "WS",
"EQ", "NE", "LT", "LE", "GT", "GE", "PLUS", "MINUS", "IN",
"BETWEEN", "NOT", "AND", "OR", "SET", "ADD", "DELETE",
"REMOVE", "INDEX", "ID", "ATTRIBUTE_NAME_SUB", "LITERAL_SUB",
"ID_START_CHAR", "ID_CHAR", "POS_DIGIT", "DIGIT", "A",
"B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
"M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W",
"X", "Y", "Z", "STRING_LITERAL", "UNKNOWN" ]
grammarFileName = "DynamoDbGrammar.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| 57.425926 | 103 | 0.547404 | 2,852 | 12,404 | 2.359748 | 0.15533 | 0.130758 | 0.081575 | 0.08737 | 0.238633 | 0.156166 | 0.07786 | 0.067162 | 0.063596 | 0.063596 | 0 | 0.329581 | 0.154869 | 12,404 | 215 | 104 | 57.693023 | 0.312411 | 0.004353 | 0 | 0 | 1 | 0.388889 | 0.602786 | 0.561153 | 0.005051 | 0 | 0 | 0 | 0 | 1 | 0.010101 | false | 0 | 0.020202 | 0 | 0.232323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e5092b4bb251cf6eb2f3e69fd35d6cfc23fc809c | 1,040 | py | Python | temboo/core/Library/Tumblr/User/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Tumblr/User/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Tumblr/User/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | from temboo.Library.Tumblr.User.FollowUser import FollowUser, FollowUserInputSet, FollowUserResultSet, FollowUserChoreographyExecution
from temboo.Library.Tumblr.User.GetUserInformation import GetUserInformation, GetUserInformationInputSet, GetUserInformationResultSet, GetUserInformationChoreographyExecution
from temboo.Library.Tumblr.User.RetrieveFollowedBlogsForUser import RetrieveFollowedBlogsForUser, RetrieveFollowedBlogsForUserInputSet, RetrieveFollowedBlogsForUserResultSet, RetrieveFollowedBlogsForUserChoreographyExecution
from temboo.Library.Tumblr.User.RetrieveUserDashboard import RetrieveUserDashboard, RetrieveUserDashboardInputSet, RetrieveUserDashboardResultSet, RetrieveUserDashboardChoreographyExecution
from temboo.Library.Tumblr.User.RetrieveUserLikes import RetrieveUserLikes, RetrieveUserLikesInputSet, RetrieveUserLikesResultSet, RetrieveUserLikesChoreographyExecution
from temboo.Library.Tumblr.User.UnfollowUser import UnfollowUser, UnfollowUserInputSet, UnfollowUserResultSet, UnfollowUserChoreographyExecution
| 148.571429 | 224 | 0.919231 | 66 | 1,040 | 14.484848 | 0.454545 | 0.062762 | 0.106695 | 0.144351 | 0.169456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040385 | 1,040 | 6 | 225 | 173.333333 | 0.957916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e50ad6f4cd7d265f5aa872acba5de4b4717c53cd | 85 | py | Python | tests/api_tests/openstack_tests/__init__.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 7 | 2018-05-20T08:56:08.000Z | 2022-03-11T15:50:54.000Z | tests/api_tests/openstack_tests/__init__.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:12:51.000Z | 2022-01-13T01:25:27.000Z | tests/api_tests/openstack_tests/__init__.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 5 | 2016-10-09T14:52:09.000Z | 2020-12-25T01:04:35.000Z | from utils.base import BaseTestCase
class OpenstackTestBase(BaseTestCase):
pass | 17 | 38 | 0.811765 | 9 | 85 | 7.666667 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.141176 | 85 | 5 | 39 | 17 | 0.945205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
e536c077f2f7a4f38be26492f050dc7bc86904f2 | 142 | py | Python | models/old/old_transformer/transformer/__init__.py | ErikHumphrey/sustain-seq2seq | c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4 | [
"Apache-2.0"
] | 4 | 2019-05-09T19:47:48.000Z | 2020-04-11T13:58:31.000Z | models/old/old_transformer/transformer/__init__.py | ErikHumphrey/sustain-seq2seq | c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4 | [
"Apache-2.0"
] | null | null | null | models/old/old_transformer/transformer/__init__.py | ErikHumphrey/sustain-seq2seq | c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4 | [
"Apache-2.0"
] | 4 | 2018-12-05T01:52:22.000Z | 2019-11-01T01:01:52.000Z | import transformer.config
import transformer.layers
import transformer.attention
import transformer.optimizers
import transformer.transformer
| 23.666667 | 30 | 0.894366 | 15 | 142 | 8.466667 | 0.4 | 0.669291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070423 | 142 | 5 | 31 | 28.4 | 0.962121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e541cfe6bfac7250ff7e1a9aad307d67afb88c2a | 167 | py | Python | naics/years/y2017/objects.py | dylanmoring/naics_sic | 8b51ddf0b9ab1b9d380bfd620564ac281bb7d1d2 | [
"MIT"
] | null | null | null | naics/years/y2017/objects.py | dylanmoring/naics_sic | 8b51ddf0b9ab1b9d380bfd620564ac281bb7d1d2 | [
"MIT"
] | null | null | null | naics/years/y2017/objects.py | dylanmoring/naics_sic | 8b51ddf0b9ab1b9d380bfd620564ac281bb7d1d2 | [
"MIT"
] | null | null | null | from ..make_objects import make_naics_objects
from . import NAICS2017, naics_2017_json_path
naics_2017_objects = make_naics_objects(naics_2017_json_path, NAICS2017)
| 27.833333 | 72 | 0.862275 | 25 | 167 | 5.24 | 0.36 | 0.206107 | 0.244275 | 0.259542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131579 | 0.08982 | 167 | 5 | 73 | 33.4 | 0.730263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
e57cd41d03c60f5e2d2d386a7f57d9648174d2c0 | 41 | py | Python | django_dynamodb_cache/encode/__init__.py | xncbf/django-dynamodb-cache | be6d1b4b8e92d581041043bcd694f2a9f00ee386 | [
"MIT"
] | 21 | 2022-02-16T10:18:24.000Z | 2022-03-31T23:40:06.000Z | django_dynamodb_cache/encode/__init__.py | xncbf/django-dynamodb-cache | be6d1b4b8e92d581041043bcd694f2a9f00ee386 | [
"MIT"
] | 9 | 2022-03-01T06:40:59.000Z | 2022-03-26T08:12:31.000Z | django_dynamodb_cache/encode/__init__.py | xncbf/django-dynamodb-cache | be6d1b4b8e92d581041043bcd694f2a9f00ee386 | [
"MIT"
] | null | null | null | from .pickle import PickleEncode # noqa
| 20.5 | 40 | 0.780488 | 5 | 41 | 6.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170732 | 41 | 1 | 41 | 41 | 0.941176 | 0.097561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e58ba93b5b5b94534133fa32019c4327fe1a503f | 98 | py | Python | orchestrator_service/__init__.py | Shchusia/orchestrator | 993935a8e22b617d5618fc298b0f6414498c5ab1 | [
"Unlicense"
] | 1 | 2021-04-09T11:58:29.000Z | 2021-04-09T11:58:29.000Z | orchestrator_service/__init__.py | Shchusia/orchestrator | 993935a8e22b617d5618fc298b0f6414498c5ab1 | [
"Unlicense"
] | null | null | null | orchestrator_service/__init__.py | Shchusia/orchestrator | 993935a8e22b617d5618fc298b0f6414498c5ab1 | [
"Unlicense"
] | null | null | null | """
Importing
"""
from .message import *
from .orchestrator import *
from .service import *
| 14 | 28 | 0.663265 | 10 | 98 | 6.5 | 0.6 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 98 | 6 | 29 | 16.333333 | 0.844156 | 0.091837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e5911b9d074fde48b2b14555fcfedf24a5e89063 | 27,673 | py | Python | verilog_langserver/verilog_parser/antlr_build/WorkspaceSymbolsLexer.py | eirikpre/verilog-langserver | e18545b139e40fe935bad430daf43e70553003a4 | [
"MIT"
] | 1 | 2020-09-24T02:30:10.000Z | 2020-09-24T02:30:10.000Z | verilog_langserver/verilog_parser/antlr_build/WorkspaceSymbolsLexer.py | eirikpre/verilog-langserver | e18545b139e40fe935bad430daf43e70553003a4 | [
"MIT"
] | null | null | null | verilog_langserver/verilog_parser/antlr_build/WorkspaceSymbolsLexer.py | eirikpre/verilog-langserver | e18545b139e40fe935bad430daf43e70553003a4 | [
"MIT"
] | null | null | null | # Generated from C:\Users\eirik\Desktop\verilog-langserver\verilog_langserver\verilog_parser/grammar/WorkspaceSymbols.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2=")
buf.write("\u0293\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\3\2")
buf.write("\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\7\3\7\3\7\3\7\7\7\u00cd\n\7\f\7\16\7\u00d0")
buf.write("\13\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\7\b\u00da\n\b\f")
buf.write("\b\16\b\u00dd\13\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t")
buf.write("\3\n\3\n\3\n\3\n\3\13\5\13\u00ed\n\13\3\13\3\13\3\13\3")
buf.write("\13\3\f\3\f\3\f\3\f\7\f\u00f7\n\f\f\f\16\f\u00fa\13\f")
buf.write("\3\f\3\f\3\r\3\r\3\r\7\r\u0101\n\r\f\r\16\r\u0104\13\r")
buf.write("\3\r\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3")
buf.write("\21\3\22\3\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\3\26")
buf.write("\3\27\3\27\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34")
buf.write("\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35")
buf.write("\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3!\3")
buf.write("!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3#")
buf.write("\3#\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3$\3")
buf.write("$\3$\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3")
buf.write("&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3\'")
buf.write("\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3")
buf.write(",\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3")
buf.write("-\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3\60\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\62\3\62\5\62\u01ec\n\62\3\63\3\63\3\63\3")
buf.write("\63\3\63\5\63\u01f3\n\63\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\5\64\u0200\n\64\3\65\3\65\3")
buf.write("\65\7\65\u0205\n\65\f\65\16\65\u0208\13\65\3\66\5\66\u020b")
buf.write("\n\66\3\66\3\66\3\66\3\67\5\67\u0211\n\67\3\67\3\67\3")
buf.write("\67\38\58\u0217\n8\38\38\38\39\59\u021d\n9\39\39\39\3")
buf.write(":\3:\3:\3:\5:\u0226\n:\3;\3;\3;\3<\3<\6<\u022d\n<\r<\16")
buf.write("<\u022e\3=\3=\3>\3>\3?\3?\3@\3@\5@\u0239\n@\3A\3A\3B\5")
buf.write("B\u023e\nB\3B\3B\3C\3C\3D\3D\3E\3E\3F\3F\3G\3G\3H\3H\3")
buf.write("H\5H\u024f\nH\3I\3I\3I\5I\u0254\nI\3J\3J\3J\5J\u0259\n")
buf.write("J\3K\3K\3K\5K\u025e\nK\3L\3L\3L\7L\u0263\nL\fL\16L\u0266")
buf.write("\13L\3M\3M\3M\3N\3N\3N\3O\3O\3O\3P\3P\3P\3Q\3Q\3Q\7Q\u0277")
buf.write("\nQ\fQ\16Q\u027a\13Q\3R\3R\3R\7R\u027f\nR\fR\16R\u0282")
buf.write("\13R\3S\3S\3S\7S\u0287\nS\fS\16S\u028a\13S\3T\3T\3T\7")
buf.write("T\u028f\nT\fT\16T\u0292\13T\4\u00db\u0102\2U\3\3\5\4\7")
buf.write("\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17")
buf.write("\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63")
buf.write("\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-")
buf.write("Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y\2{\2")
buf.write("}\2\177\2\u0081\2\u0083\2\u0085\2\u0087\2\u0089\2\u008b")
buf.write("\2\u008d\2\u008f\2\u0091\2\u0093\2\u0095\2\u0097\2\u0099")
buf.write("\2\u009b\2\u009d\2\u009f\2\u00a1\2\u00a3\2\u00a5\2\u00a7")
buf.write("\2\3\2\30\3\2\f\f\3\2$$\t\2##%\',-//>@BB\u0080\u0080\4")
buf.write("\2GGgg\3\2\62\63\3\2bb\3\2))\3\2aa\4\2--//\6\2hhoprrw")
buf.write("w\3\2uu\4\2ZZzz\5\2AA\\\\||\5\2\62;C\\c|\3\2\62;\3\2\63")
buf.write(";\3\2\629\5\2\62;CHch\4\2FFff\4\2DDdd\4\2QQqq\4\2JJjj")
buf.write("\2\u02a4\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2")
buf.write("\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2")
buf.write("\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2")
buf.write("\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3")
buf.write("\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2")
buf.write("-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3")
buf.write("\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2")
buf.write("?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2")
buf.write("\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2")
buf.write("\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2")
buf.write("\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3")
buf.write("\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o")
buf.write("\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\3")
buf.write("\u00a9\3\2\2\2\5\u00ac\3\2\2\2\7\u00b1\3\2\2\2\t\u00b8")
buf.write("\3\2\2\2\13\u00c2\3\2\2\2\r\u00c8\3\2\2\2\17\u00d5\3\2")
buf.write("\2\2\21\u00e3\3\2\2\2\23\u00e7\3\2\2\2\25\u00ec\3\2\2")
buf.write("\2\27\u00f2\3\2\2\2\31\u00fd\3\2\2\2\33\u0109\3\2\2\2")
buf.write("\35\u010b\3\2\2\2\37\u010d\3\2\2\2!\u010f\3\2\2\2#\u0111")
buf.write("\3\2\2\2%\u0113\3\2\2\2\'\u0115\3\2\2\2)\u0117\3\2\2\2")
buf.write("+\u0119\3\2\2\2-\u011b\3\2\2\2/\u011d\3\2\2\2\61\u011f")
buf.write("\3\2\2\2\63\u0121\3\2\2\2\65\u0123\3\2\2\2\67\u0125\3")
buf.write("\2\2\29\u012c\3\2\2\2;\u0136\3\2\2\2=\u0140\3\2\2\2?\u014d")
buf.write("\3\2\2\2A\u0153\3\2\2\2C\u015c\3\2\2\2E\u0163\3\2\2\2")
buf.write("G\u016d\3\2\2\2I\u0177\3\2\2\2K\u0184\3\2\2\2M\u018c\3")
buf.write("\2\2\2O\u0197\3\2\2\2Q\u019c\3\2\2\2S\u01a4\3\2\2\2U\u01ad")
buf.write("\3\2\2\2W\u01b9\3\2\2\2Y\u01c1\3\2\2\2[\u01cc\3\2\2\2")
buf.write("]\u01d2\3\2\2\2_\u01d9\3\2\2\2a\u01e1\3\2\2\2c\u01eb\3")
buf.write("\2\2\2e\u01f2\3\2\2\2g\u01ff\3\2\2\2i\u0201\3\2\2\2k\u020a")
buf.write("\3\2\2\2m\u0210\3\2\2\2o\u0216\3\2\2\2q\u021c\3\2\2\2")
buf.write("s\u0221\3\2\2\2u\u0227\3\2\2\2w\u022c\3\2\2\2y\u0230\3")
buf.write("\2\2\2{\u0232\3\2\2\2}\u0234\3\2\2\2\177\u0236\3\2\2\2")
buf.write("\u0081\u023a\3\2\2\2\u0083\u023d\3\2\2\2\u0085\u0241\3")
buf.write("\2\2\2\u0087\u0243\3\2\2\2\u0089\u0245\3\2\2\2\u008b\u0247")
buf.write("\3\2\2\2\u008d\u0249\3\2\2\2\u008f\u024e\3\2\2\2\u0091")
buf.write("\u0253\3\2\2\2\u0093\u0258\3\2\2\2\u0095\u025d\3\2\2\2")
buf.write("\u0097\u025f\3\2\2\2\u0099\u0267\3\2\2\2\u009b\u026a\3")
buf.write("\2\2\2\u009d\u026d\3\2\2\2\u009f\u0270\3\2\2\2\u00a1\u0273")
buf.write("\3\2\2\2\u00a3\u027b\3\2\2\2\u00a5\u0283\3\2\2\2\u00a7")
buf.write("\u028b\3\2\2\2\u00a9\u00aa\7%\2\2\u00aa\u00ab\7*\2\2\u00ab")
buf.write("\4\3\2\2\2\u00ac\u00ad\7x\2\2\u00ad\u00ae\7q\2\2\u00ae")
buf.write("\u00af\7k\2\2\u00af\u00b0\7f\2\2\u00b0\6\3\2\2\2\u00b1")
buf.write("\u00b2\7u\2\2\u00b2\u00b3\7v\2\2\u00b3\u00b4\7c\2\2\u00b4")
buf.write("\u00b5\7v\2\2\u00b5\u00b6\7k\2\2\u00b6\u00b7\7e\2\2\u00b7")
buf.write("\b\3\2\2\2\u00b8\u00b9\7c\2\2\u00b9\u00ba\7w\2\2\u00ba")
buf.write("\u00bb\7v\2\2\u00bb\u00bc\7q\2\2\u00bc\u00bd\7o\2\2\u00bd")
buf.write("\u00be\7c\2\2\u00be\u00bf\7v\2\2\u00bf\u00c0\7k\2\2\u00c0")
buf.write("\u00c1\7e\2\2\u00c1\n\3\2\2\2\u00c2\u00c3\7&\2\2\u00c3")
buf.write("\u00c4\7t\2\2\u00c4\u00c5\7q\2\2\u00c5\u00c6\7q\2\2\u00c6")
buf.write("\u00c7\7v\2\2\u00c7\f\3\2\2\2\u00c8\u00c9\7\61\2\2\u00c9")
buf.write("\u00ca\7\61\2\2\u00ca\u00ce\3\2\2\2\u00cb\u00cd\n\2\2")
buf.write("\2\u00cc\u00cb\3\2\2\2\u00cd\u00d0\3\2\2\2\u00ce\u00cc")
buf.write("\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d1\3\2\2\2\u00d0")
buf.write("\u00ce\3\2\2\2\u00d1\u00d2\7\f\2\2\u00d2\u00d3\3\2\2\2")
buf.write("\u00d3\u00d4\b\7\2\2\u00d4\16\3\2\2\2\u00d5\u00d6\7\61")
buf.write("\2\2\u00d6\u00d7\7,\2\2\u00d7\u00db\3\2\2\2\u00d8\u00da")
buf.write("\13\2\2\2\u00d9\u00d8\3\2\2\2\u00da\u00dd\3\2\2\2\u00db")
buf.write("\u00dc\3\2\2\2\u00db\u00d9\3\2\2\2\u00dc\u00de\3\2\2\2")
buf.write("\u00dd\u00db\3\2\2\2\u00de\u00df\7,\2\2\u00df\u00e0\7")
buf.write("\61\2\2\u00e0\u00e1\3\2\2\2\u00e1\u00e2\b\b\2\2\u00e2")
buf.write("\20\3\2\2\2\u00e3\u00e4\7\"\2\2\u00e4\u00e5\3\2\2\2\u00e5")
buf.write("\u00e6\b\t\2\2\u00e6\22\3\2\2\2\u00e7\u00e8\7\13\2\2\u00e8")
buf.write("\u00e9\3\2\2\2\u00e9\u00ea\b\n\2\2\u00ea\24\3\2\2\2\u00eb")
buf.write("\u00ed\7\17\2\2\u00ec\u00eb\3\2\2\2\u00ec\u00ed\3\2\2")
buf.write("\2\u00ed\u00ee\3\2\2\2\u00ee\u00ef\7\f\2\2\u00ef\u00f0")
buf.write("\3\2\2\2\u00f0\u00f1\b\13\2\2\u00f1\26\3\2\2\2\u00f2\u00f8")
buf.write("\7$\2\2\u00f3\u00f7\n\3\2\2\u00f4\u00f5\7^\2\2\u00f5\u00f7")
buf.write("\7$\2\2\u00f6\u00f3\3\2\2\2\u00f6\u00f4\3\2\2\2\u00f7")
buf.write("\u00fa\3\2\2\2\u00f8\u00f6\3\2\2\2\u00f8\u00f9\3\2\2\2")
buf.write("\u00f9\u00fb\3\2\2\2\u00fa\u00f8\3\2\2\2\u00fb\u00fc\7")
buf.write("$\2\2\u00fc\30\3\2\2\2\u00fd\u00fe\5y=\2\u00fe\u0102\5")
buf.write("w<\2\u00ff\u0101\13\2\2\2\u0100\u00ff\3\2\2\2\u0101\u0104")
buf.write("\3\2\2\2\u0102\u0103\3\2\2\2\u0102\u0100\3\2\2\2\u0103")
buf.write("\u0105\3\2\2\2\u0104\u0102\3\2\2\2\u0105\u0106\5\177@")
buf.write("\2\u0106\u0107\3\2\2\2\u0107\u0108\b\r\2\2\u0108\32\3")
buf.write("\2\2\2\u0109\u010a\7]\2\2\u010a\34\3\2\2\2\u010b\u010c")
buf.write("\7_\2\2\u010c\36\3\2\2\2\u010d\u010e\7*\2\2\u010e \3\2")
buf.write("\2\2\u010f\u0110\7+\2\2\u0110\"\3\2\2\2\u0111\u0112\7")
buf.write("}\2\2\u0112$\3\2\2\2\u0113\u0114\7\177\2\2\u0114&\3\2")
buf.write("\2\2\u0115\u0116\7=\2\2\u0116(\3\2\2\2\u0117\u0118\7<")
buf.write("\2\2\u0118*\3\2\2\2\u0119\u011a\7.\2\2\u011a,\3\2\2\2")
buf.write("\u011b\u011c\7?\2\2\u011c.\3\2\2\2\u011d\u011e\7A\2\2")
buf.write("\u011e\60\3\2\2\2\u011f\u0120\7\60\2\2\u0120\62\3\2\2")
buf.write("\2\u0121\u0122\5{>\2\u0122\64\3\2\2\2\u0123\u0124\t\4")
buf.write("\2\2\u0124\66\3\2\2\2\u0125\u0126\7o\2\2\u0126\u0127\7")
buf.write("q\2\2\u0127\u0128\7f\2\2\u0128\u0129\7w\2\2\u0129\u012a")
buf.write("\7n\2\2\u012a\u012b\7g\2\2\u012b8\3\2\2\2\u012c\u012d")
buf.write("\7g\2\2\u012d\u012e\7p\2\2\u012e\u012f\7f\2\2\u012f\u0130")
buf.write("\7o\2\2\u0130\u0131\7q\2\2\u0131\u0132\7f\2\2\u0132\u0133")
buf.write("\7w\2\2\u0133\u0134\7n\2\2\u0134\u0135\7g\2\2\u0135:\3")
buf.write("\2\2\2\u0136\u0137\7k\2\2\u0137\u0138\7p\2\2\u0138\u0139")
buf.write("\7v\2\2\u0139\u013a\7g\2\2\u013a\u013b\7t\2\2\u013b\u013c")
buf.write("\7h\2\2\u013c\u013d\7c\2\2\u013d\u013e\7e\2\2\u013e\u013f")
buf.write("\7g\2\2\u013f<\3\2\2\2\u0140\u0141\7g\2\2\u0141\u0142")
buf.write("\7p\2\2\u0142\u0143\7f\2\2\u0143\u0144\7k\2\2\u0144\u0145")
buf.write("\7p\2\2\u0145\u0146\7v\2\2\u0146\u0147\7g\2\2\u0147\u0148")
buf.write("\7t\2\2\u0148\u0149\7h\2\2\u0149\u014a\7c\2\2\u014a\u014b")
buf.write("\7e\2\2\u014b\u014c\7g\2\2\u014c>\3\2\2\2\u014d\u014e")
buf.write("\7e\2\2\u014e\u014f\7n\2\2\u014f\u0150\7c\2\2\u0150\u0151")
buf.write("\7u\2\2\u0151\u0152\7u\2\2\u0152@\3\2\2\2\u0153\u0154")
buf.write("\7g\2\2\u0154\u0155\7p\2\2\u0155\u0156\7f\2\2\u0156\u0157")
buf.write("\7e\2\2\u0157\u0158\7n\2\2\u0158\u0159\7c\2\2\u0159\u015a")
buf.write("\7u\2\2\u015a\u015b\7u\2\2\u015bB\3\2\2\2\u015c\u015d")
buf.write("\7e\2\2\u015d\u015e\7q\2\2\u015e\u015f\7p\2\2\u015f\u0160")
buf.write("\7h\2\2\u0160\u0161\7k\2\2\u0161\u0162\7i\2\2\u0162D\3")
buf.write("\2\2\2\u0163\u0164\7g\2\2\u0164\u0165\7p\2\2\u0165\u0166")
buf.write("\7f\2\2\u0166\u0167\7e\2\2\u0167\u0168\7q\2\2\u0168\u0169")
buf.write("\7p\2\2\u0169\u016a\7h\2\2\u016a\u016b\7k\2\2\u016b\u016c")
buf.write("\7i\2\2\u016cF\3\2\2\2\u016d\u016e\7r\2\2\u016e\u016f")
buf.write("\7t\2\2\u016f\u0170\7k\2\2\u0170\u0171\7o\2\2\u0171\u0172")
buf.write("\7k\2\2\u0172\u0173\7v\2\2\u0173\u0174\7k\2\2\u0174\u0175")
buf.write("\7x\2\2\u0175\u0176\7g\2\2\u0176H\3\2\2\2\u0177\u0178")
buf.write("\7g\2\2\u0178\u0179\7p\2\2\u0179\u017a\7f\2\2\u017a\u017b")
buf.write("\7r\2\2\u017b\u017c\7t\2\2\u017c\u017d\7k\2\2\u017d\u017e")
buf.write("\7o\2\2\u017e\u017f\7k\2\2\u017f\u0180\7v\2\2\u0180\u0181")
buf.write("\7k\2\2\u0181\u0182\7x\2\2\u0182\u0183\7g\2\2\u0183J\3")
buf.write("\2\2\2\u0184\u0185\7r\2\2\u0185\u0186\7t\2\2\u0186\u0187")
buf.write("\7q\2\2\u0187\u0188\7i\2\2\u0188\u0189\7t\2\2\u0189\u018a")
buf.write("\7c\2\2\u018a\u018b\7o\2\2\u018bL\3\2\2\2\u018c\u018d")
buf.write("\7g\2\2\u018d\u018e\7p\2\2\u018e\u018f\7f\2\2\u018f\u0190")
buf.write("\7r\2\2\u0190\u0191\7t\2\2\u0191\u0192\7q\2\2\u0192\u0193")
buf.write("\7i\2\2\u0193\u0194\7t\2\2\u0194\u0195\7c\2\2\u0195\u0196")
buf.write("\7o\2\2\u0196N\3\2\2\2\u0197\u0198\7v\2\2\u0198\u0199")
buf.write("\7c\2\2\u0199\u019a\7u\2\2\u019a\u019b\7m\2\2\u019bP\3")
buf.write("\2\2\2\u019c\u019d\7g\2\2\u019d\u019e\7p\2\2\u019e\u019f")
buf.write("\7f\2\2\u019f\u01a0\7v\2\2\u01a0\u01a1\7c\2\2\u01a1\u01a2")
buf.write("\7u\2\2\u01a2\u01a3\7m\2\2\u01a3R\3\2\2\2\u01a4\u01a5")
buf.write("\7h\2\2\u01a5\u01a6\7w\2\2\u01a6\u01a7\7p\2\2\u01a7\u01a8")
buf.write("\7e\2\2\u01a8\u01a9\7v\2\2\u01a9\u01aa\7k\2\2\u01aa\u01ab")
buf.write("\7q\2\2\u01ab\u01ac\7p\2\2\u01acT\3\2\2\2\u01ad\u01ae")
buf.write("\7g\2\2\u01ae\u01af\7p\2\2\u01af\u01b0\7f\2\2\u01b0\u01b1")
buf.write("\7h\2\2\u01b1\u01b2\7w\2\2\u01b2\u01b3\7p\2\2\u01b3\u01b4")
buf.write("\7e\2\2\u01b4\u01b5\7v\2\2\u01b5\u01b6\7k\2\2\u01b6\u01b7")
buf.write("\7q\2\2\u01b7\u01b8\7p\2\2\u01b8V\3\2\2\2\u01b9\u01ba")
buf.write("\7r\2\2\u01ba\u01bb\7c\2\2\u01bb\u01bc\7e\2\2\u01bc\u01bd")
buf.write("\7m\2\2\u01bd\u01be\7c\2\2\u01be\u01bf\7i\2\2\u01bf\u01c0")
buf.write("\7g\2\2\u01c0X\3\2\2\2\u01c1\u01c2\7g\2\2\u01c2\u01c3")
buf.write("\7p\2\2\u01c3\u01c4\7f\2\2\u01c4\u01c5\7r\2\2\u01c5\u01c6")
buf.write("\7c\2\2\u01c6\u01c7\7e\2\2\u01c7\u01c8\7m\2\2\u01c8\u01c9")
buf.write("\7c\2\2\u01c9\u01ca\7i\2\2\u01ca\u01cb\7g\2\2\u01cbZ\3")
buf.write("\2\2\2\u01cc\u01cd\7k\2\2\u01cd\u01ce\7p\2\2\u01ce\u01cf")
buf.write("\7r\2\2\u01cf\u01d0\7w\2\2\u01d0\u01d1\7v\2\2\u01d1\\")
buf.write("\3\2\2\2\u01d2\u01d3\7q\2\2\u01d3\u01d4\7w\2\2\u01d4\u01d5")
buf.write("\7v\2\2\u01d5\u01d6\7r\2\2\u01d6\u01d7\7w\2\2\u01d7\u01d8")
buf.write("\7v\2\2\u01d8^\3\2\2\2\u01d9\u01da\7x\2\2\u01da\u01db")
buf.write("\7k\2\2\u01db\u01dc\7t\2\2\u01dc\u01dd\7v\2\2\u01dd\u01de")
buf.write("\7w\2\2\u01de\u01df\7c\2\2\u01df\u01e0\7n\2\2\u01e0`\3")
buf.write("\2\2\2\u01e1\u01e2\7v\2\2\u01e2\u01e3\7{\2\2\u01e3\u01e4")
buf.write("\7r\2\2\u01e4\u01e5\7g\2\2\u01e5\u01e6\7f\2\2\u01e6\u01e7")
buf.write("\7g\2\2\u01e7\u01e8\7h\2\2\u01e8b\3\2\2\2\u01e9\u01ec")
buf.write("\5e\63\2\u01ea\u01ec\5g\64\2\u01eb\u01e9\3\2\2\2\u01eb")
buf.write("\u01ea\3\2\2\2\u01ecd\3\2\2\2\u01ed\u01f3\5k\66\2\u01ee")
buf.write("\u01f3\5o8\2\u01ef\u01f3\5m\67\2\u01f0\u01f3\5q9\2\u01f1")
buf.write("\u01f3\5i\65\2\u01f2\u01ed\3\2\2\2\u01f2\u01ee\3\2\2\2")
buf.write("\u01f2\u01ef\3\2\2\2\u01f2\u01f0\3\2\2\2\u01f2\u01f1\3")
buf.write("\2\2\2\u01f3f\3\2\2\2\u01f4\u01f5\5i\65\2\u01f5\u01f6")
buf.write("\5\61\31\2\u01f6\u01f7\5i\65\2\u01f7\u0200\3\2\2\2\u01f8")
buf.write("\u01f9\5i\65\2\u01f9\u01fa\5\61\31\2\u01fa\u01fb\5i\65")
buf.write("\2\u01fb\u01fc\t\5\2\2\u01fc\u01fd\5\u0081A\2\u01fd\u01fe")
buf.write("\5i\65\2\u01fe\u0200\3\2\2\2\u01ff\u01f4\3\2\2\2\u01ff")
buf.write("\u01f8\3\2\2\2\u0200h\3\2\2\2\u0201\u0206\5\u008bF\2\u0202")
buf.write("\u0205\5}?\2\u0203\u0205\5\u008bF\2\u0204\u0202\3\2\2")
buf.write("\2\u0204\u0203\3\2\2\2\u0205\u0208\3\2\2\2\u0206\u0204")
buf.write("\3\2\2\2\u0206\u0207\3\2\2\2\u0207j\3\2\2\2\u0208\u0206")
buf.write("\3\2\2\2\u0209\u020b\5\u0097L\2\u020a\u0209\3\2\2\2\u020a")
buf.write("\u020b\3\2\2\2\u020b\u020c\3\2\2\2\u020c\u020d\5\u0099")
buf.write("M\2\u020d\u020e\5\u00a1Q\2\u020el\3\2\2\2\u020f\u0211")
buf.write("\5\u0097L\2\u0210\u020f\3\2\2\2\u0210\u0211\3\2\2\2\u0211")
buf.write("\u0212\3\2\2\2\u0212\u0213\5\u009bN\2\u0213\u0214\5\u00a3")
buf.write("R\2\u0214n\3\2\2\2\u0215\u0217\5\u0097L\2\u0216\u0215")
buf.write("\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u0218\3\2\2\2\u0218")
buf.write("\u0219\5\u009dO\2\u0219\u021a\5\u00a5S\2\u021ap\3\2\2")
buf.write("\2\u021b\u021d\5\u0097L\2\u021c\u021b\3\2\2\2\u021c\u021d")
buf.write("\3\2\2\2\u021d\u021e\3\2\2\2\u021e\u021f\5\u009fP\2\u021f")
buf.write("\u0220\5\u00a7T\2\u0220r\3\2\2\2\u0221\u0225\5{>\2\u0222")
buf.write("\u0226\5\u0085C\2\u0223\u0226\5\u0087D\2\u0224\u0226\t")
buf.write("\6\2\2\u0225\u0222\3\2\2\2\u0225\u0223\3\2\2\2\u0225\u0224")
buf.write("\3\2\2\2\u0226t\3\2\2\2\u0227\u0228\5i\65\2\u0228\u0229")
buf.write("\5\u0083B\2\u0229v\3\2\2\2\u022a\u022d\5\u0089E\2\u022b")
buf.write("\u022d\5}?\2\u022c\u022a\3\2\2\2\u022c\u022b\3\2\2\2\u022d")
buf.write("\u022e\3\2\2\2\u022e\u022c\3\2\2\2\u022e\u022f\3\2\2\2")
buf.write("\u022fx\3\2\2\2\u0230\u0231\t\7\2\2\u0231z\3\2\2\2\u0232")
buf.write("\u0233\t\b\2\2\u0233|\3\2\2\2\u0234\u0235\t\t\2\2\u0235")
buf.write("~\3\2\2\2\u0236\u0238\7\f\2\2\u0237\u0239\7\17\2\2\u0238")
buf.write("\u0237\3\2\2\2\u0238\u0239\3\2\2\2\u0239\u0080\3\2\2\2")
buf.write("\u023a\u023b\t\n\2\2\u023b\u0082\3\2\2\2\u023c\u023e\t")
buf.write("\13\2\2\u023d\u023c\3\2\2\2\u023d\u023e\3\2\2\2\u023e")
buf.write("\u023f\3\2\2\2\u023f\u0240\t\f\2\2\u0240\u0084\3\2\2\2")
buf.write("\u0241\u0242\t\r\2\2\u0242\u0086\3\2\2\2\u0243\u0244\t")
buf.write("\16\2\2\u0244\u0088\3\2\2\2\u0245\u0246\t\17\2\2\u0246")
buf.write("\u008a\3\2\2\2\u0247\u0248\t\20\2\2\u0248\u008c\3\2\2")
buf.write("\2\u0249\u024a\t\21\2\2\u024a\u008e\3\2\2\2\u024b\u024f")
buf.write("\t\20\2\2\u024c\u024f\5\u0085C\2\u024d\u024f\5\u0087D")
buf.write("\2\u024e\u024b\3\2\2\2\u024e\u024c\3\2\2\2\u024e\u024d")
buf.write("\3\2\2\2\u024f\u0090\3\2\2\2\u0250\u0254\t\6\2\2\u0251")
buf.write("\u0254\5\u0085C\2\u0252\u0254\5\u0087D\2\u0253\u0250\3")
buf.write("\2\2\2\u0253\u0251\3\2\2\2\u0253\u0252\3\2\2\2\u0254\u0092")
buf.write("\3\2\2\2\u0255\u0259\t\22\2\2\u0256\u0259\5\u0085C\2\u0257")
buf.write("\u0259\5\u0087D\2\u0258\u0255\3\2\2\2\u0258\u0256\3\2")
buf.write("\2\2\u0258\u0257\3\2\2\2\u0259\u0094\3\2\2\2\u025a\u025e")
buf.write("\t\23\2\2\u025b\u025e\5\u0085C\2\u025c\u025e\5\u0087D")
buf.write("\2\u025d\u025a\3\2\2\2\u025d\u025b\3\2\2\2\u025d\u025c")
buf.write("\3\2\2\2\u025e\u0096\3\2\2\2\u025f\u0264\5\u008dG\2\u0260")
buf.write("\u0263\5}?\2\u0261\u0263\5\u008bF\2\u0262\u0260\3\2\2")
buf.write("\2\u0262\u0261\3\2\2\2\u0263\u0266\3\2\2\2\u0264\u0262")
buf.write("\3\2\2\2\u0264\u0265\3\2\2\2\u0265\u0098\3\2\2\2\u0266")
buf.write("\u0264\3\2\2\2\u0267\u0268\5{>\2\u0268\u0269\t\24\2\2")
buf.write("\u0269\u009a\3\2\2\2\u026a\u026b\5{>\2\u026b\u026c\t\25")
buf.write("\2\2\u026c\u009c\3\2\2\2\u026d\u026e\5{>\2\u026e\u026f")
buf.write("\t\26\2\2\u026f\u009e\3\2\2\2\u0270\u0271\5{>\2\u0271")
buf.write("\u0272\t\27\2\2\u0272\u00a0\3\2\2\2\u0273\u0278\5\u008f")
buf.write("H\2\u0274\u0277\5}?\2\u0275\u0277\5\u008fH\2\u0276\u0274")
buf.write("\3\2\2\2\u0276\u0275\3\2\2\2\u0277\u027a\3\2\2\2\u0278")
buf.write("\u0276\3\2\2\2\u0278\u0279\3\2\2\2\u0279\u00a2\3\2\2\2")
buf.write("\u027a\u0278\3\2\2\2\u027b\u0280\5\u0091I\2\u027c\u027f")
buf.write("\5}?\2\u027d\u027f\5\u0091I\2\u027e\u027c\3\2\2\2\u027e")
buf.write("\u027d\3\2\2\2\u027f\u0282\3\2\2\2\u0280\u027e\3\2\2\2")
buf.write("\u0280\u0281\3\2\2\2\u0281\u00a4\3\2\2\2\u0282\u0280\3")
buf.write("\2\2\2\u0283\u0288\5\u0093J\2\u0284\u0287\5}?\2\u0285")
buf.write("\u0287\5\u0093J\2\u0286\u0284\3\2\2\2\u0286\u0285\3\2")
buf.write("\2\2\u0287\u028a\3\2\2\2\u0288\u0286\3\2\2\2\u0288\u0289")
buf.write("\3\2\2\2\u0289\u00a6\3\2\2\2\u028a\u0288\3\2\2\2\u028b")
buf.write("\u0290\5\u0095K\2\u028c\u028f\5}?\2\u028d\u028f\5\u0095")
buf.write("K\2\u028e\u028c\3\2\2\2\u028e\u028d\3\2\2\2\u028f\u0292")
buf.write("\3\2\2\2\u0290\u028e\3\2\2\2\u0290\u0291\3\2\2\2\u0291")
buf.write("\u00a8\3\2\2\2\u0292\u0290\3\2\2\2%\2\u00ce\u00db\u00ec")
buf.write("\u00f6\u00f8\u0102\u01eb\u01f2\u01ff\u0204\u0206\u020a")
buf.write("\u0210\u0216\u021c\u0225\u022c\u022e\u0238\u023d\u024e")
buf.write("\u0253\u0258\u025d\u0262\u0264\u0276\u0278\u027e\u0280")
buf.write("\u0286\u0288\u028e\u0290\3\b\2\2")
return buf.getvalue()
class WorkspaceSymbolsLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
SINGLELINE_COMMENT = 6
MULTILINE_COMMENT = 7
SPACE = 8
TAB = 9
NEWLINE = 10
String = 11
COMPILER_DIRECTIVE = 12
OpenBracket = 13
CloseBracket = 14
OpenParen = 15
CloseParen = 16
OpenBrace = 17
CloseBrace = 18
SemiColon = 19
Colon = 20
Comma = 21
Assign = 22
QuestionMark = 23
Dot = 24
Apostrophe = 25
Operators = 26
Module = 27
Endmodule = 28
Interface = 29
Endinterface = 30
Class = 31
Endclass = 32
Config = 33
Endconfig = 34
Primitive = 35
Endprimitive = 36
Program = 37
Endprogram = 38
Task = 39
Endtask = 40
Function = 41
Endfunction = 42
Package = 43
Endpackage = 44
Input = 45
Output = 46
Virtual = 47
Typedef = 48
Number = 49
IntegralNumber = 50
RealNumber = 51
UnsignedNumber = 52
DecimalNumber = 53
BinaryNumber = 54
OctalNumber = 55
HexNumber = 56
UnbasedUnsizedLiteral = 57
Time = 58
Word = 59
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'#('", "'void'", "'static'", "'automatic'", "'$root'", "' '",
"'\t'", "'['", "']'", "'('", "')'", "'{'", "'}'", "';'", "':'",
"','", "'='", "'?'", "'.'", "'module'", "'endmodule'", "'interface'",
"'endinterface'", "'class'", "'endclass'", "'config'", "'endconfig'",
"'primitive'", "'endprimitive'", "'program'", "'endprogram'",
"'task'", "'endtask'", "'function'", "'endfunction'", "'package'",
"'endpackage'", "'input'", "'output'", "'virtual'", "'typedef'" ]
symbolicNames = [ "<INVALID>",
"SINGLELINE_COMMENT", "MULTILINE_COMMENT", "SPACE", "TAB", "NEWLINE",
"String", "COMPILER_DIRECTIVE", "OpenBracket", "CloseBracket",
"OpenParen", "CloseParen", "OpenBrace", "CloseBrace", "SemiColon",
"Colon", "Comma", "Assign", "QuestionMark", "Dot", "Apostrophe",
"Operators", "Module", "Endmodule", "Interface", "Endinterface",
"Class", "Endclass", "Config", "Endconfig", "Primitive", "Endprimitive",
"Program", "Endprogram", "Task", "Endtask", "Function", "Endfunction",
"Package", "Endpackage", "Input", "Output", "Virtual", "Typedef",
"Number", "IntegralNumber", "RealNumber", "UnsignedNumber",
"DecimalNumber", "BinaryNumber", "OctalNumber", "HexNumber",
"UnbasedUnsizedLiteral", "Time", "Word" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "SINGLELINE_COMMENT",
"MULTILINE_COMMENT", "SPACE", "TAB", "NEWLINE", "String",
"COMPILER_DIRECTIVE", "OpenBracket", "CloseBracket", "OpenParen",
"CloseParen", "OpenBrace", "CloseBrace", "SemiColon",
"Colon", "Comma", "Assign", "QuestionMark", "Dot", "Apostrophe",
"Operators", "Module", "Endmodule", "Interface", "Endinterface",
"Class", "Endclass", "Config", "Endconfig", "Primitive",
"Endprimitive", "Program", "Endprogram", "Task", "Endtask",
"Function", "Endfunction", "Package", "Endpackage", "Input",
"Output", "Virtual", "Typedef", "Number", "IntegralNumber",
"RealNumber", "UnsignedNumber", "DecimalNumber", "BinaryNumber",
"OctalNumber", "HexNumber", "UnbasedUnsizedLiteral", "Time",
"Word", "GRAVE", "APOSTROPHE", "UNDERSCORE", "EOL", "SIGN",
"TIME_UNIT", "X", "Z", "CHAR", "DIGIT", "NON_ZERO_DIGIT",
"DECIMAL_DIGIT", "BINARY_DIGIT", "OCTAL_DIGIT", "HEX_DIGIT",
"NON_ZERO_NUMBER", "DECIMAL_BASE", "BINARY_BASE", "OCTAL_BASE",
"HEX_BASE", "DECIMAL_VALUE", "BINARY_VALUE", "OCTAL_VALUE",
"HEX_VALUE" ]
grammarFileName = "WorkspaceSymbols.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| 64.807963 | 134 | 0.570159 | 5,875 | 27,673 | 2.674383 | 0.164596 | 0.120545 | 0.064537 | 0.073574 | 0.273931 | 0.183809 | 0.125446 | 0.12169 | 0.117299 | 0.11558 | 0 | 0.3411 | 0.159361 | 27,673 | 426 | 135 | 64.960094 | 0.334308 | 0.004734 | 0 | 0.009804 | 1 | 0.64951 | 0.642815 | 0.583616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004902 | false | 0 | 0.009804 | 0 | 0.183824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e5ad5c71b88914ed73949f875033306cf44b063a | 43 | py | Python | src/__main__.py | Togohogo1/TypeRacer-Stats | b13a0c973a813a4c5f00dbbc7f98c5a13b49b25c | [
"MIT"
] | 3 | 2021-04-24T23:04:32.000Z | 2022-01-16T01:36:42.000Z | src/__main__.py | Togohogo1/TypeRacer-Stats | b13a0c973a813a4c5f00dbbc7f98c5a13b49b25c | [
"MIT"
] | 1 | 2021-05-29T17:39:05.000Z | 2021-07-12T02:26:10.000Z | src/__main__.py | Togohogo1/TypeRacer-Stats | b13a0c973a813a4c5f00dbbc7f98c5a13b49b25c | [
"MIT"
] | 1 | 2021-08-06T03:45:00.000Z | 2021-08-06T03:45:00.000Z | from .grapher import main_plot
main_plot()
| 14.333333 | 30 | 0.813953 | 7 | 43 | 4.714286 | 0.714286 | 0.484848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116279 | 43 | 2 | 31 | 21.5 | 0.868421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
f901b8a2b200e4af23e96a952c1e5458c6685222 | 429 | py | Python | tests/test_naming.py | Cottonwood-Technology/AIOConductor | dc5f6fae48fac2b84eff26ce3aee6ca7ba581ff4 | [
"BSD-2-Clause"
] | null | null | null | tests/test_naming.py | Cottonwood-Technology/AIOConductor | dc5f6fae48fac2b84eff26ce3aee6ca7ba581ff4 | [
"BSD-2-Clause"
] | null | null | null | tests/test_naming.py | Cottonwood-Technology/AIOConductor | dc5f6fae48fac2b84eff26ce3aee6ca7ba581ff4 | [
"BSD-2-Clause"
] | null | null | null | from aioconductor.naming import camelcase_to_underscore
def test_camelcase_to_underscore() -> None:
    """Check CamelCase-to-snake_case conversion over representative inputs."""
    # Mapping of CamelCase input -> expected snake_case output.
    cases = {
        "DB": "db",
        "HTTPClient": "http_client",
        "CoolXMLParser": "cool_xml_parser",
        "MessageQueue": "message_queue",
        "RSA512Crypt": "rsa_512_crypt",
    }
    for camel, expected in cases.items():
        assert camelcase_to_underscore(camel) == expected
| 42.9 | 72 | 0.776224 | 49 | 429 | 6.367347 | 0.530612 | 0.246795 | 0.471154 | 0.432692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015831 | 0.11655 | 429 | 9 | 73 | 47.666667 | 0.807388 | 0 | 0 | 0 | 0 | 0 | 0.237762 | 0 | 0 | 0 | 0 | 0 | 0.714286 | 1 | 0.142857 | true | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
007859ac939f29c309be6c96966989357f7b6cc8 | 98 | py | Python | tests/bundles/security/_app/__init__.py | achiang/flask-unchained | 12788a6e618904a25ff2b571eb05ff1dc8f1840f | [
"MIT"
] | 69 | 2018-10-10T01:59:11.000Z | 2022-03-29T17:29:30.000Z | tests/bundles/security/_app/__init__.py | achiang/flask-unchained | 12788a6e618904a25ff2b571eb05ff1dc8f1840f | [
"MIT"
] | 18 | 2018-11-17T12:42:02.000Z | 2021-05-22T18:45:27.000Z | tests/bundles/security/_app/__init__.py | achiang/flask-unchained | 12788a6e618904a25ff2b571eb05ff1dc8f1840f | [
"MIT"
] | 7 | 2018-10-12T16:20:25.000Z | 2021-10-06T12:18:21.000Z | from flask_unchained import AppBundle as BaseAppBundle
class AppBundle(BaseAppBundle):
    """Minimal application bundle for the test app.

    Inherits all behavior unchanged from flask_unchained's AppBundle
    (imported at module level as BaseAppBundle); its presence alone marks
    this package as the app bundle.
    """
    pass
| 16.333333 | 54 | 0.816327 | 11 | 98 | 7.181818 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153061 | 98 | 5 | 55 | 19.6 | 0.951807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
0096594bf7cfb5caf791160fad1e65600db2620e | 175 | py | Python | list_methods/join_method.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | list_methods/join_method.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | list_methods/join_method.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | address = ['500 Fifth Avenue', 'New York', 'NY', '10036']
# Demonstrate str.join on the same address list with three separators:
# bare comma, comma-plus-space, and the empty string.
for separator in (',', ', ', ''):
    print(separator.join(address))
print("-".join(["555", "123", "4567"])) | 21.875 | 57 | 0.582857 | 22 | 175 | 4.636364 | 0.590909 | 0.352941 | 0.470588 | 0.617647 | 0.558824 | 0.558824 | 0.558824 | 0.558824 | 0 | 0 | 0 | 0.11465 | 0.102857 | 175 | 8 | 58 | 21.875 | 0.535032 | 0 | 0 | 0.4 | 0 | 0 | 0.255682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.8 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
daf76348fd147b6a1a7979a39d7da4d40ec1029e | 86 | py | Python | st_toolbox/spcrng/__init__.py | sekro/spatial_transcriptomics_toolbox | 57d48e7cda74c9da5381df024fd4c519f9c379f5 | [
"MIT"
] | null | null | null | st_toolbox/spcrng/__init__.py | sekro/spatial_transcriptomics_toolbox | 57d48e7cda74c9da5381df024fd4c519f9c379f5 | [
"MIT"
] | null | null | null | st_toolbox/spcrng/__init__.py | sekro/spatial_transcriptomics_toolbox | 57d48e7cda74c9da5381df024fd4c519f9c379f5 | [
"MIT"
] | null | null | null | from .spacerange_import import SpaceRangerPaths, SpaceRangerImporter, SpaceRangerSpots | 86 | 86 | 0.906977 | 7 | 86 | 11 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05814 | 86 | 1 | 86 | 86 | 0.950617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
975627481736c4259102fd85f2277da9899adc70 | 99 | py | Python | tests/testexec.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | tests/testexec.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | tests/testexec.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | import logging as L
def main(ctx):
    """Striga test entry point: logs a marker message and nothing else.

    Args:
        ctx: request/application context supplied by the caller; unused here.
    """
    #TODO: Something to be done here ...
    # `L` is the logging module, imported at module level as an alias.
    L.info("Go go go")
| 16.5 | 38 | 0.626263 | 18 | 99 | 3.444444 | 0.833333 | 0.129032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.242424 | 99 | 5 | 39 | 19.8 | 0.826667 | 0.353535 | 0 | 0 | 0 | 0 | 0.137931 | 0 | 0 | 0 | 0 | 0.2 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
977cbf0db1b71d6808f47ff9d7ca4ca848712d19 | 155 | py | Python | align/pdk/finfet/__init__.py | pretl/ALIGN-public | 4b03042d9e96fa669740427842b0bf268b0c9a86 | [
"BSD-3-Clause"
] | null | null | null | align/pdk/finfet/__init__.py | pretl/ALIGN-public | 4b03042d9e96fa669740427842b0bf268b0c9a86 | [
"BSD-3-Clause"
] | null | null | null | align/pdk/finfet/__init__.py | pretl/ALIGN-public | 4b03042d9e96fa669740427842b0bf268b0c9a86 | [
"BSD-3-Clause"
] | null | null | null | from .canvas import CanvasPDK
from .transistor import MOS
from .transistor_array import MOSGenerator
from .resistor import tfr_prim
from .digital import *
| 25.833333 | 42 | 0.832258 | 21 | 155 | 6.047619 | 0.571429 | 0.220472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 155 | 5 | 43 | 31 | 0.940741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
97985803ff187bab9756a07fa2d20e70fdb97c00 | 18,954 | py | Python | src/layers/losses.py | malfonsoNeoris/maskrcnn_tf2 | edcd276217546b08dbc6e33bb65840432999365c | [
"MIT"
] | null | null | null | src/layers/losses.py | malfonsoNeoris/maskrcnn_tf2 | edcd276217546b08dbc6e33bb65840432999365c | [
"MIT"
] | null | null | null | src/layers/losses.py | malfonsoNeoris/maskrcnn_tf2 | edcd276217546b08dbc6e33bb65840432999365c | [
"MIT"
] | 1 | 2021-08-09T18:06:51.000Z | 2021-08-09T18:06:51.000Z | import tensorflow as tf
# Losses in subclassed API
class RPNClassLoss(tf.keras.losses.Loss):
    def __init__(self, name="rpn_class_loss", **kwargs):
        """RPN anchor classifier (foreground/background) loss.

        Args:
            name: loss name, defaults to "rpn_class_loss".
            **kwargs: forwarded to tf.keras.losses.Loss.
        """
        # NOTE(review): super().__init__(name=name) also sets self.name, so
        # this pre-assignment is redundant (kept as-is).
        self.name = name
        super(RPNClassLoss, self).__init__(name=name, **kwargs)
    def call(self, rpn_match, rpn_class_logits, **kwargs):
        """Compute the RPN anchor classifier loss.

        Args:
            rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
                -1=negative, 0=neutral anchor.
            rpn_class_logits: [batch, anchors, 2]. RPN classifier logits
                for BG/FG.

        Returns:
            Scalar tensor: mean sparse categorical cross-entropy over the
            non-neutral anchors, or 0.0 when no anchor contributes.
        """
        # Squeeze last dim to simplify
        rpn_match = tf.squeeze(rpn_match, -1)
        # Get anchor classes. Convert the -1/+1 match to 0/1 values.
        anchor_class = tf.cast(tf.math.equal(rpn_match, 1), tf.int32)
        # Positive and Negative anchors contribute to the loss,
        # but neutral anchors (match value = 0) don't.
        indices = tf.where(tf.math.not_equal(rpn_match, 0))
        # Pick rows that contribute to the loss and filter out the rest.
        rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
        anchor_class = tf.gather_nd(anchor_class, indices)
        # Cross entropy loss (from_logits=True: logits, not probabilities).
        loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=anchor_class,
                                                               y_pred=rpn_class_logits,
                                                               from_logits=True)
        # Guard the empty case: reduce_mean of an empty tensor would be NaN.
        loss = tf.keras.backend.switch(tf.size(loss) > 0, tf.math.reduce_mean(loss), tf.constant(0.0))
        return loss
class RPNBboxLoss(tf.keras.losses.Loss):
    def __init__(self, images_per_gpu, name="rpn_bbox_loss", **kwargs):
        """RPN bounding-box refinement loss.

        Args:
            images_per_gpu: number of images per batch on one GPU; used to
                unpack per-image target deltas in batch_pack_graph.
            name: loss name, defaults to "rpn_bbox_loss".
            **kwargs: forwarded to tf.keras.losses.Loss.
        """
        # NOTE(review): redundant -- super().__init__(name=name) sets self.name.
        self.name = name
        self.images_per_gpu = images_per_gpu
        super(RPNBboxLoss, self).__init__(name=name, **kwargs)
    def batch_pack_graph(self, x, counts):
        """Picks different number of values from each row
        in x depending on the values in counts.

        Returns the selected rows from all images concatenated along axis 0.
        """
        outputs = []
        for i in range(self.images_per_gpu):
            outputs.append(x[i, :counts[i]])
        return tf.concat(outputs, axis=0)
    def smooth_l1_loss(self, y_true, y_pred):
        """Element-wise Smooth-L1: 0.5*d^2 when |d| < 1, else |d| - 0.5.

        y_true and y_pred are typically: [N, 4], but could be any shape.
        """
        diff = tf.math.abs(y_true - y_pred)
        less_than_one = tf.cast(tf.math.less(diff, 1.0), "float32")
        loss = (less_than_one * 0.5 * diff ** 2) + (1 - less_than_one) * (diff - 0.5)
        return loss
    def call(self, target_bbox, rpn_match, rpn_bbox, **kwargs):
        """Return the RPN bounding box loss graph.

        Args:
            target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
                Uses 0 padding to fill in unsed bbox deltas.
            rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
                -1=negative, 0=neutral anchor.
            rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]

        Returns:
            Scalar tensor: mean Smooth-L1 over positive anchors, or 0.0.
        """
        # Positive anchors contribute to the loss, but negative and
        # neutral anchors (match value of 0 or -1) don't.
        rpn_match = tf.squeeze(rpn_match, -1)
        indices = tf.where(tf.math.equal(rpn_match, 1))
        # Pick bbox deltas that contribute to the loss
        rpn_bbox = tf.gather_nd(rpn_bbox, indices)  # [num_positive_anchors, 4]
        # Trim target bounding box deltas to the same length as rpn_bbox.
        batch_counts = tf.math.reduce_sum(tf.cast(tf.math.equal(rpn_match, 1), tf.int32), axis=1)
        target_bbox = self.batch_pack_graph(target_bbox, batch_counts)
        loss = self.smooth_l1_loss(target_bbox, rpn_bbox)
        # Guard the empty case: reduce_mean of an empty tensor would be NaN.
        loss = tf.keras.backend.switch(tf.size(loss) > 0, tf.math.reduce_mean(loss), tf.constant(0.0))
        return loss
class MRCNNClassLoss(tf.keras.losses.Loss):
    def __init__(self, batch_size, name="mrcnn_class_loss", **kwargs):
        """Loss for the classifier head of Mask RCNN.

        Args:
            batch_size: number of images per batch; used to iterate the
                per-image active-class lookup in call().
            name: loss name, defaults to "mrcnn_class_loss".
            **kwargs: forwarded to tf.keras.losses.Loss.
        """
        # NOTE(review): redundant -- super().__init__(name=name) sets self.name.
        self.name = name
        self.batch_size = batch_size
        super(MRCNNClassLoss, self).__init__(name=name, **kwargs)
    def call(self, target_class_ids, pred_class_logits, active_class_ids, **kwargs):
        """Loss for the classifier head of Mask RCNN.

        Args:
            target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
                padding to fill in the array.
            pred_class_logits: [batch, num_rois, num_classes]
            active_class_ids: [batch, num_classes]. Has a value of 1 for
                classes that are in the dataset of the image, and 0 for
                classes that are not in the dataset. The position of ones and
                zeros means the class index.

        Returns:
            Scalar tensor: cross-entropy averaged over active predictions.
        """
        target_class_ids = tf.cast(target_class_ids, 'int64')
        # Find predictions of classes that are not in the dataset.
        pred_class_ids = tf.argmax(pred_class_logits, axis=2)
        # pred_active[b, r] is 1 when the class predicted for ROI r is active
        # in image b, else 0.
        pred_active = tf.stack(
            [tf.gather(active_class_ids[b], pred_class_ids[b]) for b in range(self.batch_size)], axis=0
        )
        # Loss
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_class_ids, logits=pred_class_logits)
        # Erase losses of predictions of classes that are not in the active
        # classes of the image.
        loss = loss * pred_active
        # Compute loss mean. Use only predictions that contribute
        # to the loss to get a correct mean.
        loss = tf.math.reduce_sum(loss) / tf.math.reduce_sum(pred_active)
        return loss
class MRCNNBboxLoss(tf.keras.losses.Loss):
    def __init__(self, num_classes, name='mrcnn_bbox_loss', **kwargs):
        """Loss for Mask R-CNN bounding box refinement.

        Args:
            num_classes: total number of classes; used to reshape the
                per-class predicted deltas in call().
            name: loss name, defaults to "mrcnn_bbox_loss".
            **kwargs: forwarded to tf.keras.losses.Loss.
        """
        # NOTE(review): redundant -- super().__init__(name=name) sets self.name.
        self.name = name
        self.num_classes = num_classes
        super(MRCNNBboxLoss, self).__init__(name=name, **kwargs)
    def smooth_l1_loss(self, y_true, y_pred):
        """Element-wise Smooth-L1: 0.5*d^2 when |d| < 1, else |d| - 0.5.

        y_true and y_pred are typically: [N, 4], but could be any shape.
        """
        diff = tf.math.abs(y_true - y_pred)
        less_than_one = tf.cast(tf.math.less(diff, 1.0), "float32")
        loss = (less_than_one * 0.5 * diff ** 2) + (1 - less_than_one) * (diff - 0.5)
        return loss
    def call(self, target_bbox, target_class_ids, pred_bbox):
        """Loss for Mask R-CNN bounding box refinement.

        Args:
            target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
            target_class_ids: [batch, num_rois]. Integer class IDs.
            pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]

        Returns:
            Scalar tensor: mean Smooth-L1 over positive ROIs, or 0.0.
        """
        # Reshape to merge batch and roi dimensions for simplicity.
        target_class_ids = tf.reshape(target_class_ids, (-1,))
        target_bbox = tf.reshape(target_bbox, (-1, 4))
        pred_bbox = tf.reshape(pred_bbox, (-1, self.num_classes, 4))
        # Only positive ROIs contribute to the loss. And only
        # the right class_id of each ROI. Get their indices.
        positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
        positive_roi_class_ids = tf.cast(
            tf.gather(target_class_ids, positive_roi_ix), tf.int64)
        indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
        # Gather the deltas (predicted and true) that contribute to loss
        target_bbox = tf.gather(target_bbox, positive_roi_ix)
        pred_bbox = tf.gather_nd(pred_bbox, indices)
        # Smooth-L1 Loss; 0.0 when there are no positive ROIs.
        loss = tf.keras.backend.switch(tf.size(target_bbox) > 0,
                                       tf.math.reduce_mean(self.smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox)),
                                       tf.constant(0.0))
        return loss
class MRCNNMaskLoss(tf.keras.losses.Loss):
    def __init__(self, name='mrcnn_mask_loss', **kwargs):
        """Mask binary cross-entropy loss for the masks head.

        Args:
            name: loss name, defaults to "mrcnn_mask_loss".
            **kwargs: forwarded to tf.keras.losses.Loss.
        """
        # NOTE(review): redundant -- super().__init__(name=name) sets self.name.
        self.name = name
        super(MRCNNMaskLoss, self).__init__(name=name, **kwargs)
    def call(self, target_masks, target_class_ids, pred_masks):
        """Mask binary cross-entropy loss for the masks head.

        Args:
            target_masks: [batch, num_rois, height, width].
                A float32 tensor of values 0 or 1. Uses zero padding to fill array.
            target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
            pred_masks: [batch, proposals, height, width, num_classes] float32
                tensor with values from 0 to 1.

        Returns:
            Scalar tensor: mean binary cross-entropy over positive ROIs, or 0.0.
        """
        # Reshape for simplicity. Merge first two dimensions into one.
        target_class_ids = tf.reshape(target_class_ids, (-1,))
        mask_shape = tf.shape(target_masks)
        target_masks = tf.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
        pred_shape = tf.shape(pred_masks)
        pred_masks = tf.reshape(pred_masks, (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
        # Permute predicted masks to [N, num_classes, height, width]
        pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
        # Only positive ROIs contribute to the loss. And only
        # the class specific mask of each ROI.
        positive_ix = tf.where(target_class_ids > 0)[:, 0]
        positive_class_ids = tf.cast(
            tf.gather(target_class_ids, positive_ix), tf.int64)
        indices = tf.stack([positive_ix, positive_class_ids], axis=1)
        # Gather the masks (predicted and true) that contribute to loss
        y_true = tf.gather(target_masks, positive_ix)
        y_pred = tf.gather_nd(pred_masks, indices)
        # Compute binary cross entropy. If no positive ROIs, then return 0.
        # shape: [batch, roi, num_classes]
        loss = tf.keras.backend.switch(tf.size(y_true) > 0,
                                       tf.keras.losses.binary_crossentropy(y_true=y_true, y_pred=y_pred),
                                       tf.constant(0.0))
        loss = tf.math.reduce_mean(loss)
        return loss
class L2RegLoss(tf.keras.losses.Loss):
    def __init__(self, model, config, name='l2_regularizer', **kwargs):
        """L2 weight-decay regularization over a model's trainable weights.

        Args:
            model: model whose trainable_weights are regularized.
            config: dict providing 'weight_decay' (l2 factor) and
                'l2_reg_batchnorm' (include batch-norm gamma/beta when truthy).
            name: loss name, defaults to "l2_regularizer".
            **kwargs: forwarded to tf.keras.losses.Loss.
        """
        super(L2RegLoss, self).__init__(name=name, **kwargs)
        # NOTE(review): redundant -- super().__init__ already set self.name.
        self.name = name
        self.config = config
        self.model = model
        self.regularizer = tf.keras.regularizers.l2(self.config['weight_decay'])
    def call(self, dummy=None, **kwargs):
        """Sum of per-weight l2 penalties, each normalized by its element count.

        `dummy` is ignored; it only satisfies the Loss call signature.
        """
        # When 'l2_reg_batchnorm' is falsy, skip the gamma and beta weights of
        # batch-normalization layers; otherwise regularize every trainable weight.
        # NOTE(review): the original comment also claimed biases are skipped,
        # but no bias filter appears below -- confirm intent.
        if self.config['l2_reg_batchnorm']:
            reg_losses = [self.regularizer(w) / tf.cast(tf.size(w), tf.float32)
                          for w in self.model.trainable_weights
                          ]
        else:
            reg_losses = [self.regularizer(w) / tf.cast(tf.size(w), tf.float32)
                          for w in self.model.trainable_weights
                          if 'gamma' not in w.name
                          and 'beta' not in w.name
                          ]
        loss = tf.add_n(reg_losses)
        return loss
# Losses in functional API
def smooth_l1_loss(y_true, y_pred):
    """Element-wise Smooth-L1 loss: 0.5*d^2 when |d| < 1, else |d| - 0.5.

    y_true and y_pred are typically [N, 4] but may be any matching shape.
    """
    abs_err = tf.math.abs(y_true - y_pred)
    # 1.0 where the error is in the quadratic regime, 0.0 in the linear one.
    quadratic_mask = tf.cast(tf.math.less(abs_err, 1.0), "float32")
    quadratic_part = quadratic_mask * 0.5 * abs_err ** 2
    linear_part = (1 - quadratic_mask) * (abs_err - 0.5)
    return quadratic_part + linear_part
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss (functional-API twin of RPNClassLoss).

    Args:
        rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
            -1=negative, 0=neutral anchor.
        rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.

    Returns:
        Scalar tensor: mean cross-entropy over non-neutral anchors, or 0.0.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = tf.cast(tf.math.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(tf.math.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss (from_logits=True: logits, not probabilities).
    loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=anchor_class,
                                                           y_pred=rpn_class_logits,
                                                           from_logits=True)
    # Guard the empty case: reduce_mean of an empty tensor would be NaN.
    loss = tf.keras.backend.switch(tf.size(loss) > 0, tf.math.reduce_mean(loss), tf.constant(0.0))
    return loss
def rpn_bbox_loss_graph(target_bbox, rpn_match, rpn_bbox, config):
    """Return the RPN bounding box loss graph (twin of RPNBboxLoss).

    Args:
        target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
            Uses 0 padding to fill in unsed bbox deltas.
        rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
            -1=negative, 0=neutral anchor.
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
        config: dict providing 'images_per_gpu'.

    Returns:
        Scalar tensor: mean Smooth-L1 over positive anchors, or 0.0.
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = tf.squeeze(rpn_match, -1)
    indices = tf.where(tf.math.equal(rpn_match, 1))
    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)
    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = tf.math.reduce_sum(tf.cast(tf.math.equal(rpn_match, 1), tf.int32), axis=1)
    def batch_pack_graph(x, counts, images_per_gpu):
        """Picks different number of values from each row
        in x depending on the values in counts.
        """
        outputs = []
        for i in range(images_per_gpu):
            outputs.append(x[i, :counts[i]])
        return tf.concat(outputs, axis=0)
    target_bbox = batch_pack_graph(target_bbox, batch_counts, config['images_per_gpu'])
    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    # Guard the empty case: reduce_mean of an empty tensor would be NaN.
    loss = tf.keras.backend.switch(tf.size(loss) > 0, tf.math.reduce_mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits, active_class_ids, config):
    """Loss for the classifier head of Mask RCNN (twin of MRCNNClassLoss).

    Args:
        target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
            padding to fill in the array.
        pred_class_logits: [batch, num_rois, num_classes]
        active_class_ids: [batch, num_classes]. Has a value of 1 for
            classes that are in the dataset of the image, and 0 for classes
            that are not in the dataset. The position of ones and zeros means
            the class index.
        config: dict providing 'batch_size'.

    Returns:
        Scalar tensor: cross-entropy averaged over active predictions.
    """
    target_class_ids = tf.cast(target_class_ids, 'int64')
    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # pred_active[b, r] is 1 when the class predicted for ROI r is active in
    # image b, else 0.
    pred_active = tf.stack(
        [tf.gather(active_class_ids[b], pred_class_ids[b]) for b in range(config['batch_size'])], axis=0
    )
    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_class_ids, logits=pred_class_logits)
    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.math.reduce_sum(loss) / tf.math.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox, config):
    """Loss for Mask R-CNN bounding box refinement (twin of MRCNNBboxLoss).

    Args:
        target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
        target_class_ids: [batch, num_rois]. Integer class IDs.
        pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
        config: dict providing 'num_classes'.

    Returns:
        Scalar tensor: mean Smooth-L1 over positive ROIs, or 0.0.
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = tf.reshape(target_class_ids, (-1,))
    target_bbox = tf.reshape(target_bbox, (-1, 4))
    pred_bbox = tf.reshape(pred_bbox, (-1, config['num_classes'], 4))
    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)
    # Smooth-L1 Loss; 0.0 when there are no positive ROIs.
    loss = tf.keras.backend.switch(tf.size(target_bbox) > 0,
                                   tf.math.reduce_mean(smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox)),
                                   tf.constant(0.0))
    return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head (twin of MRCNNMaskLoss).

    Args:
        target_masks: [batch, num_rois, height, width].
            A float32 tensor of values 0 or 1. Uses zero padding to fill array.
        target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
        pred_masks: [batch, proposals, height, width, num_classes] float32
            tensor with values from 0 to 1.

    Returns:
        Scalar tensor: mean binary cross-entropy over positive ROIs, or 0.0.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = tf.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = tf.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = tf.reshape(pred_masks,
                            (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width]
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)
    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)
    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape: [batch, roi, num_classes]
    loss = tf.keras.backend.switch(tf.size(y_true) > 0,
                                   tf.keras.losses.binary_crossentropy(y_true=y_true, y_pred=y_pred),
                                   tf.constant(0.0))
    loss = tf.math.reduce_mean(loss)
    return loss
| 42.979592 | 118 | 0.637016 | 2,736 | 18,954 | 4.202851 | 0.088085 | 0.040351 | 0.041395 | 0.023132 | 0.912166 | 0.893817 | 0.882772 | 0.857118 | 0.840595 | 0.81981 | 0 | 0.015975 | 0.260209 | 18,954 | 440 | 119 | 43.077273 | 0.804094 | 0.357444 | 0 | 0.675258 | 0 | 0 | 0.016642 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113402 | false | 0 | 0.005155 | 0 | 0.231959 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
97a0a7690a4c7f9585f5abd616533bd4a9f83355 | 1,126 | py | Python | Source/server/db.py | SeungMinSong2929/2020-1-ESCD-SIN | 905415d40d1198b4c739b86d56d0951d3ea40f1d | [
"Apache-2.0"
] | 1 | 2020-06-01T05:43:28.000Z | 2020-06-01T05:43:28.000Z | Source/server/db.py | SeungMinSong2929/2020-1-ESCD-SIN | 905415d40d1198b4c739b86d56d0951d3ea40f1d | [
"Apache-2.0"
] | 3 | 2020-05-29T06:07:19.000Z | 2020-06-26T08:37:48.000Z | Source/server/db.py | SeungMinSong2929/2020-1-ESCD-SIN | 905415d40d1198b4c739b86d56d0951d3ea40f1d | [
"Apache-2.0"
] | 5 | 2020-05-01T07:33:02.000Z | 2020-10-26T02:39:21.000Z | import mysql.connector
def sql(querry, val):
    """Execute a single parameterized write query and commit it.

    Args:
        querry: SQL statement with placeholders (misspelling kept for
            interface compatibility).
        val: parameter sequence bound to the placeholders.

    Errors are reported to stdout (best-effort, as in the original design)
    rather than raised. The connection and cursor are always closed.
    """
    mydb = None
    mycursor = None
    try:
        # Open database connection
        mydb = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="huong",
            database="voice_db"
        )
        # prepare a cursor object using cursor() method
        mycursor = mydb.cursor()
        # run querry
        mycursor.execute(querry, val)
        # commit database
        mydb.commit()
    except Exception as e:
        # Keep the original best-effort behavior: report and continue.
        print("Database connection fail", e)
    finally:
        # Fix: the original leaked the connection/cursor on every call.
        if mycursor is not None:
            mycursor.close()
        if mydb is not None:
            mydb.close()
def sqlSelect(querry, val):
    """Execute a parameterized SELECT and return all fetched rows.

    Args:
        querry: SQL statement with a single placeholder.
        val: scalar value; wrapped into a 1-tuple before binding.

    Returns:
        List of result rows, or None when the query fails (error is
        printed, matching the original best-effort design). The connection
        and cursor are always closed.
    """
    mydb = None
    mycursor = None
    try:
        # Open database connection
        mydb = mysql.connector.connect(
            host="localhost",
            user="root",
            passwd="huong",
            database="voice_db"
        )
        # prepare a cursor object using cursor() method
        mycursor = mydb.cursor()
        # run querry; callers pass a bare value, so wrap it into a tuple
        val = (val,)
        mycursor.execute(querry, val)
        myresult = mycursor.fetchall()
        # commit database
        mydb.commit()
        return myresult
    except Exception as e:
        # Keep the original best-effort behavior: report and return None.
        print("Database connection fail", e)
    finally:
        # Fix: the original leaked the connection/cursor on every call.
        if mycursor is not None:
            mycursor.close()
        if mydb is not None:
            mydb.close()
| 22.52 | 55 | 0.543517 | 110 | 1,126 | 5.545455 | 0.372727 | 0.07377 | 0.042623 | 0.068852 | 0.708197 | 0.708197 | 0.708197 | 0.708197 | 0.708197 | 0.557377 | 0 | 0 | 0.363233 | 1,126 | 49 | 56 | 22.979592 | 0.850767 | 0.173179 | 0 | 0.733333 | 0 | 0 | 0.10846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.066667 | 0.033333 | 0 | 0.133333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
97cc3dbc40f57c810eda5cd4a50db30d725a927c | 204 | py | Python | main/admin.py | ssnwangfei/wfsite | 8634b0c5cebb5eef30b109d260679620f3b46fce | [
"Apache-2.0"
] | null | null | null | main/admin.py | ssnwangfei/wfsite | 8634b0c5cebb5eef30b109d260679620f3b46fce | [
"Apache-2.0"
] | null | null | null | main/admin.py | ssnwangfei/wfsite | 8634b0c5cebb5eef30b109d260679620f3b46fce | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import UserProfile
from .models import UserRelationship
# Expose the profile and relationship models in the Django admin site.
for model in (UserProfile, UserRelationship):
    admin.site.register(model)
| 22.666667 | 37 | 0.833333 | 25 | 204 | 6.8 | 0.48 | 0.117647 | 0.188235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102941 | 204 | 8 | 38 | 25.5 | 0.928962 | 0.127451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
97d222f4c287f099806bc9caf02a2c676022dda7 | 274 | py | Python | Dragon/python/dragon/vm/theano/compile/__init__.py | neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | [
"BSD-2-Clause"
] | 212 | 2015-07-05T07:57:17.000Z | 2022-02-27T01:55:35.000Z | Dragon/python/dragon/vm/theano/compile/__init__.py | neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | [
"BSD-2-Clause"
] | 6 | 2016-07-07T14:31:56.000Z | 2017-12-12T02:21:15.000Z | Dragon/python/dragon/vm/theano/compile/__init__.py | neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | [
"BSD-2-Clause"
] | 71 | 2016-03-24T09:02:41.000Z | 2021-06-03T01:52:41.000Z | # --------------------------------------------------------
# Theano @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
from .function import function
from .scan import scan
from .sharedvalue import shared | 30.444444 | 58 | 0.445255 | 22 | 274 | 5.545455 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016327 | 0.105839 | 274 | 9 | 59 | 30.444444 | 0.481633 | 0.645985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8ad98a8ded60e4fb261521f665866a6634feb6ef | 822 | py | Python | acceptance_tests/tests/tests/test_stats_db.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | acceptance_tests/tests/tests/test_stats_db.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | acceptance_tests/tests/tests/test_stats_db.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import pytest
import subprocess
def test_no_extra(app_connection, composition):
composition.run('run_test', 'c2cwsgiutils_stats_db.py',
'--db', 'postgresql://www-data:www-data@db:5432/test', '--schema', 'public')
def test_with_extra(app_connection, composition):
composition.run('run_test', 'c2cwsgiutils_stats_db.py',
'--db', 'postgresql://www-data:www-data@db:5432/test', '--schema', 'public',
'--extra', "select 'toto', 42")
def test_error(app_connection, composition):
with pytest.raises(subprocess.CalledProcessError):
composition.run('run_test', 'c2cwsgiutils_stats_db.py',
'--db', 'postgresql://www-data:www-data@db:5432/test', '--schema', 'public',
'--extra', "select 'toto, 42")
| 39.142857 | 100 | 0.616788 | 94 | 822 | 5.212766 | 0.297872 | 0.085714 | 0.146939 | 0.128571 | 0.738776 | 0.738776 | 0.738776 | 0.738776 | 0.738776 | 0.738776 | 0 | 0.029366 | 0.212895 | 822 | 20 | 101 | 41.1 | 0.727975 | 0 | 0 | 0.357143 | 0 | 0 | 0.396594 | 0.244526 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c12e4bf1cfc69f0b896b7e1cd6147f9ddb5cb403 | 137 | py | Python | 33.operacoes_com_dicionarios/6.setdefault.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 33.operacoes_com_dicionarios/6.setdefault.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 33.operacoes_com_dicionarios/6.setdefault.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | dicionario_3 = {'usuario':'id_3','usuario': 'id_4'}
print(dicionario_3.setdefault("id_3",333))
print(dicionario_3.setdefault("id_4",333)) | 45.666667 | 51 | 0.744526 | 22 | 137 | 4.318182 | 0.363636 | 0.347368 | 0.210526 | 0.547368 | 0.589474 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098485 | 0.036496 | 137 | 3 | 52 | 45.666667 | 0.621212 | 0 | 0 | 0 | 0 | 0 | 0.217391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
c15e64be230f322a538cc6f29a0b6d932692b411 | 103 | py | Python | Chapter 11/ch11_2_4.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 11/ch11_2_4.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 11/ch11_2_4.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | def concat(a,b):
return tuple(str(a))*b
a=100
print(concat(a,2))
# ('1','0','0', '1','0','0') | 17.166667 | 28 | 0.485437 | 21 | 103 | 2.380952 | 0.571429 | 0.28 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 0.174757 | 103 | 6 | 28 | 17.166667 | 0.470588 | 0.252427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0.25 | 0.5 | 0.25 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
c1607117887ee54cdac11071ba8cd36b84d6f55c | 43 | py | Python | Hacker Rank/Python/collections.py | Ahmad-Fahad/Python | 5a5f8f3395f7085947430b8309f6af70b2e25a77 | [
"Apache-2.0"
] | null | null | null | Hacker Rank/Python/collections.py | Ahmad-Fahad/Python | 5a5f8f3395f7085947430b8309f6af70b2e25a77 | [
"Apache-2.0"
] | null | null | null | Hacker Rank/Python/collections.py | Ahmad-Fahad/Python | 5a5f8f3395f7085947430b8309f6af70b2e25a77 | [
"Apache-2.0"
] | null | null | null | import collections
print(dir(collections)) | 14.333333 | 23 | 0.837209 | 5 | 43 | 7.2 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 43 | 3 | 23 | 14.333333 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
c1c144f4ccaed2829146ba42e4be9669b8fb0c44 | 1,779 | py | Python | tests/parsers/transform/test_cleaning.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | null | null | null | tests/parsers/transform/test_cleaning.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | 84 | 2020-07-27T13:01:12.000Z | 2022-03-16T17:10:23.000Z | tests/parsers/transform/test_cleaning.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | null | null | null | from unittest import TestCase
import pandas as pd
from moonstone.parsers.transform.cleaning import StringCleaner
class TestStringCleaner(TestCase):
    """Tests for StringCleaner's column-cleaning methods."""

    def _check_cleaning(self, method_name, df, expected_df):
        """Run ``method_name`` on the 'string' column and verify the result.

        Checks both the cleaned dataframe and the recorded operation
        history: each StringCleaner method is expected to append
        ``[method_name, {'col_name': <column>}]`` to ``history``.
        """
        expected_history = [
            [method_name, {'col_name': 'string'}]
        ]
        transform_cleaning = StringCleaner(df)
        getattr(transform_cleaning, method_name)('string')
        self.assertTrue(transform_cleaning.history)
        self.assertListEqual(transform_cleaning.history, expected_history)
        pd.testing.assert_frame_equal(transform_cleaning.df, expected_df)

    def test_remove_trailing_spaces(self):
        """Leading/trailing whitespace is stripped from string cells."""
        df = pd.DataFrame(
            [
                [1, ' b'],
                [4, " a "]
            ],
            columns=['number', 'string']
        )
        expected_df = pd.DataFrame(
            [
                [1, 'b'],
                [4, "a"]
            ],
            columns=['number', 'string']
        )
        self._check_cleaning("remove_trailing_spaces", df, expected_df)

    def test_to_slug(self):
        """String cells are slugified (lower case, hyphen separated)."""
        df = pd.DataFrame(
            [
                [1, ' b test '],
                [4, " a Stuff.2"]
            ],
            columns=['number', 'string']
        )
        expected_df = pd.DataFrame(
            [
                [1, 'b-test'],
                [4, "a-stuff-2"]
            ],
            columns=['number', 'string']
        )
        self._check_cleaning("to_slug", df, expected_df)
| 30.152542 | 74 | 0.539629 | 164 | 1,779 | 5.621951 | 0.27439 | 0.20282 | 0.056399 | 0.060738 | 0.789588 | 0.767896 | 0.759219 | 0.759219 | 0.759219 | 0.741866 | 0 | 0.008606 | 0.346824 | 1,779 | 58 | 75 | 30.672414 | 0.784854 | 0 | 0 | 0.576923 | 0 | 0 | 0.088814 | 0.012367 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.038462 | false | 0 | 0.057692 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a9aabea56b89a41efc773584d4a6e41f0bd24da4 | 1,459 | py | Python | tests/test_reset.py | dmitrygx/ucx-py | b0132c5f269e4b681225f7f15969100f86ac7742 | [
"BSD-3-Clause"
] | 76 | 2019-06-08T04:03:39.000Z | 2022-01-07T20:34:23.000Z | tests/test_reset.py | dmitrygx/ucx-py | b0132c5f269e4b681225f7f15969100f86ac7742 | [
"BSD-3-Clause"
] | 644 | 2019-06-04T23:06:02.000Z | 2022-02-24T11:17:45.000Z | tests/test_reset.py | dmitrygx/ucx-py | b0132c5f269e4b681225f7f15969100f86ac7742 | [
"BSD-3-Clause"
] | 32 | 2019-08-14T09:22:02.000Z | 2022-01-21T20:17:50.000Z | import pytest
import ucp
class ResetAfterN:
    """Callable that triggers ucp.reset() on exactly its n-th invocation."""

    def __init__(self, n):
        # Invocation number at which ucp.reset() fires.
        self.n = n
        self.count = 0

    def __call__(self):
        self.count += 1
        if self.count != self.n:
            return
        ucp.reset()
@pytest.mark.asyncio
async def test_reset():
    """ucp.reset() succeeds once the listener and endpoint are released."""
    # reset() only triggers ucp.reset() on its 2nd invocation: one call is
    # expected from the server callback, one is made explicitly below.
    reset = ResetAfterN(2)

    def server(ep):
        # Server side: abort the incoming endpoint and count one reset() call.
        ep.abort()
        reset()

    lt = ucp.create_listener(server)
    ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
    # Drop both references so no Endpoint/Listener remains in scope when
    # the final reset() call actually invokes ucp.reset().
    del lt
    del ep
    reset()
@pytest.mark.asyncio
async def test_lt_still_in_scope_error():
    """ucp.reset() raises UCXError while the listener is still alive."""
    reset = ResetAfterN(2)

    def server(ep):
        ep.abort()
        reset()

    lt = ucp.create_listener(server)
    ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
    # Only the endpoint is dropped; `lt` deliberately stays in scope.
    del ep
    with pytest.raises(
        ucp.exceptions.UCXError,
        match="Trying to reset UCX but not all Endpoints and/or Listeners are closed()",
    ):
        ucp.reset()
    # Once the listener is closed, resetting is expected to succeed.
    lt.close()
    ucp.reset()
@pytest.mark.asyncio
async def test_ep_still_in_scope_error():
    """ucp.reset() raises UCXError while an endpoint is still alive."""
    reset = ResetAfterN(2)

    def server(ep):
        ep.abort()
        reset()

    lt = ucp.create_listener(server)
    ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
    # Only the listener is dropped; `ep` deliberately stays in scope.
    del lt
    with pytest.raises(
        ucp.exceptions.UCXError,
        match="Trying to reset UCX but not all Endpoints and/or Listeners are closed()",
    ):
        ucp.reset()
    # After aborting the endpoint, resetting is expected to succeed.
    ep.abort()
    ucp.reset()
| 19.986301 | 88 | 0.60658 | 197 | 1,459 | 4.350254 | 0.284264 | 0.056009 | 0.052509 | 0.077013 | 0.808635 | 0.808635 | 0.808635 | 0.768961 | 0.682614 | 0.682614 | 0 | 0.004739 | 0.276902 | 1,459 | 72 | 89 | 20.263889 | 0.807583 | 0.021247 | 0 | 0.754717 | 0 | 0 | 0.099859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0 | 0.037736 | 0 | 0.150943 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a9b872ed6d8ed89bb8bd668900d5705ad8b4360f | 40 | py | Python | lepmlutils/general/__init__.py | Lewington-pitsos/mlutils | c92322a8a2fc0b5342d44b0d92051a93c6eede44 | [
"MIT"
] | null | null | null | lepmlutils/general/__init__.py | Lewington-pitsos/mlutils | c92322a8a2fc0b5342d44b0d92051a93c6eede44 | [
"MIT"
] | null | null | null | lepmlutils/general/__init__.py | Lewington-pitsos/mlutils | c92322a8a2fc0b5342d44b0d92051a93c6eede44 | [
"MIT"
] | null | null | null | from .audio import *
from .help import * | 20 | 20 | 0.725 | 6 | 40 | 4.833333 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175 | 40 | 2 | 21 | 20 | 0.878788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a9cb38359b72eaef2008dca6b1135dd37938c1e6 | 72 | py | Python | tests/test_ReservationEnquiry.py | fraser-langton/Quandoo | 3a5e1241b645129d805213d01221ede8f2b79aa2 | [
"MIT"
] | 1 | 2019-08-08T11:05:28.000Z | 2019-08-08T11:05:28.000Z | tests/test_ReservationEnquiry.py | fraser-langton/Quandoo | 3a5e1241b645129d805213d01221ede8f2b79aa2 | [
"MIT"
] | 1 | 2021-01-31T23:16:09.000Z | 2021-03-05T01:33:49.000Z | tests/test_ReservationEnquiry.py | fraser-langton/Quandoo | 3a5e1241b645129d805213d01221ede8f2b79aa2 | [
"MIT"
] | 1 | 2020-08-19T09:06:42.000Z | 2020-08-19T09:06:42.000Z | import unittest
class ReservationEnquiry(unittest.TestCase):
    """Placeholder test case for reservation enquiries; no tests implemented yet."""
    pass
| 12 | 44 | 0.791667 | 7 | 72 | 8.142857 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152778 | 72 | 5 | 45 | 14.4 | 0.934426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
e756d15612f9bfead11e9121641f624b520bb17e | 128 | py | Python | discretisedfield/tests/test_init.py | ubermag/discretisedfield | fec016c85fcc091006e678845bca999b993b987c | [
"BSD-3-Clause"
] | 9 | 2019-08-30T14:00:43.000Z | 2022-01-16T15:01:44.000Z | discretisedfield/tests/test_init.py | StephenPotato/discretisedfield | de49577b47acadd9372854252688194c348844a3 | [
"BSD-3-Clause"
] | 50 | 2019-06-13T13:41:57.000Z | 2022-03-28T09:14:33.000Z | discretisedfield/tests/test_init.py | StephenPotato/discretisedfield | de49577b47acadd9372854252688194c348844a3 | [
"BSD-3-Clause"
] | 7 | 2019-08-28T14:16:10.000Z | 2021-12-13T21:06:06.000Z | import discretisedfield as df
def test_version():
    """The package advertises a dotted version string."""
    version = df.__version__
    assert isinstance(version, str)
    assert '.' in version
| 18.285714 | 42 | 0.734375 | 16 | 128 | 5.3125 | 0.6875 | 0.211765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179688 | 128 | 6 | 43 | 21.333333 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0.007813 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.25 | true | 0 | 0.25 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e773d66b1cb77fed04906fed9ba3fae003499161 | 32 | py | Python | pybarker/utils/otp/__init__.py | darkbarker/pybarker | 55e86a9b4b15737cfdedb36f23b37a808a44a885 | [
"MIT"
] | 2 | 2019-06-22T18:40:26.000Z | 2022-02-01T12:15:20.000Z | pybarker/utils/otp/__init__.py | darkbarker/pybarker | 55e86a9b4b15737cfdedb36f23b37a808a44a885 | [
"MIT"
] | null | null | null | pybarker/utils/otp/__init__.py | darkbarker/pybarker | 55e86a9b4b15737cfdedb36f23b37a808a44a885 | [
"MIT"
] | null | null | null | from .otp_store import OtpStore
| 16 | 31 | 0.84375 | 5 | 32 | 5.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 32 | 1 | 32 | 32 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e7c10e084394bd57b11f5ae3d741af9a0e18506f | 190 | py | Python | data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-2/exercicio2-2.py | joaovictor-loureiro/data-science | 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | [
"MIT"
] | null | null | null | data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-2/exercicio2-2.py | joaovictor-loureiro/data-science | 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | [
"MIT"
] | null | null | null | data-science/exercicios/livro-introducao-a-programacao-com-python/capitulo-2/exercicio2-2.py | joaovictor-loureiro/data-science | 21ad240e1db94d614e54fcb3fbf6ef74a78af9d8 | [
"MIT"
] | null | null | null | # Exercício 2.2 - Digite a seguinte expressão no interpretador:
# 10 % 3 * 10 ** 2 + 1 - 10 * 4 / 2
print('10 % 3 * 10 ** 2 + 1 - 10 * 4 / 2 = {}'.format(10 % 3 * 10 ** 2 + 1 - 10 * 4 / 2)) | 47.5 | 89 | 0.494737 | 35 | 190 | 2.685714 | 0.4 | 0.095745 | 0.159574 | 0.191489 | 0.351064 | 0.351064 | 0.351064 | 0.351064 | 0 | 0 | 0 | 0.263158 | 0.3 | 190 | 4 | 89 | 47.5 | 0.443609 | 0.5 | 0 | 0 | 0 | 0 | 0.408602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
8216d7c335fbfff622c13c667740afad98f2079d | 744 | py | Python | nfv/nfv-vim/nfv_vim/api/openstack/__init__.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2020-02-07T19:01:36.000Z | 2022-02-23T01:41:46.000Z | nfv/nfv-vim/nfv_vim/api/openstack/__init__.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 1 | 2021-01-14T12:02:25.000Z | 2021-01-14T12:02:25.000Z | nfv/nfv-vim/nfv_vim/api/openstack/__init__.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2021-01-13T08:39:21.000Z | 2022-02-09T00:21:55.000Z | #
# Copyright (c) 2015-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_vim.api.openstack._config import CONF # noqa: F401
from nfv_vim.api.openstack._config import config_load # noqa: F401
from nfv_vim.api.openstack._openstack import get_directory # noqa: F401
from nfv_vim.api.openstack._openstack import get_token # noqa: F401
from nfv_vim.api.openstack._openstack import OPENSTACK_SERVICE # noqa: F401
from nfv_vim.api.openstack._openstack import PLATFORM_SERVICE # noqa: F401
from nfv_vim.api.openstack._openstack import SERVICE_CATEGORY # noqa: F401
from nfv_vim.api.openstack._openstack import validate_token # noqa: F401
from nfv_vim.api.openstack._rest_api import rest_api_request # noqa: F401
| 49.6 | 76 | 0.803763 | 114 | 744 | 5 | 0.298246 | 0.110526 | 0.157895 | 0.205263 | 0.712281 | 0.712281 | 0.712281 | 0.578947 | 0.508772 | 0.350877 | 0 | 0.056231 | 0.115591 | 744 | 14 | 77 | 53.142857 | 0.81003 | 0.245968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
82329d285d9a3fa4b43d5f8c4989bc191fce16d8 | 196 | py | Python | codes_/0434_Number_of_Segments_in_a_String.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0434_Number_of_Segments_in_a_String.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0434_Number_of_Segments_in_a_String.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | # %% [434. Number of Segments in a String](https://leetcode.com/problems/number-of-segments-in-a-string/)
class Solution:
    def countSegments(self, s: str) -> int:
        """Return the number of whitespace-delimited segments in ``s``."""
        segments = s.split()
        return len(segments)
| 39.2 | 105 | 0.673469 | 29 | 196 | 4.551724 | 0.758621 | 0.121212 | 0.242424 | 0.272727 | 0.378788 | 0.378788 | 0 | 0 | 0 | 0 | 0 | 0.018182 | 0.158163 | 196 | 4 | 106 | 49 | 0.781818 | 0.52551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
823d80206c74739b4ae58e9e4671e30c83b3bb7c | 49 | py | Python | terrascript/icinga2/d.py | vutsalsinghal/python-terrascript | 3b9fb5ad77453d330fb0cd03524154a342c5d5dc | [
"BSD-2-Clause"
] | null | null | null | terrascript/icinga2/d.py | vutsalsinghal/python-terrascript | 3b9fb5ad77453d330fb0cd03524154a342c5d5dc | [
"BSD-2-Clause"
] | null | null | null | terrascript/icinga2/d.py | vutsalsinghal/python-terrascript | 3b9fb5ad77453d330fb0cd03524154a342c5d5dc | [
"BSD-2-Clause"
] | null | null | null | # terrascript/icinga2/d.py
import terrascript
| 9.8 | 27 | 0.77551 | 6 | 49 | 6.333333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 0.142857 | 49 | 4 | 28 | 12.25 | 0.880952 | 0.489796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4189d7bce5bd8b90212bd37ee21024cac26c5064 | 837 | py | Python | news/consumers.py | soumith2105/vasv-stdin-backend | 72472af0f4a9ea5d9d51f980d148badbb9252fe6 | [
"MIT"
] | null | null | null | news/consumers.py | soumith2105/vasv-stdin-backend | 72472af0f4a9ea5d9d51f980d148badbb9252fe6 | [
"MIT"
] | 1 | 2022-02-21T15:09:06.000Z | 2022-02-21T15:09:06.000Z | news/consumers.py | soumith2105/vasv-stdin-backend | 72472af0f4a9ea5d9d51f980d148badbb9252fe6 | [
"MIT"
] | null | null | null | from channels.generic.websocket import AsyncJsonWebsocketConsumer
from news.utilities.news_helpers import fetch_news
class NewsSetupWebSocket(AsyncJsonWebsocketConsumer):
    """One-shot consumer: on connect, runs fetch_news(1, 20), reports, closes."""

    async def connect(self):
        # Accept the connection, do the fetch, report success, then hang up.
        await self.accept()
        await fetch_news(1, 20)
        await self.send_message("success", "Done")
        await self.close()

    async def send_message(self, message_type, message):
        """Send a JSON payload of the form {"type": ..., "message": ...}."""
        payload = {"type": message_type, "message": message}
        await self.send_json(payload)
class NewsSyncWebSocket(AsyncJsonWebsocketConsumer):
    """One-shot consumer: on connect, runs fetch_news(1, 2, increment=2), reports, closes."""

    async def connect(self):
        # Accept the connection, do the fetch, report success, then hang up.
        await self.accept()
        await fetch_news(1, 2, increment=2)
        await self.send_message("success", "Done")
        await self.close()

    async def send_message(self, message_type, message):
        """Send a JSON payload of the form {"type": ..., "message": ...}."""
        payload = {"type": message_type, "message": message}
        await self.send_json(payload)
| 32.192308 | 72 | 0.703704 | 98 | 837 | 5.867347 | 0.306122 | 0.125217 | 0.090435 | 0.142609 | 0.72 | 0.72 | 0.72 | 0.72 | 0.72 | 0.72 | 0 | 0.008889 | 0.193548 | 837 | 25 | 73 | 33.48 | 0.842963 | 0 | 0 | 0.666667 | 0 | 0 | 0.052569 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
68cb36ee35dc1e5246975fca4d5778e2a8ac361d | 810 | py | Python | octicons16px/eye.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | 1 | 2021-01-28T06:47:39.000Z | 2021-01-28T06:47:39.000Z | octicons16px/eye.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | null | null | null | octicons16px/eye.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | null | null | null |
# Inline SVG markup for the 16px Octicons "eye" icon.
OCTICON_EYE = """
<svg class="octicon octicon-eye" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path></svg>
"""
| 162 | 786 | 0.67037 | 226 | 810 | 2.39823 | 0.469027 | 0.02214 | 0.027675 | 0.0369 | 0.306273 | 0.306273 | 0.306273 | 0.306273 | 0.191882 | 0.125461 | 0 | 0.583799 | 0.116049 | 810 | 4 | 787 | 202.5 | 0.173184 | 0 | 0 | 0 | 0 | 0.333333 | 0.974042 | 0.097651 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
68fef071a8adc2e330997231a7f4d7a13200f324 | 150 | py | Python | qulab/server/setup.py | weiyangliu/QuLab | f3ff8ff2120be96f57c1d293d9be15df17717526 | [
"MIT"
] | null | null | null | qulab/server/setup.py | weiyangliu/QuLab | f3ff8ff2120be96f57c1d293d9be15df17717526 | [
"MIT"
] | null | null | null | qulab/server/setup.py | weiyangliu/QuLab | f3ff8ff2120be96f57c1d293d9be15df17717526 | [
"MIT"
] | null | null | null | import asyncio
from motor.motor_asyncio import AsyncIOMotorClient
from notebook.auth.security import passwd, passwd_check
def setup():
    """Server setup entry point; currently an unimplemented stub."""
    pass
| 18.75 | 55 | 0.793333 | 19 | 150 | 6.157895 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 150 | 7 | 56 | 21.428571 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0.4 | 0.6 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
6b55ac23b3a1c892a1c90b70ca4230cd14098521 | 24 | py | Python | catakin_proj/devel/lib/python2.7/dist-packages/quad_controller_rl/srv/__init__.py | amitkumar05/ReinforcementLearning-Quadcopter | b7b8985f348068af6b85c385a5fb5d8b8fff0f8f | [
"MIT"
] | 1 | 2018-10-17T14:45:33.000Z | 2018-10-17T14:45:33.000Z | catakin_proj/devel/lib/python2.7/dist-packages/quad_controller_rl/srv/__init__.py | amitkumar05/ReinforcementLearning-Quadcopter | b7b8985f348068af6b85c385a5fb5d8b8fff0f8f | [
"MIT"
] | null | null | null | catakin_proj/devel/lib/python2.7/dist-packages/quad_controller_rl/srv/__init__.py | amitkumar05/ReinforcementLearning-Quadcopter | b7b8985f348068af6b85c385a5fb5d8b8fff0f8f | [
"MIT"
] | null | null | null | from ._SetPose import *
| 12 | 23 | 0.75 | 3 | 24 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6b7c7a8fbaffca5be111e4beb4518c9da07254be | 15,927 | py | Python | spdb/spatialdb/test/test_region.py | jhuapl-boss/spdb | 44d41e2b7a7b961e55746e1a5527d5419a74c2ce | [
"Apache-2.0"
] | 5 | 2016-05-12T19:48:45.000Z | 2018-11-17T00:15:23.000Z | spdb/spatialdb/test/test_region.py | jhuapl-boss/spdb | 44d41e2b7a7b961e55746e1a5527d5419a74c2ce | [
"Apache-2.0"
] | 5 | 2018-01-15T18:14:42.000Z | 2020-07-30T21:59:16.000Z | spdb/spatialdb/test/test_region.py | jhuapl-boss/spdb | 44d41e2b7a7b961e55746e1a5527d5419a74c2ce | [
"Apache-2.0"
] | 3 | 2017-09-21T11:40:06.000Z | 2018-05-14T20:15:40.000Z | # Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spdb.spatialdb.region import Region
import unittest
class TestRegion(unittest.TestCase):
def test_get_cuboid_aligned_sub_region_cuboid_aligned(self):
"""Region already cuboid aligned case."""
resolution = 0
corner = (512, 1024, 32)
extent = (1024, 512, 32)
expected = Region.Cuboids(
x_cuboids=range(1, 3),
y_cuboids=range(2, 3),
z_cuboids=range(2, 4)
)
actual = Region.get_cuboid_aligned_sub_region(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_cuboid_aligned_sub_region_x_not_cuboid_aligned(self):
"""Region not cuboid aligned along x axis."""
resolution = 0
corner = (511, 1024, 32)
extent = (1026, 512, 32)
expected = Region.Cuboids(
x_cuboids=range(1, 3),
y_cuboids=range(2, 3),
z_cuboids=range(2, 4)
)
actual = Region.get_cuboid_aligned_sub_region(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_cuboid_aligned_sub_region_y_not_cuboid_aligned(self):
"""Region not cuboid aligned along y axis."""
resolution = 0
corner = (512, 1023, 32)
extent = (1024, 514, 32)
expected = Region.Cuboids(
x_cuboids=range(1, 3),
y_cuboids=range(2, 3),
z_cuboids=range(2, 4)
)
actual = Region.get_cuboid_aligned_sub_region(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_cuboid_aligned_sub_region_z_not_cuboid_aligned(self):
"""Region not cuboid aligned along z axis."""
resolution = 0
corner = (512, 1024, 15)
extent = (1024, 512, 18)
expected = Region.Cuboids(
x_cuboids=range(1, 3),
y_cuboids=range(2, 3),
z_cuboids=range(1, 2)
)
actual = Region.get_cuboid_aligned_sub_region(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_cuboid_aligned_sub_region_smaller_than_cuboid(self):
"""Requested region smaller than a cuboid."""
resolution = 0
corner = (512, 1024, 16)
extent = (100, 50, 12)
expected = Region.Cuboids(
x_cuboids=range(1, 1),
y_cuboids=range(2, 2),
z_cuboids=range(1, 1)
)
actual = Region.get_cuboid_aligned_sub_region(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_cuboid_aligned_sub_region_smaller_than_cuboid_within_first_cuboid(self):
"""Request region within the bounds of the first cuboid."""
resolution = 0
corner = (100, 50, 4)
extent = (20, 20, 4)
expected = Region.Cuboids(
x_cuboids=range(0, -1),
y_cuboids=range(0, -1),
z_cuboids=range(0, -1)
)
actual = Region.get_cuboid_aligned_sub_region(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_near_side_none(self):
"""Near side cuboid aligned along z axis, so z extent is 0."""
resolution = 0
corner = (512, 1024, 16)
extent = (1024, 512, 16)
expected = Region.Bounds(
corner=corner,
extent=(1024, 512, 0)
)
actual = Region.get_sub_region_x_y_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_near_side(self):
"""Near side non-cuboid aligned along z axis."""
resolution = 0
corner = (512, 1024, 14)
extent = (1024, 512, 18)
expected = Region.Bounds(
corner=corner,
extent=(1024, 512, 2)
)
actual = Region.get_sub_region_x_y_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_near_side_small_extents(self):
"""
Near side non-cuboid aligned along z axis and extents less than a
cuboid, but a cuboid boundary is crossed.
"""
resolution = 0
corner = (512, 490, 14)
extent = (100, 100, 16)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_x_y_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_near_side_less_than_cuboid(self):
"""Near side non-cuboid aligned along z axis - extents less than a cuboid."""
resolution = 0
corner = (512, 1024, 4)
extent = (1024, 512, 10)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_x_y_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_near_side_less_than_cuboid2(self):
"""Cuboid aligned on near side but extents less than a cuboid."""
resolution = 0
corner = (512, 1024, 16)
extent = (1024, 512, 10)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_x_y_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_far_side_none(self):
"""Far side cuboid aligned along z axis, so z extent is 0."""
resolution = 0
corner = (512, 1024, 14)
extent = (1024, 512, 18)
expected = Region.Bounds(
corner=(corner[0], corner[1], 32),
extent=(1024, 512, 0)
)
actual = Region.get_sub_region_x_y_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_far_side(self):
"""Far side non-cuboid aligned along z axis."""
resolution = 0
corner = (512, 1024, 18)
extent = (1024, 512, 15)
expected = Region.Bounds(
corner=(corner[0], corner[1], 32),
extent=(1024, 512, 1)
)
actual = Region.get_sub_region_x_y_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_y_block_far_side_less_than_cuboid(self):
"""
Far side non-cuboid aligned along z axis - extents less than a cuboid.
Expect a 0 width slice in the z dimension. This case should be covered
by Region.get_sub_region_x_y_block_near_side().
"""
resolution = 0
corner = (512, 1024, 17)
extent = (1024, 512, 10)
expected = Region.Bounds(
corner=(corner[0], corner[1], 16),
extent=(1024, 512, 0)
)
actual = Region.get_sub_region_x_y_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_near_side_none(self):
"""Near side cuboid aligned along y axis, so y extent is 0."""
resolution = 0
corner = (512, 1024, 16)
extent = (1024, 512, 16)
expected = Region.Bounds(
corner=corner,
extent=(1024, 0, 16)
)
actual = Region.get_sub_region_x_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_near_side(self):
"""Near side non-cuboid aligned along y axis."""
resolution = 0
corner = (512, 1022, 16)
extent = (1024, 514, 16)
expected = Region.Bounds(
corner=corner,
extent=(1024, 2, 16)
)
actual = Region.get_sub_region_x_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_near_side_small_extents(self):
"""
Near side non-cuboid aligned along x axis and extents less than a
cuboid, but a cuboid boundary is crossed.
"""
resolution = 0
corner = (512, 490, 14)
extent = (100, 100, 16)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_x_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_near_side_less_than_cuboid(self):
"""Near side non-cuboid aligned along y axis - extents less than a cuboid."""
resolution = 0
corner = (512, 100, 0)
extent = (1024, 128, 32)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_x_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_near_side_less_than_cuboid2(self):
"""
Near side non-cuboid aligned along y axis - extents less than a cuboid.
This is the same as test_get_sub_region_x_z_block_far_side_less_than_cuboid(),
but for the near side calculation, there should be non-zero extents.
"""
resolution = 0
corner = (512, 1024, 17)
extent = (1024, 12, 50)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_x_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_far_side_none(self):
"""Far side cuboid aligned along z axis, so z extent is 0."""
resolution = 0
corner = (512, 1023, 16)
extent = (1024, 513, 20)
expected = Region.Bounds(
corner=(corner[0], 1536, corner[2]),
extent=(1024, 0, 20)
)
actual = Region.get_sub_region_x_z_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_far_side(self):
"""Far side non-cuboid aligned along z axis."""
resolution = 0
corner = (512, 1024, 18)
extent = (1024, 514, 16)
expected = Region.Bounds(
corner=(corner[0], 1536, corner[2]),
extent=(1024, 2, 16)
)
actual = Region.get_sub_region_x_z_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_x_z_block_far_side_less_than_cuboid(self):
"""
Far side non-cuboid aligned along z axis - extents less than a cuboid.
Expect a 0 width slice in the z dimension. This case should be covered
by Region.get_sub_region_x_z_block_near_side().
See test_get_sub_region_x_z_block_near_side_less_than_cuboid2().
"""
resolution = 0
corner = (512, 1024, 17)
extent = (1024, 12, 50)
expected = Region.Bounds(
corner=(corner[0], 1024, corner[2]),
extent=(1024, 0, 50)
)
actual = Region.get_sub_region_x_z_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_near_side_none(self):
"""Near side cuboid aligned along x axis, so x extent is 0."""
resolution = 0
corner = (512, 1024, 16)
extent = (1024, 512, 16)
expected = Region.Bounds(
corner=corner,
extent=(0, 512, 16)
)
actual = Region.get_sub_region_y_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_near_side(self):
"""Near side non-cuboid aligned along x axis."""
resolution = 0
corner = (509, 1024, 14)
extent = (1027, 512, 16)
expected = Region.Bounds(
corner=corner,
extent=(3, 512, 16)
)
actual = Region.get_sub_region_y_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_near_side_small_extents(self):
"""
Near side non-cuboid aligned along x axis and extents less than a
cuboid, but a cuboid boundary is crossed.
"""
resolution = 0
corner = (509, 1024, 14)
extent = (100, 512, 16)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_y_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_near_side_less_than_cuboid(self):
"""Near side non-cuboid aligned along x axis - extents less than a cuboid."""
resolution = 0
corner = (400, 1024, 4)
extent = (80, 512, 10)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_y_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_near_side_less_than_cuboid2(self):
"""Cuboid aligned on near side but extents less than a cuboid."""
resolution = 0
corner = (512, 1024, 16)
extent = (200, 512, 10)
expected = Region.Bounds(
corner=corner,
extent=extent
)
actual = Region.get_sub_region_y_z_block_near_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_far_side_none(self):
"""Far side cuboid aligned along x axis, so x extent is 0."""
resolution = 0
corner = (512, 1023, 16)
extent = (1024, 513, 20)
expected = Region.Bounds(
corner=(1536, corner[1], corner[2]),
extent=(0, extent[1], extent[2])
)
actual = Region.get_sub_region_y_z_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_far_side(self):
"""Far side non-cuboid aligned along x axis."""
resolution = 0
corner = (512, 1024, 18)
extent = (1026, 514, 16)
expected = Region.Bounds(
corner=(1536, corner[1], corner[2]),
extent=(2, extent[1], extent[2])
)
actual = Region.get_sub_region_y_z_block_far_side(resolution, corner, extent)
self.assertEqual(expected, actual)
def test_get_sub_region_y_z_block_far_side_less_than_cuboid(self):
    """
    Far side not cuboid aligned on the x axis with extents below a cuboid:
    expect a zero-width slice in the x dimension.  The near-side helper
    covers this case; see
    test_get_sub_region_y_z_block_near_side_less_than_cuboid2().
    """
    res = 0
    corner = (512, 1024, 17)
    extent = (104, 12, 50)
    want = Region.Bounds(corner=corner, extent=(0, extent[1], extent[2]))
    self.assertEqual(
        want, Region.get_sub_region_y_z_block_far_side(res, corner, extent))
# Allow the test module to be executed directly (python <file>.py).
if __name__ == '__main__':
    unittest.main()
| 35.393333 | 89 | 0.622465 | 2,083 | 15,927 | 4.492559 | 0.078733 | 0.063475 | 0.069246 | 0.050011 | 0.890468 | 0.884911 | 0.878927 | 0.868455 | 0.851785 | 0.817055 | 0 | 0.065177 | 0.286181 | 15,927 | 449 | 90 | 35.47216 | 0.757938 | 0.189929 | 0 | 0.665595 | 0 | 0 | 0.00064 | 0 | 0 | 0 | 0 | 0 | 0.096463 | 1 | 0.096463 | false | 0 | 0.006431 | 0 | 0.106109 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6baf91a7278411acaa278ed5a1849fb8da3b0a14 | 319 | py | Python | sisyphe/__init__.py | antoinediez/Sisyphe | f6bb067cd8898450174c5d97bb0f3f0cb5db8b87 | [
"MIT"
] | 8 | 2021-06-05T20:03:35.000Z | 2021-10-01T21:20:24.000Z | sisyphe/__init__.py | antoinediez/Sisyphe | f6bb067cd8898450174c5d97bb0f3f0cb5db8b87 | [
"MIT"
] | 4 | 2021-08-30T22:48:29.000Z | 2021-09-18T21:25:12.000Z | sisyphe/__init__.py | antoinediez/Sisyphe | f6bb067cd8898450174c5d97bb0f3f0cb5db8b87 | [
"MIT"
] | 3 | 2021-06-10T20:21:17.000Z | 2021-09-28T12:47:44.000Z | import os
from .test.test_script import test_sisyphe
from .test.quick_test import test_neighbours
from .test.quick_test import test_bsr
from .test.quick_test import test_vicsek
from .test.quick_test import test_vicsek_disk
from .test.quick_test import test_dorsogna
from .test.quick_test import test_volume_exclusion
| 29 | 50 | 0.855799 | 53 | 319 | 4.849057 | 0.264151 | 0.217899 | 0.303502 | 0.396887 | 0.677043 | 0.677043 | 0.256809 | 0 | 0 | 0 | 0 | 0 | 0.100313 | 319 | 10 | 51 | 31.9 | 0.89547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6bc1b55252ab3ffa131468765e48d9ecc6226d6d | 178 | py | Python | office365/sharepoint/fields/related_field.py | vgrem/Office365-REST-Python-Client | 9975f44b3ce02dd56e321f89fdbafa14a83e532f | [
"MIT"
] | 544 | 2016-08-04T17:10:16.000Z | 2022-03-31T07:17:20.000Z | office365/sharepoint/fields/related_field.py | stefanstapinski/Office365-REST-Python-Client | e118941b9b91cf8f4bd0d9a4884de5d3f9203836 | [
"MIT"
] | 438 | 2016-10-11T12:24:22.000Z | 2022-03-31T19:30:35.000Z | office365/sharepoint/fields/related_field.py | stefanstapinski/Office365-REST-Python-Client | e118941b9b91cf8f4bd0d9a4884de5d3f9203836 | [
"MIT"
] | 202 | 2016-08-22T19:29:40.000Z | 2022-03-30T20:26:15.000Z | from office365.sharepoint.base_entity import BaseEntity
class RelatedField(BaseEntity):
    """Represents a Lookup Field that points to a given list on a Web site."""
    # NOTE: no members of its own; the docstring already forms the class body,
    # so the redundant `pass` statement was removed.
| 25.428571 | 78 | 0.758427 | 25 | 178 | 5.36 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 0.174157 | 178 | 6 | 79 | 29.666667 | 0.891156 | 0.382022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
d43357a56411065c137ab46d7ea86b05c75cd202 | 544 | py | Python | narwhallet/core/kcl/bip_utils/bip39/__init__.py | Snider/narwhallet | 0d528763c735f1e68b8264e302854d41e7cf1956 | [
"MIT"
] | 3 | 2021-12-29T11:25:13.000Z | 2022-01-16T13:57:17.000Z | narwhallet/core/kcl/bip_utils/bip39/__init__.py | Snider/narwhallet | 0d528763c735f1e68b8264e302854d41e7cf1956 | [
"MIT"
] | null | null | null | narwhallet/core/kcl/bip_utils/bip39/__init__.py | Snider/narwhallet | 0d528763c735f1e68b8264e302854d41e7cf1956 | [
"MIT"
] | 1 | 2022-01-16T13:57:20.000Z | 2022-01-16T13:57:20.000Z | from narwhallet.core.kcl.bip_utils.bip39.bip39_ex import Bip39InvalidFileError, Bip39ChecksumError
from narwhallet.core.kcl.bip_utils.bip39.ibip39_seed_generator import IBip39SeedGenerator
from narwhallet.core.kcl.bip_utils.bip39.bip39_mnemonic import (
Bip39EntropyBitLen, Bip39Languages, Bip39WordsNum,
Bip39EntropyGenerator, Bip39MnemonicGenerator, Bip39MnemonicValidator
)
from narwhallet.core.kcl.bip_utils.bip39.bip39_seed_generator import Bip39SeedGenerator
from narwhallet.core.kcl.bip_utils.bip39.bip39_utils import Bip39Utils
| 60.444444 | 98 | 0.873162 | 63 | 544 | 7.349206 | 0.380952 | 0.151188 | 0.194384 | 0.226782 | 0.410367 | 0.410367 | 0.410367 | 0.336933 | 0 | 0 | 0 | 0.082677 | 0.066176 | 544 | 8 | 99 | 68 | 0.82874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.625 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d4770bb31034796ab690d94c1e63c8d177b7b711 | 227 | py | Python | v1/validator_confirmation_services/tests/conftest.py | DucPhamTV/Bank | 4905ec7d63ef4daafe2119bf6b32928d4db2d4f2 | [
"MIT"
] | 94 | 2020-07-12T23:08:47.000Z | 2022-03-05T14:00:01.000Z | v1/validator_confirmation_services/tests/conftest.py | DucPhamTV/Bank | 4905ec7d63ef4daafe2119bf6b32928d4db2d4f2 | [
"MIT"
] | 84 | 2020-07-13T23:30:50.000Z | 2022-03-15T15:47:46.000Z | v1/validator_confirmation_services/tests/conftest.py | DucPhamTV/Bank | 4905ec7d63ef4daafe2119bf6b32928d4db2d4f2 | [
"MIT"
] | 63 | 2020-07-13T02:46:51.000Z | 2021-11-26T09:29:29.000Z | import pytest
from ..factories.validator_confirmation_service import ValidatorConfirmationServiceFactory
@pytest.fixture
def validator_confirmation_services():
    """Yield a freshly created batch of 100 validator confirmation services."""
    batch = ValidatorConfirmationServiceFactory.create_batch(100)
    yield batch
| 25.222222 | 90 | 0.872247 | 20 | 227 | 9.65 | 0.75 | 0.217617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014354 | 0.079295 | 227 | 8 | 91 | 28.375 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2e4dc1e1586da156ce4e744505e949cfd72eabe1 | 14,230 | py | Python | tests/unittests/tools/test_package.py | bossjones/scarlett-os | dc3b96604220a5848c51a14a343e97d464ad811b | [
"Apache-2.0"
] | 5 | 2016-11-08T21:01:00.000Z | 2018-05-07T11:02:43.000Z | tests/unittests/tools/test_package.py | bossjones/scarlett-os | dc3b96604220a5848c51a14a343e97d464ad811b | [
"Apache-2.0"
] | 854 | 2016-09-21T13:06:32.000Z | 2022-02-10T13:21:47.000Z | tests/unittests/tools/test_package.py | bossjones/scarlett-os | dc3b96604220a5848c51a14a343e97d464ad811b | [
"Apache-2.0"
] | 2 | 2016-12-02T15:12:41.000Z | 2017-02-25T08:21:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_package
----------------------------------
Tests for `scarlett_os` module.
"""
# import ipdb
# import mock
import builtins
import imp
import os
import signal
import sys
import unittest
import unittest.mock as mock
# import threading
import pytest
import scarlett_os
from scarlett_os.tools import package # Module with our thing to test
from contextlib import contextmanager
# source: https://github.com/YosaiProject/yosai/blob/master/test/isolated_tests/core/conf/conftest.py
# FIXME: Since we currently have an issue with mocks leaking into other tests,
# this fixture ensures that we isolate the patched object, stop mocks,
# and literally re-import modules to set environment back to normal.
# It's possible this will all get fixed when we upgrade to a later version of python past 3.5.2
@pytest.fixture(scope="function")
def package_unit_mocker_stopall(mocker):
    """Stop previous mocks, yield the mocker plugin obj, then stop all mocks again.

    The ``package`` module is re-imported on both sides of each test so that
    patched module-level state cannot leak between tests.
    """
    print("Called [setup]: mocker.stopall()")
    mocker.stopall()
    print("Called [setup]: imp.reload(package)")
    imp.reload(package)
    yield mocker
    print("Called [teardown]: mocker.stopall()")
    mocker.stopall()
    # BUG FIX: this log line previously said "[setup]" although it runs in teardown.
    print("Called [teardown]: imp.reload(package)")
    imp.reload(package)
# SOURCE: https://github.com/ansible/ansible/blob/370a7ace4b3c8ffb6187900f37499990f1b976a2/test/units/module_utils/basic/test_atomic_move.py
@pytest.fixture
def sys_and_site_mocks(package_unit_mocker_stopall):
    """Yield an initially empty dict in which tests may stash their mocks."""
    yield {}
# SOURCE: https://github.com/ansible/ansible/blob/370a7ace4b3c8ffb6187900f37499990f1b976a2/test/units/module_utils/basic/test_atomic_move.py
@pytest.fixture
def sys_and_site_mocks_darwin(package_unit_mocker_stopall):
    """Patch the package module's collaborators and seed Darwin-flavoured values."""
    patch = package_unit_mocker_stopall.patch
    targets = (
        ("os", "scarlett_os.tools.package.get_os_module"),
        ("sys", "scarlett_os.tools.package.get_sys_module"),
        ("get_python_lib",
         "scarlett_os.tools.package.get_distutils_sysconfig_function_get_python_lib"),
        ("flatpak_site_packages",
         "scarlett_os.tools.package.get_flatpak_site_packages"),
        ("package_list_with_dups",
         "scarlett_os.tools.package.create_list_with_dups"),
        ("uniq_package_list", "scarlett_os.tools.package.get_uniq_list"),
        ("create_package_symlinks",
         "scarlett_os.tools.package.create_package_symlinks"),
    )
    mocks = {name: patch(target) for name, target in targets}
    # Seed the fake environment the code under test will observe.
    mocks["os"].environ = dict()
    mocks["sys"].version.return_value = (
        "3.6.5 (default, Apr 25 2018, 14:22:56) "
        "\n[GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.42.1)]"
    )
    mocks["get_python_lib"].return_value = (
        lambda: "/usr/local/lib/python3.6/site-packages"
    )
    mocks["flatpak_site_packages"].return_value = "/app/lib/python3.6/site-packages"
    yield mocks
@pytest.fixture
def fake_stat(package_unit_mocker_stopall):
    """Yield a MagicMock shaped like an os.stat result: mode 0644, owned by uid/gid 0."""
    stat_result = package_unit_mocker_stopall.MagicMock()
    stat_result.st_mode = 0o0644
    stat_result.st_uid = stat_result.st_gid = 0
    yield stat_result
@pytest.mark.unittest
@pytest.mark.scarlettonly
@pytest.mark.scarlettonlyunittest
class TestPackage(object):
    """Unit tests for the helpers in ``scarlett_os.tools.package``."""

    @staticmethod
    def _make_jhbuild_mocks(mocker):
        """Patch the package module's collaborators and seed a jhbuild-style env.

        Shared by test_create_list_with_dups / test_uniq_package_list, which
        previously duplicated this whole setup inline.
        """
        mocks = {
            "os": mocker.patch("scarlett_os.tools.package.get_os_module"),
            "sys": mocker.patch("scarlett_os.tools.package.get_sys_module"),
            "get_python_lib": mocker.patch(
                "scarlett_os.tools.package.get_distutils_sysconfig_function_get_python_lib"
            ),
            "flatpak_site_packages": mocker.patch(
                "scarlett_os.tools.package.get_flatpak_site_packages"
            ),
            "create_package_symlinks": mocker.patch(
                "scarlett_os.tools.package.create_package_symlinks"
            ),
        }
        mocks["os"].environ = {"PYTHONPATH": "/usr/local/share/jhbuild/sitecustomize"}
        mocks["sys"].version.return_value = (
            "3.6.5 (default, Apr 25 2018, 14:22:56) "
            "\n[GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.42.1)]"
        )
        mocks["get_python_lib"].return_value = (
            lambda: "/usr/local/lib/python3.6/site-packages"
        )
        mocks["flatpak_site_packages"].return_value = [
            "/app/lib/python3.6/site-packages"
        ]
        return mocks

    @staticmethod
    def _all_package_paths(mocks):
        """Rebuild the nested path list exactly as the mocked env describes it."""
        python_version = mocks["sys"].version.return_value[:3]
        global_path_system = os.path.join("/usr/lib", "python" + python_version)
        py_paths = mocks["os"].environ.get("PYTHONPATH").split(":")
        flatpak_site_packages = mocks["flatpak_site_packages"].return_value
        global_sitepackages = [
            os.path.join(global_path_system, "dist-packages"),  # for Debian-based
            os.path.join(global_path_system, "site-packages"),  # for others
        ]
        return [flatpak_site_packages, py_paths, global_sitepackages]

    def _assert_getter_warns_nothing(self, func):
        """Calling a module-getter helper must not emit any warning."""
        with pytest.warns(None) as record:
            _ = func()
        assert len(record) == 0

    # pytest -s -p no:timeout -k test_get_uniq_list --pdb
    def test_get_uniq_list(self, sys_and_site_mocks):
        """An already-unique sequence passes through unchanged."""
        seq = [
            "/usr/local/share/jhbuild/sitecustomize",
            "/usr/lib/python3.5/dist-packages",
            "/usr/lib/python3.5/site-packages",
        ]
        assert scarlett_os.tools.package.get_uniq_list(seq) == [
            "/usr/local/share/jhbuild/sitecustomize",
            "/usr/lib/python3.5/dist-packages",
            "/usr/lib/python3.5/site-packages",
        ]

    def test_get_uniq_list_with_dups(self, sys_and_site_mocks):
        """Duplicates are dropped while first-seen order is preserved."""
        base = [
            "/usr/local/share/jhbuild/sitecustomize",
            "/usr/lib/python3.5/dist-packages",
            "/usr/lib/python3.5/site-packages",
        ]
        # base * 4 is the same explicit 12-item list the original test used.
        assert scarlett_os.tools.package.get_uniq_list(base * 4) == base

    def test_check_gi(self, sys_and_site_mocks, package_unit_mocker_stopall):
        """check_gi() emits no warning when PyGI imports cleanly."""
        sys_and_site_mocks["gi"] = package_unit_mocker_stopall.patch(
            "scarlett_os.tools.package.get_gi_module"
        )
        sys_and_site_mocks["add_gi_packages"] = package_unit_mocker_stopall.patch(
            "scarlett_os.tools.package.add_gi_packages"
        )
        with pytest.warns(None) as record:
            scarlett_os.tools.package.check_gi()
        assert len(record) == 0

    def test_get_os_module(self, sys_and_site_mocks, package_unit_mocker_stopall):
        """Importing the os module through the helper raises no warning."""
        self._assert_getter_warns_nothing(scarlett_os.tools.package.get_os_module)

    def test_get_sys_module(self, sys_and_site_mocks, package_unit_mocker_stopall):
        """Importing the sys module through the helper raises no warning."""
        self._assert_getter_warns_nothing(scarlett_os.tools.package.get_sys_module)

    def test_get_distutils_sysconfig_function_get_python_lib(
        self, sys_and_site_mocks, package_unit_mocker_stopall
    ):
        """Fetching distutils' get_python_lib raises no warning."""
        self._assert_getter_warns_nothing(
            scarlett_os.tools.package.get_distutils_sysconfig_function_get_python_lib
        )

    def test_get_itertools_module(
        self, sys_and_site_mocks, package_unit_mocker_stopall
    ):
        """Importing the itertools module through the helper raises no warning."""
        self._assert_getter_warns_nothing(
            scarlett_os.tools.package.get_itertools_module
        )

    def test_get_subprocess_module(
        self, sys_and_site_mocks, package_unit_mocker_stopall
    ):
        """Importing the subprocess module through the helper raises no warning."""
        self._assert_getter_warns_nothing(
            scarlett_os.tools.package.get_subprocess_module
        )

    def test_check_gi_import_error(
        self, sys_and_site_mocks, package_unit_mocker_stopall
    ):
        """check_gi() emits exactly one ImportWarning when PyGI is missing."""
        sys_and_site_mocks["gi"] = package_unit_mocker_stopall.patch(
            "scarlett_os.tools.package.get_gi_module"
        )
        sys_and_site_mocks["add_gi_packages"] = package_unit_mocker_stopall.patch(
            "scarlett_os.tools.package.add_gi_packages"
        )
        sys_and_site_mocks["gi"].side_effect = ImportError()
        with pytest.warns(ImportWarning) as record:
            scarlett_os.tools.package.check_gi()
        assert len(record) == 1
        assert record[0].message.args[0] == "PyGI library is not available"

    def test_add_gi_packages(
        self, sys_and_site_mocks_darwin, package_unit_mocker_stopall
    ):
        """add_gi_packages() runs against the mocked Darwin environment."""
        scarlett_os.tools.package.add_gi_packages()
        # The mocked sys.version must expose "3.6" as its major.minor prefix.
        assert sys_and_site_mocks_darwin["sys"].version.return_value[:3] == "3.6"

    def test_create_list_with_dups(self, package_unit_mocker_stopall):
        """create_list_with_dups flattens the nested path lists (dups allowed)."""
        mocks = self._make_jhbuild_mocks(package_unit_mocker_stopall)
        # Current value should be: ['/app/lib/python3.6/site-packages',
        # ['/usr/local/share/jhbuild/sitecustomize'],
        # ['/usr/lib/python3.6/dist-packages', '/usr/lib/python3.6/site-packages']]
        all_package_paths = self._all_package_paths(mocks)
        package_list_with_dups = scarlett_os.tools.package.create_list_with_dups(
            all_package_paths
        )
        # NOTE: "" was also accepted by the original test; kept for parity.
        for path in package_list_with_dups:
            assert path in [
                "/app/lib/python3.6/site-packages",
                "/usr/local/share/jhbuild/sitecustomize",
                "",
                "/usr/lib/python3.6/dist-packages",
                "/usr/lib/python3.6/site-packages",
            ]

    def test_uniq_package_list(self, package_unit_mocker_stopall):
        """get_uniq_list de-duplicates the flattened package path list."""
        mocks = self._make_jhbuild_mocks(package_unit_mocker_stopall)
        package_list_with_dups = scarlett_os.tools.package.create_list_with_dups(
            self._all_package_paths(mocks)
        )
        assert scarlett_os.tools.package.get_uniq_list(package_list_with_dups) == [
            "/app/lib/python3.6/site-packages",
            "/usr/local/share/jhbuild/sitecustomize",
            "/usr/lib/python3.6/dist-packages",
            "/usr/lib/python3.6/site-packages",
        ]

    def test_get_flatpak_site_packages(self, package_unit_mocker_stopall):
        """The flatpak site-packages path tracks the running interpreter version."""
        test_python_version = sys.version[:3]
        flatpak_site_packages = scarlett_os.tools.package.get_flatpak_site_packages()
        expected_site_packages = [
            "/app/lib/python{}/site-packages".format(test_python_version)
        ]
        assert flatpak_site_packages == expected_site_packages
| 38.668478 | 189 | 0.657063 | 1,815 | 14,230 | 4.871074 | 0.132231 | 0.061079 | 0.062776 | 0.100441 | 0.812238 | 0.794141 | 0.787467 | 0.770275 | 0.750368 | 0.744599 | 0 | 0.021659 | 0.234294 | 14,230 | 367 | 190 | 38.773842 | 0.789739 | 0.16416 | 0 | 0.650558 | 0 | 0.011152 | 0.285618 | 0.214675 | 0 | 0 | 0 | 0.002725 | 0.052045 | 1 | 0.063197 | false | 0 | 0.052045 | 0 | 0.118959 | 0.01487 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2e67722bfe87879e1227722dc760a1da112a6cde | 49 | py | Python | e3d/sound_management/__init__.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | 8 | 2017-04-19T03:59:43.000Z | 2020-04-29T00:29:12.000Z | e3d/sound_management/__init__.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | null | null | null | e3d/sound_management/__init__.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | 3 | 2018-04-26T16:57:46.000Z | 2021-03-01T05:48:06.000Z | from hissing import StatesEnum as SoundStatesEnum | 49 | 49 | 0.897959 | 6 | 49 | 7.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102041 | 49 | 1 | 49 | 49 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2e8c93292ae578f031367cce4761635fa71c6925 | 34 | py | Python | sumopy/__init__.py | xavivars/sumopy | 31663a0a7ec1d60b7df920dc858db70146d077cc | [
"MIT"
] | 2 | 2019-10-22T23:33:34.000Z | 2021-05-05T14:04:14.000Z | sumopy/__init__.py | xavivars/sumopy | 31663a0a7ec1d60b7df920dc858db70146d077cc | [
"MIT"
] | 1 | 2020-05-29T10:56:45.000Z | 2020-05-29T13:00:00.000Z | sumopy/__init__.py | xavivars/sumopy | 31663a0a7ec1d60b7df920dc858db70146d077cc | [
"MIT"
] | 1 | 2020-05-29T10:54:20.000Z | 2020-05-29T10:54:20.000Z | from sumopy.handler import Handler | 34 | 34 | 0.882353 | 5 | 34 | 6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2e8db8d98b064efa1e16b7fa8a427d0f1c6d6240 | 38 | py | Python | pruner/__init__.py | mattjegan/pruner | 8fb6faf0a4c111342f27120b84b50888186479cb | [
"Apache-2.0"
] | 3 | 2017-11-04T19:10:39.000Z | 2020-01-03T01:18:38.000Z | pruner/__init__.py | mattjegan/pruner | 8fb6faf0a4c111342f27120b84b50888186479cb | [
"Apache-2.0"
] | 5 | 2017-02-19T01:09:42.000Z | 2017-02-19T12:16:20.000Z | pruner/__init__.py | mattjegan/pruner | 8fb6faf0a4c111342f27120b84b50888186479cb | [
"Apache-2.0"
] | 3 | 2018-02-21T19:24:54.000Z | 2019-08-29T03:58:04.000Z | from pruner.pruner import Pruner, main | 38 | 38 | 0.842105 | 6 | 38 | 5.333333 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 38 | 1 | 38 | 38 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cf0b147c5408e0d659daca67d578e8fb8408f7b3 | 30 | py | Python | py_weernl/__init__.py | tvdsluijs/py_weernl | 4925d71d170b34e1307ea01cf4754de6e9f82eee | [
"MIT"
] | null | null | null | py_weernl/__init__.py | tvdsluijs/py_weernl | 4925d71d170b34e1307ea01cf4754de6e9f82eee | [
"MIT"
] | null | null | null | py_weernl/__init__.py | tvdsluijs/py_weernl | 4925d71d170b34e1307ea01cf4754de6e9f82eee | [
"MIT"
] | null | null | null | from .weerlive import weerLive | 30 | 30 | 0.866667 | 4 | 30 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 30 | 1 | 30 | 30 | 0.962963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cf6ec31595ef8a2fa94cde136f4af150bcf9dfe0 | 183 | py | Python | fastapi_versioned/helpers.py | ccharlesgb/fastapi-versioned | 5488cda86665de5cb2d0cfce7ff5660e90c6358f | [
"MIT"
] | null | null | null | fastapi_versioned/helpers.py | ccharlesgb/fastapi-versioned | 5488cda86665de5cb2d0cfce7ff5660e90c6358f | [
"MIT"
] | null | null | null | fastapi_versioned/helpers.py | ccharlesgb/fastapi-versioned | 5488cda86665de5cb2d0cfce7ff5660e90c6358f | [
"MIT"
] | null | null | null | from starlette.requests import Request
from fastapi_versioned import FastAPIVersioned
def get_parent_app(request: Request) -> FastAPIVersioned:
return request.app.state.parent
| 22.875 | 57 | 0.825137 | 22 | 183 | 6.727273 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120219 | 183 | 7 | 58 | 26.142857 | 0.919255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
cf826fc6634689698f29cef6e6f4a91c7e515cb9 | 22,769 | py | Python | Academia/views.py | 20carlos/AcademiaBaile | efe9834bfc485e77c8b07c875868629e1cb7744f | [
"Apache-2.0"
] | null | null | null | Academia/views.py | 20carlos/AcademiaBaile | efe9834bfc485e77c8b07c875868629e1cb7744f | [
"Apache-2.0"
] | null | null | null | Academia/views.py | 20carlos/AcademiaBaile | efe9834bfc485e77c8b07c875868629e1cb7744f | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.contrib.auth.models import User
from Academia.models import *
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index_view(request, template_name = 'Academia/index.html'):
    """Render the public landing (index) page.

    Parameters:
        request: incoming HTTP request.
        template_name: template to render.

    Returns:
        HttpResponse with the rendered index template; all local variables
        are exposed to the template via ``locals()``.

    Note: a large block of commented-out authentication code (carried as a
    stray string literal) was removed; it duplicated the logic already
    present in home_view.
    """
    return render(request, template_name, locals(),)
def login_view(request, template_name = 'Academia/login.html'):
    """Authenticate a user from the login form and start a session.

    Accepts either a username or an e-mail address (detected by the presence
    of '@') plus a password.  On success redirects to /home/ (or to the
    ``next`` URL when one was supplied via GET).  On failure re-renders the
    login template with a Spanish alert message in ``mensaje``.
    """
    # Initial state for the form-handling variables.
    state = ""  # NOTE(review): never read afterwards — looks unused; confirm before removing
    username = ""
    password = ""
    next = ""
    estado = 0  # alert flag
    mensaje = ""  # alert message
    if request.GET:
        next = request.GET['next']
    # Form submitted via POST.
    if request.POST:
        # Read the submitted credentials.
        username = request.POST['username']
        password = request.POST['password']
        try:
            # Resolve the account by e-mail when an '@' is present, else by username.
            if '@' in username:
                check = User.objects.get(email=username)
            else:
                check = User.objects.get(username=username)
            username = check.username
            # Validate the credentials against the database.
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    if next == "":
                        request.session.set_expiry(14400)  # session expires after 4 hours
                        #check.last_login = datetime.now()
                        #check.save()
                        return HttpResponseRedirect('/home/')
                    else:
                        return HttpResponseRedirect(next)
                else:
                    # A not-yet-activated user tried to log in.
                    estado = 1
                    mensaje = "¡Lo sentimos! Este usuario debe activarse."
                    #messages.warning(request, mensaje)
                    return render(request, template_name, locals(),)
            # authenticate() returned None: distinguish a wrong password here.
            valid = check.check_password(password)
            if not valid:
                # The password is not valid for this account.
                estado = 1
                mensaje = "¡Hay un problema! La contraseña es incorrecta."
                #messages.warning(request, mensaje)
                return render(request, template_name, locals(),)
            # NOTE(review): if the password IS valid but authenticate() still
            # returned None, execution falls through to the final render with
            # an empty ``mensaje`` — confirm this is intended.
        except ObjectDoesNotExist:
            if '@' in username:
                # No account is registered under this e-mail address.
                estado = 1
                mensaje = "¡Lo sentimos! El email no existe, registrese por favor."
                #messages.info(request, mensaje)
                return render(request, template_name, locals(),)
            else:
                # No account is registered under this username.
                estado = 1
                mensaje = "¡Upps! El nombre de usuario no existe, registrese por favor."
                ##messages.warning(request, mensaje)
                return render(request, template_name, locals(),)
    return render(request, template_name, {'mensaje':mensaje, 'username': username, 'next': next,},)
def home_view(request, template_name = 'Academia/home.html'):
    """Render the dashboard shown right after login.

    Loads the authenticated User, its extended Usuario profile and whether it
    belongs to the 'Administrador' group; anonymous visitors (or users with
    no Usuario profile) are redirected to /login.

    Parameters:
        request: incoming HTTP request (provides the logged-in user).
        template_name: template to render.

    Returns:
        HttpResponse rendering the template with all locals, or a redirect
        to /login when the user cannot be resolved.
    """
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    except ObjectDoesNotExist:
        # Was a bare `except:`; narrowed to the lookup failure actually expected
        # (anonymous user or missing Usuario profile).
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag
    mensaje = ""  # alert message
    return render(request, template_name, locals(),)
def login_out(request):
    """End the current session and send the visitor back to the landing page."""
    logout(request)
    return HttpResponseRedirect('/')
def lista_alumnos(request, template_name = 'Academia/lista_alumnos.html'):
    """List every registered Alumno.

    (The previous docstring was copy-pasted from home_view and wrong.)

    Parameters:
        request: incoming HTTP request (provides the logged-in user).
        template_name: template to render.

    Returns:
        HttpResponse rendering the template with ``alumnos`` (all Alumno
        rows) in its context, or a redirect to /login when the user cannot
        be resolved.
    """
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    except ObjectDoesNotExist:
        # Was a bare `except:`; narrowed to the expected lookup failure.
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag
    mensaje = ""  # alert message
    alumnos = Alumno.objects.all()
    return render(request, template_name, locals(),)
def lista_clases(request, template_name = 'Academia/lista_clases.html'):
    """List every registered Clase.

    (The previous docstring was copy-pasted from home_view and wrong.)

    Parameters:
        request: incoming HTTP request (provides the logged-in user).
        template_name: template to render.

    Returns:
        HttpResponse rendering the template with ``clases`` (all Clase rows)
        in its context, or a redirect to /login when the user cannot be
        resolved.
    """
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    except ObjectDoesNotExist:
        # Was a bare `except:`; narrowed to the expected lookup failure.
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag
    mensaje = ""  # alert message
    clases = Clase.objects.all()
    return render(request, template_name, locals(),)
def agregar_alumno(request, template_name = 'Academia/agregar_alumno.html'):
    """Show the add-student form (GET) and create an Alumno (POST).

    (The previous docstring was copy-pasted from home_view and wrong.)

    On POST, rejects duplicates (same nombre/a_paterno/a_materno/edad) with
    an alert message; otherwise creates the Alumno attached to the selected
    Clase and re-renders with a success message.

    Parameters:
        request: incoming HTTP request (provides the logged-in user and form data).
        template_name: template to render.

    Returns:
        HttpResponse rendering the template (``clases`` feeds the class
        selector; ``estado``/``mensaje`` drive the alert), or a redirect to
        /login when the user cannot be resolved.
    """
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    except ObjectDoesNotExist:
        # Was a bare `except:`; narrowed to the expected lookup failure.
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag
    mensaje = ""  # alert message
    clases = Clase.objects.all()
    if request.method == "POST":
        nombre = request.POST['nombre']
        a_paterno = request.POST['a_paterno']
        a_materno = request.POST['a_materno']
        edad = request.POST['edad']
        clase = request.POST['clase']
        # `.objects.all().filter(...)` simplified to `.objects.filter(...)`.
        if Alumno.objects.filter(nombre=nombre, a_paterno=a_paterno, a_materno=a_materno, edad=edad).exists():
            estado = 1
            mensaje = 'Ya existe el alumno!'
            return render(request, template_name, locals(),)
        else:
            clase_obj = Clase.objects.get(pk=clase)
            new_alumno = Alumno(nombre=nombre, a_paterno=a_paterno, a_materno=a_materno, edad=edad, clase=clase_obj)
            new_alumno.save()
            estado = 1
            mensaje = 'Alumno agregado con exito!'
            return render(request, template_name, locals(),)
    return render(request, template_name, locals(),)
def agregar_clase(request, template_name = 'Academia/agregar_clase.html'):
    '''
    agregar_clase view

    Shows the "add class" form and, on POST, creates a Clase unless one
    with the same name and schedule already exists.

    Parametros:
    -request --[peticion]-- incoming HTTP request (form data on POST)
    -template_name --[string]-- template rendered by this view
    Excepciones:
    -any failure while resolving the user redirects to /login
    Return:
    -locals() --[dict]-- every local variable, passed as template context
    '''
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag read by the template
    mensaje = ""  # alert message read by the template
    if request.method == "POST":
        # NOTE(review): direct indexing raises KeyError (HTTP 500) on a
        # malformed form — confirm the form always posts these fields.
        nombre = request.POST['nombre']
        horario = request.POST['horario']
        # Duplicate guard; ".objects.filter" replaces the redundant ".all().filter".
        if Clase.objects.filter(nombre=nombre, horario=horario).exists():
            estado = 1
            mensaje = 'Ya existe la clase con el mismo horario!'
            return render(request, template_name, locals())
        new_clase = Clase(nombre=nombre, horario=horario)
        new_clase.save()
        estado = 1
        mensaje = 'Clase agregada con exito!'
        return render(request, template_name, locals())
    return render(request, template_name, locals())
def eliminar_alumno(request, id_alumno):
    '''
    eliminar_alumno view

    Deletes the Alumno with the given primary key and returns to the
    student list. (Removed: unused estado/mensaje locals and a large
    dead string literal left over from agregar_clase.)

    Parametros:
    -request --[peticion]-- incoming HTTP request, used to resolve the user
    -id_alumno --[int]-- primary key of the Alumno to delete
    Excepciones:
    -any failure while resolving the user redirects to /login
    Return:
    -HttpResponseRedirect to /lista_alumnos
    '''
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        return HttpResponseRedirect('/login')
    # NOTE(review): Alumno.DoesNotExist propagates as HTTP 500 for an
    # unknown id — consider get_object_or_404 if a 404 is preferred.
    alumno = Alumno.objects.get(id=id_alumno)
    alumno.delete()
    return HttpResponseRedirect('/lista_alumnos')
def eliminar_clase(request, id_clase):
    '''
    eliminar_clase view

    Deletes the Clase with the given primary key and returns to the
    class list. (Removed: unused estado/mensaje locals.)

    Parametros:
    -request --[peticion]-- incoming HTTP request, used to resolve the user
    -id_clase --[int]-- primary key of the Clase to delete
    Excepciones:
    -any failure while resolving the user redirects to /login
    Return:
    -HttpResponseRedirect to /lista_clases
    '''
    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        return HttpResponseRedirect('/login')
    # NOTE(review): Clase.DoesNotExist propagates as HTTP 500 for an
    # unknown id — consider get_object_or_404 if a 404 is preferred.
    clase = Clase.objects.get(id=id_clase)
    clase.delete()
    return HttpResponseRedirect('/lista_clases')
def modificar_alumno(request, id_alumno, template_name = 'Academia/modificar_alumno.html'):
    '''
    modificar_alumno view

    Shows the edit form for one Alumno and, on POST, saves the changes
    unless another student already has identical data.

    Parametros:
    -request --[peticion]-- incoming HTTP request (form data on POST)
    -id_alumno --[int]-- primary key of the Alumno being edited
    -template_name --[string]-- template rendered by this view
    Excepciones:
    -any failure while resolving the user redirects to /login
    Return:
    -locals() --[dict]-- every local variable, passed as template context
    '''
    def _alumno_dict(obj):
        # Template-friendly snapshot of an Alumno row (was duplicated inline twice).
        return {
            "nombre": obj.nombre,
            "a_paterno": obj.a_paterno,
            "a_materno": obj.a_materno,
            "edad": obj.edad,
            "clase": obj.clase.nombre + " : " + obj.clase.horario,
            "clase_id": obj.clase.id,
        }

    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag read by the template
    mensaje = ""  # alert message read by the template
    alumno_obj = Alumno.objects.get(id=id_alumno)
    alumno = _alumno_dict(alumno_obj)
    clases = Clase.objects.all()  # choices for the "clase" select box
    if request.method == "POST":
        nombre = request.POST['nombre']
        a_paterno = request.POST['a_paterno']
        a_materno = request.POST['a_materno']
        edad = request.POST['edad']
        clase = request.POST['clase']
        # Duplicate guard, ignoring the record being edited.
        if Alumno.objects.filter(nombre=nombre, a_paterno=a_paterno, a_materno=a_materno, edad=edad).exclude(id = id_alumno).exists():
            estado = 1
            mensaje = 'Ya existe un alumno con estos datos!'
            return render(request, template_name, locals())
        clase_obj = Clase.objects.get(pk=clase)
        alumno_obj.nombre = nombre
        alumno_obj.a_paterno = a_paterno
        alumno_obj.a_materno = a_materno
        alumno_obj.edad = edad
        alumno_obj.clase = clase_obj
        alumno_obj.save()
        estado = 1
        mensaje = 'Alumno modificado con exito!'
        # Refresh the snapshot so the form shows the saved values.
        alumno = _alumno_dict(alumno_obj)
        return render(request, template_name, locals())
    return render(request, template_name, locals())
def modificar_clase(request, id_clase, template_name = 'Academia/modificar_clase.html'):
    '''
    modificar_clase view

    Shows the edit form for one Clase and, on POST, saves the changes
    unless another class already has the same name and schedule.

    Parametros:
    -request --[peticion]-- incoming HTTP request (form data on POST)
    -id_clase --[int]-- primary key of the Clase being edited
    -template_name --[string]-- template rendered by this view
    Excepciones:
    -any failure while resolving the user redirects to /login
    Return:
    -locals() --[dict]-- every local variable, passed as template context
    '''
    def _clase_dict(obj):
        # Template-friendly snapshot of a Clase row (was duplicated inline twice).
        return {
            "nombre": obj.nombre,
            "horario": obj.horario,
        }

    try:
        usuario = request.user
        user = User.objects.get(username = usuario)
        usuarioExt = Usuario.objects.get(user = user)
        isAdministrador = user.groups.filter(name = 'Administrador').exists()
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        return HttpResponseRedirect('/login')
    estado = 0  # alert flag read by the template
    mensaje = ""  # alert message read by the template
    clase_obj = Clase.objects.get(id=id_clase)
    clase = _clase_dict(clase_obj)
    if request.method == "POST":
        nombre = request.POST['nombre']
        horario = request.POST['horario']
        # Duplicate guard, ignoring the record being edited.
        if Clase.objects.filter(nombre=nombre, horario=horario).exclude(id = id_clase).exists():
            estado = 1
            mensaje = 'Ya existe una clase con estos datos!'
            return render(request, template_name, locals())
        clase_obj.nombre = nombre
        clase_obj.horario = horario
        clase_obj.save()
        estado = 1
        mensaje = 'Clase modificada con exito!'
        # Refresh the snapshot so the form shows the saved values.
        clase = _clase_dict(clase_obj)
        return render(request, template_name, locals())
    return render(request, template_name, locals())
def eventos(request, template_name = 'Academia/eventos.html'):
    '''
    eventos view

    Renders the events page. No authentication is performed: the login
    guard existed only as a dead string literal, removed in this revision.

    Parametros:
    -request --[peticion]-- incoming HTTP request
    -template_name --[string]-- template rendered by this view
    Return:
    -locals() --[dict]-- every local variable, passed as template context
    '''
    return render(request, template_name, locals())
def maestros(request, template_name = 'Academia/maestros.html'):
    '''
    maestros view

    Renders the teachers page. No authentication is performed: the login
    guard existed only as a dead string literal, removed in this revision.

    Parametros:
    -request --[peticion]-- incoming HTTP request
    -template_name --[string]-- template rendered by this view
    Return:
    -locals() --[dict]-- every local variable, passed as template context
    '''
    return render(request, template_name, locals())
def contacto(request, template_name = 'Academia/contacto.html'):
    '''
    contacto view

    Renders the contact page. No authentication is performed: the login
    guard below is disabled (it is an unused string literal).

    Parametros:
    -request --[peticion]-- incoming HTTP request
    -template_name --[string]-- template rendered by this view
    Return:
    -locals() --[dict]-- every local variable, passed as template context
    '''
    # NOTE(review): the triple-quoted block below is dead code — an unused
    # string expression holding a disabled login guard; kept verbatim,
    # presumably for a future re-enable.
    '''try:
    usuario = request.user
    user = User.objects.get(username = usuario)
    usuarioExt = Usuario.objects.get(user = user)
    isAdministrador = user.groups.filter(name = 'Administrador').exists()
    except:
    return HttpResponseRedirect('/login/')
    estado = 0#Bandera de alertas
    mensaje = ""#Mensaje de la alerta
    count_instrumentos = Instrumento.objects.filter(usuario=usuarioExt).count()
    count_grupos = Cat_grupo.objects.filter(usuario=usuarioExt).count()'''
return render(request, template_name, locals(),) | 40.156966 | 140 | 0.624402 | 2,473 | 22,769 | 5.666397 | 0.084917 | 0.04453 | 0.050168 | 0.052023 | 0.837223 | 0.81039 | 0.796546 | 0.788339 | 0.780918 | 0.767787 | 0 | 0.002058 | 0.274452 | 22,769 | 567 | 141 | 40.156966 | 0.845944 | 0.348764 | 0 | 0.659004 | 0 | 0 | 0.104789 | 0.02031 | 0 | 0 | 0 | 0.001764 | 0 | 1 | 0.057471 | false | 0.015326 | 0.022989 | 0 | 0.226054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cf8875e33574551c82018d0e9e558cd5b804f0ee | 5,934 | py | Python | text_summary/tests/test_summaries.py | aevtikheev/text_summary | 422cad9155da685cfba0cd8e1aecd1068c0bfeb6 | [
"MIT"
] | null | null | null | text_summary/tests/test_summaries.py | aevtikheev/text_summary | 422cad9155da685cfba0cd8e1aecd1068c0bfeb6 | [
"MIT"
] | null | null | null | text_summary/tests/test_summaries.py | aevtikheev/text_summary | 422cad9155da685cfba0cd8e1aecd1068c0bfeb6 | [
"MIT"
] | null | null | null | """Tests for /summaries/ resource."""
import json
import pytest
SUMMARIES_ENDPOINT = 'summaries'
ID_FIELD = 'id'
URL_FIELD = 'url'
SUMMARY_FIELD = 'summary'
CREATED_AT_FIELD = 'created_at'
ERROR_DETAIL_FIELD = 'detail'
def test_create_summary(test_app_with_db, mocked_summarizer):
    """POSTing a valid URL creates a summary (201) and echoes the URL back."""
    summary_url = 'http://example.com'
    response = test_app_with_db.post(
        f'{SUMMARIES_ENDPOINT}/',
        data=json.dumps({URL_FIELD: summary_url}),
    )
    assert response.status_code == 201, f'Invalid response code: {response.status_code}'
    # Fix: the failure message was accidentally a 1-tuple (trailing comma),
    # which pytest would render as a tuple repr instead of plain text.
    assert response.json()[URL_FIELD] == summary_url, (
        f'Invalid summary URL: {response.json()[URL_FIELD]}'
    )
def test_read_summary(test_app_with_db, existing_summary):
    """GET /summaries/{id}/ returns the stored record with all fields set."""
    summary_id, summary_url = existing_summary
    detail_url = f'{SUMMARIES_ENDPOINT}/{summary_id}/'

    response = test_app_with_db.get(detail_url)
    assert response.status_code == 200, f'Invalid response code: {response.status_code}'

    response_json = response.json()
    assert response_json[ID_FIELD] == summary_id, f'Invalid id field: {response_json[ID_FIELD]}'
    assert response_json[URL_FIELD] == summary_url, f'Invalid url field: {response_json[URL_FIELD]}'
    assert SUMMARY_FIELD in response_json, 'Missing summary field'
    assert response_json.get(CREATED_AT_FIELD), 'Missing or empty created_at field'
def test_read_all_summaries(test_app_with_db, existing_summary):
    """The list endpoint contains the pre-created summary exactly once."""
    summary_id, summary_url = existing_summary

    response = test_app_with_db.get(f'{SUMMARIES_ENDPOINT}/')
    assert response.status_code == 200, f'Invalid response code: {response.status_code}'

    response_json = response.json()
    # Comprehension instead of filter()+lambda; same membership count check.
    matches = [summary for summary in response_json if summary[ID_FIELD] == summary_id]
    assert len(matches) == 1, (
        'Existing summary is not present in the result.'
    )
def test_update_summary(test_app_with_db, existing_summary):
    """PUT replaces the summary text and keeps id/url/created_at intact."""
    summary_id, summary_url = existing_summary
    new_summary = 'updated_summary'
    response = test_app_with_db.put(
        f'{SUMMARIES_ENDPOINT}/{summary_id}/',
        data=json.dumps({URL_FIELD: summary_url, SUMMARY_FIELD: new_summary}),
    )
    assert response.status_code == 200, f'Invalid response code: {response.status_code}'
    response_json = response.json()
    assert response_json[ID_FIELD] == summary_id, f'Invalid id field: {response_json[ID_FIELD]}'
    assert response_json[URL_FIELD] == summary_url, f'Invalid url field: {response_json[URL_FIELD]}'
    # Fix: the failure message was accidentally a 1-tuple (trailing comma),
    # which pytest would render as a tuple repr instead of plain text.
    assert response_json[SUMMARY_FIELD] == new_summary, (
        f'Invalid summary field: {response_json[SUMMARY_FIELD]}'
    )
    assert response_json.get(CREATED_AT_FIELD), 'Missing or empty created_at field'
def test_delete_summary(test_app_with_db, existing_summary):
    """Deleting an existing summary returns 200."""
    summary_id = existing_summary[0]
    response = test_app_with_db.delete(f'{SUMMARIES_ENDPOINT}/{summary_id}/')
    assert response.status_code == 200, f'Invalid response code: {response.status_code}'
@pytest.mark.negative
@pytest.mark.parametrize(
    'payload',
    [{}, {URL_FIELD: 'invalid://url'}],
    ids=['empty payload', 'incorrect url'],
)
def test_create_summary_incorrect_payload(test_app_with_db, payload, mocked_summarizer):
    """Malformed create payloads are rejected with 422 and an error detail."""
    # Bug fix: the parametrized `payload` was never used — the body was always
    # json.dumps({}), so the 'incorrect url' case never exercised URL validation.
    response = test_app_with_db.post(f'{SUMMARIES_ENDPOINT}/', data=json.dumps(payload))
    assert response.status_code == 422, f'Invalid response code: {response.status_code}'
    assert response.json().get(ERROR_DETAIL_FIELD), 'Details about the error are not provided'
@pytest.mark.negative
@pytest.mark.parametrize(
    'summary_id,response_code',
    [('abc', 422), ('0', 422), ('99999999', 404)],
    ids=['non-digit ID', 'zero ID', 'Nonexistent ID'],
)
def test_read_summary_incorrect_id(test_app_with_db, summary_id, response_code):
    """Bad IDs on GET yield the expected error status plus a detail body."""
    detail_url = f'{SUMMARIES_ENDPOINT}/{summary_id}/'
    response = test_app_with_db.get(detail_url)
    assert response.status_code == response_code, f'Invalid response code: {response.status_code}'
    body = response.json()
    assert body.get(ERROR_DETAIL_FIELD), 'Details about the error are not provided'
@pytest.mark.negative
@pytest.mark.parametrize(
    'summary_id,response_code',
    [('abc', 422), ('0', 422), ('99999999', 404)],
    ids=['non-digit ID', 'zero ID', 'Nonexistent ID'],
)
def test_update_summary_incorrect_id(test_app_with_db, summary_id, response_code):
    """Bad IDs on PUT yield the expected error status plus a detail body."""
    update_body = {URL_FIELD: 'http://example.com', SUMMARY_FIELD: 'updated_summary'}
    response = test_app_with_db.put(
        f'{SUMMARIES_ENDPOINT}/{summary_id}/',
        data=json.dumps(update_body),
    )
    assert response.status_code == response_code, f'Invalid response code: {response.status_code}'
    assert response.json().get(ERROR_DETAIL_FIELD), 'Details about the error are not provided'
@pytest.mark.negative
@pytest.mark.parametrize(
    'payload',
    [
        {SUMMARY_FIELD: 'new_summary'},
        {SUMMARY_FIELD: 'new_summary', URL_FIELD: 'invalid://url'},
        {URL_FIELD: 'http://example.com'},
        {},
    ],
    ids=['Missing URL', 'Incorrect URL', 'Missing summary', 'Empty payload'],
)
def test_update_summary_incorrect_payload(test_app_with_db, existing_summary, payload):
    """Malformed update payloads are rejected with 422 and an error detail."""
    summary_id, summary_url = existing_summary
    detail_url = f'{SUMMARIES_ENDPOINT}/{summary_id}/'
    response = test_app_with_db.put(detail_url, data=json.dumps(payload))
    assert response.status_code == 422, f'Invalid response code: {response.status_code}'
    body = response.json()
    assert body.get(ERROR_DETAIL_FIELD), 'Details about the error are not provided'
@pytest.mark.negative
@pytest.mark.parametrize(
    'summary_id,response_code',
    [('abc', 422), ('0', 422), ('99999999', 404)],
    ids=['non-digit ID', 'zero ID', 'Nonexistent ID'],
)
def test_delete_summary_incorrect_id(test_app_with_db, summary_id, response_code):
    """Bad IDs on DELETE yield the expected error status plus a detail body."""
    detail_url = f'{SUMMARIES_ENDPOINT}/{summary_id}/'
    response = test_app_with_db.delete(detail_url)
    assert response.status_code == response_code, f'Invalid response code: {response.status_code}'
    body = response.json()
    assert body.get(ERROR_DETAIL_FIELD), 'Details about the error are not provided'
| 37.320755 | 100 | 0.723795 | 799 | 5,934 | 5.066333 | 0.100125 | 0.08004 | 0.054348 | 0.064229 | 0.807065 | 0.785079 | 0.781374 | 0.739872 | 0.739872 | 0.722579 | 0 | 0.015032 | 0.147961 | 5,934 | 158 | 101 | 37.556962 | 0.785601 | 0.005224 | 0 | 0.5 | 0 | 0 | 0.310327 | 0.127692 | 0 | 0 | 0 | 0 | 0.215517 | 1 | 0.086207 | false | 0 | 0.017241 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d8a36dc8a94caa352f73508fe16a89b3021c8b21 | 35 | py | Python | python/my/tensorflow/__init__.py | HuiyingLi/DIIN | cbc538b348858fb6de09a27d2fb8b220efd1c3e2 | [
"Apache-2.0"
] | 262 | 2017-10-10T18:35:29.000Z | 2022-02-25T12:02:18.000Z | python/my/tensorflow/__init__.py | HuiyingLi/DIIN | cbc538b348858fb6de09a27d2fb8b220efd1c3e2 | [
"Apache-2.0"
] | 20 | 2017-11-23T01:12:03.000Z | 2022-02-09T23:30:47.000Z | python/my/tensorflow/__init__.py | HuiyingLi/DIIN | cbc538b348858fb6de09a27d2fb8b220efd1c3e2 | [
"Apache-2.0"
] | 69 | 2017-10-30T19:05:24.000Z | 2021-09-29T13:48:23.000Z | from my.tensorflow.general import * | 35 | 35 | 0.828571 | 5 | 35 | 5.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 35 | 1 | 35 | 35 | 0.90625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d8a393af96d27cc6c4f1479a51a1f801c4e2ec84 | 14,845 | py | Python | Sajeon.py | joshuachoe/KDicDisc | d1bee5ec93c5d3284117c9c69d363083b5ae7dc2 | [
"MIT"
] | null | null | null | Sajeon.py | joshuachoe/KDicDisc | d1bee5ec93c5d3284117c9c69d363083b5ae7dc2 | [
"MIT"
] | null | null | null | Sajeon.py | joshuachoe/KDicDisc | d1bee5ec93c5d3284117c9c69d363083b5ae7dc2 | [
"MIT"
] | null | null | null | #############################################################################
# Sajeon Bot created by Joshua Choe (Caesura#5738) #
# Bot 'frame' was used from the Discord Bot Tutorial from HABchy #1665 #
# #
# I made this as a fun project to do over the winter break. #
# It seemed like there was a need for a korean dictionary bot for discord #
# that wasn't being met, so just thought I might as well make one myself #
#############################################################################
# These are the dependecies. The bot depends on these to function, hence the name. Please do not change these unless your adding to them, because they can break the bot.
import discord
import asyncio
from discord.ext.commands import Bot
from discord.ext import commands
import platform
from urllib.request import urlopen
from langdetect import detect
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import quote
# Bot configuration: command prefix ("^"), help text shown by the default
# help command, and pm_help=True so help is delivered via direct message.
client = Bot(description="Use '^dic or ^얓 ___' to summon a definition from naver!", command_prefix="^", pm_help = True)
# The on_ready handler below runs every time the bot (re)connects; it only
# logs connection stats and useful links to the console.
# Avoid adding blocking work there.
@client.event
async def on_ready():
    """Console banner: connection stats, invite link, and credits.

    Fired by discord.py once the bot is connected and ready; purely
    informational — no state is changed here.
    """
    print('Logged in as '+client.user.name+' (ID:'+client.user.id+') | Connected to '+str(len(client.servers))+' servers | Connected to '+str(len(set(client.get_all_members())))+' users')
    print('--------')
    print('Current Discord.py Version: {} | Current Python Version: {}'.format(discord.__version__, platform.python_version()))
    print('--------')
    print('Use this link to invite {}:'.format(client.user.name))
    print('https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=117824'.format(client.user.id))
    print('--------')
    print('Support Discord Server: https://discord.gg/FNNNgqb')
    print('Github Link: https://github.com/Habchy/BasicBot')
    print('--------')
    print('Bot Tutorial created by Habchy#1665')
    print('Sajeon created by Caesura#5738')
    print()
# Dictionary lookup command: "^dic <word or phrase>".
@client.command()
async def dic(*args):
    """Look up the query on Naver's endic dictionary and post the first result.

    The query language is auto-detected: a Korean query is looked up
    KO->EN (red embed), an English query EN->KO (blue embed); anything
    else gets an error message. The embed shows the headword, its hanja
    or part of speech, the first definition, and an example-sentence
    pair when one exists.
    """
    if args:
        # Re-join multi-word queries into a single search string.
        if len(args) > 1:
            query = " ".join(args)
        elif len(args) == 1:
            query = args[0]
        # Detect the language of the query (langdetect).
        lang = detect(query)
        # Korean query: translate Korean to English.
        if lang == "ko":
            # URL that searches Naver endic for the word.
            search_url = "http://endic.naver.com/search.nhn?sLn=kr&searchOption=all&query=" + quote(query)
            # Parallel lists: index i describes the i-th search result.
            listing_list = []
            hanja_list = []
            detail_link_list = []
            definition_list = []
            kr_ex_sent_list = []
            en_ex_sent_list = []
            # Fetch the search page and parse it with BeautifulSoup.
            with urllib.request.urlopen(search_url) as response:
                soup = BeautifulSoup(response.read(), "html.parser")
                for header in soup.find_all('span', class_='fnt_e30'):
                    # Headword text. NOTE(review): the bare except maps any
                    # failure to None — confirm that is intentional.
                    try:
                        listing_list.append(header.find('a').text)
                    except:
                        listing_list.append(None)
                    # Link to the entry's detail page.
                    detail_link_list.append("http://endic.naver.com" + header.find('a')['href'])
                    # Hanja for the word, when present (usually only the first entry has it).
                    if header.contents[2]:
                        hanja_list.append(header.contents[2].strip())
                    else:
                        hanja_list.append("")
                # Each block holds one definition plus its example sentences.
                for block in soup.find_all('div', class_='align_right'):
                    definition_list.append(block.find('span', class_='fnt_k05').text)
                    # Korean example sentence, if present.
                    if block.find('span', class_='fnt_e07 _ttsText'):
                        kr_ex_sent_list.append(block.find('span', class_='fnt_e07 _ttsText').text)
                    else:
                        kr_ex_sent_list.append("")
                    # English example sentence, if present.
                    if block.find('span', class_='fnt_k10 _ttsText'):
                        en_ex_sent_list.append(block.find('span', class_='fnt_k10 _ttsText').text)
                    else:
                        en_ex_sent_list.append("")
                response.close()  # redundant: the with-statement already closes the response
            # NOTE(review): assumes at least one result — an unknown word
            # would raise IndexError here.
            # With no example sentence, show a placeholder; otherwise show the pair.
            if kr_ex_sent_list[0] == "":
                single_output = """**[{0}:]({1})** {2} {3}\n\t*예시 문장이 없습니다 / No example sentence*""".format(listing_list[0],detail_link_list[0],hanja_list[0],definition_list[0])
            else:
                single_output = """**[{0}:]({1})** {2} {3}\n\t*{4}*\n\t*{5}*""".format(listing_list[0],detail_link_list[0],hanja_list[0],definition_list[0],kr_ex_sent_list[0],en_ex_sent_list[0])
            # Build the embed (red = KO->EN) and send it to the channel.
            embed_title = "Results for {0}".format(query)
            simple_em = discord.Embed(title=embed_title, description=single_output, url=search_url, colour=discord.Colour.red())
            await client.say(embed=simple_em)
        # English query: translate English to Korean.
        elif query.isalpha() == True or lang == 'en':
            # URL that searches Naver endic for the word.
            search_url = "http://endic.naver.com/search.nhn?sLn=kr&isOnlyViewEE=N&query=" + quote(query)
            # Parallel lists: index i describes the i-th search result.
            listing_list = []
            part_of_speech_list = []
            detail_link_list = []
            definition_list = []
            kr_ex_sent_list = []
            en_ex_sent_list = []
            # Fetch the search page and parse it with BeautifulSoup.
            with urllib.request.urlopen(search_url) as response:
                soup = BeautifulSoup(response.read(), "html.parser")
                for header in soup.find_all('span', class_='fnt_e30'):
                    # Headword text. NOTE(review): bare except, as above.
                    try:
                        listing_list.append(header.find('a').text)
                    except:
                        listing_list.append(None)
                    # Link to the entry's detail page.
                    detail_link_list.append("http://endic.naver.com" + header.find('a')['href'])
                # Each block holds one definition plus its example sentences.
                for block in soup.find_all('div', class_='align_right'):
                    # Part of speech, if present.
                    if block.find('span', class_='fnt_k09'):
                        part_of_speech_list.append(block.find('span', class_='fnt_k09').text)
                    else:
                        part_of_speech_list.append("")
                    definition_list.append(block.find('span', class_='fnt_k05').text)
                    # English example sentence, if present.
                    if block.find('span', class_='fnt_e07 _ttsText'):
                        en_ex_sent_list.append(block.find('span', class_='fnt_e07 _ttsText').text)
                    else:
                        en_ex_sent_list.append("")
                    # Korean example sentence, if present.
                    if block.find('span', class_='fnt_k10 _ttsText'):
                        kr_ex_sent_list.append(block.find('span', class_='fnt_k10 _ttsText').text)
                    else:
                        kr_ex_sent_list.append("")
                response.close()  # redundant: the with-statement already closes the response
            # With no example sentence, show a placeholder; otherwise show the pair.
            if en_ex_sent_list[0] == "":
                single_output = """**[{0}:]({1})** {2} {3}\n\t*No example sentence / 예시 문장이 없습니다*""".format(listing_list[0],detail_link_list[0],part_of_speech_list[0],definition_list[0])
            else:
                single_output = """**[{0}:]({1})** {2} {3}\n\t*{4}*\n\t*{5}*""".format(listing_list[0],detail_link_list[0],part_of_speech_list[0],definition_list[0],en_ex_sent_list[0],kr_ex_sent_list[0])
            # Build the embed (blue = EN->KO) and send it to the channel.
            embed_title = "Results for {0}".format(query)
            simple_em = discord.Embed(title=embed_title, description=single_output, url=search_url, colour=discord.Colour.blue())
            await client.say(embed=simple_em)
        else:
            await client.say("Invalid input or a non Korean/English language! Please try again.")
# Korean-keyboard alias for ^dic: typing "dic" with the Korean IME active
# produces "얓", so this duplicates the dic command under that name.
@client.command()
async def 얓(*args):
    """Identical behaviour to ^dic — see that command for details.

    NOTE(review): this is a verbatim copy of dic(); consider registering
    it as a command alias instead of duplicating the scraping logic.
    """
    if args:
        # Re-join multi-word queries into a single search string.
        if len(args) > 1:
            query = " ".join(args)
        elif len(args) == 1:
            query = args[0]
        # Detect the language of the query (langdetect).
        lang = detect(query)
        if lang == "ko":
            # Korean query: translate Korean to English.
            search_url = "http://endic.naver.com/search.nhn?sLn=kr&searchOption=all&query=" + quote(query)
            # Parallel lists: index i describes the i-th search result.
            listing_list = []
            hanja_list = []
            detail_link_list = []
            definition_list = []
            kr_ex_sent_list = []
            en_ex_sent_list = []
            # Fetch the search page and parse it with BeautifulSoup.
            with urllib.request.urlopen(search_url) as response:
                soup = BeautifulSoup(response.read(), "html.parser")
                for header in soup.find_all('span', class_='fnt_e30'):
                    # Headword text. NOTE(review): the bare except maps any
                    # failure to None — confirm that is intentional.
                    try:
                        listing_list.append(header.find('a').text)
                    except:
                        listing_list.append(None)
                    # Link to the entry's detail page.
                    detail_link_list.append("http://endic.naver.com" + header.find('a')['href'])
                    # Hanja for the word, when present.
                    if header.contents[2]:
                        hanja_list.append(header.contents[2].strip())
                    else:
                        hanja_list.append("")
                # Each block holds one definition plus its example sentences.
                for block in soup.find_all('div', class_='align_right'):
                    definition_list.append(block.find('span', class_='fnt_k05').text)
                    if block.find('span', class_='fnt_e07 _ttsText'):
                        kr_ex_sent_list.append(block.find('span', class_='fnt_e07 _ttsText').text)
                    else:
                        kr_ex_sent_list.append("")
                    if block.find('span', class_='fnt_k10 _ttsText'):
                        en_ex_sent_list.append(block.find('span', class_='fnt_k10 _ttsText').text)
                    else:
                        en_ex_sent_list.append("")
                response.close()  # redundant: the with-statement already closes the response
            # With no example sentence, show a placeholder; otherwise show the pair.
            if kr_ex_sent_list[0] == "":
                single_output = """**[{0}:]({1})** {2} {3}\n\t*예시 문장이 없습니다 / No example sentence*""".format(listing_list[0],detail_link_list[0],hanja_list[0],definition_list[0])
            else:
                single_output = """**[{0}:]({1})** {2} {3}\n\t*{4}*\n\t*{5}*""".format(listing_list[0],detail_link_list[0],hanja_list[0],definition_list[0],kr_ex_sent_list[0],en_ex_sent_list[0])
            #simple_output = """**[{2}:]({6})** {3}\n\t*{4}*\n\t*{5}*\n---\n**[{7}:]({11})** {8}\n\t*{9}*\n\t*{10}*""".format(query,search_url,listing_list[0],definition_list[0],kr_ex_sent_list[0],en_ex_sent_list[0],detail_link_list[0],listing_list[1],definition_list[1],kr_ex_sent_list[1],en_ex_sent_list[1],detail_link_list[1])
            # Build the embed (red = KO->EN) and send it to the channel.
            embed_title = "Results for {0}".format(query)
            simple_em = discord.Embed(title=embed_title, description=single_output, url=search_url, colour=discord.Colour.red())
            await client.say(embed=simple_em)
            #await client.say(url) #get a url shortener
        elif query.isalpha() == True or lang == 'en':
            # English query: translate English to Korean.
            search_url = "http://endic.naver.com/search.nhn?sLn=kr&isOnlyViewEE=N&query=" + quote(query)
            # Parallel lists: index i describes the i-th search result.
            listing_list = []
            part_of_speech_list = []
            detail_link_list = []
            definition_list = []
            kr_ex_sent_list = []
            en_ex_sent_list = []
            # Fetch the search page and parse it with BeautifulSoup.
            with urllib.request.urlopen(search_url) as response:
                soup = BeautifulSoup(response.read(), "html.parser")
                for header in soup.find_all('span', class_='fnt_e30'):
                    # Headword text. NOTE(review): bare except, as above.
                    try:
                        listing_list.append(header.find('a').text)
                    except:
                        listing_list.append(None)
                    # Link to the entry's detail page.
                    detail_link_list.append("http://endic.naver.com" + header.find('a')['href'])
                # Each block holds one definition plus its example sentences.
                for block in soup.find_all('div', class_='align_right'):
                    # Part of speech, if present.
                    if block.find('span', class_='fnt_k09'):
                        part_of_speech_list.append(block.find('span', class_='fnt_k09').text)
                    else:
                        part_of_speech_list.append("")
                    definition_list.append(block.find('span', class_='fnt_k05').text)
                    if block.find('span', class_='fnt_e07 _ttsText'):
                        en_ex_sent_list.append(block.find('span', class_='fnt_e07 _ttsText').text)
                    else:
                        en_ex_sent_list.append("")
                    if block.find('span', class_='fnt_k10 _ttsText'):
                        kr_ex_sent_list.append(block.find('span', class_='fnt_k10 _ttsText').text)
                    else:
                        kr_ex_sent_list.append("")
                response.close()  # redundant: the with-statement already closes the response
            # With no example sentence, show a placeholder; otherwise show the pair.
            if en_ex_sent_list[0] == "":
                single_output = """**[{0}:]({1})** {2} {3}\n\t*No example sentence / 예시 문장이 없습니다*""".format(listing_list[0],detail_link_list[0],part_of_speech_list[0],definition_list[0])
            else:
                single_output = """**[{0}:]({1})** {2} {3}\n\t*{4}*\n\t*{5}*""".format(listing_list[0],detail_link_list[0],part_of_speech_list[0],definition_list[0],en_ex_sent_list[0],kr_ex_sent_list[0])
            # Build the embed (blue = EN->KO) and send it to the channel.
            embed_title = "Results for {0}".format(query)
            simple_em = discord.Embed(title=embed_title, description=single_output, url=search_url, colour=discord.Colour.blue())
            await client.say(embed=simple_em)
        else:
            await client.say("Invalid input or a non Korean/English language! Please try again.")
# After you have modified the code, feel free to delete the line above (line 33) so it does not keep popping up every time you initiate the ping command.
client.run('Enter the token here.')
# Basic Bot was created by Habchy#1665
# Please join this Discord server if you need help: https://discord.gg/FNNNgqb
# Please modify the parts of the code where it asks you to. Example: The Prefix or The Bot Token
# This is by no means a full bot, it's more of a starter to show you what the python language can do in Discord.
# Thank you for using this and don't forget to star my repo on GitHub! [Repo Link: https://github.com/Habchy/BasicBot]
# The help command is currently set to be Direct Messaged.
# If you would like to change that, change "pm_help = True" to "pm_help = False" on line 9. | 42.053824 | 320 | 0.684473 | 2,323 | 14,845 | 4.20706 | 0.151528 | 0.025069 | 0.040929 | 0.044203 | 0.717487 | 0.717487 | 0.709403 | 0.701525 | 0.701525 | 0.701525 | 0 | 0.016879 | 0.173863 | 14,845 | 353 | 321 | 42.053824 | 0.780007 | 0.331964 | 0 | 0.874372 | 0 | 0.060302 | 0.209787 | 0.008672 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.050251 | 0 | 0.050251 | 0.065327 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d8ab387e8af3ab9f85a120a9a8310c1edb183399 | 29 | py | Python | src/ctc/protocols/compound_utils/__init__.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z | src/ctc/protocols/compound_utils/__init__.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z | src/ctc/protocols/compound_utils/__init__.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z | from .compound_crud import *
| 14.5 | 28 | 0.793103 | 4 | 29 | 5.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d8f38c2af095d91b4b02dd3d1b0f7543e9b346e0 | 42 | py | Python | old/servo_motor/__init__.py | SaltyHash/BWO | 1b57569e6024fa7b7b23dce8ce7e3a408b89b792 | [
"MIT"
] | 2 | 2021-04-03T20:29:59.000Z | 2021-04-28T00:32:18.000Z | old/servo_motor/__init__.py | SaltyHash/BWO | 1b57569e6024fa7b7b23dce8ce7e3a408b89b792 | [
"MIT"
] | null | null | null | old/servo_motor/__init__.py | SaltyHash/BWO | 1b57569e6024fa7b7b23dce8ce7e3a408b89b792 | [
"MIT"
] | 1 | 2021-07-16T12:07:25.000Z | 2021-07-16T12:07:25.000Z | from . import abstract
from . import model | 21 | 22 | 0.785714 | 6 | 42 | 5.5 | 0.666667 | 0.606061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 42 | 2 | 23 | 21 | 0.942857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
2b0efbfa7a55113382f76bc2f11cbe5b45cb2cef | 110 | py | Python | novelsave/client/cli/__init__.py | damare01/novelsave | 7896e8393c944e169e3cb52a33ab81ae396dff9f | [
"Apache-2.0"
] | 15 | 2020-11-05T10:05:01.000Z | 2021-06-28T14:43:56.000Z | novelsave/client/cli/__init__.py | damare01/novelsave | 7896e8393c944e169e3cb52a33ab81ae396dff9f | [
"Apache-2.0"
] | 21 | 2020-11-01T04:36:56.000Z | 2021-08-16T09:36:48.000Z | novelsave/cli/__init__.py | mHaisham/novelsave | 011b6c5d705591783aee64662bc88b207bdc7205 | [
"Apache-2.0"
] | 6 | 2021-10-03T11:31:08.000Z | 2022-03-29T07:28:49.000Z | from . import controllers
from . import groups
from .events import update_check_event
from .main import main
| 18.333333 | 38 | 0.809091 | 16 | 110 | 5.4375 | 0.5625 | 0.229885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.154545 | 110 | 5 | 39 | 22 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2b684dacce30f42cef9504dc729555e2b8e99fc7 | 185 | py | Python | project/books/__init__.py | Miguel619/wiredin | 921ee46d3868ea6150696b57767f5d282c16209d | [
"BSD-2-Clause"
] | 1 | 2022-02-12T21:53:28.000Z | 2022-02-12T21:53:28.000Z | project/books/__init__.py | Miguel619/wiredin | 921ee46d3868ea6150696b57767f5d282c16209d | [
"BSD-2-Clause"
] | null | null | null | project/books/__init__.py | Miguel619/wiredin | 921ee46d3868ea6150696b57767f5d282c16209d | [
"BSD-2-Clause"
] | null | null | null | """
The `books` blueprint handles displaying recipes.
"""
from flask import Blueprint
books_blueprint = Blueprint('books', __name__, template_folder='templates')
from . import routes
| 20.555556 | 75 | 0.767568 | 21 | 185 | 6.47619 | 0.666667 | 0.205882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.124324 | 185 | 8 | 76 | 23.125 | 0.839506 | 0.264865 | 0 | 0 | 0 | 0 | 0.109375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0.666667 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 6 |
990ffde10dbb2bd81a150ae53991c724d8b4c92c | 34 | py | Python | zunzun/LongRunningProcess/animation_tools/__init__.py | Sturtuk/Zun | 62e626b3c865d3eba83b7ee6bd896fea688f3dda | [
"BSD-2-Clause"
] | null | null | null | zunzun/LongRunningProcess/animation_tools/__init__.py | Sturtuk/Zun | 62e626b3c865d3eba83b7ee6bd896fea688f3dda | [
"BSD-2-Clause"
] | null | null | null | zunzun/LongRunningProcess/animation_tools/__init__.py | Sturtuk/Zun | 62e626b3c865d3eba83b7ee6bd896fea688f3dda | [
"BSD-2-Clause"
] | null | null | null | import Figtodat
import images2gif
| 11.333333 | 17 | 0.882353 | 4 | 34 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 0.117647 | 34 | 2 | 18 | 17 | 0.966667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
993c72ead04ce342cd34747b1ab803b8080ea905 | 90 | py | Python | SLpackage/private/thirdparty/python/python_2.7.16/site-packages/pylab.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 5 | 2022-02-20T07:10:02.000Z | 2022-03-18T17:47:53.000Z | SLpackage/private/thirdparty/python/python_2.7.16/site-packages/pylab.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | SLpackage/private/thirdparty/python/python_2.7.16/site-packages/pylab.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | from matplotlib.pylab import *
import matplotlib.pylab
__doc__ = matplotlib.pylab.__doc__
| 22.5 | 34 | 0.833333 | 11 | 90 | 6.090909 | 0.454545 | 0.671642 | 0.537313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 90 | 3 | 35 | 30 | 0.82716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5154d6d9318d89d960b25eb19250643be4626012 | 2,104 | py | Python | depth_and_motion/tools/channels_tools.py | Dtananaev/unsupervised_depth_and_motion | c3e1916c95991794309763836e79a20548d95bf8 | [
"MIT"
] | 2 | 2022-03-22T11:29:00.000Z | 2022-03-22T11:29:04.000Z | depth_and_motion/tools/channels_tools.py | Dtananaev/unsupervised_depth_and_motion | c3e1916c95991794309763836e79a20548d95bf8 | [
"MIT"
] | null | null | null | depth_and_motion/tools/channels_tools.py | Dtananaev/unsupervised_depth_and_motion | c3e1916c95991794309763836e79a20548d95bf8 | [
"MIT"
] | null | null | null | #
# Author: Denis Tananaev
# Date: 26.02.2021
#
import tensorflow as tf
import numpy as np
def to_channels_first_tf(tensor):
    """Move the channel axis of *tensor* to the front (TensorFlow variant).

    [N,H,W,C] -> [N,C,H,W] for 4-D input, [H,W,C] -> [C,H,W] for 3-D input.

    Raises:
        ValueError: if the tensor is neither 3- nor 4-dimensional.
    """
    shape_len = len(tf.shape(tensor))  # rank of the tensor
    if shape_len == 3:
        return tf.transpose(tensor, [2, 0, 1])
    if shape_len == 4:
        return tf.transpose(tensor, [0, 3, 1, 2])
    raise ValueError(f"The shape of the tensor should be 3 or 4 dimensions but it is {shape_len}")
def to_channels_first_np(tensor):
    """Move the channel axis of *tensor* to the front (NumPy variant).

    [N,H,W,C] -> [N,C,H,W] for 4-D input, [H,W,C] -> [C,H,W] for 3-D input.

    Raises:
        ValueError: if the array is neither 3- nor 4-dimensional.
    """
    shape_len = len(tensor.shape)
    if shape_len == 3:
        return np.transpose(tensor, (2, 0, 1))
    if shape_len == 4:
        return np.transpose(tensor, (0, 3, 1, 2))
    raise ValueError(f"The shape of the tensor should be 3 or 4 dimensions but it is {shape_len}")
def to_channels_last_tf(tensor):
    """Move the channel axis of *tensor* to the back (TensorFlow variant).

    [N,C,H,W] -> [N,H,W,C] for 4-D input, [C,H,W] -> [H,W,C] for 3-D input.

    Bug fix: the original re-assigned ``out = tf.transpose(tensor, )`` after
    the branch, discarding the computed permutation — ``tf.transpose`` with no
    ``perm`` reverses *all* axes, so the function returned the wrong layout.

    Raises:
        ValueError: if the tensor is neither 3- nor 4-dimensional.
    """
    shape_len = len(tf.shape(tensor))  # rank of the tensor
    if shape_len == 3:
        out = tf.transpose(tensor, [1, 2, 0])
    elif shape_len == 4:
        out = tf.transpose(tensor, [0, 2, 3, 1])
    else:
        raise ValueError(f"The shape of the tensor should be 3 or 4 dimensions but it is {shape_len}")
    return out
def to_channels_last_np(tensor):
    """Move the channel axis of *tensor* to the back (NumPy variant).

    [N,C,H,W] -> [N,H,W,C] for 4-D input, [C,H,W] -> [H,W,C] for 3-D input.

    Raises:
        ValueError: if the array is neither 3- nor 4-dimensional.
    """
    shape_len = len(tensor.shape)
    perms = {3: (1, 2, 0), 4: (0, 2, 3, 1)}
    if shape_len not in perms:
        raise ValueError(f"The shape of the tensor should be 3 or 4 dimensions but it is {shape_len}")
    return np.transpose(tensor, perms[shape_len])
| 31.878788 | 100 | 0.587452 | 355 | 2,104 | 3.402817 | 0.149296 | 0.02649 | 0.059603 | 0.082781 | 0.904801 | 0.904801 | 0.879967 | 0.879967 | 0.777318 | 0.770695 | 0 | 0.034369 | 0.280894 | 2,104 | 65 | 101 | 32.369231 | 0.764045 | 0.306559 | 0 | 0.615385 | 0 | 0 | 0.207386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.051282 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
515a4a671ceed3ed76bdcf1424eca442af8235e4 | 34 | py | Python | Python/app/db_config.py | derekhanger/ColonyCounter-api | 1412c642891d59ca1c209f28f471daac6d1beb2c | [
"MIT"
] | null | null | null | Python/app/db_config.py | derekhanger/ColonyCounter-api | 1412c642891d59ca1c209f28f471daac6d1beb2c | [
"MIT"
] | null | null | null | Python/app/db_config.py | derekhanger/ColonyCounter-api | 1412c642891d59ca1c209f28f471daac6d1beb2c | [
"MIT"
] | null | null | null | from pymongo import MongoClient
| 8.5 | 31 | 0.823529 | 4 | 34 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 34 | 3 | 32 | 11.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5a958810e13d122bda2f004d1fc8eae939177b69 | 124 | py | Python | dist/Basilisk/fswAlgorithms/rwNullSpace/__init__.py | ian-cooke/basilisk_mag | a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14 | [
"0BSD"
] | null | null | null | dist/Basilisk/fswAlgorithms/rwNullSpace/__init__.py | ian-cooke/basilisk_mag | a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14 | [
"0BSD"
] | 1 | 2019-03-13T20:52:22.000Z | 2019-03-13T20:52:22.000Z | dist/Basilisk/fswAlgorithms/rwNullSpace/__init__.py | ian-cooke/basilisk_mag | a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14 | [
"0BSD"
] | null | null | null | # This __init__.py file for the rwNullSpace package is automatically generated by the build system
from rwNullSpace import * | 62 | 98 | 0.830645 | 18 | 124 | 5.5 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145161 | 124 | 2 | 99 | 62 | 0.933962 | 0.774194 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5abe3231d618633532c6516fdaaa8f70e87dff35 | 1,101 | py | Python | asyncowfs/mock/structs.py | smurfix/trio-owfs | 5e930b7d1b21a49b1eb16bf23340948d892c4c3b | [
"Apache-2.0",
"MIT"
] | 3 | 2018-11-04T20:28:31.000Z | 2021-06-29T04:09:43.000Z | asyncowfs/mock/structs.py | smurfix/trio-owfs | 5e930b7d1b21a49b1eb16bf23340948d892c4c3b | [
"Apache-2.0",
"MIT"
] | 1 | 2022-01-22T19:52:43.000Z | 2022-01-23T11:04:01.000Z | asyncowfs/mock/structs.py | smurfix/trio-owfs | 5e930b7d1b21a49b1eb16bf23340948d892c4c3b | [
"Apache-2.0",
"MIT"
] | 1 | 2018-09-04T13:29:39.000Z | 2018-09-04T13:29:39.000Z | """
Structure for device data. Generated by the "real" server,
but when we're testing we don't have that.
"""
# Feel free to add data from other device types as required.
structs = {
"10": {
"address": "a,000000,000001,ro,000016,f,",
"alias": "l,000000,000001,rw,000256,f,",
"crc8": "a,000000,000001,ro,000002,f,",
"family": "a,000000,000001,ro,000002,f,",
"id": "a,000000,000001,ro,000012,f,",
"latesttemp": "t,000000,000001,ro,000012,v,",
"locator": "a,000000,000001,ro,000016,f,",
"power": "y,000000,000001,ro,000001,v,",
"temperature": "t,000000,000001,ro,000012,v,",
"temphigh": "t,000000,000001,rw,000012,s,",
"templow": "t,000000,000001,rw,000012,s,",
"type": "a,000000,000001,ro,000032,f,",
"foo": {
"bar": "i,000000,000001,rw,000012,s,",
"baz": {"quux": "f,000000,000001,rw,000012,s,"},
"plugh.A": "i,00000,000001,rw,000012,s,",
"plover.0": "i,00000,000001,rw,000012,s,",
},
},
"1F": {},
"20": {},
"28": {},
}
| 33.363636 | 60 | 0.537693 | 151 | 1,101 | 3.92053 | 0.450331 | 0.283784 | 0.212838 | 0.152027 | 0.439189 | 0.368243 | 0 | 0 | 0 | 0 | 0 | 0.351609 | 0.237965 | 1,101 | 32 | 61 | 34.40625 | 0.353993 | 0.146231 | 0 | 0 | 1 | 0 | 0.598712 | 0.478541 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5aeff099108a0bb38b8f2b0ce12704b1bc06f777 | 36 | py | Python | src/python/module/affogato/interactive/imjoy/__init__.py | constantinpape/affogato | 22ea369313b01e10f5cfefa21b7db0df719f75b0 | [
"MIT"
] | 6 | 2021-04-11T00:47:37.000Z | 2021-10-03T23:41:06.000Z | src/python/module/affogato/interactive/imjoy/__init__.py | constantinpape/affogato | 22ea369313b01e10f5cfefa21b7db0df719f75b0 | [
"MIT"
] | 8 | 2019-05-28T16:12:07.000Z | 2022-01-10T18:21:03.000Z | src/python/module/affogato/interactive/imjoy/__init__.py | constantinpape/affogato | 22ea369313b01e10f5cfefa21b7db0df719f75b0 | [
"MIT"
] | 1 | 2021-06-01T12:16:23.000Z | 2021-06-01T12:16:23.000Z | from .mws_plugin import ImjoyPlugin
| 18 | 35 | 0.861111 | 5 | 36 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 36 | 1 | 36 | 36 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8516c00ffc83939c470660da238a7ac7c13c1ffc | 422 | py | Python | src/brujeria/platform.py | bruxisma/brujeria | 66822e9190ce2ac6103f6144e145c04f4780903a | [
"MIT"
] | 19 | 2018-02-24T23:09:21.000Z | 2021-06-12T09:46:07.000Z | src/brujeria/platform.py | bruxisma/brujeria | 66822e9190ce2ac6103f6144e145c04f4780903a | [
"MIT"
] | 21 | 2019-03-22T23:59:30.000Z | 2020-12-25T13:13:53.000Z | src/brujeria/platform.py | slurps-mad-rips/brujeria | 66822e9190ce2ac6103f6144e145c04f4780903a | [
"MIT"
] | 1 | 2022-03-18T15:39:06.000Z | 2022-03-18T15:39:06.000Z | from enum import Enum
import sys
class Platform(Enum):
    """Operating systems this code distinguishes, keyed by ``sys.platform`` values."""
    WINDOWS = "win32"
    LINUX = "linux"
    MACOS = "darwin"
def current() -> Platform:
    """Return the Platform member matching this interpreter's ``sys.platform``."""
    name = sys.platform
    return Platform(name)
def windows() -> bool:
    """Return True when running on Windows."""
    return current() is Platform.WINDOWS
def linux() -> bool:
    """Return True when running on Linux."""
    return current() is Platform.LINUX
def macos() -> bool:
    """Return True when running on macOS."""
    return current() is Platform.MACOS
def posix() -> bool:
    """Return True on any non-Windows (POSIX-style) platform."""
    if windows():
        return False
    return True
| 14.066667 | 40 | 0.630332 | 49 | 422 | 5.428571 | 0.346939 | 0.225564 | 0.191729 | 0.281955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006192 | 0.234597 | 422 | 29 | 41 | 14.551724 | 0.817337 | 0 | 0 | 0 | 0 | 0 | 0.038005 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3125 | false | 0 | 0.125 | 0.3125 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
8520bb738396768afcbdf04821dd55bd35c12a27 | 1,033 | py | Python | tests/test_none_group.py | artyl/regex-rename | 64e06ff62d3c210c8168d796fff94ec44ac8b094 | [
"MIT"
] | 1 | 2022-03-04T09:49:57.000Z | 2022-03-04T09:49:57.000Z | tests/test_none_group.py | artyl/regex-rename | 64e06ff62d3c210c8168d796fff94ec44ac8b094 | [
"MIT"
] | null | null | null | tests/test_none_group.py | artyl/regex-rename | 64e06ff62d3c210c8168d796fff94ec44ac8b094 | [
"MIT"
] | null | null | null | from regex_rename.bulk import match_filename
def test_file_with_none_group():
    """Optional regex group present in the filename; no zero-padding."""
    pattern, replacement = r'(\d+)(\.XXX)?.txt', '\\1\\2.txt'
    result = match_filename('1.XXX.txt', pattern, replacement,
                            full=False, padding=0, testing=False)
    assert result is not None
    assert result.name_to == '1.XXX.txt'
def test_file_with_none_group_padding():
    """Optional regex group present; number zero-padded to width 2."""
    pattern, replacement = r'(\d+)(\.XXX)?.txt', '\\1\\2.txt'
    result = match_filename('1.XXX.txt', pattern, replacement,
                            full=False, padding=2, testing=False)
    assert result is not None
    assert result.name_to == '01.XXX.txt'
def test_file_without_none_group():
    """Optional regex group absent from the filename; no zero-padding."""
    pattern, replacement = r'(\d+)(\.XXX)?.txt', '\\1\\2.txt'
    result = match_filename('1.txt', pattern, replacement,
                            full=False, padding=0, testing=False)
    assert result is not None
    assert result.name_to == '1.txt'
def test_file_without_none_group_padding():
    """Optional regex group absent; number zero-padded to width 2."""
    pattern, replacement = r'(\d+)(\.XXX)?.txt', '\\1\\2.txt'
    result = match_filename('1.txt', pattern, replacement,
                            full=False, padding=2, testing=False)
    assert result is not None
    assert result.name_to == '01.txt'
| 34.433333 | 75 | 0.596321 | 153 | 1,033 | 3.843137 | 0.202614 | 0.081633 | 0.07483 | 0.129252 | 0.930272 | 0.914966 | 0.848639 | 0.707483 | 0.707483 | 0.707483 | 0 | 0.027883 | 0.236205 | 1,033 | 29 | 76 | 35.62069 | 0.717364 | 0 | 0 | 0.571429 | 0 | 0 | 0.160697 | 0 | 0 | 0 | 0 | 0 | 0.380952 | 1 | 0.190476 | false | 0 | 0.047619 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5196cddcd7905907ae4a084f8579d3bdfbf34def | 138 | py | Python | reserver_app/views.py | TensorHive/TensorHive-ResourcesReserver | bd96b82cf9af5237e9d8505d8edd5bfca008aa01 | [
"MIT"
] | null | null | null | reserver_app/views.py | TensorHive/TensorHive-ResourcesReserver | bd96b82cf9af5237e9d8505d8edd5bfca008aa01 | [
"MIT"
] | 1 | 2021-03-10T16:04:31.000Z | 2021-03-10T16:04:31.000Z | reserver_app/views.py | TensorHive/TensorHive-ResourcesReserver | bd96b82cf9af5237e9d8505d8edd5bfca008aa01 | [
"MIT"
] | 1 | 2020-01-29T22:47:36.000Z | 2020-01-29T22:47:36.000Z | from reserver_app.run import app
from flask import jsonify
@app.route('/')
def index():
    """Root endpoint: return a small JSON payload; no authentication required."""
    payload = {'msg': 'Unprotected access'}
    return jsonify(payload)
51cece6aa368cd17717ee3df7e6bdd1e4ed0db66 | 262 | py | Python | src/examples/d_repetition/main.py | MSGP117/acc-cosc-1336-spring-2022-MSGP117 | 46fdfa5da8f8eb887d2c79fe205b8a0064d6903d | [
"MIT"
] | null | null | null | src/examples/d_repetition/main.py | MSGP117/acc-cosc-1336-spring-2022-MSGP117 | 46fdfa5da8f8eb887d2c79fe205b8a0064d6903d | [
"MIT"
] | null | null | null | src/examples/d_repetition/main.py | MSGP117/acc-cosc-1336-spring-2022-MSGP117 | 46fdfa5da8f8eb887d2c79fe205b8a0064d6903d | [
"MIT"
] | 1 | 2022-02-12T03:50:32.000Z | 2022-02-12T03:50:32.000Z | import repetition
#repetition.display_numbers(3)
#repetition.for_intro_loop_strings()
#repetition.for_num_in_range(5)
#repetition.for_num_in_range_w_start_value(1, 5)
#repetition.for_num_range_w_step_value(0, 10, 2)
repetition.for_display_sum_of_squares(1, 11) | 29.111111 | 48 | 0.847328 | 45 | 262 | 4.444444 | 0.555556 | 0.325 | 0.24 | 0.18 | 0.23 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044177 | 0.049618 | 262 | 9 | 49 | 29.111111 | 0.759036 | 0.717557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
51e5daffa45ee934059747d8166eb8b916da6d63 | 9,441 | py | Python | uni_ticket/views/datatables.py | mspasiano/uniTicket | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | [
"Apache-2.0"
] | null | null | null | uni_ticket/views/datatables.py | mspasiano/uniTicket | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | [
"Apache-2.0"
] | null | null | null | uni_ticket/views/datatables.py | mspasiano/uniTicket | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from datatables_ajax.datatables import DjangoDatatablesServerProc
from organizational_area.models import (OrganizationalStructure,
OrganizationalStructureOfficeEmployee)
from uni_ticket.decorators import is_manager, is_operator
from uni_ticket.models import Ticket, TicketAssignment
from uni_ticket.utils import visible_tickets_to_user
# Ticket attributes/methods rendered as DataTables columns, in display order.
_columns = ['pk','code','subject','get_category',
            'created','get_priority','get_status']
class DTD(DjangoDatatablesServerProc):
    """Server-side DataTables processor shared by the ticket list views."""

    def get_queryset(self):
        """
        Sets DataTable tickets common queryset
        """
        # Restrict to one calendar year: read from GET or POST, falling back
        # to the current year.
        data_year = self.request.GET.get('created__year') or \
                    self.request.POST.get('created__year') or \
                    timezone.localdate().year
        if self.search_key:
            # Free-text search: OR of case-insensitive substring matches on
            # code, subject, category name and creation timestamp.
            self.aqs = self.model.filter(created__year=data_year)\
                           .filter(\
                               Q(code__icontains=self.search_key) | \
                               Q(subject__icontains=self.search_key) | \
                               Q(input_module__ticket_category__name__icontains=self.search_key) | \
                               Q(created__icontains=self.search_key))
        else:
            self.aqs = self.model.filter(created__year=data_year)
@csrf_exempt
@login_required
def user_all_tickets(request):
    """
    DataTables JSON source: every ticket created by the requesting user.

    :return: JsonResponse
    """
    queryset = Ticket.objects.filter(created_by=request.user)
    table = DTD(request, queryset, _columns)
    return JsonResponse(table.get_dict())
@csrf_exempt
@login_required
def user_unassigned_ticket(request):
    """
    DataTables JSON source: the user's tickets that are neither taken by an
    operator nor closed.

    :return: JsonResponse
    """
    queryset = Ticket.objects.filter(is_closed=False,
                                     is_taken=False,
                                     created_by=request.user)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@login_required
def user_opened_ticket(request):
    """
    DataTables JSON source: the user's tickets that are taken but not closed.

    :return: JsonResponse
    """
    queryset = Ticket.objects.filter(is_closed=False,
                                     is_taken=True,
                                     created_by=request.user)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@login_required
def user_closed_ticket(request):
    """
    DataTables JSON source: the user's closed tickets.

    :return: JsonResponse
    """
    queryset = Ticket.objects.filter(is_closed=True,
                                     created_by=request.user)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_manager
def manager_not_closed_ticket(request, structure_slug, structure):
    """
    DataTables JSON source: every not-closed ticket assigned to the manager's
    structure.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_manager)

    :param structure_slug: manager structure slug
    :param structure: manager structure (from @is_manager)

    :return: JsonResponse
    """
    pks = TicketAssignment.get_ticket_per_structure(structure=structure)
    queryset = Ticket.objects.filter(is_closed=False, pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_manager
def manager_unassigned_ticket(request, structure_slug, structure):
    """
    DataTables JSON source: the structure's tickets that are neither taken
    nor closed.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_manager)

    :param structure_slug: manager structure slug
    :param structure: manager structure (from @is_manager)

    :return: JsonResponse
    """
    pks = TicketAssignment.get_ticket_per_structure(structure=structure)
    queryset = Ticket.objects.filter(is_closed=False,
                                     is_taken=False,
                                     pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_manager
def manager_opened_ticket(request, structure_slug, structure):
    """
    DataTables JSON source: the structure's tickets that are taken but not
    closed.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_manager)

    :param structure_slug: manager structure slug
    :param structure: manager structure (from @is_manager)

    :return: JsonResponse
    """
    pks = TicketAssignment.get_ticket_per_structure(structure=structure)
    queryset = Ticket.objects.filter(is_closed=False,
                                     is_taken=True,
                                     pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_manager
def manager_closed_ticket(request, structure_slug, structure):
    """
    DataTables JSON source: the structure's closed tickets.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_manager)

    :param structure_slug: manager structure slug
    :param structure: manager structure (from @is_manager)

    :return: JsonResponse
    """
    pks = TicketAssignment.get_ticket_per_structure(structure=structure)
    queryset = Ticket.objects.filter(is_closed=True, pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_operator
def operator_not_closed_ticket(request, structure_slug,
                               structure, office_employee):
    """
    DataTables JSON source: every not-closed ticket visible to the operator.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_operator)
    :type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)

    :param structure_slug: operator structure slug
    :param structure: operator structure (from @is_operator)
    :param office_employee: queryset with operator and his offices (from @is_operator)

    :return: JsonResponse
    """
    pks = visible_tickets_to_user(request.user, structure, office_employee)
    queryset = Ticket.objects.filter(is_closed=False, pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_operator
def operator_unassigned_ticket(request, structure_slug,
                               structure, office_employee):
    """
    DataTables JSON source: tickets visible to the operator that are neither
    taken nor closed.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_operator)
    :type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)

    :param structure_slug: operator structure slug
    :param structure: operator structure (from @is_operator)
    :param office_employee: queryset with operator and his offices (from @is_operator)

    :return: JsonResponse
    """
    pks = visible_tickets_to_user(request.user, structure, office_employee)
    queryset = Ticket.objects.filter(is_closed=False,
                                     is_taken=False,
                                     pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_operator
def operator_opened_ticket(request, structure_slug,
                           structure, office_employee):
    """
    DataTables JSON source: tickets visible to the operator that are taken
    but not closed.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_operator)
    :type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)

    :param structure_slug: operator structure slug
    :param structure: operator structure (from @is_operator)
    :param office_employee: queryset with operator and his offices (from @is_operator)

    :return: JsonResponse
    """
    pks = visible_tickets_to_user(request.user, structure, office_employee)
    queryset = Ticket.objects.filter(is_closed=False,
                                     is_taken=True,
                                     pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
@csrf_exempt
@is_operator
def operator_closed_ticket(request, structure_slug,
                           structure, office_employee):
    """
    DataTables JSON source: closed tickets visible to the operator.

    :type structure_slug: String
    :type structure: OrganizationalStructure (from @is_operator)
    :type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)

    :param structure_slug: operator structure slug
    :param structure: operator structure (from @is_operator)
    :param office_employee: queryset with operator and his offices (from @is_operator)

    :return: JsonResponse
    """
    pks = visible_tickets_to_user(request.user, structure, office_employee)
    queryset = Ticket.objects.filter(is_closed=True, pk__in=pks)
    return JsonResponse(DTD(request, queryset, _columns).get_dict())
cfa9a816f1d402cf0114c339f378659716cb9aeb | 85 | py | Python | metaclasses/user.py | gitgik/expert_python | a5caf8e759c6ec7acaace8fc5071acb4593318c5 | [
"MIT"
] | 2 | 2017-10-20T05:48:15.000Z | 2019-07-09T18:05:37.000Z | metaclasses/user.py | gitgik/expert_python | a5caf8e759c6ec7acaace8fc5071acb4593318c5 | [
"MIT"
] | null | null | null | metaclasses/user.py | gitgik/expert_python | a5caf8e759c6ec7acaace8fc5071acb4593318c5 | [
"MIT"
] | null | null | null | from metaclass import Base
# Minimal subclass used to exercise the `Base` metaclass imported above.
class Derived(Base):
    # NOTE(review): `bar` takes no `self` parameter — this looks deliberate
    # for the metaclass example; confirm before "fixing" the signature.
    # No docstring is added on purpose: a metaclass may inspect the class
    # namespace, and adding `__doc__` would change it.
    def bar():
        return 'bar'
| 14.166667 | 26 | 0.635294 | 11 | 85 | 4.909091 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.270588 | 85 | 5 | 27 | 17 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0.035294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
32129941b20d3f27737893cc13cdb59920a99c69 | 202 | py | Python | cadquery/occ_impl/exporters/utils.py | tvtrong/cadquery | da07216688d78a596fb687c0925b14ad7d5effc4 | [
"Apache-2.0"
] | 1,423 | 2018-10-28T18:01:04.000Z | 2022-03-30T20:22:28.000Z | cadquery/occ_impl/exporters/utils.py | tvtrong/cadquery | da07216688d78a596fb687c0925b14ad7d5effc4 | [
"Apache-2.0"
] | 1,017 | 2018-11-18T20:50:34.000Z | 2022-03-31T22:56:39.000Z | cadquery/occ_impl/exporters/utils.py | tvtrong/cadquery | da07216688d78a596fb687c0925b14ad7d5effc4 | [
"Apache-2.0"
] | 175 | 2018-11-18T06:07:54.000Z | 2022-03-31T16:21:18.000Z | from ...cq import Workplane
from ..shapes import Compound, Shape
def toCompound(shape: Workplane) -> Compound:
    """Collect every Shape value held by the Workplane into one Compound."""
    # Non-Shape values (e.g. raw vectors) are filtered out before merging.
    shape_values = (v for v in shape.vals() if isinstance(v, Shape))
    return Compound.makeCompound(shape_values)
| 25.25 | 87 | 0.747525 | 27 | 202 | 5.592593 | 0.62963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148515 | 202 | 7 | 88 | 28.857143 | 0.877907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
5c7604c9fca2b99d7fddd5b5810770b4fc933405 | 204 | py | Python | lib/MetabolicModelGapfilling/core/__init__.py | ModelSEED/MetabolicModelGapfilling | d0a844a5dc8c08e532d0cc762f13810a9386e3c0 | [
"MIT"
] | null | null | null | lib/MetabolicModelGapfilling/core/__init__.py | ModelSEED/MetabolicModelGapfilling | d0a844a5dc8c08e532d0cc762f13810a9386e3c0 | [
"MIT"
] | null | null | null | lib/MetabolicModelGapfilling/core/__init__.py | ModelSEED/MetabolicModelGapfilling | d0a844a5dc8c08e532d0cc762f13810a9386e3c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from MetabolicModelGapfilling.core.basemodule import BaseModule
from MetabolicModelGapfilling.core.gapfillingmodule import GapfillingModule | 34 | 75 | 0.848039 | 20 | 204 | 8.4 | 0.55 | 0.333333 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005376 | 0.088235 | 204 | 6 | 75 | 34 | 0.897849 | 0.102941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
5ca56f138dbdc03d044fa1a86aa1e476f65d2f09 | 134 | py | Python | boa3_test/test_sc/interop_test/runtime/GetNetworkTooManyArguments.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/interop_test/runtime/GetNetworkTooManyArguments.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/interop_test/runtime/GetNetworkTooManyArguments.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from typing import Any
from boa3.builtin.interop.runtime import get_network
def main(arg: Any) -> int:
    # NOTE(review): `get_network()` is called with an argument it does not
    # accept — the file name ("GetNetworkTooManyArguments") suggests this is
    # a deliberate negative test case for the boa3 compiler. Do not "fix"
    # the call; comments only here, since the compiler consumes this source.
    return get_network(arg)
| 16.75 | 52 | 0.753731 | 21 | 134 | 4.714286 | 0.714286 | 0.20202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008929 | 0.164179 | 134 | 7 | 53 | 19.142857 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 6 |
5ce404130805459a329ecce67eff5153b523d434 | 154 | py | Python | src/tokenizers/mitie_tokenizer.py | samhavens/rasa_nlu | 0687d1498d26ef67d7f5ddaa5c2cfce0b2331cdd | [
"Apache-2.0"
] | 1 | 2021-07-13T19:38:44.000Z | 2021-07-13T19:38:44.000Z | src/tokenizers/mitie_tokenizer.py | samhavens/rasa_nlu | 0687d1498d26ef67d7f5ddaa5c2cfce0b2331cdd | [
"Apache-2.0"
] | null | null | null | src/tokenizers/mitie_tokenizer.py | samhavens/rasa_nlu | 0687d1498d26ef67d7f5ddaa5c2cfce0b2331cdd | [
"Apache-2.0"
] | 1 | 2019-09-09T07:13:50.000Z | 2019-09-09T07:13:50.000Z | from mitie import tokenize
class MITIETokenizer(object):
    """Thin adapter exposing MITIE's `tokenize` function behind a
    tokenizer-object interface."""

    def __init__(self):
        # Stateless: MITIE's tokenizer is a plain function, nothing to set up.
        pass

    def tokenize(self, text):
        """Tokenize `text` by delegating to `mitie.tokenize`."""
        return tokenize(text)
| 19.25 | 29 | 0.675325 | 18 | 154 | 5.555556 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.246753 | 154 | 7 | 30 | 22 | 0.862069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0.166667 | 0.166667 | 0.166667 | 0.833333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
7a37fb64892d56a74d049da33bc2a8b3250b8c2d | 30 | py | Python | stalky/cogs/__init__.py | mlovatonv/stalky | 74cb61dba4baeaee92c44e3d1e77a8da56057d3c | [
"MIT"
] | null | null | null | stalky/cogs/__init__.py | mlovatonv/stalky | 74cb61dba4baeaee92c44e3d1e77a8da56057d3c | [
"MIT"
] | 1 | 2021-04-11T01:22:22.000Z | 2021-04-11T01:22:22.000Z | stalky/cogs/__init__.py | mlovatonv/stalky | 74cb61dba4baeaee92c44e3d1e77a8da56057d3c | [
"MIT"
] | null | null | null | from stalky.cogs import shame
| 15 | 29 | 0.833333 | 5 | 30 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 30 | 1 | 30 | 30 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7a3cfa75c2b08c7614f55d1f4f43941bf6c3eb37 | 3,015 | py | Python | tests/test_motions_find.py | AlphaMycelium/pathfinder.vim | 4f67053cbea56a45020d004b6bd059e38934a21a | [
"MIT"
] | 228 | 2020-05-26T11:46:21.000Z | 2020-08-04T22:39:17.000Z | tests/test_motions_find.py | AlphaMycelium/pathfinder.vim | 4f67053cbea56a45020d004b6bd059e38934a21a | [
"MIT"
] | 42 | 2020-05-25T12:41:35.000Z | 2020-08-10T16:23:48.000Z | tests/test_motions_find.py | danth/pathfinder.vim | 4f67053cbea56a45020d004b6bd059e38934a21a | [
"MIT"
] | 6 | 2020-05-26T20:32:34.000Z | 2020-06-16T00:47:12.000Z | from collections import namedtuple
from unittest import mock
from pathfinder.server.motions import Motion
from pathfinder.server.motions.find import FindMotionGenerator
View = namedtuple("View", "lnum col curswant")
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
@mock.patch.object(FindMotionGenerator, "_create_node", new=lambda self, v, m: (v, m))
def test_find_f():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 0, 0), "f"))
assert output == [
(View(1, 1, 1), Motion("f", "b")),
(View(1, 2, 2), Motion("f", "c")),
(View(1, 3, 3), Motion("f", "d")),
(View(1, 5, 5), Motion("f", "e")),
]
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
def test_find_f_final_column():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 5, 5), "f"))
assert len(output) == 0
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
@mock.patch.object(FindMotionGenerator, "_create_node", new=lambda self, v, m: (v, m))
def test_find_t():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 0, 0), "t"))
assert output == [
(View(1, 1, 1), Motion("t", "c")),
(View(1, 2, 2), Motion("t", "d")),
(View(1, 4, 4), Motion("t", "e")),
]
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
def test_find_t_penultimate_column():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 4, 4), "t"))
assert len(output) == 0
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
@mock.patch.object(FindMotionGenerator, "_create_node", new=lambda self, v, m: (v, m))
def test_find_F():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 5, 5), "F"))
assert output == [
(View(1, 4, 4), Motion("F", "d")),
(View(1, 2, 2), Motion("F", "c")),
(View(1, 1, 1), Motion("F", "b")),
(View(1, 0, 0), Motion("F", "a")),
]
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
def test_find_F_first_column():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 0, 0), "F"))
assert len(output) == 0
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
@mock.patch.object(FindMotionGenerator, "_create_node", new=lambda self, v, m: (v, m))
def test_find_T():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 5, 5), "T"))
assert output == [
(View(1, 3, 3), Motion("T", "c")),
(View(1, 2, 2), Motion("T", "b")),
(View(1, 1, 1), Motion("T", "a")),
]
@mock.patch("pathfinder.server.motions.find.vim.current.buffer", ["abcdde"])
def test_find_T_second_column():
generator = FindMotionGenerator(None)
output = list(generator._find(View(1, 1, 1), "T"))
assert len(output) == 0
| 35.05814 | 86 | 0.630846 | 417 | 3,015 | 4.465228 | 0.131894 | 0.059076 | 0.123523 | 0.130505 | 0.874866 | 0.827605 | 0.820086 | 0.80666 | 0.80666 | 0.745435 | 0 | 0.0279 | 0.167828 | 3,015 | 85 | 87 | 35.470588 | 0.714229 | 0 | 0 | 0.41791 | 0 | 0 | 0.180763 | 0.130017 | 0 | 0 | 0 | 0 | 0.119403 | 1 | 0.119403 | false | 0 | 0.059701 | 0 | 0.179104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7a9a45c9a8ab313dca6e3034a0d7836eab132a4c | 30 | py | Python | __init__.py | mishavetl/coordrounder | d289397960d901926c3891ba54f2343ce2be6145 | [
"MIT"
] | null | null | null | __init__.py | mishavetl/coordrounder | d289397960d901926c3891ba54f2343ce2be6145 | [
"MIT"
] | null | null | null | __init__.py | mishavetl/coordrounder | d289397960d901926c3891ba54f2343ce2be6145 | [
"MIT"
] | null | null | null | from src.coordrounder import * | 30 | 30 | 0.833333 | 4 | 30 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 30 | 1 | 30 | 30 | 0.925926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7ac01b85b6b4793c3d443b20d46c0d5ccddad246 | 8,403 | py | Python | wiggin_mito/actions/conformations.py | golobor/wiggin_mito | 51103894a4c7eac07cbf0cd6e891856b6e6bced7 | [
"MIT"
] | null | null | null | wiggin_mito/actions/conformations.py | golobor/wiggin_mito | 51103894a4c7eac07cbf0cd6e891856b6e6bced7 | [
"MIT"
] | null | null | null | wiggin_mito/actions/conformations.py | golobor/wiggin_mito | 51103894a4c7eac07cbf0cd6e891856b6e6bced7 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import logging
from typing import Union, Tuple, Sequence, Any, Optional # noqa: F401
import numpy as np
from .. import conformations
from wiggin.core import SimAction
logging.basicConfig(level=logging.INFO)
@dataclass
class HelicalLoopBrushConformation(SimAction):
    """Initialize the simulation with a helical loop-brush conformation.

    Exactly 0 or 2 of (helix_radius, helix_turn_length, helix_step,
    axial_compression_factor) may be specified; the remaining geometry is
    derived from turn_length^2 = step^2 + (2*pi*radius)^2 with
    axial_compression_factor = turn_length / step.  When none are
    specified, a degenerate (straight-axis) helix is used.
    """

    helix_radius: Optional[float] = None
    helix_turn_length: Optional[float] = None
    helix_step: Optional[float] = None
    axial_compression_factor: Optional[float] = None
    random_loop_orientations: bool = True

    _reads_shared = ['N', 'loops']
    _writes_shared = ['initial_conformation']

    @staticmethod
    def _radius_from_turn_and_step(turn_length, step):
        """Solve turn_length^2 = step^2 + (2*pi*r)^2 for r.

        Raises ValueError when the pair admits no real radius.
        """
        radius_squared = (turn_length ** 2 - step ** 2) / np.pi / np.pi / 2.0 / 2.0
        if radius_squared <= 0:
            raise ValueError(
                "The provided values of helix_step and helix_turn_length are incompatible"
            )
        return radius_squared ** 0.5

    def configure(self):
        out_shared = {}

        # BUG FIX: the original counted parameters that were *None*
        # (`i is None`), which rejected the documented "specify none" case
        # and silently accepted "specify all four".  Count the parameters
        # that were actually specified instead.
        n_specified = sum(
            p is not None
            for p in (
                self.helix_radius,
                self.helix_turn_length,
                self.helix_step,
                self.axial_compression_factor,
            )
        )
        if n_specified not in [0, 2]:
            raise ValueError(
                "Please specify 0 or 2 out of these four parameters: "
                "radius, turn_length, step and axis-to-backbone ratio"
            )

        if (self.helix_radius is not None) and (self.helix_step is not None):
            helix_radius = self.helix_radius
            helix_step = self.helix_step
        elif (self.helix_turn_length is not None) and (
            self.helix_step is not None
        ):
            helix_step = self.helix_step
            helix_radius = self._radius_from_turn_and_step(
                self.helix_turn_length, helix_step
            )
        elif (self.helix_turn_length is not None) and (
            self.helix_radius is not None
        ):
            helix_radius = self.helix_radius
            helix_step_squared = (self.helix_turn_length) ** 2 - (
                2 * np.pi * helix_radius
            ) ** 2
            if helix_step_squared <= 0:
                raise ValueError(
                    "The provided values of helix_step and helix_turn_length are incompatible"
                )
            helix_step = helix_step_squared ** 0.5
        elif (self.axial_compression_factor is not None) and (
            self.helix_radius is not None
        ):
            helix_radius = self.helix_radius
            # compression = turn_length / step  =>  step = 2*pi*r / sqrt(c^2 - 1)
            # NOTE(review): a factor <= 1 yields nan here (np.sqrt of a
            # negative); consider validating upstream.
            helix_step = (
                2
                * np.pi
                * helix_radius
                / np.sqrt(self.axial_compression_factor ** 2 - 1)
            )
        elif (self.axial_compression_factor is not None) and (
            self.helix_turn_length is not None
        ):
            helix_step = (
                self.helix_turn_length
                / self.axial_compression_factor
            )
            helix_radius = self._radius_from_turn_and_step(
                self.helix_turn_length, helix_step
            )
        elif (self.axial_compression_factor is not None) and (
            self.helix_step is not None
        ):
            helix_step = self.helix_step
            helix_turn_length = helix_step * self.axial_compression_factor
            helix_radius = self._radius_from_turn_and_step(
                helix_turn_length, helix_step
            )
        else:
            # Nothing specified: degenerate helix (straight axis).
            helix_radius = 0
            helix_step = int(1e9)

        # Persist the derived geometry for later inspection.
        self.helix_step = helix_step
        self.helix_radius = helix_radius

        out_shared[
            "initial_conformation"
        ] = conformations.make_helical_loopbrush(
            L=self._shared["N"],
            helix_radius=helix_radius,
            helix_step=helix_step,
            loops=self._shared["loops"],
            random_loop_orientations=self.random_loop_orientations,
        )
        return out_shared

    def run_init(self, sim):
        """Load the prepared conformation into the simulation object."""
        # do not use self.params!
        # only use parameters from config.action and config.shared
        sim.set_data(self._shared["initial_conformation"])
        return sim
@dataclass
class UniformHelicalLoopBrushConformation(SimAction):
    """Initialize the simulation with a uniform helical loop-brush.

    Exactly 0 or 2 of (helix_radius, helix_step, axial_compression_factor)
    may be specified; the remaining geometry follows from
    turn_length^2 = step^2 + (2*pi*radius)^2 with
    axial_compression_factor = turn_length / step.  When none are
    specified a degenerate (straight-axis) helix is used.
    """

    helix_radius: Optional[float] = None
    helix_step: Optional[float] = None
    axial_compression_factor: Optional[float] = None
    period_particles: Optional[float] = None
    loop_fold: str = "RW"
    chain_bond_length: float = 1.0

    _reads_shared = ['N', 'loops']
    _writes_shared = ['initial_conformation']

    def configure(self):
        out_shared = {}

        n_specified = sum(
            p is not None
            for p in (
                self.helix_radius,
                self.helix_step,
                self.axial_compression_factor,
            )
        )
        if n_specified not in [0, 2]:
            raise ValueError(
                "Please specify 0 or 2 out of these three parameters: "
                "radius, step and axis-to-backbone ratio"
            )

        if (self.helix_radius is not None) and (self.helix_step is not None):
            helix_radius = self.helix_radius
            helix_step = self.helix_step
        elif (self.axial_compression_factor is not None) and (
            self.helix_radius is not None
        ):
            helix_radius = self.helix_radius
            # compression = turn_length / step  =>  step = 2*pi*r / sqrt(c^2 - 1)
            # NOTE(review): a factor <= 1 yields nan here (np.sqrt of a
            # negative); consider validating upstream.
            helix_step = (
                2
                * np.pi
                * helix_radius
                / np.sqrt(self.axial_compression_factor ** 2 - 1)
            )
        elif (self.axial_compression_factor is not None) and (
            self.helix_step is not None
        ):
            helix_step = self.helix_step
            helix_turn_length = helix_step * self.axial_compression_factor
            helix_radius_squared = (
                (helix_turn_length ** 2 - helix_step ** 2) / np.pi / np.pi / 2.0 / 2.0
            )
            # ROBUSTNESS FIX: validate before the square root, as the sibling
            # HelicalLoopBrushConformation does; without this a factor <= 1
            # silently produced a complex-valued radius.
            if helix_radius_squared <= 0:
                raise ValueError(
                    "The provided values of helix_step and "
                    "axial_compression_factor are incompatible"
                )
            helix_radius = helix_radius_squared ** 0.5
        else:
            # Nothing specified: degenerate helix (straight axis).
            helix_radius = 0
            helix_step = int(1e9)

        # Persist the derived geometry for later inspection.
        self.helix_step = helix_step
        self.helix_radius = helix_radius

        out_shared[
            "initial_conformation"
        ] = conformations.make_uniform_helical_loopbrush(
            L=self._shared["N"],
            helix_radius=helix_radius,
            helix_step=helix_step,
            period_particles=self.period_particles,
            loops=self._shared["loops"],
            chain_bond_length=self.chain_bond_length,
            loop_fold=self.loop_fold,
        )
        return out_shared

    def run_init(self, sim):
        """Load the prepared conformation into the simulation object."""
        # do not use self.params!
        # only use parameters from config.action and config.shared
        sim.set_data(self._shared["initial_conformation"])
        return sim
@dataclass
class RWLoopBrushConformation(SimAction):
    """Initialize the simulation with a random-walk loop-brush conformation."""

    # Optional fixed position for the chain's end, as an (x, y, z) triple.
    end: Optional[Tuple[float, float, float]] = None

    _reads_shared = ['N', 'loops']
    _writes_shared = ['initial_conformation']

    def configure(self):
        """Build the random loop-brush and publish it as 'initial_conformation'."""
        conformation = conformations.make_random_loopbrush(
            L=self._shared["N"],
            loops=self._shared["loops"],
            end=self.end,
        )
        return {"initial_conformation": conformation}

    def run_init(self, sim):
        """Load the prepared conformation into the simulation object."""
        # Only configuration from config.action and config.shared is used here.
        sim.set_data(self._shared["initial_conformation"])
        return sim
| 30.667883 | 94 | 0.54183 | 909 | 8,403 | 4.740374 | 0.127613 | 0.119981 | 0.039684 | 0.072407 | 0.825946 | 0.812718 | 0.796008 | 0.766999 | 0.754235 | 0.740543 | 0 | 0.012756 | 0.384268 | 8,403 | 273 | 95 | 30.78022 | 0.820062 | 0.030108 | 0 | 0.67713 | 0 | 0 | 0.08622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026906 | false | 0 | 0.026906 | 0 | 0.174888 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8f8750b53792328053b4b47677de7da8b95ceb7b | 3,771 | py | Python | tests/test_command_new.py | vincentywdeng/kestrel-lang | 91e61c179bef433f5dc2e9fb6edf184d38ae6173 | [
"Apache-2.0"
] | 119 | 2021-06-04T15:40:10.000Z | 2022-03-24T09:56:53.000Z | tests/test_command_new.py | raymundl/kestrel-lang | aeae52dab0560415fdb7bd076eb439041030fbc3 | [
"Apache-2.0"
] | 76 | 2021-06-04T15:06:10.000Z | 2022-03-20T21:03:13.000Z | tests/test_command_new.py | raymundl/kestrel-lang | aeae52dab0560415fdb7bd076eb439041030fbc3 | [
"Apache-2.0"
] | 28 | 2021-06-05T07:27:15.000Z | 2022-01-20T18:43:47.000Z | import pytest
from kestrel.session import Session
from kestrel.exceptions import MissingEntityType
def test_new_with_full_json():
    # NEW with fully-typed JSON entities: one variable entry per entity.
    with Session() as s:
        stmt = """
newvar = NEW [ {"type": "process", "name": "cmd.exe", "pid": "123"}
, {"type": "process", "name": "explorer.exe", "pid": "99"}
]
"""
        s.execute(stmt)
        v = s.get_variable("newvar")
        assert len(v) == 2
        assert v[0]["type"] == "process"
        assert v[0]["name"] in ["cmd.exe", "explorer.exe"]
        # Entity order is not asserted; branch on whichever process is first.
        if v[0]["name"] == "cmd.exe":
            assert v[0]["pid"] == "123"
        else:
            assert v[0]["pid"] == "99"
def test_new_with_json_no_type():
    # Entity type given as a command argument ("NEW process") instead of a
    # "type" field inside each JSON object; the type is applied to every entity.
    with Session() as s:
        stmt = """
newvar = NEW process [ {"name": "cmd.exe", "pid": "123"}
, {"name": "explorer.exe", "pid": "99"}
]
"""
        s.execute(stmt)
        v = s.get_variable("newvar")
        assert len(v) == 2
        assert v[0]["type"] == "process"
        assert v[0]["name"] in ["cmd.exe", "explorer.exe"]
        # Entity order is not asserted; branch on whichever process is first.
        if v[0]["name"] == "cmd.exe":
            assert v[0]["pid"] == "123"
        else:
            assert v[0]["pid"] == "99"
def test_new_with_json_no_type_to_fail():
    # No entity type anywhere (neither inline "type" fields nor a command
    # argument) -> executing the statement must raise MissingEntityType.
    with Session() as s:
        stmt = """
newvar = NEW [ {"name": "cmd.exe", "pid": "123"}
, {"name": "explorer.exe", "pid": "99"}
]
"""
        with pytest.raises(MissingEntityType) as e:  # `e` unused; kept as-is
            s.execute(stmt)
def test_new_with_list_of_strings():
    """NEW <type> with a bare list of names builds one entity per name."""
    with Session() as s:
        stmt = (
            """newvar = NEW process ["cmd.exe", "explorer.exe", "google-chrome.exe"]"""
        )
        s.execute(stmt)
        entities = s.get_variable("newvar")
        assert len(entities) == 3
        assert entities[0]["type"] == "process"
        names = sorted(entity["name"] for entity in entities)
        assert names == [
            "cmd.exe",
            "explorer.exe",
            "google-chrome.exe",
        ]
def test_new_list_of_strings_without_type_to_fail():
    """A bare list of names without an entity type must raise."""
    with Session() as s:
        stmt = """newvar = NEW ["cmd.exe", "explorer.exe", "google-chrome.exe"]"""
        with pytest.raises(MissingEntityType):
            s.execute(stmt)
def test_new_with_int_pid():
    # pid supplied as JSON integers (not strings); the integer values are
    # preserved in the stored entities.
    with Session() as s:
        stmt = """
newvar = NEW [ {"type": "process", "name": "cmd.exe", "pid": 123}
, {"type": "process", "name": "explorer.exe", "pid": 99}
]
"""
        s.execute(stmt)
        v = s.get_variable("newvar")
        assert len(v) == 2
        assert v[0]["type"] == "process"
        assert v[0]["name"] in ["cmd.exe", "explorer.exe"]
        # Entity order is not asserted; branch on whichever process is first.
        if v[0]["name"] == "cmd.exe":
            assert v[0]["pid"] == 123
        else:
            assert v[0]["pid"] == 99
def test_new_with_missing_field():
    """A field missing from one entity is stored as None."""
    with Session() as s:
        stmt = """
newvar = NEW [ {"type": "process", "name": "cmd.exe", "pid": "123"}
, {"type": "process", "name": "explorer.exe"}
]
"""
        s.execute(stmt)
        # Sort by name so cmd.exe (the entity with a pid) is always first.
        v = sorted(s.get_variable("newvar"), key=lambda d: d["name"])
        assert len(v) == 2
        assert v[0]["type"] == "process"
        assert v[0]["name"] in ["cmd.exe", "explorer.exe"]
        assert v[0]["pid"] == "123"
        # FIX: compare to None with `is`, not `==` (PEP 8 / E711).
        assert v[1]["pid"] is None
def test_new_with_missing_field_first():
    """A field missing from the *first* entity is stored as None."""
    with Session() as s:
        stmt = """
newvar = NEW [ {"type": "process", "name": "cmd.exe"}
, {"type": "process", "name": "explorer.exe", "pid": "99"}
]
"""
        s.execute(stmt)
        # Sort by name so cmd.exe (the entity without a pid) is always first.
        v = sorted(s.get_variable("newvar"), key=lambda d: d["name"])
        assert len(v) == 2
        assert v[0]["type"] == "process"
        assert v[0]["name"] in ["cmd.exe", "explorer.exe"]
        # FIX: compare to None with `is`, not `==` (PEP 8 / E711).
        assert v[0]["pid"] is None
        assert v[1]["pid"] == "99"
| 30.168 | 87 | 0.490321 | 477 | 3,771 | 3.775681 | 0.127883 | 0.024431 | 0.084398 | 0.062188 | 0.881177 | 0.881177 | 0.844531 | 0.791227 | 0.75347 | 0.75347 | 0 | 0.028714 | 0.307346 | 3,771 | 124 | 88 | 30.41129 | 0.660796 | 0 | 0 | 0.650943 | 0 | 0.04717 | 0.340097 | 0 | 0 | 0 | 0 | 0 | 0.264151 | 1 | 0.075472 | false | 0 | 0.028302 | 0 | 0.103774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8fc73cf98231465e9cad6ec8294f8f68f488b01c | 250 | py | Python | snmpagent_unity/unity_impl/HostName.py | factioninc/snmp-unity-agent | 3525dc0fac60d1c784dcdd7c41693544bcbef843 | [
"Apache-2.0"
] | 2 | 2019-03-01T11:14:59.000Z | 2019-10-02T17:47:59.000Z | snmpagent_unity/unity_impl/HostName.py | factioninc/snmp-unity-agent | 3525dc0fac60d1c784dcdd7c41693544bcbef843 | [
"Apache-2.0"
] | 2 | 2019-03-01T11:26:29.000Z | 2019-10-11T18:56:54.000Z | snmpagent_unity/unity_impl/HostName.py | factioninc/snmp-unity-agent | 3525dc0fac60d1c784dcdd7c41693544bcbef843 | [
"Apache-2.0"
] | 1 | 2019-10-03T21:09:17.000Z | 2019-10-03T21:09:17.000Z | class HostName(object):
def read_get(self, name, idx_name, unity_client):
return unity_client.get_host_name(idx_name)
class HostNameColumn(object):
def get_idx(self, name, idx, unity_client):
return unity_client.get_hosts()
| 27.777778 | 53 | 0.728 | 36 | 250 | 4.75 | 0.416667 | 0.25731 | 0.128655 | 0.25731 | 0.362573 | 0.362573 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176 | 250 | 8 | 54 | 31.25 | 0.830097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.333333 | 1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
8f0d48ffc9f20c633c0780565500b0381fdf1dd1 | 8,528 | py | Python | test/test_cli.py | art-of-dom/HashIt | 197b54c35f61e9322e60ee3957496174951285b4 | [
"MIT"
] | null | null | null | test/test_cli.py | art-of-dom/HashIt | 197b54c35f61e9322e60ee3957496174951285b4 | [
"MIT"
] | 4 | 2021-07-19T07:16:31.000Z | 2021-08-25T04:35:51.000Z | test/test_cli.py | art-of-dom/HashIt | 197b54c35f61e9322e60ee3957496174951285b4 | [
"MIT"
] | null | null | null | '''Tests for the cli interface'''
from __future__ import absolute_import
import unittest
import sys
from nose.tools import assert_equals
from hashit.cli.cli import cli_main
from hashit.cli.cli_status import CliStatus
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
# pylint: disable=no-self-use
# pylint: disable=bad-continuation
class TestCLI(unittest.TestCase):
    """Behavioural tests for cli_main.

    Covers argument validation, default hashing of file/ascii/hex input
    (forward and reversed), hash verification (direct and brute-force)
    and hash generation.  Output checks rely on nose's stdout capture
    (sys.stdout.getvalue()).
    """

    def setUp(self):
        # Baseline docopt-style argument dict; each test overrides entries.
        self.args = {
            '--hash-type': None,
            '--generate': None,
            '--verify': None,
            '-r': False,
            '-f': False,
            '-a': False,
            '-x': False,
            '-b': False,
            '<input>': None
        }

    def tearDown(self):
        pass

    # helpers (factor out the set-args/run/assert boilerplate that was
    # repeated verbatim in every test)

    def _run(self, overrides=None):
        """Merge `overrides` into the baseline args and invoke cli_main."""
        if overrides:
            self.args.update(overrides)
        return cli_main(self.args)

    def _assert_status(self, expected_status, overrides=None):
        """Assert cli_main exits with the value of the given CliStatus."""
        assert_equals(expected_status.value, self._run(overrides))

    def _assert_printed(self, expected):
        """Assert the captured stdout equals `expected` once stripped."""
        self.assertEqual(expected, sys.stdout.getvalue().strip())

    # arg checks
    def test_cil_retruns_error_if_no_args(self):
        assert_equals(CliStatus.ARG_INVALID.value, cli_main(None))

    def test_cil_retruns_success_no_vaild_args(self):
        self._assert_status(CliStatus.SUCCESS)

    # arg checks hash-type check
    def test_cil_retruns_success_known_hash_uppercase(self):
        self._assert_status(CliStatus.SUCCESS, {
            '--hash-type': 'CRC32', '-x': True,
            '<input>': '010203040506070809'})

    def test_cil_retruns_success_known_hash_lowercase(self):
        self._assert_status(CliStatus.SUCCESS, {
            '--hash-type': 'crc32', '-x': True,
            '<input>': '010203040506070809'})

    def test_cil_retruns_success_known_hash_mixedcase(self):
        self._assert_status(CliStatus.SUCCESS, {
            '--hash-type': 'cRc32', '-x': True,
            '<input>': '010203040506070809'})

    def test_cil_retruns_error_unknown_hash(self):
        self._assert_status(CliStatus.ARG_INVALID, {'--hash-type': 'foobar'})
        self._assert_printed("Unknown hash type foobar")

    # base hash / base hash-type
    def test_cil_uses_default_hash_on_file(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-f': True, '<input>': 'test/support/example.bin'})
        self._assert_printed("input: test/support/example.bin | hash: BAD3")

    def test_cil_uses_default_hash_on_ascii(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-a': True, '<input>': '123456789'})
        self._assert_printed("input: 123456789 | hash: BB3D")

    def test_cil_uses_default_hash_on_hex(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-x': True, '<input>': '010203040506070809'})
        self._assert_printed("input: 010203040506070809 | hash: 4204")

    def test_cil_uses_default_hash_on_file_reverse(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-f': True, '-r': True, '<input>': 'test/support/example.bin'})
        self._assert_printed("input: test/support/example.bin | hash: EE93")

    def test_cil_uses_default_hash_on_ascii_reverse(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-a': True, '-r': True, '<input>': '123456789'})
        self._assert_printed("input: 123456789 | hash: 39D9")

    def test_cil_uses_default_hash_on_hex_reverse(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-x': True, '-r': True, '<input>': '010203040506070809'})
        self._assert_printed("input: 010203040506070809 | hash: C0E0")

    # verify hash
    def test_cil_verify_bad_hash_size(self):
        self._assert_status(CliStatus.ARG_INVALID, {
            '-f': True, '<input>': 'test/support/example.bin',
            '--verify': '0BAD3'})

    def test_cil_verify_good_result_returns_zero_file(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-f': True, '<input>': 'test/support/example.bin',
            '--verify': 'BAD3'})

    def test_cil_verify_bad_result_returns_error_file(self):
        self._assert_status(CliStatus.VALIDATION_ERROR, {
            '-f': True, '<input>': 'test/support/example.bin',
            '--verify': 'F00D'})

    def test_cil_verify_good_result_returns_zero_ascii(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-a': True, '<input>': '123456789', '--verify': 'BB3D'})

    def test_cil_verify_bad_result_returns_error_ascii(self):
        self._assert_status(CliStatus.VALIDATION_ERROR, {
            '-a': True, '<input>': '123456789', '--verify': 'F00D'})

    def test_cil_verify_good_result_returns_zero_hex(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-x': True, '<input>': '010203040506070809', '--verify': '4204'})

    def test_cil_verify_bad_result_returns_error_hex(self):
        self._assert_status(CliStatus.VALIDATION_ERROR, {
            '-x': True, '<input>': '010203040506070809', '--verify': 'F00D'})

    # verify hash brute force
    def test_cil_verify_brute_force_good_result_returns_zero_file(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-f': True, '-b': True,
            '<input>': 'test/support/example.bin', '--verify': 'BAD3'})

    def test_cil_verify_brute_force_bad_result_returns_error_file(self):
        self._assert_status(CliStatus.VALIDATION_ERROR, {
            '-f': True, '-b': True,
            '<input>': 'test/support/example.bin', '--verify': '000D'})

    def test_cil_verify_brute_force_good_result_returns_zero_ascii(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-a': True, '-b': True,
            '<input>': '123456789', '--verify': 'BB3D'})

    def test_cil_verify_brute_force_bad_result_returns_error_ascii(self):
        self._assert_status(CliStatus.VALIDATION_ERROR, {
            '-a': True, '-b': True,
            '<input>': '123456789', '--verify': 'F00D'})

    def test_cil_verify_brute_force_good_result_returns_zero_hex(self):
        self._assert_status(CliStatus.SUCCESS, {
            '-x': True, '-b': True,
            '<input>': '010203040506070809', '--verify': '4204'})

    def test_cil_verify_brute_force_bad_result_returns_error_hex(self):
        self._assert_status(CliStatus.VALIDATION_ERROR, {
            '-x': True, '-b': True,
            '<input>': '010203040506070809', '--verify': 'F00D'})

    # generate hash
    def test_cil_generate_bad_hash(self):
        self._assert_status(CliStatus.ARG_INVALID, {'--generate': '0BAD3'})

    def test_cil_generate_good_hash_returns_success(self):
        self._assert_status(CliStatus.SUCCESS, {'--generate': 'BAD3'})

    def test_cil_generate_unhandled_hash_generation_error(self):
        self._assert_status(CliStatus.GENERATION_ERROR, {
            '--hash-type': 'CRC32', '--generate': 'BAD3BAD3'})
| 39.119266 | 76 | 0.624765 | 1,038 | 8,528 | 4.876686 | 0.102119 | 0.161201 | 0.073489 | 0.085342 | 0.835638 | 0.810747 | 0.805808 | 0.788621 | 0.759779 | 0.747136 | 0 | 0.049517 | 0.235108 | 8,528 | 217 | 77 | 39.299539 | 0.726506 | 0.031309 | 0 | 0.575581 | 0 | 0 | 0.142736 | 0.026195 | 0 | 0 | 0 | 0 | 0.209302 | 1 | 0.174419 | false | 0.005814 | 0.034884 | 0 | 0.215116 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8f55e4bdd0e5a2bd884b15c4ec96feeff80936a6 | 40 | py | Python | Data Scientist Career Path/3. Python Fundamentals/7. Python Strings/1. Intro to String/9. iterate string.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | [
"MIT"
] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z | Data Scientist Career Path/3. Python Fundamentals/7. Python Strings/1. Intro to String/9. iterate string.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Data Scientist Career Path/3. Python Fundamentals/7. Python Strings/1. Intro to String/9. iterate string.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z | def get_length(text):
return len(text) | 20 | 21 | 0.75 | 7 | 40 | 4.142857 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 40 | 2 | 22 | 20 | 0.828571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
56e03707010aaf64816d2d27490100d84cf7a49d | 31,260 | py | Python | experiences/tests/unit_test_interactors.py | jordifierro/abidria-api | d7689783bf23fbe43c395b07572a1380654652cd | [
"MIT"
] | 93 | 2017-08-12T09:41:21.000Z | 2022-03-19T20:04:41.000Z | experiences/tests/unit_test_interactors.py | jordifierro/abidria-api | d7689783bf23fbe43c395b07572a1380654652cd | [
"MIT"
] | 1 | 2017-10-09T16:49:10.000Z | 2017-10-13T18:07:29.000Z | experiences/tests/unit_test_interactors.py | jordifierro/abidria-api | d7689783bf23fbe43c395b07572a1380654652cd | [
"MIT"
] | 25 | 2017-08-18T04:31:23.000Z | 2022-02-20T20:31:47.000Z | from mock import Mock
from abidria.exceptions import InvalidEntityException, EntityDoesNotExistException, NoLoggedException, \
NoPermissionException, ConflictException
from experiences.entities import Experience
from experiences.interactors import GetAllExperiencesInteractor, CreateNewExperienceInteractor, \
ModifyExperienceInteractor, UploadExperiencePictureInteractor, SaveUnsaveExperienceInteractor
class TestGetAllExperiences:
def test_returns_repo_response(self):
TestGetAllExperiences.ScenarioMaker() \
.given_a_logged_person_id() \
.given_mine_true() \
.given_saved_true() \
.given_a_permission_validator_that_returns_true() \
.given_an_experience() \
.given_another_experience() \
.given_a_repo_that_returns_both_experiences() \
.when_interactor_is_executed() \
.then_validate_permissions_should_be_called_with_logged_person_id() \
.then_result_should_be_both_experiences()
def test_no_logged_raises_exception(self):
TestGetAllExperiences.ScenarioMaker() \
.given_a_permission_validator_that_raises_exception() \
.when_interactor_is_executed() \
.then_validate_permissions_should_be_called_with_logged_person_id() \
.then_should_raise_no_logged_exception()
class ScenarioMaker:
def __init__(self):
self.logged_person_id = None
self.experience_repo = None
self.permissions_validator = None
self.mine = None
self.saved = None
def given_a_logged_person_id(self):
self.logged_person_id = '0'
return self
def given_mine_true(self):
self.mine = True
return self
def given_saved_true(self):
self.saved = True
return self
def given_an_experience(self):
self.experience_a = Experience(id=1, title='A', description='some',
picture=None, author_id='1', author_username='usr')
return self
def given_another_experience(self):
self.experience_b = Experience(id=2, title='B', description='other',
picture=None, author_id='1', author_username='usr')
return self
def given_a_permission_validator_that_returns_true(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.return_value = True
return self
def given_a_permission_validator_that_raises_exception(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.side_effect = NoLoggedException()
return self
def given_a_repo_that_returns_both_experiences(self):
self.experience_repo = Mock()
self.experience_repo.get_all_experiences.return_value = [self.experience_a, self.experience_b]
return self
def when_interactor_is_executed(self):
try:
self.response = GetAllExperiencesInteractor(experience_repo=self.experience_repo,
permissions_validator=self.permissions_validator) \
.set_params(mine=self.mine, saved=self.saved, logged_person_id=self.logged_person_id).execute()
except Exception as e:
self.error = e
return self
def then_result_should_be_both_experiences(self):
assert self.response == [self.experience_a, self.experience_b]
return self
def then_should_call_get_all_experience_with_logged_person_id_and_mine_params(self):
self.experience_repo.get_all_experiences.assert_called_once_with(mine=self.mine, saved=self.saved,
logged_person_id=self.logged_person_id)
def then_validate_permissions_should_be_called_with_logged_person_id(self):
self.permissions_validator.validate_permissions \
.assert_called_once_with(logged_person_id=self.logged_person_id)
return self
def then_should_raise_no_logged_exception(self):
assert type(self.error) is NoLoggedException
return self
class TestCreateNewExperience:
def test_creates_and_returns_experience(self):
TestCreateNewExperience.ScenarioMaker() \
.given_a_logged_person_id() \
.given_an_experience() \
.given_an_experience_repo_that_returns_that_experience_on_create() \
.given_a_permissions_validator_that_returns_true() \
.given_a_title() \
.given_a_description() \
.given_an_author_id() \
.given_an_experience_validator_that_accepts_them() \
.when_execute_interactor() \
.then_result_should_be_the_experience() \
.then_should_validate_permissions() \
.then_repo_create_method_should_be_called_with_params() \
.then_params_should_be_validated()
def test_invalid_experience_returns_error_and_doesnt_create_it(self):
TestCreateNewExperience.ScenarioMaker() \
.given_a_logged_person_id() \
.given_an_experience() \
.given_an_experience_repo() \
.given_a_title() \
.given_a_description() \
.given_an_author_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience_validator_that_raises_invalid_entity_exception() \
.when_execute_interactor() \
.then_should_raise_invalid_entity_exception() \
.then_should_validate_permissions() \
.then_params_should_be_validated() \
.then_repo_create_method_should_not_be_called()
def test_no_permissions_raises_exception(self):
TestCreateNewExperience.ScenarioMaker() \
.given_a_logged_person_id() \
.given_an_experience() \
.given_an_experience_repo() \
.given_a_title() \
.given_a_description() \
.given_an_author_id() \
.given_a_permissions_validator_that_raises_no_permission_exception() \
.given_an_experience_validator_that_raises_invalid_entity_exception() \
.when_execute_interactor() \
.then_should_raise_no_permissions_exception() \
.then_should_validate_permissions() \
.then_repo_create_method_should_not_be_called()
class ScenarioMaker:
def __init__(self):
self.author_id = None
def given_a_logged_person_id(self):
self.logged_person_id = '5'
return self
def given_an_experience(self):
self.experience = Experience(title='Title', description='', author_id='3')
return self
def given_an_experience_repo_that_returns_that_experience_on_create(self):
self.experience_repo = Mock()
self.experience_repo.create_experience.return_value = self.experience
return self
def given_a_title(self):
self.title = 'Title'
return self
def given_a_description(self):
self.description = 'desc'
return self
def given_an_author_id(self):
self.author_id = '4'
return self
def given_an_experience_repo(self):
self.experience_repo = Mock()
return self
def given_a_permissions_validator_that_returns_true(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.return_value = True
return self
def given_a_permissions_validator_that_raises_no_permission_exception(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.side_effect = NoPermissionException()
return self
def given_an_experience_validator_that_accepts_them(self):
self.experience_validator = Mock()
self.experience_validator.validate_experience.return_value = True
return self
def given_an_experience_validator_that_raises_invalid_entity_exception(self):
self.experience_validator = Mock()
self.experience_validator.validate_experience.side_effect = \
InvalidEntityException(source='title', code='empty_attribute',
message='Title must be between 1 and 20 chars')
return self
def when_execute_interactor(self):
try:
self.response = CreateNewExperienceInteractor(self.experience_repo,
self.experience_validator, self.permissions_validator) \
.set_params(title=self.title, description=self.description,
logged_person_id=self.logged_person_id).execute()
except Exception as e:
self.error = e
return self
def then_result_should_be_the_experience(self):
assert self.response == self.experience
return self
def then_should_raise_invalid_entity_exception(self):
assert type(self.error) is InvalidEntityException
assert self.error.source == 'title'
assert self.error.code == 'empty_attribute'
assert str(self.error) == 'Title must be between 1 and 20 chars'
return self
def then_repo_create_method_should_be_called_with_params(self):
experience_params = Experience(title=self.title, description=self.description,
author_id=self.logged_person_id)
self.experience_repo.create_experience.assert_called_once_with(experience_params)
return self
def then_repo_create_method_should_not_be_called(self):
self.experience_repo.create_experience.assert_not_called()
return self
def then_params_should_be_validated(self):
experience_params = Experience(title=self.title, description=self.description,
author_id=self.logged_person_id)
self.experience_validator.validate_experience.assert_called_once_with(experience_params)
return self
def then_should_validate_permissions(self):
self.permissions_validator.validate_permissions \
.assert_called_once_with(logged_person_id=self.logged_person_id, wants_to_create_content=True)
return self
def then_should_raise_no_permissions_exception(self):
assert type(self.error) is NoPermissionException
return self
class TestModifyExperience:
def test_gets_modifies_not_none_params_and_returns_experience(self):
TestModifyExperience.ScenarioMaker() \
.given_an_experience() \
.given_an_id() \
.given_a_description() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_another_experience_updated_with_that_params() \
.given_an_experience_repo_that_returns_both_experiences_on_get_and_update() \
.given_an_experience_validator_that_accepts() \
.when_interactor_is_executed() \
.then_result_should_be_second_experience() \
.then_should_validate_permissions() \
.then_get_experience_should_be_called_with_id_and_logged_person_id() \
.then_experience_validation_should_be_called_with_updated_experience() \
.then_update_experience_should_be_called_with_updated_experience()
def test_invalid_experience_returns_error_and_doesnt_update_it(self):
TestModifyExperience.ScenarioMaker() \
.given_an_id() \
.given_a_description() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience() \
.given_another_experience_updated_with_that_params() \
.given_an_experience_repo_that_returns_that_experience_on_get() \
.given_an_experience_validator_that_raises_invalid_entity_exception() \
.when_interactor_is_executed() \
.then_should_raise_invalid_entity_exception() \
.then_should_validate_permissions() \
.then_get_experience_should_be_called_with_id_and_logged_person_id() \
.then_experience_validation_should_be_called_with_updated_experience() \
.then_update_experience_should_be_not_called()
def test_unexistent_experience_returns_entity_does_not_exist_error(self):
TestModifyExperience.ScenarioMaker() \
.given_an_id() \
.given_a_description() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience_repo_that_raises_entity_does_not_exist() \
.given_an_experience_validator() \
.when_interactor_is_executed() \
.then_should_raise_entity_does_not_exists() \
.then_should_validate_permissions() \
.then_get_experience_should_be_called_with_id_and_logged_person_id() \
.then_update_experience_should_be_not_called()
def test_no_permissions_raises_expcetion(self):
TestModifyExperience.ScenarioMaker() \
.given_an_id() \
.given_a_description() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_raises_no_permissions_exception() \
.given_an_experience_repo_that_raises_entity_does_not_exist() \
.given_an_experience_validator() \
.when_interactor_is_executed() \
.then_should_raise_no_permissions_exception() \
.then_should_validate_permissions() \
.then_update_experience_should_be_not_called()
class ScenarioMaker:
def given_an_experience(self):
self.experience = Experience(id='1', title='Title', description='some',
author_id='2', author_username='usr')
return self
def given_an_id(self):
self.id = '1'
return self
def given_a_description(self):
self.description = ''
return self
def given_a_logged_person_id(self):
self.logged_person_id = '2'
return self
def given_a_permissions_validator_that_returns_true(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.return_value = True
return self
def given_a_permissions_validator_that_raises_no_permissions_exception(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.side_effect = NoPermissionException()
return self
def given_another_experience_updated_with_that_params(self):
self.updated_experience = Experience(id=self.experience.id, title=self.experience.title,
description=self.description, author_id=self.experience.author_id,
author_username=self.experience.author_username)
return self
def given_an_experience_repo_that_returns_both_experiences_on_get_and_update(self):
self.experience_repo = Mock()
self.experience_repo.get_experience.return_value = self.experience
self.experience_repo.update_experience.return_value = self.updated_experience
return self
def given_an_experience_repo_that_returns_that_experience_on_get(self):
self.experience_repo = Mock()
self.experience_repo.get_experience.return_value = self.experience
return self
def given_an_experience_repo_that_raises_entity_does_not_exist(self):
self.experience_repo = Mock()
self.experience_repo.get_experience.side_effect = EntityDoesNotExistException()
return self
def given_an_experience_validator(self):
self.experience_validator = Mock()
return self
def given_an_experience_validator_that_accepts(self):
self.experience_validator = Mock()
self.experience_validator.validate_experience.return_value = True
return self
def given_an_experience_validator_that_raises_invalid_entity_exception(self):
self.experience_validator = Mock()
self.experience_validator.validate_experience.side_effect = \
InvalidEntityException(source='title', code='empty_attribute',
message='Title must be between 1 and 20 chars')
return self
def when_interactor_is_executed(self):
try:
self.result = ModifyExperienceInteractor(self.experience_repo, self.experience_validator,
self.permissions_validator) \
.set_params(id=self.id, title=None, description=self.description,
logged_person_id=self.logged_person_id).execute()
except Exception as e:
print(e)
self.error = e
return self
def then_result_should_be_second_experience(self):
assert self.result == self.updated_experience
return self
def then_get_experience_should_be_called_with_id_and_logged_person_id(self):
self.experience_repo.get_experience \
.assert_called_once_with(id=self.id, logged_person_id=self.logged_person_id)
return self
def then_experience_validation_should_be_called_with_updated_experience(self):
self.experience_validator.validate_experience.assert_called_once_with(self.updated_experience)
return self
def then_update_experience_should_be_called_with_updated_experience(self):
self.experience_validator.validate_experience.assert_called_once_with(self.updated_experience)
return self
def then_update_experience_should_be_not_called(self):
self.experience_repo.updated_experience.assert_not_called()
return self
def then_should_raise_invalid_entity_exception(self):
assert type(self.error) is InvalidEntityException
assert self.error.source == 'title'
assert self.error.code == 'empty_attribute'
assert str(self.error) == 'Title must be between 1 and 20 chars'
return self
def then_should_raise_entity_does_not_exists(self):
assert type(self.error) is EntityDoesNotExistException
return self
def then_should_validate_permissions(self):
self.permissions_validator.validate_permissions \
.assert_called_once_with(logged_person_id=self.logged_person_id,
has_permissions_to_modify_experience=self.id)
return self
def then_should_raise_no_permissions_exception(self):
assert type(self.error) is NoPermissionException
return self
class TestUploadExperiencePictureInteractor:
def test_validates_permissions_and_attach_picture_to_experience(self):
TestUploadExperiencePictureInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience() \
.given_an_experience_repo_that_returns_that_experience_on_attach() \
.given_an_experience_id() \
.given_a_picture() \
.when_interactor_is_executed() \
.then_should_validate_permissions() \
.then_should_call_repo_attach_picture_to_experience() \
.then_should_return_experience()
def test_invalid_permissions_doesnt_attach_picture(self):
TestUploadExperiencePictureInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_raises_no_permissions_exception() \
.given_an_experience_repo() \
.given_an_experience_id() \
.given_a_picture() \
.when_interactor_is_executed() \
.then_should_validate_permissions() \
.then_should_not_call_repo_attach_picture_to_experience() \
.then_should_raise_no_permissions_exception()
class ScenarioMaker:
def given_a_logged_person_id(self):
self.logged_person_id = '9'
return self
def given_a_permissions_validator_that_returns_true(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.return_value = True
return self
def given_a_permissions_validator_that_raises_no_permissions_exception(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.side_effect = NoPermissionException
return self
def given_an_experience(self):
self.experience = Experience(id='2', title='T', description='s', author_id='4')
return self
def given_an_experience_repo_that_returns_that_experience_on_attach(self):
self.experience_repo = Mock()
self.experience_repo.attach_picture_to_experience.return_value = self.experience
return self
def given_an_experience_repo(self):
self.experience_repo = Mock()
return self
def given_an_experience_id(self):
self.experience_id = '5'
return self
def given_a_picture(self):
self.picture = 'pic'
return self
def when_interactor_is_executed(self):
try:
interactor = UploadExperiencePictureInteractor(experience_repo=self.experience_repo,
permissions_validator=self.permissions_validator)
self.result = interactor.set_params(experience_id=self.experience_id, picture=self.picture,
logged_person_id=self.logged_person_id).execute()
except Exception as e:
self.error = e
return self
def then_should_validate_permissions(self):
self.permissions_validator.validate_permissions \
.assert_called_once_with(logged_person_id=self.logged_person_id,
has_permissions_to_modify_experience=self.experience_id)
return self
def then_should_call_repo_attach_picture_to_experience(self):
self.experience_repo.attach_picture_to_experience.assert_called_once_with(experience_id=self.experience_id,
picture=self.picture)
return self
def then_should_return_experience(self):
assert self.result == self.experience
return self
def then_should_not_call_repo_attach_picture_to_experience(self):
self.experience_repo.attach_picture_to_experience.assert_not_called()
return self
def then_should_raise_no_permissions_exception(self):
assert type(self.error) is NoPermissionException
return self
class TestSaveUnsaveExperienceInteractor:
def test_unauthorized_raises_no_logged_exception(self):
TestSaveUnsaveExperienceInteractor.ScenarioMaker() \
.given_a_permissions_validator_that_raises_no_permissions_exception() \
.given_an_experience_repo_that_returns_true_on_save_and_others_experience() \
.when_interactor_is_executed(action=SaveUnsaveExperienceInteractor.Action.SAVE) \
.then_should_not_call_repo_save_experience() \
.then_should_raise_no_logged_exception()
def test_save_you_own_experience_raises_conflict_exception(self):
TestSaveUnsaveExperienceInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience_id() \
.given_an_experience_repo_that_returns_own_experience() \
.when_interactor_is_executed(action=SaveUnsaveExperienceInteractor.Action.SAVE) \
.then_should_validate_permissions() \
.then_should_call_repo_get_experience_with_experience_id() \
.then_should_not_call_repo_save_experience() \
.then_should_raise_conflict_exception()
def test_save_calls_repo_save_and_returns_true(self):
TestSaveUnsaveExperienceInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience_id() \
.given_an_experience_repo_that_returns_true_on_save_and_others_experience() \
.when_interactor_is_executed(action=SaveUnsaveExperienceInteractor.Action.SAVE) \
.then_should_validate_permissions() \
.then_should_call_repo_get_experience_with_experience_id() \
.then_should_call_repo_save_experience_with_person_id() \
.then_should_return_true()
def test_unsave_calls_repo_unsave_and_returns_true(self):
TestSaveUnsaveExperienceInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_permissions_validator_that_returns_true() \
.given_an_experience_id() \
.given_an_experience_repo_that_returns_true_on_save_and_others_experience() \
.when_interactor_is_executed(action=SaveUnsaveExperienceInteractor.Action.UNSAVE) \
.then_should_validate_permissions() \
.then_should_call_repo_get_experience_with_experience_id() \
.then_should_call_repo_unsave_experience_with_person_id() \
class ScenarioMaker:
def __init__(self):
self.experience_id = None
self.logged_person_id = None
def given_a_logged_person_id(self):
self.logged_person_id = '9'
return self
def given_a_permissions_validator_that_returns_true(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.return_value = True
return self
def given_a_permissions_validator_that_raises_no_permissions_exception(self):
self.permissions_validator = Mock()
self.permissions_validator.validate_permissions.side_effect = NoLoggedException
return self
def given_an_experience_repo_that_returns_true_on_save_and_others_experience(self):
others_experience = Experience(id='4', title='t', description='d', author_id='3')
self.experience_repo = Mock()
self.experience_repo.save_experience.return_value = True
self.experience_repo.get_experience.return_value = others_experience
return self
def given_an_experience_repo_that_returns_own_experience(self):
others_experience = Experience(id='4', title='t', description='d', author_id=self.logged_person_id)
self.experience_repo = Mock()
self.experience_repo.get_experience.return_value = others_experience
return self
def given_an_experience_repo_that_returns_true_on_unsave(self):
self.experience_repo = Mock()
self.experience_repo.unsave_experience.return_value = True
return self
def given_an_experience_id(self):
self.experience_id = '5'
return self
def when_interactor_is_executed(self, action):
try:
interactor = SaveUnsaveExperienceInteractor(experience_repo=self.experience_repo,
permissions_validator=self.permissions_validator)
self.result = interactor.set_params(action=action, experience_id=self.experience_id,
logged_person_id=self.logged_person_id).execute()
except Exception as e:
self.error = e
return self
def then_should_validate_permissions(self):
self.permissions_validator.validate_permissions \
.assert_called_once_with(logged_person_id=self.logged_person_id)
return self
def then_should_call_repo_get_experience_with_experience_id(self):
self.experience_repo.get_experience.assert_called_once_with(id=self.experience_id)
return self
def then_should_call_repo_save_experience_with_person_id(self):
self.experience_repo.save_experience.assert_called_once_with(experience_id=self.experience_id,
person_id=self.logged_person_id)
return self
def then_should_call_repo_unsave_experience_with_person_id(self):
self.experience_repo.unsave_experience.assert_called_once_with(experience_id=self.experience_id,
person_id=self.logged_person_id)
return self
def then_should_return_true(self):
assert self.result is True
return self
def then_should_not_call_repo_save_experience(self):
self.experience_repo.save_experience.assert_not_called()
return self
def then_should_raise_no_logged_exception(self):
assert type(self.error) is NoLoggedException
return self
def then_should_raise_conflict_exception(self):
assert type(self.error) is ConflictException
assert self.error.source == 'experience'
assert self.error.code == 'self_save'
assert str(self.error) == 'You cannot save your own experiences'
return self
| 46.379822 | 119 | 0.64936 | 3,219 | 31,260 | 5.795278 | 0.048773 | 0.063039 | 0.055052 | 0.040525 | 0.880622 | 0.85398 | 0.811632 | 0.775181 | 0.739373 | 0.685982 | 0 | 0.001487 | 0.289923 | 31,260 | 673 | 120 | 46.448737 | 0.838942 | 0 | 0 | 0.667269 | 0 | 0 | 0.011196 | 0 | 0 | 0 | 0 | 0 | 0.075949 | 1 | 0.186257 | false | 0 | 0.007233 | 0 | 0.363472 | 0.001808 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.