hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1722d7bd59551285b9b6398b2f0e801fc249803e
| 36
|
py
|
Python
|
build/lib/annotation_utils/old/util/checks/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 13
|
2020-01-28T04:45:22.000Z
|
2022-03-10T03:35:49.000Z
|
build/lib/annotation_utils/old/util/checks/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 4
|
2020-02-14T08:56:03.000Z
|
2021-05-21T10:38:30.000Z
|
build/lib/annotation_utils/old/util/checks/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 7
|
2020-04-10T07:56:25.000Z
|
2021-12-17T11:19:23.000Z
|
from .checks import check_shape_type
| 36
| 36
| 0.888889
| 6
| 36
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
17483bae28c2421ff4d530529899de72f0aa2372
| 2,864
|
py
|
Python
|
signmob/collection/migrations/0002_auto_20190623_1521.py
|
okfde/signmob
|
7bc4c2eff988287c49ed2aea20b6d18f5461e3cc
|
[
"MIT"
] | 2
|
2019-07-08T15:49:16.000Z
|
2019-07-11T20:38:59.000Z
|
signmob/collection/migrations/0002_auto_20190623_1521.py
|
okfde/signmob
|
7bc4c2eff988287c49ed2aea20b6d18f5461e3cc
|
[
"MIT"
] | 2
|
2020-07-17T17:27:02.000Z
|
2021-05-10T00:16:53.000Z
|
signmob/collection/migrations/0002_auto_20190623_1521.py
|
okfde/signmob
|
7bc4c2eff988287c49ed2aea20b6d18f5461e3cc
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-23 13:21
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("schedule", "0011_event_calendar_not_null"),
("collection", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="collectionevent",
name="description",
field=models.TextField(blank=True),
),
migrations.AddField(
model_name="collectionevent",
name="event_occurence",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="schedule.Occurrence",
),
),
migrations.AddField(
model_name="collectionevent",
name="geo",
field=django.contrib.gis.db.models.fields.PointField(
blank=True, geography=True, null=True, srid=4326
),
),
migrations.AddField(
model_name="collectionevent",
name="name",
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name="collectioneventmember",
name="end",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="collectioneventmember",
name="start",
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name="collectiongroup",
name="calendar",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="schedule.Calendar",
),
),
migrations.AddField(
model_name="collectiongroup",
name="name",
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name="collectionlocation",
name="description",
field=models.TextField(blank=True),
),
migrations.AddField(
model_name="collectionlocation",
name="geo",
field=django.contrib.gis.db.models.fields.PointField(
blank=True, geography=True, null=True, srid=4326
),
),
migrations.AddField(
model_name="collectionlocation",
name="name",
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name="collectionlocation",
name="start",
field=models.DateField(null=True),
),
]
| 31.472527
| 65
| 0.545042
| 251
| 2,864
| 6.119522
| 0.250996
| 0.140625
| 0.179688
| 0.210938
| 0.810547
| 0.810547
| 0.61849
| 0.61849
| 0.61849
| 0.61849
| 0
| 0.021288
| 0.343925
| 2,864
| 90
| 66
| 31.822222
| 0.796168
| 0.015712
| 0
| 0.809524
| 1
| 0
| 0.132765
| 0.024849
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
17625bb145d191c3d2942ec3c2f333df083dab7c
| 81
|
py
|
Python
|
clinical_study_questionnaire/api.py
|
somushiv/clinical_study_questionnaire
|
309054dd4a27d8814f477517cba80f26f464b648
|
[
"MIT"
] | null | null | null |
clinical_study_questionnaire/api.py
|
somushiv/clinical_study_questionnaire
|
309054dd4a27d8814f477517cba80f26f464b648
|
[
"MIT"
] | null | null | null |
clinical_study_questionnaire/api.py
|
somushiv/clinical_study_questionnaire
|
309054dd4a27d8814f477517cba80f26f464b648
|
[
"MIT"
] | null | null | null |
import frappe
@frappe.whitelist()
def testapi():
return "Clinical Test Api"
| 13.5
| 30
| 0.716049
| 10
| 81
| 5.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17284
| 81
| 5
| 31
| 16.2
| 0.865672
| 0
| 0
| 0
| 0
| 0
| 0.209877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
176540deca60d672350520a6b7dba9466859f3d3
| 26,963
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/messagedefinition.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/messagedefinition.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/messagedefinition.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
BooleanType,
DataType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class MessageDefinitionSchema:
"""
Defines the characteristics of a message that can be shared between systems,
including the type of event that initiates the message, the content to be
transmitted and what response(s), if any, are permitted.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
Defines the characteristics of a message that can be shared between systems,
including the type of event that initiates the message, the content to be
transmitted and what response(s), if any, are permitted.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a MessageDefinition resource
url: An absolute URI that is used to identify this message definition when it is
referenced in a specification, model, design or an instance. This SHALL be a
URL, SHOULD be globally unique, and SHOULD be an address at which this message
definition is (or will be) published. The URL SHOULD include the major version
of the message definition. For more information see [Technical and Business
Versions](resource.html#versions).
identifier: A formal identifier that is used to identify this message definition when it
is represented in other formats, or referenced in a specification, model,
design or an instance.
version: The identifier that is used to identify this version of the message definition
when it is referenced in a specification, model, design or instance. This is
an arbitrary value managed by the message definition author and is not
expected to be globally unique. For example, it might be a timestamp (e.g.
yyyymmdd) if a managed version is not available. There is also no expectation
that versions can be placed in a lexicographical sequence.
name: A natural language name identifying the message definition. This name should
be usable as an identifier for the module by machine processing applications
such as code generation.
title: A short, descriptive, user-friendly title for the message definition.
status: The status of this message definition. Enables tracking the life-cycle of the
content.
experimental: A boolean value to indicate that this message definition is authored for
testing purposes (or education/evaluation/marketing), and is not intended to
be used for genuine usage.
date: The date (and optionally time) when the message definition was published. The
date must change if and when the business version changes and it must change
if the status code changes. In addition, it should change when the substantive
content of the message definition changes.
publisher: The name of the individual or organization that published the message
definition.
contact: Contact details to assist a user in finding and communicating with the
publisher.
description: A free text natural language description of the message definition from a
consumer's perspective.
useContext: The content was developed with a focus and intent of supporting the contexts
that are listed. These terms may be used to assist with indexing and searching
for appropriate message definition instances.
jurisdiction: A legal or geographic region in which the message definition is intended to be
used.
purpose: Explaination of why this message definition is needed and why it has been
designed as it has.
copyright: A copyright statement relating to the message definition and/or its contents.
Copyright statements are generally legal restrictions on the use and
publishing of the message definition.
base: The MessageDefinition that is the basis for the contents of this resource.
parent: Identifies a protocol or workflow that this MessageDefinition represents a
step in.
replaces: A MessageDefinition that is superseded by this definition.
event: A coded identifier of a supported messaging event.
category: The impact of the content of the message.
focus: Identifies the resource (or resources) that are being addressed by the event.
For example, the Encounter for an admit message or two Account records for a
merge.
responseRequired: Indicates whether a response is required for this message.
allowedResponse: Indicates what types of messages may be sent as an application-level response
to this message.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.contactdetail import (
ContactDetailSchema,
)
from spark_fhir_schemas.stu3.complex_types.usagecontext import (
UsageContextSchema,
)
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema
from spark_fhir_schemas.stu3.complex_types.messagedefinition_focus import (
MessageDefinition_FocusSchema,
)
from spark_fhir_schemas.stu3.complex_types.messagedefinition_allowedresponse import (
MessageDefinition_AllowedResponseSchema,
)
if (
max_recursion_limit
and nesting_list.count("MessageDefinition") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["MessageDefinition"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a MessageDefinition resource
StructField("resourceType", StringType(), True),
# An absolute URI that is used to identify this message definition when it is
# referenced in a specification, model, design or an instance. This SHALL be a
# URL, SHOULD be globally unique, and SHOULD be an address at which this message
# definition is (or will be) published. The URL SHOULD include the major version
# of the message definition. For more information see [Technical and Business
# Versions](resource.html#versions).
StructField("url", StringType(), True),
# A formal identifier that is used to identify this message definition when it
# is represented in other formats, or referenced in a specification, model,
# design or an instance.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The identifier that is used to identify this version of the message definition
# when it is referenced in a specification, model, design or instance. This is
# an arbitrary value managed by the message definition author and is not
# expected to be globally unique. For example, it might be a timestamp (e.g.
# yyyymmdd) if a managed version is not available. There is also no expectation
# that versions can be placed in a lexicographical sequence.
StructField("version", StringType(), True),
# A natural language name identifying the message definition. This name should
# be usable as an identifier for the module by machine processing applications
# such as code generation.
StructField("name", StringType(), True),
# A short, descriptive, user-friendly title for the message definition.
StructField("title", StringType(), True),
# The status of this message definition. Enables tracking the life-cycle of the
# content.
StructField("status", StringType(), True),
# A boolean value to indicate that this message definition is authored for
# testing purposes (or education/evaluation/marketing), and is not intended to
# be used for genuine usage.
StructField("experimental", BooleanType(), True),
# The date (and optionally time) when the message definition was published. The
# date must change if and when the business version changes and it must change
# if the status code changes. In addition, it should change when the substantive
# content of the message definition changes.
StructField("date", StringType(), True),
# The name of the individual or organization that published the message
# definition.
StructField("publisher", StringType(), True),
# Contact details to assist a user in finding and communicating with the
# publisher.
StructField(
"contact",
ArrayType(
ContactDetailSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A free text natural language description of the message definition from a
# consumer's perspective.
StructField("description", StringType(), True),
# The content was developed with a focus and intent of supporting the contexts
# that are listed. These terms may be used to assist with indexing and searching
# for appropriate message definition instances.
StructField(
"useContext",
ArrayType(
UsageContextSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A legal or geographic region in which the message definition is intended to be
# used.
StructField(
"jurisdiction",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Explaination of why this message definition is needed and why it has been
# designed as it has.
StructField("purpose", StringType(), True),
# A copyright statement relating to the message definition and/or its contents.
# Copyright statements are generally legal restrictions on the use and
# publishing of the message definition.
StructField("copyright", StringType(), True),
# The MessageDefinition that is the basis for the contents of this resource.
StructField(
"base",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifies a protocol or workflow that this MessageDefinition represents a
# step in.
StructField(
"parent",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A MessageDefinition that is superseded by this definition.
StructField(
"replaces",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A coded identifier of a supported messaging event.
StructField(
"event",
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The impact of the content of the message.
StructField("category", StringType(), True),
# Identifies the resource (or resources) that are being addressed by the event.
# For example, the Encounter for an admit message or two Account records for a
# merge.
StructField(
"focus",
ArrayType(
MessageDefinition_FocusSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Indicates whether a response is required for this message.
StructField("responseRequired", BooleanType(), True),
# Indicates what types of messages may be sent as an application-level response
# to this message.
StructField(
"allowedResponse",
ArrayType(
MessageDefinition_AllowedResponseSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
| 51.851923
| 102
| 0.573156
| 2,684
| 26,963
| 5.60693
| 0.144188
| 0.048641
| 0.030899
| 0.044654
| 0.817662
| 0.808426
| 0.808426
| 0.786896
| 0.763572
| 0.751479
| 0
| 0.002231
| 0.385046
| 26,963
| 519
| 103
| 51.95183
| 0.905374
| 0.399325
| 0
| 0.588235
| 0
| 0
| 0.028881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003268
| false
| 0
| 0.045752
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd7122736e7beb5cb96f82e02cf48ec3419fa5b3
| 122
|
py
|
Python
|
desc/local/__init__.py
|
LSSTDESC/desc-wfmon
|
fa73ee1a00e9503e6bd82d1f81d9806fd9623783
|
[
"BSD-3-Clause"
] | null | null | null |
desc/local/__init__.py
|
LSSTDESC/desc-wfmon
|
fa73ee1a00e9503e6bd82d1f81d9806fd9623783
|
[
"BSD-3-Clause"
] | null | null | null |
desc/local/__init__.py
|
LSSTDESC/desc-wfmon
|
fa73ee1a00e9503e6bd82d1f81d9806fd9623783
|
[
"BSD-3-Clause"
] | null | null | null |
import importlib.metadata
__version__ = importlib.metadata.version('desc-wfmon')
from .local import install_dir, install
| 24.4
| 54
| 0.819672
| 15
| 122
| 6.333333
| 0.666667
| 0.357895
| 0.505263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090164
| 122
| 4
| 55
| 30.5
| 0.855856
| 0
| 0
| 0
| 0
| 0
| 0.081967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bdb05dd6d10338b636184233531f9dd118617696
| 35
|
py
|
Python
|
simulators/__init__.py
|
nisarkhanatwork/mctsnet
|
2ff9e8234bd4a944246aab803e3dd07082042f62
|
[
"Apache-2.0"
] | 5
|
2021-03-02T09:11:58.000Z
|
2022-03-11T03:57:03.000Z
|
simulators/__init__.py
|
nisarkhanatwork/mctsnet
|
2ff9e8234bd4a944246aab803e3dd07082042f62
|
[
"Apache-2.0"
] | null | null | null |
simulators/__init__.py
|
nisarkhanatwork/mctsnet
|
2ff9e8234bd4a944246aab803e3dd07082042f62
|
[
"Apache-2.0"
] | 1
|
2021-02-19T20:22:46.000Z
|
2021-02-19T20:22:46.000Z
|
from .rocksample import RockSample
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bdea135cafcb6901270c6857aad6aade3d3857ad
| 414
|
py
|
Python
|
mlqm/models/__init__.py
|
Nuclear-Physics-with-Machine-Learning/MLQM
|
69472921b130abb530b11840ab8c1b8c608b5089
|
[
"Apache-2.0"
] | 8
|
2021-05-13T13:58:56.000Z
|
2022-02-28T22:11:06.000Z
|
mlqm/models/__init__.py
|
coreyjadams/AI-for-QM
|
69472921b130abb530b11840ab8c1b8c608b5089
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:44:26.000Z
|
2021-09-23T17:51:43.000Z
|
mlqm/models/__init__.py
|
coreyjadams/AI-for-QM
|
69472921b130abb530b11840ab8c1b8c608b5089
|
[
"Apache-2.0"
] | 1
|
2022-03-15T07:18:24.000Z
|
2022-03-15T07:18:24.000Z
|
from .HarmonicOscillatorWavefunction import HarmonicOscillatorWavefunction
from .PolynomialWavefunction import PolynomialWavefunction
from .NeuralWavefunction import NeuralWavefunction
from .DeepSetsWavefunction import DeepSetsWavefunction
from .GaussianBoundaryCondition import GaussianBoundaryCondition
from .ExponentialBoundaryCondition import ExponentialBoundaryCondition
| 51.75
| 74
| 0.835749
| 24
| 414
| 14.416667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149758
| 414
| 7
| 75
| 59.142857
| 0.982955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da11ccf3a1c0725aff46c3a602d61cc3af53ea9a
| 38
|
py
|
Python
|
segmentation_research/backbones/__init__.py
|
cj-mclaughlin/segmentation_research
|
6d59ffccdb274430b2ef02258d120f65db9004d5
|
[
"MIT"
] | 1
|
2021-07-19T04:46:46.000Z
|
2021-07-19T04:46:46.000Z
|
segmentation_research/backbones/__init__.py
|
cj-mclaughlin/segmentation_research
|
6d59ffccdb274430b2ef02258d120f65db9004d5
|
[
"MIT"
] | null | null | null |
segmentation_research/backbones/__init__.py
|
cj-mclaughlin/segmentation_research
|
6d59ffccdb274430b2ef02258d120f65db9004d5
|
[
"MIT"
] | null | null | null |
from . import drn
from . import resnet
| 19
| 20
| 0.763158
| 6
| 38
| 4.833333
| 0.666667
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 2
| 20
| 19
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
da1d1560bbdf5a9ffa5baf84d7f53888d53d3c69
| 164
|
py
|
Python
|
2020/gcd.py
|
kyz/adventofcode
|
b3dd544624a8fc313ca1fad0d2f02f53bd79ce3d
|
[
"MIT"
] | null | null | null |
2020/gcd.py
|
kyz/adventofcode
|
b3dd544624a8fc313ca1fad0d2f02f53bd79ce3d
|
[
"MIT"
] | null | null | null |
2020/gcd.py
|
kyz/adventofcode
|
b3dd544624a8fc313ca1fad0d2f02f53bd79ce3d
|
[
"MIT"
] | null | null | null |
# greatest common divisor
def gcd(a, b):
while b:
a, b = b, a % b
return a
# lowest common multiple
def lcm(a, b):
return (a * b) // gcd(a, b)
| 16.4
| 31
| 0.536585
| 29
| 164
| 3.034483
| 0.413793
| 0.136364
| 0.113636
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.323171
| 164
| 9
| 32
| 18.222222
| 0.792793
| 0.280488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
da331d6ed56bea648f9b83dbf9063a03d473b295
| 510
|
py
|
Python
|
lib/hachoir/parser/program/__init__.py
|
0x20Man/Watcher3
|
4656b42bc5879a3741bb95f534b7c6612a25264d
|
[
"Apache-2.0"
] | 320
|
2017-03-28T23:33:45.000Z
|
2022-02-17T08:45:01.000Z
|
lib/hachoir/parser/program/__init__.py
|
0x20Man/Watcher3
|
4656b42bc5879a3741bb95f534b7c6612a25264d
|
[
"Apache-2.0"
] | 300
|
2017-03-28T19:22:54.000Z
|
2021-12-01T01:11:55.000Z
|
lib/hachoir/parser/program/__init__.py
|
0x20Man/Watcher3
|
4656b42bc5879a3741bb95f534b7c6612a25264d
|
[
"Apache-2.0"
] | 90
|
2017-03-29T16:12:43.000Z
|
2022-03-01T06:23:48.000Z
|
from hachoir.parser.program.elf import ElfFile # noqa
from hachoir.parser.program.exe import ExeFile # noqa
from hachoir.parser.program.macho import MachoFile, MachoFatFile # noqa
from hachoir.parser.program.python import PythonCompiledFile # noqa
from hachoir.parser.program.java import JavaCompiledClassFile # noqa
from hachoir.parser.program.prc import PRCFile # noqa
from hachoir.parser.program.nds import NdsFile # noqa
from hachoir.parser.program.java_serialized import JavaSerializedFile # noqa
| 56.666667
| 77
| 0.823529
| 66
| 510
| 6.348485
| 0.348485
| 0.210024
| 0.324582
| 0.458234
| 0.486874
| 0.152745
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111765
| 510
| 8
| 78
| 63.75
| 0.924945
| 0.076471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e50608b6153d6ba2a0d6a5a4eb0bb78742d77e72
| 39
|
py
|
Python
|
terrainbento/derived_models/model_100_basicSt/__init__.py
|
mcflugen/terrainbento
|
1b756477b8a8ab6a8f1275b1b30ec84855c840ea
|
[
"MIT"
] | null | null | null |
terrainbento/derived_models/model_100_basicSt/__init__.py
|
mcflugen/terrainbento
|
1b756477b8a8ab6a8f1275b1b30ec84855c840ea
|
[
"MIT"
] | null | null | null |
terrainbento/derived_models/model_100_basicSt/__init__.py
|
mcflugen/terrainbento
|
1b756477b8a8ab6a8f1275b1b30ec84855c840ea
|
[
"MIT"
] | null | null | null |
from .model_100_basicSt import BasicSt
| 19.5
| 38
| 0.871795
| 6
| 39
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.102564
| 39
| 1
| 39
| 39
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5bc0993b907f649cbf9838455bf521acfec0b7f
| 50
|
py
|
Python
|
cs250/t1.py
|
icterguru/DrLutchClass
|
4ae75e047d00e36af7fd5019a7d751a44bc7daa8
|
[
"Apache-2.0"
] | null | null | null |
cs250/t1.py
|
icterguru/DrLutchClass
|
4ae75e047d00e36af7fd5019a7d751a44bc7daa8
|
[
"Apache-2.0"
] | null | null | null |
cs250/t1.py
|
icterguru/DrLutchClass
|
4ae75e047d00e36af7fd5019a7d751a44bc7daa8
|
[
"Apache-2.0"
] | 1
|
2018-09-20T20:50:08.000Z
|
2018-09-20T20:50:08.000Z
|
print "Hello, t1.py"
print "Hello, Dr. Mokter"
| 8.333333
| 25
| 0.64
| 8
| 50
| 4
| 0.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.2
| 50
| 5
| 26
| 10
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0.604167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
00bfe2f174c7ed98aafc6235ea7f4e1da06b04f3
| 48
|
py
|
Python
|
instatag/__init__.py
|
Moduland/instatag
|
f5cf518b9f552f81db01ead3aa6406ead6e9753e
|
[
"MIT"
] | 23
|
2017-06-21T16:24:29.000Z
|
2021-11-15T10:39:53.000Z
|
instatag/__init__.py
|
Moduland/instatag
|
f5cf518b9f552f81db01ead3aa6406ead6e9753e
|
[
"MIT"
] | 2
|
2018-07-01T14:32:54.000Z
|
2018-07-31T05:17:08.000Z
|
instatag/__init__.py
|
Moduland/instatag
|
f5cf518b9f552f81db01ead3aa6406ead6e9753e
|
[
"MIT"
] | 4
|
2017-07-07T17:21:18.000Z
|
2018-11-24T17:09:43.000Z
|
# -*- coding: utf-8 -*-
from .instatag import *
| 16
| 23
| 0.583333
| 6
| 48
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.1875
| 48
| 3
| 24
| 16
| 0.692308
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
daec29e58e05da0b748263fb7679434af15c9eec
| 20
|
py
|
Python
|
python/testData/quickFixes/PyAddImportQuickFixTest/allVariantsSuggestedWhenExistingNonProjectImportFits/time.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/quickFixes/PyAddImportQuickFixTest/allVariantsSuggestedWhenExistingNonProjectImportFits/time.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/quickFixes/PyAddImportQuickFixTest/allVariantsSuggestedWhenExistingNonProjectImportFits/time.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def time():
pass
| 10
| 11
| 0.55
| 3
| 20
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 20
| 2
| 12
| 10
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
daf4c179757f25a6ba0fe65ff41b1c57f4e167c9
| 164
|
py
|
Python
|
daily/admin.py
|
yangfeiffei/Dsystem
|
8c4b677151d8a9777c265b0a8744c068d122e780
|
[
"MIT"
] | null | null | null |
daily/admin.py
|
yangfeiffei/Dsystem
|
8c4b677151d8a9777c265b0a8744c068d122e780
|
[
"MIT"
] | null | null | null |
daily/admin.py
|
yangfeiffei/Dsystem
|
8c4b677151d8a9777c265b0a8744c068d122e780
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from daily import models
# Register your models here.
admin.site.register(models.Daily)
admin.site.register(models.Categories)
| 18.222222
| 38
| 0.804878
| 23
| 164
| 5.73913
| 0.521739
| 0.136364
| 0.257576
| 0.348485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115854
| 164
| 9
| 38
| 18.222222
| 0.910345
| 0.158537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
97070228e3b9ba6a65b2a5888ebc2e8346de75cb
| 1,126
|
py
|
Python
|
basic_tutorials/strings.py
|
LearnPythonAndMakeGames/BasicPythonTutorialSeries
|
be129702680aaf1186fb62add13f94002d4baa63
|
[
"Apache-2.0"
] | 7
|
2015-04-16T14:30:47.000Z
|
2021-08-18T15:37:12.000Z
|
basic_tutorials/strings.py
|
LearnPythonAndMakeGames/BasicPythonTutorialSeries
|
be129702680aaf1186fb62add13f94002d4baa63
|
[
"Apache-2.0"
] | null | null | null |
basic_tutorials/strings.py
|
LearnPythonAndMakeGames/BasicPythonTutorialSeries
|
be129702680aaf1186fb62add13f94002d4baa63
|
[
"Apache-2.0"
] | 2
|
2015-04-21T09:57:21.000Z
|
2020-01-07T08:41:41.000Z
|
attack_power = 100
# Print keyword only available python 2 and lower.
print "Attack Power:", attack_power
print "Attack Power: {} points".format(attack_power)
print "Attack Power: {attack_power} points".format(attack_power=100)
print "Attack Power: %s" % (attack_power) # python 1 and 2... won't work on 3
# Print as a built-in function
print("Attack Power:", attack_power)
print("Attack Power: {} points".format(attack_power))
# 0th 1st ...
print("Attack Power: {0} points".format(attack_power, percent_to_hit, ...))
print("Attack Power: {attack_power} points".format(attack_power=100))
print "Attack Power".lower() # attack power
print "Attack Power".upper() # ATTACK POWER
print "Attack Power".capitalize() # Attack power
print ":".join("Attack Power", "{}".format(attack_power)) # Attack Power : 100
print "Attack " + "Power" # Attack Power
for character in "Attack Power":
print character # A t t a c k P o w e r <--- each on its own line
# A
# t
# t
ap_string = "Attack Power"
if "attack power" == ap_string.lower():
pass
| 33.117647
| 79
| 0.653641
| 159
| 1,126
| 4.528302
| 0.320755
| 0.504167
| 0.266667
| 0.183333
| 0.506944
| 0.416667
| 0.375
| 0.375
| 0.375
| 0.375
| 0
| 0.021493
| 0.21492
| 1,126
| 33
| 80
| 34.121212
| 0.792986
| 0.276199
| 0
| 0
| 0
| 0
| 0.350811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.052632
| 0
| null | null | 0.736842
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
970e02d7316b564118ce5ed1d4b6fd689c804156
| 14,715
|
py
|
Python
|
pcdet/utils/memory_ensemble_utils.py
|
collector-m/ST3D
|
720e04aa3dc4bb95ac336171b240b6c3130144e5
|
[
"Apache-2.0"
] | 184
|
2021-03-09T12:19:49.000Z
|
2022-03-31T09:19:05.000Z
|
pcdet/utils/memory_ensemble_utils.py
|
collector-m/ST3D
|
720e04aa3dc4bb95ac336171b240b6c3130144e5
|
[
"Apache-2.0"
] | 36
|
2021-03-23T08:42:38.000Z
|
2022-03-31T09:14:41.000Z
|
pcdet/utils/memory_ensemble_utils.py
|
collector-m/ST3D
|
720e04aa3dc4bb95ac336171b240b6c3130144e5
|
[
"Apache-2.0"
] | 22
|
2021-03-10T09:32:27.000Z
|
2022-03-28T05:01:45.000Z
|
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from pcdet.utils import common_utils
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
def consistency_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
"""
Args:
gt_infos_a:
gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
cls_scores: (N)
iou_scores: (N)
memory_counter: (N)
gt_infos_b:
gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
cls_scores: (M)
iou_scores: (M)
memory_counter: (M)
memory_ensemble_cfg:
Returns:
gt_infos:
gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
cls_scores: (K)
iou_scores: (K)
memory_counter: (K)
"""
gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
new_gt_box = gt_infos_a['gt_boxes']
new_cls_scores = gt_infos_a['cls_scores']
new_iou_scores = gt_infos_a['iou_scores']
new_memory_counter = gt_infos_a['memory_counter']
# if gt_box_b or gt_box_a don't have any predictions
if gt_box_b.shape[0] == 0:
gt_infos_a['memory_counter'] += 1
return gt_infos_a
elif gt_box_a.shape[0] == 0:
return gt_infos_b
# get ious
iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7]).cpu()
ious, match_idx = torch.max(iou_matrix, dim=1)
ious, match_idx = ious.numpy(), match_idx.numpy()
gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()
match_pairs_idx = np.concatenate((
np.array(list(range(gt_box_a.shape[0]))).reshape(-1, 1),
match_idx.reshape(-1, 1)), axis=1)
#########################################################
# filter matched pair boxes by IoU
# if matching succeeded, use boxes with higher confidence
#########################################################
iou_mask = (ious >= memory_ensemble_cfg.IOU_THRESH)
matching_selected = match_pairs_idx[iou_mask]
gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
gt_box_selected_b = gt_box_b[matching_selected[:, 1]]
# assign boxes with higher confidence
score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
if memory_ensemble_cfg.get('WEIGHTED', None):
weight = gt_box_selected_a[:, 8] / (gt_box_selected_a[:, 8] + gt_box_selected_b[:, 8])
min_scores = np.minimum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
max_scores = np.maximum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
weighted_score = weight * (max_scores - min_scores) + min_scores
new_gt_box[matching_selected[:, 0], :7] = weight.reshape(-1, 1) * gt_box_selected_a[:, :7] + \
(1 - weight.reshape(-1, 1)) * gt_box_selected_b[:, :7]
new_gt_box[matching_selected[:, 0], 8] = weighted_score
else:
new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b['cls_scores'][
matching_selected[score_mask, 1]]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b['iou_scores'][
matching_selected[score_mask, 1]]
# for matching pairs, clear the ignore counter
new_memory_counter[matching_selected[:, 0]] = 0
#######################################################
# If previous bboxes disappeared: ious <= 0.1
#######################################################
disappear_idx = (ious < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
new_memory_counter[disappear_idx] += 1
# ignore gt_boxes that ignore_count == IGNORE_THRESH
ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
new_gt_box[ignore_mask, 7] = -1
# remove gt_boxes that ignore_count >= RM_THRESH
remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
new_gt_box = new_gt_box[remain_mask]
new_memory_counter = new_memory_counter[remain_mask]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = new_cls_scores[remain_mask]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = new_iou_scores[remain_mask]
# Add new appear boxes
ious_b2a, match_idx_b2a = torch.max(iou_matrix, dim=0)
ious_b2a, match_idx_b2a = ious_b2a.numpy(), match_idx_b2a.numpy()
newboxes_idx = (ious_b2a < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]
if newboxes_idx.shape[0] != 0:
new_gt_box = np.concatenate((new_gt_box, gt_infos_b['gt_boxes'][newboxes_idx, :]), axis=0)
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = np.concatenate((new_cls_scores, gt_infos_b['cls_scores'][newboxes_idx]), axis=0)
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = np.concatenate((new_iou_scores, gt_infos_b['iou_scores'][newboxes_idx]), axis=0)
new_memory_counter = np.concatenate((new_memory_counter, gt_infos_b['memory_counter'][newboxes_idx]), axis=0)
new_gt_infos = {
'gt_boxes': new_gt_box,
'cls_scores': new_cls_scores if gt_infos_a['cls_scores'] is not None else None,
'iou_scores': new_iou_scores if gt_infos_a['iou_scores'] is not None else None,
'memory_counter': new_memory_counter
}
return new_gt_infos
def nms_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
"""
Args:
gt_infos_a:
gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
cls_scores: (N)
iou_scores: (N)
memory_counter: (N)
gt_infos_b:
gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
cls_scores: (M)
iou_scores: (M)
memory_counter: (M)
memory_ensemble_cfg:
Returns:
gt_infos:
gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
cls_scores: (K)
iou_scores: (K)
memory_counter: (K)
"""
gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
if gt_box_b.shape[0] == 0:
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
gt_infos_a['memory_counter'] += 1
return gt_infos_a
elif gt_box_a.shape[0] == 0:
return gt_infos_b
gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
gt_boxes = torch.cat((gt_box_a, gt_box_b), dim=0)
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = np.concatenate((gt_infos_a['cls_scores'], gt_infos_b['cls_scores']), axis=0)
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = np.concatenate((gt_infos_a['iou_scores'], gt_infos_b['iou_scores']), axis=0)
new_memory_counter = np.concatenate((gt_infos_a['memory_counter'], gt_infos_b['memory_counter']), axis=0)
selected, selected_scores = class_agnostic_nms(
box_scores=gt_boxes[:, -1], box_preds=gt_boxes[:, :7], nms_config=memory_ensemble_cfg.NMS_CONFIG
)
gt_boxes = gt_boxes.cpu().numpy()
if isinstance(selected, list):
selected = np.array(selected)
else:
selected = selected.cpu().numpy()
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
ious, _ = torch.max(iou_matrix, dim=1)
ious = ious.cpu().numpy()
gt_box_a_size = gt_box_a.shape[0]
selected_a = selected[selected < gt_box_a_size]
matched_mask = (ious[selected_a] > memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH)
match_idx = selected_a[matched_mask]
new_memory_counter[match_idx] = 0
# for previous bboxes disappeared
disappear_idx = (ious < memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH).nonzero()[0]
new_memory_counter[disappear_idx] += 1
# ignore gt_boxes that ignore_count == IGNORE_THRESH
ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
gt_boxes[ignore_mask, 7] = -1
# remove gt_boxes that ignore_count >= RM_THRESH
rm_idx = (new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH).nonzero()[0]
selected = np.setdiff1d(selected, rm_idx)
selected_gt_boxes = gt_boxes[selected]
new_gt_infos = {
'gt_boxes': selected_gt_boxes,
'cls_scores': new_cls_scores[selected] if gt_infos_a['cls_scores'] is not None else None,
'iou_scores': new_iou_scores[selected] if gt_infos_a['iou_scores'] is not None else None,
'memory_counter': new_memory_counter[selected]
}
return new_gt_infos
def bipartite_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
"""
Args:
gt_infos_a:
gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
cls_scores: (N)
iou_scores: (N)
memory_counter: (N)
gt_infos_b:
gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
cls_scores: (M)
iou_scores: (M)
memory_counter: (M)
memory_ensemble_cfg:
Returns:
gt_infos:
gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
cls_scores: (K)
iou_scores: (K)
memory_counter: (K)
"""
gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
new_gt_box = gt_infos_a['gt_boxes']
new_cls_scores = gt_infos_a['cls_scores']
new_iou_scores = gt_infos_a['iou_scores']
new_memory_counter = gt_infos_a['memory_counter']
# if gt_box_b or gt_box_a don't have any predictions
if gt_box_b.shape[0] == 0:
gt_infos_a['memory_counter'] += 1
return gt_infos_a
elif gt_box_a.shape[0] == 0:
return gt_infos_b
# bipartite matching
iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
iou_matrix = iou_matrix.cpu().numpy()
a_idx, b_idx = linear_sum_assignment(-iou_matrix)
gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()
matching_paris_idx = np.concatenate((a_idx.reshape(-1, 1), b_idx.reshape(-1, 1)), axis=1)
ious = iou_matrix[matching_paris_idx[:, 0], matching_paris_idx[:, 1]]
# matched a boxes.
matched_mask = ious > memory_ensemble_cfg.IOU_THRESH
matching_selected = matching_paris_idx[matched_mask]
gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
gt_box_selected_b = gt_box_b[matching_selected[:, 1]]
# assign boxes with higher confidence
score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b['cls_scores'][
matching_selected[score_mask, 1]]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b['iou_scores'][
matching_selected[score_mask, 1]]
# for matched pairs, clear the ignore counter
new_memory_counter[matching_selected[:, 0]] = 0
##############################################
# disppeared boxes for previous pseudo boxes
##############################################
gt_box_a_idx = np.array(list(range(gt_box_a.shape[0])))
disappear_idx = np.setdiff1d(gt_box_a_idx, matching_selected[:, 0])
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
new_memory_counter[disappear_idx] += 1
# ignore gt_boxes that ignore_count == IGNORE_THRESH
ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
new_gt_box[ignore_mask, 7] = -1
# remove gt_boxes that ignore_count >= RM_THRESH
remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
new_gt_box = new_gt_box[remain_mask]
new_memory_counter = new_memory_counter[remain_mask]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = new_cls_scores[remain_mask]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = new_iou_scores[remain_mask]
##############################################
# new appear boxes for current pseudo boxes
##############################################
gt_box_b_idx = np.array(list(range(gt_box_b.shape[0])))
newboxes_idx = np.setdiff1d(gt_box_b_idx, matching_selected[:, 1])
if newboxes_idx.shape[0] != 0:
new_gt_box = np.concatenate((new_gt_box, gt_infos_b['gt_boxes'][newboxes_idx, :]), axis=0)
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = np.concatenate((new_cls_scores,
gt_infos_b['cls_scores'][newboxes_idx]), axis=0)
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = np.concatenate((new_iou_scores,
gt_infos_b['iou_scores'][newboxes_idx]), axis=0)
new_memory_counter = np.concatenate((new_memory_counter,
gt_infos_b['memory_counter'][newboxes_idx]), axis=0)
new_gt_infos = {
'gt_boxes': new_gt_box,
'cls_scores': new_cls_scores if gt_infos_a['cls_scores'] is not None else None,
'iou_scores': new_iou_scores if gt_infos_a['iou_scores'] is not None else None,
'memory_counter': new_memory_counter
}
return new_gt_infos
| 42.900875
| 117
| 0.644648
| 2,190
| 14,715
| 3.918721
| 0.065297
| 0.055348
| 0.04288
| 0.023305
| 0.827779
| 0.801794
| 0.782452
| 0.743183
| 0.733629
| 0.715451
| 0
| 0.012487
| 0.221747
| 14,715
| 342
| 118
| 43.026316
| 0.736902
| 0.188787
| 0
| 0.585106
| 0
| 0
| 0.066797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015957
| false
| 0
| 0.031915
| 0
| 0.095745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97159a6be287412247b5e236b366eec7bf0f7977
| 1,035
|
py
|
Python
|
utils/colorfy.py
|
obatsis/Distributed-NTUA
|
0bf39163b64aaefb2576be01337e0ec6e026ce6d
|
[
"MIT"
] | null | null | null |
utils/colorfy.py
|
obatsis/Distributed-NTUA
|
0bf39163b64aaefb2576be01337e0ec6e026ce6d
|
[
"MIT"
] | null | null | null |
utils/colorfy.py
|
obatsis/Distributed-NTUA
|
0bf39163b64aaefb2576be01337e0ec6e026ce6d
|
[
"MIT"
] | null | null | null |
## Uppercase is BOLT
# to use: from utils.beautyfy import *
def red(string):
return '\033[1;91m {}\033[00m'.format(string)
def RED(string):
return '\033[1;91m {}\033[00m'.format(string)
def yellow(string):
return '\033[93m {}\033[00m'.format(string)
def YELLOW(string):
return '\033[1;93m {}\033[00m'.format(string)
def blue(string):
return '\033[94m {}\033[00m'.format(string)
def BLUE(string):
return '\033[1;94m {}\033[00m'.format(string)
def green(string):
return '\033[92m {}\033[00m'.format(string)
def GREEN(string):
return '\033[1;92m {}\033[00m'.format(string)
def cyan(string):
return '\033[96m {}\033[00m'.format(string)
def underline(string):
return '\033[4m{}\033[00m'.format(string)
def header(string):
return '\033[95m{}\033[00m'.format(string)
# HEADER = '\033[95m'
# OKBLUE = '\033[94m'
# OKCYAN = '\033[96m'
# OKGREEN = '\033[92m'
# WARNING = '\033[93m'
# FAIL = '\033[91m'
# ENDC = '\033[0m'
# BOLD = '\033[1m'
# UNDERLINE = '\033[4m'
| 23.522727
| 49
| 0.613527
| 151
| 1,035
| 4.205298
| 0.258278
| 0.207874
| 0.259843
| 0.311811
| 0.622047
| 0.555906
| 0.494488
| 0.494488
| 0.494488
| 0.140157
| 0
| 0.18267
| 0.174879
| 1,035
| 43
| 50
| 24.069767
| 0.56089
| 0.222222
| 0
| 0.090909
| 0
| 0
| 0.273072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
973db94f1754a2486c012365556d28b87d81582a
| 144
|
py
|
Python
|
FieldPlayer.py
|
ymesika7/XGAnalytic
|
1a2aad12b8f1f8608835d269ade031c662398bcd
|
[
"Apache-2.0"
] | 1
|
2020-05-06T16:43:00.000Z
|
2020-05-06T16:43:00.000Z
|
FieldPlayer.py
|
ymesika7/XGAnalytic
|
1a2aad12b8f1f8608835d269ade031c662398bcd
|
[
"Apache-2.0"
] | null | null | null |
FieldPlayer.py
|
ymesika7/XGAnalytic
|
1a2aad12b8f1f8608835d269ade031c662398bcd
|
[
"Apache-2.0"
] | null | null | null |
from Player import Player
class FieldPlayer(Player):
def __init__(self, locations, color):
Player.__init__(self, locations, color)
| 24
| 47
| 0.729167
| 17
| 144
| 5.705882
| 0.588235
| 0.164948
| 0.350515
| 0.453608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180556
| 144
| 6
| 47
| 24
| 0.822034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
975293f5784e71425512ba0abb1df3b8b81b2ecd
| 30
|
py
|
Python
|
gpd_ws/devel/lib/python2.7/dist-packages/gpd_ros/srv/__init__.py
|
JisuHann/Point-Cloud--Grasp
|
083244632412709dbc29ac7841b6a837e4ed3cb6
|
[
"BSD-2-Clause"
] | null | null | null |
gpd_ws/devel/lib/python2.7/dist-packages/gpd_ros/srv/__init__.py
|
JisuHann/Point-Cloud--Grasp
|
083244632412709dbc29ac7841b6a837e4ed3cb6
|
[
"BSD-2-Clause"
] | null | null | null |
gpd_ws/devel/lib/python2.7/dist-packages/gpd_ros/srv/__init__.py
|
JisuHann/Point-Cloud--Grasp
|
083244632412709dbc29ac7841b6a837e4ed3cb6
|
[
"BSD-2-Clause"
] | 1
|
2021-03-31T06:27:31.000Z
|
2021-03-31T06:27:31.000Z
|
from ._detect_grasps import *
| 15
| 29
| 0.8
| 4
| 30
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9758780dfe8020c87cca2c415b4d82930e6c285c
| 7,054
|
py
|
Python
|
tests/logkeep/test_Application.py
|
c0yote/toolbag
|
5128af31f0069372ecb537dead4402a6aac33428
|
[
"MIT"
] | null | null | null |
tests/logkeep/test_Application.py
|
c0yote/toolbag
|
5128af31f0069372ecb537dead4402a6aac33428
|
[
"MIT"
] | 7
|
2018-03-18T22:50:24.000Z
|
2018-05-31T17:38:15.000Z
|
tests/logkeep/test_Application.py
|
c0yote/toolbag
|
5128af31f0069372ecb537dead4402a6aac33428
|
[
"MIT"
] | null | null | null |
import os
import unittest
from unittest import skip
from unittest.mock import MagicMock, mock_open, patch
from toolbag.logkeep import _Application, main
TEST_LOG_PATH = '/some/path/to/log.log'
TEST_ERROR_RETURN = 'Some error occurred'
TEST_LIMIT_VALUE_A = 10
TEST_LIMIT_VALUE_B = 11
TEST_LIMIT_VALUE_C = 0.5
TEST_LIMIT_GREATER = 12
TEST_LIMIT_GREATER_FLOAT = 0.6
TEST_ARGS = ['app', TEST_LOG_PATH]
TEST_SIZE_ARGS = ['app', '--size_limit', f'{TEST_LIMIT_VALUE_A}', TEST_LOG_PATH]
TEST_SIZE_FLOAT_ARGS = ['app', '--size_limit', f'{TEST_LIMIT_VALUE_C}', TEST_LOG_PATH]
TEST_SIZE_ARGS = ['app', '--size_limit', f'{TEST_LIMIT_VALUE_A}', TEST_LOG_PATH]
TEST_LINE_ARGS = ['app', '--line_limit', f'{TEST_LIMIT_VALUE_B}', TEST_LOG_PATH]
TEST_SIZE_AND_LINE_ARGS = ['app', '--size_limit', f'{TEST_LIMIT_VALUE_A}',
'--line_limit', f'{TEST_LIMIT_VALUE_B}',
TEST_LOG_PATH]
TEST_UNHANDLED_EXCEPTION_OBJ = Exception(TEST_ERROR_RETURN)
class Application_TestCase(unittest.TestCase):
    """Behavioral tests for the private _Application CLI wrapper.

    sys.argv is patched per-test to simulate command lines, and
    toolbag.logkeep.Log is stubbed so no real files are touched.  The Log
    class mock's return value (``log_mock()``) is the same object
    _Application receives, so expectations are set and checked on that
    instance handle.
    """

    @patch('sys.argv', TEST_SIZE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_size_threshold_storage(self, *stubs):
        # --size_limit alone: size limit stored, line limit left unset.
        app = _Application()
        self.assertEqual(app._size_limit_mb, TEST_LIMIT_VALUE_A)
        self.assertEqual(app._line_limit, None)

    @patch('sys.argv', TEST_SIZE_FLOAT_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_size_threshold_storage_with_float(self, *stubs):
        # Fractional megabyte limits must be accepted.
        app = _Application()
        self.assertEqual(app._size_limit_mb, TEST_LIMIT_VALUE_C)
        self.assertEqual(app._line_limit, None)

    @patch('sys.argv', TEST_LINE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_line_threshold_storage(self, *stubs):
        app = _Application()
        self.assertEqual(app._size_limit_mb, None)
        self.assertEqual(app._line_limit, TEST_LIMIT_VALUE_B)

    @patch('sys.argv', TEST_SIZE_AND_LINE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_size_and_line_threshold_storage(self, *stubs):
        app = _Application()
        self.assertEqual(app._size_limit_mb, TEST_LIMIT_VALUE_A)
        self.assertEqual(app._line_limit, TEST_LIMIT_VALUE_B)

    @patch('sys.argv', TEST_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_no_threshold_storage(self, *stubs):
        app = _Application()
        self.assertEqual(app._size_limit_mb, None)
        self.assertEqual(app._line_limit, None)

    @patch('sys.argv', TEST_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_work_log_creation(self, log_mock):
        # The constructor must open the log at the path given on the CLI.
        app = _Application()
        log_mock.assert_called_with(TEST_LOG_PATH)

    @patch('sys.argv', TEST_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_work_log_storage(self, log_mock):
        app = _Application()
        handle = log_mock()
        self.assertEqual(app._work_log, handle)

    @patch('sys.argv', TEST_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_does_not_clone_log_on_no_thresholds(self, log_mock):
        # BUGFIX: the assertion previously targeted the Log *class* mock
        # (log_mock.clone_to_a_backup), which _Application never calls, so
        # the test passed vacuously.  Assert on the instance handle instead.
        handle = log_mock()
        app = _Application()
        app.run()
        handle.clone_to_a_backup.assert_not_called()

    @patch('sys.argv', TEST_SIZE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_clones_log_on_size_threshold(self, log_mock):
        handle = log_mock()
        handle.get_file_size_in_megabytes.return_value = TEST_LIMIT_GREATER
        app = _Application()
        app.run()
        handle.clone_to_a_backup.assert_called_with()

    @patch('sys.argv', TEST_SIZE_FLOAT_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_clones_log_on_size_threshold_with_float(self, log_mock):
        handle = log_mock()
        handle.get_file_size_in_megabytes.return_value = TEST_LIMIT_GREATER_FLOAT
        app = _Application()
        app.run()
        handle.clone_to_a_backup.assert_called_with()

    @patch('sys.argv', TEST_LINE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_clones_log_on_line_count_threshold(self, log_mock):
        handle = log_mock()
        handle.get_line_count.return_value = TEST_LIMIT_GREATER
        app = _Application()
        app.run()
        handle.clone_to_a_backup.assert_called_with()

    @patch('sys.argv', TEST_SIZE_AND_LINE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_clones_log_on_size_and_line_count_threshold(self, log_mock):
        handle = log_mock()
        # Stub *both* probes with real numbers: with both limits configured
        # the size check may run too, and comparing a bare MagicMock against
        # a number would not exercise the intended path.
        handle.get_file_size_in_megabytes.return_value = TEST_LIMIT_GREATER
        handle.get_line_count.return_value = TEST_LIMIT_GREATER
        app = _Application()
        app.run()
        handle.clone_to_a_backup.assert_called_with()

    @patch('sys.argv', TEST_ARGS)
    @patch('toolbag.logkeep.Log', side_effect=PermissionError(TEST_ERROR_RETURN))
    def test_constructor_raises_runtimeerror_on_bad_permissions(self, *args):
        # OS-level failures are translated into RuntimeError for main().
        with self.assertRaises(RuntimeError) as e:
            _Application()

    @patch('sys.argv', TEST_ARGS)
    @patch('toolbag.logkeep.Log', side_effect=FileNotFoundError(TEST_ERROR_RETURN))
    def test_constructor_raises_runtimeerror_on_file_not_found(self, *args):
        with self.assertRaises(RuntimeError) as e:
            _Application()

    @patch('sys.argv', TEST_SIZE_ARGS)
    @patch('toolbag.logkeep.Log')
    def test_run_raises_runtimeerror_on_bad_permissions(self, log_mock, *args):
        handle = log_mock()
        handle.clone_to_a_backup.side_effect = PermissionError(TEST_ERROR_RETURN)
        handle.get_file_size_in_megabytes.return_value = TEST_LIMIT_GREATER
        app = _Application()
        with self.assertRaises(RuntimeError) as e:
            app.run()

    @patch('toolbag.logkeep.Log')
    @patch('sys.argv', TEST_SIZE_ARGS)
    def test_run_raises_runtimeerror_on_file_not_found(self, log_mock, *args):
        handle = log_mock()
        handle.clone_to_a_backup.side_effect = FileNotFoundError(TEST_ERROR_RETURN)
        handle.get_file_size_in_megabytes.return_value = TEST_LIMIT_GREATER
        app = _Application()
        with self.assertRaises(RuntimeError) as e:
            app.run()
class logkeep_main_TestCase(unittest.TestCase):
    """main() must convert application failures into printed messages, not raise."""
    # A RuntimeError from the _Application constructor is reported as 'Error: ...'.
    @patch('toolbag.logkeep._Application', side_effect=RuntimeError(TEST_ERROR_RETURN))
    @patch('sys.argv', TEST_ARGS)
    @patch('builtins.print')
    def test_main_handles_runtime_errors_from_application_constructor(self, print_mock, *args):
        main()
        print_mock.assert_called_with('Error: '+TEST_ERROR_RETURN)
    # A RuntimeError raised later, from run(), is reported the same way.
    @patch('toolbag.logkeep._Application.run', side_effect=RuntimeError(TEST_ERROR_RETURN))
    @patch('toolbag.logkeep.Log')
    @patch('sys.argv', TEST_ARGS)
    @patch('builtins.print')
    def test_main_handles_runtime_errors_from_application_run(self, print_mock, *args):
        main()
        print_mock.assert_called_with('Error: '+TEST_ERROR_RETURN)
    # Any other exception type is reported with its repr() under 'Unhandled Exception:'.
    @patch('toolbag.logkeep._Application', side_effect=TEST_UNHANDLED_EXCEPTION_OBJ)
    @patch('sys.argv', TEST_ARGS)
    @patch('builtins.print')
    def test_main_handles_unhandled_exceptions_from_application_constructor(self, print_mock, *args):
        main()
        print_mock.assert_called_with('Unhandled Exception: '+repr(TEST_UNHANDLED_EXCEPTION_OBJ))
    @patch('toolbag.logkeep._Application.run', side_effect=TEST_UNHANDLED_EXCEPTION_OBJ)
    @patch('toolbag.logkeep.Log')
    @patch('sys.argv', TEST_ARGS)
    @patch('builtins.print')
    def test_main_handles_unhandled_exceptions_from_application_run(self, print_mock, *args):
        main()
        print_mock.assert_called_with('Unhandled Exception: '+repr(TEST_UNHANDLED_EXCEPTION_OBJ))
| 37.322751
| 99
| 0.745818
| 987
| 7,054
| 4.892604
| 0.096251
| 0.06668
| 0.08656
| 0.066266
| 0.874301
| 0.851522
| 0.817768
| 0.771381
| 0.756264
| 0.727687
| 0
| 0.001645
| 0.138078
| 7,054
| 189
| 100
| 37.322751
| 0.792599
| 0
| 0
| 0.653846
| 0
| 0
| 0.139476
| 0.019986
| 0
| 0
| 0
| 0
| 0.160256
| 1
| 0.128205
| false
| 0
| 0.032051
| 0
| 0.173077
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9773809a0e266aacd9703887ab60bfa9721eaf11
| 107
|
py
|
Python
|
bytevm/__main__.py
|
vrthra/PyVM
|
61fdf22533aceea2cc1f463f7cdbbc13eda6ff25
|
[
"MIT"
] | null | null | null |
bytevm/__main__.py
|
vrthra/PyVM
|
61fdf22533aceea2cc1f463f7cdbbc13eda6ff25
|
[
"MIT"
] | null | null | null |
bytevm/__main__.py
|
vrthra/PyVM
|
61fdf22533aceea2cc1f463f7cdbbc13eda6ff25
|
[
"MIT"
] | null | null | null |
"""A main program for Bytevm."""
import sys
from . import execfile
execfile.ExecFile().cmdline(sys.argv)
| 15.285714
| 37
| 0.728972
| 15
| 107
| 5.2
| 0.733333
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 107
| 6
| 38
| 17.833333
| 0.83871
| 0.242991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c11ee98bc384d240f5f18436155cc3a0a653ecaa
| 29
|
py
|
Python
|
worker.py
|
asoucase/flask-celery-kubernetes-example
|
82d49762cdb41aa785b0aae2c64da6f3131b1cec
|
[
"MIT"
] | null | null | null |
worker.py
|
asoucase/flask-celery-kubernetes-example
|
82d49762cdb41aa785b0aae2c64da6f3131b1cec
|
[
"MIT"
] | 2
|
2020-09-09T17:55:49.000Z
|
2020-09-09T18:17:04.000Z
|
worker.py
|
asoucase/flask-celery-kubernetes-example
|
82d49762cdb41aa785b0aae2c64da6f3131b1cec
|
[
"MIT"
] | null | null | null |
# Star-import every name from flask_app.tasks so importing this module
# brings the task definitions into scope (worker entry module).
from flask_app.tasks import *
| 29
| 29
| 0.827586
| 5
| 29
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c14c8cc6020b8965ebb5d6b03ec0a1ee3ff8c814
| 177
|
py
|
Python
|
atividades/ex021.py
|
leonardoarthur/PythonAtividades
|
9bb2b6e1d7788d99276730de5f199cb0e9ab782f
|
[
"MIT"
] | 1
|
2021-05-02T09:03:27.000Z
|
2021-05-02T09:03:27.000Z
|
atividades/ex021.py
|
leonardoarthur/PythonAtividades
|
9bb2b6e1d7788d99276730de5f199cb0e9ab782f
|
[
"MIT"
] | null | null | null |
atividades/ex021.py
|
leonardoarthur/PythonAtividades
|
9bb2b6e1d7788d99276730de5f199cb0e9ab782f
|
[
"MIT"
] | null | null | null |
import pygame

# Play a music file and block until playback has finished.
pygame.init()
pygame.mixer.music.load('ex21.mp3')
pygame.mixer.music.play()
# Poll every 100 ms instead of busy-waiting: `while(...): pass` pins a
# CPU core for the entire duration of the track.
while pygame.mixer.music.get_busy():
    pygame.time.wait(100)
| 29.5
| 45
| 0.785311
| 28
| 177
| 4.928571
| 0.714286
| 0.23913
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018293
| 0.073446
| 177
| 6
| 46
| 29.5
| 0.823171
| 0.242938
| 0
| 0
| 0
| 0
| 0.06015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c190be449ef362cd628079904d35e625a6f4eea4
| 34
|
py
|
Python
|
dash_labs/plugins/__init__.py
|
ruxi/dash-labs
|
991f8e479886672bb24dba9cf878dfd748777730
|
[
"MIT"
] | 110
|
2021-04-16T14:41:54.000Z
|
2022-03-24T22:29:41.000Z
|
dash_labs/plugins/__init__.py
|
ruxi/dash-labs
|
991f8e479886672bb24dba9cf878dfd748777730
|
[
"MIT"
] | 59
|
2021-04-16T10:42:34.000Z
|
2022-03-21T18:43:25.000Z
|
dash_labs/plugins/__init__.py
|
ruxi/dash-labs
|
991f8e479886672bb24dba9cf878dfd748777730
|
[
"MIT"
] | 28
|
2021-04-16T16:26:32.000Z
|
2022-03-28T17:32:42.000Z
|
# Re-export page_container from the pages submodule at package level.
from .pages import page_container
| 17
| 33
| 0.852941
| 5
| 34
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c192c200bee226f50944b34ee8d84b04aa94786d
| 287
|
py
|
Python
|
pax/nets.py
|
NTT123/pax
|
b80e1e4b6bfb763afd6b4fdefa31a051ca8a3335
|
[
"MIT"
] | 11
|
2021-08-28T17:45:38.000Z
|
2022-01-26T17:50:03.000Z
|
pax/nets.py
|
NTT123/pax
|
b80e1e4b6bfb763afd6b4fdefa31a051ca8a3335
|
[
"MIT"
] | 1
|
2021-09-13T17:29:33.000Z
|
2021-09-13T21:50:34.000Z
|
pax/nets.py
|
NTT123/pax
|
b80e1e4b6bfb763afd6b4fdefa31a051ca8a3335
|
[
"MIT"
] | null | null | null |
"""Public nets."""
from pax._src.nets import (
ResNet18,
ResNet34,
ResNet50,
ResNet101,
ResNet152,
ResNet200,
Transformer,
)
__all__ = (
"ResNet18",
"ResNet34",
"ResNet50",
"ResNet101",
"ResNet152",
"ResNet200",
"Transformer",
)
| 13.045455
| 27
| 0.56446
| 22
| 287
| 7.136364
| 0.636364
| 0.203822
| 0.305732
| 0.420382
| 0.789809
| 0.789809
| 0.789809
| 0
| 0
| 0
| 0
| 0.147059
| 0.289199
| 287
| 21
| 28
| 13.666667
| 0.622549
| 0.041812
| 0
| 0
| 0
| 0
| 0.230483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1aa1d8b11ef90a5a1266d8d879a35529e339d92
| 34
|
py
|
Python
|
historia/__init__.py
|
eranimo/historia
|
5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd
|
[
"MIT"
] | 6
|
2016-04-26T18:39:36.000Z
|
2021-09-01T09:13:38.000Z
|
historia/__init__.py
|
eranimo/historia
|
5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd
|
[
"MIT"
] | null | null | null |
historia/__init__.py
|
eranimo/historia
|
5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd
|
[
"MIT"
] | 4
|
2016-04-10T23:47:23.000Z
|
2021-08-15T11:40:28.000Z
|
# Convenience re-export so callers can do `from historia import Historia`.
from historia.gen import Historia
| 17
| 33
| 0.852941
| 5
| 34
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c1cab7b096f7e1c3801a82537132f2a33ac70b74
| 1,527
|
py
|
Python
|
tests/test_condition.py
|
vail130/norm
|
01a16d6c73c2c6fff92430ca2ca745b295de9a3a
|
[
"MIT"
] | null | null | null |
tests/test_condition.py
|
vail130/norm
|
01a16d6c73c2c6fff92430ca2ca745b295de9a3a
|
[
"MIT"
] | 1
|
2016-02-10T00:43:15.000Z
|
2016-02-10T01:14:37.000Z
|
tests/test_condition.py
|
vail130/norm
|
01a16d6c73c2c6fff92430ca2ca745b295de9a3a
|
[
"MIT"
] | 1
|
2021-03-12T23:21:02.000Z
|
2021-03-12T23:21:02.000Z
|
from __future__ import absolute_import, unicode_literals
import unittest
from mason import Table, Param, AND, OR
class TheConditionClassToStringMethod(unittest.TestCase):
    """str() rendering of the AND/OR condition combinators."""

    def setUp(self):
        # Every test renders conditions against the same table/param pair;
        # build the fixtures once instead of repeating them in each test.
        self.table = Table('table')
        self.param = Param('param')

    def test_and_works_with_one_argument(self):
        # A single-clause AND renders the clause with no connective.
        self.assertEqual(str(AND(self.table.column == self.param)),
                         'table.column = %(param)s')

    def test_and_works_with_two_arguments(self):
        self.assertEqual(str(AND(self.table.column == self.param,
                                 self.table.column1 < self.param)),
                         'table.column = %(param)s AND table.column1 < %(param)s')

    def test_or_works_with_one_argument(self):
        self.assertEqual(str(OR(self.table.column == self.param)),
                         'table.column = %(param)s')

    def test_or_works_with_two_arguments(self):
        self.assertEqual(str(OR(self.table.column == self.param,
                                self.table.column1 < self.param)),
                         'table.column = %(param)s OR table.column1 < %(param)s')

    def test_and_and_or_work_together(self):
        # An OR nested inside an AND is parenthesized in the rendered SQL.
        self.assertEqual(str(AND(self.table.column == self.param,
                                 OR(self.table.column1 < self.param,
                                    self.table.column2 > self.param))),
                         'table.column = %(param)s AND (table.column1 < %(param)s OR table.column2 > %(param)s)')
| 35.511628
| 113
| 0.595285
| 178
| 1,527
| 4.932584
| 0.179775
| 0.113895
| 0.182232
| 0.1082
| 0.759681
| 0.741458
| 0.712984
| 0.700456
| 0.700456
| 0.657175
| 0
| 0.00722
| 0.274394
| 1,527
| 42
| 114
| 36.357143
| 0.785199
| 0
| 0
| 0.344828
| 0
| 0.034483
| 0.189915
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 1
| 0.172414
| false
| 0
| 0.103448
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
de07561228172b9573039d2b575f6431ff22b7a4
| 38
|
py
|
Python
|
face_detectors/__init__.py
|
Saadmairaj/face-detectors
|
b705d84327343614dba0393b29e09207e3dd7f90
|
[
"Apache-2.0"
] | 1
|
2021-12-25T13:05:10.000Z
|
2021-12-25T13:05:10.000Z
|
face_detectors/__init__.py
|
Saadmairaj/face-detectors
|
b705d84327343614dba0393b29e09207e3dd7f90
|
[
"Apache-2.0"
] | null | null | null |
face_detectors/__init__.py
|
Saadmairaj/face-detectors
|
b705d84327343614dba0393b29e09207e3dd7f90
|
[
"Apache-2.0"
] | null | null | null |
# Flatten the detectors subpackage into the top-level package namespace.
from face_detectors.detectors import *
| 38
| 38
| 0.868421
| 5
| 38
| 6.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de0df530b25b89f6a0aab5f0e34a84e027e2c83d
| 5,389
|
py
|
Python
|
calculating_model_score/calculate_snips_slot.py
|
SmallStom/slot_learning
|
73753a1ecec61ee1cdb5bb6356f80f0ee552128a
|
[
"Apache-2.0"
] | 418
|
2019-03-18T07:57:44.000Z
|
2022-03-30T13:42:16.000Z
|
calculating_model_score/calculate_snips_slot.py
|
SmallStom/slot_learning
|
73753a1ecec61ee1cdb5bb6356f80f0ee552128a
|
[
"Apache-2.0"
] | 34
|
2019-03-18T08:20:36.000Z
|
2022-03-02T14:59:28.000Z
|
calculating_model_score/calculate_snips_slot.py
|
SmallStom/slot_learning
|
73753a1ecec61ee1cdb5bb6356f80f0ee552128a
|
[
"Apache-2.0"
] | 100
|
2019-04-09T04:28:10.000Z
|
2022-03-10T08:05:05.000Z
|
import os
from sklearn_metrics_function import show_metrics,delete_both_sides_is_O_word
SNIPS_slot_label = ['[Padding]', '[##WordPiece]', '[CLS]', '[SEP]', 'B-album', 'B-artist', 'B-best_rating', 'B-city', 'B-condition_description', 'B-condition_temperature', 'B-country', 'B-cuisine', 'B-current_location', 'B-entity_name', 'B-facility', 'B-genre', 'B-geographic_poi', 'B-location_name', 'B-movie_name', 'B-movie_type', 'B-music_item', 'B-object_location_type', 'B-object_name', 'B-object_part_of_series_type', 'B-object_select', 'B-object_type', 'B-party_size_description', 'B-party_size_number', 'B-playlist', 'B-playlist_owner', 'B-poi', 'B-rating_unit', 'B-rating_value', 'B-restaurant_name', 'B-restaurant_type', 'B-served_dish', 'B-service', 'B-sort', 'B-spatial_relation', 'B-state', 'B-timeRange', 'B-track', 'B-year', 'I-album', 'I-artist', 'I-city', 'I-country', 'I-cuisine', 'I-current_location', 'I-entity_name', 'I-facility', 'I-genre', 'I-geographic_poi', 'I-location_name', 'I-movie_name', 'I-movie_type', 'I-music_item', 'I-object_location_type', 'I-object_name', 'I-object_part_of_series_type', 'I-object_select', 'I-object_type', 'I-party_size_description', 'I-playlist', 'I-playlist_owner', 'I-poi', 'I-restaurant_name', 'I-restaurant_type', 'I-served_dish', 'I-service', 'I-sort', 'I-spatial_relation', 'I-state', 'I-timeRange', 'I-track', 'O']
SNIPS_slot_effective_label = ['B-album', 'B-artist', 'B-best_rating', 'B-city', 'B-condition_description', 'B-condition_temperature', 'B-country', 'B-cuisine', 'B-current_location', 'B-entity_name', 'B-facility', 'B-genre', 'B-geographic_poi', 'B-location_name', 'B-movie_name', 'B-movie_type', 'B-music_item', 'B-object_location_type', 'B-object_name', 'B-object_part_of_series_type', 'B-object_select', 'B-object_type', 'B-party_size_description', 'B-party_size_number', 'B-playlist', 'B-playlist_owner', 'B-poi', 'B-rating_unit', 'B-rating_value', 'B-restaurant_name', 'B-restaurant_type', 'B-served_dish', 'B-service', 'B-sort', 'B-spatial_relation', 'B-state', 'B-timeRange', 'B-track', 'B-year', 'I-album', 'I-artist', 'I-city', 'I-country', 'I-cuisine', 'I-current_location', 'I-entity_name', 'I-facility', 'I-genre', 'I-geographic_poi', 'I-location_name', 'I-movie_name', 'I-movie_type', 'I-music_item', 'I-object_location_type', 'I-object_name', 'I-object_part_of_series_type', 'I-object_select', 'I-object_type', 'I-party_size_description', 'I-playlist', 'I-playlist_owner', 'I-poi', 'I-restaurant_name', 'I-restaurant_type', 'I-served_dish', 'I-service', 'I-sort', 'I-spatial_relation', 'I-state', 'I-timeRange', 'I-track', 'O']
SNIPS_slot_effective_label2 = ['B-album', 'B-artist', 'B-best_rating', 'B-city', 'B-condition_description', 'B-condition_temperature', 'B-country', 'B-cuisine', 'B-current_location', 'B-entity_name', 'B-facility', 'B-genre', 'B-geographic_poi', 'B-location_name', 'B-movie_name', 'B-movie_type', 'B-music_item', 'B-object_location_type', 'B-object_name', 'B-object_part_of_series_type', 'B-object_select', 'B-object_type', 'B-party_size_description', 'B-party_size_number', 'B-playlist', 'B-playlist_owner', 'B-poi', 'B-rating_unit', 'B-rating_value', 'B-restaurant_name', 'B-restaurant_type', 'B-served_dish', 'B-service', 'B-sort', 'B-spatial_relation', 'B-state', 'B-timeRange', 'B-track', 'B-year', 'I-album', 'I-artist', 'I-city', 'I-country', 'I-cuisine', 'I-current_location', 'I-entity_name', 'I-facility', 'I-genre', 'I-geographic_poi', 'I-location_name', 'I-movie_name', 'I-movie_type', 'I-music_item', 'I-object_location_type', 'I-object_name', 'I-object_part_of_series_type', 'I-object_select', 'I-object_type', 'I-party_size_description', 'I-playlist', 'I-playlist_owner', 'I-poi', 'I-restaurant_name', 'I-restaurant_type', 'I-served_dish', 'I-service', 'I-sort', 'I-spatial_relation', 'I-state', 'I-timeRange', 'I-track']
with open(os.path.join("SNIPS_slot", "seq.out")) as label_f:
label_list = [label.replace("\n", "") for label in label_f.readlines()]
label_list = [seq.split() for seq in label_list]
#print(len(label_list), label_list)
with open(os.path.join("SNIPS_slot", "label_test.txt")) as predict_f:
predict_list = [predict_label.replace("\n", "") for predict_label in predict_f.readlines()]
#print(len(predict_list), predict_list)
predict_sentence_list = []
for word in predict_list:
if "[CLS]" == word:
a_sentence = []
a_sentence.append(word)
if "[SEP]" == word:
predict_sentence_list.append(a_sentence)
#print(len(predict_sentence_list), predict_sentence_list)
y_test_list = []
clean_y_predict_list = []
assert len(label_list)==len(predict_sentence_list)
for y_test, y_predict in zip(label_list, predict_sentence_list):
y_predict.remove('[CLS]')
y_predict.remove('[SEP]')
while '[Padding]' in y_predict:
y_predict.remove('[Padding]')
while '[##WordPiece]' in y_predict:
y_predict.remove('[##WordPiece]')
if len(y_predict)!=len(y_test):
print(y_predict)
print(y_test)
print("~"*100)
y_test_list.extend(y_test)
clean_y_predict_list.extend(y_predict)
assert len(y_test_list)==len(clean_y_predict_list)
y_test_list, clean_y_predict_list = delete_both_sides_is_O_word(y_test_list, clean_y_predict_list)
show_metrics(y_test=y_test_list, y_predict=clean_y_predict_list, labels=SNIPS_slot_effective_label)
| 107.78
| 1,277
| 0.705882
| 850
| 5,389
| 4.163529
| 0.12
| 0.038429
| 0.016954
| 0.030517
| 0.764058
| 0.751907
| 0.725911
| 0.703306
| 0.686352
| 0.686352
| 0
| 0.000817
| 0.091112
| 5,389
| 49
| 1,278
| 109.979592
| 0.721723
| 0.023752
| 0
| 0
| 0
| 0
| 0.552977
| 0.11071
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a9bc25d8e68c544d6e1c49ced89993801b475e53
| 18,667
|
py
|
Python
|
Models/plot_results_section2.py
|
Filoteea/dissertation_code
|
e538320c5c8a6801075c2380e6dedf78a9334a5e
|
[
"MIT"
] | null | null | null |
Models/plot_results_section2.py
|
Filoteea/dissertation_code
|
e538320c5c8a6801075c2380e6dedf78a9334a5e
|
[
"MIT"
] | null | null | null |
Models/plot_results_section2.py
|
Filoteea/dissertation_code
|
e538320c5c8a6801075c2380e6dedf78a9334a5e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on March 17 04:50:04 2022
@author: Filoteea Moldovan
Adapted from: Edward Chung
Script used for creating the plots in section 3.3.
"""
# Standard Library imports
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy import genfromtxt
# Semi-local imports
import name_qch4_couple.plot_h2
# Local imports
import chem_co
# Set the date-time variable
date = '2018-01'
# Hourly timestamps spanning 12 months from `date`; closed='left' drops the
# final endpoint so exactly one year of hours is produced.
# NOTE(review): `closed=` was removed in pandas 2.x (now `inclusive=`) —
# confirm the pinned pandas version.
dates_tHour = pd.date_range(
    pd.to_datetime(date),
    pd.to_datetime(date) + pd.DateOffset(months=12),
    closed='left',
    freq='1H'
)
# =============================================================================
# Plotting the MHD and WAO observations and their difference and
# calculating the SD of this difference
# =============================================================================
'''
Inputs:
- H2 observations at WAO for 2018
- H2 observations at MHD for 2018
'''
# read observations
# sigma_obs_H2 from the first call is overwritten by the second; only the
# MHD and WAO concentration series are used below.
obs_mhd, sigma_obs_H2 = chem_co.read_obs(dates_tHour, "MHD_10magl", 0)
obs_wao, sigma_obs_H2 = chem_co.read_obs(dates_tHour, "WAO", 0)
'''
# calculate SD
dif = []
z = 0
count = 0
for i, j in zip(obs_mhd, obs_wao):
if np.isnan(i) or np.isnan(j):
count += 1
else:
dif.append(i - j)
z += 1
dev = np.std(dif)
# Plot
figs = {}
axs = {}
pobjs = {}
zorder = {
'background': 1,
'final': 2
}
fig_param = {
'mw': 10.5, 'mh': 7,
'mpw': 8.5, 'mph': 5.7,
'mgap': 0.05,
'mlmargin': 1.2, 'mbmargin': 1.5,
'ylblx': 0.05, 'ylbly': 1.5, # left, centre aligned
'fontsize': 15,
'fontsize2': 12,
}
plt.close('all')
ylabel = u'$\chi$ H$_{2}$ (nmol mol$^{-1}$)'
ylabel2 = 'Differences (nmol mol$^{-1}$)'
ylim = [350., 600.]
ylim2 = [-70., 600.]
yticks = np.arange(300., 600., 50.)
yticks2 = np.arange(-70., 600., 100.)
var_long_name = 'mole_fraction_of_hydrogen'
var_units = 'nmol mol-1'
for i in range(12, 13):
figs['main'] = plt.figure(figsize=(fig_param['mw'], fig_param['mh']), dpi=300)
axs['main'] = {}
pobjs['main'] = {}
figs['main'].clf()
# dev = np.std(np.array(bas_mhd[i]) - np.array(bas_wao[i]))
name_qch4_couple.plot_h2.generic3(
fig=figs['main'],
axs=axs['main'],
pobjs=pobjs['main'],
new_axs={
'date1': [
dict(
rect=[
(1 * fig_param['mlmargin']
+ fig_param['mgap'])
/ fig_param['mw'],
(fig_param['mh']
- fig_param['mph']
+ fig_param['mgap'])
/ fig_param['mh'],
(fig_param['mpw']
- 2*fig_param['mgap'])
/ fig_param['mw'],
(fig_param['mph']
- 2*fig_param['mgap'])
/ fig_param['mh']
],
label='date1',
projection=None
),
{
"set_yticks": [[yticks], {}],
"set_xlim": [[
pd.to_datetime(date),
pd.to_datetime(date) + pd.DateOffset(months=12),
], {}],
"set_ylim": [[ylim], {}],
"tick_params": [[], dict(
axis='both', which='major', direction='in',
labelsize=fig_param['fontsize'],
left=True, bottom=False,
right=False, top=False,
labelleft=True, labelbottom=False,
labelright=False, labeltop=False,
)],
"xaxis.set_major_locator": [
[mdates.DayLocator(bymonthday=1)], {}
],
"xaxis.set_major_formatter": [
[mdates.DateFormatter('%Y-%m-%d')], {}
],
},
dict(
patch_alpha=0.0
)
]
},
new_pobjs={
'mhd': [
'date1', 'plot', [dates_tHour, np.array(obs_mhd), 'o'],
{'c': '#000000', 'ms': 1., 'mew': 1.,
'zorder': zorder['final'],
'label': 'Observed MHD'}
],
'wao': [
'date1', 'plot', [dates_tHour, np.array(obs_wao), 'o'],
{'c': '#0012FF', 'ms': 1., 'mew': 1.,
'zorder': zorder['final'],
'label': 'Observed WAO'}
],
'legend':[
'date1', 'legend', [],
dict(
loc='upper left',
numpoints=2, fontsize=fig_param['fontsize2'], ncol=3,
markerscale=5.0/3.5, handletextpad=0.2, columnspacing=1.0,
borderpad=0.2, borderaxespad=0.2
)
]
},
texts=[
{
'x': fig_param['mgap'] / fig_param['mw'],
'y': (fig_param['mh']
- 1/2*fig_param['mph'])
/ fig_param['mh'],
's': ylabel,
'ha': 'left', 'va': 'center',
'size': fig_param['fontsize'], 'rotation': 90
}
],
legend_params=[
[],
[],
{}
]
)
name_qch4_couple.plot_h2.generic3(
fig=figs['main'],
axs=axs['main'],
pobjs=pobjs['main'],
new_axs={
'date1': [
dict(
rect=[
(1 * fig_param['mlmargin']
+ fig_param['mgap'])
/ fig_param['mw'],
(fig_param['mh']
- fig_param['mph']
+ fig_param['mgap'])
/ fig_param['mh'],
(fig_param['mpw']
- 2*fig_param['mgap'])
/ fig_param['mw'],
(fig_param['mph']
- 2*fig_param['mgap'])
/ fig_param['mh']
],
label='date1',
projection=None
),
{
"set_yticks": [[yticks2], {}],
"set_xlim": [[
pd.to_datetime(date),
pd.to_datetime(date) + pd.DateOffset(months=12),
], {}],
"set_ylim": [[ylim2], {}],
"tick_params": [[], dict(
axis='both', which='major', direction='in',
labelsize=fig_param['fontsize'],
left=False, bottom=True,
right=True, top=False,
labelleft=False, labelbottom=True,
labelright=True, labeltop=False,
)],
"xaxis.set_major_locator": [
[mdates.DayLocator(bymonthday=1)], {}
],
"xaxis.set_major_formatter": [
[mdates.DateFormatter('%Y-%m-%d')], {}
],
},
dict(
patch_alpha=0.0
)
]
},
new_pobjs={
'residual': [
'date1', 'plot', [dates_tHour, np.array(obs_mhd)-np.array(obs_wao), '--'],
{'c': '#767676', 'ms': 1., 'mew': 0.,
'zorder': zorder['final'],
'label': 'MHD-WAO - SD: {:.2f}'.format(dev)}
],
'legend':[
'date1', 'legend', [],
dict(
loc='upper right',
numpoints=2, fontsize=fig_param['fontsize2'], ncol=3,
markerscale=5.0/3.5, handletextpad=0.2, columnspacing=1.0,
borderpad=0.2, borderaxespad=0.2
)
]
},
texts=[
{
'x': 1,
'y': (fig_param['mh']
- 1/2*fig_param['mph'])
/ fig_param['mh'],
's': ylabel2,
'ha': 'right', 'va': 'center',
'size': fig_param['fontsize'], 'rotation': 270
}
],
legend_params=[
[],
[],
{}
]
)
for l in axs['main']['date1'].get_xticklabels():
l.set_ha("right")
l.set_rotation(30)
# figs['main'].savefig(f'outputs/obs_dif_sd.png')
'''
# =============================================================================
# Plotting the MHD and WAO modelled 'baselines' and their difference and
# calculating the SD of this difference
# =============================================================================
'''
Inputs:
- MHD modelled 'baseline' calculated in create_baseline.py
- WAO modelled 'baseline' calculated in create_baseline.py
S13 (Appendix 7.2.1.) was used for the final result
'''
# import modelled 'baselines'
bas_mhd = genfromtxt('outputs/models/baselines/lower_emm/2018/2018_mhd.csv', delimiter=',')
bas_wao = genfromtxt('outputs/models/baselines/lower_emm/2018/2018_wao.csv', delimiter=',')
# Transpose so each row is one scenario's hourly series; row 12 is used
# below (per the header comment, presumably scenario S13 — confirm).
bas_mhd = np.transpose(bas_mhd)
bas_wao = np.transpose(bas_wao)
# calculate SD
dev = np.std(bas_mhd[12] - bas_wao[12])
# remove data point where observations are missing
# (z and count are leftovers from the commented-out section; unused here)
dif = []
z = 0
count = 0
for i in range(0, len(obs_mhd)):
    if np.isnan(obs_mhd[i]) or np.isnan(obs_wao[i]):
        dif.append(np.nan)
    else:
        dif.append(bas_mhd[12][i] - bas_wao[12][i])
# Zero out baseline points that have no matching observation so they do not
# appear on the plot.  NOTE: mhd/wao are views into bas_mhd/bas_wao row 12,
# so the zeroing below also mutates those arrays in place.
mhd = bas_mhd[12]
wao = bas_wao[12]
for i in range(0, len(obs_mhd)):
    if np.isnan(obs_mhd[i]):
        mhd[i] = 0
    if np.isnan(obs_wao[i]):
        wao[i] = 0
# Plot
figs = {}
axs = {}
pobjs = {}
# Draw order: 'final' series render above 'background'.
zorder = {
    'background': 1,
    'final': 2
}
# Figure geometry (margins/sizes, presumably inches — confirm against
# name_qch4_couple.plot_h2) and font sizes shared by both overlaid axes.
fig_param = {
    'mw': 10.5, 'mh': 7,
    'mpw': 8.5, 'mph': 5.7,
    'mgap': 0.05,
    'mlmargin': 1.2, 'mbmargin': 1.5,
    'ylblx': 0.05, 'ylbly': 1.5, # left, centre aligned
    'fontsize': 15,
    'fontsize2': 12,
}
plt.close('all')
# Axis labels and ranges: left axis = mole fractions, right axis = differences.
ylabel = u'$\chi$ H$_{2}$ (nmol mol$^{-1}$)'
ylabel2 = 'Differences (nmol mol$^{-1}$)'
ylim = [350., 600.]
ylim2 = [-70., 600.]
yticks = np.arange(300., 600., 50.)
yticks2 = np.arange(-70., 600., 100.)
var_long_name = 'mole_fraction_of_hydrogen'
var_units = 'nmol mol-1'
for i in range(12, 13): # allows to create the plots for all the scenarios in one run
# in this case only plotting the modelled 'baseline' with the lowest SD
    figs['main'] = plt.figure(figsize=(fig_param['mw'], fig_param['mh']), dpi=300)
    axs['main'] = {}
    pobjs['main'] = {}
    figs['main'].clf()
    # First generic3 call: left-hand axes with the (masked) MHD and WAO
    # modelled baselines plotted as points.
    # NOTE(review): 'Mobelled' in the MHD legend label looks like a typo for
    # 'Modelled' — confirm before changing, it is a runtime string.
    name_qch4_couple.plot_h2.generic3(
        fig=figs['main'],
        axs=axs['main'],
        pobjs=pobjs['main'],
        new_axs={
            'date1': [
                dict(
                    rect=[
                        (1 * fig_param['mlmargin']
                         + fig_param['mgap'])
                        / fig_param['mw'],
                        (fig_param['mh']
                         - fig_param['mph']
                         + fig_param['mgap'])
                        / fig_param['mh'],
                        (fig_param['mpw']
                         - 2*fig_param['mgap'])
                        / fig_param['mw'],
                        (fig_param['mph']
                         - 2*fig_param['mgap'])
                        / fig_param['mh']
                    ],
                    label='date1',
                    projection=None
                ),
                {
                    "set_yticks": [[yticks], {}],
                    "set_xlim": [[
                        pd.to_datetime(date),
                        pd.to_datetime(date) + pd.DateOffset(months=12),
                    ], {}],
                    "set_ylim": [[ylim], {}],
                    "tick_params": [[], dict(
                        axis='both', which='major', direction='in',
                        labelsize=fig_param['fontsize'],
                        left=True, bottom=False,
                        right=False, top=False,
                        labelleft=True, labelbottom=False,
                        labelright=False, labeltop=False,
                    )],
                    "xaxis.set_major_locator": [
                        [mdates.DayLocator(bymonthday=1)], {}
                    ],
                    "xaxis.set_major_formatter": [
                        [mdates.DateFormatter('%Y-%m-%d')], {}
                    ],
                },
                dict(
                    patch_alpha=0.0
                )
            ]
        },
        new_pobjs={
            'mhd': [
                'date1', 'plot', [dates_tHour, np.array(mhd), 'o'],
                {'c': '#000000', 'ms': 1., 'mew': 1.,
                 'zorder': zorder['final'],
                 'label': 'Mobelled MHD baseline'}
            ],
            'wao': [
                'date1', 'plot', [dates_tHour, np.array(wao), 'o'],
                {'c': '#0012FF', 'ms': 1., 'mew': 1.,
                 'zorder': zorder['final'],
                 'label': 'Modelled WAO baseline'}
            ],
            'legend':[
                'date1', 'legend', [],
                dict(
                    loc='upper left',
                    numpoints=2, fontsize=fig_param['fontsize2'], ncol=3,
                    markerscale=5.0/3.5, handletextpad=0.2, columnspacing=1.0,
                    borderpad=0.2, borderaxespad=0.2
                )
            ]
        },
        texts=[
            {
                'x': fig_param['mgap'] / fig_param['mw'],
                'y': (fig_param['mh']
                      - 1/2*fig_param['mph'])
                     / fig_param['mh'],
                's': ylabel,
                'ha': 'left', 'va': 'center',
                'size': fig_param['fontsize'], 'rotation': 90
            }
        ],
        legend_params=[
            [],
            [],
            {}
        ]
    )
    # Second generic3 call: overlays a second axes in the same rect for the
    # MHD-WAO difference series, with its SD reported in the legend label
    # and tick labels mirrored to the right-hand side.
    name_qch4_couple.plot_h2.generic3(
        fig=figs['main'],
        axs=axs['main'],
        pobjs=pobjs['main'],
        new_axs={
            'date1': [
                dict(
                    rect=[
                        (1 * fig_param['mlmargin']
                         + fig_param['mgap'])
                        / fig_param['mw'],
                        (fig_param['mh']
                         - fig_param['mph']
                         + fig_param['mgap'])
                        / fig_param['mh'],
                        (fig_param['mpw']
                         - 2*fig_param['mgap'])
                        / fig_param['mw'],
                        (fig_param['mph']
                         - 2*fig_param['mgap'])
                        / fig_param['mh']
                    ],
                    label='date1',
                    projection=None
                ),
                {
                    "set_yticks": [[yticks2], {}],
                    "set_xlim": [[
                        pd.to_datetime(date),
                        pd.to_datetime(date) + pd.DateOffset(months=12),
                    ], {}],
                    "set_ylim": [[ylim2], {}],
                    "tick_params": [[], dict(
                        axis='both', which='major', direction='in',
                        labelsize=fig_param['fontsize'],
                        left=False, bottom=True,
                        right=True, top=False,
                        labelleft=False, labelbottom=True,
                        labelright=True, labeltop=False,
                    )],
                    "xaxis.set_major_locator": [
                        [mdates.DayLocator(bymonthday=1)], {}
                    ],
                    "xaxis.set_major_formatter": [
                        [mdates.DateFormatter('%Y-%m-%d')], {}
                    ],
                },
                dict(
                    patch_alpha=0.0
                )
            ]
        },
        new_pobjs={
            'residual': [
                'date1', 'plot', [dates_tHour, np.array(dif), '--'],
                {'c': '#767676', 'ms': 1., 'mew': 0.,
                 'zorder': zorder['final'],
                 'label': 'MHD-WAO - SD: {:.2f}'.format(dev)}
            ],
            'legend':[
                'date1', 'legend', [],
                dict(
                    loc='upper right',
                    numpoints=2, fontsize=fig_param['fontsize2'], ncol=3,
                    markerscale=5.0/3.5, handletextpad=0.2, columnspacing=1.0,
                    borderpad=0.2, borderaxespad=0.2
                )
            ]
        },
        texts=[
            {
                'x': 1,
                'y': (fig_param['mh']
                      - 1/2*fig_param['mph'])
                     / fig_param['mh'],
                's': ylabel2,
                'ha': 'right', 'va': 'center',
                'size': fig_param['fontsize'], 'rotation': 270
            }
        ],
        legend_params=[
            [],
            [],
            {}
        ]
    )
    # Rotate date tick labels so they do not overlap.
    for l in axs['main']['date1'].get_xticklabels():
        l.set_ha("right")
        l.set_rotation(30)
    # figs['main'].savefig(f'outputs/obs_dif_sd.png')
| 32.464348
| 104
| 0.371083
| 1,639
| 18,667
| 4.081757
| 0.164735
| 0.10284
| 0.032885
| 0.040359
| 0.837369
| 0.824215
| 0.824215
| 0.798057
| 0.782362
| 0.770105
| 0
| 0.042715
| 0.472009
| 18,667
| 574
| 105
| 32.520906
| 0.636059
| 0.058338
| 0
| 0.502024
| 0
| 0
| 0.119973
| 0.025635
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02834
| 0
| 0.02834
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a9cd2ec94d16ed061fc349d32328e4b4dbfeb21f
| 73
|
py
|
Python
|
rpxdock/score/__init__.py
|
quecloud/rpxdock
|
41f7f98f5dacf24fc95897910263a0bec2209e59
|
[
"Apache-2.0"
] | null | null | null |
rpxdock/score/__init__.py
|
quecloud/rpxdock
|
41f7f98f5dacf24fc95897910263a0bec2209e59
|
[
"Apache-2.0"
] | null | null | null |
rpxdock/score/__init__.py
|
quecloud/rpxdock
|
41f7f98f5dacf24fc95897910263a0bec2209e59
|
[
"Apache-2.0"
] | 1
|
2020-04-13T20:07:52.000Z
|
2020-04-13T20:07:52.000Z
|
from .component import *
from .scorefunc import *
from .rpxhier import *
| 18.25
| 24
| 0.753425
| 9
| 73
| 6.111111
| 0.555556
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 25
| 24.333333
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7979ed258a22c023099e92c73c4cfe81788f021
| 22
|
py
|
Python
|
v3/as_drivers/as_GPS/__init__.py
|
Dilepa/micropython-async
|
3c8817d9ead33bcd8399d0935ffb24dd7bcd6e71
|
[
"MIT"
] | 443
|
2017-01-01T20:54:46.000Z
|
2022-03-28T06:17:30.000Z
|
v3/as_drivers/as_GPS/__init__.py
|
Dilepa/micropython-async
|
3c8817d9ead33bcd8399d0935ffb24dd7bcd6e71
|
[
"MIT"
] | 79
|
2017-01-28T17:53:32.000Z
|
2022-02-08T10:05:04.000Z
|
v3/as_drivers/as_GPS/__init__.py
|
Dilepa/micropython-async
|
3c8817d9ead33bcd8399d0935ffb24dd7bcd6e71
|
[
"MIT"
] | 126
|
2017-02-17T13:06:01.000Z
|
2022-03-07T03:50:50.000Z
|
from .as_GPS import *
| 11
| 21
| 0.727273
| 4
| 22
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e79bf5ecdef807e58dbecd8e5e7ff8a6617e25e0
| 65
|
py
|
Python
|
kmlfiles/__init__.py
|
aravindashokk/kmlfiles
|
d7eba48abb4d87019a0a0f3b6e1ba91720e4cc1e
|
[
"MIT"
] | null | null | null |
kmlfiles/__init__.py
|
aravindashokk/kmlfiles
|
d7eba48abb4d87019a0a0f3b6e1ba91720e4cc1e
|
[
"MIT"
] | null | null | null |
kmlfiles/__init__.py
|
aravindashokk/kmlfiles
|
d7eba48abb4d87019a0a0f3b6e1ba91720e4cc1e
|
[
"MIT"
] | null | null | null |
# Inside of __init__.py
from kmlfiles.read_kml import read_kml
| 21.666667
| 39
| 0.8
| 11
| 65
| 4.181818
| 0.818182
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 65
| 2
| 40
| 32.5
| 0.836364
| 0.323077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7de9ba7be1c3b30fe1b1f3403b7acb28cba343d
| 47
|
py
|
Python
|
zenml/models/__init__.py
|
bobbywlindsey/data-science
|
8c67abd75a1f70ce37a04aff074cc3416260a296
|
[
"MIT"
] | 1
|
2018-07-17T08:23:29.000Z
|
2018-07-17T08:23:29.000Z
|
zenml/models/__init__.py
|
bobbywlindsey/zenml
|
8c67abd75a1f70ce37a04aff074cc3416260a296
|
[
"MIT"
] | null | null | null |
zenml/models/__init__.py
|
bobbywlindsey/zenml
|
8c67abd75a1f70ce37a04aff074cc3416260a296
|
[
"MIT"
] | null | null | null |
from .pca import *
from .random_forest import *
| 23.5
| 28
| 0.765957
| 7
| 47
| 5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 28
| 23.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99f0a7bc98a398f4ff4710b66e9db01bfa98711a
| 45
|
py
|
Python
|
general/setpasswords.py
|
pastorsj/MusicVines
|
0151305ae865b29ece92daf4fb3c5455451b067f
|
[
"MIT"
] | null | null | null |
general/setpasswords.py
|
pastorsj/MusicVines
|
0151305ae865b29ece92daf4fb3c5455451b067f
|
[
"MIT"
] | null | null | null |
general/setpasswords.py
|
pastorsj/MusicVines
|
0151305ae865b29ece92daf4fb3c5455451b067f
|
[
"MIT"
] | null | null | null |
# Seed the Neo4j password into this process's environment so sibling
# modules can read it back via os.environ["neo4jpass"].
import os
# "SET ME" is a placeholder credential — replace before running.
# NOTE(review): committing a real password here would be a security risk;
# prefer sourcing this value from an external secret store.
os.environ["neo4jpass"] = "SET ME"
| 15
| 34
| 0.688889
| 7
| 45
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.133333
| 45
| 2
| 35
| 22.5
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
8245359e783e2257df881a84e90ecafb7d3a9608
| 106,415
|
py
|
Python
|
dannce/engine/generator.py
|
Darkweiss/dannce
|
dc64c73bebd4e3aeb5df6f4bc63e6b13e316877f
|
[
"MIT"
] | null | null | null |
dannce/engine/generator.py
|
Darkweiss/dannce
|
dc64c73bebd4e3aeb5df6f4bc63e6b13e316877f
|
[
"MIT"
] | null | null | null |
dannce/engine/generator.py
|
Darkweiss/dannce
|
dc64c73bebd4e3aeb5df6f4bc63e6b13e316877f
|
[
"MIT"
] | null | null | null |
"""Generator module for dannce training.
"""
import os
import numpy as np
from tensorflow import keras
from dannce.engine import processing as processing
from dannce.engine import ops as ops
from dannce.engine.video import LoadVideoFrame
import imageio
import warnings
import time
import scipy.ndimage.interpolation
import tensorflow as tf
# from tensorflow_graphics.geometry.transformation.axis_angle import rotate
from multiprocessing.dummy import Pool as ThreadPool
from typing import List, Dict, Tuple, Text
class DataGenerator(keras.utils.Sequence):
    """Generate data for Keras.

    Attributes:
        batch_size (int): Batch size to generate
        camnames (List): List of camera names.
        clusterIDs (List): List of sampleIDs
        crop_height (Tuple): (first, last) pixels in image height
        crop_width (tuple): (first, last) pixels in image width
        currvideo (Dict): Contains open video objects
        currvideo_name (Dict): Contains open video object names
        dim_in (Tuple): Input dimension
        dim_out (Tuple): Output dimension
        extension (Text): Video extension
        indexes (np.ndarray): sample indices used for batch generation
        labels (Dict): Label dictionary
        list_IDs (List): List of sampleIDs
        mirror (bool): If True, enables mirrored-image handling in subclasses.
        mono (bool): If True, use grayscale image.
        n_channels_in (int): Number of input channels
        n_channels_out (int): Number of output channels
        out_scale (int): Scale of the output gaussians.
        samples_per_cluster (int): Samples per cluster
        shuffle (bool): If True, shuffle the samples.
        vidreaders (Dict): Dict containing video readers.
        predict_flag (bool): If True, use imageio for reading videos, rather than OpenCV
    """

    def __init__(
        self,
        list_IDs: List,
        labels: Dict,
        clusterIDs: List,
        batch_size: int = 32,
        dim_in: Tuple = (32, 32, 32),
        n_channels_in: int = 1,
        n_channels_out: int = 1,
        out_scale: float = 5,
        shuffle: bool = True,
        camnames: List = None,
        crop_width: Tuple = (0, 1024),
        crop_height: Tuple = (20, 1300),
        samples_per_cluster: int = 0,
        vidreaders: Dict = None,
        chunks: int = 3500,
        mono: bool = False,
        mirror: bool = False,
        predict_flag: bool = False,
    ):
        """Initialize Generator.

        Args:
            list_IDs (List): List of sampleIDs
            labels (Dict): Label dictionary
            clusterIDs (List): List of sampleIDs
            batch_size (int, optional): Batch size to generate
            dim_in (Tuple, optional): Input dimension
            n_channels_in (int, optional): Number of input channels
            n_channels_out (int, optional): Number of output channels
            out_scale (float, optional): Scale of the output gaussians.
            shuffle (bool, optional): If True, shuffle the samples.
            camnames (List, optional): List of camera names. Defaults to an
                empty list when omitted.
            crop_width (Tuple, optional): (first, last) pixels in image width
            crop_height (Tuple, optional): (first, last) pixels in image height
            samples_per_cluster (int, optional): Samples per cluster
            vidreaders (Dict, optional): Dict containing video readers.
            chunks (int, optional): Size of chunks when using chunked mp4.
            mono (bool, optional): If True, use grayscale image.
            mirror (bool, optional): If True, enables mirrored-image handling
                in subclasses.
            predict_flag (bool, optional): If True, use imageio for reading videos, rather than OpenCV
        """
        # Guard against the shared-mutable-default pitfall: the previous
        # default of [] was a single list object shared by every instance.
        # Passing nothing (or None) still yields an empty list, so existing
        # callers are unaffected.
        if camnames is None:
            camnames = []
        self.dim_in = dim_in
        # Output spatial dims mirror the input dims by construction.
        self.dim_out = dim_in
        self.batch_size = batch_size
        self.labels = labels
        self.vidreaders = vidreaders
        self.list_IDs = list_IDs
        self.n_channels_in = n_channels_in
        self.n_channels_out = n_channels_out
        self.shuffle = shuffle
        # sigma for the ground truth joint probability map Gaussians
        self.out_scale = out_scale
        self.camnames = camnames
        self.crop_width = crop_width
        self.crop_height = crop_height
        self.clusterIDs = clusterIDs
        self.samples_per_cluster = samples_per_cluster
        self._N_VIDEO_FRAMES = chunks
        self.mono = mono
        self.mirror = mirror
        self.predict_flag = predict_flag
        # Build the initial (possibly shuffled) index order.
        self.on_epoch_end()

        if self.vidreaders is not None:
            # Derive the video file extension (".mp4", etc.) from the first
            # reader key of the first camera of the first experiment.
            self.extension = (
                "." + list(vidreaders[camnames[0][0]].keys())[0].rsplit(".")[-1]
            )

        assert len(self.list_IDs) == len(self.clusterIDs)

        self.load_frame = LoadVideoFrame(self._N_VIDEO_FRAMES,
                                         self.vidreaders,
                                         self.camnames,
                                         self.predict_flag)

    def __len__(self) -> int:
        """Denote the number of batches per epoch.

        Returns:
            int: Batches per epoch (floor division; a trailing partial batch
            is dropped)
        """
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def on_epoch_end(self):
        """Update indexes after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def random_rotate(self, X: np.ndarray, y_3d: np.ndarray, log: bool = False):
        """Rotate each sample by 0, 90, 180, or 270 degrees.

        log indicates whether to return the rotation pattern (for saving) as well.

        Note: relies on self.rot90/self.rot180, which subclasses provide.

        Args:
            X (np.ndarray): Input images
            y_3d (np.ndarray): Output 3d targets
            log (bool, optional): If True, log the rotations.

        Returns:
            Tuple[np.ndarray, np.ndarray]: Rotated X and y_3d.
            or
            Tuple[np.ndarray, np.ndarray, np.ndarray]: Rotated X, y_3d, and rot val
        """
        # One independent rotation code (0-3) per sample in the batch.
        rots = np.random.choice(np.arange(4), X.shape[0])
        for i in range(X.shape[0]):
            if rots[i] == 0:
                pass
            elif rots[i] == 1:
                # Rotate180
                X[i] = self.rot180(X[i])
                y_3d[i] = self.rot180(y_3d[i])
            elif rots[i] == 2:
                # Rotate90
                X[i] = self.rot90(X[i])
                y_3d[i] = self.rot90(y_3d[i])
            elif rots[i] == 3:
                # Rotate -90/270
                X[i] = self.rot90(X[i])
                X[i] = self.rot180(X[i])
                y_3d[i] = self.rot90(y_3d[i])
                y_3d[i] = self.rot180(y_3d[i])

        if log:
            return X, y_3d, rots
        else:
            return X, y_3d
class DataGenerator_3Dconv(DataGenerator):
    """Update generator class to handle multiple experiments.

    Attributes:
        camera_params (Dict): Camera parameters dictionary.
        channel_combo (Text): Method for shuffling camera input order
        com3d (Dict): Dictionary of com3d data.
        COM_aug (bool): If True, augment the COM.
        crop_im (bool): If True, crop images.
        depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
        dim_out_3d (Tuple): Dimensions of the 3D volume, in voxels
        distort (bool): If true, apply camera undistortion.
        expval (bool): If True, process an expected value network (AVG)
        gpu_id (Text): Identity of GPU to use.
        immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
        interp (Text): Interpolation method.
        labels_3d (Dict): Contains ground-truth 3D label coordinates.
        mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
        multicam (bool): If True, formats data to work with multiple cameras as input.
        norm_im (bool): If True, normalize images.
        nvox (int): Number of voxels per box side
        rotation (bool): If True, use simple rotation augmentation.
        tifdirs (List): Directories of .tifs
        var_reg (bool): If True, adds a variance regularization term to the loss function.
        vmax (int): Maximum box dim (relative to the COM)
        vmin (int): Minimum box dim (relative to the COM)
        vsize (float): Side length of one voxel
        predict_flag (bool): If True, use imageio for reading videos, rather than OpenCV
    """

    def __init__(
        self,
        list_IDs: List,
        labels: Dict,
        labels_3d: Dict,
        camera_params: Dict,
        clusterIDs: List,
        com3d: Dict,
        tifdirs: List,
        batch_size: int = 32,
        dim_in: Tuple = (32, 32, 32),
        n_channels_in: int = 1,
        n_channels_out: int = 1,
        out_scale: int = 5,
        shuffle: bool = True,
        camnames: List = [],
        crop_width: Tuple = (0, 1024),
        crop_height: Tuple = (20, 1300),
        vmin: int = -100,
        vmax: int = 100,
        nvox: int = 32,
        gpu_id: Text = "0",
        interp: Text = "linear",
        depth: bool = False,
        channel_combo=None,
        mode: Text = "3dprob",
        samples_per_cluster: int = 0,
        immode: Text = "tif",
        rotation: bool = False,
        vidreaders: Dict = None,
        distort: bool = True,
        expval: bool = False,
        multicam: bool = True,
        var_reg: bool = False,
        COM_aug: bool = None,
        crop_im: bool = True,
        norm_im: bool = True,
        chunks: int = 3500,
        mono: bool = False,
        mirror: bool = False,
        predict_flag: bool = False,
    ):
        """Initialize data generator.

        Args:
            list_IDs (List): List of sample Ids
            labels (Dict): Dictionary of labels
            labels_3d (Dict): Dictionary of 3d labels.
            camera_params (Dict): Camera parameters dictionary.
            clusterIDs (List): List of sample Ids
            com3d (Dict): Dictionary of com3d data.
            tifdirs (List): Directories of .tifs
            batch_size (int, optional): Batch size to generate
            dim_in (Tuple, optional): Input dimension
            n_channels_in (int, optional): Number of input channels
            n_channels_out (int, optional): Number of output channels
            out_scale (int, optional): Scale of the output gaussians.
            shuffle (bool, optional): If True, shuffle the samples.
            camnames (List, optional): List of camera names.
            crop_width (Tuple, optional): (first, last) pixels in image width
            crop_height (Tuple, optional): (first, last) pixels in image height
            vmin (int, optional): Minimum box dim (relative to the COM)
            vmax (int, optional): Maximum box dim (relative to the COM)
            nvox (int, optional): Number of voxels per box side
            gpu_id (Text, optional): Identity of GPU to use.
            interp (Text, optional): Interpolation method.
            depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
            channel_combo (Text): Method for shuffling camera input order
            mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
            samples_per_cluster (int, optional): Samples per cluster
            immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
            rotation (bool, optional): If True, use simple rotation augmentation.
            vidreaders (Dict, optional): Dict containing video readers.
            distort (bool, optional): If true, apply camera undistortion.
            expval (bool, optional): If True, process an expected value network (AVG)
            multicam (bool): If True, formats data to work with multiple cameras as input.
            var_reg (bool): If True, adds a variance regularization term to the loss function.
            COM_aug (bool, optional): If True, augment the COM.
            crop_im (bool, optional): If True, crop images.
            norm_im (bool, optional): If True, normalize images.
            chunks (int, optional): Size of chunks when using chunked mp4.
            mono (bool, optional): If True, use grayscale image.
            predict_flag (bool, optional): If True, use imageio for reading videos, rather than OpenCV
        """
        # NOTE(review): camnames=[] is a shared mutable default (read-only
        # here, but a known Python pitfall) — consider a None sentinel.
        DataGenerator.__init__(
            self,
            list_IDs,
            labels,
            clusterIDs,
            batch_size,
            dim_in,
            n_channels_in,
            n_channels_out,
            out_scale,
            shuffle,
            camnames,
            crop_width,
            crop_height,
            samples_per_cluster,
            vidreaders,
            chunks,
            mono,
            mirror,
            predict_flag,
        )
        self.vmin = vmin
        self.vmax = vmax
        self.nvox = nvox
        # Physical side length of one voxel, in the same units as vmin/vmax.
        self.vsize = (vmax - vmin) / nvox
        self.dim_out_3d = (nvox, nvox, nvox)
        self.labels_3d = labels_3d
        self.camera_params = camera_params
        self.interp = interp
        self.depth = depth
        self.channel_combo = channel_combo
        # NOTE(review): debug print of channel_combo left in place.
        print(self.channel_combo)
        self.mode = mode
        self.immode = immode
        self.tifdirs = tifdirs
        self.com3d = com3d
        self.rotation = rotation
        self.distort = distort
        self.expval = expval
        self.multicam = multicam
        self.var_reg = var_reg
        self.COM_aug = COM_aug
        self.crop_im = crop_im
        # If saving npy as uint8 rather than training directly, dont normalize
        self.norm_im = norm_im
        self.gpu_id = gpu_id

    def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray]:
        """Generate one batch of data.

        Args:
            index (int): Frame index

        Returns:
            Tuple[np.ndarray, np.ndarray]: One batch of data
                X (np.ndarray): Input volume
                y (np.ndarray): Target
        """
        # Generate indexes of the batch
        indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(list_IDs_temp)

        return X, y

    def rot90(self, X: np.ndarray) -> np.ndarray:
        """Rotate X by 90 degrees CCW.

        Args:
            X (np.ndarray): Input volume.

        Returns:
            np.ndarray: Rotated volume
        """
        # Swap the first two spatial axes, then flip the new second axis.
        X = np.transpose(X, [1, 0, 2, 3])
        X = X[:, ::-1, :, :]
        return X

    def rot180(self, X):
        """Rotate X by 180 degrees.

        Args:
            X (np.ndarray): Input volume.

        Returns:
            np.ndarray: Rotated volume
        """
        X = X[::-1, ::-1, :, :]
        return X

    def __data_generation(self, list_IDs_temp: List) -> Tuple:
        """Generate data containing batch_size samples.

        X : (n_samples, *dim, n_channels)

        Args:
            list_IDs_temp (List): List of experiment Ids

        Returns:
            Tuple: Batch_size training samples
                X: Input volumes
                y_3d: Targets
                rotangle: Rotation angle
        Raises:
            Exception: Invalid generator mode specified.
        """
        # Initialization
        first_exp = int(self.list_IDs[0].split("_")[0])

        # One volume slot per (sample, camera); self.depth (bool) adds one
        # extra channel when True via bool->int promotion.
        X = np.zeros(
            (
                self.batch_size * len(self.camnames[first_exp]),
                *self.dim_out_3d,
                self.n_channels_in + self.depth,
            ),
            dtype="float32",
        )

        if self.mode == "3dprob":
            y_3d = np.zeros(
                (self.batch_size, self.n_channels_out, *self.dim_out_3d),
                dtype="float32",
            )
        elif self.mode == "coordinates":
            y_3d = np.zeros((self.batch_size, 3, self.n_channels_out), dtype="float32")
        else:
            raise Exception("not a valid generator mode")

        if self.expval:
            # Flat list of voxel-center coordinates for the AVG network.
            sz = self.dim_out_3d[0] * self.dim_out_3d[1] * self.dim_out_3d[2]
            X_grid = np.zeros((self.batch_size, sz, 3), dtype="float32")

        # Generate data
        cnt = 0
        for i, ID in enumerate(list_IDs_temp):
            sampleID = int(ID.split("_")[1])
            experimentID = int(ID.split("_")[0])

            # For 3D ground truth
            this_y_3d = self.labels_3d[ID]
            this_COM_3d = self.com3d[ID]

            if self.COM_aug is not None:
                # Jitter the COM uniformly in [-COM_aug, +COM_aug] per axis.
                this_COM_3d = this_COM_3d.copy().ravel()
                this_COM_3d = (
                    this_COM_3d
                    + self.COM_aug * 2 * np.random.rand(len(this_COM_3d))
                    - self.COM_aug
                )

            # Create and project the grid here,
            xgrid = np.arange(
                self.vmin + this_COM_3d[0] + self.vsize / 2,
                this_COM_3d[0] + self.vmax,
                self.vsize,
            )
            ygrid = np.arange(
                self.vmin + this_COM_3d[1] + self.vsize / 2,
                this_COM_3d[1] + self.vmax,
                self.vsize,
            )
            zgrid = np.arange(
                self.vmin + this_COM_3d[2] + self.vsize / 2,
                this_COM_3d[2] + self.vmax,
                self.vsize,
            )
            (x_coord_3d, y_coord_3d, z_coord_3d) = np.meshgrid(xgrid, ygrid, zgrid)

            if self.mode == "3dprob":
                # Isotropic Gaussian centered on each 3D keypoint.
                for j in range(self.n_channels_out):
                    y_3d[i, j] = np.exp(
                        -(
                            (y_coord_3d - this_y_3d[1, j]) ** 2
                            + (x_coord_3d - this_y_3d[0, j]) ** 2
                            + (z_coord_3d - this_y_3d[2, j]) ** 2
                        )
                        / (2 * self.out_scale ** 2)
                    )
                    # When the voxel grid is coarse, we will likely miss
                    # the peak of the probability distribution, as it
                    # will lie somewhere in the middle of a large voxel.
                    # So here we renormalize to [~, 1]
                    # NOTE(review): no renormalization code follows this
                    # comment in this version — confirm against upstream.

            if self.mode == "coordinates":
                if this_y_3d.shape == y_3d[i].shape:
                    y_3d[i] = this_y_3d
                else:
                    msg = "Note: ignoring dimension mismatch in 3D labels"
                    warnings.warn(msg)

            if self.expval:
                X_grid[i] = np.stack(
                    (
                        x_coord_3d.ravel(),
                        y_coord_3d.ravel(),
                        z_coord_3d.ravel(),
                    ),
                    axis=1,
                )

            for _ci, camname in enumerate(self.camnames[experimentID]):
                ts = time.time()
                # Need this copy so that this_y does not change
                this_y = np.round(self.labels[ID]["data"][camname]).copy()
                if np.all(np.isnan(this_y)):
                    com_precrop = np.zeros_like(this_y[:, 0]) * np.nan
                else:
                    # For projecting points, we should not use this offset
                    com_precrop = np.nanmean(this_y, axis=1)

                # Store sample
                # With mirror enabled, only the first camera's frame is
                # decoded; later cameras reuse the saved raw image.
                if not self.mirror or _ci == 0:
                    # for pre-cropped tifs
                    if self.immode == "tif":
                        thisim = imageio.imread(
                            os.path.join(
                                self.tifdirs[experimentID],
                                camname,
                                "{}.tif".format(sampleID),
                            )
                        )

                    # From raw video, need to crop
                    elif self.immode == "vid":
                        thisim = self.load_frame.load_vid_frame(
                            self.labels[ID]["frames"][camname],
                            camname,
                            extension=self.extension,
                        )[
                            self.crop_height[0] : self.crop_height[1],
                            self.crop_width[0] : self.crop_width[1],
                        ]
                        # print("Decode frame took {} sec".format(time.time() - ts))
                        tss = time.time()

                    # Load in the image file at the specified path
                    elif self.immode == "arb_ims":
                        thisim = imageio.imread(
                            self.tifdirs[experimentID]
                            + self.labels[ID]["frames"][camname][0]
                            + ".jpg"
                        )

                    if self.mirror:
                        # Save copy of the first image loaded in, so that it can be flipped accordingly.
                        self.raw_im = thisim.copy()

                if self.mirror and self.camera_params[experimentID][camname]["m"] == 1:
                    thisim = self.raw_im.copy()
                    thisim = thisim[-1::-1]
                elif self.mirror and self.camera_params[experimentID][camname]["m"] == 0:
                    thisim = self.raw_im
                elif self.mirror:
                    raise Exception("Invalid mirror parameter, m, must be 0 or 1")

                if self.immode == "vid" or self.immode == "arb_ims":
                    # Shift 2D labels into the cropped image's frame.
                    this_y[0, :] = this_y[0, :] - self.crop_width[0]
                    this_y[1, :] = this_y[1, :] - self.crop_height[0]
                    com = np.nanmean(this_y, axis=1)

                    if self.crop_im:
                        if np.all(np.isnan(com)):
                            thisim = np.zeros(
                                (
                                    self.dim_in[1],
                                    self.dim_in[0],
                                    self.n_channels_in,
                                )
                            )
                        else:
                            thisim = processing.cropcom(
                                thisim, com, size=self.dim_in[0]
                            )

                # Project de novo or load in approximate (faster)
                # TODO(break up): This is hard to read, consider breaking up
                ts = time.time()
                proj_grid = ops.project_to2d(
                    np.stack(
                        (
                            x_coord_3d.ravel(),
                            y_coord_3d.ravel(),
                            z_coord_3d.ravel(),
                        ),
                        axis=1,
                    ),
                    self.camera_params[experimentID][camname]["K"],
                    self.camera_params[experimentID][camname]["R"],
                    self.camera_params[experimentID][camname]["t"],
                )

                if self.depth:
                    d = proj_grid[:, 2]
                # print("2D Proj took {} sec".format(time.time() - ts))
                ts = time.time()
                if self.distort:
                    """
                    Distort points using lens distortion parameters
                    """
                    proj_grid = ops.distortPoints(
                        proj_grid[:, :2],
                        self.camera_params[experimentID][camname]["K"],
                        np.squeeze(
                            self.camera_params[experimentID][camname]["RDistort"]
                        ),
                        np.squeeze(
                            self.camera_params[experimentID][camname]["TDistort"]
                        ),
                    ).T
                # print("Distort took {} sec".format(time.time() - ts))

                # ts = time.time()
                if self.crop_im:
                    proj_grid = proj_grid[:, :2] - com_precrop + self.dim_in[0] // 2
                    # Now all coordinates should map properly to the image
                    # cropped around the COM
                else:
                    # Then the only thing we need to correct for is
                    # crops at the borders
                    proj_grid = proj_grid[:, :2]
                    proj_grid[:, 0] = proj_grid[:, 0] - self.crop_width[0]
                    proj_grid[:, 1] = proj_grid[:, 1] - self.crop_height[0]

                (r, g, b) = ops.sample_grid(thisim, proj_grid, method=self.interp)
                # print("Sample grid took {} sec".format(time.time() - ts))

                # ~ is numpy elementwise NOT on the bool from np.any here.
                if (
                    ~np.any(np.isnan(com_precrop))
                    or (self.channel_combo == "avg")
                    or not self.crop_im
                ):

                    X[cnt, :, :, :, 0] = np.reshape(
                        r, (self.nvox, self.nvox, self.nvox)
                    )
                    X[cnt, :, :, :, 1] = np.reshape(
                        g, (self.nvox, self.nvox, self.nvox)
                    )
                    X[cnt, :, :, :, 2] = np.reshape(
                        b, (self.nvox, self.nvox, self.nvox)
                    )

                    if self.depth:
                        X[cnt, :, :, :, 3] = np.reshape(
                            d, (self.nvox, self.nvox, self.nvox)
                        )
                cnt = cnt + 1
                # print("Projection grid took {} sec".format(time.time() - tss))

        if self.multicam:
            # Split the flat (batch*ncams) axis back out, then move the
            # camera axis last.
            X = np.reshape(
                X,
                (
                    self.batch_size,
                    len(self.camnames[first_exp]),
                    X.shape[1],
                    X.shape[2],
                    X.shape[3],
                    X.shape[4],
                ),
            )
            X = np.transpose(X, [0, 2, 3, 4, 5, 1])

            if self.channel_combo == "avg":
                X = np.nanmean(X, axis=-1)
            # Randomly reorder the cameras fed into the first layer
            elif self.channel_combo == "random":
                X = X[:, :, :, :, :, np.random.permutation(X.shape[-1])]
                X = np.reshape(
                    X,
                    (
                        X.shape[0],
                        X.shape[1],
                        X.shape[2],
                        X.shape[3],
                        X.shape[4] * X.shape[5],
                    ),
                    order="F",
                )
            else:
                X = np.reshape(
                    X,
                    (
                        X.shape[0],
                        X.shape[1],
                        X.shape[2],
                        X.shape[3],
                        X.shape[4] * X.shape[5],
                    ),
                    order="F",
                )
        else:
            # Then leave the batch_size and num_cams combined
            y_3d = np.tile(y_3d, [len(self.camnames[experimentID]), 1, 1, 1, 1])

        if self.mode == "3dprob":
            y_3d = np.transpose(y_3d, [0, 2, 3, 4, 1])

        if self.rotation:
            if self.expval:
                # First make X_grid 3d
                X_grid = np.reshape(
                    X_grid,
                    (self.batch_size, self.nvox, self.nvox, self.nvox, 3),
                )
                X, X_grid = self.random_rotate(X, X_grid)
                # Need to reshape back to raveled version
                X_grid = np.reshape(X_grid, (self.batch_size, -1, 3))
            else:
                X, y_3d = self.random_rotate(X, y_3d)

        if self.mono and self.n_channels_in == 3:
            # Convert from RGB to mono using the skimage formula. Drop the duplicated frames.
            # Reshape so RGB can be processed easily.
            X = np.reshape(
                X,
                (
                    X.shape[0],
                    X.shape[1],
                    X.shape[2],
                    X.shape[3],
                    self.n_channels_in,
                    -1,
                ),
                order="F",
            )
            X = (
                X[:, :, :, :, 0] * 0.2125
                + X[:, :, :, :, 1] * 0.7154
                + X[:, :, :, :, 2] * 0.0721
            )

        # Then we also need to return the 3d grid center coordinates,
        # for calculating a spatial expected value
        # Xgrid is typically symmetric for 90 and 180 degree rotations
        # (when vmax and vmin are symmetric)
        # around the z-axis, so no need to rotate X_grid.
        if self.expval:
            if self.var_reg:
                return (
                    [processing.preprocess_3d(X), X_grid],
                    [y_3d, np.zeros((self.batch_size, 1))],
                )

            if self.norm_im:
                # y_3d is in coordinates here.
                return [processing.preprocess_3d(X), X_grid], y_3d
            else:
                return [X, X_grid], y_3d
        else:
            if self.norm_im:
                return processing.preprocess_3d(X), y_3d
            else:
                return X, y_3d
class DataGenerator_3Dconv_torch(DataGenerator):
"""Update generator class to resample from kmeans clusters after each epoch.
Also handles data across multiple experiments
Attributes:
camera_params (Dict): Camera parameters dictionary.
channel_combo (Text): Method for shuffling camera input order
com3d (Dict): Dictionary of com3d data.
COM_aug (bool): If True, augment the COM.
crop_im (bool): If True, crop images.
depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
device (torch.device): GPU device identifier
dim_out_3d (Tuple): Dimensions of the 3D volume, in voxels
distort (bool): If true, apply camera undistortion.
expval (bool): If True, process an expected value network (AVG)
gpu_id (Text): Identity of GPU to use.
immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
interp (Text): Interpolation method.
labels_3d (Dict): Contains ground-truth 3D label coordinates.
mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
multicam (bool): If True, formats data to work with multiple cameras as input.
norm_im (bool): If True, normalize images.
nvox (int): Number of voxels per box side
rotation (bool): If True, use simple rotation augmentation.
session (tf.compat.v1.InteractiveSession): tensorflow session.
threadpool (Threadpool): threadpool object for parallelizing video loading
tifdirs (List): Directories of .tifs
var_reg (bool): If True, adds a variance regularization term to the loss function.
vmax (int): Maximum box dim (relative to the COM)
vmin (int): Minimum box dim (relative to the COM)
vsize (float): Side length of one voxel
predict_flag (bool): If True, use imageio for reading videos, rather than OpenCV
"""
def __init__(
self,
list_IDs,
labels,
labels_3d,
camera_params,
clusterIDs,
com3d,
tifdirs,
batch_size=32,
dim_in=(32, 32, 32),
n_channels_in=1,
n_channels_out=1,
out_scale=5,
shuffle=True,
camnames=[],
crop_width=(0, 1024),
crop_height=(20, 1300),
vmin=-100,
vmax=100,
nvox=32,
gpu_id="0",
interp="linear",
depth=False,
channel_combo=None,
mode="3dprob",
samples_per_cluster=0,
immode="tif",
rotation=False,
vidreaders=None,
distort=True,
expval=False,
multicam=True,
var_reg=False,
COM_aug=None,
crop_im=True,
norm_im=True,
chunks=3500,
mono=False,
mirror=False,
predict_flag=False,
):
"""Initialize data generator.
Args:
list_IDs (List): List of sample Ids
labels (Dict): Dictionary of labels
labels_3d (Dict): Dictionary of 3d labels.
camera_params (Dict): Camera parameters dictionary.
clusterIDs (List): List of sample Ids
com3d (Dict): Dictionary of com3d data.
tifdirs (List): Directories of .tifs
batch_size (int, optional): Batch size to generate
dim_in (Tuple, optional): Input dimension
n_channels_in (int, optional): Number of input channels
n_channels_out (int, optional): Number of output channels
out_scale (int, optional): Scale of the output gaussians.
shuffle (bool, optional): If True, shuffle the samples.
camnames (List, optional): List of camera names.
crop_width (Tuple, optional): (first, last) pixels in image width
crop_height (Tuple, optional): (first, last) pixels in image height
vmin (int, optional): Minimum box dim (relative to the COM)
vmax (int, optional): Maximum box dim (relative to the COM)
nvox (int, optional): Number of voxels per box side
gpu_id (Text, optional): Identity of GPU to use.
interp (Text, optional): Interpolation method.
depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
channel_combo (Text): Method for shuffling camera input order
mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
samples_per_cluster (int, optional): Samples per cluster
immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
rotation (bool, optional): If True, use simple rotation augmentation.
vidreaders (Dict, optional): Dict containing video readers.
distort (bool, optional): If true, apply camera undistortion.
expval (bool, optional): If True, process an expected value network (AVG)
multicam (bool): If True, formats data to work with multiple cameras as input.
var_reg (bool): If True, adds a variance regularization term to the loss function.
COM_aug (bool, optional): If True, augment the COM.
crop_im (bool, optional): If True, crop images.
norm_im (bool, optional): If True, normalize images.
chunks (int, optional): Size of chunks when using chunked mp4.
mono (bool, optional): If True, use grayscale image.
predict_flag (bool, optional): If True, use imageio for reading videos, rather than OpenCV
"""
DataGenerator.__init__(
self,
list_IDs,
labels,
clusterIDs,
batch_size,
dim_in,
n_channels_in,
n_channels_out,
out_scale,
shuffle,
camnames,
crop_width,
crop_height,
samples_per_cluster,
vidreaders,
chunks,
mono,
mirror,
predict_flag,
)
self.vmin = vmin
self.vmax = vmax
self.nvox = nvox
self.vsize = (vmax - vmin) / nvox
self.dim_out_3d = (nvox, nvox, nvox)
self.labels_3d = labels_3d
self.camera_params = camera_params
self.interp = interp
self.depth = depth
self.channel_combo = channel_combo
print(self.channel_combo)
self.gpu_id = gpu_id
self.mode = mode
self.immode = immode
self.tifdirs = tifdirs
self.com3d = com3d
self.rotation = rotation
self.distort = distort
self.expval = expval
self.multicam = multicam
self.var_reg = var_reg
self.COM_aug = COM_aug
self.crop_im = crop_im
# If saving npy as uint8 rather than training directly, dont normalize
self.norm_im = norm_im
# importing torch here allows other modes to run without pytorch installed
self.torch = __import__("torch")
self.device = self.torch.device("cuda:" + self.gpu_id)
# self.device = self.torch.device('cpu')
self.threadpool = ThreadPool(len(self.camnames[0]))
ts = time.time()
# Limit GPU memory usage by Tensorflow to leave memory for PyTorch
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
config.gpu_options.allow_growth = True
self.session = tf.compat.v1.InteractiveSession(config=config, graph=tf.Graph())
for i, ID in enumerate(list_IDs):
experimentID = int(ID.split("_")[0])
for camname in self.camnames[experimentID]:
# M only needs to be computed once for each camera
K = self.camera_params[experimentID][camname]["K"]
R = self.camera_params[experimentID][camname]["R"]
t = self.camera_params[experimentID][camname]["t"]
M = self.torch.as_tensor(
ops.camera_matrix(K, R, t), dtype=self.torch.float32
)
self.camera_params[experimentID][camname]["M"] = M
print("Init took {} sec.".format(time.time() - ts))
def __getitem__(self, index: int):
"""Generate one batch of data.
Args:
index (int): Frame index
Returns:
Tuple[np.ndarray, np.ndarray]: One batch of data X
(np.ndarray): Input volume y
(np.ndarray): Target
"""
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def rot90(self, X):
"""Rotate X by 90 degrees CCW.
Args:
X (np.ndarray): Volume
Returns:
X (np.ndarray): Rotated volume
"""
X = X.permute(1, 0, 2, 3)
X = X.flip(1)
return X
def rot180(self, X):
"""Rotate X by 180 degrees.
Args:
X (np.ndarray): Volume
Returns:
X (np.ndarray): Rotated volume
"""
X = X.flip(0).flip(1)
return X
def project_grid(self, X_grid, camname, ID, experimentID):
    """Projects 3D voxel centers and sample images as projected 2D pixel coordinates

    Loads the video frame for `camname`, computes the 2D center of mass of the
    labeled keypoints before and after cropping, and delegates the actual
    projection + pixel sampling to pj_grid_post.

    Args:
        X_grid (np.ndarray): 3-D array containing center coordinates of each voxel.
        camname (Text): camera name
        ID (Text): string denoting a sample ID
        experimentID (int): identifier for a video recording session.

    Returns:
        np.ndarray: projected voxel centers, now in 2D pixels
    """
    ts = time.time()  # kept for the (commented-out) timing prints downstream
    # Need this copy so that this_y does not change
    this_y = self.torch.as_tensor(
        self.labels[ID]["data"][camname],
        dtype=self.torch.float32,
        device=self.device,
    ).round()

    if self.torch.all(self.torch.isnan(this_y)):
        # No valid labels for this view: propagate NaN as the pre-crop COM.
        com_precrop = self.torch.zeros_like(this_y[:, 0]) * self.torch.nan
    else:
        # For projecting points, we should not use this offset
        com_precrop = self.torch.mean(this_y, axis=1)

    # Shift labels into the cropped-image coordinate frame before taking
    # the post-crop COM. Assumes row 0 is x and row 1 is y — TODO confirm.
    this_y[0, :] = this_y[0, :] - self.crop_width[0]
    this_y[1, :] = this_y[1, :] - self.crop_height[0]
    com = self.torch.mean(this_y, axis=1)

    # Load the raw frame and crop it to the configured height/width window.
    thisim = self.load_frame.load_vid_frame(
        self.labels[ID]["frames"][camname],
        camname,
        extension=self.extension,
    )[
        self.crop_height[0]: self.crop_height[1],
        self.crop_width[0]: self.crop_width[1],
    ]
    return self.pj_grid_post(X_grid, camname, ID, experimentID,
                             com, com_precrop, thisim)
def pj_grid_mirror(self, X_grid, camname, ID, experimentID, thisim):
    """Project and sample from a pre-loaded (possibly mirror-flipped) frame.

    Used in mirror mode, where all views share a single image: the caller
    loads the frame once and each camera's "m" flag decides whether to flip
    it vertically before sampling.

    Args:
        X_grid: voxel center coordinates to project.
        camname (Text): camera name.
        ID (Text): sample ID.
        experimentID (int): recording-session identifier.
        thisim: the shared, already-cropped image for this sample.

    Returns:
        Output of pj_grid_post for the (possibly flipped) image.

    Raises:
        Exception: if self.mirror is not set, or the camera's "m" flag
            is neither 0 nor 1.
    """
    this_y = self.torch.as_tensor(
        self.labels[ID]["data"][camname],
        dtype=self.torch.float32,
        device=self.device,
    ).round()

    if self.torch.all(self.torch.isnan(this_y)):
        # No valid labels: NaN COM signals "no crop center" downstream.
        com_precrop = self.torch.zeros_like(this_y[:, 0]) * self.torch.nan
    else:
        # For projecting points, we should not use this offset
        com_precrop = self.torch.mean(this_y, axis=1)

    # Shift labels into the cropped-image frame, then take the COM.
    this_y[0, :] = this_y[0, :] - self.crop_width[0]
    this_y[1, :] = this_y[1, :] - self.crop_height[0]
    com = self.torch.mean(this_y, axis=1)

    if not self.mirror:
        raise Exception("Trying to project onto mirrored images without mirror being set properly")

    if self.camera_params[experimentID][camname]["m"] == 1:
        # Mirrored view: flip the image vertically (copy so the shared
        # frame is not mutated).
        passim = thisim[-1::-1].copy()
    elif self.camera_params[experimentID][camname]["m"] == 0:
        passim = thisim.copy()
    else:
        raise Exception("Invalid mirror parameter, m, must be 0 or 1")

    return self.pj_grid_post(X_grid, camname, ID, experimentID,
                             com, com_precrop, passim)
def pj_grid_post(self, X_grid, camname, ID, experimentID,
                 com, com_precrop, thisim):
    """Project voxel centers to 2D, undistort, and sample image pixels.

    Args:
        X_grid: voxel center coordinates.
        camname (Text): camera name.
        ID (Text): sample ID (unused here beyond bookkeeping).
        experimentID (int): recording-session identifier.
        com: post-crop 2D center of mass.
        com_precrop: pre-crop 2D center of mass.
        thisim: image to sample from.

    Returns:
        Sampled volume with channels moved to the last axis.
        NOTE(review): if com_precrop contains NaN while crop_im is set and
        channel_combo != "avg", the final `if` is skipped and the function
        implicitly returns None — confirm this is intended.
    """
    # separate the projection and sampling into its own function so that
    # when mirror == True, this can be called directly
    if self.crop_im:
        if self.torch.all(self.torch.isnan(com)):
            # No COM available: substitute a black image of the input size.
            thisim = self.torch.zeros(
                (self.dim_in[1], self.dim_in[0], self.n_channels_in),
                dtype=self.torch.uint8,
                device=self.device,
            )
        else:
            thisim = processing.cropcom(thisim, com, size=self.dim_in[0])
    # print('Frame loading took {} sec.'.format(time.time() - ts))

    ts = time.time()
    proj_grid = ops.project_to2d_torch(
        X_grid, self.camera_params[experimentID][camname]["M"], self.device
    )
    # print('Project2d took {} sec.'.format(time.time() - ts))

    ts = time.time()
    if self.distort:
        # Apply lens distortion so projected points match raw pixels.
        proj_grid = ops.distortPoints_torch(
            proj_grid[:, :2],
            self.camera_params[experimentID][camname]["K"],
            np.squeeze(self.camera_params[experimentID][camname]["RDistort"]),
            np.squeeze(self.camera_params[experimentID][camname]["TDistort"]),
            self.device,
        )
        proj_grid = proj_grid.transpose(0, 1)
    # print('Distort took {} sec.'.format(time.time() - ts))

    ts = time.time()
    if self.crop_im:
        proj_grid = proj_grid[:, :2] - com_precrop + self.dim_in[0] // 2
        # Now all coordinates should map properly to the image cropped around the COM
    else:
        # Then the only thing we need to correct for is crops at the borders
        proj_grid = proj_grid[:, :2]
        proj_grid[:, 0] = proj_grid[:, 0] - self.crop_width[0]
        proj_grid[:, 1] = proj_grid[:, 1] - self.crop_height[0]

    rgb = ops.sample_grid_torch(thisim, proj_grid, self.device, method=self.interp)
    # print('Sample grid {} sec.'.format(time.time() - ts))

    if (
        ~self.torch.any(self.torch.isnan(com_precrop))
        or (self.channel_combo == "avg")
        or not self.crop_im
    ):
        X = rgb.permute(0, 2, 3, 4, 1)
        return X
# TODO(nesting): There is pretty deep locigal nesting in this function,
# might be useful to break apart
def __data_generation(self, list_IDs_temp):
    """Generate data containing batch_size samples.

    X : (n_samples, *dim, n_channels)

    Args:
        list_IDs_temp (List): List of experiment Ids

    Returns:
        Tuple: Batch_size training samples
            X: Input volumes
            y_3d: Targets

    Raises:
        Exception: Invalid generator mode specified.
    """
    # Initialization
    first_exp = int(self.list_IDs[0].split("_")[0])
    # One volume slot per (sample, camera) pair; filled per-camera below.
    X = self.torch.zeros(
        (
            self.batch_size * len(self.camnames[first_exp]),
            *self.dim_out_3d,
            self.n_channels_in + self.depth,
        ),
        dtype=self.torch.uint8,
        device=self.device,
    )

    if self.mode == "3dprob":
        # Target confidence volumes (MAX-style networks).
        y_3d = self.torch.zeros(
            (self.batch_size, self.n_channels_out, *self.dim_out_3d),
            dtype=self.torch.float32,
            device=self.device,
        )
    elif self.mode == "coordinates":
        # Target 3D coordinates (AVG-style networks).
        y_3d = self.torch.zeros(
            (self.batch_size, 3, self.n_channels_out),
            dtype=self.torch.float32,
            device=self.device,
        )
    else:
        raise Exception("not a valid generator mode")

    sz = self.dim_out_3d[0] * self.dim_out_3d[1] * self.dim_out_3d[2]
    # Raveled voxel-center coordinates, one (sz, 3) grid per sample.
    X_grid = self.torch.zeros(
        (self.batch_size, sz, 3),
        dtype=self.torch.float32,
        device=self.device,
    )

    # Generate data
    for i, ID in enumerate(list_IDs_temp):
        sampleID = int(ID.split("_")[1])
        experimentID = int(ID.split("_")[0])

        # For 3D ground truth
        this_y_3d = self.torch.as_tensor(
            self.labels_3d[ID],
            dtype=self.torch.float32,
            device=self.device,
        )
        this_COM_3d = self.torch.as_tensor(
            self.com3d[ID], dtype=self.torch.float32, device=self.device
        )

        # Create and project the grid here,
        # Voxel centers span [vmin, vmax) around the 3D COM, vsize apart.
        xgrid = self.torch.arange(
            self.vmin + this_COM_3d[0] + self.vsize / 2,
            this_COM_3d[0] + self.vmax,
            self.vsize,
            dtype=self.torch.float32,
            device=self.device,
        )
        ygrid = self.torch.arange(
            self.vmin + this_COM_3d[1] + self.vsize / 2,
            this_COM_3d[1] + self.vmax,
            self.vsize,
            dtype=self.torch.float32,
            device=self.device,
        )
        zgrid = self.torch.arange(
            self.vmin + this_COM_3d[2] + self.vsize / 2,
            this_COM_3d[2] + self.vmax,
            self.vsize,
            dtype=self.torch.float32,
            device=self.device,
        )
        (x_coord_3d, y_coord_3d, z_coord_3d) = self.torch.meshgrid(
            xgrid, ygrid, zgrid
        )

        if self.mode == "coordinates":
            if this_y_3d.shape == y_3d[i].shape:
                y_3d[i] = this_y_3d
            else:
                msg = "Note: ignoring dimension mismatch in 3D labels"
                warnings.warn(msg)

        # Transpose before flattening reorders the x/y axes — presumably
        # to match the expected voxel raveling order; TODO confirm.
        X_grid[i] = self.torch.stack(
            (
                x_coord_3d.transpose(0, 1).flatten(),
                y_coord_3d.transpose(0, 1).flatten(),
                z_coord_3d.transpose(0, 1).flatten(),
            ),
            axis=1,
        )

        # Compute projected images in parallel using multithreading
        ts = time.time()
        num_cams = len(self.camnames[experimentID])
        arglist = []
        if self.mirror:
            # Here we only load the video once, and then parallelize the projection
            # and sampling after mirror flipping. For setups that collect views
            # in a single image with the use of mirrors
            loadim = self.load_frame.load_vid_frame(
                self.labels[ID]["frames"][self.camnames[experimentID][0]],
                self.camnames[experimentID][0],
                extension=self.extension,
            )[
                self.crop_height[0]: self.crop_height[1],
                self.crop_width[0]: self.crop_width[1],
            ]
            for c in range(num_cams):
                arglist.append(
                    [X_grid[i], self.camnames[experimentID][c], ID, experimentID, loadim]
                )
            result = self.threadpool.starmap(self.pj_grid_mirror, arglist)
        else:
            for c in range(num_cams):
                arglist.append(
                    [X_grid[i], self.camnames[experimentID][c], ID, experimentID]
                )
            result = self.threadpool.starmap(self.project_grid, arglist)

        for c in range(num_cams):
            # Flat index into the combined (sample, camera) axis.
            ic = c + i * len(self.camnames[experimentID])
            X[ic, :, :, :, :] = result[c]
        # print('MP took {} sec.'.format(time.time()-ts))

    if self.multicam:
        # Unpack the combined (sample, camera) axis and move cameras last.
        X = X.reshape(
            (
                self.batch_size,
                len(self.camnames[first_exp]),
                X.shape[1],
                X.shape[2],
                X.shape[3],
                X.shape[4],
            )
        )
        X = X.permute((0, 2, 3, 4, 5, 1))

        if self.channel_combo == "avg":
            X = self.torch.mean(X, axis=-1)
        # Randomly reorder the cameras fed into the first layer
        elif self.channel_combo == "random":
            X = X[:, :, :, :, :, self.torch.randperm(X.shape[-1])]
            X = X.transpose(4, 5).reshape(
                (
                    X.shape[0],
                    X.shape[1],
                    X.shape[2],
                    X.shape[3],
                    X.shape[4] * X.shape[5],
                )
            )
        else:
            X = X.transpose(4, 5).reshape(
                (
                    X.shape[0],
                    X.shape[1],
                    X.shape[2],
                    X.shape[3],
                    X.shape[4] * X.shape[5],
                )
            )
    else:
        # Then leave the batch_size and num_cams combined
        # NOTE(review): num_cams here is the value from the last loop
        # iteration — confirm all samples share a camera count.
        y_3d = y_3d.repeat(num_cams, 1, 1, 1, 1)

    # 3dprob is required for *training* MAX networks
    if self.mode == "3dprob":
        y_3d = y_3d.permute([0, 2, 3, 4, 1])

    if self.rotation:
        if self.expval:
            # First make X_grid 3d
            X_grid = self.torch.reshape(
                X_grid,
                (self.batch_size, self.nvox, self.nvox, self.nvox, 3),
            )
            X, X_grid = self.random_rotate(X, X_grid)
            # Need to reshape back to raveled version
            X_grid = self.torch.reshape(X_grid, (self.batch_size, -1, 3))
        else:
            X, y_3d = self.random_rotate(X, y_3d)

    if self.mono and self.n_channels_in == 3:
        # Convert from RGB to mono using the skimage formula. Drop the duplicated frames.
        # Reshape so RGB can be processed easily.
        X = self.torch.reshape(
            X,
            (
                X.shape[0],
                X.shape[1],
                X.shape[2],
                X.shape[3],
                len(self.camnames[first_exp]),
                -1,
            ),
        )
        X = (
            X[:, :, :, :, :, 0] * 0.2125
            + X[:, :, :, :, :, 1] * 0.7154
            + X[:, :, :, :, :, 2] * 0.0721
        )

    # Convert pytorch tensors back to numpy array
    ts = time.time()
    if self.torch.is_tensor(X):
        X = X.float().cpu().numpy()
    if self.torch.is_tensor(y_3d):
        y_3d = y_3d.cpu().numpy()
    # print('Numpy took {} sec'.format(time.time() - ts))

    if self.expval:
        if self.torch.is_tensor(X_grid):
            X_grid = X_grid.cpu().numpy()
        if self.var_reg:
            # NOTE(review): the regularization target stays a torch tensor
            # while the rest is numpy — confirm downstream handles this.
            return (
                [processing.preprocess_3d(X), X_grid],
                [y_3d, self.torch.zeros((self.batch_size, 1))],
            )
        if self.norm_im:
            # y_3d is in coordinates here.
            return [processing.preprocess_3d(X), X_grid], y_3d
        else:
            return [X, X_grid], y_3d
    else:
        if self.norm_im:
            return processing.preprocess_3d(X), y_3d
        else:
            return X, y_3d
class DataGenerator_3Dconv_tf(DataGenerator):
"""Updated generator class to resample from kmeans clusters after each epoch.
Uses tensorflow operations to accelerate generation of projection grid
**Compatible with TF 2.0 and newer. Not compatible with 1.14 and previous versions.
Also handles data across multiple experiments
Attributes:
camera_params (Dict): Camera parameters dictionary.
channel_combo (Text): Method for shuffling camera input order
com3d (Dict): Dictionary of com3d data.
COM_aug (bool): If True, augment the COM.
crop_im (bool): If True, crop images.
depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
device (Text): GPU device identifier
dim_out_3d (Tuple): Dimensions of the 3D volume, in voxels
distort (bool): If true, apply camera undistortion.
expval (bool): If True, process an expected value network (AVG)
gpu_id (Text): Identity of GPU to use.
immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
interp (Text): Interpolation method.
labels_3d (Dict): Contains ground-truth 3D label coordinates.
mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
multicam (bool): If True, formats data to work with multiple cameras as input.
norm_im (bool): If True, normalize images.
nvox (int): Number of voxels per box side
rotation (bool): If True, use simple rotation augmentation.
session (tf.compat.v1.InteractiveSession): tensorflow session.
threadpool (Threadpool): threadpool object for parallelizing video loading
tifdirs (List): Directories of .tifs
var_reg (bool): If True, adds a variance regularization term to the loss function.
vmax (int): Maximum box dim (relative to the COM)
vmin (int): Minimum box dim (relative to the COM)
vsize (float): Side length of one voxel
predict_flag (bool): If True, use imageio for reading videos, rather than OpenCV
"""
def __init__(
    self,
    list_IDs,
    labels,
    labels_3d,
    camera_params,
    clusterIDs,
    com3d,
    tifdirs,
    batch_size=32,
    dim_in=(32, 32, 32),
    n_channels_in=1,
    n_channels_out=1,
    out_scale=5,
    shuffle=True,
    camnames=[],  # NOTE(review): mutable default argument; safe only if never mutated
    crop_width=(0, 1024),
    crop_height=(20, 1300),
    vmin=-100,
    vmax=100,
    nvox=32,
    gpu_id="0",
    interp="linear",
    depth=False,
    channel_combo=None,
    mode="3dprob",
    samples_per_cluster=0,
    immode="tif",
    rotation=False,
    vidreaders=None,
    distort=True,
    expval=False,
    multicam=True,
    var_reg=False,
    COM_aug=None,
    crop_im=True,
    norm_im=True,
    chunks=3500,
    mono=False,
    mirror=False,
    predict_flag=False,
):
    """Initialize data generator.

    Args:
        list_IDs (List): List of sample Ids
        labels (Dict): Dictionary of labels
        labels_3d (Dict): Dictionary of 3d labels.
        camera_params (Dict): Camera parameters dictionary.
        clusterIDs (List): List of sample Ids
        com3d (Dict): Dictionary of com3d data.
        tifdirs (List): Directories of .tifs
        batch_size (int, optional): Batch size to generate
        dim_in (Tuple, optional): Input dimension
        n_channels_in (int, optional): Number of input channels
        n_channels_out (int, optional): Number of output channels
        out_scale (int, optional): Scale of the output gaussians.
        shuffle (bool, optional): If True, shuffle the samples.
        camnames (List, optional): List of camera names.
        crop_width (Tuple, optional): (first, last) pixels in image width
        crop_height (Tuple, optional): (first, last) pixels in image height
        vmin (int, optional): Minimum box dim (relative to the COM)
        vmax (int, optional): Maximum box dim (relative to the COM)
        nvox (int, optional): Number of voxels per box side
        gpu_id (Text, optional): Identity of GPU to use.
        interp (Text, optional): Interpolation method.
        depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
        channel_combo (Text): Method for shuffling camera input order
        mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
        samples_per_cluster (int, optional): Samples per cluster
        immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
        rotation (bool, optional): If True, use simple rotation augmentation.
        vidreaders (Dict, optional): Dict containing video readers.
        distort (bool, optional): If true, apply camera undistortion.
        expval (bool, optional): If True, process an expected value network (AVG)
        multicam (bool): If True, formats data to work with multiple cameras as input.
        var_reg (bool): If True, adds a variance regularization term to the loss function.
        COM_aug (bool, optional): If True, augment the COM.
        crop_im (bool, optional): If True, crop images.
        norm_im (bool, optional): If True, normalize images.
        chunks (int, optional): Size of chunks when using chunked mp4.
        mono (bool, optional): If True, use grayscale image.
        mirror (bool, optional): Passed through to DataGenerator.
        predict_flag (bool, optional): If True, use imageio for reading videos, rather than OpenCV
    """
    DataGenerator.__init__(
        self,
        list_IDs,
        labels,
        clusterIDs,
        batch_size,
        dim_in,
        n_channels_in,
        n_channels_out,
        out_scale,
        shuffle,
        camnames,
        crop_width,
        crop_height,
        samples_per_cluster,
        vidreaders,
        chunks,
        mono,
        mirror,
        predict_flag,
    )
    self.vmin = vmin
    self.vmax = vmax
    self.nvox = nvox
    # Side length of one voxel, derived from the box extent.
    self.vsize = (vmax - vmin) / nvox
    self.dim_out_3d = (nvox, nvox, nvox)
    self.labels_3d = labels_3d
    self.camera_params = camera_params
    self.interp = interp
    self.depth = depth
    self.channel_combo = channel_combo
    print(self.channel_combo)
    self.gpu_id = gpu_id
    self.mode = mode
    self.immode = immode
    self.tifdirs = tifdirs
    self.com3d = com3d
    self.rotation = rotation
    self.distort = distort
    self.expval = expval
    self.multicam = multicam
    self.var_reg = var_reg
    self.COM_aug = COM_aug
    self.crop_im = crop_im
    # If saving npy as uint8 rather than training directly, dont normalize
    self.norm_im = norm_im
    # Cap TF GPU memory growth so other frameworks can share the device.
    self.config = tf.compat.v1.ConfigProto()
    self.config.gpu_options.per_process_gpu_memory_fraction = 0.8
    self.config.gpu_options.allow_growth = True
    self.session = tf.compat.v1.InteractiveSession(config=self.config)
    self.device = "/GPU:" + self.gpu_id
    # One worker per camera of the first experiment.
    self.threadpool = ThreadPool(len(self.camnames[0]))

    with tf.device(self.device):
        ts = time.time()
        for i, ID in enumerate(list_IDs):
            experimentID = int(ID.split("_")[0])
            for camname in self.camnames[experimentID]:
                # M only needs to be computed once for each camera
                K = self.camera_params[experimentID][camname]["K"]
                R = self.camera_params[experimentID][camname]["R"]
                t = self.camera_params[experimentID][camname]["t"]
                self.camera_params[experimentID][camname]["M"] = np.array(
                    ops.camera_matrix(K, R, t), dtype="float32"
                )
        print("Init took {} sec.".format(time.time() - ts))
def __getitem__(self, index):
    """Generate one batch of data.

    Args:
        index (int): Frame index

    Returns:
        Tuple[np.ndarray, np.ndarray]: One batch of data
            X (np.ndarray): Input volume
            y (np.ndarray): Target
    """
    # Translate the batch number into a contiguous run of shuffled indices.
    lo = index * self.batch_size
    hi = lo + self.batch_size
    selected = [self.list_IDs[k] for k in self.indexes[lo:hi]]
    X, y = self.__data_generation(selected)
    return X, y
@tf.function
def rot90(self, X):
    """Rotate X by 90 degrees CCW.

    Args:
        X (np.ndarray): Volume

    Returns:
        X (np.ndarray): Rotated volume
    """
    # Swap the leading two axes and reverse the new second axis.
    swapped = tf.transpose(X, [1, 0, 2, 3])
    return swapped[:, ::-1]
@tf.function
def rot180(self, X):
    """Rotate X by 180 degrees.

    Args:
        X (np.ndarray): Volume

    Returns:
        X (np.ndarray): Rotated volume
    """
    # Reverse both leading spatial axes in a single slice.
    return X[::-1, ::-1]
def project_grid(self, X_grid, camname, ID, experimentID, device):
    """Projects 3D voxel centers and sample images as projected 2D pixel coordinates

    Args:
        X_grid (np.ndarray): 3-D array containing center coordinates of each voxel.
        camname (Text): camera name
        ID (Text): string denoting a sample ID
        experimentID (int): identifier for a video recording session.
        device: TF device string on which to run the projection ops.

    Returns:
        np.ndarray: projected voxel centers, now in 2D pixels
    """
    ts = time.time()
    with tf.device(device):
        # Need this copy so that this_y does not change
        this_y = np.round(self.labels[ID]["data"][camname]).copy()

        if np.all(np.isnan(this_y)):
            com_precrop = np.zeros_like(this_y[:, 0]) * np.nan
        else:
            # For projecting points, we should not use this offset
            com_precrop = np.nanmean(this_y, axis=1)

        # NOTE(review): thisim is only assigned when immode == "vid";
        # other immodes would hit a NameError below — confirm callers
        # always use "vid" here.
        if self.immode == "vid":
            ts = time.time()
            thisim = self.load_frame.load_vid_frame(
                self.labels[ID]["frames"][camname],
                camname,
                extension=self.extension,
            )[
                self.crop_height[0] : self.crop_height[1],
                self.crop_width[0] : self.crop_width[1],
            ]
            # print("Frame loading took {} sec.".format(time.time()-ts))

        # Shift labels into cropped-image coordinates, then take the COM.
        this_y[0, :] = this_y[0, :] - self.crop_width[0]
        this_y[1, :] = this_y[1, :] - self.crop_height[0]
        com = np.nanmean(this_y, axis=1)

        if self.crop_im:
            # Cropping takes negligible time
            if np.all(np.isnan(com)):
                thisim = np.zeros(
                    (self.dim_in[1], self.dim_in[0], self.n_channels_in),
                    dtype="uint8",
                )
            else:
                thisim = processing.cropcom(thisim, com, size=self.dim_in[0])

        # Project de novo
        ts = time.time()
        X_grid = tf.convert_to_tensor(X_grid)
        # Homogeneous coordinates: append a column of ones.
        pts1 = tf.ones((X_grid.shape[0], 1), dtype="float32")
        projPts = tf.concat((X_grid, pts1), 1)
        M = tf.convert_to_tensor(
            self.camera_params[experimentID][camname]["M"], dtype="float32"
        )
        proj_grid = ops.project_to2d_tf(projPts, M)
        # print("2D Project took {} sec.".format(time.time() - ts))

        if self.distort:
            ts = time.time()
            # Apply lens distortion so points align with raw pixels.
            proj_grid = ops.distortPoints_tf(
                proj_grid,
                tf.constant(
                    self.camera_params[experimentID][camname]["K"],
                    dtype="float32",
                ),
                tf.squeeze(
                    tf.constant(
                        self.camera_params[experimentID][camname]["RDistort"],
                        dtype="float32",
                    )
                ),
                tf.squeeze(
                    tf.constant(
                        self.camera_params[experimentID][camname]["TDistort"],
                        dtype="float32",
                    )
                ),
            )
            proj_grid = tf.transpose(proj_grid, (1, 0))
            # print("tf Distort took {} sec.".format(time.time() - ts))

        if self.crop_im:
            proj_grid = proj_grid - com_precrop + self.dim_in[0] // 2
            # Now all coordinates should map properly to the image
            # cropped around the COM
        else:
            # Then the only thing we need to correct for is crops at the borders
            proj_grid = proj_grid - tf.cast(
                tf.stack([self.crop_width[0], self.crop_height[0]]),
                "float32",
            )

        ts = time.time()
        rgb = ops.sample_grid_tf(thisim, proj_grid, device, method=self.interp)
        # print("Sample grid tf took {} sec".format(time.time() - ts))

        X = tf.reshape(rgb, (self.nvox, self.nvox, self.nvox, 3))

    return X
# TODO(nesting): There is pretty deep locigal nesting in this function,
# might be useful to break apart
def __data_generation(self, list_IDs_temp):
    """Generate data containing batch_size samples.

    X : (n_samples, *dim, n_channels)

    Args:
        list_IDs_temp (List): List of experiment Ids

    Returns:
        Tuple: Batch_size training samples
            X: Input volumes
            y_3d: Targets

    Raises:
        Exception: Invalid generator mode specified.
    """
    # Initialization
    ts = time.time()
    first_exp = int(self.list_IDs[0].split("_")[0])

    with tf.device(self.device):
        if self.mode == "3dprob":
            # Target confidence volumes (MAX-style networks).
            y_3d = tf.zeros(
                (self.batch_size, self.n_channels_out, *self.dim_out_3d),
                dtype="float32",
            )
        elif self.mode == "coordinates":
            # Target 3D coordinates (AVG-style networks).
            y_3d = tf.zeros(
                (self.batch_size, 3, self.n_channels_out), dtype="float32"
            )
        else:
            raise Exception("not a valid generator mode")

    # Generate data
    for i, ID in enumerate(list_IDs_temp):
        sampleID = int(ID.split("_")[1])
        experimentID = int(ID.split("_")[0])

        # For 3D ground truth
        this_y_3d = self.labels_3d[ID]
        this_COM_3d = self.com3d[ID]

        with tf.device(self.device):
            # Voxel centers span [vmin, vmax) around the 3D COM, vsize apart.
            xgrid = tf.range(
                self.vmin + this_COM_3d[0] + self.vsize / 2,
                this_COM_3d[0] + self.vmax,
                self.vsize,
                dtype="float32",
            )
            ygrid = tf.range(
                self.vmin + this_COM_3d[1] + self.vsize / 2,
                this_COM_3d[1] + self.vmax,
                self.vsize,
                dtype="float32",
            )
            zgrid = tf.range(
                self.vmin + this_COM_3d[2] + self.vsize / 2,
                this_COM_3d[2] + self.vmax,
                self.vsize,
                dtype="float32",
            )
            (x_coord_3d, y_coord_3d, z_coord_3d) = tf.meshgrid(xgrid, ygrid, zgrid)

            if self.mode == "coordinates":
                # NOTE(review): this branch compares one sample's label shape
                # against the whole batch target shape — it looks suspect and
                # may never trigger; confirm against the torch generator.
                if this_y_3d.shape == y_3d.shape:
                    if i == 0:
                        y_3d = tf.expand_dims(y_3d, 0)
                    else:
                        # BUGFIX: was tf.stack(y_3d, tf.expand_dims(...), axis=0),
                        # which passes a tensor as the `axis` argument of
                        # tf.stack(values, axis). Concatenate along the leading
                        # axis instead.
                        y_3d = tf.concat(
                            [y_3d, tf.expand_dims(this_y_3d, 0)], axis=0
                        )
                else:
                    msg = "Note: ignoring dimension mismatch in 3D labels"
                    warnings.warn(msg)

            xg = tf.stack(
                (
                    tf.keras.backend.flatten(x_coord_3d),
                    tf.keras.backend.flatten(y_coord_3d),
                    tf.keras.backend.flatten(z_coord_3d),
                ),
                axis=1,
            )
            if i == 0:
                X_grid = tf.expand_dims(xg, 0)
            else:
                X_grid = tf.concat([X_grid, tf.expand_dims(xg, 0)], axis=0)
        # print('Initialization took {} sec.'.format(time.time() - ts))

        # BUGFIX: num_cams was previously assigned only on the eager path,
        # raising NameError in graph mode; define it for both paths.
        num_cams = int(len(self.camnames[experimentID]))
        if tf.executing_eagerly():
            # Compute projection grids using multithreading
            arglist = []
            for c in range(num_cams):
                arglist.append(
                    [
                        xg,
                        self.camnames[experimentID][c],
                        ID,
                        experimentID,
                        self.device,
                    ]
                )
            result = self.threadpool.starmap(self.project_grid, arglist)
            for c in range(num_cams):
                if i == 0 and c == 0:
                    X = tf.expand_dims(result[c], 0)
                else:
                    X = tf.concat([X, tf.expand_dims(result[c], 0)], axis=0)
        else:
            # NOTE(review): in graph mode X is re-initialized at c == 0 for
            # every sample, which assumes batch_size == 1 — confirm.
            for c in range(num_cams):
                if c == 0:
                    X = tf.expand_dims(
                        self.project_grid(
                            xg,
                            self.camnames[experimentID][c],
                            ID,
                            experimentID,
                            self.device,
                        ),
                        0,
                    )
                else:
                    X = tf.concat(
                        (
                            X,
                            tf.expand_dims(
                                self.project_grid(
                                    xg,
                                    self.camnames[experimentID][c],
                                    ID,
                                    experimentID,
                                    self.device,
                                ),
                                0,
                            ),
                        ),
                        axis=0,
                    )

    ts = time.time()
    with tf.device(self.device):
        if self.multicam:
            # Unpack the combined (sample, camera) axis and move cameras last.
            X = tf.reshape(
                X,
                (
                    self.batch_size,
                    len(self.camnames[first_exp]),
                    X.shape[1],
                    X.shape[2],
                    X.shape[3],
                    X.shape[4],
                ),
            )
            X = tf.transpose(X, [0, 2, 3, 4, 5, 1])

            if self.channel_combo == "avg":
                # BUGFIX: tf.mean does not exist; tf.reduce_mean is the
                # TF equivalent of the torch generator's torch.mean.
                X = tf.reduce_mean(X, axis=-1)
            # Randomly reorder the cameras fed into the first layer
            elif self.channel_combo == "random":
                X = tf.transpose(X, [5, 0, 1, 2, 3, 4])
                X = tf.random.shuffle(X)
                X = tf.transpose(X, [1, 2, 3, 4, 0, 5])
                X = tf.reshape(
                    X,
                    (
                        X.shape[0],
                        X.shape[1],
                        X.shape[2],
                        X.shape[3],
                        X.shape[4] * X.shape[5],
                    ),
                )
            else:
                X = tf.transpose(X, [0, 1, 2, 3, 5, 4])
                X = tf.reshape(
                    X,
                    (
                        X.shape[0],
                        X.shape[1],
                        X.shape[2],
                        X.shape[3],
                        X.shape[4] * X.shape[5],
                    ),
                )
        else:
            # Then leave the batch_size and num_cams combined
            y_3d = tf.tile(y_3d, [len(self.camnames[experimentID]), 1, 1, 1, 1])

        if self.interp == "linear":
            # fix rotation issue for linear interpolation sample_grid method
            X = tf.squeeze(X)
            X = self.rot90(X[:, ::-1, :, :])
            X = self.rot180(X)
            X = tf.expand_dims(X, 0)

        if self.mode == "3dprob":
            y_3d = tf.transpose(y_3d, [0, 2, 3, 4, 1])

        X = tf.cast(X, "float32")

        if self.rotation:
            if self.expval:
                # First make X_grid 3d
                X_grid = tf.reshape(
                    X_grid,
                    (self.batch_size, self.nvox, self.nvox, self.nvox, 3),
                )
                X, X_grid = self.random_rotate(X, X_grid)
                # Need to reshape back to raveled version
                X_grid = tf.reshape(X_grid, (self.batch_size, -1, 3))
            else:
                X, y_3d = self.random_rotate(X, y_3d)

    # Then we also need to return the 3d grid center coordinates,
    # for calculating a spatial expected value
    # Xgrid is typically symmetric for 90 and 180 degree rotations
    # (when vmax and vmin are symmetric)
    # around the z-axis, so no need to rotate X_grid.
    # ts = time.time()
    # Eager execution enabled in TF 2, tested in TF 2.0, 2.1, and 2.2
    if tf.executing_eagerly():
        X = X.numpy()
        y_3d = y_3d.numpy()
        X_grid = X_grid.numpy()
    else:
        # For compatibility with TF 1.14
        # Eager execution disabled on 1.14; enabling eager causes model to fail
        # Works on 1.14, but very slow. Graph grows in loop...
        X = X.eval(session=self.session)
        y_3d = y_3d.eval(session=self.session)
        X_grid = X_grid.eval(session=self.session)
    # print('Eval took {} sec.'.format(time.time()-ts))
    # print('Wrap-up took {} sec.'.format(time.time()-ts))

    if self.mono and self.n_channels_in == 3:
        # Convert from RGB to mono using the skimage formula. Drop the duplicated frames.
        # Reshape so RGB can be processed easily.
        X = np.reshape(
            X,
            (
                X.shape[0],
                X.shape[1],
                X.shape[2],
                X.shape[3],
                self.n_channels_in,
                -1,
            ),
            order="F",
        )
        X = (
            X[:, :, :, :, 0] * 0.2125
            + X[:, :, :, :, 1] * 0.7154
            + X[:, :, :, :, 2] * 0.0721
        )

    if self.expval:
        if self.var_reg:
            return (
                [processing.preprocess_3d(X), X_grid],
                [y_3d, np.zeros((self.batch_size, 1))],
            )
        if self.norm_im:
            # y_3d is in coordinates here.
            return [processing.preprocess_3d(X), X_grid], y_3d
        else:
            return [X, X_grid], y_3d
    else:
        if self.norm_im:
            return processing.preprocess_3d(X), y_3d
        else:
            return X, y_3d
def random_continuous_rotation(X, y_3d, max_delta=5):
    """Rotates X and y_3d a random amount around z-axis.

    NOTE(review): there is no `self` parameter — if this is defined at class
    level it cannot be called on an instance as a normal method; confirm
    intended usage.

    Args:
        X (np.ndarray): input image volume
        y_3d (np.ndarray): 3d target (for MAX network) or voxel center grid (for AVG network)
        max_delta (int, optional): maximum range for rotation angle.

    Returns:
        np.ndarray: rotated image volumes
        np.ndarray: rotated grid coordimates
    """
    # Uniform angle in (-max_delta, max_delta) degrees, shared by X and y_3d.
    rotangle = np.random.rand() * (2 * max_delta) - max_delta
    # Collapse trailing (depth, channel) dims so each sample is a 2D image
    # stack that apply_affine_transform can rotate. Requires eager mode
    # for .numpy().
    X = tf.reshape(X, [X.shape[0], X.shape[1], X.shape[2], -1]).numpy()
    y_3d = tf.reshape(
        y_3d, [y_3d.shape[0], y_3d.shape[1], y_3d.shape[2], -1]
    ).numpy()
    for i in range(X.shape[0]):
        X[i] = tf.keras.preprocessing.image.apply_affine_transform(
            X[i],
            theta=rotangle,
            row_axis=0,
            col_axis=1,
            channel_axis=2,
            fill_mode="nearest",
            cval=0.0,
            order=1,
        )
        y_3d[i] = tf.keras.preprocessing.image.apply_affine_transform(
            y_3d[i],
            theta=rotangle,
            row_axis=0,
            col_axis=1,
            channel_axis=2,
            fill_mode="nearest",
            cval=0.0,
            order=1,
        )
    # Restore the 5D shape; the repeated shape[2] assumes cubic volumes
    # (nvox equal along both spatial axes) — TODO confirm.
    X = tf.reshape(X, [X.shape[0], X.shape[1], X.shape[2], X.shape[2], -1]).numpy()
    y_3d = tf.reshape(
        y_3d,
        [y_3d.shape[0], y_3d.shape[1], y_3d.shape[2], y_3d.shape[2], -1],
    ).numpy()
    return X, y_3d
# TODO(inherit): Several methods are repeated, consider inheriting from parent
class DataGenerator_3Dconv_frommem(keras.utils.Sequence):
"""Generate 3d conv data from memory.
Attributes:
augment_brightness (bool): If True, applies brightness augmentation
augment_continuous_rotation (bool): If True, applies rotation augmentation in increments smaller than 90 degrees
augment_hue (bool): If True, applies hue augmentation
batch_size (int): Batch size
bright_val (float): Brightness augmentation range (-bright_val, bright_val), as fraction of raw image brightness
chan_num (int): Number of input channels
data (np.ndarray): Image volumes
expval (bool): If True, crafts input for an AVG network
hue_val (float): Hue augmentation range (-hue_val, hue_val), as fraction of raw image hue range
indexes (np.ndarray): Sample indices used for batch generation
labels (Dict): Label dictionary
list_IDs (List): List of sampleIDs
nvox (int): Number of voxels in each grid dimension
random (bool): If True, shuffles camera order for each batch
rotation (bool): If True, applies rotation augmentation in 90 degree increments
rotation_val (float): Range of angles used for continuous rotation augmentation
shuffle (bool): If True, shuffle the samples before each epoch
var_reg (bool): If True, returns input used for variance regularization
xgrid (np.ndarray): For the AVG network, this contains the 3D grid coordinates
n_rand_views (int): Number of reviews to sample randomly from the full set
replace (bool): If True, samples n_rand_views with replacement
"""
def __init__(
    self,
    list_IDs,
    data,
    labels,
    batch_size,
    rotation=True,
    random=True,
    chan_num=3,
    shuffle=True,
    expval=False,
    xgrid=None,
    var_reg=False,
    nvox=64,
    augment_brightness=True,
    augment_hue=True,
    augment_continuous_rotation=True,
    bright_val=0.05,
    hue_val=0.05,
    rotation_val=5,
    replace=True,
    n_rand_views=None,
    heatmap_reg=False,
    heatmap_reg_coeff=0.01,
):
    """Initialize data generator.

    Args:
        list_IDs (List): List of sampleIDs
        data (np.ndarray): Image volumes
        labels (Dict): Label dictionary
        batch_size (int): batch size
        rotation (bool, optional): If True, applies rotation augmentation in 90 degree increments
        random (bool, optional): If True, shuffles camera order for each batch
        chan_num (int, optional): Number of input channels
        shuffle (bool, optional): If True, shuffle the samples before each epoch
        expval (bool, optional): If True, crafts input for an AVG network
        xgrid (None, optional): For the AVG network, this contains the 3D grid coordinates
        var_reg (bool, optional): If True, returns input used for variance regularization
        nvox (int, optional): Number of voxels in each grid dimension
        augment_brightness (bool, optional): If True, applies brightness augmentation
        augment_hue (bool, optional): If True, applies hue augmentation
        augment_continuous_rotation (bool, optional): If True, applies rotation augmentation in increments smaller than 90 degree
        bright_val (float, optional): brightness augmentation range (-bright_val, bright_val), as fraction of raw image brightness
        hue_val (float, optional): Hue augmentation range (-hue_val, hue_val), as fraction of raw image hue range
        rotation_val (float, optional): Range of angles used for continuous rotation augmentation
        n_rand_views (int, optional): Number of views to sample randomly from the full set
        replace (bool, optional): If True, samples n_rand_views with replacement
        heatmap_reg (bool, optional): Stored on the instance; toggles heatmap
            regularization downstream (not used within the code shown here).
        heatmap_reg_coeff (float, optional): Weight for the heatmap
            regularization term (stored for downstream use).
    """
    self.list_IDs = list_IDs
    self.data = data
    self.labels = labels
    self.rotation = rotation
    self.batch_size = batch_size
    self.random = random
    self.chan_num = chan_num
    self.shuffle = shuffle
    self.expval = expval
    self.augment_hue = augment_hue
    self.augment_continuous_rotation = augment_continuous_rotation
    self.augment_brightness = augment_brightness
    self.var_reg = var_reg
    self.xgrid = xgrid
    self.nvox = nvox
    self.bright_val = bright_val
    self.hue_val = hue_val
    self.rotation_val = rotation_val
    self.n_rand_views = n_rand_views
    self.replace = replace
    self.heatmap_reg = heatmap_reg
    self.heatmap_reg_coeff = heatmap_reg_coeff
    # Build the initial (possibly shuffled) sample index order.
    self.on_epoch_end()
def __len__(self):
    """Denote the number of batches per epoch.

    Returns:
        int: Batches per epoch
    """
    # Integer division == floor for non-negative counts; partial
    # trailing batches are dropped.
    return len(self.list_IDs) // self.batch_size
def __getitem__(self, index):
    """Generate one batch of data.

    Args:
        index (int): Frame index

    Returns:
        Tuple[np.ndarray, np.ndarray]: One batch of data
            X (np.ndarray): Input volume
            y (np.ndarray): Target
    """
    # Map the batch number to its run of (possibly shuffled) indices.
    first = index * self.batch_size
    chosen = self.indexes[first : first + self.batch_size]
    ids = [self.list_IDs[k] for k in chosen]
    X, y = self.__data_generation(ids)
    return X, y
def on_epoch_end(self):
    """Update indexes after each epoch."""
    order = np.arange(len(self.list_IDs))
    if self.shuffle:
        np.random.shuffle(order)
    self.indexes = order
def rot90(self, X):
    """Rotate X by 90 degrees CCW.

    Args:
        X (np.ndarray): Image volume or grid

    Returns:
        X (np.ndarray): Rotated image volume or grid
    """
    # Axis swap followed by a reversal of the new second axis gives a
    # 90-degree CCW rotation in the leading plane.
    return np.transpose(X, (1, 0, 2, 3))[:, ::-1]
def rot180(self, X):
    """Rotate X by 180 degrees.

    Args:
        X (np.ndarray): Image volume or grid

    Returns:
        X (np.ndarray): Rotated image volume or grid
    """
    # Reverse both leading spatial axes in one slicing expression.
    return X[::-1, ::-1]
def random_rotate(self, X, y_3d):
"""Rotate each sample by 0, 90, 180, or 270 degrees.
Args:
X (np.ndarray): Image volumes
y_3d (np.ndarray): 3D grid coordinates (AVG) or training target volumes (MAX)
Returns:
X (np.ndarray): Rotated image volumes
y_3d (np.ndarray): Rotated 3D grid coordinates (AVG) or training target volumes (MAX)
"""
rots = np.random.choice(np.arange(4), X.shape[0])
for i in range(X.shape[0]):
if rots[i] == 0:
pass
elif rots[i] == 1:
# Rotate180
X[i] = self.rot180(X[i])
y_3d[i] = self.rot180(y_3d[i])
elif rots[i] == 2:
# Rotate90
X[i] = self.rot90(X[i])
y_3d[i] = self.rot90(y_3d[i])
elif rots[i] == 3:
# Rotate -90/270
X[i] = self.rot90(X[i])
X[i] = self.rot180(X[i])
y_3d[i] = self.rot90(y_3d[i])
y_3d[i] = self.rot180(y_3d[i])
return X, y_3d
    def visualize(self, original, augmented):
        """Plots example image after augmentation, side by side with the original.

        Blocks on stdin so batches can be inspected one at a time.

        Args:
            original (np.ndarray): image before augmentation
            augmented (np.ndarray): image after augmentation.
        """
        # Imported lazily so matplotlib is only required when debugging
        import matplotlib.pyplot as plt

        fig = plt.figure()
        plt.subplot(1, 2, 1)
        plt.title("Original image")
        plt.imshow(original)
        plt.subplot(1, 2, 2)
        plt.title("Augmented image")
        plt.imshow(augmented)
        plt.show()
        # Pause until the user confirms before the next batch is drawn
        input("Press Enter to continue...")
    def do_augmentation(self, X, X_grid, y_3d):
        """Applies augmentation.

        Rotations act on the volumes plus either the grid (AVG, expval=True)
        or the target volumes (MAX). Hue/brightness jitter is applied per
        camera: each camera occupies a contiguous chan_num-wide slice of the
        trailing channel axis. Note that 90-degree rotation and continuous
        rotation are both applied when both flags are set.

        Args:
            X (np.ndarray): image volumes
            X_grid (np.ndarray): 3D grid coordinates (raveled; only used when expval)
            y_3d (np.ndarray): training targets

        Returns:
            X (np.ndarray): Augmented image volumes
            X_grid (np.ndarray): 3D grid coordinates
            y_3d (np.ndarray): Training targets
        """
        if self.rotation:
            if self.expval:
                # First make X_grid 3d so it rotates like the volume
                X_grid = np.reshape(
                    X_grid,
                    (self.batch_size, self.nvox, self.nvox, self.nvox, 3),
                )
                X, X_grid = self.random_rotate(X.copy(), X_grid.copy())
                # Need to reshape back to raveled version
                X_grid = np.reshape(X_grid, (self.batch_size, -1, 3))
            else:
                X, y_3d = self.random_rotate(X.copy(), y_3d.copy())
        if self.augment_continuous_rotation:
            if self.expval:
                # First make X_grid 3d
                X_grid = np.reshape(
                    X_grid,
                    (self.batch_size, self.nvox, self.nvox, self.nvox, 3),
                )
                X, X_grid = random_continuous_rotation(
                    X.copy(), X_grid.copy(), self.rotation_val
                )
                # Need to reshape back to raveled version
                X_grid = np.reshape(X_grid, (self.batch_size, -1, 3))
            else:
                X, y_3d = random_continuous_rotation(
                    X.copy(), y_3d.copy(), self.rotation_val
                )
        if self.augment_hue and self.chan_num == 3:
            # Jitter hue independently for each camera's RGB slice
            for n_cam in range(int(X.shape[-1] / self.chan_num)):
                channel_ids = np.arange(
                    n_cam * self.chan_num,
                    n_cam * self.chan_num + self.chan_num,
                )
                X[..., channel_ids] = tf.image.random_hue(
                    X[..., channel_ids], self.hue_val
                )
        elif self.augment_hue:
            warnings.warn(
                "Trying to augment hue with an image that is not RGB. Skipping."
            )
        if self.augment_brightness:
            # Brightness jitter works for any channel count, so no RGB guard
            for n_cam in range(int(X.shape[-1] / self.chan_num)):
                channel_ids = np.arange(
                    n_cam * self.chan_num,
                    n_cam * self.chan_num + self.chan_num,
                )
                X[..., channel_ids] = tf.image.random_brightness(
                    X[..., channel_ids], self.bright_val
                )
        return X, X_grid, y_3d
    def do_random(self, X):
        """Randomly re-order camera views, optionally subsampling them.

        Args:
            X (np.ndarray): image volumes; the trailing axis is
                (chan_num * n_cameras), with each camera's channels contiguous

        Returns:
            X (np.ndarray): Shuffled (and possibly subsampled) image volumes

        Raises:
            Exception: if n_rand_views is used with replace=False but random=False.
        """
        if self.random:
            # Unstack the trailing axis into (chan_num, n_cameras) so whole
            # cameras can be permuted; order='F' keeps each camera's channels
            # together.
            X = np.reshape(X,
                           (X.shape[0],
                            X.shape[1],
                            X.shape[2],
                            X.shape[3],
                            self.chan_num,
                            -1),
                           order='F')
            X = X[:, :, :, :, :, np.random.permutation(X.shape[-1])]
            # Collapse back to the packed channel axis
            X = np.reshape(X,
                           (X.shape[0],
                            X.shape[1],
                            X.shape[2],
                            X.shape[3],
                            X.shape[4]*X.shape[5]), order='F')
        if self.n_rand_views is not None:
            # Select a set of cameras randomly with replacement.
            X = np.reshape(X,
                           (X.shape[0],
                            X.shape[1],
                            X.shape[2],
                            X.shape[3],
                            self.chan_num,
                            -1),
                           order='F')
            if self.replace:
                X = X[..., np.random.randint(X.shape[-1], size=(self.n_rand_views,))]
            else:
                # Without replacement we rely on the permutation above having
                # already shuffled the cameras; taking the first n is then a
                # uniform sample without replacement.
                if not self.random:
                    raise Exception("For replace=False for n_rand_views, random must be turned on")
                X = X[:, :, :, :, :, :self.n_rand_views]
            X = np.reshape(X,
                           (X.shape[0],
                            X.shape[1],
                            X.shape[2],
                            X.shape[3],
                            X.shape[4]*X.shape[5]),
                           order='F')
        return X
def get_max_gt_ind(self, X_grid, y_3d):
"""Uses the gt label position to find the index of the voxel corresponding to it.
Used for heatmap regularization.
"""
diff = np.sum((X_grid[:, :, :, np.newaxis] - y_3d[:, np.newaxis, :, :])**2, axis=2)
inds = np.argmin(diff, axis=1)
grid_d = int(np.round(X_grid.shape[1]**(1/3)))
inds = np.unravel_index(inds, (grid_d, grid_d, grid_d))
return np.stack(inds, axis=1)
    def __data_generation(self, list_IDs_temp):
        """Generate data containing batch_size samples from in-memory arrays.
        X : (n_samples, *dim, n_channels)

        Args:
            list_IDs_temp (List): List of sample IDs for this batch

        Returns:
            Tuple: Batch_size training samples
                X: Input volumes (paired with X_grid when expval)
                y_3d: Targets
        Raises:
            Exception: For replace=False for n_rand_views, random must be turned on.
        """
        # Initialization
        X = np.zeros((self.batch_size, *self.data.shape[1:]))
        y_3d = np.zeros((self.batch_size, *self.labels.shape[1:]))
        # Grid coordinates are only needed for AVG (expval) networks
        if self.expval:
            X_grid = np.zeros((self.batch_size, *self.xgrid.shape[1:]))
        else:
            X_grid = None
        for i, ID in enumerate(list_IDs_temp):
            # Copy so augmentation cannot mutate the cached source volume
            X[i] = self.data[ID].copy()
            y_3d[i] = self.labels[ID]
            if self.expval:
                X_grid[i] = self.xgrid[ID]
        X, X_grid, y_3d = self.do_augmentation(X, X_grid, y_3d)
        # Randomly re-order, if desired
        X = self.do_random(X)
        if self.expval:
            if self.heatmap_reg:
                # Second target is a constant per-marker regularization weight
                return [X, X_grid, self.get_max_gt_ind(X_grid, y_3d)], [y_3d,
                    self.heatmap_reg_coeff*np.ones((self.batch_size, y_3d.shape[-1]), dtype='float32')]
            return [X, X_grid], y_3d
        else:
            return X, y_3d
class DataGenerator_3Dconv_npy(DataGenerator_3Dconv_frommem):
    """Generates 3d conv data from npy files.

    Attributes:
        augment_brightness (bool): If True, applies brightness augmentation
        augment_continuous_rotation (bool): If True, applies rotation augmentation in increments smaller than 90 degrees
        augment_hue (bool): If True, applies hue augmentation
        batch_size (int): Batch size
        bright_val (float): Brightness augmentation range (-bright_val, bright_val), as fraction of raw image brightness
        chan_num (int): Number of input channels
        labels_3d (Dict): training targets
        expval (bool): If True, crafts input for an AVG network
        hue_val (float): Hue augmentation range (-hue_val, hue_val), as fraction of raw image hue range
        indexes (np.ndarray): Sample indices used for batch generation
        list_IDs (List): List of sampleIDs
        nvox (int): Number of voxels in each grid dimension
        random (bool): If True, shuffles camera order for each batch
        rotation (bool): If True, applies rotation augmentation in 90 degree increments
        rotation_val (float): Range of angles used for continuous rotation augmentation
        shuffle (bool): If True, shuffle the samples before each epoch
        var_reg (bool): If True, returns input used for variance regularization
        n_rand_views (int): Number of views to sample randomly from the full set
        replace (bool): If True, samples n_rand_views with replacement
        imdir (Text): Name of image volume npy subfolder
        griddir (Text): Name of grid volume npy subfolder
        mono (bool): If True, return monochrome image volumes
        sigma (float): For MAX network, size of target Gaussian (mm)
        cam1 (bool): If True, prepares input for training a single camera network
        prefeat (bool): If True, prepares input for a network performing volume feature extraction before fusion
        npydir (Dict): path to each npy volume folder for each recording (i.e. experiment)
    """

    def __init__(self,
                 list_IDs,
                 labels_3d,
                 npydir,
                 batch_size,
                 rotation=True,
                 random=False,
                 chan_num=3,
                 shuffle=True,
                 expval=False,
                 var_reg=False,
                 imdir='image_volumes',
                 griddir='grid_volumes',
                 nvox=64,
                 n_rand_views=None,
                 mono=False,
                 cam1=False,
                 replace=True,
                 prefeat=False,
                 sigma=10,
                 augment_brightness=True,
                 augment_hue=True,
                 augment_continuous_rotation=True,
                 bright_val=0.05,
                 hue_val=0.05,
                 rotation_val=5,
                 heatmap_reg=False,
                 heatmap_reg_coeff=0.01,
                 ):
        """Generates 3d conv data from npy files.

        Args:
            list_IDs (List): List of sampleIDs
            labels_3d (Dict): training targets
            npydir (Dict): path to each npy volume folder for each recording (i.e. experiment)
            batch_size (int): Batch size
            rotation (bool, optional): If True, applies rotation augmentation in 90 degree increments
            random (bool, optional): If True, shuffles camera order for each batch
            chan_num (int, optional): Number of input channels
            shuffle (bool, optional): If True, shuffle the samples before each epoch
            expval (bool, optional): If True, crafts input for an AVG network
            var_reg (bool, optional): If True, returns input used for variance regularization
            imdir (Text, optional): Name of image volume npy subfolder
            griddir (Text, optional): Name of grid volume npy subfolder
            nvox (int, optional): Number of voxels in each grid dimension
            n_rand_views (int, optional): Number of views to sample randomly from the full set
            mono (bool, optional): If True, return monochrome image volumes
            cam1 (bool, optional): If True, prepares input for training a single camera network
            replace (bool, optional): If True, samples n_rand_views with replacement
            prefeat (bool, optional): If True, prepares input for a network performing volume feature extraction before fusion
            sigma (float, optional): For MAX network, size of target Gaussian (mm)
            augment_brightness (bool, optional): If True, applies brightness augmentation
            augment_hue (bool, optional): If True, applies hue augmentation
            augment_continuous_rotation (bool, optional): If True, applies rotation augmentation in increments smaller than 90 degrees
            bright_val (float, optional): Brightness augmentation range (-bright_val, bright_val), as fraction of raw image brightness
            hue_val (float, optional): Hue augmentation range (-hue_val, hue_val), as fraction of raw image hue range
            rotation_val (float, optional): Range of angles used for continuous rotation augmentation
            heatmap_reg (bool, optional): If True, also returns voxel indices for heatmap regularization
            heatmap_reg_coeff (float, optional): Weight of the heatmap regularization target
        """
        self.list_IDs = list_IDs
        self.labels_3d = labels_3d
        self.npydir = npydir
        self.rotation = rotation
        self.batch_size = batch_size
        self.random = random
        self.chan_num = chan_num
        self.shuffle = shuffle
        self.expval = expval
        self.var_reg = var_reg
        self.griddir = griddir
        self.imdir = imdir
        self.nvox = nvox
        self.n_rand_views = n_rand_views
        self.mono = mono
        self.cam1 = cam1
        self.replace = replace
        self.prefeat = prefeat
        self.sigma = sigma
        self.augment_hue = augment_hue
        self.augment_continuous_rotation = augment_continuous_rotation
        self.augment_brightness = augment_brightness
        self.bright_val = bright_val
        self.hue_val = hue_val
        self.rotation_val = rotation_val
        self.heatmap_reg = heatmap_reg
        self.heatmap_reg_coeff = heatmap_reg_coeff
        self.on_epoch_end()

    def __len__(self):
        """Denote the number of batches per epoch.

        Returns:
            int: Batches per epoch
        """
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data.

        Args:
            index (int): Frame index

        Returns:
            Tuple[np.ndarray, np.ndarray]: One batch of data
                X (np.ndarray): Input volume
                y (np.ndarray): Target
        """
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        # Generate data
        X, y = self.__data_generation(list_IDs_temp)
        return X, y

    def on_epoch_end(self):
        """Update indexes after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            print("SHUFFLING DATA INDICES")
            np.random.shuffle(self.indexes)

    def rot90(self, X):
        """Rotate X by 90 degrees CCW (transpose the two spatial axes, then flip)."""
        # Rotate 90
        X = np.transpose(X, [1, 0, 2, 3])
        X = X[:, ::-1, :, :]
        return X

    def rot180(self,X):
        """Rotate X by 180 degrees (flip both leading spatial axes)."""
        #Rotate 180
        X = X[::-1, ::-1, :, :]
        return X

    def random_rotate(self, X, y_3d):
        """
        Rotate each sample by 0, 90, 180, or 270 degrees, applying the same
        rotation to the volume and its target/grid.
        """
        rots = np.random.choice(np.arange(4), X.shape[0])
        for i in range(X.shape[0]):
            if rots[i]==0:
                pass
            elif rots[i]==1:
                #Rotate180
                X[i] = self.rot180(X[i])
                y_3d[i] = self.rot180(y_3d[i])
            elif rots[i]==2:
                #Rotate90
                X[i] = self.rot90(X[i])
                y_3d[i] = self.rot90(y_3d[i])
            elif rots[i]==3:
                #Rotate -90/270
                X[i] = self.rot90(X[i])
                X[i] = self.rot180(X[i])
                y_3d[i] = self.rot90(y_3d[i])
                y_3d[i] = self.rot180(y_3d[i])
            else:
                raise Exception("Failed to rotate properly")
        return X, y_3d

    def __data_generation(self, list_IDs_temp):
        """Generate data containing batch_size samples, loading volumes from disk.
        X : (n_samples, *dim, n_channels)

        Args:
            list_IDs_temp (List): List of sample IDs ("<experimentID>_<sampleID>")

        Returns:
            Tuple: Batch_size training samples
                X: Input volumes
                y_3d or y_3d_max: Targets
        Raises:
            Exception: For replace=False for n_rand_views, random must be turned on.
        """
        # Initialization
        X = []
        y_3d = []
        X_grid = []
        for i, ID in enumerate(list_IDs_temp):
            # Need to look up the experiment ID to get the correct directory
            # NOTE(review): assumes sample IDs contain exactly one underscore;
            # a sampleID containing '_' would be truncated here — confirm.
            IDkey = ID.split("_")
            eID = int(IDkey[0])
            sID = IDkey[1]
            X.append(np.load(os.path.join(self.npydir[eID],
                                          self.imdir,
                                          '0_' + sID + '.npy')).astype('float32'))
            y_3d.append(self.labels_3d[ID])
            X_grid.append(np.load(os.path.join(self.npydir[eID],
                                               self.griddir,
                                               '0_' + sID + '.npy')))
        X = np.stack(X)
        y_3d = np.stack(y_3d)
        X_grid = np.stack(X_grid)
        if not self.expval:
            # MAX network: targets are Gaussian heatmap volumes
            y_3d_max = np.zeros((self.batch_size,
                                 self.nvox,
                                 self.nvox,
                                 self.nvox,
                                 y_3d.shape[-1]))
        if not self.expval:
            X_grid = np.reshape(X_grid, (-1,
                                         self.nvox,
                                         self.nvox,
                                         self.nvox,
                                         3))
            for gridi in range(X_grid.shape[0]):
                x_coord_3d = X_grid[gridi, :, :, :, 0]
                y_coord_3d = X_grid[gridi, :, :, :, 1]
                z_coord_3d = X_grid[gridi, :, :, :, 2]
                for j in range(y_3d_max.shape[-1]):
                    # Isotropic Gaussian centered on the landmark position
                    y_3d_max[gridi, :, :, :, j] = \
                        np.exp(-((y_coord_3d-y_3d[gridi, 1, j])**2 +
                                 (x_coord_3d-y_3d[gridi, 0, j])**2 +
                                 (z_coord_3d-y_3d[gridi, 2, j])**2)/(2*self.sigma**2))
        if self.mono and self.chan_num == 3:
            # Convert from RGB to mono using the skimage formula. Drop the duplicated frames.
            # Reshape so RGB can be processed easily.
            X = np.reshape(
                X,
                (
                    X.shape[0],
                    X.shape[1],
                    X.shape[2],
                    X.shape[3],
                    self.chan_num,
                    -1,
                ),
                order="F",
            )
            X = (
                X[:, :, :, :, 0] * 0.2125
                + X[:, :, :, :, 1] * 0.7154
                + X[:, :, :, :, 2] * 0.0721
            )
        # NOTE(review): ncam is computed AFTER the mono conversion, which has
        # already collapsed the channel axis to one value per camera; with
        # mono=True and chan_num==3 this yields n_cameras // 3 — confirm
        # callers set chan_num=1 when mono is used.
        ncam = int(X.shape[-1]//self.chan_num)
        X, X_grid, y_3d = self.do_augmentation(X, X_grid, y_3d)
        # Randomly re-order, if desired
        X = self.do_random(X)
        if self.cam1:
            # collapse the cameras to the batch dimensions.
            X = np.reshape(X,
                           (X.shape[0],
                            X.shape[1],
                            X.shape[2],
                            X.shape[3],
                            self.chan_num,
                            -1),
                           order='F')
            X = np.transpose(X, [0, 5, 1, 2, 3, 4])
            X = np.reshape(X,
                           (-1,
                            X.shape[2],
                            X.shape[3],
                            X.shape[4],
                            X.shape[5]))
            if self.expval:
                y_3d = np.tile(y_3d, [ncam, 1, 1])
                X_grid = np.tile(X_grid, [ncam, 1, 1])
            else:
                y_3d = np.tile(y_3d, [ncam, 1, 1, 1, 1])
        X = processing.preprocess_3d(X)
        XX = []
        if self.prefeat:
            # Split into one tensor per camera for per-view feature extraction
            for ix in range(ncam):
                XX.append(X[..., ix*self.chan_num:(ix+1)*self.chan_num])
            X = XX
        if self.expval:
            if not self.prefeat:
                X = [X]
            X = X + [X_grid]
        if self.expval:
            if self.heatmap_reg:
                # NOTE(review): X is already a list [volumes..., X_grid] here,
                # so this nests it inside another list alongside X_grid —
                # verify this matches the model's expected input structure.
                return [X, X_grid, self.get_max_gt_ind(X_grid, y_3d)], [y_3d,
                    self.heatmap_reg_coeff*np.ones((self.batch_size, y_3d.shape[-1]), dtype='float32')]
            return X, y_3d
        else:
            return X, y_3d_max
| 38.403104
| 134
| 0.5085
| 12,386
| 106,415
| 4.232682
| 0.05918
| 0.009785
| 0.012017
| 0.017854
| 0.835578
| 0.808511
| 0.775855
| 0.750505
| 0.728551
| 0.713348
| 0
| 0.023121
| 0.395621
| 106,415
| 2,770
| 135
| 38.416968
| 0.792024
| 0.32424
| 0
| 0.677608
| 0
| 0
| 0.017943
| 0
| 0
| 0
| 0
| 0.001444
| 0.000586
| 1
| 0.025791
| false
| 0.003517
| 0.008793
| 0
| 0.067995
| 0.003517
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4169d41903e31adaea5325ef7eede568b5fbfeab
| 134
|
py
|
Python
|
pro_tes/middlewares/middleware.py
|
soumyadipDe/proTES
|
986b69c2408244a64fde811fbadbf9d8bd3c01e0
|
[
"Apache-2.0"
] | null | null | null |
pro_tes/middlewares/middleware.py
|
soumyadipDe/proTES
|
986b69c2408244a64fde811fbadbf9d8bd3c01e0
|
[
"Apache-2.0"
] | null | null | null |
pro_tes/middlewares/middleware.py
|
soumyadipDe/proTES
|
986b69c2408244a64fde811fbadbf9d8bd3c01e0
|
[
"Apache-2.0"
] | null | null | null |
class AbstractMiddleware:
    """Minimal middleware interface; concrete middlewares override send_data."""

    def send_data(self):
        """Hook for forwarding data; the base implementation does nothing."""
        pass
| 14.888889
| 43
| 0.604478
| 13
| 134
| 6
| 0.769231
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.328358
| 134
| 9
| 44
| 14.888889
| 0.866667
| 0.343284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
416d65587382d6565390e49345f343837e3714d5
| 44
|
py
|
Python
|
tools/__init__.py
|
alex18212010045/pytorch-priv
|
0c007d693ef20ed0168b8b766e58835af5e8eebf
|
[
"MIT"
] | 60
|
2017-12-29T03:31:48.000Z
|
2021-10-03T09:13:08.000Z
|
tools/__init__.py
|
alex18212010045/pytorch-priv
|
0c007d693ef20ed0168b8b766e58835af5e8eebf
|
[
"MIT"
] | 1
|
2018-01-24T02:19:47.000Z
|
2018-01-24T06:21:06.000Z
|
tools/__init__.py
|
alex18212010045/pytorch-priv
|
0c007d693ef20ed0168b8b766e58835af5e8eebf
|
[
"MIT"
] | 28
|
2017-12-29T06:15:10.000Z
|
2021-06-01T11:01:47.000Z
|
"""Useful tools
"""
from .painter import *
| 8.8
| 22
| 0.636364
| 5
| 44
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 4
| 23
| 11
| 0.777778
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
419c82edd2ae1e1a0ebe9a5948269e51aadb9e9a
| 34,485
|
py
|
Python
|
slise/slise.py
|
vishalbelsare/pyslise
|
3d81fe62a58755fa71755a0c02e56b9aa3e15e96
|
[
"MIT"
] | 3
|
2021-05-06T11:31:29.000Z
|
2022-03-18T19:20:33.000Z
|
slise/slise.py
|
vishalbelsare/pyslise
|
3d81fe62a58755fa71755a0c02e56b9aa3e15e96
|
[
"MIT"
] | null | null | null |
slise/slise.py
|
vishalbelsare/pyslise
|
3d81fe62a58755fa71755a0c02e56b9aa3e15e96
|
[
"MIT"
] | 1
|
2021-08-20T13:46:31.000Z
|
2021-08-20T13:46:31.000Z
|
"""
This script contains the main slise functions, and classes
"""
from __future__ import annotations
from typing import Union, Tuple, Callable, List
from warnings import warn
from matplotlib.pyplot import Figure
import numpy as np
from scipy.special import expit as sigmoid
from slise.data import (
DataScaling,
add_constant_columns,
add_intercept_column,
remove_constant_columns,
normalise_robust,
scale_same,
unscale_model,
)
from slise.optimisation import graduated_optimisation, loss_sharp
from slise.initialisation import initialise_candidates
from slise.utils import SliseWarning, mat_mul_inter, limited_logit
from slise.plot import (
print_slise,
plot_2d,
fill_column_names,
fill_prediction_str,
plot_dist,
plot_image,
plot_dist_single,
)
def regression(
    X: np.ndarray,
    Y: np.ndarray,
    epsilon: float,
    lambda1: float = 0,
    lambda2: float = 0,
    intercept: bool = True,
    normalise: bool = False,
    initialisation: Callable[
        ..., Tuple[np.ndarray, float]
    ] = initialise_candidates,
    beta_max: float = 20,
    max_approx: float = 1.15,
    max_iterations: int = 300,
    debug: bool = False,
) -> SliseRegression:
    """Use SLISE for robust regression.

    In robust regression we fit regression models that can handle data that
    contains outliers. SLISE accomplishes this by fitting a model such that
    the largest possible subset of the data items have an error less than a
    given value. All items with an error larger than that are considered
    potential outliers and do not affect the resulting model.

    It is highly recommended that you normalise the data, either before using SLISE or by setting normalise = TRUE.

    This is a wrapper that is equivalent to `SliseRegression(epsilon, **kwargs).fit(X, Y)`

    Args:
        X (np.ndarray): the data matrix
        Y (np.ndarray): the response vector
        epsilon (float): the error tolerance
        lambda1 (float, optional): the L1 regularisation strength. Defaults to 0.
        lambda2 (float, optional): the L2 regularisation strength. Defaults to 0.
        intercept (bool, optional): add an intercept term. Defaults to True.
        normalise (bool, optional): should X and Y be normalised (note that epsilon will not be scaled). Defaults to False.
        initialisation (Callable[..., Tuple[np.ndarray, float]], optional): function that takes (X, Y, epsilon) and gives initial values for alpha and beta. Defaults to initialise_candidates.
        beta_max (float, optional): the stopping sigmoid steepness. Defaults to 20.
        max_approx (float, optional): approximation ratio when selecting the next beta. Defaults to 1.15.
        max_iterations (int, optional): maximum number of OWL-QN iterations. Defaults to 300.
        debug (bool, optional): print debug statements each graduated optimisation step. Defaults to False.

    Returns:
        SliseRegression: object containing the regression result
    """
    return SliseRegression(
        epsilon,
        lambda1,
        lambda2,
        intercept,
        normalise,
        initialisation,
        beta_max,
        max_approx,
        max_iterations,
        debug,
    ).fit(X, Y)
def explain(
    X: np.ndarray,
    Y: np.ndarray,
    epsilon: float,
    x: Union[np.ndarray, int],
    y: Union[float, None] = None,
    lambda1: float = 0,
    lambda2: float = 0,
    logit: bool = False,
    normalise: bool = False,
    initialisation: Callable[
        ..., Tuple[np.ndarray, float]
    ] = initialise_candidates,
    beta_max: float = 20,
    max_approx: float = 1.15,
    max_iterations: int = 300,
    debug: bool = False,
) -> SliseExplainer:
    """Use SLISE for explaining outcomes from black box models.

    SLISE can also be used to provide local model-agnostic explanations for
    outcomes from black box models. To do this we replace the ground truth
    response vector with the predictions from the complex model. Furthermore, we
    force the model to fit a selected item (making the explanation local). This
    gives us a local approximation of the complex model with a simpler linear
    model. In contrast to other methods SLISE creates explanations using real
    data (not some discretised and randomly sampled data) so we can be sure that
    all inputs are valid (i.e. in the correct data manifold, and follows the
    constraints used to generate the data, e.g., the laws of physics).

    It is highly recommended that you normalise the data, either before using SLISE or by setting normalise = TRUE.

    This is a wrapper that is equivalent to `SliseExplainer(X, Y, epsilon, **kwargs).explain(x, y)`

    Args:
        X (np.ndarray): the data matrix
        Y (np.ndarray): the vector of predictions
        epsilon (float): the error tolerance
        x (Union[np.ndarray, int]): the data item to explain, or an index to get the item from self.X
        y (Union[float, None], optional): the outcome to explain. If x is an index then this should be None (y is taken from self.Y). Defaults to None.
        lambda1 (float, optional): the L1 regularisation strength. Defaults to 0.
        lambda2 (float, optional): the L2 regularisation strength. Defaults to 0.
        logit (bool, optional): do a logit transformation on the Y vector, this is recommended only if Y consists of probabilities. Defaults to False.
        normalise (bool, optional): should X and Y be normalised (note that epsilon will not be scaled). Defaults to False.
        initialisation (Callable[..., Tuple[np.ndarray, float]], optional): function that takes (X, Y, epsilon) and gives initial values for alpha and beta. Defaults to initialise_candidates.
        beta_max (float, optional): the final sigmoid steepness. Defaults to 20.
        max_approx (float, optional): approximation ratio when selecting the next beta. Defaults to 1.15.
        max_iterations (int, optional): maximum number of OWL-QN iterations. Defaults to 300.
        debug (bool, optional): print debug statements each graduated optimisation step. Defaults to False.

    Returns:
        SliseExplainer: object containing the explanation
    """
    return SliseExplainer(
        X,
        Y,
        epsilon,
        lambda1,
        lambda2,
        logit,
        normalise,
        initialisation,
        beta_max,
        max_approx,
        max_iterations,
        debug,
    ).explain(x, y)
class SliseRegression:
    """
    Class for holding the result from using SLISE for regression.
    Can also be used sklearn-style to do regression.
    """

    def __init__(
        self,
        epsilon: float,
        lambda1: float = 0,
        lambda2: float = 0,
        intercept: bool = True,
        normalise: bool = False,
        initialisation: Callable[
            ..., Tuple[np.ndarray, float]
        ] = initialise_candidates,
        beta_max: float = 20,
        max_approx: float = 1.15,
        max_iterations: int = 300,
        debug: bool = False,
    ):
        """Use SLISE for robust regression.

        In robust regression we fit regression models that can handle data that
        contains outliers. SLISE accomplishes this by fitting a model such that
        the largest possible subset of the data items have an error less than a
        given value. All items with an error larger than that are considered
        potential outliers and do not affect the resulting model.

        This constructor prepares the parameters, call `fit` to fit a robust regression to a dataset.
        It is highly recommended that you normalise the data, either before using SLISE or by setting normalise = TRUE.

        Args:
            epsilon (float): the error tolerance
            lambda1 (float, optional): the L1 regularisation strength. Defaults to 0.
            lambda2 (float, optional): the L2 regularisation strength. Defaults to 0.
            intercept (bool, optional): add an intercept term. Defaults to True.
            normalise (bool, optional): should X and Y be normalised (note that epsilon will not be scaled). Defaults to False.
            initialisation (Callable[..., Tuple[np.ndarray, float]], optional): function that takes (X, Y, epsilon) and gives initial values for alpha and beta. Defaults to initialise_candidates.
            beta_max (float, optional): the stopping sigmoid steepness. Defaults to 20.
            max_approx (float, optional): approximation ratio when selecting the next beta. Defaults to 1.15.
            max_iterations (int, optional): maximum number of OWL-QN iterations. Defaults to 300.
            debug (bool, optional): print debug statements each graduated optimisation step. Defaults to False.
        """
        self.epsilon_orig = epsilon
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.intercept = intercept
        self.normalise = normalise
        self.initialisation = initialisation
        self.beta_max = beta_max
        self.max_approx = max_approx
        self.max_iterations = max_iterations
        self.debug = debug
        # Fitted state, populated by fit(): alpha is the (possibly normalised)
        # model, coefficients the model in the original data space.
        self.alpha = None
        self.coefficients = None
        self.epsilon = epsilon
        self.X = None
        self.Y = None
        self.scale = None

    def fit(self, X: np.ndarray, Y: np.ndarray) -> SliseRegression:
        """Robustly fit a linear regression to a dataset

        Args:
            X (np.ndarray): the data matrix
            Y (np.ndarray): the response vector

        Returns:
            SliseRegression: self, containing the regression result
        """
        if len(X.shape) == 1:
            # Promote a 1D feature vector to a single-column matrix
            X = np.reshape(X, X.shape + (1,))
        else:
            X = X.copy()
        Y = Y.copy()
        self.X = X
        self.Y = Y
        # Preprocessing
        if self.normalise:
            X, x_cols = remove_constant_columns(X)
            if self.X.shape[1] == X.shape[1]:
                # No columns were removed, so no column mask is needed
                x_cols = None
            X, x_center, x_scale = normalise_robust(X)
            Y, y_center, y_scale = normalise_robust(Y)
            self.scale = DataScaling(x_center, x_scale, y_center, y_scale, x_cols)
        if self.intercept:
            X = add_intercept_column(X)
        # Initialisation
        alpha, beta = self.initialisation(X, Y, self.epsilon_orig)
        # Optimisation
        alpha = graduated_optimisation(
            alpha,
            X,
            Y,
            epsilon=self.epsilon_orig,
            lambda1=self.lambda1,
            lambda2=self.lambda2,
            beta=beta,
            beta_max=self.beta_max,
            max_approx=self.max_approx,
            max_iterations=self.max_iterations,
            debug=self.debug,
        )
        self.alpha = alpha
        if self.normalise:
            # Map the model back to the original (unnormalised) data space
            alpha2 = self.scale.unscale_model(alpha)
            if not self.intercept:
                if np.abs(alpha2[0]) > 1e-8:
                    warn(
                        "Intercept introduced due to scaling, consider setting intercept=True (or normalise=False)",
                        SliseWarning,
                    )
                    self.intercept = True
                    self.alpha = np.concatenate(([0], alpha))
                else:
                    alpha2 = alpha2[1:]
            self.coefficients = alpha2
            # epsilon was defined in the normalised space; rescale it to match
            self.epsilon = self.epsilon_orig * y_scale
        else:
            self.coefficients = alpha
        return self

    def get_params(self, normalised: bool = False) -> np.ndarray:
        """Get the coefficients of the linear model

        Args:
            normalised (bool, optional): if the data is normalised within SLISE, return a linear model fitting the normalised data. Defaults to False.

        Returns:
            np.ndarray: the coefficients of the linear model
        """
        return self.alpha if normalised else self.coefficients

    @property
    def normalised(self):
        """The normalised-space model with constant columns re-inserted, or None when normalise=False."""
        if self.normalise:
            return add_constant_columns(self.alpha, self.scale.columns, self.intercept)
        else:
            return None

    def predict(self, X: Union[np.ndarray, None] = None) -> np.ndarray:
        """Use the fitted model to predict new responses

        Args:
            X (Union[np.ndarray, None], optional): data matrix to predict, or None for using the fitted dataset. Defaults to None.

        Returns:
            np.ndarray: the predicted response
        """
        if X is None:
            return mat_mul_inter(self.X, self.coefficients)
        else:
            return mat_mul_inter(X, self.coefficients)

    def score(
        self, X: Union[np.ndarray, None] = None, Y: Union[np.ndarray, None] = None
    ) -> float:
        """Calculate the loss. Lower is better and it should usually be negative (unless the regularisation is very (/too?) strong).

        Args:
            X (Union[np.ndarray, None], optional): data matrix, or None for using the fitted dataset. Defaults to None.
            Y (Union[np.ndarray, None], optional): response vector, or None for using the fitted dataset. Defaults to None.

        Returns:
            float: the loss
        """
        if X is None or Y is None:
            X = self.X
            Y = self.Y
        if self.normalise:
            # The sharp loss is evaluated in the normalised space
            X = self.scale.scale_x(X)
            Y = self.scale.scale_y(Y)
        return loss_sharp(
            self.alpha, X, Y, self.epsilon_orig, self.lambda1, self.lambda2
        )

    # Alias: loss is a synonym for score
    loss = score

    def subset(
        self, X: Union[np.ndarray, None] = None, Y: Union[np.ndarray, None] = None
    ) -> np.ndarray:
        """Get the subset (of non-outliers) used for the robust regression model

        Args:
            X (Union[np.ndarray, None], optional): data matrix, or None for using the fitted dataset. Defaults to None.
            Y (Union[np.ndarray, None], optional): response vector, or None for using the fitted dataset. Defaults to None.

        Returns:
            np.ndarray: the selected subset as a boolean mask
        """
        if X is None or Y is None:
            X = self.X
            Y = self.Y
        Y2 = mat_mul_inter(X, self.coefficients)
        return (Y2 - Y) ** 2 < self.epsilon ** 2

    def print(
        self,
        variables: Union[List[str], None] = None,
        decimals: int = 3,
        num_var: int = 10,
    ):
        """Print the current robust regression result

        Args:
            variables (Union[List[str], None], optional): names of the variables/columns in X. Defaults to None.
            decimals (int, optional): the precision to use for printing. Defaults to 3.
            num_var (int, optional): exclude zero weights if there are too many variables. Defaults to 10.
        """
        print_slise(
            self.coefficients,
            self.intercept,
            self.subset(),
            self.score(),
            self.epsilon,
            variables,
            "SLISE Regression",
            decimals,
            num_var,
            alpha=self.normalised,
        )

    def plot_2d(
        self,
        title: str = "SLISE Regression",
        label_x: str = "x",
        label_y: str = "y",
        decimals: int = 3,
        fig: Union[Figure, None] = None,
    ) -> None:
        """Plot the regression in a 2D scatter plot with a line for the regression model

        Args:
            title (str, optional): plot title. Defaults to "SLISE Regression".
            label_x (str, optional): x-axis label. Defaults to "x".
            label_y (str, optional): y-axis label. Defaults to "y".
            decimals (int, optional): number of decimals when writing numbers. Defaults to 3.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.

        Raises:
            SliseException: if the data has too many dimensions
        """
        plot_2d(
            self.X,
            self.Y,
            self.coefficients,
            self.epsilon,
            None,
            None,
            False,
            title,
            label_x,
            label_y,
            decimals,
            fig,
        )

    def plot_dist(
        self,
        title: str = "SLISE Regression",
        variables: list = None,
        decimals: int = 3,
        fig: Union[Figure, None] = None,
    ) -> None:
        """Plot the regression with density distributions for the dataset and a barplot for the model.

        Args:
            title (str, optional): title of the plot. Defaults to "SLISE Regression".
            variables (list, optional): names for the variables. Defaults to None.
            decimals (int, optional): the number of decimals to write. Defaults to 3.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.
        """
        plot_dist(
            self.X,
            self.Y,
            self.coefficients,
            self.subset(),
            self.normalised,
            None,
            None,
            None,
            None,
            title,
            variables,
            decimals,
            fig,
        )

    def plot_subset(
        self,
        title: str = "Response Distribution",
        decimals: int = 0,
        fig: Union[Figure, None] = None,
    ):
        """Plot a density distributions for response and the response of the subset

        Args:
            title (str, optional): title of the plot. Defaults to "Response Distribution".
            decimals (int, optional): number of decimals when writing the subset size. Defaults to 0.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.
        """
        plot_dist_single(self.Y, self.subset(), None, title, decimals, fig)
class SliseExplainer:
    """
    Class for holding the result from using SLISE as an explainer.
    Can also be used sklearn-style to create explanations.
    """

    def __init__(
        self,
        X: np.ndarray,
        Y: np.ndarray,
        epsilon: float,
        lambda1: float = 0,
        lambda2: float = 0,
        logit: bool = False,
        normalise: bool = False,
        # NOTE: the original annotation `Callable[np.ndarray, ..., Tuple[...]]`
        # is invalid typing syntax (typing.Callable requires `[[args], ret]` or
        # `[..., ret]`) and raises a TypeError when evaluated.
        initialisation: Callable[
            ..., Tuple[np.ndarray, float]
        ] = initialise_candidates,
        beta_max: float = 20,
        max_approx: float = 1.15,
        max_iterations: int = 300,
        debug: bool = False,
    ):
        """Use SLISE for explaining outcomes from black box models.

        SLISE can also be used to provide local model-agnostic explanations for
        outcomes from black box models. To do this we replace the ground truth
        response vector with the predictions from the complex model.
        Furthermore, we force the model to fit a selected item (making the
        explanation local). This gives us a local approximation of the complex
        model with a simpler linear model. In contrast to other methods SLISE
        creates explanations using real data (not some discretised and randomly
        sampled data) so we can be sure that all inputs are valid (i.e. in the
        correct data manifold, and follows the constraints used to generate the
        data, e.g., the laws of physics).

        This prepares the dataset used for the explanations, call `explain` on this object to explain outcomes.
        It is highly recommended that you normalise the data, either before using SLISE or by setting normalise = True.

        Args:
            X (np.ndarray): the data matrix
            Y (np.ndarray): the vector of predictions
            epsilon (float): the error tolerance
            lambda1 (float, optional): the L1 regularisation strength. Defaults to 0.
            lambda2 (float, optional): the L2 regularisation strength. Defaults to 0.
            logit (bool, optional): do a logit transformation on the Y vector, this is recommended only if Y consists of probabilities. Defaults to False.
            normalise (bool, optional): should X and Y be normalised (note that epsilon will not be scaled). Defaults to False.
            initialisation (Callable[..., Tuple[np.ndarray, float]], optional): function that takes (X, Y, epsilon) and gives initial values for alpha and beta. Defaults to initialise_candidates.
            beta_max (float, optional): the final sigmoid steepness. Defaults to 20.
            max_approx (float, optional): approximation ratio when selecting the next beta. Defaults to 1.15.
            max_iterations (int, optional): maximum number of OWL-QN iterations. Defaults to 300.
            debug (bool, optional): print debug statements each graduated optimisation step. Defaults to False.
        """
        self.epsilon_orig = epsilon
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.logit = logit
        self.normalise = normalise
        self.initialisation = initialisation
        self.beta_max = beta_max
        self.max_approx = max_approx
        self.max_iterations = max_iterations
        self.debug = debug
        # Work on copies so the caller's arrays are never mutated; a 1D X is
        # promoted to a single-column data matrix.
        if len(X.shape) == 1:
            X = np.reshape(X, X.shape + (1,))
        else:
            X = X.copy()
        Y = Y.copy()
        self.X = X
        self.Y = Y
        # Explanation state, populated by `explain`.
        self.x = None
        self.y = None
        self.alpha = None
        self.coefficients = None
        # Preprocess data
        if logit:
            Y = limited_logit(Y)
        if self.normalise:
            X2, x_cols = remove_constant_columns(X)
            if X.shape[1] == X2.shape[1]:
                # No columns were removed, so no column mask is needed.
                x_cols = None
            X, x_center, x_scale = normalise_robust(X2)
            Y, y_center, y_scale = normalise_robust(Y)
            self.scale = DataScaling(x_center, x_scale, y_center, y_scale, x_cols)
            self.epsilon = epsilon * y_scale
        else:
            self.scale = None
            self.epsilon = epsilon
        # X2/Y2 hold the (possibly logit-transformed and normalised) data that
        # the optimisation runs on; X/Y keep the caller's original values.
        self.X2 = X
        self.Y2 = Y

    def explain(
        self, x: Union[np.ndarray, int], y: Union[float, None] = None
    ) -> "SliseExplainer":
        """Explain an outcome from a black box model

        Args:
            x (Union[np.ndarray, int]): the data item to explain, or an index to get the item from self.X
            y (Union[float, None], optional): the outcome to explain. If x is an index then this should be None (y is taken from self.Y). Defaults to None.

        Returns:
            SliseExplainer: self, with values set to the explanation
        """
        if y is None:
            # x is an index into the stored dataset
            self.y = self.Y[x]
            self.x = self.X[x, :]
            y = self.Y2[x]
            x = self.X2[x, :]
        else:
            x = np.atleast_1d(x)
            self.x = x
            self.y = y
            # Apply the same preprocessing as in __init__ to the new item.
            if self.logit:
                y = limited_logit(y)
            if self.normalise:
                x = self.scale.scale_x(x)
                y = self.scale.scale_y(y)
        # Centre the data on the explained item so that the (intercept-free)
        # linear model is forced through it, making the explanation local.
        X = self.X2 - x[None, :]
        Y = self.Y2 - y
        alpha, beta = self.initialisation(X, Y, self.epsilon_orig)
        alpha = graduated_optimisation(
            alpha,
            X,
            Y,
            epsilon=self.epsilon_orig,
            lambda1=self.lambda1,
            lambda2=self.lambda2,
            beta=beta,
            beta_max=self.beta_max,
            max_approx=self.max_approx,
            max_iterations=self.max_iterations,
            debug=self.debug,
        )
        # Recover the intercept so the model applies to the uncentred data.
        alpha = np.concatenate(
            (y - np.sum(alpha * x, dtype=x.dtype, keepdims=True), alpha)
        )
        self.alpha = alpha
        if self.normalise:
            # Also produce coefficients in the original (unnormalised) space,
            # re-anchoring the intercept on the explained item.
            alpha2 = self.scale.unscale_model(alpha)
            alpha2[0] = self.y - np.sum(self.x * alpha2[1:])
            self.coefficients = alpha2
        else:
            self.coefficients = alpha
        return self

    def get_params(self, normalised: bool = False) -> np.ndarray:
        """Get the explanation as the coefficients of a linear model (approximating the black box model)

        Args:
            normalised (bool, optional): if the data is normalised within SLISE, return a linear model fitting the normalised data. Defaults to False.

        Returns:
            np.ndarray: the coefficients of the linear model (the first scalar in the vector is the intercept)
        """
        return self.alpha if normalised else self.coefficients

    @property
    def normalised(self):
        # The normalised model padded back to the full column count, or None
        # when normalisation is not used.
        if self.normalise:
            return add_constant_columns(self.alpha, self.scale.columns, True)
        else:
            return None

    def predict(self, X: Union[np.ndarray, None] = None) -> np.ndarray:
        """Use the approximating linear model to predict new outcomes

        Args:
            X (Union[np.ndarray, None], optional): data matrix to predict, or None for using the fitted dataset. Defaults to None.

        Returns:
            np.ndarray: prediction vector
        """
        if X is None:
            Y = mat_mul_inter(self.X, self.coefficients)
        else:
            Y = mat_mul_inter(X, self.coefficients)
        # BUGFIX: was `self.scaler.logit`, but this class never sets a
        # `scaler` attribute (only `scale` and `logit`), so predicting with
        # logit enabled raised an AttributeError.
        if self.logit:
            # Undo the logit preprocessing to return probabilities.
            Y = sigmoid(Y)
        return Y

    def score(
        self, X: Union[np.ndarray, None] = None, Y: Union[np.ndarray, None] = None
    ) -> float:
        """Calculate the loss. Lower is better and it should usually be negative (unless the regularisation is very (/too?) strong).

        Args:
            X (Union[np.ndarray, None], optional): data matrix, or None for using the fitted dataset. Defaults to None.
            Y (Union[np.ndarray, None], optional): response vector, or None for using the fitted dataset. Defaults to None.

        Returns:
            float: the loss
        """
        x = self.x
        y = self.y
        # Bring the explained item into the preprocessed space.
        if self.logit:
            y = limited_logit(y)
        if self.normalise:
            x = self.scale.scale_x(x)
            y = self.scale.scale_y(y)
        if X is None or Y is None:
            X = self.X2
            Y = self.Y2
        else:
            if self.logit:
                Y = limited_logit(Y)
            if self.normalise:
                X = self.scale.scale_x(X)
                Y = self.scale.scale_y(Y)
        # Centre on the explained item, matching the setup used in `explain`.
        X = X - x[None, :]
        Y = Y - y
        return loss_sharp(
            self.alpha[1:], X, Y, self.epsilon_orig, self.lambda1, self.lambda2,
        )

    loss = score

    def subset(
        self, X: Union[np.ndarray, None] = None, Y: Union[np.ndarray, None] = None
    ) -> np.ndarray:
        """Get the subset / neighbourhood used for the approximation (explanation)

        Args:
            X (Union[np.ndarray, None], optional): data matrix, or None for using the fitted dataset. Defaults to None.
            Y (Union[np.ndarray, None], optional): response vector, or None for using the fitted dataset. Defaults to None.

        Returns:
            np.ndarray: the subset as a boolean mask
        """
        if X is None or Y is None:
            X = self.X
            Y = self.Y
        if self.logit:
            Y = limited_logit(Y)
        res = mat_mul_inter(X, self.coefficients) - Y
        # Items whose residual is within the error tolerance form the subset.
        return res ** 2 < self.epsilon ** 2

    def get_impact(
        self, normalised: bool = False, x: Union[None, np.ndarray] = None
    ) -> np.ndarray:
        """Get the "impact" of different variables on the outcome.
        The impact is the (normalised) model times the (normalised) item.

        Args:
            normalised (bool, optional): return the normalised impact (if normalisation is used). Defaults to False.
            x (Union[None, np.ndarray], optional): the item to calculate the impact for (uses the explained item if None). Defaults to None.

        Returns:
            np.ndarray: the impact vector
        """
        if x is None:
            x = self.x
        if normalised and self.normalise:
            return add_constant_columns(
                add_intercept_column(self.scale.scale_x(x)) * self.alpha,
                self.scale.columns,
                True,
            )
        else:
            return add_intercept_column(x) * self.coefficients

    def print(
        self,
        variables: Union[List[str], None] = None,
        classes: Union[List[str], None] = None,
        num_var: int = 10,
        decimals: int = 3,
    ):
        """Print the current explanation

        Args:
            variables (Union[List[str], None], optional): the names of the (columns/) variables. Defaults to None.
            classes (Union[List[str], None], optional): the names of the classes, if explaining a classifier. Defaults to None.
            num_var (int, optional): exclude zero weights if there are too many variables. Defaults to 10.
            decimals (int, optional): the precision to use for printing. Defaults to 3.
        """
        print_slise(
            self.coefficients,
            True,
            self.subset(),
            self.score(),
            self.epsilon,
            variables,
            "SLISE Explanation",
            decimals,
            num_var,
            unscaled=self.x,
            unscaled_y=self.y,
            impact=self.get_impact(False),
            scaled=None if self.scale is None else self.scale.scale_x(self.x, False),
            alpha=self.normalised,
            scaled_impact=None if self.scale is None else self.get_impact(True),
            classes=classes,
            unscaled_preds=self.Y,
            logit=self.logit,
        )

    def plot_2d(
        self,
        title: str = "SLISE Explanation",
        label_x: str = "x",
        label_y: str = "y",
        decimals: int = 3,
        fig: Union[Figure, None] = None,
    ) -> None:
        """Plot the explanation in a 2D scatter plot (where the explained item is marked) with a line for the approximating model.

        Args:
            title (str, optional): plot title. Defaults to "SLISE Explanation".
            label_x (str, optional): x-axis label. Defaults to "x".
            label_y (str, optional): y-axis label. Defaults to "y".
            decimals (int, optional): number of decimals when writing numbers. Defaults to 3.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.

        Raises:
            SliseException: if the data has too many dimensions
        """
        plot_2d(
            self.X,
            self.Y,
            self.coefficients,
            self.epsilon,
            self.x,
            self.y,
            self.logit,
            title,
            label_x,
            label_y,
            decimals,
            fig,
        )

    def plot_image(
        self,
        width: int,
        height: int,
        saturated: bool = True,
        title: str = "SLISE Explanation",
        classes: Union[List, str, None] = None,
        decimals: int = 3,
        fig: Union[Figure, None] = None,
    ) -> None:
        """Plot the current explanation for a black and white image (e.g. MNIST)

        Args:
            width (int): the width of the image
            height (int): the height of the image
            saturated (bool, optional): should the explanation be more saturated. Defaults to True.
            title (str, optional): title of the plot. Defaults to "SLISE Explanation".
            classes (Union[List, str, None], optional): list of class names (first the negative, then the positive), or a single (positive) class name. Defaults to None.
            decimals (int, optional): the number of decimals to write. Defaults to 3.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.
        """
        plot_image(
            self.x,
            self.y,
            self.Y,
            self.coefficients,
            width,
            height,
            saturated,
            title,
            classes,
            decimals,
            fig,
        )

    def plot_dist(
        self,
        title: str = "SLISE Explanation",
        variables: Union[list, None] = None,
        decimals: int = 3,
        fig: Union[Figure, None] = None,
    ) -> None:
        """Plot the current explanation with density distributions for the dataset and a barplot for the model.

        The barplot contains both the approximating linear model (where the
        weights can be loosely interpreted as the importance of the different
        variables and their sign) and the "impact" which is the (scaled) model
        times the (scaled) item values (which demonstrates how the explained
        item interacts with the approximating linear model, since a negative
        weight times a negative value actually supports a positive prediction).

        Args:
            title (str, optional): title of the plot. Defaults to "SLISE Explanation".
            variables (Union[list, None], optional): names for the variables. Defaults to None.
            decimals (int, optional): the number of decimals to write. Defaults to 3.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.
        """
        plot_dist(
            self.X,
            self.Y,
            self.coefficients,
            self.subset(),
            self.normalised,
            self.x,
            self.y,
            self.get_impact(False),
            self.get_impact(True) if self.normalise else None,
            title,
            variables,
            decimals,
            fig,
        )

    def plot_subset(
        self,
        title: str = "Prediction Distribution",
        decimals: int = 0,
        fig: Union[Figure, None] = None,
    ):
        """Plot a density distributions for predictions and the predictions of the subset

        Args:
            title (str, optional): title of the plot. Defaults to "Prediction Distribution".
            decimals (int, optional): number of decimals when writing the subset size. Defaults to 0.
            fig (Union[Figure, None], optional): Pyplot figure to plot on, if None then a new plot is created and shown. Defaults to None.
        """
        plot_dist_single(self.Y, self.subset(), self.y, title, decimals, fig)
| 39.05436
| 231
| 0.599739
| 4,321
| 34,485
| 4.732701
| 0.090488
| 0.043032
| 0.0178
| 0.017604
| 0.7978
| 0.77956
| 0.759658
| 0.739364
| 0.718289
| 0.694425
| 0
| 0.008692
| 0.319385
| 34,485
| 882
| 232
| 39.098639
| 0.862596
| 0.475424
| 0
| 0.715356
| 0
| 0
| 0.015543
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048689
| false
| 0
| 0.020599
| 0
| 0.11236
| 0.009363
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
41a12adb994472b8b3633239b79a64ae8e3353b3
| 260
|
py
|
Python
|
odoo-13.0/addons/mrp_subcontracting/models/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/mrp_subcontracting/models/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/mrp_subcontracting/models/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import mrp_bom
from . import product
from . import res_company
from . import res_partner
from . import stock_move
from . import stock_move_line
from . import stock_picking
from . import stock_rule
from . import stock_warehouse
| 20
| 29
| 0.765385
| 39
| 260
| 4.871795
| 0.435897
| 0.473684
| 0.394737
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004608
| 0.165385
| 260
| 12
| 30
| 21.666667
| 0.870968
| 0.080769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41a6e57d6529007ecf97110bebec75a150b57f27
| 31
|
py
|
Python
|
src/odin/odin/mongo/__init__.py
|
wenshuoliu/odin
|
7998ee7541b3de44dd149899168983e964f2b8f7
|
[
"Apache-2.0"
] | 4
|
2020-12-15T15:57:14.000Z
|
2020-12-16T21:52:23.000Z
|
src/odin/odin/mongo/__init__.py
|
wenshuoliu/odin
|
7998ee7541b3de44dd149899168983e964f2b8f7
|
[
"Apache-2.0"
] | 2
|
2021-03-15T02:49:56.000Z
|
2021-03-27T12:42:38.000Z
|
src/odin/odin/mongo/__init__.py
|
wenshuoliu/odin
|
7998ee7541b3de44dd149899168983e964f2b8f7
|
[
"Apache-2.0"
] | 5
|
2020-12-15T19:09:00.000Z
|
2021-04-21T20:40:38.000Z
|
from odin.mongo.store import *
| 15.5
| 30
| 0.774194
| 5
| 31
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
68c1dde563d81a9fe4f69720cb4675614c6d22f7
| 2,312
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/A_30_01_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_30_01_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_30_01_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_30_01_9 = {0: {'A': -0.273, 'C': 0.188, 'E': 0.612, 'D': 0.566, 'G': 0.063, 'F': 0.287, 'I': -0.054, 'H': -0.485, 'K': -0.921, 'M': -0.389, 'L': -0.073, 'N': 0.183, 'Q': 0.237, 'P': 0.557, 'S': -0.23, 'R': -0.832, 'T': 0.104, 'W': 0.296, 'V': 0.041, 'Y': 0.121}, 1: {'A': -0.287, 'C': 0.263, 'E': 0.858, 'D': 0.713, 'G': -0.157, 'F': -0.334, 'I': -0.231, 'H': 0.256, 'K': 0.006, 'M': -0.198, 'L': 0.008, 'N': 0.265, 'Q': -0.209, 'P': 0.781, 'S': -0.534, 'R': 0.493, 'T': -0.734, 'W': -0.087, 'V': -0.558, 'Y': -0.317}, 2: {'A': 0.278, 'C': 0.26, 'E': 0.49, 'D': 0.656, 'G': 0.316, 'F': -0.316, 'I': 0.086, 'H': -0.599, 'K': -0.83, 'M': -0.113, 'L': -0.087, 'N': -0.009, 'Q': 0.084, 'P': 0.337, 'S': 0.158, 'R': -1.159, 'T': 0.231, 'W': 0.155, 'V': 0.185, 'Y': -0.123}, 3: {'A': -0.118, 'C': -0.122, 'E': 0.192, 'D': 0.179, 'G': 0.187, 'F': -0.064, 'I': -0.021, 'H': 0.011, 'K': -0.009, 'M': -0.209, 'L': 0.045, 'N': -0.034, 'Q': 0.144, 'P': -0.052, 'S': 0.011, 'R': -0.13, 'T': 0.068, 'W': 0.05, 'V': -0.028, 'Y': -0.099}, 4: {'A': -0.156, 'C': 0.141, 'E': 0.15, 'D': 0.267, 'G': 0.056, 'F': -0.109, 'I': 0.012, 'H': -0.129, 'K': 0.045, 'M': -0.065, 'L': 0.062, 'N': 0.028, 'Q': -0.061, 'P': 0.103, 'S': -0.091, 'R': -0.167, 'T': -0.117, 'W': 0.189, 'V': -0.095, 'Y': -0.063}, 5: {'A': 0.093, 'C': 0.118, 'E': 0.193, 'D': 0.252, 'G': 0.013, 'F': -0.098, 'I': 0.036, 'H': -0.028, 'K': 0.083, 'M': -0.022, 'L': 0.082, 'N': -0.032, 'Q': -0.001, 'P': 0.081, 'S': -0.119, 'R': -0.153, 'T': -0.086, 'W': -0.186, 'V': 0.011, 'Y': -0.237}, 6: {'A': 0.017, 'C': -0.073, 'E': 0.38, 'D': 0.302, 'G': 0.087, 'F': 0.022, 'I': -0.222, 'H': -0.083, 'K': 0.168, 'M': -0.217, 'L': -0.104, 'N': 0.003, 'Q': -0.011, 'P': -0.353, 'S': 0.045, 'R': -0.065, 'T': 0.171, 'W': 0.039, 'V': -0.037, 'Y': -0.068}, 7: {'A': 0.073, 'C': 0.043, 'E': 0.039, 'D': 0.209, 'G': 0.07, 'F': -0.407, 'I': -0.051, 'H': -0.048, 'K': 0.2, 'M': 0.162, 'L': -0.056, 'N': 0.023, 'Q': 0.128, 'P': -0.263, 'S': -0.047, 'R': 0.117, 'T': 0.053, 
'W': 0.034, 'V': -0.013, 'Y': -0.265}, 8: {'A': -0.746, 'C': 0.283, 'E': 0.384, 'D': 0.349, 'G': -0.216, 'F': 0.251, 'I': -0.299, 'H': 0.249, 'K': -1.203, 'M': -0.051, 'L': -0.189, 'N': 0.392, 'Q': 0.561, 'P': 0.122, 'S': 0.043, 'R': -0.09, 'T': 0.044, 'W': 0.599, 'V': -0.408, 'Y': -0.076}, -1: {'con': 4.23841}}
| 2,312
| 2,312
| 0.396194
| 557
| 2,312
| 1.639138
| 0.314183
| 0.019715
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375967
| 0.161332
| 2,312
| 1
| 2,312
| 2,312
| 0.094894
| 0
| 0
| 0
| 0
| 0
| 0.079118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec18ace0f077ad4df50e84619d1d9c0b72f79892
| 546
|
py
|
Python
|
tests/converters/test_darwin_case.py
|
gieseladev/lettercase
|
2b4b97d5b96fcb5cd12f2eec93e0c64c78b84f6f
|
[
"MIT"
] | null | null | null |
tests/converters/test_darwin_case.py
|
gieseladev/lettercase
|
2b4b97d5b96fcb5cd12f2eec93e0c64c78b84f6f
|
[
"MIT"
] | null | null | null |
tests/converters/test_darwin_case.py
|
gieseladev/lettercase
|
2b4b97d5b96fcb5cd12f2eec93e0c64c78b84f6f
|
[
"MIT"
] | null | null | null |
from lettercase import snake_to_darwin_case, to_darwin_case
# only need to test this conversion because all others are implemented using this
def test_snake_to_darwin_case():
    """snake_case input should be converted to Darwin_Case."""
    cases = {"snake_case": "Snake_Case", "this": "This"}
    for given, expected in cases.items():
        assert snake_to_darwin_case(given) == expected
def test_to_darwin_case():
    """snake, SCREAMING_SNAKE, camel and Pascal input all map to Darwin_Case."""
    for given in ("dom_dom_dom", "DOM_DOM_DOM", "domDomDom", "DomDomDom"):
        assert to_darwin_case(given) == "Dom_Dom_Dom"
| 36.4
| 81
| 0.760073
| 86
| 546
| 4.360465
| 0.255814
| 0.224
| 0.32
| 0.192
| 0.528
| 0.528
| 0.384
| 0.384
| 0.384
| 0.384
| 0
| 0
| 0.137363
| 546
| 14
| 82
| 39
| 0.796178
| 0.144689
| 0
| 0
| 0
| 0
| 0.24086
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.222222
| true
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec530dc2f3a6679fd1b48ce7562c1065931c5d6c
| 802
|
py
|
Python
|
torchvision/transforms/functional_tensor.py
|
liyichao/vision
|
53b062ca58932bbf387b96f2dd3397c4495b735b
|
[
"BSD-3-Clause"
] | 1
|
2020-01-31T01:06:21.000Z
|
2020-01-31T01:06:21.000Z
|
torchvision/transforms/functional_tensor.py
|
liyichao/vision
|
53b062ca58932bbf387b96f2dd3397c4495b735b
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/transforms/functional_tensor.py
|
liyichao/vision
|
53b062ca58932bbf387b96f2dd3397c4495b735b
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torchvision.transforms.functional as F
def vflip(img_tensor):
    """Flip the given Image Tensor vertically.

    Args:
        img_tensor (Tensor): Image Tensor to be flipped in the form [C, H, W].

    Returns:
        Tensor: Vertically flipped image Tensor.

    Raises:
        TypeError: if the input is not a torch image tensor.
    """
    if F._is_tensor_image(img_tensor):
        # Flip along the height axis (second-to-last dimension).
        return img_tensor.flip(-2)
    raise TypeError('tensor is not a torch image.')
def hflip(img_tensor):
    """Flip the given Image Tensor horizontally.

    Args:
        img_tensor (Tensor): Image Tensor to be flipped in the form [C, H, W].

    Returns:
        Tensor: Horizontally flipped image Tensor.

    Raises:
        TypeError: if the input is not a torch image tensor.
    """
    if F._is_tensor_image(img_tensor):
        # Flip along the width axis (last dimension).
        return img_tensor.flip(-1)
    raise TypeError('tensor is not a torch image.')
| 23.588235
| 78
| 0.662095
| 114
| 802
| 4.535088
| 0.324561
| 0.139265
| 0.046422
| 0.058027
| 0.746615
| 0.746615
| 0.746615
| 0.746615
| 0.746615
| 0.746615
| 0
| 0.003317
| 0.24813
| 802
| 33
| 79
| 24.30303
| 0.854063
| 0.457606
| 0
| 0.4
| 0
| 0
| 0.146597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
ec561830f130c4cca8ebe01d7490803e883af192
| 24
|
py
|
Python
|
pyzorder/__init__.py
|
smatsumoto78/pyzorder
|
1940f08d680536eb8d2ec0680d27e510bc5073d9
|
[
"MIT"
] | 9
|
2019-11-13T02:57:34.000Z
|
2021-11-21T18:50:45.000Z
|
pyzorder/__init__.py
|
smatsumoto78/pyzorder
|
1940f08d680536eb8d2ec0680d27e510bc5073d9
|
[
"MIT"
] | null | null | null |
pyzorder/__init__.py
|
smatsumoto78/pyzorder
|
1940f08d680536eb8d2ec0680d27e510bc5073d9
|
[
"MIT"
] | null | null | null |
from .pyzorder import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6b56f3af129c411d43802d2867a25b36b3ed8a74
| 359
|
py
|
Python
|
private/templates/default/maintenance.py
|
smeissner/eden
|
9c4c78f0808e53c52d3caa4fa68162cddc174547
|
[
"MIT"
] | 1
|
2021-01-21T18:24:25.000Z
|
2021-01-21T18:24:25.000Z
|
private/templates/default/maintenance.py
|
smeissner/eden
|
9c4c78f0808e53c52d3caa4fa68162cddc174547
|
[
"MIT"
] | null | null | null |
private/templates/default/maintenance.py
|
smeissner/eden
|
9c4c78f0808e53c52d3caa4fa68162cddc174547
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#from gluon import *
#from s3 import *
# =============================================================================
class Daily:
    """Daily maintenance tasks, invoked as a callable by the scheduler."""

    def __call__(self):
        # @ToDo: cleanup scheduler logs
        return None
# END =========================================================================
| 21.117647
| 79
| 0.32312
| 23
| 359
| 4.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.175487
| 359
| 16
| 80
| 22.4375
| 0.371622
| 0.746518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
6b68784411c3c2b4c9f99313f667d398acf58fd7
| 32
|
py
|
Python
|
Gammal/square.py
|
Magdyedwar1996/python-level-one-codes
|
066086672f43488bc8b32c620b5e2f94cedfe3da
|
[
"MIT"
] | 1
|
2021-11-16T14:14:38.000Z
|
2021-11-16T14:14:38.000Z
|
Gammal/square.py
|
Magdyedwar1996/python-level-one-codes
|
066086672f43488bc8b32c620b5e2f94cedfe3da
|
[
"MIT"
] | null | null | null |
Gammal/square.py
|
Magdyedwar1996/python-level-one-codes
|
066086672f43488bc8b32c620b5e2f94cedfe3da
|
[
"MIT"
] | null | null | null |
def square(x):
    """Return *x* multiplied by itself."""
    return x * x
| 16
| 16
| 0.5625
| 6
| 32
| 3
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3125
| 32
| 2
| 16
| 16
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
6bad227606379f1098485785aa791277b61916a5
| 281
|
py
|
Python
|
trade_remedies_caseworker/core/templatetags/organisation_initialism.py
|
uktrade/trade-remedies-caseworker
|
fece9fde3cb241d96cbc1aaf7188d976f8621600
|
[
"MIT"
] | 1
|
2020-08-27T09:53:00.000Z
|
2020-08-27T09:53:00.000Z
|
trade_remedies_caseworker/core/templatetags/organisation_initialism.py
|
uktrade/trade-remedies-caseworker
|
fece9fde3cb241d96cbc1aaf7188d976f8621600
|
[
"MIT"
] | 7
|
2020-10-14T16:23:42.000Z
|
2021-09-24T14:18:47.000Z
|
trade_remedies_caseworker/core/templatetags/organisation_initialism.py
|
uktrade/trade-remedies-caseworker
|
fece9fde3cb241d96cbc1aaf7188d976f8621600
|
[
"MIT"
] | null | null | null |
from core.templatetags import register
from django.conf import settings
"""
Template tag to display organisation initialism
Usage:
{% organisation_initialism %}
"""
@register.simple_tag
def organisation_initialism():
    """Template tag returning the organisation initialism from Django settings.

    Usage: {% organisation_initialism %}
    """
    return settings.ORGANISATION_INITIALISM
| 20.071429
| 48
| 0.758007
| 29
| 281
| 7.206897
| 0.62069
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174377
| 281
| 13
| 49
| 21.615385
| 0.900862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
d4176aa24d19ee65fc8184057bd45ac0f1dbd31b
| 39
|
py
|
Python
|
guet/commands/set/__init__.py
|
AbhishekMashetty/pairprogrammingmasetty
|
0528d4999b472ec6d94058193275a505eaf2c762
|
[
"Apache-2.0"
] | 13
|
2018-12-21T22:47:28.000Z
|
2021-12-17T14:27:35.000Z
|
guet/commands/set/__init__.py
|
chiptopher/guet
|
1099ee623311ba1d052237612efc9b06b7ff68bb
|
[
"Apache-2.0"
] | 63
|
2018-08-30T11:19:12.000Z
|
2021-05-13T12:11:08.000Z
|
guet/commands/set/__init__.py
|
chiptopher/guet
|
1099ee623311ba1d052237612efc9b06b7ff68bb
|
[
"Apache-2.0"
] | 7
|
2019-05-21T13:52:37.000Z
|
2022-01-30T22:57:21.000Z
|
from ._set import SetCommittersCommand
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d44471b1cb96cc46a2d1bbc5bd0ca3507cd80afb
| 100
|
py
|
Python
|
headliner/model/__init__.py
|
datitran/headliner
|
b59de8ad7920f22eab3a74e54585c7de388659ce
|
[
"MIT"
] | 1
|
2019-10-16T17:04:20.000Z
|
2019-10-16T17:04:20.000Z
|
headliner/model/__init__.py
|
lucko515/headliner
|
ac2cef164a7fbad19b93501177cf25993cf6c588
|
[
"MIT"
] | null | null | null |
headliner/model/__init__.py
|
lucko515/headliner
|
ac2cef164a7fbad19b93501177cf25993cf6c588
|
[
"MIT"
] | null | null | null |
from .summarizer_attention import SummarizerAttention
from .summarizer_basic import SummarizerBasic
| 33.333333
| 53
| 0.9
| 10
| 100
| 8.8
| 0.7
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 2
| 54
| 50
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d45ba668311d2c254dc9c59ef2705ce0a6bb6d08
| 9,770
|
py
|
Python
|
scratch/test_avoiders_of_3_4_4_pairs.py
|
PermutaTriangle/PermStruct
|
6a494aa9c3ae3c63d27ecbcc4b478f0501eb3e48
|
[
"BSD-3-Clause"
] | 1
|
2015-09-14T17:23:33.000Z
|
2015-09-14T17:23:33.000Z
|
scratch/test_avoiders_of_3_4_4_pairs.py
|
PermutaTriangle/PermStruct
|
6a494aa9c3ae3c63d27ecbcc4b478f0501eb3e48
|
[
"BSD-3-Clause"
] | null | null | null |
scratch/test_avoiders_of_3_4_4_pairs.py
|
PermutaTriangle/PermStruct
|
6a494aa9c3ae3c63d27ecbcc4b478f0501eb3e48
|
[
"BSD-3-Clause"
] | null | null | null |
import permstruct
import permstruct.dag
from permstruct.lib import Permutations
import time
def enume(perm_prop, N):
for n in range(N+1):
print sum([1 for perm in Permutations(n) if perm_prop(perm)])
print 'Done counting!'
time.sleep(5)
# Since we usually don't want overlays:
overlays = False
#------------------------------------------------#
# Avoiding one classical pattern of length 3 and two of length 4
#-- Symmetry-class 1 --#
# Info
# SUCCESS!
# Details: A116721
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([1, 3, 2, 4])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X_mon1(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 2 --#
# Info
# FAILURE
# Details: A116735
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([1, 3, 4, 2])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 3 --#
# Info
# FAILURE
# Details: A116728
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([2, 1, 3, 4])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 4 --#
# Info
# FAILURE
# Details: A116731
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([2, 1, 4, 3])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 5 --#
# Info
# FAILURE
# Details: A116729
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([2, 3, 1, 4])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 6 --#
# Info
# FAILURE
# Details: A116711
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([2, 3, 4, 1])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 7 --#
# Info -> Symmetry-class 1
# FAILURE
# Details: A116721
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([2, 4, 1, 3])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 8 --#
# Info
# FAILURE
# Details: A116727
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 2, 4, 3]) and p.avoids([3, 4, 1, 2])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 9 --#
# Info -> Symmetry-class 4
# FAILURE
# Details: A116731
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 2, 4]) and p.avoids([1, 3, 4, 2])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 10 --#
# Info -> Symmetry-class 4
# FAILURE
# Details: A116731
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 2, 4]) and p.avoids([2, 1, 4, 3])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 11 --#
# Info
# FAILURE
# Details: A116733
#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 2, 4]) and p.avoids([2, 3, 4, 1])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 12 --#
# Info
# FAILURE
# Details: A027927
# Number of plane regions after drawing (general position) convex n-gon
# and all diagonals.
# G.f.: x^2*(1-3*x+5*x^2-3*x^3+x^4)/(1-x)^5
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 2, 4]) and p.avoids([2, 4, 1, 3])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 4
# max_rules = 100
# ignored = 1
#-- Symmetry-class 13 --#
# Lemma
# Av(321, 132, 3412) is needed as a unit
# Info
# SUCCESS!
# Details: 1, 1, 2, 4, 8, 10, 12, 14, 16
# NOT ON OEIS
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 2]) and p.avoids([3, 4, 1, 2])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X_mon1(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
# Info
# SUCCESS!
# Details: Seems to be A046092 after length 3 (non-inclusive)
# 4 times triangular numbers: 2*n*(n+1)
# G.f.: 4*x/(1-x)^3
# E.g.f.: exp(x)*(2*x^2+4*x)
#
# BUT: There is no mention of permutations or patterns for this
# sequence!
#
# The exact cover was VERY slow on this problem. We should modify it so it
# covers the short permutations first.
perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 2, 4]) and p.avoids([3, 4, 1, 2])
# enume(perm_prop, 8)
perm_bound = 7
# inp_dag = permstruct.dag.N_P_X2_mon2(perm_prop, perm_bound)
inp_dag = permstruct.dag.N_P_taylored_for_av_321_1324_3412(perm_bound)
max_rule_size = (5, 5)
max_non_empty = 5
max_rules = 8
ignored = 1
#-- Symmetry-class 14 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([1, 4, 2, 3])
#-- Symmetry-class 15 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([2, 1, 4, 3])
#-- Symmetry-class 16 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([2, 3, 1, 4])
#-- Symmetry-class 17 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([2, 3, 4, 1])
#-- Symmetry-class 18 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([2, 4, 1, 3])
#-- Symmetry-class 19 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([3, 1, 2, 4])
#-- Symmetry-class 20 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([3, 1, 4, 2])
#-- Symmetry-class 21 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([3, 4, 1, 2])
#-- Symmetry-class 22 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([1, 3, 4, 2]) and p.avoids([4, 1, 2, 3])
#-- Symmetry-class 23 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 1, 4, 3]) and p.avoids([2, 3, 4, 1])
#-- Symmetry-class 24 --#
# Info
# SUCCESS!
# Details: A000325
# These seem to be the Grassmannian permutations
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 1, 4, 3]) and p.avoids([2, 4, 1, 3])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X_mon1(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
#-- Symmetry-class 25 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 1, 4, 3]) and p.avoids([3, 4, 1, 2])
#-- Symmetry-class 26 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 3, 4, 1]) and p.avoids([2, 4, 1, 3])
#-- Symmetry-class 27 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 3, 4, 1]) and p.avoids([3, 4, 1, 2])
#-- Symmetry-class 28 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 3, 4, 1]) and p.avoids([4, 1, 2, 3])
#-- Symmetry-class 29 --#
# Info
# SUCCESS!
# Details: A034943
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 4, 1, 3]) and p.avoids([3, 1, 4, 2])
# enume(perm_prop, 8)
# perm_bound = 8
# inp_dag = permstruct.dag.N_P_X_mon1(perm_prop, perm_bound)
# max_rule_size = (3, 3)
# max_non_empty = 3
# max_rules = 100
# ignored = 1
#-- Symmetry-class 30 --#
# perm_prop = lambda p: p.avoids([3, 2, 1]) and p.avoids([2, 4, 1, 3]) and p.avoids([3, 4, 1, 2])
#------------------------------------------------#
if not overlays:
permstruct.exhaustive(perm_prop,
perm_bound,
inp_dag,
max_rule_size,
max_non_empty,
max_rules,
ignore_first = ignored)
else:
permstruct.exhaustive_with_overlays(perm_prop,
perm_bound,
inp_dag,
max_rule_size,
max_non_empty,
max_rules,
overlay_dag,
max_overlay_cnt,
max_overlay_size,
min_rule_size=(1,1))
| 26.476965
| 97
| 0.569294
| 1,658
| 9,770
| 3.188179
| 0.103136
| 0.123156
| 0.117291
| 0.070753
| 0.747446
| 0.743663
| 0.73969
| 0.737798
| 0.73723
| 0.709232
| 0
| 0.098012
| 0.253327
| 9,770
| 368
| 98
| 26.548913
| 0.626594
| 0.796622
| 0
| 0.277778
| 0
| 0
| 0.008197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.111111
| null | null | 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d48569072c38e01c75061ea0dc332d6a7bfadeed
| 1,591
|
py
|
Python
|
web/transiq/restapi/migrations/0015_auto_20180904_1114.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
web/transiq/restapi/migrations/0015_auto_20180904_1114.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | 14
|
2020-06-05T23:06:45.000Z
|
2022-03-12T00:00:18.000Z
|
web/transiq/restapi/migrations/0015_auto_20180904_1114.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-09-04 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restapi', '0014_auto_20180829_1539'),
]
operations = [
migrations.AlterField(
model_name='bookingstatuses',
name='status',
field=models.CharField(choices=[('confirmed', 'Confirmed'), ('loaded', 'Loaded'), ('lr_generated', 'Lr Generated'), ('advance_paid', 'Advance_Paid'), ('reconciled', 'Reconciled'), ('unloaded', 'Unloaded'), ('pod_uploaded', 'PoD Uploaded'), ('pod_verified', 'PoD Verified'), ('invoice_raised', 'Invoice Raised'), ('invoice_confirmed', 'Invoice Confirmed'), ('balance_paid', 'Balance Paid'), ('party_invoice_sent', 'Party Invoice Sent'), ('inward_followup', 'Inward Followup'), ('complete', 'Complete')], default='confirmed', max_length=35, null=True),
),
migrations.AlterField(
model_name='historicalbookingstatuses',
name='status',
field=models.CharField(choices=[('confirmed', 'Confirmed'), ('loaded', 'Loaded'), ('lr_generated', 'Lr Generated'), ('advance_paid', 'Advance_Paid'), ('reconciled', 'Reconciled'), ('unloaded', 'Unloaded'), ('pod_uploaded', 'PoD Uploaded'), ('pod_verified', 'PoD Verified'), ('invoice_raised', 'Invoice Raised'), ('invoice_confirmed', 'Invoice Confirmed'), ('balance_paid', 'Balance Paid'), ('party_invoice_sent', 'Party Invoice Sent'), ('inward_followup', 'Inward Followup'), ('complete', 'Complete')], default='confirmed', max_length=35, null=True),
),
]
| 66.291667
| 562
| 0.654305
| 161
| 1,591
| 6.285714
| 0.36646
| 0.043478
| 0.055336
| 0.057312
| 0.754941
| 0.754941
| 0.754941
| 0.754941
| 0.754941
| 0.754941
| 0
| 0.026022
| 0.15462
| 1,591
| 23
| 563
| 69.173913
| 0.726394
| 0.028284
| 0
| 0.470588
| 1
| 0
| 0.492228
| 0.031088
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4856d7166a2177349ba9a288e5aa944e4d1836d
| 76
|
py
|
Python
|
fundamentals-of-programming/labs/lab_5-11/ui/ui.py
|
vampy/university
|
9496cb63594dcf1cc2cec8650b8eee603f85fdab
|
[
"MIT"
] | 6
|
2015-06-22T19:43:13.000Z
|
2019-07-15T18:08:41.000Z
|
fundamentals-of-programming/labs/lab_5-11/ui/ui.py
|
vampy/university
|
9496cb63594dcf1cc2cec8650b8eee603f85fdab
|
[
"MIT"
] | null | null | null |
fundamentals-of-programming/labs/lab_5-11/ui/ui.py
|
vampy/university
|
9496cb63594dcf1cc2cec8650b8eee603f85fdab
|
[
"MIT"
] | 1
|
2015-09-26T09:01:54.000Z
|
2015-09-26T09:01:54.000Z
|
#!/usr/bin/python
from console import Console
class UI(Console):
pass
| 10.857143
| 27
| 0.710526
| 11
| 76
| 4.909091
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 76
| 6
| 28
| 12.666667
| 0.870968
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
2e4f0d9fbb1bf594c984bebaa8594d86eb355249
| 344
|
py
|
Python
|
venv/lib/python3.9/site-packages/blurhash/__init__.py
|
briansodenkin/People-Counting-in-Real-Time
|
b40b4483ecdcf0cc3c67e4c45f6ab512c12fd485
|
[
"MIT"
] | 66
|
2019-05-09T22:12:42.000Z
|
2022-02-26T03:17:52.000Z
|
venv/lib/python3.9/site-packages/blurhash/__init__.py
|
briansodenkin/People-Counting-in-Real-Time
|
b40b4483ecdcf0cc3c67e4c45f6ab512c12fd485
|
[
"MIT"
] | 4
|
2019-10-04T17:19:31.000Z
|
2021-06-17T07:41:18.000Z
|
venv/lib/python3.9/site-packages/blurhash/__init__.py
|
briansodenkin/People-Counting-in-Real-Time
|
b40b4483ecdcf0cc3c67e4c45f6ab512c12fd485
|
[
"MIT"
] | 2
|
2020-02-22T22:31:25.000Z
|
2020-08-09T01:42:34.000Z
|
from .blurhash import blurhash_encode as encode
from .blurhash import blurhash_decode as decode
from .blurhash import blurhash_components as components
from .blurhash import srgb_to_linear as srgb_to_linear
from .blurhash import linear_to_srgb as linear_to_srgb
__all__ = ['encode', 'decode', 'components', 'srgb_to_linear', 'linear_to_srgb']
| 43
| 80
| 0.825581
| 51
| 344
| 5.196078
| 0.215686
| 0.226415
| 0.339623
| 0.29434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110465
| 344
| 7
| 81
| 49.142857
| 0.866013
| 0
| 0
| 0
| 0
| 0
| 0.145349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e58aa2a93671bec8a696a19540cfe35282a03d3
| 107
|
py
|
Python
|
ostn02python/__init__.py
|
IanHopkinson/ostn02python
|
54e6aa52308859f0fcc306090612489e2a4e754e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
ostn02python/__init__.py
|
IanHopkinson/ostn02python
|
54e6aa52308859f0fcc306090612489e2a4e754e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
ostn02python/__init__.py
|
IanHopkinson/ostn02python
|
54e6aa52308859f0fcc306090612489e2a4e754e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import ostn02python.OSGB #import parse_grid, grid_to_ll
import ostn02python.OSTN02 #import OSGB36_to_ETRS89
| 53.5
| 55
| 0.878505
| 16
| 107
| 5.5625
| 0.625
| 0.404494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10101
| 0.074766
| 107
| 2
| 56
| 53.5
| 0.79798
| 0.485981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e6230b5750b90bdfc4932d555e515044ece2bec
| 54
|
py
|
Python
|
src/tests/utils/__init__.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | null | null | null |
src/tests/utils/__init__.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 4
|
2021-03-30T12:35:36.000Z
|
2021-06-10T18:11:24.000Z
|
src/tests/utils/__init__.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 2
|
2021-02-07T16:16:36.000Z
|
2021-07-13T05:26:51.000Z
|
from .test_page import *
from .test_paginator import *
| 27
| 29
| 0.796296
| 8
| 54
| 5.125
| 0.625
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 2
| 29
| 27
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e6b79a6c0f245527d124391ebd380d41aa3bc32
| 5,208
|
py
|
Python
|
src/genie/libs/parser/ios/tests/test_show_rpf.py
|
kacann/genieparser
|
76e19003199c393c59a33546726de3ff5486da80
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/ios/tests/test_show_rpf.py
|
kacann/genieparser
|
76e19003199c393c59a33546726de3ff5486da80
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/ios/tests/test_show_rpf.py
|
kacann/genieparser
|
76e19003199c393c59a33546726de3ff5486da80
|
[
"Apache-2.0"
] | 1
|
2021-07-07T18:07:56.000Z
|
2021-07-07T18:07:56.000Z
|
# Python
import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
# Metaparset
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.ios.show_rpf import ShowIpRpf, ShowIpv6Rpf
from genie.libs.parser.iosxe.tests.test_show_rpf import test_show_ipv6_rpf as test_show_ipv6_rpf_iosxe
# =============================================
# Unit test for 'show ip rpf <x.x.x.x>'
# Unit test for 'show ip rpf vrf xxx <x.x.x.x>'
# ==============================================
class test_show_ip_rpf(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vrf": {
"default": {
"source_address": "192.168.16.226",
"source_host": "?",
"mofrr": "Enabled",
"path": {
"192.168.145.2 Ethernet1/4": {
"interface_name": "Ethernet1/4",
"neighbor_host": "?",
"neighbor_address": "192.168.145.2",
"table_type": "unicast",
"table_feature": "ospf",
"table_feature_instance": "200",
"distance_preferred_lookup": True,
"lookup_topology": "ipv4 multicast base",
"originated_topology": "ipv4 unicast base"
}
}
}
}
}
golden_output = {'execute.return_value': '''\
Router# show ip rpf 192.168.16.226
RPF information for ? (192.168.16.226) MoFRR Enabled
RPF interface: Ethernet1/4
RPF neighbor: ? (192.168.145.2)
RPF route/mask: 255.255.255.225
RPF type: unicast (ospf 200)
Doing distance-preferred lookups across tables
RPF topology: ipv4 multicast base, originated from ipv4 unicast base
'''}
golden_parsed_output2 = {
"vrf": {
"VRF1": {
"source_address": "192.168.16.226",
"source_host": "?",
"mofrr": "Enabled",
"path": {
"192.168.145.2 Ethernet1/4": {
"interface_name": "Ethernet1/4",
"neighbor_host": "?",
"neighbor_address": "192.168.145.2",
"table_type": "unicast",
"table_feature": "ospf",
"table_feature_instance": "200",
"distance_preferred_lookup": True,
"lookup_topology": "ipv4 multicast base",
"originated_topology": "ipv4 unicast base"
}
}
}
}
}
golden_output2 = {'execute.return_value': '''\
Router# show ip rpf 192.168.16.226
RPF information for ? (192.168.16.226) MoFRR Enabled
RPF interface: Ethernet1/4
RPF neighbor: ? (192.168.145.2)
RPF route/mask: 255.255.255.225
RPF type: unicast (ospf 200)
Doing distance-preferred lookups across tables
RPF topology: ipv4 multicast base, originated from ipv4 unicast base
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowIpRpf(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(mroute='172.16.10.13')
def test_golden_vrf_default(self):
self.device = Mock(**self.golden_output)
obj = ShowIpRpf(device=self.device)
parsed_output = obj.parse(mroute='192.168.16.226')
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_vrf_non_default(self):
self.device = Mock(**self.golden_output2)
obj = ShowIpRpf(device=self.device)
parsed_output = obj.parse(mroute='192.168.16.226', vrf='VRF1')
self.assertEqual(parsed_output,self.golden_parsed_output2)
# =============================================
# Unit test for 'show ipv6 rpf <x.x.x.x>'
# Unit test for 'show ipv6 rpf vrf xxx <x.x.x.x>'
# ==============================================
class test_show_ipv6_rpf(test_show_ipv6_rpf_iosxe):
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowIpv6Rpf(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(mroute='2001:99:99::99')
def test_golden_vrf_default(self):
self.device = Mock(**self.golden_output)
obj = ShowIpv6Rpf(device=self.device)
parsed_output = obj.parse(mroute='2001:99:99::99')
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_vrf_non_default(self):
self.device = Mock(**self.golden_output2)
obj = ShowIpv6Rpf(device=self.device)
parsed_output = obj.parse(mroute='2001:99:99::99', vrf='VRF1')
self.assertEqual(parsed_output,self.golden_parsed_output2)
if __name__ == '__main__':
unittest.main()
| 37.73913
| 102
| 0.549923
| 563
| 5,208
| 4.902309
| 0.186501
| 0.030435
| 0.008696
| 0.031884
| 0.82971
| 0.807971
| 0.792754
| 0.792754
| 0.792754
| 0.773188
| 0
| 0.073481
| 0.304916
| 5,208
| 138
| 103
| 37.73913
| 0.68895
| 0.074117
| 0
| 0.672897
| 0
| 0
| 0.33264
| 0.019543
| 0
| 0
| 0
| 0
| 0.056075
| 1
| 0.056075
| false
| 0
| 0.056075
| 0
| 0.186916
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf3fefcf7f84bd417e5e91e117988f8ac35ef44b
| 148
|
py
|
Python
|
slack/signature/__init__.py
|
timgates42/python-slack-sdk
|
6339fbe81031c9aec3f95927ac03706fd31f3544
|
[
"MIT"
] | 2,486
|
2016-11-03T14:31:43.000Z
|
2020-10-26T23:07:44.000Z
|
slack/signature/__init__.py
|
timgates42/python-slack-sdk
|
6339fbe81031c9aec3f95927ac03706fd31f3544
|
[
"MIT"
] | 721
|
2016-11-03T21:26:56.000Z
|
2020-10-26T12:41:29.000Z
|
slack/signature/__init__.py
|
timgates42/python-slack-sdk
|
6339fbe81031c9aec3f95927ac03706fd31f3544
|
[
"MIT"
] | 627
|
2016-11-02T19:04:19.000Z
|
2020-10-25T19:21:13.000Z
|
from slack_sdk.signature import SignatureVerifier # noqa
from slack import deprecation
deprecation.show_message(__name__, "slack_sdk.signature")
| 24.666667
| 57
| 0.837838
| 18
| 148
| 6.5
| 0.611111
| 0.153846
| 0.290598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101351
| 148
| 5
| 58
| 29.6
| 0.879699
| 0.027027
| 0
| 0
| 0
| 0
| 0.133803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cf8781433225c17f0a98614e2d463f07c4c5572b
| 31
|
py
|
Python
|
python/xpctl/sql/__init__.py
|
domyounglee/baseline
|
2261abfb7e770cc6f3d63a7f6e0015238d0e11f8
|
[
"Apache-2.0"
] | 2
|
2018-07-06T02:01:12.000Z
|
2018-07-06T02:01:14.000Z
|
python/xpctl/sql/__init__.py
|
domyounglee/baseline
|
2261abfb7e770cc6f3d63a7f6e0015238d0e11f8
|
[
"Apache-2.0"
] | null | null | null |
python/xpctl/sql/__init__.py
|
domyounglee/baseline
|
2261abfb7e770cc6f3d63a7f6e0015238d0e11f8
|
[
"Apache-2.0"
] | 3
|
2019-05-27T04:52:21.000Z
|
2022-02-15T00:22:53.000Z
|
from xpctl.sql.backend import *
| 31
| 31
| 0.806452
| 5
| 31
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d85cd4a52d05288d5a8d76a68a46980a536890b0
| 20
|
py
|
Python
|
src/sage/libs/gap/__init__.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/libs/gap/__init__.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/libs/gap/__init__.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
# libgap
import all
| 6.666667
| 10
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 2
| 11
| 10
| 0.9375
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d860a77e7577ee66c71516cdb948c35a25864114
| 118
|
py
|
Python
|
coderdojochi/views/__init__.py
|
rgroves/weallcode-website
|
ead60d3272dbbfe610b2d500978d1de44aef6386
|
[
"MIT"
] | 15
|
2019-05-04T00:24:00.000Z
|
2021-08-21T16:34:05.000Z
|
coderdojochi/views/__init__.py
|
rgroves/weallcode-website
|
ead60d3272dbbfe610b2d500978d1de44aef6386
|
[
"MIT"
] | 73
|
2019-04-24T15:53:42.000Z
|
2021-08-06T20:41:41.000Z
|
coderdojochi/views/__init__.py
|
rgroves/weallcode-website
|
ead60d3272dbbfe610b2d500978d1de44aef6386
|
[
"MIT"
] | 20
|
2019-04-26T20:13:08.000Z
|
2021-06-21T14:53:21.000Z
|
from .calendar import *
from .meetings import *
from .profile import *
from .sessions import *
from .welcome import *
| 19.666667
| 23
| 0.745763
| 15
| 118
| 5.866667
| 0.466667
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 5
| 24
| 23.6
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d8b3dd2bfb52f33b3899944f33e77d42e0b3fb8a
| 46
|
py
|
Python
|
ark_nlp/processor/graph/__init__.py
|
Zrealshadow/ark-nlp
|
159045d17747524bd4e9af7f65f1d0283e8098e6
|
[
"Apache-2.0"
] | 258
|
2021-09-04T14:01:13.000Z
|
2022-03-31T16:34:52.000Z
|
ark_nlp/processor/graph/__init__.py
|
yubuyuabc/ark-nlp
|
165d35cfacd7476791c0aeba19bf43f4f8079553
|
[
"Apache-2.0"
] | 17
|
2022-01-13T04:46:02.000Z
|
2022-03-31T16:34:07.000Z
|
ark_nlp/processor/graph/__init__.py
|
yubuyuabc/ark-nlp
|
165d35cfacd7476791c0aeba19bf43f4f8079553
|
[
"Apache-2.0"
] | 36
|
2021-11-17T06:18:45.000Z
|
2022-03-30T11:32:26.000Z
|
from .text_level_gcn import TextLevelGCNGraph
| 23
| 45
| 0.891304
| 6
| 46
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b2a973894d85bc3f40360cf68258089f09a0861
| 51
|
py
|
Python
|
kits19_3d_segmentation/configs/__init__.py
|
motokimura/kits19_3d_segmentation
|
871a24eaad388b8da427e0ab3c95f951629e36d6
|
[
"MIT"
] | 6
|
2021-03-08T11:46:36.000Z
|
2022-03-25T03:20:02.000Z
|
kits19_3d_segmentation/configs/__init__.py
|
motokimura/kits19_3d_segmentation
|
871a24eaad388b8da427e0ab3c95f951629e36d6
|
[
"MIT"
] | 1
|
2021-03-09T02:06:14.000Z
|
2021-03-09T14:38:05.000Z
|
kits19_3d_segmentation/configs/__init__.py
|
motokimura/kits19_3d_segmentation
|
871a24eaad388b8da427e0ab3c95f951629e36d6
|
[
"MIT"
] | 1
|
2022-02-26T14:30:50.000Z
|
2022-02-26T14:30:50.000Z
|
from .load_config import load_config # noqa: F401
| 25.5
| 50
| 0.784314
| 8
| 51
| 4.75
| 0.75
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0.156863
| 51
| 1
| 51
| 51
| 0.813953
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b37d6fe8355424f2bada5985baa7926f67f737c
| 45
|
py
|
Python
|
quake/job/__init__.py
|
It4innovations/quake
|
a57f37e5c871e0c7c00b84aef638b925ef96690a
|
[
"MIT"
] | 1
|
2021-03-26T14:23:44.000Z
|
2021-03-26T14:23:44.000Z
|
quake/job/__init__.py
|
It4innovations/quake
|
a57f37e5c871e0c7c00b84aef638b925ef96690a
|
[
"MIT"
] | null | null | null |
quake/job/__init__.py
|
It4innovations/quake
|
a57f37e5c871e0c7c00b84aef638b925ef96690a
|
[
"MIT"
] | null | null | null |
from .config import JobConfiguration # noqa
| 22.5
| 44
| 0.8
| 5
| 45
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 1
| 45
| 45
| 0.947368
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b5549957ee4375ba450c3ab021c8842e9378d7d
| 44
|
py
|
Python
|
playground.py
|
Bibhuprasad740/xoxo
|
afe6a25e288df76399b66cd100eb8c2bb6107906
|
[
"MIT"
] | null | null | null |
playground.py
|
Bibhuprasad740/xoxo
|
afe6a25e288df76399b66cd100eb8c2bb6107906
|
[
"MIT"
] | null | null | null |
playground.py
|
Bibhuprasad740/xoxo
|
afe6a25e288df76399b66cd100eb8c2bb6107906
|
[
"MIT"
] | null | null | null |
#comment
print("(New Branch)Playground.py")
| 14.666667
| 34
| 0.75
| 6
| 44
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 2
| 35
| 22
| 0.804878
| 0.159091
| 0
| 0
| 0
| 0
| 0.694444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
2b69d9a8f76fb488395fcac78303d16b7134d262
| 1,801
|
py
|
Python
|
tests/kyu_6_tests/test_rotate_array.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_6_tests/test_rotate_array.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_6_tests/test_rotate_array.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_6.rotate_array import rotate
class RotateTestCase(unittest.TestCase):
    """Tests for katas.kyu_6.rotate_array.rotate.

    Contract exercised below: rotate(data, n) rotates right by n positions;
    negative n rotates left; n larger than len(data) wraps (n mod len);
    n == 0 or a full-length rotation returns the elements unchanged.
    """
    def setUp(self):
        # Fresh fixture for every test method.
        self.data = [1, 2, 3, 4, 5]
    def test_equals(self):
        self.assertEqual(rotate(self.data, 1), [5, 1, 2, 3, 4])
    def test_equals_2(self):
        self.assertEqual(rotate(self.data, 2), [4, 5, 1, 2, 3])
    def test_equals_3(self):
        self.assertEqual(rotate(self.data, 3), [3, 4, 5, 1, 2])
    def test_equals_4(self):
        self.assertEqual(rotate(self.data, 4), [2, 3, 4, 5, 1])
    def test_equals_5(self):
        # Full-length rotation is the identity.
        self.assertEqual(rotate(self.data, 5), [1, 2, 3, 4, 5])
    def test_equals_6(self):
        self.assertEqual(rotate(self.data, 0), [1, 2, 3, 4, 5])
    def test_equals_7(self):
        # Negative n rotates in the opposite (left) direction.
        self.assertEqual(rotate(self.data, -1), [2, 3, 4, 5, 1])
    def test_equals_8(self):
        self.assertEqual(rotate(self.data, -2), [3, 4, 5, 1, 2])
    def test_equals_9(self):
        self.assertEqual(rotate(self.data, -3), [4, 5, 1, 2, 3])
    def test_equals_10(self):
        self.assertEqual(rotate(self.data, -4), [5, 1, 2, 3, 4])
    def test_equals_11(self):
        self.assertEqual(rotate(self.data, -5), [1, 2, 3, 4, 5])
    def test_equals_12(self):
        # n > len(data) wraps: 7 mod 5 == 2.
        self.assertEqual(rotate(self.data, 7), [4, 5, 1, 2, 3])
    def test_equals_13(self):
        self.assertEqual(rotate(self.data, 11), [5, 1, 2, 3, 4])
    def test_equals_14(self):
        # Very large n: 12478 mod 5 == 3.
        self.assertEqual(rotate(self.data, 12478), [3, 4, 5, 1, 2])
    def test_equals_15(self):
        # Element type is irrelevant: strings, floats, bools all rotate.
        self.assertEqual(rotate(['a', 'b', 'c'], 1), ['c', 'a', 'b'])
    def test_equals_16(self):
        self.assertEqual(rotate([1.0, 2.0, 3.0], 1), [3.0, 1.0, 2.0])
    def test_equals_17(self):
        self.assertEqual(rotate([True, True, False], 1), [False, True, True])
| 30.016667
| 77
| 0.590228
| 298
| 1,801
| 3.449664
| 0.137584
| 0.140078
| 0.214981
| 0.413424
| 0.729572
| 0.729572
| 0.601167
| 0.328794
| 0.101167
| 0.101167
| 0
| 0.096057
| 0.22543
| 1,801
| 59
| 78
| 30.525424
| 0.64086
| 0
| 0
| 0
| 0
| 0
| 0.003331
| 0
| 0
| 0
| 0
| 0
| 0.435897
| 1
| 0.461538
| false
| 0
| 0.051282
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
991926332f5ad31ce752d2442b7a6965f5651ce1
| 12,340
|
py
|
Python
|
scripts/schema.py
|
htmlcssphpjs/graphql
|
01fa314de7fdb2d9f90b54d73a0fc79b44c05fdb
|
[
"MIT"
] | null | null | null |
scripts/schema.py
|
htmlcssphpjs/graphql
|
01fa314de7fdb2d9f90b54d73a0fc79b44c05fdb
|
[
"MIT"
] | null | null | null |
scripts/schema.py
|
htmlcssphpjs/graphql
|
01fa314de7fdb2d9f90b54d73a0fc79b44c05fdb
|
[
"MIT"
] | null | null | null |
import strawberry, hashlib
from typing import Optional, List
from scripts.send import emailt
from models import db_session
from models.users import User
from models.ideas import Idea
# Module-import side effect: binds the session factory to the local SQLite
# file before any of the resolvers below run.
db_session.global_init('database.db')
def hash_password(password):
    """Return the hex MD5 digest of *password*.

    NOTE(review): MD5 is a fast, unsalted hash and is unsuitable for
    password storage; it is kept here only because every hash already in
    the database was produced with it — switching algorithms would break
    existing logins and must be done as a deliberate migration.
    """
    return hashlib.md5(password.encode()).hexdigest()
# GraphQL object type mirroring the columns read from the User ORM model.
# (Comments rather than docstrings so strawberry's schema description
# metadata is left untouched.)
@strawberry.type
class UsersType:
    id: str
    name: str
    pitch: str
    picture: str
    skills: Optional[str]
    roadmap: Optional[str]
    email: str
    # NOTE(review): exposing the password hash through the public API is a
    # security smell — confirm this field is really meant to be queryable.
    hashed_password: str
# GraphQL object type mirroring the columns read from the Idea ORM model.
@strawberry.type
class IdeasType:
    id: str
    title: str
    pitch: str
    descriptions: str
    media: Optional[str]
    jobs: Optional[str]
    team: Optional[str]
    roadmap: Optional[str]
# Uniform mutation payload: error is the string 'None' on success, a short
# message otherwise; id echoes the affected row id (or 'None' on failure).
@strawberry.type
class Result:
    error: str
    id: str
# GraphQL mutation root: create/delete/update for User and Idea rows.
# Every mutation returns List[Result]; failures are reported in-band via
# Result.error rather than as GraphQL errors.
@strawberry.type
class Mutation:
    # NOTE(review): this class-level list is shadowed by a local `Create` in
    # every method below and appears unused — candidate for removal.
    Create = []
    @strawberry.mutation
    def create_user(self, id: str, name: str, pitch: str, picture: str, skills: str, roadmap: str, email: str, hashed_password: str) -> List[Result]:
        # Inserts a User row; the `hashed_password` argument is actually the
        # plaintext password and is hashed here before storage.
        try:
            session = db_session.create_session()
            Create = []
            user = User(
                id=id,
                name=name,
                pitch=pitch,
                picture=picture,
                skills=skills,
                roadmap=roadmap,
                email=email,
                hashed_password=hash_password(hashed_password)
            )
            session.add(user)
            session.commit()
            result = Result(
                error='None',
                id=id
            )
            Create.append(result)
            # Best-effort welcome e-mail: failures are logged and swallowed
            # so the mutation still reports success.
            try:
                data = {
                    "name": name,
                    "mail": email,
                    "username": id,
                    "url": 'url'
                }
                emailt(data, 'create', email)
            except Exception as e:
                print(e)
            return Create
        except Exception as e:
            # NOTE(review): catches *any* failure (not just a duplicate key)
            # but always reports 'ID is busy'.
            print(e)
            Create = []
            result = Result(
                error='ID is busy',
                id='None'
            )
            Create.append(result)
            return Create
    @strawberry.mutation
    def create_idea(self, id: str, title: str, pitch: str, descriptions: str, media: Optional[str], jobs: Optional[str], team: Optional[str], roadmap: Optional[str]) -> List[Result]:
        # Inserts an Idea row; same success/failure protocol as create_user,
        # without the notification e-mail.
        try:
            session = db_session.create_session()
            Create = []
            idea = Idea(
                id=id,
                title=title,
                pitch=pitch,
                descriptions=descriptions,
                media=media,
                jobs=jobs,
                team=team,
                roadmap=roadmap
            )
            session.add(idea)
            session.commit()
            result = Result(
                error='None',
                id=id
            )
            Create.append(result)
            return Create
        except Exception as e:
            print(e)
            Create = []
            result = Result(
                error='ID is busy',
                id='None'
            )
            Create.append(result)
            return Create
    @strawberry.mutation
    def delete_user(self, id: str) -> List[Result]:
        # Deletes the User row with the given id and tries to notify the
        # owner by e-mail.
        try:
            session = db_session.create_session()
            # NOTE(review): full-table scan just to recover email/name for
            # the notification — a filtered query would do.
            user_all = session.query(User).all()
            Delete = []
            email = ''
            name = ''
            for user in user_all:
                if user.id == id:
                    email = user.email
                    name = user.name
            find = session.query(User).filter(User.id == id)
            find.delete()
            session.commit()
            # NOTE(review): reports success even when no row matched
            # (query.delete() of zero rows does not raise).
            result = Result(
                error='None',
                id=id
            )
            Delete.append(result)
            # Best-effort deletion e-mail, same swallow-and-log policy as
            # in create_user.
            try:
                data = {
                    "name": name,
                    "mail": email,
                    "username": id,
                    "url": 'url'
                }
                emailt(data, 'delete', email)
            except Exception as e:
                print(e)
            return Delete
        except Exception as e:
            print(e)
            Delete = []
            result = Result(
                error='error',
                id='None'
            )
            Delete.append(result)
            return Delete
    @strawberry.mutation
    def delete_idea(self, id: str) -> List[Result]:
        # Deletes the Idea row with the given id (no notification).
        try:
            session = db_session.create_session()
            Delete = []
            find = session.query(Idea).filter(Idea.id == id)
            find.delete()
            session.commit()
            result = Result(
                error='None',
                id=id
            )
            Delete.append(result)
            return Delete
        except Exception as e:
            print(e)
            Delete = []
            result = Result(
                error='error',
                id='None'
            )
            Delete.append(result)
            return Delete
    @strawberry.mutation
    def update_user(self, id: str, name: str, pitch: str, picture: str, skills: str, roadmap: str, email: str, hashed_password: str) -> List[Result]:
        # Partial update: only truthy arguments overwrite the stored values,
        # so callers can pass '' for fields they want left alone.
        try:
            session = db_session.create_session()
            Create = []
            # NOTE(review): scans all users instead of filtering by id, and
            # commits/reports success even when no user matched.
            user_all = session.query(User).all()
            for user in user_all:
                if user.id == id:
                    if id: user.id = id
                    if name: user.name = name
                    if pitch: user.pitch = pitch
                    if picture: user.picture = picture
                    if skills: user.skills = skills
                    if roadmap: user.roadmap = roadmap
                    if email: user.email = email
                    if hashed_password:
                        user.hashed_password = hash_password(hashed_password)
            session.commit()
            result = Result(
                error='None',
                id=id
            )
            Create.append(result)
            return Create
        except Exception as e:
            print(e)
            Create = []
            result = Result(
                error='ID is busy',
                id='None'
            )
            Create.append(result)
            return Create
    @strawberry.mutation
    def update_idea(self, id: str, title: str, pitch: str, descriptions: str, media: Optional[str], jobs: Optional[str], team: Optional[str], roadmap: Optional[str]) -> List[Result]:
        # Full overwrite: unlike update_user, every field is assigned
        # unconditionally, so empty/None arguments clobber stored values.
        try:
            session = db_session.create_session()
            Create = []
            idea_all = session.query(Idea).all()
            for idea in idea_all:
                if idea.id == id:
                    idea.id = id
                    idea.title = title
                    idea.pitch = pitch
                    idea.descriptions = descriptions
                    idea.media = media
                    idea.jobs = jobs
                    idea.team = team
                    idea.roadmap = roadmap
            session.commit()
            result = Result(
                error='None',
                id=id
            )
            Create.append(result)
            return Create
        except Exception as e:
            print(e)
            Create = []
            result = Result(
                error='ID is busy',
                id='None'
            )
            Create.append(result)
            return Create
# GraphQL query root: single-row lookup (find_*) and list-or-lookup (get_*)
# resolvers for users and ideas.
# NOTE(review): all four resolvers return ORM model instances (User/Idea)
# where the annotations promise UsersType/IdeasType — presumably strawberry
# resolves the fields structurally since the attribute names match, but
# this should be confirmed; building the declared types would be cleaner.
@strawberry.type
class Query:
    @strawberry.field
    def find_user(self, id: Optional[str] = None) -> List[UsersType]:
        session = db_session.create_session()
        # NOTE(review): loads the whole table and scans in Python instead of
        # filtering in the query.
        user_all = session.query(User).all()
        users_list = []
        for user in user_all:
            if (id):
                if str(user.id) == str(id):
                    user_as_dict = User(
                        id=user.id,
                        name=user.name,
                        pitch=user.pitch,
                        picture=user.picture,
                        skills=user.skills,
                        roadmap=user.roadmap,
                        email=user.email,
                        hashed_password=user.hashed_password
                    )
                    users_list.append(user_as_dict)
                    break
            else:
                # No id supplied: bail out immediately, i.e. find_user()
                # without an id always returns [] (use get_users to list).
                break
        # Debug output left in place.
        print(users_list)
        return users_list
    @strawberry.field
    def get_users(self, id: Optional[str] = None) -> List[UsersType]:
        # With an id: behaves like find_user (at most one match).
        # Without an id: returns every user.
        session = db_session.create_session()
        user_all = session.query(User).all()
        users_list = []
        for user in user_all:
            if (id):
                if str(user.id) == str(id):
                    user_as_dict = User(
                        id=user.id,
                        name=user.name,
                        pitch=user.pitch,
                        picture=user.picture,
                        skills=user.skills,
                        roadmap=user.roadmap,
                        email=user.email,
                        hashed_password=user.hashed_password
                    )
                    users_list.append(user_as_dict)
                    break
            else:
                user_as_dict = User(
                    id=user.id,
                    name=user.name,
                    pitch=user.pitch,
                    picture=user.picture,
                    skills=user.skills,
                    roadmap=user.roadmap,
                    email=user.email,
                    hashed_password=user.hashed_password
                )
                users_list.append(user_as_dict)
        print(users_list)
        return users_list
    @strawberry.field
    def find_idea(self, id: Optional[str] = None) -> List[IdeasType]:
        # Idea counterpart of find_user: [] unless an id is given and found.
        session = db_session.create_session()
        idea_all = session.query(Idea).all()
        ideas_list = []
        for idea in idea_all:
            if (id):
                if str(idea.id) == str(id):
                    idea_as_dict = Idea(
                        id=idea.id,
                        title=idea.title,
                        pitch=idea.pitch,
                        descriptions=idea.descriptions,
                        media=idea.media,
                        jobs=idea.jobs,
                        team=idea.team,
                        roadmap=idea.roadmap
                    )
                    ideas_list.append(idea_as_dict)
                    break
            else:
                break
        print(ideas_list)
        return ideas_list
    @strawberry.field
    def get_ideas(self, id: Optional[str] = None) -> List[IdeasType]:
        # Idea counterpart of get_users: one match with an id, all rows
        # without.
        session = db_session.create_session()
        idea_all = session.query(Idea).all()
        ideas_list = []
        for idea in idea_all:
            if (id):
                if str(idea.id) == str(id):
                    idea_as_dict = Idea(
                        id=idea.id,
                        title=idea.title,
                        pitch=idea.pitch,
                        descriptions=idea.descriptions,
                        media=idea.media,
                        jobs=idea.jobs,
                        team=idea.team,
                        roadmap=idea.roadmap
                    )
                    ideas_list.append(idea_as_dict)
                    break
            else:
                idea_as_dict = Idea(
                    id=idea.id,
                    title=idea.title,
                    pitch=idea.pitch,
                    descriptions=idea.descriptions,
                    media=idea.media,
                    jobs=idea.jobs,
                    team=idea.team,
                    roadmap=idea.roadmap
                )
                ideas_list.append(idea_as_dict)
        print(ideas_list)
        return ideas_list
# Executable GraphQL schema assembled from the Query and Mutation roots above.
schema = strawberry.Schema(query=Query, mutation=Mutation)
| 30.696517
| 183
| 0.43436
| 1,103
| 12,340
| 4.763373
| 0.074343
| 0.037686
| 0.038828
| 0.041873
| 0.791968
| 0.771603
| 0.74153
| 0.724971
| 0.713932
| 0.679292
| 0
| 0.000157
| 0.482415
| 12,340
| 401
| 184
| 30.773067
| 0.822452
| 0
| 0
| 0.717877
| 0
| 0
| 0.01382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030726
| false
| 0.030726
| 0.01676
| 0
| 0.162011
| 0.03352
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
99268425708c71db9ebe919461453bfbdc911b30
| 380
|
py
|
Python
|
utils/errors.py
|
roygbip/yuyuko
|
e8027cc638fc92f3a2d671e347215390f3ab0f20
|
[
"Apache-2.0"
] | null | null | null |
utils/errors.py
|
roygbip/yuyuko
|
e8027cc638fc92f3a2d671e347215390f3ab0f20
|
[
"Apache-2.0"
] | null | null | null |
utils/errors.py
|
roygbip/yuyuko
|
e8027cc638fc92f3a2d671e347215390f3ab0f20
|
[
"Apache-2.0"
] | null | null | null |
class Error(object):
    """Lightweight message-carrying error value.

    Wraps a plain string so error results can be printed, compared, and
    truth-tested: an instance is falsy exactly when its message is empty.
    """

    def __init__(self, msg: str = "") -> None:
        super().__init__()
        self._msg = msg

    def __str__(self) -> str:
        return self._msg

    def __repr__(self) -> str:
        return self._msg

    def __eq__(self, o: object) -> bool:
        # Bug fix: the original compared against ``o.__repr__`` — the bound
        # method object itself, never its result — so equality was always
        # False.  Compare against the other object's repr text instead.
        return self._msg == repr(o)

    def __bool__(self) -> bool:
        # Bug fix: the original defined ``__nonzero__``, the Python 2 hook,
        # which Python 3 ignores (every instance was truthy).  ``__bool__``
        # makes ``if err:`` behave as intended.
        return self._msg != ""

    # Keep the old name as an alias in case anything calls it explicitly.
    __nonzero__ = __bool__
| 22.352941
| 46
| 0.555263
| 46
| 380
| 3.869565
| 0.347826
| 0.235955
| 0.292135
| 0.191011
| 0.258427
| 0.258427
| 0
| 0
| 0
| 0
| 0
| 0
| 0.305263
| 380
| 16
| 47
| 23.75
| 0.674242
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0
| 0.333333
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
99362e95d46b10b893dd77df75240db4d18fa926
| 211
|
py
|
Python
|
paramnet/__init__.py
|
spcornelius/paramnet
|
95050664bb6a2c464670eddf8678b3fb795e6bab
|
[
"MIT"
] | null | null | null |
paramnet/__init__.py
|
spcornelius/paramnet
|
95050664bb6a2c464670eddf8678b3fb795e6bab
|
[
"MIT"
] | null | null | null |
paramnet/__init__.py
|
spcornelius/paramnet
|
95050664bb6a2c464670eddf8678b3fb795e6bab
|
[
"MIT"
] | null | null | null |
import paramnet.exceptions
from paramnet.exceptions import *
import paramnet.base
from paramnet.base import *
import paramnet.meta
from paramnet.meta import *
import paramnet.view
from paramnet.view import *
| 17.583333
| 33
| 0.815166
| 28
| 211
| 6.142857
| 0.25
| 0.325581
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127962
| 211
| 11
| 34
| 19.181818
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9968809f62daa4f65250ea60ca197d2d559bd317
| 164
|
py
|
Python
|
raven/projects/Scripts/Na23Calc.py
|
arfc/2019-12-bigdata-npps
|
ebf03664c1d96541956d317f3a305323cf76c23d
|
[
"CC-BY-4.0"
] | null | null | null |
raven/projects/Scripts/Na23Calc.py
|
arfc/2019-12-bigdata-npps
|
ebf03664c1d96541956d317f3a305323cf76c23d
|
[
"CC-BY-4.0"
] | 2
|
2019-10-26T14:32:13.000Z
|
2019-12-17T17:48:05.000Z
|
raven/projects/Scripts/Na23Calc.py
|
arfc/2019-12-bigdata-npps
|
ebf03664c1d96541956d317f3a305323cf76c23d
|
[
"CC-BY-4.0"
] | 3
|
2019-10-25T18:50:31.000Z
|
2020-06-23T04:17:28.000Z
|
import MassFractionCalc
def evaluate(self):
    """Look up the Na23 value for the configured salt/fuel composition.

    Delegates to MassFractionCalc.return_value with this model's
    salt_type, fuel_type, U235F4_mole_frac and UF4_mole_frac attributes
    (presumably set on the container by the RAVEN driver — TODO confirm).
    """
    return MassFractionCalc.return_value('Na23',self.salt_type,self.fuel_type,self.U235F4_mole_frac,self.UF4_mole_frac)
| 41
| 119
| 0.841463
| 24
| 164
| 5.458333
| 0.625
| 0.122137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.060976
| 164
| 3
| 120
| 54.666667
| 0.805195
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
998bc235a4ab4742e0bad9a149a26aa2275f5e52
| 80
|
py
|
Python
|
expkit/experiment/__init__.py
|
jonathangingras/expkit
|
1943543ac6b23e80c59b56b4e998f0b0aaa7d6c8
|
[
"WTFPL"
] | null | null | null |
expkit/experiment/__init__.py
|
jonathangingras/expkit
|
1943543ac6b23e80c59b56b4e998f0b0aaa7d6c8
|
[
"WTFPL"
] | null | null | null |
expkit/experiment/__init__.py
|
jonathangingras/expkit
|
1943543ac6b23e80c59b56b4e998f0b0aaa7d6c8
|
[
"WTFPL"
] | null | null | null |
from .dataset import *
from .experiment_setup import *
from .shortcuts import *
| 20
| 31
| 0.775
| 10
| 80
| 6.1
| 0.6
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 3
| 32
| 26.666667
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5135bfa31c0e171f09e65266c565ad84cc42805e
| 9,865
|
py
|
Python
|
concordat/interface_test.py
|
bmoore813/concordat
|
9a6d92211423583b57d9f771bc3795f8957741c2
|
[
"Apache-2.0"
] | null | null | null |
concordat/interface_test.py
|
bmoore813/concordat
|
9a6d92211423583b57d9f771bc3795f8957741c2
|
[
"Apache-2.0"
] | null | null | null |
concordat/interface_test.py
|
bmoore813/concordat
|
9a6d92211423583b57d9f771bc3795f8957741c2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Tuple
import pytest
from beartype.roar import (
BeartypeCallHintPepReturnException,
BeartypeCallHintPepParamException,
)
from concordat.interface import InterfaceMeta, abstract_method
class IValid(metaclass=InterfaceMeta):
    """Reference interface for the tests below: two fully annotated abstract
    methods that concrete implementations must reproduce exactly."""
    @abstract_method
    def run(self, path: str, id: int) -> None:
        pass
    @abstract_method
    def read(self, path: str) -> None:
        pass
class Valid(IValid):
    """Conforming IValid implementation: matching names, parameters, hints."""
    def run(self, path: str, id: int) -> None:
        print(f"{path} and {id}")
    def read(self, path: str) -> None:
        print(f"path is {path}")
# # TestBuildErrors:
def test_build_missing_method() -> None:
    """Omitting an interface method raises NotImplementedError at class
    creation time (not at call time).
    """
    with pytest.raises(NotImplementedError):
        class MissingMethod(IValid):
            # run() is deliberately missing.
            def read(self, path: str) -> None:
                print(f"path is {path}")
def test_build_misspelled_method() -> None:
    """Test to make sure that the methods are spelled the
    same on the implementation class
    """
    with pytest.raises(NotImplementedError):
        class MisspelledMethod(IValid):
            # 'runs' != 'run', so the interface method is effectively missing.
            def runs(self, path: str, id: int) -> None:
                print(f"{path} and {id}")
            def read(self, path: str) -> None:
                print(f"path is {path}")
def test_build_wrong_arg_names() -> None:
    """Test to make sure that the parameter names
    are exactly the same
    """
    with pytest.raises(TypeError):
        class BadNames(IValid):
            # Correct arity and hints, but different parameter names.
            def run(self, poop: str, identification: int) -> None:
                print(f"{poop} and {identification}")
            def read(self, path: str) -> None:
                print(f"path is {path}")
def test_build_wrong_type_hints() -> None:
    """Test to make sure that the type hints
    match exactly with what we get from the interface
    """
    with pytest.raises(TypeError):
        class BadTypeHints(IValid):
            # Hints differ from the interface on parameters and returns.
            def run(self, path: Dict, id: str) -> Tuple:
                return f"{path} and {id}" # type:ignore
            def read(self, path: int) -> str:
                return f"path is {path}"
# # TestRuntimeErrors:
def test_run_wrong_arg_types() -> None:
    """Passing a str where the interface declares int fails at call time."""
    with pytest.raises(BeartypeCallHintPepParamException):
        v = Valid()
        v.run("test/path", "not an int") # type:ignore
def test_no_errors() -> None:
    """Happy path: conforming calls on a conforming class raise nothing."""
    v = Valid()
    v.run("test/path", 1)
    v.read("hello")
def test_bad_return_type() -> None:
    """A concrete method whose body returns the wrong type trips beartype's
    return-hint check when the method is invoked."""
    with pytest.raises(BeartypeCallHintPepReturnException):
        class IZeus(metaclass=InterfaceMeta):
            @abstract_method
            def run(self, path: str, id: int) -> None:
                pass
            @abstract_method
            def read(self, path: str) -> int:
                pass
        class Zeus(IZeus):
            def run(self, path: str, id: int) -> None:
                print(f"{path} and {id}")
            def read(self, path: str) -> int:
                # Annotated -> int but returns a str: the exception fires
                # at the z.read(...) call below.
                return "p"
        z = Zeus()
        z.run("test/path", 1)
        z.read("hello")
def test_custom_return_type() -> None:
    """User-defined classes work as return hints; returning a non-instance
    raises at call time."""
    class CustomType:
        def __init__(self) -> None:
            # NOTE(review): this is a local variable, not an attribute —
            # probably meant to be self.prop; harmless for this test.
            prop = "test property"
    class IZeus(metaclass=InterfaceMeta):
        @abstract_method
        def run(self) -> CustomType:
            pass
    class Zeus(IZeus):
        def run(self) -> CustomType:
            return CustomType()
    z = Zeus()
    z.run()
    with pytest.raises(BeartypeCallHintPepReturnException):
        class Zeus(IZeus):
            def run(self) -> CustomType:
                # Wrong type: triggers the return-hint exception below.
                return 1
        z = Zeus()
        z.run()
# # TODO: Error
# def test_multiple_return_types() -> None:
# class CustomType:
# def __init__(self) -> None:
# prop = "test property"
# class IZeus(metaclass=InterfaceMeta):
# @abstract_method
# def run(self, has_value: List, name: str) -> Tuple[List, str]:
# pass
# class Zeus(IZeus):
# def run(self, has_value: List, name: str) -> Tuple[List, str]:
# return has_value, name
# z = Zeus()
# a = [1, 2]
# z.run(has_value=a, name="Im a name")
# TestInheritanceErrors:
def test_inheritance_empty() -> None:
    """An empty subclass of a valid implementation builds without error."""
    class InheritEmpty(Valid):
        pass
def test_inheritance_empty_2() -> None:
    """Two levels of empty subclassing also build without error."""
    class InheritEmpty(Valid):
        pass
    class InheritEmpty2(InheritEmpty):
        pass
def test_inheritance_enhancement() -> None:
    """Subclasses may add new methods; inherited methods keep their
    type and arity enforcement."""
    class Enhancement(Valid):
        def new_func(self, path: str, id: int, extra: str) -> None:
            print(f"{path} and {id} and {extra}")
    enhance = Enhancement()
    enhance.run("test", 69)
    enhance.read("considerthisread")
    enhance.new_func("path", 900, "extrasauceplz")
    with pytest.raises(BeartypeCallHintPepParamException):
        enhance.run("test", "not an int") # type:ignore
    with pytest.raises(TypeError):
        # Extra positional argument: plain Python arity error.
        enhance.read("read", "extra arg") # type:ignore
def test_inheritance_override_method() -> None:
    """Overriding an inherited method with different parameter names is
    rejected at class creation time."""
    with pytest.raises(TypeError):
        class BadOverride(Valid):
            def run(self, sheesh: str, id: int) -> None:
                print(f"{sheesh} and {id}")
class IStatic(metaclass=InterfaceMeta):
    """Interface whose abstract methods are declared without self, for the
    static-method variants of the tests above."""
    @abstract_method
    def poop(path: str, id: int) -> None:
        ...
    @abstract_method
    def pee(path: str) -> None:
        ...
class Static(IStatic):
    """Conforming IStatic implementation using @staticmethod."""
    @staticmethod
    def poop(path: str, id: int) -> None:
        print(f"{path} and {id}")
    @staticmethod
    def pee(path: str) -> None:
        print(f"path is {path}")
# class TestBuildErrors:
def test_static_build_missing_method() -> None:
    """Static variant: omitting an interface method raises
    NotImplementedError at class creation time.
    """
    with pytest.raises(NotImplementedError):
        class MissingMethod(IStatic):
            # pee() is deliberately missing.
            @staticmethod
            def poop(path: str, id: int) -> None:
                print(f"path is {path}")
def test_static_build_misspelled_method() -> None:
    """Test to make sure that the methods are spelled the
    same on the implementation class
    """
    with pytest.raises(NotImplementedError):
        class MisspelledMethod(IStatic):
            def poop(path: str, id: int) -> None:
                print(f"{path} and {id}")
            # 'piz' != 'pee', so the interface method is effectively missing.
            def piz(path: str) -> None:
                print(f"path is {path}")
def test_static_build_wrong_arg_names() -> None:
    """Test to make sure that the parameter names
    are exactly the same
    """
    with pytest.raises(TypeError):
        class BadNames(IStatic):
            # Parameter names differ from the interface's.
            def poop(self, poop: str, identification: int) -> None:
                print(f"{poop} and {identification}")
            def pee(self, path: str) -> None:
                print(f"path is {path}")
def test_static_build_wrong_type_hints() -> None:
    """Test to make sure that the type hints
    match exactly with what we get from the interface
    """
    with pytest.raises(TypeError):
        class BadTypeHints(IStatic):
            # Hints differ from the interface on parameters and returns.
            def poop(path: Dict, id: str) -> Tuple:
                return f"{path} and {id}"
            def pee(path: int) -> str:
                return f"path is {path}"
# TestRuntimeErrors:
def test_static_run_wrong_arg_types() -> None:
    """Static variant: a str where the interface declares int fails at
    call time."""
    with pytest.raises(BeartypeCallHintPepParamException):
        v = Static()
        v.poop("test/path", "not an int") # type:ignore
def test_static_no_errors() -> None:
    """Happy path for the static implementation: no exceptions raised."""
    v = Static()
    v.poop("test/path", 1)
    v.pee("hello")
def test_static_bad_return_type() -> None:
    """Static variant: a wrong concrete return type trips beartype's
    return-hint check when the method is invoked.

    Renamed from ``test_bad_return_type``: the module already defines a
    function with that name earlier, so this second definition silently
    shadowed it and pytest could only ever collect one of the two tests.
    The new name also matches the ``test_static_*`` convention used by the
    surrounding static-method tests.
    """
    with pytest.raises(BeartypeCallHintPepReturnException):
        class IZeus(metaclass=InterfaceMeta):
            @abstract_method
            def run(self, path: str, id: int) -> None:
                pass
            @abstract_method
            def read(self, path: str) -> int:
                pass
        class Zeus(IZeus):
            @staticmethod
            def run(path: str, id: int) -> None:
                print(f"{path} and {id}")
            @staticmethod
            def read(path: str) -> int:
                # Annotated -> int but returns a str: the exception fires
                # at the z.read(...) call below.
                print(f"path is {path}")
                return "p"
        z = Zeus()
        z.run("test/path", 1)
        z.read("hello")
# TestInheritanceErrors:
def test_static_inheritance_empty() -> None:
    """An empty subclass of the static implementation builds without error."""
    class InheritEmpty(Static):
        pass
def test_static_inheritance_empty_2() -> None:
    """Two levels of empty subclassing of Static build without error."""
    class InheritEmpty(Static):
        pass
    class InheritEmpty2(InheritEmpty):
        pass
def test_static_inheritance_enhancement() -> None:
    """Static subclasses may add new methods; inherited methods keep their
    type and arity enforcement."""
    class Enhancement(Static):
        def new_func(self, path: str, id: int, extra: str) -> None:
            print(f"{path} and {id} and {extra}")
    enhance = Enhancement()
    enhance.poop("test", 69)
    enhance.pee("considerthisread")
    enhance.new_func("path", 900, "extrasauceplz")
    with pytest.raises(BeartypeCallHintPepParamException):
        enhance.poop("test", "not an int") # type:ignore
    with pytest.raises(TypeError):
        # Extra positional argument: plain Python arity error.
        enhance.pee("read", "extra arg") # type:ignore
def test_static_inheritance_override_method() -> None:
    """Overriding an inherited static method with different parameter names
    is rejected at class creation time."""
    with pytest.raises(TypeError):
        class BadOverride(Static):
            def poop(self, sheesh: str, id: int) -> None:
                print(f"{sheesh} and {id}")
def test_no_abc_impplementation() -> None:
    """Classes built with InterfaceMeta but no abstract methods still get
    call-time hint enforcement on their concrete methods.

    NOTE(review): the function name has a typo ("impplementation"); nothing
    references test functions by name, so renaming would be safe — left
    unchanged in this documentation-only pass.
    """
    class MyClass(metaclass=InterfaceMeta):
        def __init__(self, path: str) -> None:
            print(path)
        def run(self, check: bool) -> int:
            return 1
    m = MyClass(path="1")
    m.run(True)
    with pytest.raises(BeartypeCallHintPepReturnException):
        class BadClass(metaclass=InterfaceMeta):
            def __init__(self, path: int) -> None:
                print(path)
            def run(self, check: bool) -> int:
                # Annotated -> int but returns a str: the exception fires
                # at the b.run(...) call below.
                return "hellp"
        b = BadClass(1)
        b.run(False)
| 25.623377
| 72
| 0.5852
| 1,151
| 9,865
| 4.911381
| 0.119896
| 0.033434
| 0.035379
| 0.039625
| 0.845038
| 0.770564
| 0.733062
| 0.698921
| 0.679993
| 0.668318
| 0
| 0.003449
| 0.294678
| 9,865
| 384
| 73
| 25.690104
| 0.808997
| 0.13259
| 0
| 0.622222
| 0
| 0
| 0.079986
| 0
| 0
| 0
| 0
| 0.002604
| 0
| 1
| 0.293333
| false
| 0.057778
| 0.017778
| 0.035556
| 0.497778
| 0.106667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
5148dfda652988ef6c275e31f7b6d2fe90bf6ec6
| 15,747
|
py
|
Python
|
sdk/python/pulumi_gcp/dataproc/job.py
|
23doors/pulumi-gcp
|
ded01b199f95b164884266ea3e6f8206c8231270
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-12-20T22:08:20.000Z
|
2019-12-20T22:08:20.000Z
|
sdk/python/pulumi_gcp/dataproc/job.py
|
pellizzetti/pulumi-gcp
|
fad74dd55a0cf7723f73046bb0e6fcbfd948ba84
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/dataproc/job.py
|
pellizzetti/pulumi-gcp
|
fad74dd55a0cf7723f73046bb0e6fcbfd948ba84
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Job(pulumi.CustomResource):
driver_controls_files_uri: pulumi.Output[str]
"""
If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
"""
driver_output_resource_uri: pulumi.Output[str]
"""
A URI pointing to the location of the stdout of the job's driver program.
"""
force_delete: pulumi.Output[bool]
"""
By default, you can only delete inactive jobs within
Dataproc. Setting this to true, and calling destroy, will ensure that the
job is first cancelled before issuing the delete.
"""
hadoop_config: pulumi.Output[dict]
hive_config: pulumi.Output[dict]
labels: pulumi.Output[dict]
"""
The list of labels (key/value pairs) to add to the job.
"""
pig_config: pulumi.Output[dict]
placement: pulumi.Output[dict]
project: pulumi.Output[str]
"""
The project in which the `cluster` can be found and jobs
subsequently run against. If it is not provided, the provider project is used.
"""
pyspark_config: pulumi.Output[dict]
reference: pulumi.Output[dict]
region: pulumi.Output[str]
"""
The Cloud Dataproc region. This essentially determines which clusters are available
for this job to be submitted to. If not specified, defaults to `global`.
"""
scheduling: pulumi.Output[dict]
spark_config: pulumi.Output[dict]
sparksql_config: pulumi.Output[dict]
status: pulumi.Output[dict]
def __init__(__self__, resource_name, opts=None, force_delete=None, hadoop_config=None, hive_config=None, labels=None, pig_config=None, placement=None, project=None, pyspark_config=None, reference=None, region=None, scheduling=None, spark_config=None, sparksql_config=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a job resource within a Dataproc cluster within GCE. For more information see
[the official dataproc documentation](https://cloud.google.com/dataproc/).
!> **Note:** This resource does not support 'update' and changing any attributes will cause the resource to be recreated.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] force_delete: By default, you can only delete inactive jobs within
Dataproc. Setting this to true, and calling destroy, will ensure that the
job is first cancelled before issuing the delete.
:param pulumi.Input[dict] labels: The list of labels (key/value pairs) to add to the job.
:param pulumi.Input[str] project: The project in which the `cluster` can be found and jobs
subsequently run against. If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Cloud Dataproc region. This essentially determines which clusters are available
for this job to be submitted to. If not specified, defaults to `global`.
The **hadoop_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainClass` (`pulumi.Input[str]`)
* `mainJarFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
The **hive_config** object supports the following:
* `continueOnFailure` (`pulumi.Input[bool]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **pig_config** object supports the following:
* `continueOnFailure` (`pulumi.Input[bool]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **placement** object supports the following:
* `clusterName` (`pulumi.Input[str]`)
* `clusterUuid` (`pulumi.Input[str]`)
The **pyspark_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainPythonFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
* `pythonFileUris` (`pulumi.Input[list]`)
The **reference** object supports the following:
* `job_id` (`pulumi.Input[str]`)
The **scheduling** object supports the following:
* `maxFailuresPerHour` (`pulumi.Input[float]`)
The **spark_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainClass` (`pulumi.Input[str]`)
* `mainJarFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
The **sparksql_config** object supports the following:
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['force_delete'] = force_delete
__props__['hadoop_config'] = hadoop_config
__props__['hive_config'] = hive_config
__props__['labels'] = labels
__props__['pig_config'] = pig_config
if placement is None:
raise TypeError("Missing required property 'placement'")
__props__['placement'] = placement
__props__['project'] = project
__props__['pyspark_config'] = pyspark_config
__props__['reference'] = reference
__props__['region'] = region
__props__['scheduling'] = scheduling
__props__['spark_config'] = spark_config
__props__['sparksql_config'] = sparksql_config
__props__['driver_controls_files_uri'] = None
__props__['driver_output_resource_uri'] = None
__props__['status'] = None
super(Job, __self__).__init__(
'gcp:dataproc/job:Job',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, driver_controls_files_uri=None, driver_output_resource_uri=None, force_delete=None, hadoop_config=None, hive_config=None, labels=None, pig_config=None, placement=None, project=None, pyspark_config=None, reference=None, region=None, scheduling=None, spark_config=None, sparksql_config=None, status=None):
    """
    Get an existing Job resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param str id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] driver_controls_files_uri: If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
    :param pulumi.Input[str] driver_output_resource_uri: A URI pointing to the location of the stdout of the job's driver program.
    :param pulumi.Input[bool] force_delete: By default, you can only delete inactive jobs within
           Dataproc. Setting this to true, and calling destroy, will ensure that the
           job is first cancelled before issuing the delete.
    :param pulumi.Input[dict] labels: The list of labels (key/value pairs) to add to the job.
    :param pulumi.Input[str] project: The project in which the `cluster` can be found and jobs
           subsequently run against. If it is not provided, the provider project is used.
    :param pulumi.Input[str] region: The Cloud Dataproc region. This essentially determines which clusters are available
           for this job to be submitted to. If not specified, defaults to `global`.

    The **hadoop_config** object supports the following:

    * `archiveUris` (`pulumi.Input[list]`)
    * `args` (`pulumi.Input[list]`)
    * `fileUris` (`pulumi.Input[list]`)
    * `jarFileUris` (`pulumi.Input[list]`)
    * `loggingConfig` (`pulumi.Input[dict]`)
      * `driverLogLevels` (`pulumi.Input[dict]`)
    * `mainClass` (`pulumi.Input[str]`)
    * `mainJarFileUri` (`pulumi.Input[str]`)
    * `properties` (`pulumi.Input[dict]`)

    The **hive_config** object supports the following:

    * `continueOnFailure` (`pulumi.Input[bool]`)
    * `jarFileUris` (`pulumi.Input[list]`)
    * `properties` (`pulumi.Input[dict]`)
    * `queryFileUri` (`pulumi.Input[str]`)
    * `queryLists` (`pulumi.Input[list]`)
    * `scriptVariables` (`pulumi.Input[dict]`)

    The **pig_config** object supports the following:

    * `continueOnFailure` (`pulumi.Input[bool]`)
    * `jarFileUris` (`pulumi.Input[list]`)
    * `loggingConfig` (`pulumi.Input[dict]`)
      * `driverLogLevels` (`pulumi.Input[dict]`)
    * `properties` (`pulumi.Input[dict]`)
    * `queryFileUri` (`pulumi.Input[str]`)
    * `queryLists` (`pulumi.Input[list]`)
    * `scriptVariables` (`pulumi.Input[dict]`)

    The **placement** object supports the following:

    * `clusterName` (`pulumi.Input[str]`)
    * `clusterUuid` (`pulumi.Input[str]`)

    The **pyspark_config** object supports the following:

    * `archiveUris` (`pulumi.Input[list]`)
    * `args` (`pulumi.Input[list]`)
    * `fileUris` (`pulumi.Input[list]`)
    * `jarFileUris` (`pulumi.Input[list]`)
    * `loggingConfig` (`pulumi.Input[dict]`)
      * `driverLogLevels` (`pulumi.Input[dict]`)
    * `mainPythonFileUri` (`pulumi.Input[str]`)
    * `properties` (`pulumi.Input[dict]`)
    * `pythonFileUris` (`pulumi.Input[list]`)

    The **reference** object supports the following:

    * `job_id` (`pulumi.Input[str]`)

    The **scheduling** object supports the following:

    * `maxFailuresPerHour` (`pulumi.Input[float]`)

    The **spark_config** object supports the following:

    * `archiveUris` (`pulumi.Input[list]`)
    * `args` (`pulumi.Input[list]`)
    * `fileUris` (`pulumi.Input[list]`)
    * `jarFileUris` (`pulumi.Input[list]`)
    * `loggingConfig` (`pulumi.Input[dict]`)
      * `driverLogLevels` (`pulumi.Input[dict]`)
    * `mainClass` (`pulumi.Input[str]`)
    * `mainJarFileUri` (`pulumi.Input[str]`)
    * `properties` (`pulumi.Input[dict]`)

    The **sparksql_config** object supports the following:

    * `jarFileUris` (`pulumi.Input[list]`)
    * `loggingConfig` (`pulumi.Input[dict]`)
      * `driverLogLevels` (`pulumi.Input[dict]`)
    * `properties` (`pulumi.Input[dict]`)
    * `queryFileUri` (`pulumi.Input[str]`)
    * `queryLists` (`pulumi.Input[list]`)
    * `scriptVariables` (`pulumi.Input[dict]`)

    The **status** object supports the following:

    * `details` (`pulumi.Input[str]`)
    * `state` (`pulumi.Input[str]`)
    * `stateStartTime` (`pulumi.Input[str]`)
    * `substate` (`pulumi.Input[str]`)

    > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job.html.markdown.
    """
    # Attach the provider id to the options so the engine looks up the
    # existing resource instead of creating a new one.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Assemble the full state bag in a single literal; properties that were
    # not supplied remain None, matching the per-key assignment behaviour.
    __props__ = {
        "driver_controls_files_uri": driver_controls_files_uri,
        "driver_output_resource_uri": driver_output_resource_uri,
        "force_delete": force_delete,
        "hadoop_config": hadoop_config,
        "hive_config": hive_config,
        "labels": labels,
        "pig_config": pig_config,
        "placement": placement,
        "project": project,
        "pyspark_config": pyspark_config,
        "reference": reference,
        "region": region,
        "scheduling": scheduling,
        "spark_config": spark_config,
        "sparksql_config": sparksql_config,
        "status": status,
    }
    return Job(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
    """Map a provider (camelCase) output property name to its snake_case form.

    Falls back to the name itself when no table entry exists.
    """
    translated = tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return translated if translated else prop
def translate_input_property(self, prop):
    """Map a snake_case input property name to its provider (camelCase) form.

    Falls back to the name itself when no table entry exists.
    """
    translated = tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return translated if translated else prop
| 45.643478
| 345
| 0.612561
| 1,684
| 15,747
| 5.52791
| 0.142518
| 0.139435
| 0.064454
| 0.053067
| 0.773338
| 0.736384
| 0.731658
| 0.731658
| 0.724353
| 0.724353
| 0
| 0.000087
| 0.26894
| 15,747
| 344
| 346
| 45.776163
| 0.808548
| 0.537817
| 0
| 0.022727
| 1
| 0
| 0.1505
| 0.022184
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.011364
| 0.068182
| 0.022727
| 0.340909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5aa635f5e35eb1eb055c4acf7c32ba24fa54ced4
| 45
|
py
|
Python
|
pgreaper/io/__init__.py
|
vincentlaucsb/sqlify
|
cc84be6efef7b904aacc463d5b0211b3e52a8f25
|
[
"MIT"
] | 8
|
2017-05-01T10:11:40.000Z
|
2017-07-26T08:52:43.000Z
|
pgreaper/io/__init__.py
|
vincentlaucsb/pgreaper
|
cc84be6efef7b904aacc463d5b0211b3e52a8f25
|
[
"MIT"
] | 4
|
2017-05-01T13:11:05.000Z
|
2017-08-06T06:18:34.000Z
|
pgreaper/io/__init__.py
|
vincentlaucsb/sqlify
|
cc84be6efef7b904aacc463d5b0211b3e52a8f25
|
[
"MIT"
] | null | null | null |
from .json_reader import JSONStreamingDecoder
| 45
| 45
| 0.911111
| 5
| 45
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ab91551496f52e230550f46bbeeead29f193031
| 5,703
|
py
|
Python
|
DAY_3/SOLUTION_2.py
|
Malod219/AdventOfCode2019
|
df4c45a6917580075622dd063e007860c5425328
|
[
"Unlicense"
] | null | null | null |
DAY_3/SOLUTION_2.py
|
Malod219/AdventOfCode2019
|
df4c45a6917580075622dd063e007860c5425328
|
[
"Unlicense"
] | null | null | null |
DAY_3/SOLUTION_2.py
|
Malod219/AdventOfCode2019
|
df4c45a6917580075622dd063e007860c5425328
|
[
"Unlicense"
] | null | null | null |
with open("input1.txt","r") as f:
data = f.readlines()
data[0] = data[0].split(',')
data[1] = data[1].split(',')
# Might need to change w if too big
w = 22000
h = w
# 2000x2000 Wirespace
wireSpace = [[0 for x in range(w)] for y in range(h)]
centerX = w//2
centerY = h//2
currentPosX = centerX
currentPosY = centerY
# Plot all wire commands in the first line
for command in data[0]:
#print(command)
opcode, parameter = command[0], int(command[1:])
try:
if(opcode == "R"):
for i in range(parameter):
wireSpace[currentPosY][currentPosX+i] = 1
currentPosX += parameter
elif(opcode == "L"):
for i in range(parameter):
wireSpace[currentPosY][currentPosX-i] = 1
currentPosX -= parameter
elif(opcode == "U"):
for i in range(parameter):
wireSpace[currentPosY-i][currentPosX] = 1
currentPosY -= parameter
elif(opcode == "D"):
for i in range(parameter):
wireSpace[currentPosY+i][currentPosX] = 1
currentPosY += parameter
except:
print("Failed.\nOpcode: {}\nParameter: {}\nCurrent XY Coordinate: {}, {}".format(opcode, parameter, currentPosX, currentPosY))
break;
currentPosX = centerX
currentPosY = centerY
collisionPoints = []
for command in data[1]:
#print(command)
opcode, parameter = command[0], int(command[1:])
try:
if(opcode == "R"):
for i in range(parameter):
if(wireSpace[currentPosY][currentPosX+i] == 1):
if(currentPosX != centerX & currentPosY != centerY):
wireSpace[currentPosY][currentPosX+i]=2
collisionPoints.append([currentPosX+i,currentPosY,99999,99999])
currentPosX += parameter
elif(opcode == "L"):
for i in range(parameter):
if(wireSpace[currentPosY][currentPosX-i] == 1):
if(currentPosX != centerX & currentPosY != centerY):
wireSpace[currentPosY][currentPosX-i]=2
collisionPoints.append([currentPosX-i,currentPosY,99999,99999])
currentPosX -= parameter
elif(opcode == "U"):
for i in range(parameter):
if(wireSpace[currentPosY-i][currentPosX] == 1):
if(currentPosX != centerX & currentPosY != centerY):
wireSpace[currentPosY-i][currentPosX]=2
collisionPoints.append([currentPosX,currentPosY-i,99999,99999])
currentPosY -= parameter
elif(opcode == "D"):
for i in range(parameter):
if(wireSpace[currentPosY+i][currentPosX] == 1):
if(currentPosX != centerX & currentPosY != centerY):
wireSpace[currentPosY+i][currentPosX]=2
collisionPoints.append([currentPosX,currentPosY+i,99999,99999])
currentPosY += parameter
except:
print("Failed.\nOpcode: {}\nParameter: {}\nCurrent XY Coordinate: {}, {}".format(opcode, parameter, currentPosX, currentPosY))
break;
def getStepsCountToIntersect(data, pos):
    """Walk one wire's commands and record the minimum step count to each
    marked intersection.

    For every cell marked 2 in the module-level ``wireSpace``, the matching
    entry in module-level ``collisionPoints`` has element ``pos`` lowered to
    the fewest steps this wire needs to reach that cell.

    Args:
        data: list of command strings (e.g. ``"R75"``) for a single wire.
        pos: index (2 or 3) of the per-wire step slot to update in each
            ``collisionPoints`` entry.

    Relies on module-level ``wireSpace``, ``collisionPoints``, ``centerX``
    and ``centerY``.
    """
    currentPosX = centerX
    currentPosY = centerY
    steps = 0
    # Direction letter -> (dx, dy) unit offset; replaces four copy-pasted
    # branches from the original.
    deltas = {"R": (1, 0), "L": (-1, 0), "U": (0, -1), "D": (0, 1)}
    for command in data:
        opcode, parameter = command[0], int(command[1:])
        try:
            dx, dy = deltas[opcode]
            for i in range(parameter):
                x = currentPosX + dx * i
                y = currentPosY + dy * i
                if wireSpace[y][x] == 2:
                    for point in collisionPoints:
                        if x == point[0] and y == point[1]:
                            # Keep the smallest step count seen for this wire.
                            point[pos] = min(point[pos], steps + i)
            currentPosX += dx * parameter
            currentPosY += dy * parameter
            steps += parameter
        except (IndexError, KeyError):
            # Was a bare `except:`; IndexError = ran off the grid,
            # KeyError = unknown direction letter.
            print("Failed.\nOpcode: {}\nParameter: {}\nCurrent XY Coordinate: {}, {}".format(opcode, parameter, currentPosX, currentPosY))
            break
# Fill in each intersection's per-wire step counts, then report the best
# combined total.
getStepsCountToIntersect(data[0], 2)
getStepsCountToIntersect(data[1], 3)
# The leading 99999 preserves the original clamp (and the original result
# when no intersections were found).
minSum = min([99999] + [point[2] + point[3] for point in collisionPoints])
print(minSum)
# Fixed typo in the completion message ("Succesfully").
print("Successfully ended")
| 42.879699
| 138
| 0.550587
| 561
| 5,703
| 5.597148
| 0.137255
| 0.040127
| 0.02293
| 0.042038
| 0.83758
| 0.83758
| 0.83758
| 0.83758
| 0.83758
| 0.83758
| 0
| 0.027344
| 0.32667
| 5,703
| 132
| 139
| 43.204545
| 0.790365
| 0.031036
| 0
| 0.594828
| 0
| 0
| 0.04295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008621
| false
| 0
| 0
| 0
| 0.008621
| 0.043103
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
51aafd5e472cb867a4504109b9913174123657d8
| 213
|
py
|
Python
|
hibiscus_connect/hibiscus_connect/doctype/hibiscus_connect_bank_account/hibiscus_connect_bank_account.py
|
itsdave-de/hibiscus_connect
|
b535657336a4c37f558ef76cd7662984d833e4dc
|
[
"MIT"
] | null | null | null |
hibiscus_connect/hibiscus_connect/doctype/hibiscus_connect_bank_account/hibiscus_connect_bank_account.py
|
itsdave-de/hibiscus_connect
|
b535657336a4c37f558ef76cd7662984d833e4dc
|
[
"MIT"
] | null | null | null |
hibiscus_connect/hibiscus_connect/doctype/hibiscus_connect_bank_account/hibiscus_connect_bank_account.py
|
itsdave-de/hibiscus_connect
|
b535657336a4c37f558ef76cd7662984d833e4dc
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, itsdave GmbH and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class HibiscusConnectBankAccount(Document):
    """Frappe DocType controller for a Hibiscus Connect bank account.

    No custom behaviour yet; everything is inherited from ``Document``.
    """
| 23.666667
| 51
| 0.807512
| 26
| 213
| 6.615385
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021622
| 0.131455
| 213
| 8
| 52
| 26.625
| 0.908108
| 0.521127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
51cc4e9bbadf64adc659dec8ed89c875e19578e7
| 23,457
|
py
|
Python
|
flash/tabular/regression/data.py
|
ar90n/lightning-flash
|
61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131
|
[
"Apache-2.0"
] | null | null | null |
flash/tabular/regression/data.py
|
ar90n/lightning-flash
|
61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131
|
[
"Apache-2.0"
] | null | null | null |
flash/tabular/regression/data.py
|
ar90n/lightning-flash
|
61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Type, Union
from flash.core.data.io.input import Input
from flash.core.data.io.input_transform import INPUT_TRANSFORM_TYPE, InputTransform
from flash.core.utilities.imports import _PANDAS_AVAILABLE, _TABULAR_TESTING
from flash.core.utilities.stages import RunningStage
from flash.tabular.data import TabularData
from flash.tabular.regression.input import (
TabularRegressionCSVInput,
TabularRegressionDataFrameInput,
TabularRegressionDictInput,
TabularRegressionListInput,
)
# Fall back to a bare ``object`` placeholder so the ``Optional[DataFrame]``
# annotations below still resolve when pandas is not installed.
if _PANDAS_AVAILABLE:
    from pandas.core.frame import DataFrame
else:
    DataFrame = object

# Skip doctests if requirements aren't available
if not _TABULAR_TESTING:
    __doctest_skip__ = ["TabularRegressionData", "TabularRegressionData.*"]
class TabularRegressionData(TabularData):
"""The ``TabularRegressionData`` class is a :class:`~flash.core.data.data_module.DataModule` with a set of
classmethods for loading data for tabular regression."""
@classmethod
def from_data_frame(
    cls,
    categorical_fields: Optional[Union[str, List[str]]] = None,
    numerical_fields: Optional[Union[str, List[str]]] = None,
    target_field: Optional[str] = None,
    parameters: Optional[Dict[str, Any]] = None,
    train_data_frame: Optional[DataFrame] = None,
    val_data_frame: Optional[DataFrame] = None,
    test_data_frame: Optional[DataFrame] = None,
    predict_data_frame: Optional[DataFrame] = None,
    input_cls: Type[Input] = TabularRegressionDataFrameInput,
    transform: INPUT_TRANSFORM_TYPE = InputTransform,
    transform_kwargs: Optional[Dict] = None,
    **data_module_kwargs: Any,
) -> "TabularRegressionData":
    """Creates a :class:`~flash.tabular.regression.data.TabularRegressionData` object from the given data
    frames.

    .. note::

        The ``categorical_fields``, ``numerical_fields``, and ``target_field`` do not need to be provided if
        ``parameters`` are passed instead. These can be obtained from the
        :attr:`~flash.tabular.data.TabularData.parameters` attribute of the
        :class:`~flash.tabular.data.TabularData` object that contains your training data.

    The targets will be extracted from the ``target_field`` in the data frames.

    To learn how to customize the transforms applied for each stage, read our
    :ref:`customizing transforms guide <customizing_transforms>`.

    Args:
        categorical_fields: The fields (column names) in the data frames containing categorical data.
        numerical_fields: The fields (column names) in the data frames containing numerical data.
        target_field: The field (column name) in the data frames containing the targets.
        parameters: Parameters to use if ``categorical_fields``, ``numerical_fields``, and ``target_field`` are not
            provided (e.g. when loading data for inference or validation).
        train_data_frame: The DataFrame to use when training.
        val_data_frame: The DataFrame to use when validating.
        test_data_frame: The DataFrame to use when testing.
        predict_data_frame: The DataFrame to use when predicting.
        input_cls: The :class:`~flash.core.data.io.input.Input` type to use for loading the data.
        transform: The :class:`~flash.core.data.io.input_transform.InputTransform` type to use.
        transform_kwargs: Dict of keyword arguments to be provided when instantiating the transforms.
        data_module_kwargs: Additional keyword arguments to provide to the
            :class:`~flash.core.data.data_module.DataModule` constructor.

    Returns:
        The constructed :class:`~flash.tabular.regression.data.TabularRegressionData`.

    Examples
    ________

    .. testsetup::

        >>> from pandas import DataFrame
        >>> train_data = DataFrame.from_dict({
        ...     "age": [2, 4, 1],
        ...     "animal": ["cat", "dog", "cat"],
        ...     "weight": [6, 10, 5],
        ... })
        >>> predict_data = DataFrame.from_dict({
        ...     "animal": ["dog", "dog", "cat"],
        ...     "weight": [7, 12, 5],
        ... })

    We have a DataFrame ``train_data`` with the following contents:

    .. doctest::

        >>> train_data.head(3)
        age animal weight
        0 2 cat 6
        1 4 dog 10
        2 1 cat 5

    and a DataFrame ``predict_data`` with the following contents:

    .. doctest::

        >>> predict_data.head(3)
        animal weight
        0 dog 7
        1 dog 12
        2 cat 5

    .. doctest::

        >>> from flash import Trainer
        >>> from flash.tabular import TabularRegressor, TabularRegressionData
        >>> datamodule = TabularRegressionData.from_data_frame(
        ...     "animal",
        ...     "weight",
        ...     "age",
        ...     train_data_frame=train_data,
        ...     predict_data_frame=predict_data,
        ...     batch_size=4,
        ... )
        >>> model = TabularRegressor.from_data(datamodule, backbone="tabnet")
        >>> trainer = Trainer(fast_dev_run=True)
        >>> trainer.fit(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Training...
        >>> trainer.predict(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Predicting...

    .. testcleanup::

        >>> del train_data
        >>> del predict_data
    """
    ds_kw = dict(
        categorical_fields=categorical_fields,
        numerical_fields=numerical_fields,
        target_field=target_field,
        parameters=parameters,
    )
    # The training input computes dataset ``parameters`` -- presumably the
    # fitted preprocessing state (confirm in TabularData) -- which are then
    # propagated so val/test/predict inputs share the same preprocessing.
    train_input = input_cls(RunningStage.TRAINING, train_data_frame, **ds_kw)
    ds_kw["parameters"] = train_input.parameters if train_input else parameters
    return cls(
        train_input,
        input_cls(RunningStage.VALIDATING, val_data_frame, **ds_kw),
        input_cls(RunningStage.TESTING, test_data_frame, **ds_kw),
        input_cls(RunningStage.PREDICTING, predict_data_frame, **ds_kw),
        transform=transform,
        transform_kwargs=transform_kwargs,
        **data_module_kwargs,
    )
@classmethod
def from_csv(
    cls,
    categorical_fields: Optional[Union[str, List[str]]] = None,
    numerical_fields: Optional[Union[str, List[str]]] = None,
    target_field: Optional[str] = None,
    parameters: Optional[Dict[str, Any]] = None,
    train_file: Optional[str] = None,
    val_file: Optional[str] = None,
    test_file: Optional[str] = None,
    predict_file: Optional[str] = None,
    input_cls: Type[Input] = TabularRegressionCSVInput,
    transform: INPUT_TRANSFORM_TYPE = InputTransform,
    transform_kwargs: Optional[Dict] = None,
    **data_module_kwargs: Any,
) -> "TabularRegressionData":
    """Creates a :class:`~flash.tabular.regression.data.TabularRegressionData` object from the given CSV files.

    .. note::

        The ``categorical_fields``, ``numerical_fields``, and ``target_field`` do not need to be provided if
        ``parameters`` are passed instead. These can be obtained from the
        :attr:`~flash.tabular.data.TabularData.parameters` attribute of the
        :class:`~flash.tabular.data.TabularData` object that contains your training data.

    The targets will be extracted from the ``target_field`` in the CSV files.

    To learn how to customize the transforms applied for each stage, read our
    :ref:`customizing transforms guide <customizing_transforms>`.

    Args:
        categorical_fields: The fields (column names) in the CSV files containing categorical data.
        numerical_fields: The fields (column names) in the CSV files containing numerical data.
        target_field: The field (column name) in the CSV files containing the targets.
        parameters: Parameters to use if ``categorical_fields``, ``numerical_fields``, and ``target_field`` are not
            provided (e.g. when loading data for inference or validation).
        train_file: The path to the CSV file to use when training.
        val_file: The path to the CSV file to use when validating.
        test_file: The path to the CSV file to use when testing.
        predict_file: The path to the CSV file to use when predicting.
        input_cls: The :class:`~flash.core.data.io.input.Input` type to use for loading the data.
        transform: The :class:`~flash.core.data.io.input_transform.InputTransform` type to use.
        transform_kwargs: Dict of keyword arguments to be provided when instantiating the transforms.
        data_module_kwargs: Additional keyword arguments to provide to the
            :class:`~flash.core.data.data_module.DataModule` constructor.

    Returns:
        The constructed :class:`~flash.tabular.regression.data.TabularRegressionData`.

    Examples
    ________

    .. testsetup::

        >>> from pandas import DataFrame
        >>> DataFrame.from_dict({
        ...     "age": [2, 4, 1],
        ...     "animal": ["cat", "dog", "cat"],
        ...     "weight": [6, 10, 5],
        ... }).to_csv("train_data.csv")
        >>> DataFrame.from_dict({
        ...     "animal": ["dog", "dog", "cat"],
        ...     "weight": [7, 12, 5],
        ... }).to_csv("predict_data.csv")

    We have a ``train_data.csv`` with the following contents:

    .. code-block::

        age,animal,weight
        2,cat,6
        4,dog,10
        1,cat,5

    and a ``predict_data.csv`` with the following contents:

    .. code-block::

        animal,weight
        dog,7
        dog,12
        cat,5

    .. doctest::

        >>> from flash import Trainer
        >>> from flash.tabular import TabularRegressor, TabularRegressionData
        >>> datamodule = TabularRegressionData.from_csv(
        ...     "animal",
        ...     "weight",
        ...     "age",
        ...     train_file="train_data.csv",
        ...     predict_file="predict_data.csv",
        ...     batch_size=4,
        ... )
        >>> model = TabularRegressor.from_data(datamodule, backbone="tabnet")
        >>> trainer = Trainer(fast_dev_run=True)
        >>> trainer.fit(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Training...
        >>> trainer.predict(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Predicting...

    .. testcleanup::

        >>> import os
        >>> os.remove("train_data.csv")
        >>> os.remove("predict_data.csv")
    """
    ds_kw = dict(
        categorical_fields=categorical_fields,
        numerical_fields=numerical_fields,
        target_field=target_field,
        parameters=parameters,
    )
    # The training input computes dataset ``parameters`` -- presumably the
    # fitted preprocessing state (confirm in TabularData) -- which are then
    # propagated so val/test/predict inputs share the same preprocessing.
    train_input = input_cls(RunningStage.TRAINING, train_file, **ds_kw)
    ds_kw["parameters"] = train_input.parameters if train_input else parameters
    return cls(
        train_input,
        input_cls(RunningStage.VALIDATING, val_file, **ds_kw),
        input_cls(RunningStage.TESTING, test_file, **ds_kw),
        input_cls(RunningStage.PREDICTING, predict_file, **ds_kw),
        transform=transform,
        transform_kwargs=transform_kwargs,
        **data_module_kwargs,
    )
@classmethod
def from_dicts(
    cls,
    categorical_fields: Optional[Union[str, List[str]]] = None,
    numerical_fields: Optional[Union[str, List[str]]] = None,
    target_field: Optional[str] = None,
    parameters: Optional[Dict[str, Any]] = None,
    train_dict: Optional[Dict[str, List[Any]]] = None,
    val_dict: Optional[Dict[str, List[Any]]] = None,
    test_dict: Optional[Dict[str, List[Any]]] = None,
    predict_dict: Optional[Dict[str, List[Any]]] = None,
    input_cls: Type[Input] = TabularRegressionDictInput,
    transform: INPUT_TRANSFORM_TYPE = InputTransform,
    transform_kwargs: Optional[Dict] = None,
    **data_module_kwargs: Any,
) -> "TabularRegressionData":
    """Creates a :class:`~flash.tabular.regression.data.TabularRegressionData` object from the given
    dictionary.

    .. note::

        The ``categorical_fields``, ``numerical_fields``, and ``target_field`` do not need to be provided if
        ``parameters`` are passed instead. These can be obtained from the
        :attr:`~flash.tabular.data.TabularData.parameters` attribute of the
        :class:`~flash.tabular.data.TabularData` object that contains your training data.

    The targets will be extracted from the ``target_field`` in the data frames.

    To learn how to customize the transforms applied for each stage, read our
    :ref:`customizing transforms guide <customizing_transforms>`.

    Args:
        categorical_fields: The fields (column names) in the dictionary containing categorical data.
        numerical_fields: The fields (column names) in the dictionary containing numerical data.
        target_field: The field (column name) in the dictionary containing the targets.
        parameters: Parameters to use if ``categorical_fields``, ``numerical_fields``, and ``target_field`` are not
            provided (e.g. when loading data for inference or validation).
        train_dict: The dictionary to use when training.
        val_dict: The dictionary to use when validating.
        test_dict: The dictionary to use when testing.
        predict_dict: The dictionary to use when predicting.
        input_cls: The :class:`~flash.core.data.io.input.Input` type to use for loading the data.
        transform: The :class:`~flash.core.data.io.input_transform.InputTransform` type to use.
        transform_kwargs: Dict of keyword arguments to be provided when instantiating the transforms.
        data_module_kwargs: Additional keyword arguments to provide to the
            :class:`~flash.core.data.data_module.DataModule` constructor.

    Returns:
        The constructed :class:`~flash.tabular.regression.data.TabularRegressionData`.

    Examples
    ________

    .. testsetup::

        >>> train_data = {
        ...     "age": [2, 4, 1],
        ...     "animal": ["cat", "dog", "cat"],
        ...     "weight": [6, 10, 5],
        ... }
        >>> predict_data = {
        ...     "animal": ["dog", "dog", "cat"],
        ...     "weight": [7, 12, 5],
        ... }

    We have a dictionary ``train_data`` with the following contents:

    .. code-block::

        {
            "age": [2, 4, 1],
            "animal": ["cat", "dog", "cat"],
            "weight": [6, 10, 5]
        }

    and a dictionary ``predict_data`` with the following contents:

    .. code-block::

        {
            "animal": ["dog", "dog", "cat"],
            "weight": [7, 12, 5]
        }

    .. doctest::

        >>> from flash import Trainer
        >>> from flash.tabular import TabularRegressor, TabularRegressionData
        >>> datamodule = TabularRegressionData.from_dicts(
        ...     "animal",
        ...     "weight",
        ...     "age",
        ...     train_dict=train_data,
        ...     predict_dict=predict_data,
        ...     batch_size=4,
        ... )
        >>> model = TabularRegressor.from_data(datamodule, backbone="tabnet")
        >>> trainer = Trainer(fast_dev_run=True)
        >>> trainer.fit(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Training...
        >>> trainer.predict(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Predicting...

    .. testcleanup::

        >>> del train_data
        >>> del predict_data
    """
    ds_kw = dict(
        categorical_fields=categorical_fields,
        numerical_fields=numerical_fields,
        target_field=target_field,
        parameters=parameters,
    )
    # The training input computes dataset ``parameters`` -- presumably the
    # fitted preprocessing state (confirm in TabularData) -- which are then
    # propagated so val/test/predict inputs share the same preprocessing.
    train_input = input_cls(RunningStage.TRAINING, train_dict, **ds_kw)
    ds_kw["parameters"] = train_input.parameters if train_input else parameters
    return cls(
        train_input,
        input_cls(RunningStage.VALIDATING, val_dict, **ds_kw),
        input_cls(RunningStage.TESTING, test_dict, **ds_kw),
        input_cls(RunningStage.PREDICTING, predict_dict, **ds_kw),
        transform=transform,
        transform_kwargs=transform_kwargs,
        **data_module_kwargs,
    )
@classmethod
def from_lists(
    cls,
    categorical_fields: Optional[Union[str, List[str]]] = None,
    numerical_fields: Optional[Union[str, List[str]]] = None,
    target_field: Optional[str] = None,
    parameters: Optional[Dict[str, Any]] = None,
    train_list: Optional[List[Union[tuple, dict]]] = None,
    val_list: Optional[List[Union[tuple, dict]]] = None,
    test_list: Optional[List[Union[tuple, dict]]] = None,
    predict_list: Optional[List[Union[tuple, dict]]] = None,
    input_cls: Type[Input] = TabularRegressionListInput,
    transform: INPUT_TRANSFORM_TYPE = InputTransform,
    transform_kwargs: Optional[Dict] = None,
    **data_module_kwargs: Any,
) -> "TabularRegressionData":
    """Creates a :class:`~flash.tabular.regression.data.TabularRegressionData` object from the given data (in
    the form of a list of tuples or dictionaries).

    .. note::

        The ``categorical_fields``, ``numerical_fields``, and ``target_field`` do not need to be provided if
        ``parameters`` are passed instead. These can be obtained from the
        :attr:`~flash.tabular.data.TabularData.parameters` attribute of the
        :class:`~flash.tabular.data.TabularData` object that contains your training data.

    The targets will be extracted from the ``target_field`` in the data frames.
    To learn how to customize the transforms applied for each stage, read our
    :ref:`customizing transforms guide <customizing_transforms>`.

    Args:
        categorical_fields: The fields (column names) in the dictionary containing categorical data.
        numerical_fields: The fields (column names) in the dictionary containing numerical data.
        target_field: The field (column name) in the dictionary containing the targets.
        parameters: Parameters to use if ``categorical_fields``, ``numerical_fields``, and ``target_field`` are
            not provided (e.g. when loading data for inference or validation).
        train_list: The data to use when training.
        val_list: The data to use when validating.
        test_list: The data to use when testing.
        predict_list: The data to use when predicting.
        input_cls: The :class:`~flash.core.data.io.input.Input` type to use for loading the data.
        transform: The :class:`~flash.core.data.io.input_transform.InputTransform` type to use.
        transform_kwargs: Dict of keyword arguments to be provided when instantiating the transforms.
        data_module_kwargs: Additional keyword arguments to provide to the
            :class:`~flash.core.data.data_module.DataModule` constructor.

    Returns:
        The constructed :class:`~flash.tabular.regression.data.TabularRegressionData`.

    Examples
    ________

    .. testsetup::

        >>> train_data = [
        ...     {"age": 2, "animal": "cat", "weight": 6},
        ...     {"age": 4, "animal": "dog", "weight": 10},
        ...     {"age": 1, "animal": "cat", "weight": 5},
        ... ]
        >>> predict_data = [
        ...     {"animal": "dog", "weight": 7},
        ...     {"animal": "dog", "weight": 12},
        ...     {"animal": "cat", "weight": 5},
        ... ]

    We have a list of dictionaries ``train_data`` with the following contents:

    .. code-block::

        [
            {"age": 2, "animal": "cat", "weight": 6},
            {"age": 4, "animal": "dog", "weight": 10},
            {"age": 1, "animal": "cat", "weight": 5},
        ]

    and a list of dictionaries ``predict_data`` with the following contents:

    .. code-block::

        [
            {"animal": "dog", "weight": 7},
            {"animal": "dog", "weight": 12},
            {"animal": "cat", "weight": 5},
        ]

    .. doctest::

        >>> from flash import Trainer
        >>> from flash.tabular import TabularRegressor, TabularRegressionData
        >>> datamodule = TabularRegressionData.from_lists(
        ...     "animal",
        ...     "weight",
        ...     "age",
        ...     train_list=train_data,
        ...     predict_list=predict_data,
        ...     batch_size=4,
        ... )
        >>> model = TabularRegressor.from_data(datamodule, backbone="tabnet")
        >>> trainer = Trainer(fast_dev_run=True)
        >>> trainer.fit(model, datamodule=datamodule)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Training...
        >>> trainer.predict(model, datamodule=datamodule)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Predicting...

    .. testcleanup::

        >>> del train_data
        >>> del predict_data
    """
    # Keyword arguments shared by the Input objects created for every stage.
    ds_kw = dict(
        categorical_fields=categorical_fields,
        numerical_fields=numerical_fields,
        target_field=target_field,
        parameters=parameters,
    )
    train_input = input_cls(RunningStage.TRAINING, train_list, **ds_kw)
    # Reuse the preprocessing parameters computed from the training split (when
    # available) so validation / test / predict inputs are encoded consistently.
    ds_kw["parameters"] = train_input.parameters if train_input else parameters
    return cls(
        train_input,
        input_cls(RunningStage.VALIDATING, val_list, **ds_kw),
        input_cls(RunningStage.TESTING, test_list, **ds_kw),
        input_cls(RunningStage.PREDICTING, predict_list, **ds_kw),
        transform=transform,
        transform_kwargs=transform_kwargs,
        **data_module_kwargs,
    )
| 42.417722
| 119
| 0.600503
| 2,503
| 23,457
| 5.470635
| 0.093488
| 0.022493
| 0.024538
| 0.017089
| 0.849923
| 0.821076
| 0.789454
| 0.745636
| 0.738699
| 0.738699
| 0
| 0.006282
| 0.294283
| 23,457
| 552
| 120
| 42.494565
| 0.820889
| 0.611843
| 0
| 0.556291
| 0
| 0
| 0.024993
| 0.019042
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02649
| false
| 0
| 0.05298
| 0
| 0.112583
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
51ceabe7e875765b3a8f237ceaf50bbc1f1f1ad2
| 688
|
py
|
Python
|
nlpsandbox/__init__.py
|
Sage-Bionetworks/nlp-sandbox-client
|
e51720b35ca3413ccee71b9cdc223ce3578fe0fd
|
[
"Apache-2.0"
] | 3
|
2021-06-15T16:36:10.000Z
|
2021-11-15T01:44:46.000Z
|
nlpsandbox/__init__.py
|
nlpsandbox/nlpsandbox-client
|
8cba4f65ff2c06cbef7dc50f45b0aec9b8ee0476
|
[
"Apache-2.0"
] | 165
|
2020-11-23T00:36:40.000Z
|
2022-03-24T00:53:59.000Z
|
nlpsandbox/__init__.py
|
data2health/nlp-sandbox-evaluation
|
e51720b35ca3413ccee71b9cdc223ce3578fe0fd
|
[
"Apache-2.0"
] | 3
|
2020-12-11T00:04:13.000Z
|
2022-01-03T16:59:10.000Z
|
# flake8: noqa
"""
NLP Sandbox API
NLP Sandbox REST API # noqa: E501
The version of the OpenAPI document: 1.2.0
Contact: team@nlpsandbox.io
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from nlpsandbox.api_client import ApiClient
# import Configuration
from nlpsandbox.configuration import Configuration
# import exceptions
from nlpsandbox.exceptions import OpenApiException
from nlpsandbox.exceptions import ApiAttributeError
from nlpsandbox.exceptions import ApiTypeError
from nlpsandbox.exceptions import ApiValueError
from nlpsandbox.exceptions import ApiKeyError
from nlpsandbox.exceptions import ApiException
| 23.724138
| 51
| 0.799419
| 81
| 688
| 6.728395
| 0.444444
| 0.205505
| 0.26422
| 0.330275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017007
| 0.145349
| 688
| 28
| 52
| 24.571429
| 0.909864
| 0.34593
| 0
| 0
| 1
| 0
| 0.011933
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.888889
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cf8e45d00c9c528530305179ae196bdf53de28e5
| 97
|
py
|
Python
|
lib/models/__init__.py
|
Cakin-Kwong/Pose2Mesh_RELEASE
|
699abe9496231f8c2fdc31a2985156ec03d8a51b
|
[
"MIT"
] | null | null | null |
lib/models/__init__.py
|
Cakin-Kwong/Pose2Mesh_RELEASE
|
699abe9496231f8c2fdc31a2985156ec03d8a51b
|
[
"MIT"
] | null | null | null |
lib/models/__init__.py
|
Cakin-Kwong/Pose2Mesh_RELEASE
|
699abe9496231f8c2fdc31a2985156ec03d8a51b
|
[
"MIT"
] | null | null | null |
import models.pose2mesh_net
import models.posenet
import models.meshnet
import models.project_net
| 24.25
| 27
| 0.886598
| 14
| 97
| 6
| 0.5
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.072165
| 97
| 4
| 28
| 24.25
| 0.922222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cf9fd6079dfaf7ba9c5db0b91f92160a1e4fe5b4
| 227
|
py
|
Python
|
boundlexx/utils/backends.py
|
AngellusMortis/boundlexx
|
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
|
[
"MIT"
] | 1
|
2021-04-23T11:49:50.000Z
|
2021-04-23T11:49:50.000Z
|
boundlexx/utils/backends.py
|
AngellusMortis/boundlexx
|
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
|
[
"MIT"
] | 1
|
2021-04-17T18:17:12.000Z
|
2021-04-17T18:17:12.000Z
|
boundlexx/utils/backends.py
|
AngellusMortis/boundlexx
|
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
|
[
"MIT"
] | null | null | null |
from django_prometheus.cache.backends.redis import (
RedisCache as RedisPrometheusCache,
)
from redis_lock.django_cache import RedisCache as RedisLockCache
class RedisCache(RedisLockCache, RedisPrometheusCache):
    """Django Redis cache backend combining ``redis_lock``'s lock-aware cache
    with ``django_prometheus``'s instrumented cache.

    Adds no behavior of its own; ``RedisLockCache`` is listed first so its
    overrides take precedence in the MRO.
    """

    pass
| 25.222222
| 64
| 0.828194
| 24
| 227
| 7.708333
| 0.583333
| 0.172973
| 0.194595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123348
| 227
| 8
| 65
| 28.375
| 0.929648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
cfa504d07e90320a033eb24c3bb9a4bd980cc35b
| 183
|
py
|
Python
|
model/__init__.py
|
uniaim-event-team/watch-link
|
0ad2665fd88cba2fdb7e7c8f533bb5a8e6d91b31
|
[
"MIT"
] | 2
|
2020-05-05T14:53:00.000Z
|
2020-05-05T14:53:13.000Z
|
model/__init__.py
|
uniaim-event-team/watch-link
|
0ad2665fd88cba2fdb7e7c8f533bb5a8e6d91b31
|
[
"MIT"
] | 1
|
2021-03-01T02:00:11.000Z
|
2021-03-01T02:00:11.000Z
|
model/__init__.py
|
uniaim-event-team/watch-link
|
0ad2665fd88cba2fdb7e7c8f533bb5a8e6d91b31
|
[
"MIT"
] | null | null | null |
from .base import Session, BaseObject, metadata # noqa
from .freee import * # noqa
from .google_chat import * # noqa
from .slack import * # noqa
from .watch_link import * # noqa
| 30.5
| 55
| 0.710383
| 25
| 183
| 5.12
| 0.52
| 0.25
| 0.328125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202186
| 183
| 5
| 56
| 36.6
| 0.876712
| 0.131148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5c8766bc58b05ada454e1e940bc4f52fdc686f97
| 43
|
py
|
Python
|
analytics/pyinstaller_hooks/hook-scipy.py
|
jonyrock-back/hastic-server
|
3d74de8b5d2e10ab393af36d20069afff3f4a205
|
[
"Apache-2.0"
] | null | null | null |
analytics/pyinstaller_hooks/hook-scipy.py
|
jonyrock-back/hastic-server
|
3d74de8b5d2e10ab393af36d20069afff3f4a205
|
[
"Apache-2.0"
] | null | null | null |
analytics/pyinstaller_hooks/hook-scipy.py
|
jonyrock-back/hastic-server
|
3d74de8b5d2e10ab393af36d20069afff3f4a205
|
[
"Apache-2.0"
] | null | null | null |
# PyInstaller hook: declare scipy's compiled helper module as a hidden import
# so it is bundled even though static analysis cannot discover it.
hiddenimports = ["scipy._lib.messagestream"]
| 21.5
| 42
| 0.813953
| 4
| 43
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 43
| 1
| 43
| 43
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0.55814
| 0.55814
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5cb6fee0be438e1ce17f8239f5a4d94ecef84b65
| 142
|
py
|
Python
|
src/util.py
|
kathanm/digit-recognization-neural-net
|
744cb253c1767a99b19fee6fb6acbc28f5e07822
|
[
"MIT"
] | null | null | null |
src/util.py
|
kathanm/digit-recognization-neural-net
|
744cb253c1767a99b19fee6fb6acbc28f5e07822
|
[
"MIT"
] | null | null | null |
src/util.py
|
kathanm/digit-recognization-neural-net
|
744cb253c1767a99b19fee6fb6acbc28f5e07822
|
[
"MIT"
] | null | null | null |
import numpy as np
def sigmoid(n):
    """Return the logistic sigmoid 1 / (1 + e^(-n)) of *n* (scalar or array)."""
    exp_neg = np.exp(-n)
    return 1.0 / (1.0 + exp_neg)
def sigmoid_derivative(n):
    """Return the derivative of the logistic sigmoid at *n*.

    Uses the identity sigmoid'(n) = sigmoid(n) * (1 - sigmoid(n)).
    Evaluates ``sigmoid`` once and reuses the result (the original computed
    it twice, doubling the cost of the ``np.exp`` call).
    """
    s = sigmoid(n)
    return s * (1 - s)
| 15.777778
| 40
| 0.612676
| 25
| 142
| 3.44
| 0.48
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045045
| 0.21831
| 142
| 8
| 41
| 17.75
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7a787fb857cd36efcadab672c3679f62ec06159b
| 200
|
py
|
Python
|
locale/pot/api/plotting/_autosummary/pyvista-themes-ParaViewTheme-silhouette-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-themes-DarkTheme-silhouette-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-themes-ParaViewTheme-silhouette-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
# Set parameters of the silhouette.
#
import pyvista

# Mutates the process-wide theme: every plot created afterwards picks up
# these silhouette settings.
pyvista.global_theme.silhouette.color = 'grey'
pyvista.global_theme.silhouette.line_width = 2.0
# NOTE(review): presumably an angle threshold in degrees for which edges are
# outlined — confirm against the pyvista theme documentation.
pyvista.global_theme.silhouette.feature_angle = 20
| 28.571429
| 50
| 0.82
| 28
| 200
| 5.678571
| 0.642857
| 0.245283
| 0.339623
| 0.528302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.09
| 200
| 6
| 51
| 33.333333
| 0.851648
| 0.165
| 0
| 0
| 0
| 0
| 0.02439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a8cd4de03a07a5b3b5c0c83aff09fe39cb96553
| 337
|
py
|
Python
|
pfhedge/stochastic/__init__.py
|
YieldLabs/pfhedge
|
a5ba9d054a8418cb8b27bb67d81a8fc8fb83ef57
|
[
"MIT"
] | null | null | null |
pfhedge/stochastic/__init__.py
|
YieldLabs/pfhedge
|
a5ba9d054a8418cb8b27bb67d81a8fc8fb83ef57
|
[
"MIT"
] | null | null | null |
pfhedge/stochastic/__init__.py
|
YieldLabs/pfhedge
|
a5ba9d054a8418cb8b27bb67d81a8fc8fb83ef57
|
[
"MIT"
] | null | null | null |
from .brownian import generate_brownian
from .brownian import generate_geometric_brownian
from .cir import generate_cir
from .heston import generate_heston
from .local_volatility import generate_local_volatility_process
from .random import randn_antithetic
from .random import randn_sobol_boxmuller
from .vasicek import generate_vasicek
| 37.444444
| 63
| 0.881306
| 45
| 337
| 6.311111
| 0.355556
| 0.295775
| 0.126761
| 0.183099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094955
| 337
| 8
| 64
| 42.125
| 0.931148
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7aa18cf193499b1264aee8a5b6f43066f089d867
| 28
|
py
|
Python
|
datas_utils/log/__init__.py
|
iatlab/datas-utils
|
b8eef303de5a5d5a57182c0627b721dde0b6b300
|
[
"MIT"
] | 328
|
2019-05-27T03:09:02.000Z
|
2022-03-31T05:12:04.000Z
|
diva_io/utils/__init__.py
|
AnjaliPC/Object_Detection_Tracking
|
f86caaec97669a6da56f1b402cca4e179a85d2f0
|
[
"MIT"
] | 43
|
2019-06-05T14:04:09.000Z
|
2022-01-25T03:16:39.000Z
|
diva_io/utils/__init__.py
|
AnjaliPC/Object_Detection_Tracking
|
f86caaec97669a6da56f1b402cca4e179a85d2f0
|
[
"MIT"
] | 107
|
2019-05-27T06:26:38.000Z
|
2022-03-25T03:32:58.000Z
|
from .log import get_logger
| 14
| 27
| 0.821429
| 5
| 28
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7aa7df2db682a4e1d60b6d85214f1b4644e43919
| 4,765
|
py
|
Python
|
tests/test-criticisms/test_evaluate.py
|
NunoEdgarGFlowHub/edward
|
298fb539261c71e34d5e7aa5a37ed8a029df0820
|
[
"Apache-2.0"
] | 1
|
2021-01-11T03:33:36.000Z
|
2021-01-11T03:33:36.000Z
|
tests/test-criticisms/test_evaluate.py
|
NunoEdgarGFlowHub/edward
|
298fb539261c71e34d5e7aa5a37ed8a029df0820
|
[
"Apache-2.0"
] | null | null | null |
tests/test-criticisms/test_evaluate.py
|
NunoEdgarGFlowHub/edward
|
298fb539261c71e34d5e7aa5a37ed8a029df0820
|
[
"Apache-2.0"
] | 1
|
2021-06-13T06:58:00.000Z
|
2021-06-13T06:58:00.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Bernoulli, Categorical, Multinomial, Normal
class test_evaluate_class(tf.test.TestCase):
    """Unit tests for ``ed.evaluate``: metric-name handling, classification
    accuracy metrics, data-dict binding, ``n_samples`` validation, and
    ``output_key`` selection."""

    def test_metrics(self):
        # A metric spec may be a single name or a list of names; a non-string
        # spec raises TypeError and an unknown name raises NotImplementedError.
        with self.test_session():
            x = Normal(loc=0.0, scale=1.0)
            x_data = tf.constant(0.0)
            ed.evaluate('mean_squared_error', {x: x_data}, n_samples=1)
            ed.evaluate(['mean_squared_error'], {x: x_data}, n_samples=1)
            ed.evaluate(['mean_squared_error', 'mean_absolute_error'],
                        {x: x_data}, n_samples=1)
            self.assertRaises(TypeError, ed.evaluate, x, {x: x_data}, n_samples=1)
            self.assertRaises(NotImplementedError, ed.evaluate, 'hello world',
                              {x: x_data}, n_samples=1)

    def test_metrics_classification(self):
        # Accuracy metrics across Bernoulli / Categorical / Multinomial output
        # layers; each expected value is the fraction of observations matching
        # the most probable outcome under the given probs.
        with self.test_session():
            x = Bernoulli(probs=0.51)
            x_data = tf.constant(1)
            self.assertAllClose(
                1.0,
                ed.evaluate('binary_accuracy', {x: x_data}, n_samples=1))
            # 3 of the 5 observed labels equal 1 -> accuracy 0.6.
            x = Bernoulli(probs=0.51, sample_shape=5)
            x_data = tf.constant([1, 1, 1, 0, 0])
            self.assertAllClose(
                0.6,
                ed.evaluate('binary_accuracy', {x: x_data}, n_samples=1))
            # Per-element probs: matches at positions 0 and 1, miss at 2.
            x = Bernoulli(probs=tf.constant([0.51, 0.49, 0.49]))
            x_data = tf.constant([1, 0, 1])
            self.assertAllClose(
                2.0 / 3,
                ed.evaluate('binary_accuracy', {x: x_data}, n_samples=1))
            # Sparse (integer-label) accuracy for a categorical output.
            x = Categorical(probs=tf.constant([0.48, 0.51, 0.01]))
            x_data = tf.constant(1)
            self.assertAllClose(
                1.0,
                ed.evaluate('sparse_categorical_accuracy', {x: x_data}, n_samples=1))
            x = Categorical(probs=tf.constant([0.48, 0.51, 0.01]), sample_shape=5)
            x_data = tf.constant([1, 1, 1, 0, 2])
            self.assertAllClose(
                0.6,
                ed.evaluate('sparse_categorical_accuracy', {x: x_data}, n_samples=1))
            x = Categorical(
                probs=tf.constant([[0.48, 0.51, 0.01], [0.51, 0.48, 0.01]]))
            x_data = tf.constant([1, 2])
            self.assertAllClose(
                0.5,
                ed.evaluate('sparse_categorical_accuracy', {x: x_data}, n_samples=1))
            # One-hot (dense-label) accuracy for a multinomial output.
            x = Multinomial(total_count=1.0, probs=tf.constant([0.48, 0.51, 0.01]))
            x_data = tf.constant([0, 1, 0], dtype=x.dtype.as_numpy_dtype)
            self.assertAllClose(
                1.0,
                ed.evaluate('categorical_accuracy', {x: x_data}, n_samples=1))
            x = Multinomial(total_count=1.0, probs=tf.constant([0.48, 0.51, 0.01]),
                            sample_shape=5)
            x_data = tf.constant(
                [[0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]],
                dtype=x.dtype.as_numpy_dtype)
            self.assertAllClose(
                0.6,
                ed.evaluate('categorical_accuracy', {x: x_data}, n_samples=1))

    def test_data(self):
        # The data dict may bind random variables and placeholders; keys must
        # be graph objects, not strings (TypeError otherwise).
        with self.test_session():
            x_ph = tf.placeholder(tf.float32, [])
            x = Normal(loc=x_ph, scale=1.0)
            y = 2.0 * Normal(loc=0.0, scale=1.0)
            x_data = tf.constant(0.0)
            x_ph_data = np.array(0.0)
            y_data = tf.constant(20.0)
            ed.evaluate('mean_squared_error', {x: x_data, x_ph: x_ph_data},
                        n_samples=1)
            ed.evaluate('mean_squared_error', {y: y_data}, n_samples=1)
            self.assertRaises(TypeError, ed.evaluate, 'mean_squared_error',
                              {'y': y_data}, n_samples=1)

    def test_n_samples(self):
        # n_samples accepts positive ints only; a string raises TypeError.
        with self.test_session():
            x = Normal(loc=0.0, scale=1.0)
            x_data = tf.constant(0.0)
            ed.evaluate('mean_squared_error', {x: x_data}, n_samples=1)
            ed.evaluate('mean_squared_error', {x: x_data}, n_samples=5)
            self.assertRaises(TypeError, ed.evaluate, 'mean_squared_error',
                              {x: x_data}, n_samples='1')

    def test_output_key(self):
        # With several candidate outputs in the data dict, output_key selects
        # which one to score; ambiguity raises KeyError and a non-graph
        # output_key raises TypeError.
        with self.test_session():
            x_ph = tf.placeholder(tf.float32, [])
            x = Normal(loc=x_ph, scale=1.0)
            y = 2.0 * x
            x_data = tf.constant(0.0)
            x_ph_data = np.array(0.0)
            y_data = tf.constant(20.0)
            ed.evaluate('mean_squared_error', {x: x_data, x_ph: x_ph_data},
                        n_samples=1)
            ed.evaluate('mean_squared_error', {y: y_data, x_ph: x_ph_data},
                        n_samples=1)
            ed.evaluate('mean_squared_error', {x: x_data, y: y_data, x_ph: x_ph_data},
                        n_samples=1, output_key=x)
            self.assertRaises(KeyError, ed.evaluate, 'mean_squared_error',
                              {x: x_data, y: y_data, x_ph: x_ph_data}, n_samples=1)
            self.assertRaises(TypeError, ed.evaluate, 'mean_squared_error',
                              {x: x_data, y: y_data, x_ph: x_ph_data}, n_samples=1,
                              output_key='x')
if __name__ == '__main__':
    # Allow running this test file directly via TensorFlow's test runner.
    tf.test.main()
| 39.708333
| 80
| 0.60063
| 731
| 4,765
| 3.668947
| 0.103967
| 0.061521
| 0.107383
| 0.111484
| 0.828486
| 0.815063
| 0.784862
| 0.773676
| 0.717375
| 0.699478
| 0
| 0.054266
| 0.249738
| 4,765
| 119
| 81
| 40.042017
| 0.695944
| 0
| 0
| 0.514019
| 0
| 0
| 0.096327
| 0.016999
| 0
| 0
| 0
| 0
| 0.130841
| 1
| 0.046729
| false
| 0
| 0.065421
| 0
| 0.121495
| 0.009346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8f8734139aebfe314bc9d38b456ad61641621c21
| 95
|
py
|
Python
|
utils.py
|
tommyip/aoc2021
|
51a8b51d2910a344d94cd03388c676201e815503
|
[
"MIT"
] | null | null | null |
utils.py
|
tommyip/aoc2021
|
51a8b51d2910a344d94cd03388c676201e815503
|
[
"MIT"
] | null | null | null |
utils.py
|
tommyip/aoc2021
|
51a8b51d2910a344d94cd03388c676201e815503
|
[
"MIT"
] | null | null | null |
import sys
def read_input(f):
    """Read every line from standard input, strip surrounding whitespace,
    and parse each one with *f*, returning the results as a list."""
    parsed = []
    for raw_line in sys.stdin.readlines():
        parsed.append(f(raw_line.strip()))
    return parsed
| 15.833333
| 62
| 0.684211
| 16
| 95
| 4
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168421
| 95
| 5
| 63
| 19
| 0.810127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8f95a26ced0bbfe18968f9e6efc70ee35ba34c4c
| 23
|
py
|
Python
|
SecureWitnessApp/views/comments/__init__.py
|
karttrak/3240-team10
|
d07ca17717004aab6f2e8c97f4bb9ac21453ce60
|
[
"MIT"
] | null | null | null |
SecureWitnessApp/views/comments/__init__.py
|
karttrak/3240-team10
|
d07ca17717004aab6f2e8c97f4bb9ac21453ce60
|
[
"MIT"
] | null | null | null |
SecureWitnessApp/views/comments/__init__.py
|
karttrak/3240-team10
|
d07ca17717004aab6f2e8c97f4bb9ac21453ce60
|
[
"MIT"
] | null | null | null |
from .comments import *
| 23
| 23
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8fd95b7994efd0ad9567499232cd6e8ffcc5d519
| 164
|
py
|
Python
|
src/backend/src/run.py
|
DhrubojyotiBis1/web
|
68ad34c1536f37d4f123bd1336a6ce763a68729a
|
[
"MIT"
] | null | null | null |
src/backend/src/run.py
|
DhrubojyotiBis1/web
|
68ad34c1536f37d4f123bd1336a6ce763a68729a
|
[
"MIT"
] | null | null | null |
src/backend/src/run.py
|
DhrubojyotiBis1/web
|
68ad34c1536f37d4f123bd1336a6ce763a68729a
|
[
"MIT"
] | 5
|
2020-09-16T12:03:54.000Z
|
2020-09-27T12:45:05.000Z
|
from advolet_app import app, db
from advolet_app.models import User
if __name__ == "__main__":
#development
db.create_all()
#Production
app.run()
| 18.222222
| 35
| 0.695122
| 22
| 164
| 4.681818
| 0.681818
| 0.213592
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213415
| 164
| 9
| 36
| 18.222222
| 0.79845
| 0.128049
| 0
| 0
| 0
| 0
| 0.056338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8ffba6afd9bf5e9848c891a855943ede73568c3b
| 563
|
py
|
Python
|
detic/__init__.py
|
axinc-ai/Detic
|
8beeece2a99384e1e7b66b070028b2a4fe765220
|
[
"Apache-2.0"
] | 956
|
2022-01-10T00:17:51.000Z
|
2022-03-31T17:01:38.000Z
|
detic/__init__.py
|
axinc-ai/Detic
|
8beeece2a99384e1e7b66b070028b2a4fe765220
|
[
"Apache-2.0"
] | 43
|
2022-01-10T02:24:03.000Z
|
2022-03-31T01:58:53.000Z
|
detic/__init__.py
|
axinc-ai/Detic
|
8beeece2a99384e1e7b66b070028b2a4fe765220
|
[
"Apache-2.0"
] | 62
|
2022-01-10T02:19:00.000Z
|
2022-03-31T18:43:38.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .modeling.meta_arch import custom_rcnn
from .modeling.roi_heads import detic_roi_heads
from .modeling.roi_heads import res5_roi_heads
from .modeling.backbone import swintransformer
from .modeling.backbone import timm
from .data.datasets import lvis_v1
from .data.datasets import imagenet
from .data.datasets import cc
from .data.datasets import objects365
from .data.datasets import oid
from .data.datasets import coco_zeroshot
# Deformable DETR support is optional: registering it requires extra
# dependencies that may be absent, so a failed import is tolerated.
try:
    from .modeling.meta_arch import d2_deformable_detr
except ImportError:
    # Narrowed from a bare `except:`, which also swallowed SystemExit,
    # KeyboardInterrupt, and genuine bugs raised inside the module.
    pass
| 29.631579
| 54
| 0.820604
| 83
| 563
| 5.409639
| 0.445783
| 0.160356
| 0.213808
| 0.293987
| 0.231626
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012146
| 0.122558
| 563
| 19
| 55
| 29.631579
| 0.896761
| 0.085258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.066667
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
64ef215d00bf9c5de596314eafc522a336a8371a
| 14,691
|
py
|
Python
|
pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py
|
SteffenMeinecke/pandapipes
|
2d0631c053735e4116a145bae9975379135b9c36
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py
|
SteffenMeinecke/pandapipes
|
2d0631c053735e4116a145bae9975379135b9c36
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py
|
SteffenMeinecke/pandapipes
|
2d0631c053735e4116a145bae9975379135b9c36
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020-2022 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import os
import numpy as np
import pandapipes
import pandas as pd
import pytest
from pandapipes.component_models import Pipe, Junction
from pandapipes.idx_node import PINIT, TINIT
from pandapipes.pipeflow_setup import get_lookup
from pandapipes.test.pipeflow_internals import internals_data_path
from pandapipes.properties.fluids import _add_fluid_to_net
def test_gas_internal_nodes():
    """Gas pipeflow on a single pipe with 12 internal sections, checked
    against the analytical reference stored in ``gas_sections_an.csv``
    (pressure ratio within 1 %, velocity within 0.4).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    # Pipe diameter passed positionally to create_pipe_from_parameters below
    # (presumably meters — confirm against the pandapipes API).
    d = 209.1e-3
    pandapipes.create_junction(net, pn_bar=51, tfluid_k=285.15)
    pandapipes.create_junction(net, pn_bar=51, tfluid_k=285.15)
    pandapipes.create_pipe_from_parameters(net, 0, 1, 12.0, d, k_mm=.5, sections=12)
    pandapipes.create_ext_grid(net, 0, p_bar=51 - 1.01325, t_k=285.15, type="pt")
    pandapipes.create_sink(net, 1, mdot_kg_per_s=0.82752 * 45000 / 3600)
    # Constant-property stand-in for natural gas so the comparison is not
    # affected by property correlations.
    _add_fluid_to_net(net, pandapipes.create_constant_fluid(
        name="natural_gas", fluid_type="gas", viscosity=11.93e-6, heat_capacity=2185,
        compressibility=1, der_compressibility=0, density=0.82752
    ))
    pandapipes.pipeflow(net, stop_condition="tol", iter=70, friction_model="nikuradse",
                        transient=False, nonlinear_method="automatic", tol_p=1e-4, tol_v=1e-4)
    pipe_results = Pipe.get_internal_results(net, [0])
    # Analytical reference values for pressure (p1, in Pa) and velocity (v).
    data = pd.read_csv(os.path.join(internals_data_path, "gas_sections_an.csv"), sep=';', header=0,
                       keep_default_na=False)
    p_an = data["p1"] / 1e5
    v_an = data["v"]
    v_an = v_an.drop([0])
    # Internal-node results for pipe index 0.
    pipe_p_data_idx = np.where(pipe_results["PINIT"][:, 0] == 0)
    pipe_v_data_idx = np.where(pipe_results["VINIT_MEAN"][:, 0] == 0)
    pipe_p_data = pipe_results["PINIT"][pipe_p_data_idx, 1]
    pipe_v_data = pipe_results["VINIT_MEAN"][pipe_v_data_idx, 1]
    node_pit = net["_pit"]["node"]
    junction_idx_lookup = get_lookup(net, "node", "index")[Junction.table_name()]
    from_junction_nodes = junction_idx_lookup[net["pipe"]["from_junction"].values]
    to_junction_nodes = junction_idx_lookup[net["pipe"]["to_junction"].values]
    # Assemble the full pressure profile: inlet junction, internal nodes,
    # outlet junction; shift to absolute pressure (+1.01325 bar) to match
    # the reference data.
    p_pandapipes = np.zeros(len(pipe_p_data[0]) + 2)
    p_pandapipes[0] = node_pit[from_junction_nodes[0], PINIT]
    p_pandapipes[1:-1] = pipe_p_data[:]
    p_pandapipes[-1] = node_pit[to_junction_nodes[0], PINIT]
    p_pandapipes = p_pandapipes + 1.01325
    v_pandapipes = pipe_v_data[0, :]
    p_diff = np.abs(1 - p_pandapipes / p_an)
    v_diff = np.abs(v_an - v_pandapipes)
    assert np.all(p_diff < 0.01)
    assert np.all(v_diff < 0.4)
def test_temperature_internal_nodes_single_pipe():
    """Heat-transfer pipeflow on a single water pipe with 6 internal
    sections, checked against the analytical temperature profile in
    ``Temperature_one_pipe_an.csv`` (within 1 % relative error).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    d = 75e-3  # pipe diameter (positional arg; presumably meters — confirm)
    pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    pandapipes.create_pipe_from_parameters(net, 0, 1, 6, d, k_mm=.1, sections=6, alpha_w_per_m2k=5)
    pandapipes.create_ext_grid(net, 0, p_bar=5, t_k=330, type="pt")
    pandapipes.create_sink(net, 1, mdot_kg_per_s=1)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    # mode="all" runs the hydraulic and the thermal calculation together.
    pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="nikuradse",
                        mode="all", transient=False, nonlinear_method="automatic", tol_p=1e-4,
                        tol_v=1e-4)
    pipe_results = Pipe.get_internal_results(net, [0])
    data = pd.read_csv(os.path.join(internals_data_path, "Temperature_one_pipe_an.csv"), sep=';',
                       header=0, keep_default_na=False)
    temp_an = data["T"]
    # Internal-node temperatures for pipe index 0.
    pipe_temp_data_idx = np.where(pipe_results["TINIT"][:, 0] == 0)
    pipe_temp_data = pipe_results["TINIT"][pipe_temp_data_idx, 1]
    node_pit = net["_pit"]["node"]
    junction_idx_lookup = get_lookup(net, "node", "index")[Junction.table_name()]
    from_junction_nodes = junction_idx_lookup[net["pipe"]["from_junction"].values]
    to_junction_nodes = junction_idx_lookup[net["pipe"]["to_junction"].values]
    # Full profile: inlet junction, internal nodes, outlet junction.
    temp_pandapipes = np.zeros(len(pipe_temp_data[0]) + 2)
    temp_pandapipes[0] = node_pit[from_junction_nodes[0], TINIT]
    temp_pandapipes[1:-1] = pipe_temp_data[:]
    temp_pandapipes[-1] = node_pit[to_junction_nodes[0], TINIT]
    temp_diff = np.abs(1 - temp_pandapipes / temp_an)
    assert np.all(temp_diff < 0.01)
def test_temperature_internal_nodes_tee_2ab_1zu():
    """Thermal pipeflow on a tee network — one feed, two outgoing branches
    ("2ab_1zu" presumably German: 2 Abgänge, 1 Zulauf — confirm) — checked
    against ``Temperature_tee_2ab_1zu_an.csv`` (within 1 % relative error).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    d = 75e-3  # pipe diameter (positional arg; presumably meters — confirm)
    j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    # Hot feed at j0; sinks draw from both branch ends j2 and j3.
    pandapipes.create_ext_grid(net, j0, p_bar=5, t_k=350, type="pt")
    pandapipes.create_sink(net, j2, mdot_kg_per_s=1)
    pandapipes.create_sink(net, j3, mdot_kg_per_s=1)
    pandapipes.create_pipe_from_parameters(net, j0, j1, 2.5, d, k_mm=.1, alpha_w_per_m2k=5)
    pandapipes.create_pipe_from_parameters(net, j1, j2, 2.5, d, k_mm=.1, alpha_w_per_m2k=5)
    pandapipes.create_pipe_from_parameters(net, j1, j3, 2.5, d, k_mm=.1, alpha_w_per_m2k=5)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    # mode='all' runs the hydraulic and the thermal calculation together.
    pandapipes.pipeflow(net, stop_condition="tol", iter=70, friction_model="nikuradse",
                        mode='all', transient=False, nonlinear_method="automatic", tol_p=1e-4,
                        tol_v=1e-4)
    data = pd.read_csv(os.path.join(internals_data_path, "Temperature_tee_2ab_1zu_an.csv"),
                       sep=';', header=0, keep_default_na=False)
    temp_an = data["T"]
    temp_pandapipes = net.res_junction["t_k"]
    temp_diff = np.abs(1 - temp_pandapipes / temp_an)
    assert np.all(temp_diff < 0.01)
def test_temperature_internal_nodes_tee_2zu_1ab():
    """Thermal pipeflow on a tee network — two feeds merging into one outflow
    ("2zu_1ab" presumably German: 2 Zuläufe, 1 Abgang — confirm) — checked
    against ``Temperature_tee_2zu_1ab_an.csv`` (within 1 % relative error).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    d = 75e-3  # pipe diameter (positional arg; presumably meters — confirm)
    j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    # Two inlet pipes (j0->j2, j1->j2) merge at j2 and continue to the sink at j3.
    pandapipes.create_pipe_from_parameters(net, j0, j2, 2.5, d, k_mm=.1, sections=3,
                                           alpha_w_per_m2k=5)
    pandapipes.create_pipe_from_parameters(net, j1, j2, 2.5, d, k_mm=.1, sections=3,
                                           alpha_w_per_m2k=5)
    pandapipes.create_pipe_from_parameters(net, j2, j3, 2.5, d, k_mm=.1, sections=3,
                                           alpha_w_per_m2k=5)
    pandapipes.create_ext_grid(net, j0, p_bar=5, t_k=350, type="pt")
    pandapipes.create_ext_grid(net, j1, p_bar=5, t_k=350, type="pt")
    pandapipes.create_sink(net, j3, mdot_kg_per_s=1)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    # mode='all' runs the hydraulic and the thermal calculation together.
    pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="nikuradse",
                        mode='all', transient=False, nonlinear_method="automatic", tol_p=1e-4,
                        tol_v=1e-4)
    data = pd.read_csv(os.path.join(internals_data_path, "Temperature_tee_2zu_1ab_an.csv"),
                       sep=';', header=0, keep_default_na=False)
    temp_an = data["T"]
    temp_pandapipes = net.res_junction["t_k"]
    temp_diff = np.abs(1 - temp_pandapipes / temp_an)
    assert np.all(temp_diff < 0.01)
def test_temperature_internal_nodes_tee_2zu_1ab_direction_changed():
    """
    Tee network with two feed-ins and one sink where the pipe between the
    tee node and one feed-in is created against the physical flow direction
    (j2 -> j1). Junction temperatures must still match the analytical
    reference "Temperature_tee_2zu_1ab_an.csv" within 1 % relative deviation.
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    diameter_m = 75e-3
    j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    pandapipes.create_ext_grid(net, j0, p_bar=5, t_k=350, type="pt")
    pandapipes.create_ext_grid(net, j1, p_bar=5, t_k=350, type="pt")
    pandapipes.create_sink(net, j3, mdot_kg_per_s=1)
    # Note the reversed definition of the middle pipe (j2 -> j1).
    for from_j, to_j in ((j0, j2), (j2, j1), (j2, j3)):
        pandapipes.create_pipe_from_parameters(
            net, from_j, to_j, 2.5, diameter_m, k_mm=.1, sections=5,
            alpha_w_per_m2k=5)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    pandapipes.pipeflow(net, stop_condition="tol", iter=70,
                        friction_model="nikuradse", mode='all', transient=False,
                        nonlinear_method="automatic", tol_p=1e-4, tol_v=1e-4)
    reference = pd.read_csv(
        os.path.join(internals_data_path, "Temperature_tee_2zu_1ab_an.csv"),
        sep=';', header=0, keep_default_na=False)
    rel_deviation = np.abs(1 - net.res_junction["t_k"] / reference["T"])
    assert np.all(rel_deviation < 0.01)
def test_temperature_internal_nodes_2zu_2ab():
    """
    Cross network: two external grids with different feed temperatures
    (350 K at j0, 300 K at j1) mix at j2 and supply two sinks (j3, j4).
    The resulting junction temperatures are compared against the analytical
    reference "Temperature_2zu_2ab_an.csv" (relative deviation below 1 %).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    diameter_m = 75e-3
    j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    pandapipes.create_ext_grid(net, j0, p_bar=5, t_k=350, type="pt")
    pandapipes.create_ext_grid(net, j1, p_bar=5, t_k=300, type="pt")
    pandapipes.create_sink(net, j3, mdot_kg_per_s=1)
    pandapipes.create_sink(net, j4, mdot_kg_per_s=1)
    # Two pipes feeding the mixing node j2, two pipes towards the sinks.
    for from_j, to_j in ((j0, j2), (j1, j2), (j2, j3), (j2, j4)):
        pandapipes.create_pipe_from_parameters(
            net, from_j, to_j, 2.5, diameter_m, k_mm=.1, alpha_w_per_m2k=5)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    pandapipes.pipeflow(net, stop_condition="tol", iter=70,
                        friction_model="nikuradse", mode='all', transient=False,
                        nonlinear_method="automatic", tol_p=1e-4, tol_v=1e-4)
    reference = pd.read_csv(
        os.path.join(internals_data_path, "Temperature_2zu_2ab_an.csv"),
        sep=';', header=0, keep_default_na=False)
    rel_deviation = np.abs(1 - net.res_junction["t_k"] / reference["T"])
    assert np.all(rel_deviation < 0.01)
def test_temperature_internal_nodes_masche_1load():
    """
    Looped network ("Masche") with a single load at j2: the loop
    j1 -> j2 / j1 -> j3 -> j2 splits the flow. Junction temperatures are
    checked against the analytical reference "Temperature_masche_1load_an.csv"
    (relative deviation below 1 %).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    diameter_m = 75e-3
    j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    # Feed line j0 -> j1, then the loop around j1/j2/j3.
    for from_j, to_j in ((j0, j1), (j1, j2), (j1, j3), (j3, j2)):
        pandapipes.create_pipe_from_parameters(
            net, from_j, to_j, 2.5, diameter_m, k_mm=.1, sections=6,
            alpha_w_per_m2k=5)
    pandapipes.create_ext_grid(net, j0, p_bar=5, t_k=350, type="pt")
    pandapipes.create_sink(net, j2, mdot_kg_per_s=1)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    pandapipes.pipeflow(net, stop_condition="tol", iter=70,
                        friction_model="nikuradse", mode='all', transient=False,
                        nonlinear_method="automatic", tol_p=1e-4, tol_v=1e-4)
    reference = pd.read_csv(
        os.path.join(internals_data_path, "Temperature_masche_1load_an.csv"),
        sep=';', header=0, keep_default_na=False)
    rel_deviation = np.abs(1 - net.res_junction["t_k"] / reference["T"])
    assert np.all(rel_deviation < 0.01)
def test_temperature_internal_nodes_masche_1load_changed_direction():
    """
    Looped network with a single load at j3 where the loop pipes run
    j0 -> j2 / j0 -> j3 -> j2. Junction temperatures are checked against
    the analytical reference "Temperature_masche_1load_direction_an.csv"
    (relative deviation below 1 %).
    """
    net = pandapipes.create_empty_network("net", add_stdtypes=False)
    diameter_m = 75e-3
    j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283)
    for from_j, to_j in ((j0, j2), (j0, j3), (j3, j2)):
        pandapipes.create_pipe_from_parameters(
            net, from_j, to_j, 2.5, diameter_m, k_mm=.1, sections=5,
            alpha_w_per_m2k=5)
    pandapipes.create_fluid_from_lib(net, "water", overwrite=True)
    pandapipes.create_ext_grid(net, j0, p_bar=5, t_k=350, type="pt")
    pandapipes.create_sink(net, j3, mdot_kg_per_s=1)
    pandapipes.pipeflow(net, stop_condition="tol", iter=70,
                        friction_model="nikuradse", mode='all', transient=False,
                        nonlinear_method="automatic", tol_p=1e-4, tol_v=1e-4)
    reference = pd.read_csv(
        os.path.join(internals_data_path,
                     "Temperature_masche_1load_direction_an.csv"),
        sep=';', header=0, keep_default_na=False)
    rel_deviation = np.abs(1 - net.res_junction["t_k"] / reference["T"])
    assert np.all(rel_deviation < 0.01)
if __name__ == "__main__":
    # Fix: the directory is "pipeflow_internals" (the original string said
    # "pipflow_internals"), matching the `internals_data_path` used above —
    # with the typo, pytest would collect no tests when run as a script.
    pytest.main([r'pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py'])
| 41.617564
| 99
| 0.664829
| 2,245
| 14,691
| 4.03118
| 0.091759
| 0.153812
| 0.074254
| 0.083536
| 0.857017
| 0.846409
| 0.834586
| 0.834254
| 0.811823
| 0.807403
| 0
| 0.051985
| 0.207814
| 14,691
| 352
| 100
| 41.735795
| 0.72564
| 0.026819
| 0
| 0.640693
| 0
| 0
| 0.054479
| 0.020164
| 0
| 0
| 0
| 0
| 0.038961
| 1
| 0.034632
| false
| 0
| 0.04329
| 0
| 0.077922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8f3dd8485e1aee739dbaf7114a2a8017e49aba20
| 47
|
py
|
Python
|
Continuous_Algo/envs/gym-nqubit/gym_nqubit/envs/__init__.py
|
NemoHimma/ZeroRL
|
c37cb3105981b7d9331749941df3e3f449976fde
|
[
"MIT"
] | null | null | null |
Continuous_Algo/envs/gym-nqubit/gym_nqubit/envs/__init__.py
|
NemoHimma/ZeroRL
|
c37cb3105981b7d9331749941df3e3f449976fde
|
[
"MIT"
] | null | null | null |
Continuous_Algo/envs/gym-nqubit/gym_nqubit/envs/__init__.py
|
NemoHimma/ZeroRL
|
c37cb3105981b7d9331749941df3e3f449976fde
|
[
"MIT"
] | null | null | null |
from gym_nqubit.envs.NqubitEnv import NqubitEnv
| 47
| 47
| 0.893617
| 7
| 47
| 5.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f7fafbcfb79deb68667418aab9c334a8940bcd9
| 6,320
|
py
|
Python
|
snakecat/__init__.py
|
amironov73/snakecat
|
d23adc763060648358b3bcb458c04fdc537fe703
|
[
"MIT"
] | 2
|
2021-12-23T06:57:40.000Z
|
2021-12-23T06:58:07.000Z
|
snakecat/__init__.py
|
amironov73/snakecat
|
d23adc763060648358b3bcb458c04fdc537fe703
|
[
"MIT"
] | null | null | null |
snakecat/__init__.py
|
amironov73/snakecat
|
d23adc763060648358b3bcb458c04fdc537fe703
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
The module contains the core functionality for working with the
IRBIS64 server, including record manipulation.
"""
# Package metadata (version, authorship and licensing information).
__version__ = '0.1.100'
__author__ = 'Alexey Mironov'
__email__ = 'amironov73@gmail.com'
__title__ = 'snakecat'
__summary__ = 'ctypes wrapper for irbis64_client.dll'
__uri__ = 'http://arsmagna.ru'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2021 Alexey Mironov'
from snakecat.constants import NO_ERROR, ERR_USER, ERR_BUSY, \
ERR_UNKNOWN, ERR_BUFSIZE, TERM_NOT_EXISTS, TERM_LAST_IN_LIST, \
TERM_FIRST_IN_LIST, ERR_DBEWLOCK, ERR_RECLOCKED, VERSION_ERROR, \
READ_WRONG_MFN, REC_DELETE, REC_PHYS_DELETE, ERROR_CLIENT_FMT, \
SERVER_EXECUTE_ERROR, ANSWER_LENGTH_ERROR, WRONG_PROTOCOL, \
CLIENT_NOT_IN_LIST, CLIENT_NOT_IN_USE, CLIENT_IDENTIFIER_WRONG, \
CLIENT_LIST_OVERLOAD, CLIENT_ALREADY_EXISTS, CLIENT_NOT_ALLOWED, \
WRONG_PASSWORD, FILE_NOT_EXISTS, SERVER_OVERLOAD, PROCESS_ERROR, \
GLOBAL_ERROR, SYSPATH, DATAPATH, DBNPATH2, DBNPATH3, DBNPATH10, \
FULLTEXTPATH, INTERNALRESOURCEPATH, IRBIS_READER, \
IRBIS_ADMINISTRATOR, IRBIS_CATALOG, IRBIS_COMPLECT, \
IRBIS_BOOKLAND, IRBIS_BOOKPROVD, MAX_POSTINGS_IN_PACKET, \
ANSI, UTF
from snakecat.dllwrapper import IC_reg, IC_unreg, \
IC_set_client_time_live, IC_set_show_waiting, IC_set_webserver, \
IC_set_webcgi, IC_set_blocksocket, IC_isbusy, IC_update_ini, \
IC_getresourse, IC_clearresourse, IC_getresoursegroup, \
IC_getbinaryresourse, IC_putresourse, IC_read, IC_readformat, \
IC_update, IC_updategroup, IC_runlock, IC_ifupdate, IC_maxmfn, \
IC_fieldn, IC_field, IC_fldadd, IC_fldrep, IC_nfields, IC_nocc, \
IC_fldtag, IC_fldempty, IC_changemfn, IC_recdel, IC_recundel, \
IC_recunlock, IC_getmfn, IC_recdummy, IC_isactualized, IC_islocked, \
IC_isdeleted, IC_nexttrm, IC_nexttrmgroup, IC_prevtrm, \
IC_prevtrmgroup, IC_posting, IC_postinggroup, IC_postingformat, \
IC_search, IC_searchscan, IC_sformat, IC_record_sformat, \
IC_sformatgroup, IC_print, IC_stat, IC_gbl, IC_adm_restartserver, \
IC_adm_getdeletedlist, IC_adm_getalldeletedlists, IC_adm_dbempty, \
IC_adm_newdb, IC_adm_dbunlock, IC_adm_dbunlockmfn, \
IC_adm_dbstartcreatedictionry, IC_adm_dbstartreorgmaster, \
IC_adm_getclientlist, IC_adm_getclientslist, IC_adm_getprocesslist, \
IC_adm_setclientslist, IC_adm_dbdelete, IC_adm_dbstartreorgdictionry, \
IC_nooperation, IC_reset_delim, IC_delim_reset
from snakecat.comfort import connect, disconnect, read_record, \
get_max_mfn, hide_window, error_to_string, from_ansi, \
from_utf, search, search_format, format_record, fm, \
print_form, get_deleted_records, to_ansi, to_utf, from_irbis, \
to_irbis, read_terms, trim_prefix, read_file, clear_cache, \
write_file, unlock_record, actualize_record, actualize_database, \
create_record, add_field, write_record, replace_field, \
remove_field, empty_record, delete_record, undelete_record, \
mark_record_unlocked, record_locked, record_deleted, \
record_actualized, use_web_gateway
__all__ = ['NO_ERROR', 'ERR_USER', 'ERR_BUSY', 'ERR_UNKNOWN',
'ERR_BUFSIZE', 'TERM_NOT_EXISTS', 'TERM_LAST_IN_LIST',
'TERM_FIRST_IN_LIST', 'ERR_DBEWLOCK', 'ERR_RECLOCKED',
'VERSION_ERROR', 'READ_WRONG_MFN', 'REC_DELETE',
'REC_PHYS_DELETE', 'ERROR_CLIENT_FMT',
'SERVER_EXECUTE_ERROR', 'ANSWER_LENGTH_ERROR',
'WRONG_PROTOCOL', 'CLIENT_NOT_IN_LIST',
'CLIENT_NOT_IN_USE', 'CLIENT_IDENTIFIER_WRONG',
'CLIENT_LIST_OVERLOAD', 'CLIENT_ALREADY_EXISTS',
'CLIENT_NOT_ALLOWED', 'WRONG_PASSWORD', 'FILE_NOT_EXISTS',
'SERVER_OVERLOAD', 'PROCESS_ERROR', 'GLOBAL_ERROR',
'SYSPATH', 'DATAPATH', 'DBNPATH2', 'DBNPATH3', 'DBNPATH10',
'FULLTEXTPATH', 'INTERNALRESOURCEPATH', 'IRBIS_READER',
'IRBIS_ADMINISTRATOR', 'IRBIS_CATALOG', 'IRBIS_COMPLECT',
'IRBIS_BOOKLAND', 'IRBIS_BOOKPROVD',
'MAX_POSTINGS_IN_PACKET', 'ANSI', 'UTF',
'IC_reg', 'IC_unreg', 'IC_set_client_time_live',
'IC_set_show_waiting', 'IC_set_webserver', 'IC_set_webcgi',
'IC_set_blocksocket', 'IC_isbusy', 'IC_update_ini',
'IC_getresourse', 'IC_clearresourse', 'IC_getresoursegroup',
'IC_getbinaryresourse', 'IC_putresourse', 'IC_read',
'IC_readformat', 'IC_update', 'IC_updategroup',
'IC_runlock', 'IC_ifupdate', 'IC_maxmfn',
'IC_fieldn', 'IC_field', 'IC_fldadd', 'IC_fldrep',
'IC_nfields', 'IC_nocc', 'IC_fldtag', 'IC_fldempty',
'IC_changemfn', 'IC_recdel', 'IC_recundel', 'IC_recunlock',
'IC_getmfn', 'IC_recdummy', 'IC_isactualized', 'IC_islocked',
'IC_isdeleted', 'IC_nexttrm', 'IC_nexttrmgroup', 'IC_prevtrm',
'IC_prevtrmgroup', 'IC_posting', 'IC_postinggroup',
'IC_postingformat', 'IC_search', 'IC_searchscan',
'IC_sformat', 'IC_record_sformat', 'IC_sformatgroup',
'IC_print', 'IC_stat', 'IC_gbl', 'IC_adm_restartserver',
'IC_adm_getdeletedlist', 'IC_adm_getalldeletedlists',
'IC_adm_dbempty', 'IC_adm_newdb', 'IC_adm_dbunlock',
'IC_adm_dbunlockmfn', 'IC_adm_dbstartcreatedictionry',
'IC_adm_dbstartreorgmaster', 'IC_adm_getclientlist',
'IC_adm_getclientslist', 'IC_adm_getprocesslist',
'IC_adm_setclientslist', 'IC_adm_dbdelete',
'IC_adm_dbstartreorgdictionry', 'IC_nooperation',
'IC_delim_reset', 'IC_reset_delim',
'connect', 'disconnect', 'read_record', 'get_max_mfn',
'hide_window', 'error_to_string', 'from_ansi',
'from_utf', 'search', 'search_format', 'format_record',
'fm', 'print_form', 'get_deleted_records', 'to_ansi',
'to_utf', 'from_irbis', 'to_irbis', 'read_terms',
'trim_prefix', 'read_file', 'clear_cache', 'write_file',
'unlock_record', 'actualize_record', 'actualize_database',
'create_record', 'add_field', 'write_record', 'replace_field',
'remove_field', 'empty_record', 'delete_record',
'undelete_record', 'mark_record_unlocked', 'record_locked',
'record_actualized', 'record_deleted', 'use_web_gateway']
| 54.482759
| 75
| 0.709652
| 764
| 6,320
| 5.311518
| 0.281414
| 0.036964
| 0.010843
| 0.0069
| 0.883687
| 0.883687
| 0.883687
| 0.883687
| 0.883687
| 0.883687
| 0
| 0.004623
| 0.178481
| 6,320
| 115
| 76
| 54.956522
| 0.776965
| 0.019304
| 0
| 0
| 0
| 0
| 0.353796
| 0.045234
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.019802
| 0.029703
| 0
| 0.029703
| 0.039604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
56ad567bca8735420283245c23060916dd1bd113
| 2,383
|
py
|
Python
|
layers/prepost_layer.py
|
kimmo1019/Geformer
|
776c841c3a364d6b27dd93d66526ab6f03fde56d
|
[
"MIT"
] | null | null | null |
layers/prepost_layer.py
|
kimmo1019/Geformer
|
776c841c3a364d6b27dd93d66526ab6f03fde56d
|
[
"MIT"
] | null | null | null |
layers/prepost_layer.py
|
kimmo1019/Geformer
|
776c841c3a364d6b27dd93d66526ab6f03fde56d
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
class PrePostProcessingFnnWrapper(tf.keras.layers.Layer):
    """Wrapper class for Fnn that applies layer pre-processing and post-processing.

    Pre-processing is layer normalization; post-processing is dropout
    (during training) followed by a residual connection to the input.
    """

    def __init__(self, layer, params):
        super(PrePostProcessingFnnWrapper, self).__init__()
        self.layer = layer
        self.params = params
        self.postprocess_dropout = params["layer_postprocess_dropout"]

    def build(self, input_shape):
        # The normalization layer is created lazily when the wrapper is built.
        self.layer_norm = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, dtype="float32")
        super(PrePostProcessingFnnWrapper, self).build(input_shape)

    def get_config(self):
        return {"params": self.params}

    def call(self, x, *args, **kwargs):
        """Call the wrapped layer with the same parameters."""
        is_training = kwargs["training"]
        # Pre-processing: normalize the input.
        normalized = self.layer_norm(x)
        # Forward through the wrapped feed-forward layer.
        out = self.layer(normalized, *args, **kwargs)
        # Post-processing: dropout (training only) plus residual connection.
        if is_training:
            out = tf.nn.dropout(out, rate=self.postprocess_dropout)
        return x + out
class PrePostProcessingAttWrapper(tf.keras.layers.Layer):
    """Wrapper class for Attention that applies layer pre-processing and post-processing.

    Pre-processing is layer normalization; post-processing is dropout
    (during training) followed by a residual connection. Unlike the Fnn
    wrapper, the wrapped layer returns a pair (output, weights) and the
    weights are passed through unchanged.
    """

    def __init__(self, layer, params):
        super(PrePostProcessingAttWrapper, self).__init__()
        self.layer = layer
        self.params = params
        self.postprocess_dropout = params["layer_postprocess_dropout"]

    def build(self, input_shape):
        # The normalization layer is created lazily when the wrapper is built.
        self.layer_norm = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, dtype="float32")
        super(PrePostProcessingAttWrapper, self).build(input_shape)

    def get_config(self):
        return {"params": self.params}

    def call(self, x, *args, **kwargs):
        """Call the wrapped layer with the same parameters."""
        is_training = kwargs["training"]
        # Pre-processing: normalize the input.
        normalized = self.layer_norm(x)
        # Forward through the wrapped attention layer (returns output, weights).
        out, weights = self.layer(normalized, *args, **kwargs)
        # Post-processing: dropout (training only) plus residual connection.
        if is_training:
            out = tf.nn.dropout(out, rate=self.postprocess_dropout)
        return x + out, weights
| 34.536232
| 92
| 0.636173
| 260
| 2,383
| 5.7
| 0.242308
| 0.060729
| 0.035088
| 0.024292
| 0.860999
| 0.860999
| 0.860999
| 0.816464
| 0.816464
| 0.816464
| 0
| 0.004556
| 0.263114
| 2,383
| 69
| 93
| 34.536232
| 0.839408
| 0.219052
| 0
| 0.697674
| 0
| 0
| 0.050164
| 0.027263
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186047
| false
| 0
| 0.023256
| 0.046512
| 0.348837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
710f71af94d501bce8bf87272db1134c046767c1
| 241
|
py
|
Python
|
djavError/models/__init__.py
|
dasmith2/djavError
|
6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd
|
[
"MIT"
] | null | null | null |
djavError/models/__init__.py
|
dasmith2/djavError
|
6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd
|
[
"MIT"
] | null | null | null |
djavError/models/__init__.py
|
dasmith2/djavError
|
6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from djavError.models.error import Error
from djavError.models.long_request import LongRequest
from djavError.models.notification import Notification
from djavError.models.too_many_queries_request import TooManyQueriesRequest
| 40.166667
| 75
| 0.879668
| 30
| 241
| 6.933333
| 0.5
| 0.25
| 0.365385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004505
| 0.078838
| 241
| 5
| 76
| 48.2
| 0.932432
| 0.049793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
713a6598eca6da1253a19280057a68e80965c90f
| 17,570
|
py
|
Python
|
pybind/slxos/v16r_1_00b/rbridge_id/fabric/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/rbridge_id/fabric/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/rbridge_id/fabric/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import ecmp
import route
import port_channel
import login_policy
class fabric(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/fabric. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This function is used to configure fabric
parameters such as ECMP load balancing parameters
and multicast priority.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ecmp','__route','__port_channel','__login_policy',)
_yang_name = 'fabric'
_rest_name = 'fabric'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__port_channel = YANGDynClass(base=YANGListType("po_id",port_channel.port_channel, yang_name="port-channel", rest_name="port-channel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='po-id', extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}), is_container='list', yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='list', is_config=True)
self.__route = YANGDynClass(base=route.route, is_container='container', presence=False, yang_name="route", rest_name="route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure routing related parameters'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
self.__login_policy = YANGDynClass(base=login_policy.login_policy, is_container='container', presence=False, yang_name="login-policy", rest_name="login-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'switch_login_policy', u'info': u'Configure switch login parameters in a fabric.'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
self.__ecmp = YANGDynClass(base=ecmp.ecmp, is_container='container', presence=False, yang_name="ecmp", rest_name="ecmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ECMP parameters', u'callpoint': u'Ecmp_loadbalance', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
  def _path(self):
    """Return this container's YANG path as a list of path components.

    If a parent object exists the path is derived by appending this
    container's YANG name to the parent's path; otherwise the static
    root path for /rbridge-id/fabric is returned.
    """
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'rbridge-id', u'fabric']
  def _rest_path(self):
    """Return this container's REST path as a list of path components.

    Mirrors _path() but uses the REST names; if this node has no REST
    name of its own, the parent's REST path is returned unchanged.
    """
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'rbridge-id', u'fabric']
  def _get_ecmp(self):
    """
    Getter method for ecmp, mapped from YANG variable
    /rbridge_id/fabric/ecmp (container)

    YANG Description: This function allows to configure ECMP
    related parameters.
    """
    # Name-mangled attribute assigned in __init__ and by _set_ecmp/_unset_ecmp.
    return self.__ecmp
def _set_ecmp(self, v, load=False):
"""
Setter method for ecmp, mapped from YANG variable /rbridge_id/fabric/ecmp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ecmp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ecmp() directly.
YANG Description: This function allows to configure ECMP
related parameters.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ecmp.ecmp, is_container='container', presence=False, yang_name="ecmp", rest_name="ecmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ECMP parameters', u'callpoint': u'Ecmp_loadbalance', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ecmp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ecmp.ecmp, is_container='container', presence=False, yang_name="ecmp", rest_name="ecmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ECMP parameters', u'callpoint': u'Ecmp_loadbalance', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)""",
})
self.__ecmp = t
if hasattr(self, '_set'):
self._set()
def _unset_ecmp(self):
self.__ecmp = YANGDynClass(base=ecmp.ecmp, is_container='container', presence=False, yang_name="ecmp", rest_name="ecmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ECMP parameters', u'callpoint': u'Ecmp_loadbalance', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
  def _get_route(self):
    """
    Getter method for route, mapped from YANG variable
    /rbridge_id/fabric/route (container)

    YANG Description: Function to configure routing related information
    such as multicast priority.
    """
    # Name-mangled attribute assigned in __init__ and by _set_route/_unset_route.
    return self.__route
def _set_route(self, v, load=False):
"""
Setter method for route, mapped from YANG variable /rbridge_id/fabric/route (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route() directly.
YANG Description: Function to configure routing related information
such as multicast priority.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route.route, is_container='container', presence=False, yang_name="route", rest_name="route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure routing related parameters'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route.route, is_container='container', presence=False, yang_name="route", rest_name="route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure routing related parameters'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)""",
})
self.__route = t
if hasattr(self, '_set'):
self._set()
def _unset_route(self):
self.__route = YANGDynClass(base=route.route, is_container='container', presence=False, yang_name="route", rest_name="route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure routing related parameters'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
  def _get_port_channel(self):
    """
    Getter method for port_channel, mapped from YANG variable
    /rbridge_id/fabric/port_channel (list)
    """
    # Name-mangled attribute assigned in __init__ and by
    # _set_port_channel/_unset_port_channel.
    return self.__port_channel
def _set_port_channel(self, v, load=False):
"""
Setter method for port_channel, mapped from YANG variable /rbridge_id/fabric/port_channel (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_channel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_channel() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("po_id",port_channel.port_channel, yang_name="port-channel", rest_name="port-channel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='po-id', extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}), is_container='list', yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_channel must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("po_id",port_channel.port_channel, yang_name="port-channel", rest_name="port-channel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='po-id', extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}), is_container='list', yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='list', is_config=True)""",
})
self.__port_channel = t
if hasattr(self, '_set'):
self._set()
def _unset_port_channel(self):
self.__port_channel = YANGDynClass(base=YANGListType("po_id",port_channel.port_channel, yang_name="port-channel", rest_name="port-channel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='po-id', extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}), is_container='list', yang_name="port-channel", rest_name="port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlag load-balancing', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'node_vlag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='list', is_config=True)
def _get_login_policy(self):
"""
Getter method for login_policy, mapped from YANG variable /rbridge_id/fabric/login_policy (container)
YANG Description: This function control the switch login configurations
- Allow FLOGI/FDISC duplicate port WWN to login into switch.
"""
return self.__login_policy
  def _set_login_policy(self, v, load=False):
    """
    Setter method for login_policy, mapped from YANG variable /rbridge_id/fabric/login_policy (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_login_policy is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_login_policy() directly.
    YANG Description: This function control the switch login configurations
    - Allow FLOGI/FDISC duplicate port WWN to login into switch.
    """
    # Allow values carrying a user-type coercion hook to convert themselves
    # before validation.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Wrap the value in the generated YANG container class; raises if the
      # value is not compatible with the login_policy container type.
      t = YANGDynClass(v,base=login_policy.login_policy, is_container='container', presence=False, yang_name="login-policy", rest_name="login-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'switch_login_policy', u'info': u'Configure switch login parameters in a fabric.'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Re-raise with a structured error payload describing the expected
      # generated type (pyangbind convention).
      raise ValueError({
          'error-string': """login_policy must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=login_policy.login_policy, is_container='container', presence=False, yang_name="login-policy", rest_name="login-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'switch_login_policy', u'info': u'Configure switch login parameters in a fabric.'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)""",
        })

    self.__login_policy = t
    # Propagate the change upward if the parent hierarchy defines a _set hook.
    if hasattr(self, '_set'):
      self._set()
  def _unset_login_policy(self):
    # Reset login_policy to a fresh, empty generated YANG container,
    # discarding any configured state.
    # NOTE(review): the YANGDynClass arguments are auto-generated and must
    # mirror the initializer used elsewhere for this leaf — keep in sync.
    self.__login_policy = YANGDynClass(base=login_policy.login_policy, is_container='container', presence=False, yang_name="login-policy", rest_name="login-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'switch_login_policy', u'info': u'Configure switch login parameters in a fabric.'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='container', is_config=True)
ecmp = __builtin__.property(_get_ecmp, _set_ecmp)
route = __builtin__.property(_get_route, _set_route)
port_channel = __builtin__.property(_get_port_channel, _set_port_channel)
login_policy = __builtin__.property(_get_login_policy, _set_login_policy)
_pyangbind_elements = {'ecmp': ecmp, 'route': route, 'port_channel': port_channel, 'login_policy': login_policy, }
| 69.173228
| 927
| 0.736995
| 2,433
| 17,570
| 5.10522
| 0.084258
| 0.043475
| 0.045085
| 0.028983
| 0.816037
| 0.794058
| 0.785364
| 0.776749
| 0.771919
| 0.761613
| 0
| 0.000392
| 0.12988
| 17,570
| 253
| 928
| 69.44664
| 0.812075
| 0.158224
| 0
| 0.427632
| 0
| 0.026316
| 0.397863
| 0.159281
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098684
| false
| 0
| 0.078947
| 0
| 0.302632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8567ca0f8e58934e7652673eb4f35baca337e389
| 46
|
py
|
Python
|
xfconf/xfconf.py
|
cr33dog/pyxfce
|
ce3fa5e8c556e14a8127d67192484fe2f59b5595
|
[
"BSD-3-Clause"
] | 4
|
2017-08-23T06:32:19.000Z
|
2019-11-05T09:59:24.000Z
|
xfconf/xfconf.py
|
cr33dog/pyxfce
|
ce3fa5e8c556e14a8127d67192484fe2f59b5595
|
[
"BSD-3-Clause"
] | null | null | null |
xfconf/xfconf.py
|
cr33dog/pyxfce
|
ce3fa5e8c556e14a8127d67192484fe2f59b5595
|
[
"BSD-3-Clause"
] | 2
|
2017-09-03T17:32:12.000Z
|
2021-02-27T20:12:34.000Z
|
#!/usr/bin/env python
from _xfconf import *
| 9.2
| 21
| 0.695652
| 7
| 46
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 4
| 22
| 11.5
| 0.815789
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
85b568f36daeca0f9617378081e97b0fd8e6f9c3
| 40
|
py
|
Python
|
pipelines/__init__.py
|
awagot/CNN-POD
|
ebee234831ff58609563a925b7a47e0f4c30a16e
|
[
"CC0-1.0"
] | 2
|
2021-04-08T10:30:58.000Z
|
2021-08-18T11:23:05.000Z
|
pipelines/__init__.py
|
awagot/CNN-POD
|
ebee234831ff58609563a925b7a47e0f4c30a16e
|
[
"CC0-1.0"
] | 1
|
2021-04-07T21:28:59.000Z
|
2021-04-07T21:28:59.000Z
|
pipelines/__init__.py
|
awagot/CNN-POD
|
ebee234831ff58609563a925b7a47e0f4c30a16e
|
[
"CC0-1.0"
] | 2
|
2021-04-09T09:41:32.000Z
|
2021-04-16T13:09:43.000Z
|
from pipelines.default_pipeline import *
| 40
| 40
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.